added db connection check before adding a transaction to the database
and reconnection functionality if the connection to the database is lost

parent 7f6262b470
commit 9f0a9c939f
@@ -1,13 +1,25 @@
package handlers

import (
	"bufio"
	"context"
	"database/sql"
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"time"

	"gitea.futuresens.co.uk/futuresens/hardlink/db"
	log "github.com/sirupsen/logrus"
)

type preauthSpoolRecord struct {
	CreatedAt    time.Time         `json:"createdAt"`
	CheckoutDate string            `json:"checkoutDate"` // keep as received
	Fields       map[string]string `json:"fields"`       // ChipDNA result.Fields
}

func (app *App) getDB(ctx context.Context) (*sql.DB, error) {
	app.dbMu.Lock()
	defer app.dbMu.Unlock()
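
For reference, a preauthSpoolRecord as defined above serializes to a single NDJSON line. A sketch of what one spooled line might look like (the keys inside fields are purely illustrative, since the real keys come from the ChipDNA result.Fields map, and checkoutDate is kept exactly as received, so its format depends on the caller):

{"createdAt":"2026-01-26T14:07:31Z","checkoutDate":"2026-01-28","fields":{"TransactionId":"…","TransactionResult":"Approved"}}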

@@ -40,7 +52,6 @@ func (app *App) getDB(ctx context.Context) (*sql.DB, error) {
		return nil, err
	}

	// Optional ping (InitMSSQL already pings, but this keeps semantics explicit)
	pingCtx, cancel2 := context.WithTimeout(dialCtx, 1*time.Second)
	defer cancel2()
@@ -52,3 +63,144 @@ func (app *App) getDB(ctx context.Context) (*sql.DB, error) {
	app.db = dbConn
	return app.db, nil
}

func (app *App) spoolPath() string {
	// keep it near logs; adjust if you prefer a dedicated dir
	// ensure LogDir ends with separator in your config loader
	return filepath.Join(app.cfg.LogDir, "preauth_spool.ndjson")
}

// persistPreauth tries DB first; if DB is down or insert fails, it spools to file.
// It never returns an error to the caller (so your HTTP flow stays simple),
// but it logs failures.
func (app *App) persistPreauth(ctx context.Context, fields map[string]string, checkoutDate string) {
	// First, try DB (with your reconnect logic inside getDB)
	dbConn, err := app.getDB(ctx)
	if err == nil && dbConn != nil {
		if err := db.InsertPreauth(ctx, dbConn, fields, checkoutDate); err == nil {
			// opportunistic drain once DB is alive
			go app.drainPreauthSpool(context.Background())
			return
		} else {
			log.WithError(err).Warn("DB insert failed; will spool preauth")
		}
	} else {
		log.WithError(err).Warn("DB unavailable; will spool preauth")
	}

	// Fallback: spool to file
	rec := preauthSpoolRecord{
		CreatedAt:    time.Now().UTC(),
		CheckoutDate: checkoutDate,
		Fields:       fields,
	}
	if spErr := app.spoolPreauth(rec); spErr != nil {
		log.WithError(spErr).Error("failed to spool preauth")
	}
}

// append one line JSON (NDJSON)
func (app *App) spoolPreauth(rec preauthSpoolRecord) error {
	p := app.spoolPath()

	f, err := os.OpenFile(p, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		return fmt.Errorf("open spool file: %w", err)
	}
	defer f.Close()

	b, err := json.Marshal(rec)
	if err != nil {
		return fmt.Errorf("marshal spool record: %w", err)
	}

	if _, err := f.Write(append(b, '\n')); err != nil {
		return fmt.Errorf("write spool record: %w", err)
	}

	return f.Sync() // ensure it's on disk
}

// Drain spool into DB.
// Strategy: read all lines, insert each; keep failures in a temp file; then replace original.
func (app *App) drainPreauthSpool(ctx context.Context) {
	dbConn, err := app.getDB(ctx)
	if err != nil {
		return // still down, nothing to do
	}

	spool := app.spoolPath()
	in, err := os.Open(spool)
	if err != nil {
		// no spool is fine
		return
	}
	defer in.Close()

	tmp := spool + ".tmp"
	out, err := os.OpenFile(tmp, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
	if err != nil {
		log.WithError(err).Warn("drain spool: open tmp failed")
		return
	}
	defer out.Close()

	sc := bufio.NewScanner(in)
	// allow long lines if receipts ever sneak in (shouldn't, but safe)
	buf := make([]byte, 0, 64*1024)
	sc.Buffer(buf, 2*1024*1024)

	var (
		okCount   int
		failCount int
	)

	for sc.Scan() {
		line := sc.Bytes()
		if len(line) == 0 {
			continue
		}

		var rec preauthSpoolRecord
		if err := json.Unmarshal(line, &rec); err != nil {
			// malformed line: keep it so we don't lose evidence
			_, _ = out.Write(append(line, '\n'))
			failCount++
			continue
		}

		// attempt insert
		if err := db.InsertPreauth(ctx, dbConn, rec.Fields, rec.CheckoutDate); err != nil {
			// DB still flaky or data issue: keep it for later retry
			_, _ = out.Write(append(line, '\n'))
			failCount++
			continue
		}

		okCount++
	}

	if err := sc.Err(); err != nil {
		log.WithError(err).Warn("drain spool: scanner error")
		// best effort; do not replace spool
		return
	}

	_ = out.Sync()

	// Replace original spool with temp (atomic on Windows is best-effort; still OK here)
	_ = in.Close()
	_ = out.Close()

	if err := os.Rename(tmp, spool); err != nil {
		log.WithError(err).Warn("drain spool: rename failed")
		return
	}

	if okCount > 0 || failCount > 0 {
		log.WithFields(log.Fields{
			"inserted":  okCount,
			"remaining": failCount,
		}).Info("preauth spool drained")
	}
}
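
As committed, the spool is only drained opportunistically: drainPreauthSpool runs after the next successful insert inside persistPreauth, so records sit in the NDJSON file until new preauth traffic arrives. If they should also be retried during quiet periods, a small timer loop could call the same method. A minimal sketch under that assumption (startPreauthSpoolDrainer and the interval are hypothetical, not part of this commit):

// startPreauthSpoolDrainer retries spooled preauth records on a fixed interval,
// independent of incoming traffic. Hypothetical helper, not part of this commit.
func (app *App) startPreauthSpoolDrainer(ctx context.Context, every time.Duration) {
	go func() {
		t := time.NewTicker(every)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				app.drainPreauthSpool(ctx)
			}
		}
	}()
}

Started once from main (for example with a 5-minute interval), this replays spooled records soon after the database comes back, even if no new preauth requests arrive in the meantime.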

@@ -2,6 +2,7 @@ package handlers

import (
	"bytes"
	"context"
	"database/sql"
	"encoding/json"
	"encoding/xml"
@@ -15,7 +16,6 @@ import (

	"gitea.futuresens.co.uk/futuresens/cmstypes"
	"gitea.futuresens.co.uk/futuresens/hardlink/config"
	"gitea.futuresens.co.uk/futuresens/hardlink/db"
	"gitea.futuresens.co.uk/futuresens/hardlink/dispenser"
	"gitea.futuresens.co.uk/futuresens/hardlink/errorhandlers"
	"gitea.futuresens.co.uk/futuresens/hardlink/lockserver"
@@ -137,14 +137,7 @@ func (app *App) takePreauthorization(w http.ResponseWriter, r *http.Request) {
 	theResponse.Status = result.Status
 	theResponse.Data, save = payment.BuildPreauthRedirectURL(result.Fields)
 	if save {
-		dbConn, err := app.getDB(r.Context())
-		if err != nil {
-			log.WithError(err).Warn("DB unavailable; preauth not stored")
-		} else {
-			if err := db.InsertPreauth(r.Context(), dbConn, result.Fields, theRequest.CheckoutDate); err != nil {
-				log.WithError(err).Warn("Failed to store preauth in DB")
-			}
-		}
+		go app.persistPreauth(context.Background(), result.Fields, theRequest.CheckoutDate)
 	}

 	writeTransactionResult(w, http.StatusOK, theResponse)
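
One detail worth noting in the hunk above: persistPreauth is launched with context.Background() rather than r.Context(), because the request context is cancelled as soon as the handler returns, which would abort the background insert or spool write. If leaving that goroutine unbounded is a concern, a detached but time-limited context is a small variation (a sketch only; the 10-second budget is an assumption, not something this commit sets):

go func() {
	// Detached from the request so it survives the response,
	// but still bounded in time (assumed budget, not part of this commit).
	pctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	app.persistPreauth(pctx, result.Fields, theRequest.CheckoutDate)
}()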

@@ -2,6 +2,14 @@

builtVersion is a const in main.go

#### 1.1.0 - 26 January 2026
divided `/starttransaction` endpoint into two separate endpoints:
`/takepreauth` to request preauthorization payment
`/takepayment` to request taking payment
added preauth releaser functionality to release preauthorization payments after a defined time period
added db connection check before adding a transaction to the database
and reconnection functionality if the connection to the database is lost

#### 1.0.30 - 09 January 2026
improved logging for preauth releaser