Merge pull request #76 from taosdata/3.0
merge long sql fix
sheyanjie-qq authored Dec 15, 2023
2 parents 1d78ddd + b028aed commit 03bd1f5
Showing 2 changed files with 34 additions and 17 deletions.
43 changes: 30 additions & 13 deletions api/audit.go
@@ -19,6 +19,8 @@ var auditLogger = log.GetLogger("audit")
 
 const MAX_DETAIL_LEN = 50000
 
+var MAX_SQL_LEN = 1000 * 1000
+
 type Audit struct {
 	username string
 	password string
@@ -103,18 +105,14 @@ func (a *Audit) handleBatchFunc() gin.HandlerFunc {
 			return
 		}
 
-		sql := parseBatchSql(auditArray.Records)
+		err = handleBatchRecord(auditArray.Records, a.conn)
+
 		if err != nil {
-			auditLogger.WithError(err).Errorf("## parse sql error")
-			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("timestamp format error. %s", err)})
+			auditLogger.WithError(err).Errorf("## process records error")
+			c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)})
 			return
 		}
 
-		if _, err = a.conn.Exec(context.Background(), sql); err != nil {
-			auditLogger.WithError(err).Error("##save audit data error", "sql", sql)
-			c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("save audit data error: %s", err)})
-			return
-		}
 		c.JSON(http.StatusOK, gin.H{})
 	}
 }
@@ -179,21 +177,40 @@ func parseSql(audit AuditInfo) string {
 		getTableName(audit), audit.ClusterID, audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
 }
 
-func parseBatchSql(auditArray []AuditInfo) string {
+func handleBatchRecord(auditArray []AuditInfo, conn *db.Connector) error {
+
 	var builder strings.Builder
 	var head = fmt.Sprintf(
 		"insert into %s using operations tags ('%s') values",
 		getTableName(auditArray[0]), auditArray[0].ClusterID)
-	builder.WriteString(head)
 
+	builder.WriteString(head)
 	for _, audit := range auditArray {
+
 		details := handleDetails(audit.Details)
-		varluesStr := fmt.Sprintf(
+		valuesStr := fmt.Sprintf(
 			"(%s, '%s', '%s', '%s', '%s', '%s', '%s') ",
 			audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details)
-		builder.WriteString(varluesStr)
+
+		if (builder.Len() + len(valuesStr)) > MAX_SQL_LEN {
+			sql := builder.String()
+			if _, err := conn.Exec(context.Background(), sql); err != nil {
+				return err
+			}
+			builder.Reset()
+			builder.WriteString(head)
+		}
+		builder.WriteString(valuesStr)
 	}
-	return builder.String()
+
+	if builder.Len() > len(head) {
+		sql := builder.String()
+		if _, err := conn.Exec(context.Background(), sql); err != nil {
+			return err
+		}
+	}
+
+	return nil
 }
 
 func getTableName(audit AuditInfo) string {
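
The heart of the fix is the chunking loop in handleBatchRecord: instead of concatenating every record into a single statement, it flushes the builder whenever the next values clause would push the statement past MAX_SQL_LEN, starts a new statement with the same "insert into ... using operations tags (...) values" head, and flushes whatever remains after the loop. Below is a minimal standalone sketch of the same pattern, assuming a placeholder exec callback in place of the real *db.Connector; batchInsert, exec, and the demo head/values strings are illustrative, not taken from the repository.

package main

import (
	"fmt"
	"strings"
)

// maxSQLLen mirrors MAX_SQL_LEN in api/audit.go (1000 * 1000 by default);
// it is kept small here so the demo produces several chunks.
var maxSQLLen = 200

// batchInsert appends values clauses to one statement and flushes through
// exec whenever the next clause would push the statement past maxSQLLen.
// exec stands in for conn.Exec in the real handleBatchRecord.
func batchInsert(head string, values []string, exec func(sql string) error) error {
	var builder strings.Builder
	builder.WriteString(head)

	for _, v := range values {
		if builder.Len()+len(v) > maxSQLLen {
			if err := exec(builder.String()); err != nil {
				return err
			}
			builder.Reset()
			builder.WriteString(head) // every chunk repeats the insert head
		}
		builder.WriteString(v)
	}

	// flush the last chunk only if at least one values clause follows the head
	if builder.Len() > len(head) {
		if err := exec(builder.String()); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	head := "insert into t_demo using operations tags ('demo') values"
	var values []string
	for i := 0; i < 5; i++ {
		values = append(values, fmt.Sprintf("(%s, 'root', 'op', 'db', '', 'addr', 'detail') ", "1702548856940013848"))
	}
	_ = batchInsert(head, values, func(sql string) error {
		fmt.Printf("exec %d bytes: %s\n", len(sql), sql)
		return nil
	})
}

With maxSQLLen at 200, the five demo records come out as three INSERT statements (2 + 2 + 1 values clauses); the real code relies on the same behavior to keep each generated statement bounded in size.
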
8 changes: 4 additions & 4 deletions api/audit_test.go
@@ -88,9 +88,9 @@ func TestAudit(t *testing.T) {
 		})
 	}
 
+	MAX_SQL_LEN = 300
 	// test audit batch
-	input := `{"records": [{"timestamp": "1699839716440000000", "cluster_id": "cluster_id_batch", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"},` +
-		`{"timestamp": "1699839716441000000", "cluster_id": "cluster_id_batch", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"}]}`
+	input := `{"records":[{"timestamp":"1702548856940013848","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d630302"},{"timestamp":"1702548856939746458","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45230","db":"test","resource":"","details":"d130277"},{"timestamp":"1702548856939586665","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5268"},{"timestamp":"1702548856939528940","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50222","db":"test","resource":"","details":"d255282"},{"timestamp":"1702548856939336371","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45126","db":"test","resource":"","details":"d755297"},{"timestamp":"1702548856939075131","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d380325"},{"timestamp":"1702548856938640661","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45152","db":"test","resource":"","details":"d255281"},{"timestamp":"1702548856938505795","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d130276"},{"timestamp":"1702548856938363319","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45178","db":"test","resource":"","details":"d755296"},{"timestamp":"1702548856938201478","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d380324"},{"timestamp":"1702548856937740618","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5266"}]}`
 
 	t.Run("testbatch", func(t *testing.T) {

@@ -109,8 +109,8 @@ func TestAudit(t *testing.T) {
 		router.ServeHTTP(w, req)
 		assert.Equal(t, 200, w.Code)
 
-		data, err := conn.Query(context.Background(), "select ts, details from audit.operations where cluster_id='cluster_id_batch'")
+		data, err := conn.Query(context.Background(), "select ts, details from audit.operations where cluster_id='8468922059162439502'")
 		assert.NoError(t, err)
-		assert.Equal(t, 2, len(data.Data))
+		assert.Equal(t, 11, len(data.Data))
 	})
 }
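
The updated test drops MAX_SQL_LEN to 300 before posting an 11-record batch, so handleBatchRecord has to split the payload across several INSERT statements; the final query then verifies that all 11 rows still arrive in audit.operations. A rough back-of-the-envelope check of why 300 forces the split follows; the head string is only an approximation of what getTableName produces, and the byte counts are illustrative.

package main

import "fmt"

func main() {
	// Approximate statement head; the real table name comes from getTableName().
	head := "insert into operations_demo using operations tags ('8468922059162439502') values"
	// One values clause built from a record in the test payload.
	values := "(1702548856940013848, 'root', 'createTable', 'test', '', '173.50.0.7:45166', 'd630302') "
	maxSQLLen := 300

	perStatement := (maxSQLLen - len(head)) / len(values) // ~2 records fit per statement
	statements := (11 + perStatement - 1) / perStatement  // ceil(11 / 2) = 6

	fmt.Printf("head=%d bytes, one values clause=%d bytes\n", len(head), len(values))
	fmt.Printf("~%d records per statement -> roughly %d INSERT statements for 11 records\n",
		perStatement, statements)
}
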
