add api-logs-limit flag #777

Merged
merged 5 commits on Jun 25, 2024
Changes from 1 commit
api/api.go (5 changes: 3 additions & 2 deletions)
@@ -49,6 +49,7 @@ func New(
allowCustomTracer bool,
enableReqLogger bool,
enableMetrics bool,
logsLimit uint64,
) (http.HandlerFunc, func()) {
origins := strings.Split(strings.TrimSpace(allowedOrigins), ",")
for i, o := range origins {
@@ -72,9 +73,9 @@ func New(
Mount(router, "/accounts")

if !skipLogs {
events.New(repo, logDB).
events.New(repo, logDB, logsLimit).
Mount(router, "/logs/event")
transfers.New(repo, logDB).
transfers.New(repo, logDB, logsLimit).
Mount(router, "/logs/transfer")
}
blocks.New(repo, bft).
api/events/events.go (10 changes: 6 additions & 4 deletions)
@@ -17,21 +17,23 @@ import (
)

type Events struct {
repo *chain.Repository
db *logdb.LogDB
repo *chain.Repository
db *logdb.LogDB
limit uint64
}

func New(repo *chain.Repository, db *logdb.LogDB) *Events {
func New(repo *chain.Repository, db *logdb.LogDB, logsLimit uint64) *Events {
return &Events{
repo,
db,
logsLimit,
}
}

// Filter query events with option
func (e *Events) filter(ctx context.Context, ef *EventFilter) ([]*FilteredEvent, error) {
chain := e.repo.NewBestChain()
filter, err := convertEventFilter(chain, ef)
filter, err := convertEventFilter(chain, ef, e.limit)
if err != nil {
return nil, err
}
api/events/events_test.go (10 changes: 9 additions & 1 deletion)
@@ -26,6 +26,8 @@ import (
"github.com/vechain/thor/v2/tx"
)

const defaultLogLimit uint64 = 1000

var ts *httptest.Server

var (
@@ -142,7 +144,7 @@ func initEventServer(t *testing.T, logDb *logdb.LogDB) {

repo, _ := chain.NewRepository(muxDb, b)

events.New(repo, logDb).Mount(router, "/events")
events.New(repo, logDb, defaultLogLimit).Mount(router, "/events")
ts = httptest.NewServer(router)
}

@@ -210,3 +212,9 @@ func newReceipt() *tx.Receipt {
},
}
}

func TestNormalize(t *testing.T) {
assert.Equal(t, &logdb.Options{Offset: 0, Limit: 10}, events.NormalizeOptions(nil, 10))
assert.Equal(t, &logdb.Options{Offset: 10, Limit: 5}, events.NormalizeOptions(&logdb.Options{Offset: 10, Limit: 5}, 10))
assert.Equal(t, &logdb.Options{Offset: 10, Limit: 10}, events.NormalizeOptions(&logdb.Options{Offset: 10, Limit: 15}, 10))
}
api/events/types.go (18 changes: 16 additions & 2 deletions)
@@ -101,14 +101,14 @@ type EventFilter struct {
Order logdb.Order `json:"order"`
}

func convertEventFilter(chain *chain.Chain, filter *EventFilter) (*logdb.EventFilter, error) {
func convertEventFilter(chain *chain.Chain, filter *EventFilter, logsLimit uint64) (*logdb.EventFilter, error) {
rng, err := ConvertRange(chain, filter.Range)
if err != nil {
return nil, err
}
f := &logdb.EventFilter{
Range: rng,
Options: filter.Options,
Options: NormalizeOptions(filter.Options, logsLimit),
Order: filter.Order,
}
if len(filter.CriteriaSet) > 0 {
@@ -187,3 +187,17 @@ func ConvertRange(chain *chain.Chain, r *Range) (*logdb.Range, error) {
To: uint32(r.To),
}, nil
}

func NormalizeOptions(ops *logdb.Options, defaultLimit uint64) *logdb.Options {
if ops == nil {
return &logdb.Options{
Offset: 0,
Limit: defaultLimit,
}
}

if ops.Limit > defaultLimit {
ops.Limit = defaultLimit
}
return ops
}
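
Note: NormalizeOptions clamps only the Limit field; the Offset is passed through untouched, and a nil option set falls back to {Offset: 0, Limit: defaultLimit}. A minimal standalone sketch of the same rule (the local Options struct is a stand-in for logdb.Options; uint64 field types are assumed):

package main

import "fmt"

// Options mirrors logdb.Options for illustration only.
type Options struct {
    Offset uint64
    Limit  uint64
}

// normalize applies the same rule as events.NormalizeOptions: nil falls
// back to the default limit, and an oversized limit is clamped down to it.
func normalize(ops *Options, defaultLimit uint64) *Options {
    if ops == nil {
        return &Options{Offset: 0, Limit: defaultLimit}
    }
    if ops.Limit > defaultLimit {
        ops.Limit = defaultLimit
    }
    return ops
}

func main() {
    fmt.Println(normalize(nil, 10))                             // &{0 10}
    fmt.Println(normalize(&Options{Offset: 10, Limit: 5}, 10))  // &{10 5}
    fmt.Println(normalize(&Options{Offset: 10, Limit: 15}, 10)) // &{10 10}
}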
api/transfers/transfers.go (10 changes: 6 additions & 4 deletions)
@@ -18,14 +18,16 @@ import (
)

type Transfers struct {
repo *chain.Repository
db *logdb.LogDB
repo *chain.Repository
db *logdb.LogDB
limit uint64
}

func New(repo *chain.Repository, db *logdb.LogDB) *Transfers {
func New(repo *chain.Repository, db *logdb.LogDB, logsLimit uint64) *Transfers {
return &Transfers{
repo,
db,
logsLimit,
}
}

@@ -39,7 +41,7 @@ func (t *Transfers) filter(ctx context.Context, filter *TransferFilter) ([]*Filt
transfers, err := t.db.FilterTransfers(ctx, &logdb.TransferFilter{
CriteriaSet: filter.CriteriaSet,
Range: rng,
Options: filter.Options,
Options: events.NormalizeOptions(filter.Options, t.limit),
Order: filter.Order,
})
if err != nil {
api/transfers/transfers_test.go (4 changes: 3 additions & 1 deletion)
@@ -28,6 +28,8 @@ import (
"github.com/vechain/thor/v2/tx"
)

const defaultLogLimit uint64 = 1000

var ts *httptest.Server

func TestEmptyTransfers(t *testing.T) {
@@ -133,7 +135,7 @@ func initTransferServer(t *testing.T, logDb *logdb.LogDB) {

repo, _ := chain.NewRepository(muxDb, b)

transfers.New(repo, logDb).Mount(router, "/transfers")
transfers.New(repo, logDb, defaultLogLimit).Mount(router, "/transfers")
ts = httptest.NewServer(router)
}

cmd/thor/flags.go (5 changes: 5 additions & 0 deletions)
@@ -59,6 +59,11 @@ var (
Name: "api-allow-custom-tracer",
Usage: "allow custom JS tracer to be used tracer API",
}
apiLogsLimitFlag = cli.IntFlag{
Name: "api-logs-limit",
Value: 1000,
Usage: "limit the number of logs returned by /logs API",
}
enableAPILogsFlag = cli.BoolFlag{
Name: "enable-api-logs",
Usage: "enables API requests logging",
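
The new flag is wired into both the full-node command and the solo command (see cmd/thor/main.go below), so the cap can be tuned at startup and defaults to 1000 when the flag is omitted. An illustrative invocation, assuming the usual thor binary name and solo subcommand:

    thor --api-logs-limit 500
    thor solo --api-logs-limit 500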
cmd/thor/main.go (4 changes: 4 additions & 0 deletions)
@@ -80,6 +80,7 @@ func main() {
apiBacktraceLimitFlag,
apiAllowCustomTracerFlag,
enableAPILogsFlag,
apiLogsLimitFlag,
verbosityFlag,
maxPeersFlag,
p2pPortFlag,
@@ -109,6 +110,7 @@
apiBacktraceLimitFlag,
apiAllowCustomTracerFlag,
enableAPILogsFlag,
apiLogsLimitFlag,
onDemandFlag,
blockInterval,
persistFlag,
@@ -234,6 +236,7 @@ func defaultAction(ctx *cli.Context) error {
ctx.Bool(apiAllowCustomTracerFlag.Name),
ctx.Bool(enableAPILogsFlag.Name),
ctx.Bool(enableMetricsFlag.Name),
uint64(ctx.Int(apiLogsLimitFlag.Name)),
)
defer func() { log.Info("closing API..."); apiCloser() }()

@@ -363,6 +366,7 @@ func soloAction(ctx *cli.Context) error {
ctx.Bool(apiAllowCustomTracerFlag.Name),
ctx.Bool(enableAPILogsFlag.Name),
ctx.Bool(enableMetricsFlag.Name),
uint64(ctx.Int(apiLogsLimitFlag.Name)),
)
defer func() { log.Info("closing API..."); apiCloser() }()

logdb/logdb.go (49 changes: 11 additions & 38 deletions)
@@ -19,8 +19,7 @@ import (
)

const (
refIDQuery = "(SELECT id FROM ref WHERE data=?)"
limitThreshold = 1000
refIDQuery = "(SELECT id FROM ref WHERE data=?)"
)

type LogDB struct {
@@ -108,14 +107,8 @@
FROM (%v) e
LEFT JOIN ref r7 ON e.topic3 = r7.id
LEFT JOIN ref r8 ON e.topic4 = r8.id`

if filter == nil { // default query filtering
filter = &EventFilter{
Options: &Options{
Offset: 0,
Limit: limitThreshold,
},
Order: "desc",
}
if filter == nil {
return db.queryEvents(ctx, fmt.Sprintf(query, "event"))
}

var (
@@ -153,17 +146,10 @@
}

// if there is limit option, set order inside subquery
subQuery += " LIMIT ?, ?" // all queries are bounded to a max of 1000 results
if filter.Options != nil && filter.Options.Limit > 1000 {
// offset could have been specified
filter.Options.Limit = limitThreshold
} else if filter.Options == nil {
filter.Options = &Options{
Offset: 0,
Limit: limitThreshold,
}
if filter.Options != nil {
subQuery += " LIMIT ?, ?"
args = append(args, filter.Options.Offset, filter.Options.Limit)
}
args = append(args, filter.Options.Offset, filter.Options.Limit)

subQuery = "SELECT e.* FROM (" + subQuery + ") s LEFT JOIN event e ON s.seq = e.seq"

@@ -179,14 +165,8 @@
FROM (%v) t
LEFT JOIN ref r3 ON t.sender = r3.id
LEFT JOIN ref r4 ON t.recipient = r4.id`

if filter == nil { // default query filtering
filter = &TransferFilter{
Options: &Options{
Offset: 0,
Limit: limitThreshold,
},
Order: "desc",
}
if filter == nil {
return db.queryTransfers(ctx, fmt.Sprintf(query, "transfer"))
}

var (
@@ -223,17 +203,10 @@
}

// if there is limit option, set order inside subquery
subQuery += " LIMIT ?, ?"
if filter.Options != nil && filter.Options.Limit > limitThreshold {
// offset could have been specified
filter.Options.Limit = limitThreshold
} else if filter.Options == nil {
filter.Options = &Options{
Offset: 0,
Limit: limitThreshold,
}
if filter.Options != nil {
subQuery += " LIMIT ?, ?"
args = append(args, filter.Options.Offset, filter.Options.Limit)
}
args = append(args, filter.Options.Offset, filter.Options.Limit)

subQuery = "SELECT e.* FROM (" + subQuery + ") s LEFT JOIN transfer e ON s.seq = e.seq"

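
With the hard-coded limitThreshold removed, the DB layer no longer imposes a cap of its own: a nil filter (or nil filter.Options) now returns every matching row, and the LIMIT clause is appended only when options are supplied, so the capping responsibility moves up to the API layer via NormalizeOptions. Note that SQLite's comma form "LIMIT x, y" means offset x, row count y, which matches the order in which the arguments are appended. A small self-contained sketch of the clause construction (the query text and helper name are illustrative only):

package main

import "fmt"

// Options stands in for logdb.Options in this illustration.
type Options struct {
    Offset uint64
    Limit  uint64
}

// withPagination mirrors the new behaviour: the LIMIT clause is only added
// when the caller supplied options, and the bound args follow SQLite's
// "LIMIT offset, count" ordering.
func withPagination(subQuery string, opts *Options, args []interface{}) (string, []interface{}) {
    if opts != nil {
        subQuery += " LIMIT ?, ?"
        args = append(args, opts.Offset, opts.Limit)
    }
    return subQuery, args
}

func main() {
    q, args := withPagination("SELECT e.seq FROM event e ORDER BY seq", &Options{Offset: 10, Limit: 5}, nil)
    fmt.Println(q, args) // ... LIMIT ?, ? [10 5]

    q, args = withPagination("SELECT e.seq FROM event e ORDER BY seq", nil, nil)
    fmt.Println(q, args) // no LIMIT clause when the filter carries no options
}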
logdb/logdb_test.go (24 changes: 9 additions & 15 deletions)
@@ -132,7 +132,7 @@ func TestEvents(t *testing.T) {
var allEvents eventLogs
var allTransfers transferLogs

for i := 0; i < 2000; i++ {
for i := 0; i < 100; i++ {
b = new(block.Builder).
ParentID(b.Header().ID()).
Transaction(newTx()).
@@ -187,14 +187,11 @@
arg *logdb.EventFilter
want eventLogs
}{
{"query all events", &logdb.EventFilter{}, allEvents[:1000]},
{"query all events with nil option", nil, allEvents.Reverse()[:1000]},
{"query all events asc", &logdb.EventFilter{Order: logdb.ASC}, allEvents[:1000]},
{"query all events desc", &logdb.EventFilter{Order: logdb.DESC}, allEvents.Reverse()[:1000]},
{"query all events", &logdb.EventFilter{}, allEvents},
{"query all events with nil option", nil, allEvents},
{"query all events asc", &logdb.EventFilter{Order: logdb.ASC}, allEvents},
{"query all events desc", &logdb.EventFilter{Order: logdb.DESC}, allEvents.Reverse()},
{"query all events limit offset", &logdb.EventFilter{Options: &logdb.Options{Offset: 1, Limit: 10}}, allEvents[1:11]},
{"query all transfers offset", &logdb.EventFilter{Options: &logdb.Options{Offset: 1500, Limit: 10000}, Order: logdb.ASC}, allEvents[1500:2500]},
{"query all events outsized limit ", &logdb.EventFilter{Options: &logdb.Options{Limit: 2000}}, allEvents[:1000]},
{"query all events outsized limit offset", &logdb.EventFilter{Options: &logdb.Options{Offset: 2, Limit: 2000}}, allEvents[2:1002]},
{"query all events range", &logdb.EventFilter{Range: &logdb.Range{From: 10, To: 20}}, allEvents.Filter(func(ev *logdb.Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 })},
{"query events with range and desc", &logdb.EventFilter{Range: &logdb.Range{From: 10, To: 20}, Order: logdb.DESC}, allEvents.Filter(func(ev *logdb.Event) bool { return ev.BlockNumber >= 10 && ev.BlockNumber <= 20 }).Reverse()},
{"query events with limit with desc", &logdb.EventFilter{Order: logdb.DESC, Options: &logdb.Options{Limit: 10}}, allEvents.Reverse()[0:10]},
@@ -221,14 +218,11 @@
arg *logdb.TransferFilter
want transferLogs
}{
{"query all transfers", &logdb.TransferFilter{}, allTransfers[:1000]},
{"query all transfers with nil option", nil, allTransfers.Reverse()[:1000]},
{"query all transfers asc", &logdb.TransferFilter{Order: logdb.ASC}, allTransfers[:1000]},
{"query all transfers desc", &logdb.TransferFilter{Order: logdb.DESC}, allTransfers.Reverse()[:1000]},
{"query all transfers", &logdb.TransferFilter{}, allTransfers},
{"query all transfers with nil option", nil, allTransfers},
{"query all transfers asc", &logdb.TransferFilter{Order: logdb.ASC}, allTransfers},
{"query all transfers desc", &logdb.TransferFilter{Order: logdb.DESC}, allTransfers.Reverse()},
{"query all transfers limit offset", &logdb.TransferFilter{Options: &logdb.Options{Offset: 1, Limit: 10}}, allTransfers[1:11]},
{"query all transfers offset", &logdb.TransferFilter{Options: &logdb.Options{Offset: 1500, Limit: 10000}, Order: logdb.ASC}, allTransfers[1500:2500]},
{"query all transfers outsized limit ", &logdb.TransferFilter{Options: &logdb.Options{Limit: 2000}}, allTransfers[:1000]},
{"query all transfers outsized limit offset", &logdb.TransferFilter{Options: &logdb.Options{Offset: 2, Limit: 2000}}, allTransfers[2:1002]},
{"query all transfers range", &logdb.TransferFilter{Range: &logdb.Range{From: 10, To: 20}}, allTransfers.Filter(func(tr *logdb.Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 })},
{"query transfers with range and desc", &logdb.TransferFilter{Range: &logdb.Range{From: 10, To: 20}, Order: logdb.DESC}, allTransfers.Filter(func(tr *logdb.Transfer) bool { return tr.BlockNumber >= 10 && tr.BlockNumber <= 20 }).Reverse()},
{"query transfers with limit with desc", &logdb.TransferFilter{Order: logdb.DESC, Options: &logdb.Options{Limit: 10}}, allTransfers.Reverse()[0:10]},