From bdb8534a21fe7bc7dfb17fd8fcae21a7096f4788 Mon Sep 17 00:00:00 2001
From: Gus Eggert
Date: Thu, 21 Apr 2022 15:11:10 -0400
Subject: [PATCH] feat: persist limit changes to config

This changes the "ipfs swarm limit" command so that when limit changes
are applied via the command line, they are persisted to the repo config
and remain in effect when the daemon restarts.

This also removes limit.json support to simplify the behavior. The
schema for Swarm.ResourceMgr.Limits is exactly the same as limit.json,
so an existing limit.json can be merged into the IPFS config with
something like:

  jq ".Swarm.ResourceMgr.Limits = $(cat limit.json)" ~/.ipfs/config > config.tmp && mv config.tmp ~/.ipfs/config

This upgrades go-libp2p-resource-manager to v0.3.0, which exports the
config schema so that we don't have to maintain our own copy of it.
---
 config/swarm.go                    | 46 ++---
 core/commands/swarm.go             |  5 +-
 core/node/libp2p/rcmgr.go          | 81 ++++++++++++++++--------------
 go.mod                             |  2 +-
 go.sum                             |  3 +-
 test/sharness/t0139-swarm-rcmgr.sh | 68 +++++++++++++++----------
 6 files changed, 96 insertions(+), 109 deletions(-)

diff --git a/config/swarm.go b/config/swarm.go
index be420298497c..83f42a295437 100644
--- a/config/swarm.go
+++ b/config/swarm.go
@@ -1,5 +1,7 @@
 package config
 
+import rcmgr "github.com/libp2p/go-libp2p-resource-manager"
+
 type SwarmConfig struct {
 	// AddrFilters specifies a set libp2p addresses that we should never
 	// dial or receive connections from.
@@ -137,10 +139,8 @@ type ConnMgr struct {
 //
 type ResourceMgr struct {
 	// Enables the Network Resource Manager feature
-	Enabled Flag `json:",omitempty"`
-
-	/* TODO: decide if and how we want to expose limits in our config
-	Limits *ResourceMgrScopeConfig `json:",omitempty"` */
+	Enabled Flag `json:",omitempty"`
+	Limits *rcmgr.BasicLimiterConfig `json:",omitempty"`
 }
 
 const (
@@ -150,41 +150,3 @@ const (
 	ResourceMgrProtocolScopePrefix = "proto:"
 	ResourceMgrPeerScopePrefix = "peer:"
 )
-
-/* TODO: decide if and how we want to expose limits in our config
-type ResourceMgrLimitsConfig struct {
-	System *ResourceMgrScopeConfig `json:",omitempty"`
-	Transient *ResourceMgrScopeConfig `json:",omitempty"`
-
-	ServiceDefault *ResourceMgrScopeConfig `json:",omitempty"`
-	ServicePeerDefault *ResourceMgrScopeConfig `json:",omitempty"`
-	Service map[string]ResourceMgrScopeConfig `json:",omitempty"`
-	ServicePeer map[string]ResourceMgrScopeConfig `json:",omitempty"`
-
-	ProtocolDefault *ResourceMgrScopeConfig `json:",omitempty"`
-	ProtocolPeerDefault *ResourceMgrScopeConfig `json:",omitempty"`
-	Protocol map[string]ResourceMgrScopeConfig `json:",omitempty"`
-	ProtocolPeer map[string]ResourceMgrScopeConfig `json:",omitempty"`
-
-	PeerDefault *ResourceMgrScopeConfig `json:",omitempty"`
-	Peer map[string]ResourceMgrScopeConfig `json:",omitempty"`
-
-	Conn *ResourceMgrScopeConfig `json:",omitempty"`
-	Stream *ResourceMgrScopeConfig `json:",omitempty"`
-}
-*/
-
-// libp2p Network Resource Manager config for a scope
-type ResourceMgrScopeConfig struct {
-	Dynamic bool `json:",omitempty"`
-	// set if Dynamic is false
-	Memory int64 `json:",omitempty"`
-	// set if Dynamic is true
-	MemoryFraction float64 `json:",omitempty"`
-	MinMemory int64 `json:",omitempty"`
-	MaxMemory int64 `json:",omitempty"`
-
-	Streams, StreamsInbound, StreamsOutbound int
-	Conns, ConnsInbound, ConnsOutbound int
-	FD int
-}
diff --git a/core/commands/swarm.go b/core/commands/swarm.go
index 61f40e456aad..4d8fd3b6396d 100644
--- a/core/commands/swarm.go
+++ b/core/commands/swarm.go
@@ -23,6 +23,7 @@ import (
 	cmds "github.com/ipfs/go-ipfs-cmds"
 	inet "github.com/libp2p/go-libp2p-core/network"
 	"github.com/libp2p/go-libp2p-core/peer"
+	rcmgr "github.com/libp2p/go-libp2p-resource-manager"
 	ma "github.com/multiformats/go-multiaddr"
 	madns "github.com/multiformats/go-multiaddr-dns"
 	mamask "github.com/whyrusleeping/multiaddr-filter"
@@ -401,7 +402,7 @@ For permanent limits set Swarm.ResourceMgr.Limits in the $IPFS_PATH/config file.
 
 		// set scope limit to new values (when limit.json is passed as a second arg)
 		if req.Files != nil {
-			var newLimit config.ResourceMgrScopeConfig
+			var newLimit rcmgr.BasicLimitConfig
 			it := req.Files.Entries()
 			if it.Next() {
 				file := files.FileFromEntry(it)
@@ -411,7 +412,7 @@ For permanent limits set Swarm.ResourceMgr.Limits in the $IPFS_PATH/config file.
 				if err := json.NewDecoder(file).Decode(&newLimit); err != nil {
 					return errors.New("failed to decode JSON as ResourceMgrScopeConfig")
 				}
-				return libp2p.NetSetLimit(node.ResourceManager, scope, newLimit)
+				return libp2p.NetSetLimit(node.ResourceManager, node.Repo, scope, newLimit)
 			}
 			if err := it.Err(); err != nil {
 				return fmt.Errorf("error opening limit JSON file: %w", err)
diff --git a/core/node/libp2p/rcmgr.go b/core/node/libp2p/rcmgr.go
index 511e31852427..fc466cc38715 100644
--- a/core/node/libp2p/rcmgr.go
+++ b/core/node/libp2p/rcmgr.go
@@ -2,7 +2,6 @@ package libp2p
 
 import (
 	"context"
-	"errors"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -27,7 +26,6 @@ var NoResourceMgrError = fmt.Errorf("missing ResourceMgr: make sure the daemon i
 
 func ResourceManager(cfg config.SwarmConfig) func(fx.Lifecycle, repo.Repo) (network.ResourceManager, Libp2pOpts, error) {
 	return func(lc fx.Lifecycle, repo repo.Repo) (network.ResourceManager, Libp2pOpts, error) {
-		var limiter *rcmgr.BasicLimiter
 		var manager network.ResourceManager
 		var opts Libp2pOpts
 
@@ -47,25 +45,12 @@ func ResourceManager(cfg config.SwarmConfig) func(fx.Lifecycle, repo.Repo) (netw
 			repoPath, err := config.PathRoot()
 			if err != nil {
-				return nil, opts, fmt.Errorf("error opening IPFS_PATH: %w", err)
+				return nil, opts, fmt.Errorf("opening IPFS_PATH: %w", err)
 			}
 
-			// Create limiter:
-			//  - parse $IPFS_PATH/limits.json if exists
-			//  - use defaultLimits from rcmgr_defaults.go
 			defaultLimits := adjustedDefaultLimits(cfg)
 
-			limitFilePath := filepath.Join(repoPath, NetLimitDefaultFilename)
-			limitFile, err := os.Open(limitFilePath)
-			switch {
-			case err == nil:
-				defer limitFile.Close()
-				limiter, err = rcmgr.NewLimiterFromJSON(limitFile, defaultLimits)
-				if err != nil {
-					return nil, opts, fmt.Errorf("error parsing libp2p limit file: %w", err)
-				}
-			case errors.Is(err, os.ErrNotExist):
-				limiter = rcmgr.NewStaticLimiter(defaultLimits)
-			default:
+			limiter, err := rcmgr.NewLimiter(*cfg.ResourceMgr.Limits, defaultLimits)
+			if err != nil {
 				return nil, opts, err
 			}
 
@@ -80,9 +65,8 @@ func ResourceManager(cfg config.SwarmConfig) func(fx.Lifecycle, repo.Repo) (netw
 			manager, err = rcmgr.NewResourceManager(limiter, ropts...)
 			if err != nil {
-				return nil, opts, fmt.Errorf("error creating libp2p resource manager: %w", err)
+				return nil, opts, fmt.Errorf("creating libp2p resource manager: %w", err)
 			}
-
 		} else {
 			log.Debug("libp2p resource manager is disabled")
 			manager = network.NullResourceManager
 
@@ -196,14 +180,13 @@ func NetStat(mgr network.ResourceManager, scope string) (NetStatOut, error) {
 	}
 }
 
-func NetLimit(mgr network.ResourceManager, scope string) (config.ResourceMgrScopeConfig, error) {
-	var result config.ResourceMgrScopeConfig
+func NetLimit(mgr network.ResourceManager, scope string) (rcmgr.BasicLimitConfig, error) {
+	var result rcmgr.BasicLimitConfig
 	getLimit := func(s network.ResourceScope) error {
 		limiter, ok := s.(rcmgr.ResourceScopeLimiter)
 		if !ok { // NullResourceManager
 			return NoResourceMgrError
 		}
-
 		limit := limiter.Limit()
 		switch l := limit.(type) {
 		case *rcmgr.StaticLimit:
@@ -280,7 +263,8 @@ func NetLimit(mgr network.ResourceManager, scope string) (config.ResourceMgrScop
 	}
 }
 
-func NetSetLimit(mgr network.ResourceManager, scope string, limit config.ResourceMgrScopeConfig) error {
+// NetSetLimit sets new ResourceManager limits for the given scope. The limits take effect immediately, and are also persisted to the repo config.
+func NetSetLimit(mgr network.ResourceManager, repo repo.Repo, scope string, limit rcmgr.BasicLimitConfig) error {
 	setLimit := func(s network.ResourceScope) error {
 		limiter, ok := s.(rcmgr.ResourceScopeLimiter)
 		if !ok { // NullResourceManager
@@ -324,45 +308,68 @@ func NetSetLimit(mgr network.ResourceManager, scope string, limit config.Resourc
 		return nil
 	}
 
+	cfg, err := repo.Config()
+	if err != nil {
+		return fmt.Errorf("reading config to set limit: %w", err)
+	}
+
+	setConfigLimit := func(f func(c *rcmgr.BasicLimiterConfig)) {
+		if cfg.Swarm.ResourceMgr.Limits == nil {
+			cfg.Swarm.ResourceMgr.Limits = &rcmgr.BasicLimiterConfig{}
+		}
+		f(cfg.Swarm.ResourceMgr.Limits)
+	}
+
 	switch {
 	case scope == config.ResourceMgrSystemScope:
-		err := mgr.ViewSystem(func(s network.ResourceScope) error {
+		err = mgr.ViewSystem(func(s network.ResourceScope) error {
 			return setLimit(s)
 		})
-		return err
+		setConfigLimit(func(c *rcmgr.BasicLimiterConfig) { c.System = &limit })
 	case scope == config.ResourceMgrTransientScope:
-		err := mgr.ViewTransient(func(s network.ResourceScope) error {
+		err = mgr.ViewTransient(func(s network.ResourceScope) error {
 			return setLimit(s)
 		})
-		return err
+		setConfigLimit(func(c *rcmgr.BasicLimiterConfig) { c.Transient = &limit })
 	case strings.HasPrefix(scope, config.ResourceMgrServiceScopePrefix):
-		svc := scope[4:]
-		err := mgr.ViewService(svc, func(s network.ServiceScope) error {
+		svc := strings.TrimPrefix(scope, config.ResourceMgrServiceScopePrefix)
+		err = mgr.ViewService(svc, func(s network.ServiceScope) error {
 			return setLimit(s)
 		})
-		return err
+		setConfigLimit(func(c *rcmgr.BasicLimiterConfig) { c.Service[svc] = limit })
 	case strings.HasPrefix(scope, config.ResourceMgrProtocolScopePrefix):
-		proto := scope[6:]
-		err := mgr.ViewProtocol(protocol.ID(proto), func(s network.ProtocolScope) error {
+		proto := strings.TrimPrefix(scope, config.ResourceMgrProtocolScopePrefix)
+		err = mgr.ViewProtocol(protocol.ID(proto), func(s network.ProtocolScope) error {
 			return setLimit(s)
 		})
-		return err
+		setConfigLimit(func(c *rcmgr.BasicLimiterConfig) { c.Protocol[proto] = limit })
 	case strings.HasPrefix(scope, config.ResourceMgrPeerScopePrefix):
-		p := scope[5:]
-		pid, err := peer.Decode(p)
+		p := strings.TrimPrefix(scope, config.ResourceMgrPeerScopePrefix)
+		var pid peer.ID
+		pid, err = peer.Decode(p)
 		if err != nil {
 			return fmt.Errorf("invalid peer ID: %q: %w", p, err)
 		}
 		err = mgr.ViewPeer(pid, func(s network.PeerScope) error {
 			return setLimit(s)
 		})
-		return err
+		setConfigLimit(func(c *rcmgr.BasicLimiterConfig) { c.Peer[p] = limit })
 	default:
 		return fmt.Errorf("invalid scope %q", scope)
 	}
+
+	if err != nil {
+		return err
+	}
+
+	if err := repo.SetConfig(cfg); err != nil {
+		return fmt.Errorf("writing new limits to repo config: %w", err)
+	}
+
+	return nil
 }
diff --git a/go.mod b/go.mod
index 84097cd2c1ce..b6031927e7c6 100644
--- a/go.mod
+++ b/go.mod
@@ -82,7 +82,7 @@ require (
 	github.com/libp2p/go-libp2p-pubsub-router v0.5.0
 	github.com/libp2p/go-libp2p-quic-transport v0.16.1
 	github.com/libp2p/go-libp2p-record v0.1.3
-	github.com/libp2p/go-libp2p-resource-manager v0.1.5
+	github.com/libp2p/go-libp2p-resource-manager v0.3.0
 	github.com/libp2p/go-libp2p-routing-helpers v0.2.3
 	github.com/libp2p/go-libp2p-swarm v0.10.2
 	github.com/libp2p/go-libp2p-testing v0.8.0
diff --git a/go.sum b/go.sum
index b4bb172b402a..7ae76b85037c 100644
--- a/go.sum
+++ b/go.sum
@@ -882,8 +882,9 @@ github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7
 github.com/libp2p/go-libp2p-record v0.1.2/go.mod h1:pal0eNcT5nqZaTV7UGhqeGqxFgGdsU/9W//C8dqjQDk=
 github.com/libp2p/go-libp2p-record v0.1.3 h1:R27hoScIhQf/A8XJZ8lYpnqh9LatJ5YbHs28kCIfql0=
 github.com/libp2p/go-libp2p-record v0.1.3/go.mod h1:yNUff/adKIfPnYQXgp6FQmNu3gLJ6EMg7+/vv2+9pY4=
-github.com/libp2p/go-libp2p-resource-manager v0.1.5 h1:7J6t9KLFS0MxXDTfqA6rwfVCZl/yLQnXW5LpZjHAANI=
 github.com/libp2p/go-libp2p-resource-manager v0.1.5/go.mod h1:wJPNjeE4XQlxeidwqVY5G6DLOKqFK33u2n8blpl0I6Y=
+github.com/libp2p/go-libp2p-resource-manager v0.3.0 h1:2+cYxUNi33tcydsVLt6K5Fv2E3OTiVeafltecAj15E0=
+github.com/libp2p/go-libp2p-resource-manager v0.3.0/go.mod h1:K+eCkiapf+ey/LADO4TaMpMTP9/Qde/uLlrnRqV4PLQ=
 github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys=
 github.com/libp2p/go-libp2p-routing-helpers v0.2.3 h1:xY61alxJ6PurSi+MXbywZpelvuU4U4p/gPTxjqCqTzY=
 github.com/libp2p/go-libp2p-routing-helpers v0.2.3/go.mod h1:795bh+9YeoFl99rMASoiVgHdi5bjack0N1+AFAdbvBw=
diff --git a/test/sharness/t0139-swarm-rcmgr.sh b/test/sharness/t0139-swarm-rcmgr.sh
index 39bbf1d52047..5e922d0464aa 100755
--- a/test/sharness/t0139-swarm-rcmgr.sh
+++ b/test/sharness/t0139-swarm-rcmgr.sh
@@ -9,12 +9,12 @@ test_init_ipfs
 
 # swarm limit|stats should fail in offline mode
 test_expect_success 'disconnected: swarm limit requires running daemon' '
-  test_expect_code 1 ipfs swarm limit system 2> actual &&
-  test_should_contain "missing ResourceMgr" actual
+  test_expect_code 1 ipfs swarm limit system 2> actual &&
+  test_should_contain "missing ResourceMgr" actual
 '
 test_expect_success 'disconnected: swarm stats requires running daemon' '
-  test_expect_code 1 ipfs swarm stats all 2> actual &&
-  test_should_contain "missing ResourceMgr" actual
+  test_expect_code 1 ipfs swarm stats all 2> actual &&
+  test_should_contain "missing ResourceMgr" actual
 '
 
 # swarm limit|stats should fail in online mode by default
@@ -22,44 +22,60 @@ test_expect_success 'disconnected: swarm stats requires running daemon' '
 test_launch_ipfs_daemon
 
 test_expect_success 'ResourceMgr disabled by default: swarm limit requires Swarm.ResourceMgr.Enabled' '
-  test_expect_code 1 ipfs swarm limit system 2> actual &&
-  test_should_contain "missing ResourceMgr" actual
+  test_expect_code 1 ipfs swarm limit system 2> actual &&
+  test_should_contain "missing ResourceMgr" actual
"missing ResourceMgr" actual ' test_expect_success 'ResourceMgr disabled by default: swarm stats requires Swarm.ResourceMgr.Enabled' ' - test_expect_code 1 ipfs swarm stats all 2> actual && - test_should_contain "missing ResourceMgr" actual + test_expect_code 1 ipfs swarm stats all 2> actual && + test_should_contain "missing ResourceMgr" actual ' # swarm limit|stat should work when Swarm.ResourceMgr.Enabled test_kill_ipfs_daemon + test_expect_success "test_config_set succeeds" " - ipfs config --json Swarm.ResourceMgr.Enabled true + ipfs config --json Swarm.ResourceMgr.Enabled true && + ipfs config --json Swarm.ResourceMgr.Limits.System.Conns 99999 " + test_launch_ipfs_daemon # every scope has the same fields, so we only inspect System test_expect_success 'ResourceMgr enabled: swarm limit' ' - ipfs swarm limit system --enc=json | tee json && - jq -e .Conns < json && - jq -e .ConnsInbound < json && - jq -e .ConnsOutbound < json && - jq -e .FD < json && - jq -e .Memory < json && - jq -e .Streams < json && - jq -e .StreamsInbound < json && - jq -e .StreamsOutbound < json + ipfs swarm limit system --enc=json | tee json && + jq -e ".Conns == 99999" < json && + jq -e .ConnsInbound < json && + jq -e .ConnsOutbound < json && + jq -e .FD < json && + jq -e .Memory < json && + jq -e .Streams < json && + jq -e .StreamsInbound < json && + jq -e .StreamsOutbound < json ' # every scope has the same fields, so we only inspect System test_expect_success 'ResourceMgr enabled: swarm stats' ' - ipfs swarm stats all --enc=json | tee json && - jq -e .System.Memory < json && - jq -e .System.NumConnsInbound < json && - jq -e .System.NumConnsOutbound < json && - jq -e .System.NumFD < json && - jq -e .System.NumStreamsInbound < json && - jq -e .System.NumStreamsOutbound < json && - jq -e .Transient.Memory < json + ipfs swarm stats all --enc=json | tee json && + jq -e .System.Memory < json && + jq -e .System.NumConnsInbound < json && + jq -e .System.NumConnsOutbound < json && + jq -e .System.NumFD < json && + jq -e .System.NumStreamsInbound < json && + jq -e .System.NumStreamsOutbound < json && + jq -e .Transient.Memory < json +' + +test_expect_success 'Set system memory limit while the daemon is running' ' + ipfs swarm limit system | jq ".Memory = 99998" > system.json && + ipfs swarm limit system system.json +' + +test_expect_success 'The new system limits were written to the config' ' + jq -e ".Swarm.ResourceMgr.Limits.System.Memory == 99998" < "$IPFS_PATH/config" +' + +test_expect_success 'The new system limits are in the swarm limit output' ' + ipfs swarm limit system --enc=json | jq -e ".Memory == 99998" ' test_kill_ipfs_daemon