Spamming DHT errors: "ignoring incoming dht message while not in server mode"

I've been getting this error continuously on my IPNS node. The node is responsible for republishing almost 1,000 IPNS records.
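As far as I can tell, this message is logged by go-libp2p-kad-dht when a node running in DHT client mode receives DHT requests, usually because other peers still have it in their routing tables as a server. Routing.Type is not set in my config (posted below), so the daemon uses the "auto" default, which switches between client and server mode based on reachability. One thing I'm considering (untested, and only sensible if the node is publicly dialable) is pinning the mode explicitly:

ipfs config Routing.Type dhtserver   # or "dhtclient" to stay client-only; restart the daemon afterwards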

System specs:

12 CPUs
48 GB RAM

IPFS version: was 0.21.0, now upgraded to 0.24.0. I haven't reproduced the errors on 0.24.0 yet, but I was getting them with 0.21.0.

One of the GitHub issues (High CPU/RAM and spamming DHT errors: "ignoring incoming dht message while not in server mode" · Issue #7573 · ipfs/kubo · GitHub) mentions sharing a pprof dump.
Here is the pprof dump of the system, taken after I restarted it and let it run for a while: https://bafybeicw4kza5a4vmgmdovp5i5nc6jv7wpsok7v33diiog372zf4trmeaa.ipfs.sphn.link/
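For reference, this is roughly how I collect such a dump with stock Kubo (the exact file names inside the archive can differ between versions):

ipfs diag profile                      # writes an ipfs-profile-*.zip with CPU, heap and goroutine profiles
go tool pprof -http=:8081 heap.pprof   # then inspect one of the extracted profiles locally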

  "API": {
    "HTTPHeaders": {}
  },
  "Addresses": {
    "API": "/ip4/0.0.0.0/tcp/5001",
    "Announce": null,
    "AppendAnnounce": [
      "/ip4/84.32.214.46/tcp/31401",
      "/ip4/84.32.214.46/udp/31402/quic-v1",
      "/ip4/84.32.214.46/udp/31402/quic-v1/webtransport",
      "/ip4/127.0.0.1/tcp/4001",
      "/ip4/127.0.0.1/udp/4002/quic-v1",
      "/ip4/127.0.0.1/udp/4002/quic-v1/webtransport",
      "/ip4/0.0.0.0/tcp/4001",
      "/ip4/0.0.0.0/udp/4002/quic-v1",
      "/ip4/0.0.0.0/udp/4002/quic-v1/webtransport"
    ],
    "Gateway": "/ip4/0.0.0.0/tcp/8080",
    "NoAnnounce": [
      "/ip4/10.0.0.0/ipcidr/8",
      "/ip4/100.64.0.0/ipcidr/10",
      "/ip4/169.254.0.0/ipcidr/16",
      "/ip4/172.16.0.0/ipcidr/12",
      "/ip4/192.0.0.0/ipcidr/24",
      "/ip4/192.0.2.0/ipcidr/24",
      "/ip4/192.168.0.0/ipcidr/16",
      "/ip4/198.18.0.0/ipcidr/15",
      "/ip4/198.51.100.0/ipcidr/24",
      "/ip4/203.0.113.0/ipcidr/24",
      "/ip4/240.0.0.0/ipcidr/4",
      "/ip6/100::/ipcidr/64",
      "/ip6/2001:2::/ipcidr/48",
      "/ip6/2001:db8::/ipcidr/32",
      "/ip6/fc00::/ipcidr/7",
      "/ip6/fe80::/ipcidr/10"
    ],
    "Swarm": [
      "/ip4/0.0.0.0/tcp/4001",
      "/ip6/::/tcp/4001",
      "/ip4/0.0.0.0/udp/4001/quic-v1",
      "/ip4/0.0.0.0/udp/4001/quic-v1/webtransport",
      "/ip6/::/udp/4001/quic-v1",
      "/ip6/::/udp/4001/quic-v1/webtransport"
    ]
  },
  "AutoNAT": {},
  "Bootstrap": [
    "/ip4/5.199.162.220/tcp/30401/p2p/12D3KooWNbvFFq8kzS3eMonQYEmFyWTHboMZnHKr4XnN53gJKqhH",
    "/ip4/5.199.168.17/tcp/30401/p2p/12D3KooWBCzniwFvfS67HyZsJtiehd7RHtcKanGGCd8h6bjVWnkb",
    "/ip4/5.199.162.230/tcp/30401/p2p/12D3KooWFdciAga3zq3TZdrmDospFNrWRALe6VM6JpoKGEfVrqEi",
    "/dnsaddr/bootstrap.libp2p.io/p2p/QmNnooDu7bfjPFoTZYxMNLWUQJyrVwtbZg5gBMjTezGAJN",
    "/dnsaddr/bootstrap.libp2p.io/p2p/QmQCU2EcMqAqQPR2i9bChDtGNJchTbq5TbXJJ16u19uLTa",
    "/dnsaddr/bootstrap.libp2p.io/p2p/QmbLHAnMoJPWSCR5Zhtx6BHJX9KiKNN6tpvbUcqanj75Nb",
    "/dnsaddr/bootstrap.libp2p.io/p2p/QmcZf59bWwK5XFi76CZX8cbJ4BhTzzA3gU1ZjYZcYW3dwt",
    "/ip4/104.131.131.82/tcp/4001/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ",
    "/ip4/104.131.131.82/udp/4001/quic-v1/p2p/QmaCpDMGvV2BGHeYERUEnRQAwe3N8SzbUtfsmvsqQLuvuJ"
  ],
  "DNS": {
    "Resolvers": {}
  },
  "Datastore": {
    "BloomFilterSize": 0,
    "GCPeriod": "1h",
    "HashOnRead": false,
    "Spec": {
      "child": {
        "path": "badgerds",
        "syncWrites": false,
        "truncate": true,
        "type": "badgerds"
      },
      "prefix": "badger.datastore",
      "type": "measure"
    },
    "StorageGCWatermark": 90,
    "StorageMax": "10GB"
  },
  "Discovery": {
    "MDNS": {
      "Enabled": false
    }
  },
  "Experimental": {
    "FilestoreEnabled": false,
    "GraphsyncEnabled": false,
    "Libp2pStreamMounting": false,
    "OptimisticProvide": false,
    "OptimisticProvideJobsPoolSize": 0,
    "P2pHttpProxy": false,
    "StrategicProviding": false,
    "UrlstoreEnabled": false
  },
  "Gateway": {
    "APICommands": [],
    "DeserializedResponses": null,
    "HTTPHeaders": {},
    "NoDNSLink": false,
    "NoFetch": false,
    "PathPrefixes": [],
    "PublicGateways": null,
    "RootRedirect": ""
  },
  "Identity": {
    "PeerID": "12D3KooWFCFcduvDiy1SNF977D1rmfUdhHsWMn3AqZcXPdcLQRN4"
  },
  "Internal": {},
  "Ipns": {
    "RecordLifetime": "1000h",
    "RepublishPeriod": "24h",
    "ResolveCacheSize": 0,
    "UsePubsub": true
  },
  "Migration": {
    "DownloadSources": [],
    "Keep": ""
  },
  "Mounts": {
    "FuseAllowOther": false,
    "IPFS": "/ipfs",
    "IPNS": "/ipns"
  },
  "Peering": {
    "Peers": null
  },
  "Pinning": {
    "RemoteServices": {}
  },
  "Plugins": {
    "Plugins": null
  },
  "Provider": {
    "Strategy": ""
  },
  "Pubsub": {
    "DisableSigning": false,
    "Router": ""
  },
  "Reprovider": {},
  "ResourceMgr": {},
  "Routing": {
    "AcceleratedDHTClient": false,
    "Methods": null,
    "Routers": null
  },
  "Swarm": {
    "AddrFilters": [
      "/ip4/10.0.0.0/ipcidr/8",
      "/ip4/100.64.0.0/ipcidr/10",
      "/ip4/169.254.0.0/ipcidr/16",
      "/ip4/172.16.0.0/ipcidr/12",
      "/ip4/192.0.0.0/ipcidr/24",
      "/ip4/192.0.2.0/ipcidr/24",
      "/ip4/192.168.0.0/ipcidr/16",
      "/ip4/198.18.0.0/ipcidr/15",
      "/ip4/198.51.100.0/ipcidr/24",
      "/ip4/203.0.113.0/ipcidr/24",
      "/ip4/240.0.0.0/ipcidr/4",
      "/ip6/100::/ipcidr/64",
      "/ip6/2001:2::/ipcidr/48",
      "/ip6/2001:db8::/ipcidr/32",
      "/ip6/fc00::/ipcidr/7",
      "/ip6/fe80::/ipcidr/10"
    ],
    "ConnMgr": {
      "GracePeriod": "15s",
      "HighWater": 2000,
      "LowWater": 500
    },
    "DisableBandwidthMetrics": false,
    "DisableNatPortMap": true,
    "RelayClient": {},
    "RelayService": {},
    "ResourceMgr": {
      "MaxMemory": "22000000000"
    },
    "Transports": {
      "Multiplexers": {},
      "Network": {},
      "Security": {}
    }
  }
}
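Two things stand out to me in this config, though this is my reading rather than a confirmed diagnosis: Routing.Type is unset (so the "auto" mode switching described above applies), and the 24h Ipns.RepublishPeriod has to cover almost 1,000 records. A quick sanity check for the record count, assuming every republished record corresponds to a key in the local keystore:

ipfs key list | wc -l   # one key per IPNS record the daemon republishes every Ipns.RepublishPeriod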

Would love to get some help with this.

Can you post logs? (Do they include the peer ID?)

I guess this could be the DHT crawler.

I don't have the logs; the log level is set to error only, and this is the only text printed, with no peer ID. I can set it to info or debug and share that?
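For the record, this is how I plan to raise verbosity for just the DHT subsystem instead of everything (the subsystem is named "dht" in the versions I checked):

GOLOG_LOG_LEVEL="error,dht=debug" ipfs daemon   # per-subsystem override at daemon start
ipfs log level dht debug                        # or change it at runtime on a live daemon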

I restarted the node with debug logs. I haven't gotten the same error yet, but here are the logs; they might give some idea: https://bafybeid2s7wi3yruqqlyuohawdir4jogwqwpzlhxg6xqzis6zvxhpwtkfi.ipfs.sphn.link/

I'll add the peer ID to the error message.