Prometheus is failing frequently

We have more than 200 ServiceMonitors scraping metrics from pods, and we started observing Prometheus pod restarts with the errors below.

ts=2022-12-14T23:03:00.447Z caller=scrape.go:1292 level=debug component="scrape manager" scrape_pool=serviceMonitor/monitor/r-media-servicemonitor/0 target=http://x.x.x.x:8080/prometheus msg="Scrape failed" err="Get \"http://x.x.x.x:8080/prometheus\": context deadline exceeded"
ts=2022-12-14T23:03:00.789Z caller=scrape.go:1292 level=debug component="scrape manager" scrape_pool=serviceMonitor/monitor/r-widget-servicemonitor/0 target=http://x.x.x.x:8080/prometheus msg="Scrape failed" err="server returned HTTP status 404 "
ts=2022-12-14T23:03:02.359Z caller=scrape.go:1292 level=debug component="scrape manager" scrape_pool=serviceMonitor/monitor/r-media-servicemonitor/0 target=http://x.x.x.x:8080/prometheus msg="Scrape failed" err="Get \"http://x.x.x.x:8080/prometheus\": context deadline exceeded"
ts=2022-12-14T23:03:07.331Z caller=scrape.go:1292 level=debug component="scrape manager" scrape_pool=serviceMonitor/monitor/r-hubspot-servicemonitor/0 target=http://x.x.x.x:8080/prometheus msg="Scrape failed" err="server returned HTTP status 404 Not Found"
fatal error: runtime: out of memory

runtime stack:
runtime.throw({0x2c8e219, 0x1dc00000})
	/usr/local/go/src/runtime/panic.go:1198 +0x71
runtime.sysMap(0xee82c00000, 0x4299e0, 0xc002b13e90)
	/usr/local/go/src/runtime/mem_linux.go:169 +0x96
runtime.(*mheap).grow(0x4da8a40, 0xec27)
	/usr/local/go/src/runtime/mheap.go:1393 +0x225
runtime.(*mheap).allocSpan(0x4da8a40, 0xec27, 0x0, 0x1)
	/usr/local/go/src/runtime/mheap.go:1179 +0x165
runtime.(*mheap).alloc.func1()
	/usr/local/go/src/runtime/mheap.go:913 +0x69
runtime.systemstack()
	/usr/local/go/src/runtime/asm_amd64.s:383 +0x49

goroutine 4217 [running]:
runtime.systemstack_switch()
	/usr/local/go/src/runtime/asm_amd64.s:350 fp=0xcd71537650 sp=0xcd71537648 pc=0x466880
runtime.(*mheap).alloc(0x1d84e000, 0xec27, 0x80, 0x0)
	/usr/local/go/src/runtime/mheap.go:907 +0x73 fp=0xcd715376a0 sp=0xcd71537650 pc=0x425d13
runtime.(*mcache).allocLarge(0xc0052a2000, 0x1d84dbd8, 0xa3, 0x1)
	/usr/local/go/src/runtime/mcache.go:227 +0x89 fp=0xcd71537700 sp=0xcd715376a0 pc=0x416989
runtime.mallocgc(0x1d84dbd8, 0x24e9c00, 0x1)
	/usr/local/go/src/runtime/malloc.go:1082 +0x5c5 fp=0xcd71537780 sp=0xcd71537700 pc=0x40cd45
runtime.makeslice(0x0, 0x0, 0xc)
	/usr/local/go/src/runtime/slice.go:98 +0x52 fp=0xcd715377a8 sp=0xcd71537780 pc=0x44cb32
bytes.makeSlice(0x1d84dbd8)
	/usr/local/go/src/bytes/buffer.go:229 +0x65 fp=0xcd715377f8 sp=0xcd715377a8 pc=0x536425
bytes.(*Buffer).grow(0xd9da3c7dd0, 0x200)
	/usr/local/go/src/bytes/buffer.go:142 +0x11f fp=0xcd71537840 sp=0xcd715377f8 pc=0x535ddf
bytes.(*Buffer).ReadFrom(0xd9da3c7dd0, {0x354b0e0, 0xd6d2131968})
	/usr/local/go/src/bytes/buffer.go:202 +0x45 fp=0xcd71537898 sp=0xcd71537840 pc=0x536265
io.copyBuffer({0x3544a40, 0xd9da3c7dd0}, {0x354b0e0, 0xd6d2131968}, {0x0, 0x0, 0x0})
	/usr/local/go/src/io/io.go:409 +0x14b fp=0xcd71537918 sp=0xcd71537898 pc=0x4d1f8b
io.Copy(...)
	/usr/local/go/src/io/io.go:382
github.com/prometheus/prometheus/scrape.(*targetScraper).scrape(0xc003992880, {0x35a0d00, 0xc1cb974060}, {0x3544a40, 0xd9da3c7dd0})
	/app/scrape/scrape.go:786 +0x848 fp=0xcd71537b58 sp=0xcd71537918 pc=0x21dd2c8
github.com/prometheus/prometheus/scrape.(*scrapeLoop).scrapeAndReport(0xc001d87040, {0xedb2c4d0f, 0x4d8e460, 0x4d8e460}, {0xedb2c4d03, 0x4d8e460, 0x4d8e460}, 0x0)
	/app/scrape/scrape.go:1279 +0x8a3 fp=0xcd71537e40 sp=0xcd71537b58 pc=0x21e0883
github.com/prometheus/prometheus/scrape.(*scrapeLoop).run(0xc001d87040, 0xc001418600)
	/app/scrape/scrape.go:1203 +0x351 fp=0xcd71537fc0 sp=0xcd71537e40 pc=0x21dfc51
github.com/prometheus/prometheus/scrape.(*scrapePool).sync·dwrap·18()
	/app/scrape/scrape.go:584 +0x2f fp=0xcd71537fe0 sp=0xcd71537fc0 pc=0x21db78f
runtime.goexit()
	/usr/local/go/src/runtime/asm_amd64.s:1581 +0x1 fp=0xcd71537fe8 sp=0xcd71537fe0 pc=0x468961
created by github.com/prometheus/prometheus/scrape.(*scrapePool).sync
	/app/scrape/scrape.go:584 +0xa55

Can someone give some hints on how to fix these issues?

As the "fatal error: runtime: out of memory" in the stack trace indicates, the Prometheus container is exhausting the memory available to it while scraping, so you need to allocate more memory to your Prometheus pod by raising its memory request and limit.
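
Since the scrape pools are named serviceMonitor/..., this Prometheus instance appears to be managed by the Prometheus Operator, so the memory request and limit can be set on the Prometheus custom resource. Below is a minimal sketch; the resource name, namespace, and sizes are assumptions and should be adapted to your cluster.

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s              # assumption: replace with the name of your Prometheus resource
  namespace: monitoring  # assumption: replace with its namespace
spec:
  # Give the Prometheus container more headroom so heavy scrape load
  # does not push it past its limit and trigger OOM restarts.
  resources:
    requests:
      memory: 8Gi        # assumed sizes; tune based on observed usage
    limits:
      memory: 12Gi

If the stack is deployed with the kube-prometheus-stack Helm chart, the equivalent setting lives under prometheus.prometheusSpec.resources in the chart values. Start from the memory usage you see before the crashes and leave a comfortable margin, since Prometheus memory grows with the number of active series and scrape targets.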