Incorrect data from PromQL: node_memory_MemTotal_bytes

PromQL

node_memory_MemTotal_bytes{job="kubernetes-node-exporter-xxx.xxx"} / 1024 / 1024 / 1024
{instance="k8s-idc-test-worker-2",job="kubernetes-node-exporter-xxx.xxx"} 157.31474685668945
{instance="k8s-idc-test-worker-1",job="kubernetes-node-exporter-xxx.xxx"} 157.31474685668945
{instance="k8s-idc-test-master-3",job="kubernetes-node-exporter-xxx.xxx"} 157.31474685668945
{instance="k8s-idc-test-master-2",job="kubernetes-node-exporter-xxx.xxx"} 157.31474685668945
{instance="k8s-idc-test-master-1",job="kubernetes-node-exporter-xxx.xxx"} 157.31474685668945

Server configuration:
k8s-idc-test-master-1 memory=8G
k8s-idc-test-master-2 memory=8G
k8s-idc-test-master-3 memory=8G
k8s-idc-test-worker-1 memory=157G
k8s-idc-test-worker-2 memory=157G

The reported memory size is incorrect: every instance, including the three 8G masters, returns the same ~157.3 GiB, a value that should only appear on the two 157G workers.
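
Because all five targets return exactly the same number, a quick check is whether Prometheus sees more than one distinct MemTotal value at all. This is only a diagnostic sketch using standard PromQL (the memtotal_gib label name is arbitrary and not part of the output above); with 8G masters and 157G workers, at least two distinct values would be expected:

count_values("memtotal_gib", node_memory_MemTotal_bytes{job="kubernetes-node-exporter-xxx.xxx"} / 1024 / 1024 / 1024)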

node_exporter version

node_exporter, version 1.2.2 (branch: HEAD, revision: 2664536)
build user: root@b9cb4aa2eb17
build date: 20210806-13:44:18
go version: go1.16.7
platform: linux/amd64

Host operating system

5.4.143-1.el7.elrepo.x86_64

Prometheus version

2.16.0

prometheus.yaml

global:
  scrape_interval:     30s
  evaluation_interval: 30s

rule_files:
  - "/etc/prometheus/rules/*.yml"

scrape_configs:

# api-server
- job_name: 'kubernetes-apiservers-xxx.xxx'
  kubernetes_sd_configs:
    - role: endpoints
      api_server: https://192.168.xxx.xxx:9443
      tls_config:
        insecure_skip_verify: true
      bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  tls_config:
    insecure_skip_verify: true
  bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  scheme: https
  relabel_configs:
    - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
      action: keep
      regex: default;kubernetes;https
    - target_label: __address__
      replacement: 192.168.xxx.xxx:9443

# cadvisor
- job_name: 'kubernetes-nodes-cadvisor-xxx.xxx'
  kubernetes_sd_configs:
    - role: node
      api_server: https://192.168.xxx.xxx:9443
      tls_config:
        insecure_skip_verify: true
      bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  tls_config:
    insecure_skip_verify: true
  bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  scheme: https
  relabel_configs:
    - action: labelmap
      regex: __meta_kubernetes_node_label_(.+)
    - target_label: __address__
      replacement: 192.168.xxx.xxx:9443
    - source_labels: [__meta_kubernetes_node_name]
      regex: (.+)
      target_label: __metrics_path__
      replacement: /api/v1/nodes/${1}/proxy/metrics/cadvisor

# nodes
- job_name: 'kubernetes-nodes-xxx.xxx'
  scheme: https
  kubernetes_sd_configs:
    - role: node
      api_server: https://192.168.xxx.xxx:9443
      tls_config:
        insecure_skip_verify: true
      bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  tls_config:
    insecure_skip_verify: true
  bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  relabel_configs:
    - action: labelmap
      regex: __meta_kubernetes_node_label_(.+)
    - target_label: __address__
      replacement: 192.168.xxx.xxx:9443
    - source_labels: [__meta_kubernetes_node_name]
      regex: (.+)
      target_label: __metrics_path__
      replacement: /api/v1/nodes/${1}/proxy/metrics

# node-exporter
- job_name: 'kubernetes-node-exporter-xxx.xxx'
  scheme: http
  kubernetes_sd_configs:
    - role: node
      api_server: https://192.168.xxx.xxx:9443
      tls_config:
        insecure_skip_verify: true
      bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  tls_config:
    insecure_skip_verify: true
  bearer_token_file: /etc/prometheus/serviceaccount/token_xxx.xxx
  relabel_configs:
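    # Note: kubernetes_sd with role: node exposes the node IP under
    # __meta_kubernetes_node_address_InternalIP; the label name in the rule below
    # looks misspelled, so the rule probably never matches and __address__ keeps
    # whatever address service discovery set.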
    - source_labels: [__meta_kubernetes_node_address_InteralIP]
      regex: (.+)
      action: replace
      target_label: __address__
    - source_labels: [__address__]
      target_label: __address__
      action: replace
      regex: (.*):(.*)
      replacement: ${1}:31672

# kube-state-metrics
- job_name: 'kube-state-metrics-xxx.xxx'
  static_configs:
    - targets: ['192.168.xxx.xxx:30004', '192.168.xxx.xxx:30005']
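
After the relabeling in the node-exporter job, each target should end up being scraped at <node address>:31672, i.e. the NodePort of the Service defined below. As a sketch (assuming the instance label is not rewritten elsewhere, in which case it defaults to the final __address__), the address each target was actually scraped at can be read off of:

up{job="kubernetes-node-exporter-xxx.xxx"}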

k8s node-exporter.yaml (DaemonSet + Service)

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: node-exporter
  namespace: kube-system
  labels:
    k8s-app: node-exporter
spec:
  selector:
    matchLabels:
      k8s-app: node-exporter
  template:
    metadata:
      labels:
        k8s-app: node-exporter
    spec:
      containers:
      - image: prom/node-exporter:v1.2.2
        args:
        - --path.procfs
        - /host/proc
        - --path.sysfs
        - /host/sys
        - --path.rootfs
        - /host/root
        - --collector.filesystem.ignored-mount-points
        - '^/(sys|proc|dev|host|etc)($|/)'
        - --collector.processes
        name: node-exporter
        ports:
        - containerPort: 9100
          protocol: TCP
          name: http
        volumeMounts:
        - mountPath: /host/dev
          name: dev
        - mountPath: /host/proc
          name: proc
        - mountPath: /host/sys
          name: sys
        - mountPath: /host/root
          name: rootfs
      hostIPC: true
      hostNetwork: true
      hostPID: true
      tolerations:
      - effect: NoSchedule
        operator: Exists
      volumes:
      - hostPath:
          path: /proc
          type: ""
        name: proc
      - hostPath:
          path: /dev
          type: ""
        name: dev
      - hostPath:
          path: /sys
          type: ""
        name: sys
      - hostPath:
          path: /
          type: ""
        name: rootfs
---
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: node-exporter
  name: node-exporter
  namespace: kube-system
spec:
  ports:
  - name: http
    port: 9100
    nodePort: 31672
    protocol: TCP
  type: NodePort
  selector:
    k8s-app: node-exporter
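
One detail about this Service that may matter for the numbers above: the scrape address <node address>:31672 is the NodePort, and with the default externalTrafficPolicy (Cluster) a NodePort connection can be answered by a node-exporter pod on a different node than the one the address points at. A sketch of a query (using the nodename label from node_exporter's default uname collector, not shown in the output above) that attaches the responding host's name to each memory sample:

node_memory_MemTotal_bytes{job="kubernetes-node-exporter-xxx.xxx"}
  * on(instance) group_left(nodename)
  node_uname_info{job="kubernetes-node-exporter-xxx.xxx"}

If the nodename turns out to be the same for every instance, all five targets are being served by the same exporter.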