kube-prometheus-stack: custom Alertmanager configSecret is not applied

I deployed the Prometheus stack with the kube-prometheus-stack Helm chart, version 61.2.0.
Then I followed the instructions in "Alerting - Prometheus Operator" to configure alerting.
My procedure looked like this:

root@master01:~/v1.30/08-kube-prometheus-stack/alertmanager# cat alertmanager.yaml 
route:
  # Group alerts with the same alert name and job into one notification.
  groupBy: ['alertname', 'job']
  
  #continue: false

  # Wait 30s before sending the initial notification for a group.
  groupWait: 30s
  # Wait 2m before sending follow-up notifications for new alerts in a group.
  groupInterval: 2m
  # Wait 3h before repeating the same notification again.
  repeatInterval: 3h

  # By default, send notifications to the wechat receiver.
  receiver: wechat

  # Child routes with alert-specific matchers & overrides.
  routes:
  # Child route 1: match DemoServiceInstanceDown alerts and send them to the wechat receiver.
  - matchers:
    - matchType: =
      name: alertname
      value: DemoServiceInstanceDown
    receiver: wechat

receivers:
- name: 'wechat'
  wechat_configs: 
  - send_resolved: true
    message: '{{ template "wechat.default.message" . }}'
    to_party: '1'         
    agent_id: '1000002'     
    api_secret: '8xxxxxxxxxxxxxxxxxxxxxxxxx'
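
(Side note: the camelCase keys above — groupBy, groupWait, matchers with matchType/name/value — are the AlertmanagerConfig CRD spelling. A file loaded through configSecret is read by Alertmanager itself, which as far as I know expects its native snake_case syntax. My untested sketch of the same route in native syntax:

route:
  group_by: ['alertname', 'job']
  group_wait: 30s
  group_interval: 2m
  repeat_interval: 3h
  receiver: wechat
  routes:
  - matchers:
    - alertname = "DemoServiceInstanceDown"
    receiver: wechat

I'm not sure whether this spelling matters here, since the file doesn't seem to be loaded at all, as shown further down.)
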
root@master01:~/v1.30/08-kube-prometheus-stack/alertmanager# cat wechat.tmpl 
{{ define "wechat.default.message" }}
{{- if gt (len .Alerts.Firing) 0 -}}
{{- range $index, $alert := .Alerts -}}
{{- if eq $index 0 }}
========= Monitoring Alert =========
Alert status: {{ .Status }}
Alert severity: {{ .Labels.severity }}
Alert type: {{ $alert.Labels.alertname }}
.....................
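
(Since the template file is mounted under /etc/alertmanager/config/ next to the config, I believe the config also needs a top-level templates entry so Alertmanager actually loads it — something along these lines, assuming that mount path:

templates:
- '/etc/alertmanager/config/*.tmpl'

My alertmanager.yaml above does not have that yet.)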


kubectl create secret generic -n monitoring alertmanager-prometheus-stack-wechat --from-file=wechat.tmpl --from-file=alertmanager.yaml --dry-run=client --save-config -o yaml | kubectl apply -f -
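
(A quick way to confirm the secret contains both keys:

kubectl -n monitoring describe secret alertmanager-prometheus-stack-wechat

which should list alertmanager.yaml and wechat.tmpl under Data.)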

Then I added the Alertmanager settings to the values file I pass to the chart:

root@master01:~/v1.30/08-kube-prometheus-stack# cat thanos-sidecar.yaml
alertmanager:
  config:
    global:
      resolve_timeout: 2m
      wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/
      wechat_api_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
      wechat_api_corp_id: ww3ef8df46db59fd4e
  alertmanagerSpec:
    configSecret: alertmanager-prometheus-stack-wechat

helm -n monitoring upgrade prometheus-stack prometheus-community/kube-prometheus-stack --version 61.2.0 --values thanos-sidecar.yaml
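
(For completeness: as far as I can tell from the chart's default values.yaml, the Alertmanager config and the template files can also be embedded directly in values, without a separate secret. Rough sketch — the templateFiles key and the exact layout come from my reading of the chart defaults, so treat them as assumptions:

alertmanager:
  config:
    global:
      resolve_timeout: 2m
      wechat_api_url: https://qyapi.weixin.qq.com/cgi-bin/
      wechat_api_secret: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
      wechat_api_corp_id: ww3ef8df46db59fd4e
    templates:
    - '/etc/alertmanager/config/*.tmpl'
    route:
      group_by: ['alertname', 'job']
      receiver: wechat
      routes:
      - matchers:
        - alertname = "DemoServiceInstanceDown"
        receiver: wechat
    receivers:
    - name: 'wechat'
      wechat_configs:
      - send_resolved: true
        to_party: '1'
        agent_id: '1000002'
        api_secret: '8xxxxxxxxxxxxxxxxxxxxxxxxx'
  templateFiles:
    wechat.tmpl: |-
      {{ define "wechat.default.message" }}
      ... same content as the wechat.tmpl above ...
      {{ end }}

I would still prefer to understand why the configSecret approach does not work.)
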

Then I checked inside the pod, and the loaded config was not what I expected:

root@master01:~/v1.30/08-kube-prometheus-stack# kubectl    -n  monitoring  exec   -it  alertmanager-prometheus-stack-kube-prom-alertmanager-0   -- ls  -l  /etc/alertmanager/config  
total 0
lrwxrwxrwx    1 root     2000            27 Jul 10 07:45 alertmanager.yaml.gz -> ..data/alertmanager.yaml.gz
lrwxrwxrwx    1 root     2000            18 Jul 10 07:45 wechat.tmpl -> ..data/wechat.tmpl
root@master01:~/v1.30/08-kube-prometheus-stack# kubectl    -n  monitoring  exec   -it  alertmanager-prometheus-stack-kube-prom-alertmanager-0   -- zcat   /etc/alertmanager/config/alertmanager.yaml.gz
route:
  receiver: "null"
receivers:
- name: "null"
templates: []
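
The "null" route and receiver look like a default/placeholder configuration rather than anything I provided, so it seems the operator is still using its own generated configuration instead of the secret referenced via configSecret. A couple of checks that might help narrow this down (the secret and deployment names below are guesses based on my release name):

kubectl -n monitoring get secret | grep alertmanager
kubectl -n monitoring get secret alertmanager-prometheus-stack-kube-prom-alertmanager-generated -o jsonpath='{.data.alertmanager\.yaml\.gz}' | base64 -d | gunzip
kubectl -n monitoring logs deploy/prometheus-stack-kube-prom-operator | grep -i alertmanager

Why is the secret referenced by configSecret being ignored, and what am I missing?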