[TOC]

The load test mainly simulates creating resources of various types and then checks whether the two API-call latency SLOs are satisfied. As modified here, it no longer creates workloads or Pods, so the results are largely independent of the number of nodes in the cluster.

### **Test configuration files**

##### **config.yaml**

```
# ASSUMPTIONS:
# - Underlying cluster should have 100+ nodes.
# - Number of nodes should be divisible by NODES_PER_NAMESPACE (default 100).
# - The number of created SVCs is half the number of created Deployments.
# - Only half of Deployments will be assigned 1-1 to existing SVCs.

#Constants
# Cater for the case where the number of nodes is less than nodes per namespace. See https://github.com/kubernetes/perf-tests/issues/887
{{$NODES_PER_NAMESPACE := MinInt .Nodes (DefaultParam .NODES_PER_NAMESPACE 100)}}
# See https://github.com/kubernetes/perf-tests/pull/1667#issuecomment-769642266
{{$IS_SMALL_CLUSTER := lt .Nodes 100}}
{{$PODS_PER_NODE := DefaultParam .PODS_PER_NODE 30}}
{{$LOAD_TEST_THROUGHPUT := DefaultParam .CL2_LOAD_TEST_THROUGHPUT 10}}
{{$DELETE_TEST_THROUGHPUT := DefaultParam .CL2_DELETE_TEST_THROUGHPUT $LOAD_TEST_THROUGHPUT}}
{{$BIG_GROUP_SIZE := DefaultParam .BIG_GROUP_SIZE 250}}
{{$MEDIUM_GROUP_SIZE := DefaultParam .MEDIUM_GROUP_SIZE 30}}
{{$SMALL_GROUP_SIZE := DefaultParam .SMALL_GROUP_SIZE 5}}
{{$SMALL_STATEFUL_SETS_PER_NAMESPACE := DefaultParam .SMALL_STATEFUL_SETS_PER_NAMESPACE 1}}
{{$MEDIUM_STATEFUL_SETS_PER_NAMESPACE := DefaultParam .MEDIUM_STATEFUL_SETS_PER_NAMESPACE 1}}
{{$ENABLE_CHAOSMONKEY := DefaultParam .ENABLE_CHAOSMONKEY false}}
{{$ENABLE_API_AVAILABILITY_MEASUREMENT := DefaultParam .CL2_ENABLE_API_AVAILABILITY_MEASUREMENT false}}
{{$ENABLE_HUGE_SERVICES := DefaultParam .CL2_ENABLE_HUGE_SERVICES false}}
{{$HUGE_SERVICES_SIZE := DefaultParam .CL2_HUGE_SERVICES_SIZE .Nodes}}
# Determines number of pods per deployment. Should be a divider of $HUGE_SERVICES_SIZE.
{{$HUGE_SERVICES_PODS_PER_DEPLOYMENT := DefaultParam .CL2_HUGE_SERVICES_PODS_PER_DEPLOYMENT 1000}}
{{$RANDOM_SCALE_FACTOR := 0.5}}

#Variables
{{$namespaces := DivideInt .Nodes $NODES_PER_NAMESPACE}}
{{$totalPods := MultiplyInt $namespaces $NODES_PER_NAMESPACE $PODS_PER_NODE}}
{{$podsPerNamespace := DivideInt $totalPods $namespaces}}
{{$saturationTime := DivideInt $totalPods $LOAD_TEST_THROUGHPUT}}
{{$deletionTime := DivideInt $totalPods $DELETE_TEST_THROUGHPUT}}
# bigDeployments - 1/4 of namespace pods should be in big Deployments.
{{$bigDeploymentsPerNamespace := DivideInt $podsPerNamespace (MultiplyInt 4 $BIG_GROUP_SIZE)}}
# mediumDeployments - 1/4 of namespace pods should be in medium Deployments.
{{$mediumDeploymentsPerNamespace := DivideInt $podsPerNamespace (MultiplyInt 4 $MEDIUM_GROUP_SIZE)}}
# smallDeployments - 1/2 of namespace pods should be in small Deployments.
{{$smallDeploymentsPerNamespace := DivideInt $podsPerNamespace (MultiplyInt 2 $SMALL_GROUP_SIZE)}}
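
# Worked example (assuming a 100-node cluster with the defaults above):
#   namespaces       = 100 / 100    = 1
#   totalPods        = 1 * 100 * 30 = 3000
#   podsPerNamespace = 3000 / 1     = 3000
#   saturationTime   = 3000 / 10    = 300 (seconds)
#   bigDeploymentsPerNamespace    = 3000 / (4 * 250) = 3
#   mediumDeploymentsPerNamespace = 3000 / (4 * 30)  = 25
#   smallDeploymentsPerNamespace  = 3000 / (2 * 5)   = 300
# (the per-namespace counts are then reduced slightly by the StatefulSet/Job
# adjustments below)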

# Stateful sets are enabled. Reduce the number of small and medium deployments per namespace
# See https://github.com/kubernetes/perf-tests/issues/1036#issuecomment-607631768
# Ensure non zero or negative after subtraction.
{{$smallDeploymentsPerNamespace := MaxInt 0 (SubtractInt $smallDeploymentsPerNamespace $SMALL_STATEFUL_SETS_PER_NAMESPACE)}}
{{$mediumDeploymentsPerNamespace := MaxInt 0 (SubtractInt $mediumDeploymentsPerNamespace $MEDIUM_STATEFUL_SETS_PER_NAMESPACE)}}

# Jobs are enabled. Reduce the number of small, medium, big deployments per namespace.
# Ensure non zero or negative after subtraction.
{{$smallDeploymentsPerNamespace := MaxInt 0 (SubtractInt $smallDeploymentsPerNamespace 1)}}
{{$mediumDeploymentsPerNamespace := MaxInt 0 (SubtractInt $mediumDeploymentsPerNamespace 1)}}
{{$bigDeploymentsPerNamespace := MaxInt 0 (SubtractInt $bigDeploymentsPerNamespace 1)}}

# Disable big jobs on small clusters.
{{$bigJobsPerNamespace := IfThenElse $IS_SMALL_CLUSTER 0 1}}

# The minimal number of pods to be used to measure various things like
# pod-startup-latency or scheduler-throughput. The purpose of it is to avoid
# problems in small clusters where we wouldn't have enough samples (pods) to
# measure things accurately.
{{$MIN_PODS_IN_SMALL_CLUSTERS := 500}}

# BEGIN scheduler-throughput section
# TODO(https://github.com/kubernetes/perf-tests/issues/1027): Lower the number of "min-pods" once we fix the scheduler throughput measurement.
{{$totalSchedulerThroughputPods := MaxInt (MultiplyInt 2 $MIN_PODS_IN_SMALL_CLUSTERS) .Nodes}}
{{$schedulerThroughputReplicasPerNamespace := 1}}
{{$schedulerThroughputPodsPerDeployment := .Nodes}}
{{$schedulerThroughputNamespaces := DivideInt $totalSchedulerThroughputPods $schedulerThroughputPodsPerDeployment}}
{{if and $ENABLE_HUGE_SERVICES (ge $HUGE_SERVICES_SIZE $HUGE_SERVICES_PODS_PER_DEPLOYMENT)}}
{{$schedulerThroughputReplicasPerNamespace = DivideInt $HUGE_SERVICES_SIZE $HUGE_SERVICES_PODS_PER_DEPLOYMENT}}
{{$schedulerThroughputPodsPerDeployment = $HUGE_SERVICES_PODS_PER_DEPLOYMENT}}
{{$schedulerThroughputNamespaces = 1}}
{{end}}
# Set schedulerThroughputNamespaces to 1 on small clusters otherwise it will result
# in an unnecessary number of namespaces.
{{$schedulerThroughputNamespaces := IfThenElse $IS_SMALL_CLUSTER 1 $schedulerThroughputNamespaces}}
# END scheduler-throughput section

# Command to be executed
{{$EXEC_COMMAND := DefaultParam .CL2_EXEC_COMMAND nil}}
{{$EXIT_AFTER_EXEC := DefaultParam .CL2_EXIT_AFTER_EXEC false}}
{{$SLEEP_AFTER_EXEC_DURATION := DefaultParam .CL2_SLEEP_AFTER_EXEC_DURATION "0s"}}

{{$latencyPodImage := DefaultParam .CL2_LATENCY_POD_IMAGE "k8s.gcr.io/pause:3.1"}}

name: load
namespace:
  number: {{AddInt $namespaces $schedulerThroughputNamespaces}}
tuningSets:
- name: Sequence
  parallelismLimitedLoad:
    parallelismLimit: 1
# Dedicated tuningSet for SchedulerThroughput phases that results in fully
# parallel creation of deployments.
- name: SchedulerThroughputParallel
  parallelismLimitedLoad:
    parallelismLimit: {{MultiplyInt $schedulerThroughputNamespaces $schedulerThroughputReplicasPerNamespace}}
# TODO(https://github.com/kubernetes/perf-tests/issues/1024): This TuningSet is used only for pod-startup-latency, get rid of it
# Uniform5qps: for each running phase, use 5 qps.
- name: Uniform5qps
  qpsLoad:
    qps: 5
# Global100qps: use 100 qps globally:
# * split equally qps among all running phases
# * if some phase finishes, other phases will get more qps.
- name: Global100qps
  globalQPSLoad:
    qps: 100
    burst: 1
- name: RandomizedSaturationTimeLimited
  RandomizedTimeLimitedLoad:
    timeLimit: {{$saturationTime}}s
- name: RandomizedScalingTimeLimited
  RandomizedTimeLimitedLoad:
    # The expected number of created/deleted pods is totalPods/4 when scaling,
    # as each RS changes its size from X to a uniform random value in [X/2, 3X/2].
    # To match 10 [pods/s] requirement, we need to divide saturationTime by 4.
    timeLimit: {{DivideInt $saturationTime 4}}s
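    # Worked example: with saturationTime=300s this evaluates to timeLimit=75s;
    # an expected churn of totalPods/4 = 750 pods over 75s keeps the same
    # ~10 pods/s rate as the saturation phase.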
- name: DeletionTimeLimited
  TimeLimitedLoad:
    timeLimit: 5m
- name: RandomizedDeletionTimeLimited
  RandomizedTimeLimitedLoad:
    timeLimit: {{$deletionTime}}s
{{if $ENABLE_CHAOSMONKEY}}
chaosMonkey:
  nodeFailure:
    failureRate: 0.01
    interval: 5m
    jitterFactor: 2.0
    simulatedDowntime: 10m
{{end}}
steps:
- module:
    path: /modules/measurements.yaml
    params:
      action: start

- module:
    path: modules/services.yaml
    params:
      actionName: "Creating"
      namespaces: {{$namespaces}}
      smallServicesPerNamespace: {{DivideInt (AddInt $smallDeploymentsPerNamespace 1) 2}}
      mediumServicesPerNamespace: {{DivideInt (AddInt $mediumDeploymentsPerNamespace 1) 2}}
      bigServicesPerNamespace: {{DivideInt (AddInt $bigDeploymentsPerNamespace 1) 2}}
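
# Per the SVC assumptions at the top of the file, roughly one Service is
# created for every two Deployments of a given size. Continuing the worked
# example: smallDeploymentsPerNamespace = 300 - 1 - 1 = 298 after the
# StatefulSet/Job reductions, so (298 + 1) / 2 = 149 small Services per
# namespace (integer division).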

- name: Creating PriorityClass for DaemonSets
  phases:
  - replicasPerNamespace: 1
    tuningSet: Sequence
    objectBundle:
    - basename: daemonset-priorityclass
      objectTemplatePath: daemonset-priorityclass.yaml

# Moved from reconcile-objects.yaml to mitigate https://github.com/kubernetes/kubernetes/issues/96635.
# TODO(https://github.com/kubernetes/perf-tests/issues/1823): Merge back to reconcile-objects.yaml once the k/k bug is fixed.
- module:
    path: /modules/configmaps-secrets.yaml
    params:
      actionName: create
      tuningSet: Global100qps
      namespaces: {{$namespaces}}
      bigDeploymentsPerNamespace: {{$bigDeploymentsPerNamespace}}
      mediumDeploymentsPerNamespace: {{$mediumDeploymentsPerNamespace}}
      smallDeploymentsPerNamespace: {{$smallDeploymentsPerNamespace}}

# Workload objects are no longer created in this test.
#- module:
#    path: /modules/reconcile-objects.yaml
#    params:
#      actionName: "create"
#      namespaces: {{$namespaces}}
#      tuningSet: RandomizedSaturationTimeLimited
#      testMaxReplicaFactor: {{$RANDOM_SCALE_FACTOR}}
#      # We rely on the fact that daemonset is using the same image as the 'pod-startup-latency' module.
#      # The goal is to cache the image to all nodes before we start any latency pod,
#      # so that when we measure pod startup latency, the image is already present on all nodes.
#      # This way, the pod startup latency we measure excludes (or limits impact) of image pulling,
#      # which matches our SLO definition: https://github.com/kubernetes/community/blob/master/sig-scalability/slos/pod_startup_latency.md.
#      daemonSetImage: {{$latencyPodImage}}
#      daemonSetEnv: "before update"
#      daemonSetReplicas: 1
#      bigDeploymentSize: {{$BIG_GROUP_SIZE}}
#      bigDeploymentsPerNamespace: {{$bigDeploymentsPerNamespace}}
#      mediumDeploymentSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumDeploymentsPerNamespace: {{$mediumDeploymentsPerNamespace}}
#      smallDeploymentSize: {{$SMALL_GROUP_SIZE}}
#      smallDeploymentsPerNamespace: {{$smallDeploymentsPerNamespace}}
#      smallStatefulSetSize: {{$SMALL_GROUP_SIZE}}
#      smallStatefulSetsPerNamespace: {{$SMALL_STATEFUL_SETS_PER_NAMESPACE}}
#      mediumStatefulSetSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumStatefulSetsPerNamespace: {{$MEDIUM_STATEFUL_SETS_PER_NAMESPACE}}
#      bigJobSize: {{$BIG_GROUP_SIZE}}
#      bigJobsPerNamespace: {{$bigJobsPerNamespace}}
#      mediumJobSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumJobsPerNamespace: 1
#      smallJobSize: {{$SMALL_GROUP_SIZE}}
#      smallJobsPerNamespace: 1

# The scheduler-throughput test is no longer run.
#{{if not $IS_SMALL_CLUSTER}}
## BEGIN scheduler throughput
#  {{if $ENABLE_HUGE_SERVICES}}
#- module:
#    path: modules/huge-services.yaml
#    params:
#      action: create
#      namespaces: {{$namespaces}}
#      replicasPerNamespace: 1
#      schedulerThroughputNamespaces: {{$schedulerThroughputNamespaces}}
#  {{end}}
#- module:
#    path: modules/scheduler-throughput.yaml
#    params:
#      action: create
#      namespaces: {{$namespaces}}
#      replicasPerNamespace: {{$schedulerThroughputReplicasPerNamespace}}
#      schedulerThroughputNamespaces: {{$schedulerThroughputNamespaces}}
#      schedulerThroughputPodsPerDeployment: {{$schedulerThroughputPodsPerDeployment}}
#  {{if $ENABLE_HUGE_SERVICES}}
#      serviceName: huge-service
#  {{end}}
#{{end}}

# The kubemark cluster does not support EXEC_COMMAND.
#{{if $EXEC_COMMAND}}
#
#{{if $ENABLE_API_AVAILABILITY_MEASUREMENT}}
#- name: Pausing APIAvailability measurement
#  measurements:
#  - Identifier: APIAvailability
#    Method: APIAvailability
#    Params:
#      action: pause
#{{end}}
#
#- name: Exec command
#  measurements:
#  - Identifier: ExecCommand
#    Method: Exec
#    Params:
#      command:
#    {{range $EXEC_COMMAND}}
#      - {{.}}
#    {{end}}
#
#{{if $ENABLE_API_AVAILABILITY_MEASUREMENT}}
#- name: Unpausing APIAvailability measurement
#  measurements:
#  - Identifier: APIAvailability
#    Method: APIAvailability
#    Params:
#      action: unpause
#{{end}}
#
#- name: Sleep
#  measurements:
#  - Identifier: WaitAfterExec
#    Method: Sleep
#    Params:
#      duration: {{$SLEEP_AFTER_EXEC_DURATION}}
#{{end}}

{{if not $EXIT_AFTER_EXEC}}
#{{if not $IS_SMALL_CLUSTER}}
#- module:
#    path: modules/scheduler-throughput.yaml
#    params:
#      action: delete
#      namespaces: {{$namespaces}}
#      replicasPerNamespace: 0
#      schedulerThroughputNamespaces: {{$schedulerThroughputNamespaces}}
#      schedulerThroughputPodsPerDeployment: {{$schedulerThroughputPodsPerDeployment}}
#  {{if $ENABLE_HUGE_SERVICES}}
#      serviceName: huge-service
#- module:
#    path: modules/huge-services.yaml
#    params:
#      action: delete
#      namespaces: {{$namespaces}}
#      replicasPerNamespace: 0
#      schedulerThroughputNamespaces: {{$schedulerThroughputNamespaces}}
#  {{end}}
# END scheduler throughput
{{end}}
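
# Note: clusterloader2 expands the Go templates before the YAML is parsed, so
# if/end template directives take effect even on lines commented out with '#'.
# That is presumably why the bare end-directive above is left uncommented: it
# still closes the commented-out 'if not $IS_SMALL_CLUSTER' block opened before
# the scheduler-throughput delete step.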

#{{if not $IS_SMALL_CLUSTER}}
## TODO(kubernetes/perf-tests/issues/1024): We shouldn't have a dedicated module for measuring pod-startup-latency.
#- module:
#    path: modules/pod-startup-latency.yaml
#    params:
#      namespaces: {{$namespaces}}
#      minPodsInSmallCluster: {{$MIN_PODS_IN_SMALL_CLUSTERS}}
#      image: {{$latencyPodImage}}
#{{end}}

#- module:
#    path: /modules/reconcile-objects.yaml
#    params:
#      actionName: "scale and update"
#      namespaces: {{$namespaces}}
#      tuningSet: RandomizedScalingTimeLimited
#      randomScaleFactor: {{$RANDOM_SCALE_FACTOR}}
#      testMaxReplicaFactor: {{$RANDOM_SCALE_FACTOR}}
#      daemonSetImage: {{$latencyPodImage}}
#      daemonSetEnv: "after update"
#      daemonSetReplicas: 1
#      bigDeploymentSize: {{$BIG_GROUP_SIZE}}
#      bigDeploymentsPerNamespace: {{$bigDeploymentsPerNamespace}}
#      mediumDeploymentSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumDeploymentsPerNamespace: {{$mediumDeploymentsPerNamespace}}
#      smallDeploymentSize: {{$SMALL_GROUP_SIZE}}
#      smallDeploymentsPerNamespace: {{$smallDeploymentsPerNamespace}}
#      smallStatefulSetSize: {{$SMALL_GROUP_SIZE}}
#      smallStatefulSetsPerNamespace: {{$SMALL_STATEFUL_SETS_PER_NAMESPACE}}
#      mediumStatefulSetSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumStatefulSetsPerNamespace: {{$MEDIUM_STATEFUL_SETS_PER_NAMESPACE}}
#      bigJobSize: {{$BIG_GROUP_SIZE}}
#      bigJobsPerNamespace: {{$bigJobsPerNamespace}}
#      mediumJobSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumJobsPerNamespace: 1
#      smallJobSize: {{$SMALL_GROUP_SIZE}}
#      smallJobsPerNamespace: 1
#
#- module:
#    path: /modules/reconcile-objects.yaml
#    params:
#      actionName: "delete"
#      namespaces: {{$namespaces}}
#      tuningSet: RandomizedDeletionTimeLimited
#      testMaxReplicaFactor: {{$RANDOM_SCALE_FACTOR}}
#      daemonSetReplicas: 0
#      bigDeploymentSize: {{$BIG_GROUP_SIZE}}
#      bigDeploymentsPerNamespace: 0
#      mediumDeploymentSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumDeploymentsPerNamespace: 0
#      smallDeploymentSize: {{$SMALL_GROUP_SIZE}}
#      smallDeploymentsPerNamespace: 0
#      smallStatefulSetSize: {{$SMALL_GROUP_SIZE}}
#      smallStatefulSetsPerNamespace: 0
#      mediumStatefulSetSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumStatefulSetsPerNamespace: 0
#      bigJobSize: {{$BIG_GROUP_SIZE}}
#      bigJobsPerNamespace: 0
#      mediumJobSize: {{$MEDIUM_GROUP_SIZE}}
#      mediumJobsPerNamespace: 0
#      smallJobSize: {{$SMALL_GROUP_SIZE}}
#      smallJobsPerNamespace: 0
#      pvSmallStatefulSetSize: {{$SMALL_STATEFUL_SETS_PER_NAMESPACE}}
#      pvMediumStatefulSetSize: {{$MEDIUM_STATEFUL_SETS_PER_NAMESPACE}}

- module:
    path: /modules/configmaps-secrets.yaml
    params:
      actionName: delete
      tuningSet: Global100qps
      namespaces: {{$namespaces}}
      bigDeploymentsPerNamespace: 0
      mediumDeploymentsPerNamespace: 0
      smallDeploymentsPerNamespace: 0

- name: Deleting PriorityClass for DaemonSets
  phases:
  - replicasPerNamespace: 0
    tuningSet: Sequence
    objectBundle:
    - basename: daemonset-priorityclass
      objectTemplatePath: daemonset-priorityclass.yaml

- module:
    path: modules/services.yaml
    params:
      actionName: "Deleting"
      namespaces: {{$namespaces}}
      smallServicesPerNamespace: 0
      mediumServicesPerNamespace: 0
      bigServicesPerNamespace: 0
{{end}} # not EXIT_AFTER_EXEC

- module:
    path: /modules/measurements.yaml
    params:
      action: gather
```
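
All of the `CL2_*` knobs read through `DefaultParam` above can be changed without editing the file, by passing an overrides file to clusterloader2 via its `--testoverrides` flag. A minimal sketch (the file name and values are illustrative, not taken from the original run):

```
# overrides.yaml (example only): enable the API availability check and
# set the acceptable availability threshold.
CL2_ENABLE_API_AVAILABILITY_MEASUREMENT: true
CL2_API_AVAILABILITY_PERCENTAGE_THRESHOLD: 99.5
```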

##### **modules/measurements.yaml**

```
## Measurement module defines test scoped measurement.

## Input params
# Valid actions: "start", "gather"
{{$action := .action}}

## Feature-gates and configs:
{{$ALLOWED_SLOW_API_CALLS := DefaultParam .CL2_ALLOWED_SLOW_API_CALLS 0}}
{{$API_AVAILABILITY_PERCENTAGE_THRESHOLD := DefaultParam .CL2_API_AVAILABILITY_PERCENTAGE_THRESHOLD 0.0}}
{{$CLUSTER_OOMS_IGNORED_PROCESSES := DefaultParam .CL2_CLUSTER_OOMS_IGNORED_PROCESSES ""}}
{{$CUSTOM_API_CALL_THRESHOLDS := DefaultParam .CUSTOM_API_CALL_THRESHOLDS ""}}
{{$ENABLE_API_AVAILABILITY_MEASUREMENT := DefaultParam .CL2_ENABLE_API_AVAILABILITY_MEASUREMENT false}}
{{$ENABLE_CLUSTER_OOMS_TRACKER := DefaultParam .CL2_ENABLE_CLUSTER_OOMS_TRACKER true}}
{{$ENABLE_NODE_LOCAL_DNS_LATENCY := DefaultParam .CL2_ENABLE_NODE_LOCAL_DNS_LATENCY false}}
{{$ENABLE_RESTART_COUNT_CHECK := DefaultParam .ENABLE_RESTART_COUNT_CHECK true}}
{{$ENABLE_SYSTEM_POD_METRICS:= DefaultParam .ENABLE_SYSTEM_POD_METRICS true}}
{{$ENABLE_VIOLATIONS_FOR_API_CALL_PROMETHEUS := DefaultParam .CL2_ENABLE_VIOLATIONS_FOR_API_CALL_PROMETHEUS false}}
{{$ENABLE_VIOLATIONS_FOR_API_CALL_PROMETHEUS_SIMPLE := DefaultParam .CL2_ENABLE_VIOLATIONS_FOR_API_CALL_PROMETHEUS_SIMPLE true}}
{{$NODE_LOCAL_DNS_LATENCY_THRESHOLD := DefaultParam .CL2_NODE_LOCAL_DNS_LATENCY_THRESHOLD "5s"}}
{{$PROMETHEUS_SCRAPE_KUBE_PROXY := DefaultParam .PROMETHEUS_SCRAPE_KUBE_PROXY true}}
{{$PROMETHEUS_SCRAPE_KUBE_STATE_METRICS := DefaultParam .PROMETHEUS_SCRAPE_KUBE_STATE_METRICS false}}
{{$PROMETHEUS_SCRAPE_METRICS_SERVER_METRICS := DefaultParam .PROMETHEUS_SCRAPE_METRICS_SERVER_METRICS false}}
{{$RESTART_COUNT_THRESHOLD_OVERRIDES:= DefaultParam .RESTART_COUNT_THRESHOLD_OVERRIDES ""}}
{{$USE_SIMPLE_LATENCY_QUERY := DefaultParam .USE_SIMPLE_LATENCY_QUERY false}}

# Probe measurements shared parameter
{{$PROBE_MEASUREMENTS_CHECK_PROBES_READY_TIMEOUT := DefaultParam .CL2_PROBE_MEASUREMENTS_CHECK_PROBES_READY_TIMEOUT "15m"}}

steps:
- name: "{{$action}}ing measurements"
  measurements:
  - Identifier: APIResponsivenessPrometheus
    Method: APIResponsivenessPrometheus
    Params:
      action: {{$action}}
      {{if not $USE_SIMPLE_LATENCY_QUERY}}
      enableViolations: {{$ENABLE_VIOLATIONS_FOR_API_CALL_PROMETHEUS}}
      allowedSlowCalls: {{$ALLOWED_SLOW_API_CALLS}}
      customThresholds: {{YamlQuote $CUSTOM_API_CALL_THRESHOLDS 4}}
      {{end}}
  - Identifier: APIResponsivenessPrometheusSimple
    Method: APIResponsivenessPrometheus
    Params:
      action: {{$action}}
      enableViolations: {{$ENABLE_VIOLATIONS_FOR_API_CALL_PROMETHEUS_SIMPLE}}
      useSimpleLatencyQuery: true
      summaryName: APIResponsivenessPrometheus_simple
      allowedSlowCalls: {{$ALLOWED_SLOW_API_CALLS}}
      customThresholds: {{YamlQuote $CUSTOM_API_CALL_THRESHOLDS 4}}
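  # These two measurements are what enforce the API-call latency SLO referred
  # to at the top of this page. Per the upstream SLO definition
  # (https://github.com/kubernetes/community/blob/master/sig-scalability/slos/api_call_latency.md),
  # the default 99th-percentile thresholds are 1s for mutating and
  # single-object read calls, 5s for namespace-scoped LISTs, and 30s for
  # cluster-scoped LISTs.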
# PodStartupLatency was already measured in the density test, so it is not measured again here.
#  - Identifier: CreatePhasePodStartupLatency
#    Method: PodStartupLatency
#    Params:
#      action: {{$action}}
#      labelSelector: group = load
#      threshold: 1h # TODO(https://github.com/kubernetes/perf-tests/issues/1024): Ideally, this should be 5s
# The kubemark cluster does not support the InClusterNetworkLatency measurement.
#- Identifier: InClusterNetworkLatency
#  Method: InClusterNetworkLatency
#  Params:
#    action: {{$action}}
#    checkProbesReadyTimeout: {{$PROBE_MEASUREMENTS_CHECK_PROBES_READY_TIMEOUT}}
#    replicasPerProbe: {{AddInt 2 (DivideInt .Nodes 100)}}
# The kubemark cluster does not support this measurement either.
#{{if $ENABLE_NODE_LOCAL_DNS_LATENCY}}
#  - Identifier: NodeLocalDNSLatency
#    Method: NodeLocalDNSLatencyPrometheus
#    Params:
#      action: {{$action}}
#      enableViolations: true
#      threshold: {{$NODE_LOCAL_DNS_LATENCY_THRESHOLD}}
#{{end}}
# Unclear what this measurement covers; kept commented out for now.
#- Identifier: SLOMeasurement
#  Method: SLOMeasurement
#  Params:
#    action: {{$action}}
#    checkProbesReadyTimeout: {{$PROBE_MEASUREMENTS_CHECK_PROBES_READY_TIMEOUT}}
#    replicasPerProbe: {{AddInt 2 (DivideInt .Nodes 100)}}
# The kubemark cluster does not support the following measurements.
#{{if $PROMETHEUS_SCRAPE_KUBE_PROXY}}
#  - Identifier: NetworkProgrammingLatency
#    Method: NetworkProgrammingLatency
#    Params:
#      action: {{$action}}
#{{end}}
#{{if $PROMETHEUS_SCRAPE_KUBE_STATE_METRICS}}
#  - Identifier: KubeStateMetricsLatency
#    Method: KubeStateMetricsLatency
#    Params:
#      action: {{$action}}
#{{end}}
#{{if $PROMETHEUS_SCRAPE_METRICS_SERVER_METRICS}}
#  - Identifier: MetricsServerPrometheus
#    Method: MetricsServerPrometheus
#    Params:
#      action: {{$action}}
#{{end}}
{{if $ENABLE_API_AVAILABILITY_MEASUREMENT}}
  - Identifier: APIAvailability
    Method: APIAvailability
    Params:
      action: {{$action}}
      pollFrequency: "5s"
      hostPollTimeoutSeconds: 5
      threshold: {{$API_AVAILABILITY_PERCENTAGE_THRESHOLD}}
{{end}}
#- Identifier: TestMetrics
#  Method: TestMetrics
#  Params:
#    action: {{$action}}
#    systemPodMetricsEnabled: {{$ENABLE_SYSTEM_POD_METRICS}}
#    clusterOOMsIgnoredProcesses: {{YamlQuote $CLUSTER_OOMS_IGNORED_PROCESSES 4}}
#    clusterOOMsTrackerEnabled: {{$ENABLE_CLUSTER_OOMS_TRACKER}}
#    restartCountThresholdOverrides: {{YamlQuote $RESTART_COUNT_THRESHOLD_OVERRIDES 4}}
#    enableRestartCountCheck: {{$ENABLE_RESTART_COUNT_CHECK}}
```

All other files are left unchanged.

### **Test command**

The command is exactly the same as the one used for the density test.
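
For reference, a density-style clusterloader2 invocation typically looks roughly like the sketch below; the binary path, kubeconfig, node count, and report directory are placeholders to adapt to your environment:

```
# Hypothetical invocation; flags mirror the density test run.
./clusterloader --testconfig=testing/load/config.yaml \
    --provider=kubemark \
    --kubeconfig=$HOME/.kube/config \
    --nodes=100 \
    --enable-prometheus-server=true \
    --report-dir=/tmp/clusterloader-report
```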