日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問(wèn) 生活随笔!

生活随笔

當(dāng)前位置: 首頁(yè) > 编程资源 > 综合教程 >内容正文

综合教程

esrally自定义tracks实践

發(fā)布時(shí)間:2023/12/19 综合教程 36 生活家
生活随笔 收集整理的這篇文章主要介紹了 esrally自定义tracks实践 小編覺(jué)得挺不錯(cuò)的,現(xiàn)在分享給大家,幫大家做個(gè)參考.

一、使用官方的log類(lèi)型,通過(guò)改造challenge來(lái)實(shí)現(xiàn)parallel的功能,實(shí)現(xiàn)對(duì)同一個(gè)index進(jìn)行讀寫(xiě)混合的測(cè)試需求

    {
      "name": "append-no-conflicts",
      "description": "Indexes the whole document corpus using Elasticsearch default settings. We only adjust the number of replicas as we benchmark a single node cluster and Rally will only start the benchmark if the cluster turns green. Document ids are unique so all index operations are append only. After that a couple of queries are run.",
      "default": true,
      "schedule": [
        {
          "operation": "delete-index"
        },
        {
          "operation": {
            "operation-type": "create-index",
            "settings": {{index_settings | default({}) | tojson}}
          }
        },
        {
          "name": "check-cluster-health",
          "operation": {
            "operation-type": "cluster-health",
            "index": "logs-*",
            "request-params": {
              "wait_for_status": "{{cluster_health | default('green')}}",
              "wait_for_no_relocating_shards": "true"
            },
            "retry-until-success": true
          }
        },
        {
            "parallel": {
                  "completed-by": "index-append",    #表示當(dāng)index-append完成時(shí)結(jié)束并發(fā)
                  "tasks": [

                        {
                          "operation": "index-append",
                          "warmup-time-period": 240,
                          "clients": {{bulk_indexing_clients | default(8)}}
                        },
                        {
                          "operation": "default",
                          "clients": 1,
                          "warmup-iterations": 500,
                          "iterations": 100,
                          "target-throughput": 8
                        },
                        {
                          "operation": "term",
                          "clients": 1,
                          "warmup-iterations": 500,
                          "iterations": 100,
                          "target-throughput": 50
                        },
                        {
                          "operation": "range",
                          "clients": 1,
                          "warmup-iterations": 100,
                          "iterations": 100,
                          "target-throughput": 1
                        },
                        {
                          "operation": "hourly_agg",
                          "clients": 1,
                          "warmup-iterations": 100,
                          "iterations": 100,
                          "target-throughput": 0.2
                        },
                        {
                          "operation": "scroll",
                          "clients": 1,
                          "warmup-iterations": 100,
                          "iterations": 200,
                          "#COMMENT": "Throughput is considered per request. So we issue one scroll request per second which will retrieve 25 pages",
                          "target-throughput": 1
                        }
                  ]
                }
        }
      ]
    },

二、根據(jù)實(shí)際業(yè)務(wù)日志來(lái)生成data和track使性能測(cè)試結(jié)果更加貼合實(shí)際業(yè)務(wù)

1、從已有集群中的數(shù)據(jù)自動(dòng)生成log和track

esrally版本要求2.0以上

參考:https://esrally.readthedocs.io/en/2.1.0/adding_tracks.html?highlight=custom

esrally create-track --track=acme --target-hosts=127.0.0.1:9200 --indices="products,companies" --output-path=~/tracks

最終生成效果:

airkafka_pm02_2021-02-01-documents-1k.json:xxx-1k表示用于test_mode模式,數(shù)據(jù)量較小,用于測(cè)試

airkafka_pm02_2021-02-01.json:是對(duì)應(yīng)log的index

airkafka_pm02_2021-02-01-documents.json.offset:log日志偏移量

{
  "version": 2,
  "description": "Tracker-generated track for nsh",
  "indices": [
    {
      "name": "airkafka_pm02_2021-02-12",
      "body": "airkafka_pm02_2021-02-12.json", #可以改成track.json所在目錄下的其他自定義的index
	  "types": ["doc"]   #由于es7以上版本不支持type字段,因此生成的track.json沒(méi)有該字段。如果是對(duì)es7以下版本進(jìn)行測(cè)試,需要增加該字段
    }, 
    {
      "name": "airkafka_pm02_2021-02-01",
      "body": "airkafka_pm02_2021-02-12.json",
	  "types": ["doc"]
    }
  ],
  "corpora": [
    {
      "name": "nsh",
	  "target-type": "doc",     #該字段對(duì)應(yīng)的是bulk插入的_type字段,必須要指定,不然會(huì)報(bào)type missing的錯(cuò)誤
      "documents": [
        {
          "target-index": "airkafka_pm02_2021-02-12",
          "source-file": "airkafka_pm02_2021-02-12-documents.json.bz2",
          "document-count": 14960567,
          "compressed-bytes": 814346714,
          "uncompressed-bytes": 12138377222
        },
		{
          "target-index": "airkafka_pm02_2021-02-01",
          "source-file": "airkafka_pm02_2021-02-01-documents.json.bz2",
          "document-count": 24000503,     #需要跟實(shí)際的documents文件里的數(shù)量一致
          "compressed-bytes": 1296215463,
          "uncompressed-bytes": 19551041674
        }
      ]
    }
  ],
  "operations": [    #自動生成的track.json里不會區分operation和challenge,可以自己拆分定義,按照這個模板來就行
    {
      "name": "index-append",
      "operation-type": "bulk",
      "bulk-size": {{bulk_size | default(5000)}},
      "ingest-percentage": 100,
      "corpora": "nsh"         #要改成上面corpora的name
    },
    {
      "name": "default",       #name可以改成其他自定義的
      "operation-type": "search",       #operation-type只支持search
      "index": "airkafka_pm02_2021-*",
      "body": {
        "query": {
          "match_all": {}
        }
      }
    },
    {
      "name": "term",
      "operation-type": "search",
      "index": "airkafka_pm02_2021-*",  #index也可以自定義
	  "body": {     #body里的query語(yǔ)句可以根據(jù)業(yè)務(wù)需求自定義
		"query": {
		  "term": {
			"log_id.raw": {
			  "value": "gm_client_app_profile_log"
			}
		  }
		}
	  }
    },
    {
      "name": "range",
      "operation-type": "search",
      "index": "airkafka_pm02_2021-*",
		"body": {
			"query": {
			  "range": {
				  "deveice_level": {
					"gte": 0,
					  "lt": 3
				  }
			  }
			}
		}
    },
    {
      "name": "hourly_agg",
      "operation-type": "search",
      "index": "airkafka_pm02_2021-*",
		"body": {
			"size": 0,
			"aggs": {
			  "by_hour": {
				"date_histogram": {
				  "field": "@timestamp",
				  "interval": "hour"
				}
			  }
			}
		}
    },
    {
      "name": "scroll",
      "operation-type": "search",
      "index": "airkafka_pm02_2021-*",
      "pages": 25,
      "results-per-page": 1000,
      "body": {
        "query": {
          "match_all": {}
        }
      }
    }
  ],
  "challenges": [  #可以自定義多個(gè)不同的challenge,然后命令行里指定需要運(yùn)行的challenge
        {
      "name": "append-no-conflicts",
      "description": "Indexes the whole document corpus using Elasticsearch default settings. We only adjust the number of replicas as we benchmark a single node cluster and Rally will only start the benchmark if the cluster turns green. Document ids are unique so all index operations are append only. After that a couple of queries are run.",
      "default": true,
      "schedule": [
        {
          "operation": "delete-index"
        },
        {
          "operation": {
            "operation-type": "create-index",
            "settings": {}
          }
        },
        {
          "name": "check-cluster-health",
          "operation": {
            "operation-type": "cluster-health",
            "index": "airkafka_pm02_2021-*",
            "request-params": {
              "wait_for_status": "green",
              "wait_for_no_relocating_shards": "true"
            },
            "retry-until-success": true
          }
        },
        {
            "parallel": {
                  "completed-by": "index-append",
                  "tasks": [

                        {
                          "operation": "index-append",
                          "warmup-time-period": 240,
                          "clients": {{bulk_indexing_clients | default(8)}}
                        },
                        {
                          "operation": "default",
                          "clients": 1,
                          "warmup-iterations": 500,
                          "iterations": 100,
                          "target-throughput": 8      #限定最大的tps,類似于jmeter里的目標加壓。此時service time和latency的大小不一致,service time小于latency,真正具有參考意義的是service time
                        },
                        {
                          "operation": "term",
                          "clients": 1,
                          "warmup-iterations": 500,
                          "iterations": 100,
                          "target-throughput": 50
                        },
                        {
                          "operation": "range",
                          "clients": 1,
                          "warmup-iterations": 100,
                          "iterations": 100,
                          "target-throughput": 1
                        },
                        {
                          "operation": "hourly_agg",
                          "clients": 1,
                          "warmup-iterations": 100,
                          "iterations": 100,
                          "target-throughput": 0.2
                        },
                        {
                          "operation": "scroll",
                          "clients": 1,
                          "warmup-iterations": 100,
                          "iterations": 200,
                          "#COMMENT": "Throughput is considered per request. So we issue one scroll request per second which will retrieve 25 pages",
                          "target-throughput": 1
                        }
                  ]
                }
        }
      ]
    },
    {
      "name": "append-no-conflicts-index-only",
      "description": "Indexes the whole document corpus using Elasticsearch default settings. We only adjust the number of replicas as we benchmark a single node cluster and Rally will only start the benchmark if the cluster turns green. Document ids are unique so all index operations are append only.",
      "schedule": [
        {
          "operation": "delete-index"
        },
        {
          "operation": {
            "operation-type": "create-index",
            "settings": {}
          }
        },
        {
          "name": "check-cluster-health",
          "operation": {
            "operation-type": "cluster-health",
            "index": "airkafka_pm02_2021-*",
            "request-params": {
              "wait_for_status": "green",
              "wait_for_no_relocating_shards": "true"
            },
            "retry-until-success": true
          }
        },
        {
          "operation": "index-append",
          "warmup-time-period": 240,
          "clients": 8
        },
        {
          "name": "refresh-after-index",
          "operation": "refresh"
        },
        {
          "operation": {
            "operation-type": "force-merge",
            "request-timeout": 7200
          }
        },
        {
          "name": "refresh-after-force-merge",
          "operation": "refresh"
        },
        {
          "name": "wait-until-merges-finish",
          "operation": {
            "operation-type": "index-stats",
            "index": "_all",
            "condition": {
              "path": "_all.total.merges.current",
              "expected-value": 0
            },
            "retry-until-success": true,
            "include-in-reporting": false
          }
        }
      ]
    }
  ]
}

  

"target-throughput": 50   #不指定則表示esrally盡最大可能發送消息,即測最大的性能,指定則是按照指定的tps發送。注意,如果指定,service time和latency是不一樣的,latency要大于service time,實際的es性能需要看service time

 

在自定義track的時(shí)候出現(xiàn)的一些錯(cuò)誤以及解決辦法

1、https://discuss.elastic.co/t/esrally-got-the-benchmark-ended-already-during-warmup-when-running-custom-track/186076/3

2、--on-error=abort 打開(kāi)該開(kāi)關(guān),esrally將會(huì)在第一次出錯(cuò)時(shí)就停止,同時(shí)記錄錯(cuò)誤日志,建議調(diào)試tracks打開(kāi)

3、在調(diào)試track的時(shí)候,把bulk_size設(shè)置小一點(diǎn),這樣error時(shí)同樣的日志會(huì)比較少,方便查看完整日志

4、service time和latency的區別

總結(jié)

以上是生活随笔為你收集整理的esrally自定义tracks实践的全部?jī)?nèi)容,希望文章能夠幫你解決所遇到的問(wèn)題。

如果覺(jué)得生活随笔網(wǎng)站內(nèi)容還不錯(cuò),歡迎將生活随笔推薦給好友。