https://tidb.net/blog/bda86911
Note 1: tidb-test is the cluster name.
tiup cluster edit-config tidb-test
cat ~/.tiup/storage/cluster/clusters/tidb-test/meta.yaml
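A typical end-to-end flow when changing cluster configuration looks like the sketch below (the -R role filter is optional; these are standard tiup cluster subcommands):

# Open the cluster topology/config in the embedded editor
tiup cluster edit-config tidb-test
# Distribute the new config and rolling-restart the affected components
tiup cluster reload tidb-test
# Restrict the reload to a single role if only one component changed
tiup cluster reload tidb-test -R tikv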
Configuration items are split by scope into global and local:

global: takes effect on all nodes (lower priority)
local: takes effect only on the current node (higher priority)

If the same item is set both in the global configuration and on an individual instance, the local (instance-level) value wins; see the sketch after this list.

global: cluster-wide configuration; each component can also be configured separately (under server_configs)
pd_servers: configuration of the PD instances; specifies which machines the PD component is deployed to
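A minimal topology sketch of this precedence (hosts and values are illustrative): the per-instance config block overrides the same key set under server_configs:

server_configs:
  tikv:
    log-level: info            # global: default for every TiKV node
tikv_servers:
- host: 172.16.188.140         # inherits log-level: info
- host: 172.16.188.143
  config:
    log-level: debug           # local: overrides the global value on this node only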
When deploying multiple instances on one machine, or co-locating with other services, you can cap a component's maximum resource usage. This is configured at the local (per-host) level:

- host: 172.16.188.123
  resource_control:
    memory_limit: 100G
    cpu_quota: 1000%
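Under the hood, TiUP translates resource_control into cgroup directives in the component's generated systemd unit (the 1:1 mapping of names below is my assumption for this TiUP version; 1000% means up to 10 CPU cores):

# sketch of the relevant excerpt of the generated service file
[Service]
MemoryLimit=100G
CPUQuota=1000%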
Startup order

tiup cluster start brings the components up in a fixed order. If the cluster includes TiCDC, the order (per TiUP's documented component ordering) is roughly:

PD → TiKV → Pump → TiDB → TiFlash → Drainer → CDC → Prometheus → Grafana → Alertmanager

So if a cluster contains all three of TiCDC, TiFlash, and Drainer, the same ordering answers the question: TiFlash starts after TiDB, Drainer starts after TiFlash, and TiCDC starts after Drainer.
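You can also observe the ordering yourself by starting the cluster and watching which components come up first; individual roles can be started on their own:

# Start everything in the default component order
tiup cluster start tidb-test
# Start only the CDC servers
tiup cluster start tidb-test -R cdc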
Modifying system variables is straightforward; the syntax is MySQL-compatible:

SHOW VARIABLES LIKE '';
SET GLOBAL param=value;

Values changed with SET GLOBAL are persisted in the mysql.GLOBAL_VARIABLES system table (which, like any other TiDB table, is stored in TiKV), so there is no need to worry about the change being lost after a restart.
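A short worked example (the variable name is a standard TiDB system variable; the value is illustrative):

-- inspect the current value
SHOW VARIABLES LIKE 'tidb_distsql_scan_concurrency';
-- change it cluster-wide; new sessions pick it up
SET GLOBAL tidb_distsql_scan_concurrency = 20;
-- verify the persisted copy that survives restarts
SELECT * FROM mysql.GLOBAL_VARIABLES
WHERE VARIABLE_NAME = 'tidb_distsql_scan_concurrency';

For reference, here is the full meta.yaml of a running cluster: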
user: tidb
tidb_version: v4.0.15
last_ops_ver: |-
  v1.2.3 tiup
  Go Version: go1.13
  Git Branch: release-1.2
  GitHash: df7e28a
topology:
  global:
    user: tidb
    ssh_port: 22
    ssh_type: builtin
    deploy_dir: /data/tdb
    data_dir: /data/tdb/data
    os: linux
    arch: amd64
  monitored:
    node_exporter_port: 9100
    blackbox_exporter_port: 9115
    deploy_dir: /data/tdb/monitor-9100
    data_dir: /data/tdb/data/monitor-9100
    log_dir: /data/tdb/monitor-9100/log
  server_configs:
    tidb:
      alter-primary-key: true
      binlog.enable: true
      binlog.ignore-error: false
      binlog.write-timeout: 15s
      compatible-kill-query: false
      enable-streaming: true
      host: 0.0.0.0
      lease: 45s
      log.enable-timestamp: true
      log.expensive-threshold: 10000
      log.file.max-days: 8
      log.format: text
      log.level: info
      log.query-log-max-len: 4096
      log.slow-threshold: 500
      lower-case-table-names: 2
      oom-action: log
      performance.committer-concurrency: 256
      performance.stmt-count-limit: 500000
      performance.tcp-keep-alive: true
      performance.txn-total-size-limit: 104857600
      prepared-plan-cache.enabled: true
      proxy-protocol.networks: 172.16.188.123
      run-ddl: true
      split-table: true
      store: tikv
      tikv-client.grpc-connection-count: 32
      tikv-client.max-batch-size: 256
      token-limit: 1500
    tikv:
      coprocessor.region-max-size: 384MB
      coprocessor.region-split-size: 256MB
      gRPC.grpc-concurrency: 8
      log-level: info
      raftdb.max-background-jobs: 8
      raftstore.apply-max-batch-size: 16384
      raftstore.apply-pool-size: 8
      raftstore.hibernate-regions: true
      raftstore.raft-max-inflight-msgs: 20480
      raftstore.raft-max-size-per-msg: 2MB
      raftstore.region-split-check-diff: 32MB
      raftstore.store-max-batch-size: 16384
      raftstore.store-pool-size: 8
      raftstore.sync-log: false
      readpool.coprocessor.max-tasks-per-worker-normal: 8000
      readpool.unified.max-thread-count: 32
      rocksdb.bytes-per-sync: 512MB
      rocksdb.compaction-readahead-size: 2MB
      rocksdb.defaultcf.level0-slowdown-writes-trigger: 32
      rocksdb.defaultcf.level0-stop-writes-trigger: 64
      rocksdb.defaultcf.max-write-buffer-number: 24
      rocksdb.defaultcf.write-buffer-size: 256MB
      rocksdb.lockcf.level0-slowdown-writes-trigger: 32
      rocksdb.lockcf.level0-stop-writes-trigger: 64
      rocksdb.max-background-flushes: 4
      rocksdb.max-background-jobs: 8
      rocksdb.max-sub-compactions: 4
      rocksdb.use-direct-io-for-flush-and-compaction: true
      rocksdb.wal-bytes-per-sync: 256MB
      rocksdb.writecf.level0-slowdown-writes-trigger: 32
      rocksdb.writecf.level0-stop-writes-trigger: 64
      rocksdb.writecf.max-write-buffer-number: 24
      rocksdb.writecf.write-buffer-size: 256MB
      storage.block-cache.capacity: 8GB
      storage.scheduler-concurrency: 4096000
      storage.scheduler-worker-pool-size: 8
    pd:
      auto-compaction-mod: periodic
      auto-compaction-retention: 10m
      quota-backend-bytes: 17179869184
    tiflash: {}
    tiflash-learner: {}
    pump: {}
    drainer: {}
    cdc: {}
  tidb_servers:
  - host: 172.16.188.123
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: /data/tdb/tidb-4000
    arch: amd64
    os: linux
  - host: 172.16.188.143
    ssh_port: 22
    port: 4000
    status_port: 10080
    deploy_dir: /data/tdb/tidb-4000
    arch: amd64
    os: linux
  tikv_servers:
  - host: 172.16.188.140
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  - host: 172.16.188.143
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  - host: 172.16.188.113
    ssh_port: 22
    port: 20160
    status_port: 20180
    deploy_dir: /data/tdb/tikv-20160
    data_dir: /data/tdb/data/tikv-20160
    arch: amd64
    os: linux
  tiflash_servers: []
  pd_servers:
  - host: 172.16.188.120
    ssh_port: 22
    name: pd-172.16.188.120-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  - host: 172.16.188.118
    ssh_port: 22
    name: pd-172.16.188.118-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  - host: 172.16.188.125
    ssh_port: 22
    name: pd-172.16.188.125-2379
    client_port: 2379
    peer_port: 2380
    deploy_dir: /data/tdb/pd-2379
    data_dir: /data/tdb/data/pd-2379
    arch: amd64
    os: linux
  pump_servers:
  - host: 172.16.188.143
    ssh_port: 22
    port: 8250
    deploy_dir: /data/pump
    data_dir: /data/pump/data
    log_dir: /data/pump/log
    config:
      gc: 5
    arch: amd64
    os: linux
  cdc_servers:
  - host: 172.16.188.120
    ssh_port: 22
    port: 8300
    deploy_dir: /data/tdb/cdc-8300
    arch: amd64
    os: linux
  monitoring_servers:
  - host: 172.16.188.113
    ssh_port: 22
    port: 9090
    deploy_dir: /data/tdb/prometheus-9090
    data_dir: /data/tdb/data/prometheus-9090
    arch: amd64
    os: linux
  grafana_servers:
  - host: 172.16.188.123
    ssh_port: 22
    port: 3000
    deploy_dir: /data/tdb/grafana-3000
    arch: amd64
    os: linux
  alertmanager_servers:
  - host: 172.16.188.123
    ssh_port: 22
    web_port: 9093
    cluster_port: 9094
    deploy_dir: /data/tdb/alertmanager-9093
    data_dir: /data/tdb/data/alertmanager-9093
    arch: amd64
    os: linux
More topology file templates: https://github.com/pingcap/docs-cn/tree/master/config-templates