carbon-clickhouse
使用ClickHouse作为存储的Graphite指标接收器
生产状态
最新版本稳定,可用于生产环境
概述
Docker
Docker镜像可在packages页面获取。
构建
需要golang 1.18+
# 构建二进制文件
git clone https://github.com/lomik/carbon-clickhouse.git
cd carbon-clickhouse
make
ClickHouse配置
- 在config.xml中添加 graphite_rollup 部分。示例在这里。你可以使用carbon-schema-to-clickhouse从graphite的storage-schemas.conf生成rollup xml。
- 创建表。
-- Main points table; the GraphiteMergeTree engine applies the 'graphite_rollup'
-- aggregation rules defined in config.xml (see the section above)
CREATE TABLE graphite (
Path String,
Value Float64,
Time UInt32,
Date Date,
Timestamp UInt32
) ENGINE = GraphiteMergeTree('graphite_rollup')
PARTITION BY toYYYYMM(Date)
ORDER BY (Path, Time);
-- Optional table for faster metric search
CREATE TABLE graphite_index (
Date Date,
Level UInt32,
Path String,
Version UInt32
) ENGINE = ReplacingMergeTree(Version)
PARTITION BY toYYYYMM(Date)
ORDER BY (Level, Path, Date);
-- Optional table for storing Graphite tags
CREATE TABLE graphite_tagged (
Date Date,
Tag1 String,
Path String,
Tags Array(String),
Version UInt32
) ENGINE = ReplacingMergeTree(Version)
PARTITION BY toYYYYMM(Date)
ORDER BY (Tag1, Path, Date);
你可以创建复制表。参见ClickHouse文档
配置
$ carbon-clickhouse -help
carbon-clickhouse的用法:
-check-config=false: 检查配置并退出
-config="": 配置文件名
-config-print-default=false: 打印默认配置
-version=false: 打印版本
默认情况下日期是不正确的(不总是UTC),但这从项目开始就使用,可能会产生一些bug。
更改为UTC需要重建points/index/tags表(将Date重新计算为真正的UTC)或使用宽日期范围的查询。
设置 data.utc-date = true 来启用此功能。
如果不使用UTC日期,则需要在同一时区运行carbon-clickhouse和graphite-clickhouse。
[common]
# Prefix for all internal carbon-clickhouse metrics. Supported macro: {host}
metric-prefix = "carbon.agents.{host}"
# Endpoint for storing internal carbon metrics. Valid values: "" or "local", "tcp://host:port", "udp://host:port"
metric-endpoint = "local"
# Interval for storing internal metrics. Similar to CARBON_METRIC_INTERVAL
metric-interval = "1m0s"
# GOMAXPROCS
max-cpu = 1
[logging]
# "stderr" and "stdout" can be used as filenames
file = "/var/log/carbon-clickhouse/carbon-clickhouse.log"
# Minimum log level. Valid values: "debug", "info", "warn", "error"
level = "info"
[data]
# Folder for buffering received data
path = "/data/carbon-clickhouse/"
# Rotate (and upload) files by size and interval
# Rotate (and upload) file size threshold in bytes (k, m and g suffixes may also be used)
# chunk-max-size = '512m'
chunk-max-size = 0
# Rotate (and upload) file interval
# Minimize chunk-interval to reduce the latency between point receipt and storage
chunk-interval = "1s"
# Auto-increase the chunk interval if the number of unhandled files grows
# Example: set the chunk interval to 10s when there are >= 5 unhandled files, and to 60s when there are >= 20:
# chunk-auto-interval = "5:10s,20:60s"
chunk-auto-interval = ""
# Compression algorithm used when storing temporary files.
# May help reduce disk usage when ClickHouse is unavailable for a long time.
# Currently supported: none, lz4
compression = "none"
# Compression level to use.
# For "lz4", 0 means plain LZ4, >=1 uses LZ4HC at this depth (higher compresses better but is slower)
compression-level = 0
# By default the Date column is imprecise (not always UTC)
#utc-date = false
[upload.graphite]
type = "points"
table = "graphite"
threads = 1
url = "http://localhost:8123/"
# compress-data enables gzip compression when sending to ClickHouse
compress-data = true
timeout = "1m0s"
# Save zero value to the Timestamp column (for points and points-reverse tables)
zero-timestamp = false
[upload.graphite_index]
type = "index"
table = "graphite_index"
threads = 1
url = "http://localhost:8123/"
timeout = "1m0s"
cache-ttl = "12h0m0s"
# Store hashes of metric names in memory instead of the full names
# Allowed values: "", "city64" (empty value - disabled)
hash = ""
# Set to disable the daily index; defaults to `false`
disable-daily-index = false
# # You can define any number of additional upload destinations of the supported types:
# # - points
# # - index
# # - tagged (described below)
# # - points-reverse (same scheme as points, but path 'a1.b2.c3' is stored as 'c3.b2.a1')
# # For uploaders of type "points" and "points-reverse", data can be ignored with patterns. Example:
# # NOTE(review): the example originally used type = "graphite" (not a documented type)
# # and reused the active [upload.graphite] name; both fixed to match the list above.
# [upload.graphite_points]
# type = "points"
# table = "graphite.points"
# threads = 1
# url = "http://localhost:8123/"
# timeout = "30s"
# ignored-patterns = [
# "a1.b2.*.c3",
# ]
# # An extra table usable as an index for tagged series
# # It is also possible to avoid writing tags for some metrics.
# # The example below ignores tags for certain metrics.
# [upload.graphite_tagged]
# type = "tagged"
# table = "graphite_tagged"
# threads = 1
# url = "http://localhost:8123/"
# timeout = "1m0s"
# cache-ttl = "12h0m0s"
# ignored-tagged-metrics = [
# "a.b.c.d", # for metrics like a.b.c.d?tagName1=tagValue1&tagName2=tagValue2... all tags (except __name__) are ignored
# "*", # for all metrics all tags (except __name__) are ignored; this is the only special case that uses a wildcard
# ]
#
# OpenSSL certificates (mTLS) can be used to connect to ClickHouse, e.g.:
# [upload.graphite]
# type = "points"
# table = "graphite"
# threads = 1
# compress-data = true
# zero-timestamp = false
# timeout = "1m0s"
# url = "https://localhost:8443/" # url uses https
# [upload.graphite.tls]
# ca-cert = [ "<path/to/rootCA.crt>", "<path/to/other/rootCA.crt>" ]
# server-name = "<server-name>"
# insecure-skip-verify = false # if true, the server certificate is not verified
# [[upload.graphite.tls.certificates]]
# key = "<path/to/client.key>"
# cert = "<path/to/client.crt>"
[udp]
listen = ":2003"
enabled = true
# Drop received points with timestamp > now + this value. 0 - drop nothing
drop-future = "0s"
# Drop received points with timestamp < now - this value. 0 - drop nothing
drop-past = "0s"
# Drop metrics with names longer than this value. 0 - drop nothing
drop-longer-than = 0
[tcp]
listen = ":2003"
enabled = true
# drop-* options behave as described in the [udp] section
drop-future = "0s"
drop-past = "0s"
drop-longer-than = 0
[pickle]
listen = ":2004"
enabled = true
# drop-* options behave as described in the [udp] section
drop-future = "0s"
drop-past = "0s"
drop-longer-than = 0
# Protocol definition: https://github.com/lomik/carbon-clickhouse/blob/master/grpc/carbon.proto
[grpc]
listen = ":2005"
enabled = false
# drop-* options behave as described in the [udp] section
drop-future = "0s"
drop-past = "0s"
drop-longer-than = 0
[prometheus]
listen = ":2006"
enabled = false
# drop-* options behave as described in the [udp] section
drop-future = "0s"
drop-past = "0s"
drop-longer-than = 0
[telegraf_http_json]
listen = ":2007"
enabled = false
# drop-* options behave as described in the [udp] section
drop-future = "0s"
drop-past = "0s"
drop-longer-than = 0
# Character used to join telegraf metric and field names (default is "_", for historical reasons and Prometheus compatibility)
concat = "."
# Golang pprof + some extra locations
#
# Last 1000 points dropped by the "drop-future", "drop-past" and "drop-longer-than" rules:
# /debug/receive/tcp/dropped/
# /debug/receive/udp/dropped/
# /debug/receive/pickle/dropped/
# /debug/receive/grpc/dropped/
# /debug/receive/prometheus/dropped/
# /debug/receive/telegraf_http_json/dropped/
[pprof]
listen = "localhost:7007"
enabled = false
# Tag matching can be used exactly as in InfluxDB; the format is identical.
# It will parse all metrics that do not already have tags.
# For details see https://docs.influxdata.com/influxdb/v1.7/supported_protocols/graphite/
# Example:
# [convert_to_tagged]
# enabled = true
# separator = "_"
# tags = ["region=us-east", "zone=1c"]
# templates = [
# "generated.* .measurement.cpu metric=idle",
# "* host.measurement* template_match=none",
# ]