ClickHouse-SQL使用
# ClickHouse SQL命令
[toc]
# 创建集群数据库
CREATE DATABASE IF NOT EXISTS ${数据库名称} ON CLUSTER ${集群名称} ENGINE=Atomic;
CREATE DATABASE IF NOT EXISTS db_cluster ON CLUSTER default_cluster ENGINE=Atomic;
集群名称查看方法:查看ClickHouse配置文件 metrika.xml,其中 default_cluster 就是集群名称:
<clickhouse_remote_servers>
    <default_cluster>
        <shard>
            <internal_replication>true</internal_replication>
            <replica>
                <host>9.0.16.8</host>
                <port>9000</port>
            </replica>
            <replica>
                <host>9.0.16.13</host>
                <port>9000</port>
            </replica>
        </shard>
    </default_cluster>
</clickhouse_remote_servers>
# 删除集群数据库
drop database ${需要删除的数据库} on CLUSTER ${集群名称};
drop database db1 on CLUSTER clickhouse_cluster;
# 创建表 TODO
# 删除集群表
DROP table ${表名称} ON CLUSTER ${集群名称};
DROP table ads_user_profile ON CLUSTER default_cluster;
# 清空表数据
ALTER TABLE msdp_main.ads_user_profile_d DELETE WHERE one_id != '1';
# 查看所有数据库
select * from system.databases;
# 创建视图表
CREATE TABLE IF NOT EXISTS ${数据库}.${视图表名称} ON CLUSTER ${集群名称}
AS ${数据库}.${集群表名称}
ENGINE = Distributed(${集群名称},${数据库},${集群表名称});
CREATE TABLE IF NOT EXISTS msdp_main.ads_user_profile ON CLUSTER default_cluster
AS msdp_main.ads_user_profile_d
ENGINE = Distributed(default_cluster,msdp_main,ads_user_profile_d);
# 根据JSON key查询
select * from msdp_main.ads_user_profile aup where visitParamHas(labels, 'T01');
# TODO 查看磁盘使用状态
在客户端查询
SELECT
hostname() AS hostname,
database AS database,
table AS ` Table name `,
count(*) AS parts,
count(DISTINCT partition) AS distinct_partitions,
sum(rows) AS Rows,
formatReadableSize(sum(data_uncompressed_bytes)) AS uncomp,
formatReadableSize(sum(data_compressed_bytes)) AS comp,
sum(bytes_on_disk) AS total_bytes,
formatReadableSize(sum(bytes_on_disk)) AS total_on_disk,
round(
(
sum(data_compressed_bytes) / sum(data_uncompressed_bytes)
) * 100,
0
) AS ratio
FROM
clusterAllReplicas('default_cluster', system.parts)
WHERE
rows != 0
GROUP BY
hostname,
database,
table WITH ROLLUP
ORDER BY
total_bytes DESC;
查询结果
# TODO 只查看当前节点磁盘分布
SELECT database, table, partition, name part_name, active, bytes_on_disk
FROM system.parts ORDER BY database, table, partition, name;
SELECT database, sum(bytes_on_disk)
FROM system.parts
GROUP BY database;
# 删除30天以前日志数据
ALTER TABLE system.query_log ON CLUSTER default_cluster MODIFY TTL event_date + INTERVAL 30 DAY DELETE;
ALTER TABLE system.trace_log ON CLUSTER default_cluster MODIFY TTL event_date + INTERVAL 30 DAY DELETE;
ALTER TABLE system.query_thread_log ON CLUSTER default_cluster MODIFY TTL event_date + INTERVAL 30 DAY DELETE;
# 直接删除日志数据
ALTER table `system`.trace_log DROP PARTITION 2021xx;
ALTER table `system`.query_log DROP PARTITION 2021xx;
ALTER table `system`.query_thread_log DROP PARTITION 2021xx;
ALTER table `system`.trace_log on cluster default_cluster DROP PARTITION 202205;
# 异常:分区太大无法删除
可能的报错,分区太大了不让删除
9.0.16.13 9000 359 Code: 359, e.displayText() = DB::Exception: Table or Partition in system.query_thread_log was not dropped.\nReason:\n1. Size (129.61 GB) is greater than max_[table/partition]_size_to_drop (50.00 GB)\n2. File \'/data/clickhouse/clickhouse-server/flags/force_drop_table\' intended to force DROP doesn\'t exist\nHow to fix this:\n1. Either increase (or set to zero) max_[table/partition]_size_to_drop in server config\n2. Either create forcing file /data/clickhouse/clickhouse-server/flags/force_drop_table and make sure that ClickHouse has write permission for it.\nExample:\nsudo touch \'/data/clickhouse/clickhouse-server/flags/force_drop_table\' && sudo chmod 666 \'/data/clickhouse/clickhouse-server/flags/force_drop_table\' (version 21.8.12.1) 1 0
9.0.16.8 9000 359 Code: 359, e.displayText() = DB::Exception: Table or Partition in system.query_thread_log was not dropped.\nReason:\n1. Size (130.02 GB) is greater than max_[table/partition]_size_to_drop (50.00 GB)\n2. File \'/data/clickhouse/clickhouse-server/flags/force_drop_table\' intended to force DROP doesn\'t exist\nHow to fix this:\n1. Either increase (or set to zero) max_[table/partition]_size_to_drop in server config\n2. Either create forcing file /data/clickhouse/clickhouse-server/flags/force_drop_table and make sure that ClickHouse has write permission for it.\nExample:\nsudo touch \'/data/clickhouse/clickhouse-server/flags/force_drop_table\' && sudo chmod 666 \'/data/clickhouse/clickhouse-server/flags/force_drop_table\' (version 21.8.12.1) 0 0
Code: 359, e.displayText() = DB::Exception: There was an error on [9.0.16.13:9000]: Code: 359, e.displayText() = DB::Exception: Table or Partition in system.query_thread_log was not dropped.
Reason:
1. Size (129.61 GB) is greater than max_[table/partition]_size_to_drop (50.00 GB)
2. File '/data/clickhouse/clickhouse-server/flags/force_drop_table' intended to force DROP doesn't exist
How to fix this:
1. Either increase (or set to zero) max_[table/partition]_size_to_drop in server config
2. Either create forcing file /data/clickhouse/clickhouse-server/flags/force_drop_table and make sure that ClickHouse has write permission for it.
Example:
sudo touch '/data/clickhouse/clickhouse-server/flags/force_drop_table' && sudo chmod 666 '/data/clickhouse/clickhouse-server/flags/force_drop_table' (version 21.8.12.1) (version 21.8.12.1)
解决方法:
此报错原因是ClickHouse的保护机制:表/分区大小超过50G时,默认不允许删除。解决办法:在 config.xml 中增加如下配置,重启集群后生效:
<max_table_size_to_drop>0</max_table_size_to_drop>
<max_partition_size_to_drop>0</max_partition_size_to_drop>
# 查看表的所有partition
select partition,name,database,table from system.parts where database='system' and table ='query_thread_log';
# 查看当前正在执行的SQL
SELECT query_id,query FROM system.processes;
# 根据执行时间排序查看正在执行的语句
select * from system.processes order by elapsed desc limit 10;
# 查看正在执行的mutations语句
select * from system.mutations where is_done = 0;
# 杀掉卡住的mutation操作,id填上面查询到的
KILL MUTATION WHERE mutation_id = '';
KILL MUTATION WHERE database = 'default' AND table = 'table';
上次更新: 2023/03/10, 17:30:33