Iceberg Basic Operations and Quick Start (Part 1)
Start a Spark SQL session
docker exec -it spark-iceberg spark-sql
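This assumes the docker-compose quickstart environment from the Iceberg docs, where the Spark container is named spark-iceberg. Scala and Python sessions can be started the same way:
docker exec -it spark-iceberg spark-shell
docker exec -it spark-iceberg pyspark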
Create a table
CREATE TABLE prod.db.sample ( id bigint NOT NULL COMMENT 'unique id', data string) USING iceberg;
Create a partitioned table
CREATE TABLE prod.db.sample_par (
id bigint,
data string,
category string)
USING iceberg
PARTITIONED BY (category);
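Partition columns do not have to be raw column values; Iceberg also accepts hidden-partitioning transforms such as days() and bucket(). A sketch, using a hypothetical sample_ts table with a timestamp column:
CREATE TABLE prod.db.sample_ts (
id bigint,
data string,
ts timestamp)
USING iceberg
PARTITIONED BY (days(ts), bucket(16, id));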
Rename a table
ALTER TABLE prod.db.sample RENAME TO prod.db.new_name;
Set table properties
ALTER TABLE prod.db.sample SET TBLPROPERTIES (
'read.split.target-size'='268435456'
);
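The value is in bytes, so 268435456 sets a 256 MB read split. Other table properties are set the same way; for example, changing the default write file format (here to ORC):
ALTER TABLE prod.db.sample SET TBLPROPERTIES (
'write.format.default'='orc'
);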
Unset a table property
ALTER TABLE prod.db.sample UNSET TBLPROPERTIES ('read.split.target-size');
Add a column
ALTER TABLE prod.db.sample
ADD COLUMNS (
new_column string comment 'new_column docs'
);
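Iceberg also allows placing a new column at a specific position with FIRST or AFTER; the column name below is illustrative:
ALTER TABLE prod.db.sample
ADD COLUMN another_column bigint AFTER data;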
Rename a column
ALTER TABLE prod.db.sample RENAME COLUMN data TO payload;
Drop a column
ALTER TABLE prod.db.sample DROP COLUMN id;
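Existing columns can also be altered in place: Iceberg permits safe type promotion (int to bigint, float to double) and comment updates via ALTER COLUMN, e.g.:
ALTER TABLE prod.db.sample ALTER COLUMN payload COMMENT 'renamed from data';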
Insert data
insert into prod.db.sample_par values (1, 'test', 'type1');
Update data with MERGE INTO
-- Syntax
MERGE INTO prod.db.target t -- a target table
USING (SELECT ...) s -- the source updates
ON t.id = s.id -- condition to find updates for target rows
WHEN ... -- updates
-- Example
CREATE TABLE prod.db.source ( id bigint NOT NULL COMMENT 'unique id', data string) USING iceberg;
INSERT INTO prod.db.source VALUES (1, 'a'), (2, 'b');
CREATE TABLE prod.db.target ( id bigint NOT NULL COMMENT 'unique id', data string) USING iceberg;
MERGE INTO prod.db.target t USING (SELECT * FROM prod.db.source) s ON t.id = s.id
WHEN MATCHED THEN UPDATE SET t.data = s.data
WHEN NOT MATCHED THEN INSERT *;
-- WHEN clauses may also carry conditions; the first matching clause is applied, so put
-- conditional clauses before catch-all ones. This variant assumes source and target also
-- have op, count, and event_time columns (still_valid_threshold is a placeholder):
MERGE INTO prod.db.target t USING (SELECT * FROM prod.db.source) s ON t.id = s.id
WHEN MATCHED AND s.op = 'delete' THEN DELETE
WHEN MATCHED AND t.count IS NULL AND s.op = 'increment' THEN UPDATE SET t.count = 0
WHEN NOT MATCHED AND s.event_time > still_valid_threshold THEN INSERT (id, count) VALUES (s.id, 1);
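A quick way to confirm what the first merge wrote to the target:
SELECT * FROM prod.db.target ORDER BY id;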
Overwrite data
INSERT OVERWRITE prod.my_app.logs
PARTITION (level = 'INFO')
SELECT uuid, first(level), first(ts), first(message)
FROM prod.my_app.logs
WHERE level = 'INFO'
GROUP BY uuid;
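By default Spark runs INSERT OVERWRITE in static mode, replacing the partitions matched by the PARTITION clause (or the whole table without one). In dynamic mode, only partitions that receive rows from the SELECT are replaced; the date filter below is illustrative:
SET spark.sql.sources.partitionOverwriteMode=dynamic;
INSERT OVERWRITE prod.my_app.logs
SELECT uuid, first(level), first(ts), first(message)
FROM prod.my_app.logs
WHERE cast(ts as date) = '2020-07-01'
GROUP BY uuid;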
Query data
select * from prod.db.sample;
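With Spark 3.3 or later, older snapshots of an Iceberg table can be queried directly via time travel; the timestamp and snapshot ID below are placeholders:
select * from prod.db.sample timestamp as of '2023-04-01 12:00:00';
select * from prod.db.sample version as of 1234567890123456789;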
Inspect table metadata
SELECT * FROM prod.db.sample.files;
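files is only one of the metadata tables; history, snapshots, and partitions are exposed the same way:
SELECT * FROM prod.db.sample.history;
SELECT * FROM prod.db.sample.snapshots;
SELECT * FROM prod.db.sample.partitions;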
Delete data
delete from prod.db.source where id = 2;
Drop a table
DROP TABLE prod.db.sample;
Drop a table and remove its data files
DROP TABLE prod.db.sample PURGE;