-- For a partitioned table, constraint_exclusion must be set to partition or on:
postgres=# show constraint_exclusion ;
 constraint_exclusion
----------------------
 partition

-- Create the parent table and the child tables that will hold the partitioned data:
create table t(id int primary key);
create table t1(like t including all) inherits(t);
create table t2(like t including all) inherits(t);
create table t3(like t including all) inherits(t);
create table t4(like t including all) inherits(t);

-- In PostgreSQL the constraints on sibling child tables are completely independent of one
-- another, so they may even overlap; they are not global constraints.
alter table t1 add constraint ck_t1_1 check(id<0);
alter table t2 add constraint ck_t2_1 check(id>=0 and id<100);
alter table t3 add constraint ck_t3_1 check(id>=100 and id<200);
alter table t4 add constraint ck_t4_1 check(id>=200);

-- With a constant value for the partition key, only the parent table and the child table
-- whose constraint matches are scanned:
postgres=# explain select * from t where id=10;
                                  QUERY PLAN
-----------------------------------------------------------------------------
 Append  (cost=0.00..8.17 rows=2 width=4)
   ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=4)
         Filter: (id = 10)
   ->  Index Only Scan using t2_pkey on t2  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
(5 rows)

-- The same holds when the constant is passed as a prepared-statement parameter:
postgres=# prepare p_test as select * from t where id=$1;
PREPARE
postgres=# explain execute p_test(1);
                                  QUERY PLAN
-----------------------------------------------------------------------------
 Append  (cost=0.00..8.17 rows=2 width=4)
   ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=4)
         Filter: (id = 1)
   ->  Index Only Scan using t2_pkey on t2  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 1)
(5 rows)

-- With a subquery, the parent and ALL child tables are scanned at execution time. Note that
-- the subquery here selects from child table t1, so in theory only the parent and that one
-- child would need to be scanned:
postgres=# explain select * from t where id=(select id from t1 limit 1);
                                  QUERY PLAN
-----------------------------------------------------------------------------
 Append  (cost=0.01..32.70 rows=5 width=4)
   InitPlan 1 (returns $0)
     ->  Limit  (cost=0.00..0.01 rows=1 width=4)
           ->  Seq Scan on t1 t1_1  (cost=0.00..34.00 rows=2400 width=4)
   ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=4)
         Filter: (id = $0)
   ->  Index Only Scan using t1_pkey on t1  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = $0)
   ->  Index Only Scan using t2_pkey on t2  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = $0)
   ->  Index Only Scan using t3_pkey on t3  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = $0)
   ->  Index Only Scan using t4_pkey on t4  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = $0)
(14 rows)

-- Conclusion: when querying a partitioned table, use literal constants rather than
-- subqueries or other complex SQL.

-- If a child table's constraint is dropped, PG has no choice but to include that child in
-- the query as well (even though the child could otherwise be skipped):
alter table t4 drop constraint ck_t4_1;
postgres=# explain select * from t where id=10;
                                  QUERY PLAN
-----------------------------------------------------------------------------
 Append  (cost=0.00..16.34 rows=3 width=4)
   ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=4)
         Filter: (id = 10)
   ->  Index Only Scan using t2_pkey on t2  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
   ->  Index Only Scan using t4_pkey on t4  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
(7 rows)

-- If constraint_exclusion is set to off, PG has to scan every table:
postgres=# set constraint_exclusion=off;
SET
postgres=# explain select * from t where id=10;
                                  QUERY PLAN
-----------------------------------------------------------------------------
 Append  (cost=0.00..32.69 rows=5 width=4)
   ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=4)
         Filter: (id = 10)
   ->  Index Only Scan using t1_pkey on t1  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
   ->  Index Only Scan using t2_pkey on t2  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
   ->  Index Only Scan using t3_pkey on t3  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
   ->  Index Only Scan using t4_pkey on t4  (cost=0.15..8.17 rows=1 width=4)
         Index Cond: (id = 10)
(11 rows)
-- On a partitioned table, indexes are normally created per partition.
-- An index built on the parent table covers only the parent's own rows; it is not inherited
-- by the children, so querying a child table that has no index of its own means a full
-- table scan.

-- Indexes defined on the parent table:
postgres=# \d+ p
                                  Table "public.p"
  Column   |              Type              | Modifiers | Storage | Stats target | Description
-----------+--------------------------------+-----------+---------+--------------+-------------
 city_id   | integer                        | not null  | plain   |              |
 logtime   | timestamp(0) without time zone | not null  | plain   |              |
 peaktemp  | integer                        |           | plain   |              |
 unitsales | integer                        |           | plain   |              |
Indexes:
    "idx_city_id" btree (city_id)
    "idx_p_logtime" btree (logtime)
Triggers:
    delete_p_trigger BEFORE DELETE ON p FOR EACH ROW EXECUTE PROCEDURE p_delete_trigger()
    insert_p_trigger BEFORE INSERT ON p FOR EACH ROW EXECUTE PROCEDURE p_insert_trigger()
Child tables: p_201201, p_201202, p_201203, p_201204, p_201205, p_201206, p_201207,
              p_201208, p_201209, p_201210, p_201211, p_201212, p_default
Has OIDs: no

-- The partition has no index of its own, so the parent's index cannot be used:
postgres=# explain select * from p_201202 where city_id=2 and logtime=timestamp '2012-02-02 12:59:59';
                                          QUERY PLAN
----------------------------------------------------------------------------------------------
 Seq Scan on p_201202  (cost=0.00..214.01 rows=2 width=20)
   Filter: ((city_id = 2) AND (logtime = '2012-02-02 12:59:59'::timestamp without time zone))
(2 rows)

-- After building an index on the partition, the partition index can be used:
postgres=# CREATE INDEX idx_p_201202_city_id ON p_201202 (city_id);
CREATE INDEX
postgres=# explain select * from p_201202 where city_id=2 and logtime=timestamp '2012-02-02 12:59:59';
                                      QUERY PLAN
--------------------------------------------------------------------------------------
 Index Scan using idx_p_201202_city_id on p_201202  (cost=0.29..8.33 rows=2 width=20)
   Index Cond: (city_id = 2)
   Filter: (logtime = '2012-02-02 12:59:59'::timestamp without time zone)
(3 rows)

-- You can also query only the parent table's own data:
postgres=# select * from only p;
 city_id | logtime | peaktemp | unitsales
---------+---------+----------+-----------
(0 rows)

-- If a child table no longer inherits from the parent, queries on the parent no longer
-- reach that child:
postgres=# alter table t3 no inherit t;
ALTER TABLE
postgres=# explain select count(*) from t;
                            QUERY PLAN
------------------------------------------------------------------
 Aggregate  (cost=73.50..73.51 rows=1 width=0)
   ->  Append  (cost=0.00..62.80 rows=4281 width=0)
         ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=0)
         ->  Seq Scan on t1  (cost=0.00..31.40 rows=2140 width=0)
         ->  Seq Scan on t2  (cost=0.00..31.40 rows=2140 width=0)
(5 rows)

-- After restoring the inheritance, queries on the parent reach the child again:
postgres=# alter table t3 inherit t;
ALTER TABLE
postgres=# explain select count(*) from t;
                            QUERY PLAN
------------------------------------------------------------------
 Aggregate  (cost=110.25..110.26 rows=1 width=0)
   ->  Append  (cost=0.00..94.20 rows=6421 width=0)
         ->  Seq Scan on t  (cost=0.00..0.00 rows=1 width=0)
         ->  Seq Scan on t1  (cost=0.00..31.40 rows=2140 width=0)
         ->  Seq Scan on t2  (cost=0.00..31.40 rows=2140 width=0)
         ->  Seq Scan on t3  (cost=0.00..31.40 rows=2140 width=0)
(6 rows)

-- Setup code for the test data in table p:
CREATE TABLE p (
    city_id int not null,
    logtime timestamp(0) not null,
    peaktemp int,
    unitsales int
);
CREATE INDEX idx_p_logtime ON p (logtime);
CREATE TABLE p_201201 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201202 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201203 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201204 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201205 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201206 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201207 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201208 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201209 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201210 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201211 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_201212 (LIKE p INCLUDING all) INHERITS (p);
CREATE TABLE p_default (LIKE p INCLUDING all) INHERITS (p);

CREATE OR REPLACE FUNCTION p_insert_trigger()
RETURNS TRIGGER AS $$
BEGIN
    IF ( NEW.logtime >= DATE '2012-01-01' AND NEW.logtime < DATE '2012-02-01' ) THEN
        INSERT INTO p_201201 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-02-01' AND NEW.logtime < DATE '2012-03-01' ) THEN
        INSERT INTO p_201202 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-03-01' AND NEW.logtime < DATE '2012-04-01' ) THEN
        INSERT INTO p_201203 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-04-01' AND NEW.logtime < DATE '2012-05-01' ) THEN
        INSERT INTO p_201204 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-05-01' AND NEW.logtime < DATE '2012-06-01' ) THEN
        INSERT INTO p_201205 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-06-01' AND NEW.logtime < DATE '2012-07-01' ) THEN
        INSERT INTO p_201206 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-07-01' AND NEW.logtime < DATE '2012-08-01' ) THEN
        INSERT INTO p_201207 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-08-01' AND NEW.logtime < DATE '2012-09-01' ) THEN
        INSERT INTO p_201208 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-09-01' AND NEW.logtime < DATE '2012-10-01' ) THEN
        INSERT INTO p_201209 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-10-01' AND NEW.logtime < DATE '2012-11-01' ) THEN
        INSERT INTO p_201210 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-11-01' AND NEW.logtime < DATE '2012-12-01' ) THEN
        INSERT INTO p_201211 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2012-12-01' AND NEW.logtime < DATE '2013-01-01' ) THEN
        INSERT INTO p_201212 VALUES (NEW.*);
    ELSIF ( NEW.logtime >= DATE '2013-01-01' OR NEW.logtime < DATE '2012-01-01' ) THEN
        INSERT INTO p_default VALUES (NEW.*);
    ELSE
        RAISE EXCEPTION 'Date out of range. Fix the p_insert_trigger() function!';
    END IF;
    RETURN NULL;
END;
$$ LANGUAGE plpgsql;

CREATE OR REPLACE FUNCTION p_delete_trigger()
RETURNS TRIGGER AS $$
BEGIN
    IF ( OLD.logtime >= DATE '2012-01-01' AND OLD.logtime < DATE '2012-02-01' ) THEN
        DELETE FROM p_201201 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-02-01' AND OLD.logtime < DATE '2012-03-01' ) THEN
        DELETE FROM p_201202 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-03-01' AND OLD.logtime < DATE '2012-04-01' ) THEN
        DELETE FROM p_201203 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-04-01' AND OLD.logtime < DATE '2012-05-01' ) THEN
        DELETE FROM p_201204 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-05-01' AND OLD.logtime < DATE '2012-06-01' ) THEN
        DELETE FROM p_201205 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-06-01' AND OLD.logtime < DATE '2012-07-01' ) THEN
        DELETE FROM p_201206 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-07-01' AND OLD.logtime < DATE '2012-08-01' ) THEN
        DELETE FROM p_201207 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-08-01' AND OLD.logtime < DATE '2012-09-01' ) THEN
        DELETE FROM p_201208 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-09-01' AND OLD.logtime < DATE '2012-10-01' ) THEN
        DELETE FROM p_201209 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-10-01' AND OLD.logtime < DATE '2012-11-01' ) THEN
        DELETE FROM p_201210 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-11-01' AND OLD.logtime < DATE '2012-12-01' ) THEN
        DELETE FROM p_201211 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2012-12-01' AND OLD.logtime < DATE '2013-01-01' ) THEN
        DELETE FROM p_201212 WHERE logtime=OLD.logtime;
    ELSIF ( OLD.logtime >= DATE '2013-01-01' OR OLD.logtime < DATE '2012-01-01' ) THEN
        DELETE FROM p_default WHERE logtime=OLD.logtime;
    ELSE
        RAISE EXCEPTION 'Date out of range. Fix the p_delete_trigger() function!';
    END IF;
    RETURN NULL;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER insert_p_trigger BEFORE INSERT ON p FOR EACH ROW EXECUTE PROCEDURE p_insert_trigger();
CREATE TRIGGER delete_p_trigger BEFORE DELETE ON p FOR EACH ROW EXECUTE PROCEDURE p_delete_trigger();

INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (1, timestamp '2012-01-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (2, timestamp '2012-02-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (3, timestamp '2012-03-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (4, timestamp '2012-04-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (5, timestamp '2012-05-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (6, timestamp '2012-06-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (7, timestamp '2012-07-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (8, timestamp '2012-08-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (9, timestamp '2012-09-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (10, timestamp '2012-10-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (11, timestamp '2012-11-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (12, timestamp '2012-12-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (13, timestamp '2013-01-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales) VALUES (14, timestamp '2011-12-02 12:59:59', 20, 10);
INSERT INTO p (city_id, logtime, peaktemp, unitsales)
    select m, timestamp '2012-02-02 12:59:59', 20, 10 from generate_series(1,10000) m;

explain select * from p_201202 where city_id=2 and logtime=timestamp '2012-02-02 12:59:59';
Reprinted from: https://yq.aliyun.com/articles/2637?spm=5176.100240.searchblog.12.59Jibq#
Reprinted article: PostgreSQL partitioning and optimization
The following related material may also be helpful:
Three ways to partition a table in PostgreSQL, by example
Table partitioning is a way to deal with performance problems caused by oversized single tables: once a table grows too large, queries slow down, and partitioning can be one solution. A common rule of thumb is to consider partitioning when a single table exceeds the size of physical memory. PostgreSQL offers three partitioning methods: range, list, and hash.
This article walks through each of the three with examples.
For convenience we start PostgreSQL with Docker; see 《Docker启动PostgreSQL并推荐几款连接工具》. Pick a reasonably recent version, otherwise hash partitioning is not supported. The command is as follows:
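The command itself was not captured in this copy; a minimal sketch, assuming the official postgres image (any version 11 or later supports hash partitioning; container name and password are placeholders):

docker run -d --name pkslow-postgres -e POSTGRES_PASSWORD=pkslow -p 5432:5432 postgres:13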
First create a table with an age column, which we will then partition by age range. The CREATE TABLE statement is as follows:
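The statement is missing from this copy; a minimal sketch, assuming the pkslow_person_r table named later in the article, with a hypothetical city column alongside the age partition key:

CREATE TABLE pkslow_person_r (
    age  INT NOT NULL,          -- partition key
    city VARCHAR(40)            -- hypothetical payload column
) PARTITION BY RANGE (age);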
This statement already declares age as the partition key; next, create the partitions:
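The statements are missing here as well; a sketch matching the four ranges described below (partition names are assumptions):

CREATE TABLE pkslow_person_r1 PARTITION OF pkslow_person_r FOR VALUES FROM (0) TO (11);
CREATE TABLE pkslow_person_r2 PARTITION OF pkslow_person_r FOR VALUES FROM (11) TO (21);
CREATE TABLE pkslow_person_r3 PARTITION OF pkslow_person_r FOR VALUES FROM (21) TO (31);
CREATE TABLE pkslow_person_r4 PARTITION OF pkslow_person_r FOR VALUES FROM (31) TO (MAXVALUE);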
This creates four partitions, covering ages 0-10, 11-20, 21-30, and over 30.
Next, insert some data:
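The original rows were not captured; a minimal sketch with made-up values:

INSERT INTO pkslow_person_r (age, city) VALUES (1, 'GZ'), (15, 'BJ'), (25, 'SZ'), (40, 'GZ');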
Note that the statement targets pkslow_person_r itself rather than any specific partition: the partitioning is invisible to the client.
Querying works the same way:
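For example:

SELECT * FROM pkslow_person_r;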
But the partitions really do exist:
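One way to see them, assuming the names sketched above (\d+ pkslow_person_r in psql shows the same):

SELECT inhrelid::regclass AS partition
FROM pg_inherits
WHERE inhparent = 'pkslow_person_r'::regclass;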
And each partition has exactly the same columns as the parent table.
Querying a partition directly returns only that particular partition's data:
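For example, with the partition names assumed in the sketch above:

SELECT * FROM pkslow_person_r1;  -- only the rows with age 0 through 10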
Similarly, list partitioning splits rows by specific values, for example putting one city's data in its own partition. Without repeating every step, the code is as follows:
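The code is missing from this copy; a minimal sketch, assuming the same columns as before and hypothetical partition names (the city codes GZ/BJ/SZ are taken from the surrounding text):

CREATE TABLE pkslow_person_l (
    age  INT NOT NULL,
    city VARCHAR(40) NOT NULL
) PARTITION BY LIST (city);

CREATE TABLE pkslow_person_l1 PARTITION OF pkslow_person_l FOR VALUES IN ('GZ');
CREATE TABLE pkslow_person_l2 PARTITION OF pkslow_person_l FOR VALUES IN ('BJ');
CREATE TABLE pkslow_person_l3 PARTITION OF pkslow_person_l FOR VALUES IN ('SZ');

INSERT INTO pkslow_person_l (age, city) VALUES (21, 'GZ'), (22, 'BJ'), (23, 'SZ');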
When we query the first partition, only the Guangzhou ('GZ') rows are there:
Hash partitioning hashes a column's value and partitions on the hash. The statements are as follows:
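The statements are missing here; a minimal sketch with hypothetical names, using four partitions:

CREATE TABLE pkslow_person_h (
    age  INT NOT NULL,
    city VARCHAR(40) NOT NULL
) PARTITION BY HASH (city);

CREATE TABLE pkslow_person_h0 PARTITION OF pkslow_person_h FOR VALUES WITH (MODULUS 4, REMAINDER 0);
CREATE TABLE pkslow_person_h1 PARTITION OF pkslow_person_h FOR VALUES WITH (MODULUS 4, REMAINDER 1);
CREATE TABLE pkslow_person_h2 PARTITION OF pkslow_person_h FOR VALUES WITH (MODULUS 4, REMAINDER 2);
CREATE TABLE pkslow_person_h3 PARTITION OF pkslow_person_h FOR VALUES WITH (MODULUS 4, REMAINDER 3);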
Note that each partition is defined by a modulus: to create N partitions, you take the hash modulo N.
Querying one of the partitions at random:
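For example (the partition name is assumed from the sketch above):

SELECT * FROM pkslow_person_h0;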
All 'SZ' rows produce the same hash and therefore land in the same partition; here the 'BJ' rows happen to fall into the same partition once the modulus is taken.
This article has covered the three partitioning methods in PostgreSQL.
The code is available at: https://github.com/LarryDpk/pkslow-samples
PostgreSQL: optimizing index-build performance
A customer rolled out a new monitoring system that catches every slow SQL statement. It flagged a batch job that bulk-loads data and then builds an index, which takes several minutes. This does not affect the business, but it has to be fixed.
The usual approach to this kind of problem: split the big job into small pieces and run them concurrently.
In a test environment, create a large table for testing and fill it with a large amount of fake data.
Create the table
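The DDL itself is missing from this copy; a minimal sketch, assuming a single text column that will be indexed later (table and column names are assumptions):

CREATE TABLE big_table (
    id   bigserial PRIMARY KEY,
    name text NOT NULL          -- the column the index will be built on
);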
A random-string generator function
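The function body is missing here; a minimal sketch of what such a generator typically looks like:

CREATE OR REPLACE FUNCTION random_string(len int) RETURNS text AS $$
    SELECT string_agg(substr('abcdefghijklmnopqrstuvwxyz0123456789', (random()*35)::int + 1, 1), '')
    FROM generate_series(1, len);
$$ LANGUAGE sql;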
Generate a large volume of data
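Presumably a single INSERT ... SELECT over the generator, something like this (the row count is taken from the 86-million-row figure mentioned below; names assumed as above):

INSERT INTO big_table (name)
SELECT random_string(32) FROM generate_series(1, 86000000);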
Testing showed this way of generating data was far too slow, so the load was switched to COPY.
Profiling showed random_string was the bottleneck: close to 1 ms per generated row.
Recreate the table
A program wrote 86 million rows into test.csv
Bulk-load the data
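A sketch of the COPY load (the file path and names are assumptions):

COPY big_table (name) FROM '/tmp/test.csv' WITH (FORMAT csv);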
Baseline test
\timing -- turn on per-statement timing in psql
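The baseline statement itself is missing; presumably a plain index build such as (names assumed as above):

CREATE INDEX idx_big_table_name ON big_table (name);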
Elapsed: 532824.434 ms
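What changed to produce the 38% improvement on the next line is not captured; one plausible guess is giving the build more sort memory, for example:

SET maintenance_work_mem = '4GB';   -- assumption, not confirmed by the source
CREATE INDEX idx_big_table_name ON big_table (name);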
Elapsed: 385838.893 ms, a 38% improvement. Not bad, but nowhere near enough.
It still triggers the alert.
Recreate the table
Create the partitions
Build the indexes on the partitions in parallel, recording each build's start and end time;
Elapsed = max end time - min start time = 137 s, nearly a 4x speedup.
Build the indexes sequentially, recording each build's start and end time;
Elapsed = sum of the individual builds = 457358.14 ms, a 16.5% improvement.
Build the indexes sequentially, with the parallel-maintenance setting tuned
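The exact tuning is not captured either; on PostgreSQL 11 and later the natural knob is the parallel index-build worker count, for example (an assumption, with hypothetical partition names):

SET max_parallel_maintenance_workers = 8;
CREATE INDEX idx_big_table_p1_name ON big_table_p1 (name);  -- repeated for each partition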
Elapsed = sum of the individual builds = 292027.642 ms, nearly a 2x speedup.
With the parallel setting enabled, would stacking concurrent per-partition builds on top bring a further surprise?
Build the indexes on the partitions in parallel, recording each build's start and end time;
Elapsed = max end time - min start time = 141 s, actually worse than with the default parallel settings. Resource contention is the likely cause: system monitoring showed the CPUs were saturated.
Parallel per-partition builds are the best optimization available here.
Query patterns still need to be analyzed, and whether the slight performance penalty partitioning introduces matters has to be weighed as well.
Partitioning is currently the only way to stay under the monitoring alert, and admittedly it also exploits a loophole in how the alert works:
the customer's monitoring is per-statement, and the slowest single-partition build is 47 s, safely under a minute.
How to create partitions automatically in PostgreSQL
As databases keep growing, managing the data gets harder: statistics by time or by region have to run over an enormous base set, which is unwieldy. Most commercial databases therefore offer partitioning, storing data along different dimensions for easier ongoing management, and PostgreSQL is no exception.
Partitioning in PostgreSQL means splitting one logically large table into several physical pieces. Partitioning not only speeds up access; more importantly, it simplifies management and maintenance.
The concrete benefits of partitioning:
Certain kinds of queries can get dramatically faster.
Update performance can improve too, because each piece's indexes are smaller than an index over the whole data set. When an index no longer fits in memory, both reads and writes on it cause extra disk access.
Bulk deletes can be implemented by simply dropping a partition.
Rarely used data can be moved to cheaper, slower storage.
In PG, table partitioning is implemented through table inheritance: you create a master table, which stays empty, and each partition inherits from it. The master table should remain empty at all times.
Partitioning a small table is not worthwhile. How big is big enough? The official PostgreSQL guidance: consider partitioning when the size of the table exceeds the physical memory of the database server.
PG at this writing (9.2.2) supports only range and list partitioning; hash partitioning is not yet supported.
2. Environment
OS: CentOS release 6.3 (Final)
PostgreSQL version: PostgreSQL 9.2.2 on x86_64-unknown-linux-gnu, compiled by gcc (GCC) 4.4.6 20120305 (Red Hat 4.4.6-4), 64-bit
3. Implementing the partitions
3.1 Create the master table
david=# create table tbl_partition (
david(# id integer,
david(# name varchar(20),
david(# gender boolean,
david(# join_date date,
david(# dept char(4));
CREATE TABLE
david=#
3.2 Create the partition tables
david=# create table tbl_partition_201211 (
check ( join_date >= DATE '2012-11-01' AND join_date < DATE '2012-12-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=# create table tbl_partition_201212 (
check ( join_date >= DATE '2012-12-01' AND join_date < DATE '2013-01-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=# create table tbl_partition_201301 (
check ( join_date >= DATE '2013-01-01' AND join_date < DATE '2013-02-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=# create table tbl_partition_201302 (
check ( join_date >= DATE '2013-02-01' AND join_date < DATE '2013-03-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=# create table tbl_partition_201303 (
check ( join_date >= DATE '2013-03-01' AND join_date < DATE '2013-04-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=# create table tbl_partition_201304 (
check ( join_date >= DATE '2013-04-01' AND join_date < DATE '2013-05-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=# create table tbl_partition_201305 (
check ( join_date >= DATE '2013-05-01' AND join_date < DATE '2013-06-01' )
) INHERITS (tbl_partition);
CREATE TABLE
david=#
3.3 Index the partition key
david=# create index tbl_partition_201211_joindate on tbl_partition_201211 (join_date);
CREATE INDEX
david=# create index tbl_partition_201212_joindate on tbl_partition_201212 (join_date);
CREATE INDEX
david=# create index tbl_partition_201301_joindate on tbl_partition_201301 (join_date);
CREATE INDEX
david=# create index tbl_partition_201302_joindate on tbl_partition_201302 (join_date);
CREATE INDEX
david=# create index tbl_partition_201303_joindate on tbl_partition_201303 (join_date);
CREATE INDEX
david=# create index tbl_partition_201304_joindate on tbl_partition_201304 (join_date);
CREATE INDEX
david=# create index tbl_partition_201305_joindate on tbl_partition_201305 (join_date);
CREATE INDEX
david=#
Developers want the database to be transparent: they just insert into tbl_partition and let the DB decide which partition each row lands in. Oracle does this automatically; PG does not, so some up-front manual work is needed.
3.4 Create the trigger function
david=# CREATE OR REPLACE FUNCTION tbl_partition_insert_trigger()
RETURNS TRIGGER AS $$
BEGIN
IF ( NEW.join_date >= DATE '2012-11-01' AND
NEW.join_date < DATE '2012-12-01' ) THEN
INSERT INTO tbl_partition_201211 VALUES (NEW.*);
ELSIF ( NEW.join_date >= DATE '2012-12-01' AND
NEW.join_date < DATE '2013-01-01' ) THEN
INSERT INTO tbl_partition_201212 VALUES (NEW.*);
ELSIF ( NEW.join_date >= DATE '2013-01-01' AND
NEW.join_date < DATE '2013-02-01' ) THEN
INSERT INTO tbl_partition_201301 VALUES (NEW.*);
ELSIF ( NEW.join_date >= DATE '2013-02-01' AND
NEW.join_date < DATE '2013-03-01' ) THEN
INSERT INTO tbl_partition_201302 VALUES (NEW.*);
ELSIF ( NEW.join_date >= DATE '2013-03-01' AND
NEW.join_date < DATE '2013-04-01' ) THEN
INSERT INTO tbl_partition_201303 VALUES (NEW.*);
ELSIF ( NEW.join_date >= DATE '2013-04-01' AND
NEW.join_date < DATE '2013-05-01' ) THEN
INSERT INTO tbl_partition_201304 VALUES (NEW.*);
ELSIF ( NEW.join_date >= DATE '2013-05-01' AND
NEW.join_date < DATE '2013-06-01' ) THEN
INSERT INTO tbl_partition_201305 VALUES (NEW.*);
ELSE
RAISE EXCEPTION 'Date out of range. Fix the tbl_partition_insert_trigger() function!';
END IF;
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
CREATE FUNCTION
david=#
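The statement that binds the function to the table appears to be missing from this copy; following the same pattern used for table p earlier on this page, it would be something like (the trigger name is an assumption):

CREATE TRIGGER insert_tbl_partition_trigger
BEFORE INSERT ON tbl_partition
FOR EACH ROW EXECUTE PROCEDURE tbl_partition_insert_trigger();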
Note: if you do not want to lose data, the ELSE branch above can be changed to INSERT INTO tbl_partition_error_join_date VALUES (NEW.*); you also need a table tbl_partition_error_join_date with the same structure as tbl_partition, so that rows with an out-of-range join_date are captured there instead of raising an error.
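A sketch of that variant:

CREATE TABLE tbl_partition_error_join_date (LIKE tbl_partition INCLUDING ALL);
-- then, inside tbl_partition_insert_trigger(), replace the ELSE branch with:
--     ELSE
--         INSERT INTO tbl_partition_error_join_date VALUES (NEW.*);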
Original source: http://www.cnblogs.com/mchina/archive/2013/04/09/2973427.html
Technical note: PostgreSQL performance tuning, the fsync parameter
PostgreSQL calls the system fsync() (or an equivalent) to force transaction contents onto physical disk, which guarantees that after an OS or database crash it can still recover to a consistent state. In theory, turning PostgreSQL's fsync off buys performance, but the price is potential data loss: when the system or the database crashes, some data will not yet have reached disk.
This note measures the performance impact of the fsync parameter, and what happens after a crash while the parameter is off.
Data volume: 10 million rows
fsync: on
Initialized table: user_info
pgbench load test
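The actual invocations are not captured in this copy; typical commands for these runs would look like the following (database name, scale factor, client counts, and durations are assumptions):

pgbench -i -s 100 testdb              # initialize roughly 10 million rows (scale factor 100)
pgbench -S -c 16 -j 4 -T 60 testdb    # read-only (select-only) load test
pgbench -c 16 -j 4 -T 60 testdb       # default read-write (TPC-B-like) load test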
pgbench results
pgbench load test
pgbench results
Data volume: 10 million rows
fsync: off
Initialized table: user_info
pgbench load test
pgbench results
pgbench load test
pgbench results
Comparing the two runs: with fsync off, read TPS barely changes, while write TPS improves somewhat.
Now verify what happens after a crash while the parameter is off.
First, put the database under load (with pgbench).
Then simulate a server power failure.
Afterwards, start the database.
The server reports a PANIC, an error even more severe than FATAL.
Result: the database cannot start, because it cannot find a valid checkpoint record, which is exactly what fsync=off plus an unclean shutdown leads to. pg_resetxlog can bring the database back, but some data will be unrecoverable and lost; restoring from a backup likewise loses some data.
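For reference, the forced recovery mentioned above looks roughly like this (pg_resetxlog is the tool's name before PostgreSQL 10, which renamed it pg_resetwal; the data directory path is an assumption):

pg_resetxlog -f /var/lib/pgsql/data   # forcibly resets the WAL, accepting data loss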
To sum up: fsync has little effect on read TPS and some effect on write TPS. Setting it to off gains some write throughput, but the database may fail to start after a crash, and even if it is brought back, there is a serious risk of data loss. Unless truly necessary, do not set it to off in production; the default on is the safe choice.
How do I optimize a query over tens of millions of rows in PostgreSQL? It joins two tables, and the time column used in the query is already indexed!
You can create a partitioned table, partitioned by time range.
PostgreSQL Partition (the functions below come from the pg_pathman extension)
create_range_partitions
select create_range_partitions('trip_agg_data'::regclass,'time_key',0::bigint,1::bigint,0,false) ;
trip_agg_data -- parent table OID
time_key -- partition column; must have a NOT NULL constraint
0::bigint -- start value; bigint because that is time_key's type
1::bigint -- interval; must be greater than 0
0 -- number of partitions
false -- do not migrate data immediately
add_range_partition
select add_range_partition('trip_agg_data'::REGCLASS,0,2019010100,'trip_agg_data_lt_20190101');
2019010100 -- must not exceed the maximum value of time_key's type
partition_table_concurrently
select partition_table_concurrently ('trip_agg_data_tim'::regclass,10000, 1.0);
Migrate data into the child tables
10000 -- number of rows migrated per transaction batch
1.0 -- how long to sleep before retrying after failing to get a row lock; the task gives up after 60 retries
pathman_concurrent_part_tasks
select * from pathman_concurrent_part_tasks;
Check data-migration progress
disable_pathman_for
select disable_pathman_for('trip_agg_data_tim'::regclass);
Disable pg_pathman partitioning for the table
drop_range_partition
SELECT drop_range_partition(partition, false) /* move data to parent */
FROM pathman_partition_list WHERE parent = 'trip_agg_data'::regclass
and partition = 'trip_agg_data_greater_20200101'::regclass
Drop a specific partition
pathman_partition_list
select * from pathman_partition_list;
List information about all partitions
set_enable_parent
select set_enable_parent('trip_agg_data'::regclass,false);
Disable the parent table
split_range_partition
Split a range partition
merge_range_partitions
Merge range partitions
append_range_partition
Append a range partition at the end
prepend_range_partition
Prepend a range partition at the front
drop_range_partition
drop_range_partition(partition TEXT, -- partition name
delete_data BOOLEAN DEFAULT TRUE) -- whether to delete the partition's data; if false, the data is moved back to the parent
drop_partitions
drop_partitions(parent REGCLASS,
delete_data BOOLEAN DEFAULT FALSE)
attach_range_partition
attach_range_partition(relation REGCLASS, -- parent table OID
partition REGCLASS, -- partition OID
start_value ANYELEMENT, -- start value
end_value ANYELEMENT) -- end value
detach_range_partition
detach_range_partition(partition REGCLASS) -- detach the named partition, turning it back into a plain table
-- 1, create table trip_agg_data_2019
create table trip_agg_data_2019 as select * from trip_agg_data where 1=0;
alter table trip_agg_data_2019 alter column time_key set not null;
-- 2, create_range_partition
select create_range_partitions('trip_agg_data_2019'::regclass,'time_key',0::bigint,1::bigint,0,false) ;
-- 3, detach_range_partition for trip_agg_data
select detach_range_partition('trip_agg_data_20190101');
select detach_range_partition('trip_agg_data_20190201');
select detach_range_partition('trip_agg_data_20190301');
select detach_range_partition('trip_agg_data_20190401');
select detach_range_partition('trip_agg_data_20190501');
select detach_range_partition('trip_agg_data_20190601');
select detach_range_partition('trip_agg_data_20190701');
select detach_range_partition('trip_agg_data_20190801');
select detach_range_partition('trip_agg_data_20190901');
select detach_range_partition('trip_agg_data_20191001');
select detach_range_partition('trip_agg_data_20191101');
select detach_range_partition('trip_agg_data_20191201');
-- 4, add_range_partition for trip_agg_data
select add_range_partition('trip_agg_data'::REGCLASS,2019010100,2020010100,'trip_agg_data_20190101_20200101');
-- 5, attach_range_partition for trip_agg_data_2019
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190101',2019010100,2019020100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190201',2019020100,2019030100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190301',2019030100,2019040100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190401',2019040100,2019050100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190501',2019050100,2019060100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190601',2019060100,2019070100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190701',2019070100,2019080100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190801',2019080100,2019090100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20190901',2019090100,2019100100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20191001',2019100100,2019110100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20191101',2019110100,2019120100);
select attach_range_partition('trip_agg_data_2019'::REGCLASS,'trip_agg_data_20191201',2019120100,2020010100);
-- 6, disable parent for trip_agg_data_2019
select set_enable_parent('trip_agg_data_2019'::regclass,false);
alter table trip_agg_data alter column time_key set not null;
select create_range_partitions('trip_agg_data'::regclass,'time_key',0::bigint,1::bigint,0,false) ;
select add_range_partition('trip_agg_data'::REGCLASS,0,2019010100,'trip_agg_data_lt_20190101');
select add_range_partition('trip_agg_data'::REGCLASS,2019010100,2019020100,'trip_agg_data_20190101');
select add_range_partition('trip_agg_data'::REGCLASS,2019020100,2019030100,'trip_agg_data_20190201');
select add_range_partition('trip_agg_data'::REGCLASS,2019030100,2019040100,'trip_agg_data_20190301');
select add_range_partition('trip_agg_data'::REGCLASS,2019040100,2019050100,'trip_agg_data_20190401');
select add_range_partition('trip_agg_data'::REGCLASS,2019050100,2019060100,'trip_agg_data_20190501');
select add_range_partition('trip_agg_data'::REGCLASS,2019060100,2019070100,'trip_agg_data_20190601');
select add_range_partition('trip_agg_data'::REGCLASS,2019070100,2019080100,'trip_agg_data_20190701');
select add_range_partition('trip_agg_data'::REGCLASS,2019080100,2019090100,'trip_agg_data_20190801');
select add_range_partition('trip_agg_data'::REGCLASS,2019090100,2019100100,'trip_agg_data_20190901');
select add_range_partition('trip_agg_data'::REGCLASS,2019100100,2019110100,'trip_agg_data_20191001');
select add_range_partition('trip_agg_data'::REGCLASS,2019110100,2019120100,'trip_agg_data_20191101');
select add_range_partition('trip_agg_data'::REGCLASS,2019120100,2020010100,'trip_agg_data_20191201');
select add_range_partition('trip_agg_data'::REGCLASS,2020010100,2020020100,'trip_agg_data_20200101');
select add_range_partition('trip_agg_data'::REGCLASS,2020020100,2020030100,'trip_agg_data_20200201');
select add_range_partition('trip_agg_data'::REGCLASS,2020030100,2020040100,'trip_agg_data_20200301');
select add_range_partition('trip_agg_data'::REGCLASS,2020040100,2020050100,'trip_agg_data_20200401');
select add_range_partition('trip_agg_data'::REGCLASS,2020050100,2020060100,'trip_agg_data_20200501');
select add_range_partition('trip_agg_data'::REGCLASS,2020060100,2020070100,'trip_agg_data_20200601');
select add_range_partition('trip_agg_data'::REGCLASS,2020070100,2020080100,'trip_agg_data_20200701');
select add_range_partition('trip_agg_data'::REGCLASS,2020080100,2020090100,'trip_agg_data_20200801');
select add_range_partition('trip_agg_data'::REGCLASS,2020090100,2020100100,'trip_agg_data_20200901');
select add_range_partition('trip_agg_data'::REGCLASS,2020100100,2020110100,'trip_agg_data_20201001');
select add_range_partition('trip_agg_data'::REGCLASS,2020110100,2020120100,'trip_agg_data_20201101');
select add_range_partition('trip_agg_data'::REGCLASS,2020120100,2021010100,'trip_agg_data_20201201');
select add_range_partition('trip_agg_data'::REGCLASS,2021010100,2021020100,'trip_agg_data_20210101');
select add_range_partition('trip_agg_data'::REGCLASS,2021020100,2021030100,'trip_agg_data_20210201');
select add_range_partition('trip_agg_data'::REGCLASS,2021030100,2021040100,'trip_agg_data_20210301');
select add_range_partition('trip_agg_data'::REGCLASS,2021040100,2021050100,'trip_agg_data_20210401');
select add_range_partition('trip_agg_data'::REGCLASS,2021050100,2021060100,'trip_agg_data_20210501');
select add_range_partition('trip_agg_data'::REGCLASS,2021060100,2021070100,'trip_agg_data_20210601');
select add_range_partition('trip_agg_data'::REGCLASS,2021070100,2021080100,'trip_agg_data_20210701');
select add_range_partition('trip_agg_data'::REGCLASS,2021080100,2021090100,'trip_agg_data_20210801');
select add_range_partition('trip_agg_data'::REGCLASS,2021090100,2021100100,'trip_agg_data_20210901');
select add_range_partition('trip_agg_data'::REGCLASS,2021100100,2021110100,'trip_agg_data_20211001');
select add_range_partition('trip_agg_data'::REGCLASS,2021110100,2021120100,'trip_agg_data_20211101');
select add_range_partition('trip_agg_data'::REGCLASS,2021120100,2022010100,'trip_agg_data_20211201');
select add_range_partition('trip_agg_data'::REGCLASS,2022010100,2022020100,'trip_agg_data_20220101');
select add_range_partition('trip_agg_data'::REGCLASS,2022020100,2022030100,'trip_agg_data_20220201');
select add_range_partition('trip_agg_data'::REGCLASS,2022030100,2022040100,'trip_agg_data_20220301');
select add_range_partition('trip_agg_data'::REGCLASS,2022040100,2022050100,'trip_agg_data_20220401');
select add_range_partition('trip_agg_data'::REGCLASS,2022050100,2022060100,'trip_agg_data_20220501');
select add_range_partition('trip_agg_data'::REGCLASS,2022060100,2022070100,'trip_agg_data_20220601');
select add_range_partition('trip_agg_data'::REGCLASS,2022070100,2022080100,'trip_agg_data_20220701');
select add_range_partition('trip_agg_data'::REGCLASS,2022080100,2022090100,'trip_agg_data_20220801');
select add_range_partition('trip_agg_data'::REGCLASS,2022090100,2022100100,'trip_agg_data_20220901');
select add_range_partition('trip_agg_data'::REGCLASS,2022100100,2022110100,'trip_agg_data_20221001');
select add_range_partition('trip_agg_data'::REGCLASS,2022110100,2022120100,'trip_agg_data_20221101');
select add_range_partition('trip_agg_data'::REGCLASS,2022120100,2023010100,'trip_agg_data_20221201');
select add_range_partition('trip_agg_data'::REGCLASS,2023010100,2023020100,'trip_agg_data_20230101');
select add_range_partition('trip_agg_data'::REGCLASS,2023020100,2023030100,'trip_agg_data_20230201');
select add_range_partition('trip_agg_data'::REGCLASS,2023030100,2023040100,'trip_agg_data_20230301');
select add_range_partition('trip_agg_data'::REGCLASS,2023040100,2023050100,'trip_agg_data_20230401');
select add_range_partition('trip_agg_data'::REGCLASS,2023050100,2023060100,'trip_agg_data_20230501');
select add_range_partition('trip_agg_data'::REGCLASS,2023060100,2023070100,'trip_agg_data_20230601');
select add_range_partition('trip_agg_data'::REGCLASS,2023070100,2023080100,'trip_agg_data_20230701');
select add_range_partition('trip_agg_data'::REGCLASS,2023080100,2023090100,'trip_agg_data_20230801');
select add_range_partition('trip_agg_data'::REGCLASS,2023090100,2023100100,'trip_agg_data_20230901');
select add_range_partition('trip_agg_data'::REGCLASS,2023100100,2023110100,'trip_agg_data_20231001');
select add_range_partition('trip_agg_data'::REGCLASS,2023110100,2023120100,'trip_agg_data_20231101');
select add_range_partition('trip_agg_data'::REGCLASS,2023120100,2024010100,'trip_agg_data_20231201');
select add_range_partition('trip_agg_data'::REGCLASS,2024010100,2024020100,'trip_agg_data_20240101');
select add_range_partition('trip_agg_data'::REGCLASS,2024020100,2024030100,'trip_agg_data_20240201');
select add_range_partition('trip_agg_data'::REGCLASS,2024030100,2024040100,'trip_agg_data_20240301');
select add_range_partition('trip_agg_data'::REGCLASS,2024040100,2024050100,'trip_agg_data_20240401');
select add_range_partition('trip_agg_data'::REGCLASS,2024050100,2024060100,'trip_agg_data_20240501');
select add_range_partition('trip_agg_data'::REGCLASS,2024060100,2024070100,'trip_agg_data_20240601');
select add_range_partition('trip_agg_data'::REGCLASS,2024070100,2024080100,'trip_agg_data_20240701');
select add_range_partition('trip_agg_data'::REGCLASS,2024080100,2024090100,'trip_agg_data_20240801');
select add_range_partition('trip_agg_data'::REGCLASS,2024090100,2024100100,'trip_agg_data_20240901');
select add_range_partition('trip_agg_data'::REGCLASS,2024100100,2024110100,'trip_agg_data_20241001');
select add_range_partition('trip_agg_data'::REGCLASS,2024110100,2024120100,'trip_agg_data_20241101');
select add_range_partition('trip_agg_data'::REGCLASS,2024120100,2025010100,'trip_agg_data_20241201');
select add_range_partition('trip_agg_data'::REGCLASS,2025010100,2025020100,'trip_agg_data_20250101');
select add_range_partition('trip_agg_data'::REGCLASS,2025020100,2025030100,'trip_agg_data_20250201');
select add_range_partition('trip_agg_data'::REGCLASS,2025030100,2025040100,'trip_agg_data_20250301');
select add_range_partition('trip_agg_data'::REGCLASS,2025040100,2025050100,'trip_agg_data_20250401');
select add_range_partition('trip_agg_data'::REGCLASS,2025050100,2025060100,'trip_agg_data_20250501');
select add_range_partition('trip_agg_data'::REGCLASS,2025060100,2025070100,'trip_agg_data_20250601');
select add_range_partition('trip_agg_data'::REGCLASS,2025070100,2025080100,'trip_agg_data_20250701');
select add_range_partition('trip_agg_data'::REGCLASS,2025080100,2025090100,'trip_agg_data_20250801');
select add_range_partition('trip_agg_data'::REGCLASS,2025090100,2025100100,'trip_agg_data_20250901');
select add_range_partition('trip_agg_data'::REGCLASS,2025100100,2025110100,'trip_agg_data_20251001');
select add_range_partition('trip_agg_data'::REGCLASS,2025110100,2025120100,'trip_agg_data_20251101');
select add_range_partition('trip_agg_data'::REGCLASS,2025120100,2026010100,'trip_agg_data_20251201');
select add_range_partition('trip_agg_data'::REGCLASS,2026010100,2026020100,'trip_agg_data_20260101');
select add_range_partition('trip_agg_data'::REGCLASS,2026020100,2026030100,'trip_agg_data_20260201');
select add_range_partition('trip_agg_data'::REGCLASS,2026030100,2026040100,'trip_agg_data_20260301');
select add_range_partition('trip_agg_data'::REGCLASS,2026040100,2026050100,'trip_agg_data_20260401');
select add_range_partition('trip_agg_data'::REGCLASS,2026050100,2026060100,'trip_agg_data_20260501');
select add_range_partition('trip_agg_data'::REGCLASS,2026060100,2026070100,'trip_agg_data_20260601');
select add_range_partition('trip_agg_data'::REGCLASS,2026070100,2026080100,'trip_agg_data_20260701');
select add_range_partition('trip_agg_data'::REGCLASS,2026080100,2026090100,'trip_agg_data_20260801');
select add_range_partition('trip_agg_data'::REGCLASS,2026090100,2026100100,'trip_agg_data_20260901');
select add_range_partition('trip_agg_data'::REGCLASS,2026100100,2026110100,'trip_agg_data_20261001');
select add_range_partition('trip_agg_data'::REGCLASS,2026110100,2026120100,'trip_agg_data_20261101');
select add_range_partition('trip_agg_data'::REGCLASS,2026120100,2027010100,'trip_agg_data_20261201');
select add_range_partition('trip_agg_data'::REGCLASS,2027010100,2027020100,'trip_agg_data_20270101');
select add_range_partition('trip_agg_data'::REGCLASS,2027020100,2027030100,'trip_agg_data_20270201');
select add_range_partition('trip_agg_data'::REGCLASS,2027030100,2027040100,'trip_agg_data_20270301');
select add_range_partition('trip_agg_data'::REGCLASS,2027040100,2027050100,'trip_agg_data_20270401');
select add_range_partition('trip_agg_data'::REGCLASS,2027050100,2027060100,'trip_agg_data_20270501');
select add_range_partition('trip_agg_data'::REGCLASS,2027060100,2027070100,'trip_agg_data_20270601');
select add_range_partition('trip_agg_data'::REGCLASS,2027070100,2027080100,'trip_agg_data_20270701');
select add_range_partition('trip_agg_data'::REGCLASS,2027080100,2027090100,'trip_agg_data_20270801');
select add_range_partition('trip_agg_data'::REGCLASS,2027090100,2027100100,'trip_agg_data_20270901');
select add_range_partition('trip_agg_data'::REGCLASS,2027100100,2027110100,'trip_agg_data_20271001');
select add_range_partition('trip_agg_data'::REGCLASS,2027110100,2027120100,'trip_agg_data_20271101');
select add_range_partition('trip_agg_data'::REGCLASS,2027120100,2028010100,'trip_agg_data_20271201');
select add_range_partition('trip_agg_data'::REGCLASS,2028010100,2028020100,'trip_agg_data_20280101');
select add_range_partition('trip_agg_data'::REGCLASS,2028020100,2028030100,'trip_agg_data_20280201');
select add_range_partition('trip_agg_data'::REGCLASS,2028030100,2028040100,'trip_agg_data_20280301');
select add_range_partition('trip_agg_data'::REGCLASS,2028040100,2028050100,'trip_agg_data_20280401');
select add_range_partition('trip_agg_data'::REGCLASS,2028050100,2028060100,'trip_agg_data_20280501');
select add_range_partition('trip_agg_data'::REGCLASS,2028060100,2028070100,'trip_agg_data_20280601');
select add_range_partition('trip_agg_data'::REGCLASS,2028070100,2028080100,'trip_agg_data_20280701');
select add_range_partition('trip_agg_data'::REGCLASS,2028080100,2028090100,'trip_agg_data_20280801');
select add_range_partition('trip_agg_data'::REGCLASS,2028090100,2028100100,'trip_agg_data_20280901');
select add_range_partition('trip_agg_data'::REGCLASS,2028100100,2028110100,'trip_agg_data_20281001');
select add_range_partition('trip_agg_data'::REGCLASS,2028110100,2028120100,'trip_agg_data_20281101');
select add_range_partition('trip_agg_data'::REGCLASS,2028120100,2029010100,'trip_agg_data_20281201');
select add_range_partition('trip_agg_data'::REGCLASS,2029010100,2029020100,'trip_agg_data_20290101');
select add_range_partition('trip_agg_data'::REGCLASS,2029020100,2029030100,'trip_agg_data_20290201');
select add_range_partition('trip_agg_data'::REGCLASS,2029030100,2029040100,'trip_agg_data_20290301');
select add_range_partition('trip_agg_data'::REGCLASS,2029040100,2029050100,'trip_agg_data_20290401');
select add_range_partition('trip_agg_data'::REGCLASS,2029050100,2029060100,'trip_agg_data_20290501');
select add_range_partition('trip_agg_data'::REGCLASS,2029060100,2029070100,'trip_agg_data_20290601');
select add_range_partition('trip_agg_data'::REGCLASS,2029070100,2029080100,'trip_agg_data_20290701');
select add_range_partition('trip_agg_data'::REGCLASS,2029080100,2029090100,'trip_agg_data_20290801');
select add_range_partition('trip_agg_data'::REGCLASS,2029090100,2029100100,'trip_agg_data_20290901');
select add_range_partition('trip_agg_data'::REGCLASS,2029100100,2029110100,'trip_agg_data_20291001');
select add_range_partition('trip_agg_data'::REGCLASS,2029110100,2029120100,'trip_agg_data_20291101');
select add_range_partition('trip_agg_data'::REGCLASS,2029120100,2030010100,'trip_agg_data_20291201');
select add_range_partition('trip_agg_data'::REGCLASS,2030010100,2147483647,'trip_agg_data_gt_20300101');
select partition_table_concurrently ('trip_agg_data'::regclass,10000, 1.0);
select set_enable_parent('trip_agg_data'::regclass,false);