--Because table and index statistics in PostgreSQL are collected as point-in-time values, past figures cannot be compared with the current ones, so this tool was written to collect statistics snapshots.

--Create the schema that holds the collected data
create schema db_stat;

--Create the base tables that store the collected statistics
create table db_stat.snapshot_pg_stat_all_indexes
(relid int, indexrelid int, schemaname varchar(200), relname varchar(550), indexrelname varchar(550),
 idx_scan bigint, idx_tup_read bigint, idx_tup_fetch bigint,
 snapid int, snap_create_time timestamp, host_ip cidr, host_port int, host_type varchar(20), comment varchar(500));

create index idx_stat_indexe_relname_indexrelname_createtime on db_stat.snapshot_pg_stat_all_indexes(relname varchar_pattern_ops, indexrelname varchar_pattern_ops, snap_create_time);
create index idx_stat_indexe_snapid on db_stat.snapshot_pg_stat_all_indexes(snapid);

create table db_stat.snapshot_pg_stat_all_tables
(relid int, schemaname varchar(200), relname varchar(550),
 seq_scan bigint, seq_tup_read bigint, idx_scan bigint, idx_tup_fetch bigint,
 n_tup_ins bigint, n_tup_upd bigint, n_tup_del bigint, n_tup_hot_upd bigint,
 n_live_tup bigint, n_dead_tup bigint,
 last_vacuum timestamp, last_autovacuum timestamp, last_analyze timestamp, last_autoanalyze timestamp,
 vacuum_count bigint, autovacuum_count bigint, analyze_count bigint, autoanalyze_count bigint,
 snapid int, snap_create_time timestamp, host_ip cidr, host_port int, host_type varchar(20), comment varchar(500));

create index idx_stat_table_relname_createtime on db_stat.snapshot_pg_stat_all_tables(relname varchar_pattern_ops, snap_create_time);
create index idx_stat_table_snapid on db_stat.snapshot_pg_stat_all_tables(snapid);

create table db_stat.snapshot_pg_statio_all_indexes
(relid int, indexrelid int, schemaname varchar(200), relname varchar(550), indexrelname varchar(550),
 idx_blks_read bigint, idx_blks_hit bigint,
 snapid int, snap_create_time timestamp, host_ip cidr, host_port int, host_type varchar(20), comment varchar(500));

create index idx_statio_indexe_relname_indexrelname_createtime on db_stat.snapshot_pg_statio_all_indexes(relname varchar_pattern_ops, indexrelname varchar_pattern_ops, snap_create_time);
create index idx_statio_indexe_snapid on db_stat.snapshot_pg_statio_all_indexes(snapid);

create table db_stat.snapshot_pg_statio_all_tables
(relid int, schemaname varchar(200), relname varchar(550),
 heap_blks_read bigint, heap_blks_hit bigint, idx_blks_read bigint, idx_blks_hit bigint,
 toast_blks_read bigint, toast_blks_hit bigint, tidx_blks_read bigint, tidx_blks_hit bigint,
 snapid int, snap_create_time timestamp, host_ip cidr, host_port int, host_type varchar(20), comment varchar(500));

create index idx_statio_table_relname_createtime on db_stat.snapshot_pg_statio_all_tables(relname varchar_pattern_ops, snap_create_time);
create index idx_statio_table_snapid on db_stat.snapshot_pg_statio_all_tables(snapid);

--Create the sequence that numbers the snapshots
create sequence db_stat.seq_snapshot minvalue 1 maxvalue 99999999999999;

--After each collection run, fill in the snapshot id, timestamp and host columns
create or replace function db_stat.process_snapshot_table(in i_host_ip cidr, in i_host_port int, in i_host_type varchar, in i_comment varchar default '')
returns int as $$
declare
    v_snapid int;
    _detail  text;
    _hint    text;
    _message text;
begin
    select nextval('db_stat.seq_snapshot') into v_snapid;

    update db_stat.snapshot_pg_stat_all_indexes   set snapid=v_snapid, snap_create_time=now(), host_ip=i_host_ip, host_port=i_host_port, host_type=i_host_type, comment=i_comment where snapid is null;
    update db_stat.snapshot_pg_stat_all_tables    set snapid=v_snapid, snap_create_time=now(), host_ip=i_host_ip, host_port=i_host_port, host_type=i_host_type, comment=i_comment where snapid is null;
    update db_stat.snapshot_pg_statio_all_indexes set snapid=v_snapid, snap_create_time=now(), host_ip=i_host_ip, host_port=i_host_port, host_type=i_host_type, comment=i_comment where snapid is null;
    update db_stat.snapshot_pg_statio_all_tables  set snapid=v_snapid, snap_create_time=now(), host_ip=i_host_ip, host_port=i_host_port, host_type=i_host_type, comment=i_comment where snapid is null;

    -- return 1 on success, 0 on failure
    return 1;
EXCEPTION WHEN others then
    GET STACKED DIAGNOSTICS _message = message_text, _detail = pg_exception_detail, _hint = pg_exception_hint;
    raise notice 'message: %, detail: %, hint: %', _message, _detail, _hint;
    return 0;
end;
$$ language plpgsql;
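--(Not part of the original tool; a minimal sanity-check sketch.) Every freshly loaded row keeps snapid NULL
--until db_stat.process_snapshot_table() tags it, so after a collection run you can confirm that nothing was
--left untagged:
select count(*) as untagged_rows
from db_stat.snapshot_pg_stat_all_indexes
where snapid is null;   -- expected to return 0 after a successful call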
--Collect the statistics by hand, for testing
INSERT INTO db_stat.snapshot_pg_stat_all_indexes(relid, indexrelid, schemaname, relname, indexrelname, idx_scan, idx_tup_read, idx_tup_fetch)
SELECT relid, indexrelid, schemaname, relname, indexrelname, idx_scan, idx_tup_read, idx_tup_fetch FROM pg_stat_all_indexes;

INSERT INTO db_stat.snapshot_pg_stat_all_tables(relid, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, last_vacuum, last_autovacuum, last_analyze, last_autoanalyze, vacuum_count, autovacuum_count, analyze_count, autoanalyze_count)
SELECT relid, schemaname, relname, seq_scan, seq_tup_read, idx_scan, idx_tup_fetch, n_tup_ins, n_tup_upd, n_tup_del, n_tup_hot_upd, n_live_tup, n_dead_tup, last_vacuum, last_autovacuum, last_analyze, last_autoanalyze, vacuum_count, autovacuum_count, analyze_count, autoanalyze_count FROM pg_stat_all_tables;

INSERT INTO db_stat.snapshot_pg_statio_all_indexes(relid, indexrelid, schemaname, relname, indexrelname, idx_blks_read, idx_blks_hit)
SELECT relid, indexrelid, schemaname, relname, indexrelname, idx_blks_read, idx_blks_hit FROM pg_statio_all_indexes;

INSERT INTO db_stat.snapshot_pg_statio_all_tables(relid, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit)
SELECT relid, schemaname, relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit, toast_blks_read, toast_blks_hit, tidx_blks_read, tidx_blks_hit FROM pg_statio_all_tables;

--Tag the rows just collected
select db_stat.process_snapshot_table('192.168.174.10', 5432, 'MASTER', '');

--Create a shell script and run it from a daily scheduled job to collect the statistics
cat snap_stat.sh
#!/bin/sh
source ~/.bash_profile
source /etc/profile

Psql="psql"

help_msg (){
echo ""
echo "Usage:"
echo " -f  output log file; defaults to /tmp/snapshot_pg_stat.log when empty"
echo " -u  database user name; defaults to postgres when empty"
echo " -d  database name to connect to; defaults to postgres when empty"
echo " -H  database host IP (required; recorded as the snapshot host)"
echo " -p  database port; defaults to 5432 when empty"
echo " -m  database role: MASTER for a primary, SLAVE for a standby"
echo ""
exit 0
}
# end functions

while getopts "f:u:d:H:p:m:" flag
do
    case $flag in
    f) FILENAME=$OPTARG ;;
    u) USERNAME=$OPTARG ;;
    d) DATABASE=$OPTARG ;;
    H) HOST=$OPTARG ;;
    p) PORT=$OPTARG ;;
    m) DATABASE_TYPE=$OPTARG ;;
    \?|h) help_msg ;;
    esac
done

if [ $USERNAME"x" == "x" ]
then
    USERNAME=postgres
fi

if [ $DATABASE"x" == "x" ]
then
    DATABASE=postgres
fi

if [ $HOST"x" == "x" ]
then
    help_msg
fi

if [ $PORT"x" == "x" ]
then
    PORT=5432
fi

if [ $DATABASE_TYPE"x" == "x" ]
then
    DATABASE_TYPE=MASTER
fi

if [ $FILENAME"x" == "x" ]
then
    FILENAME=/tmp/snapshot_pg_stat.log
fi

OUTPUT_FILENAME=/tmp/snapshot_pg_stat.csv

echo "" > $FILENAME

if [ ! -f $FILENAME ]
then
    touch $FILENAME
else
    printf "" | tee -a $FILENAME
fi

echo "Script run started at `date "+%Y-%m-%d %H:%M:%S"`" >> $FILENAME

echo "Processing pg_stat_all_indexes, started at `date "+%Y-%m-%d %H:%M:%S"`" >> $FILENAME
$Psql -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid,indexrelid,schemaname,relname,indexrelname,idx_scan,idx_tup_read,idx_tup_fetch from pg_stat_all_indexes) to '$OUTPUT_FILENAME' with csv"
$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_stat_all_indexes(relid,indexrelid,schemaname,relname,indexrelname,idx_scan,idx_tup_read,idx_tup_fetch) from '$OUTPUT_FILENAME' with csv"

echo "Processing pg_stat_all_tables, started at `date "+%Y-%m-%d %H:%M:%S"`" >> $FILENAME
$Psql -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid,schemaname,relname,seq_scan,seq_tup_read,idx_scan,idx_tup_fetch,n_tup_ins,n_tup_upd,n_tup_del,n_tup_hot_upd,n_live_tup,n_dead_tup,last_vacuum,last_autovacuum,last_analyze,last_autoanalyze,vacuum_count,autovacuum_count,analyze_count,autoanalyze_count from pg_stat_all_tables) to '$OUTPUT_FILENAME' with csv"
$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_stat_all_tables(relid,schemaname,relname,seq_scan,seq_tup_read,idx_scan,idx_tup_fetch,n_tup_ins,n_tup_upd,n_tup_del,n_tup_hot_upd,n_live_tup,n_dead_tup,last_vacuum,last_autovacuum,last_analyze,last_autoanalyze,vacuum_count,autovacuum_count,analyze_count,autoanalyze_count) from '$OUTPUT_FILENAME' with csv"

echo "Processing pg_statio_all_indexes, started at `date "+%Y-%m-%d %H:%M:%S"`" >> $FILENAME
$Psql -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid,indexrelid,schemaname,relname,indexrelname,idx_blks_read,idx_blks_hit from pg_statio_all_indexes) to '$OUTPUT_FILENAME' with csv"
$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_statio_all_indexes(relid,indexrelid,schemaname,relname,indexrelname,idx_blks_read,idx_blks_hit) from '$OUTPUT_FILENAME' with csv"

echo "Processing pg_statio_all_tables, started at `date "+%Y-%m-%d %H:%M:%S"`" >> $FILENAME
$Psql -p $PORT -U $USERNAME -d $DATABASE -c "copy (select relid,schemaname,relname,heap_blks_read,heap_blks_hit,idx_blks_read,idx_blks_hit,toast_blks_read,toast_blks_hit,tidx_blks_read,tidx_blks_hit from pg_statio_all_tables) to '$OUTPUT_FILENAME' with csv"
$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "\copy db_stat.snapshot_pg_statio_all_tables(relid,schemaname,relname,heap_blks_read,heap_blks_hit,idx_blks_read,idx_blks_hit,toast_blks_read,toast_blks_hit,tidx_blks_read,tidx_blks_hit) from '$OUTPUT_FILENAME' with csv"

$Psql -p 5432 -U postgres -d postgres -h 192.168.174.11 -c "select db_stat.process_snapshot_table('$HOST',$PORT,'$DATABASE_TYPE','database stat snapshot');"

echo "############################################################################################" >> $FILENAME
echo "Script run finished at `date "+%Y-%m-%d %H:%M:%S"`" >> $FILENAME
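--(Not part of the original post; a small follow-up query sketch.) Run against the central repository
--(192.168.174.11 in the script above) to confirm that each scheduled run produced a snapshot and to see
--how many index rows it captured:
select snapid, snap_create_time, host_ip, host_port, host_type, count(*) as index_rows
from db_stat.snapshot_pg_stat_all_indexes
group by snapid, snap_create_time, host_ip, host_port, host_type
order by snapid desc;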
"############################################################################################" >> $FILENAME echo "脚本于时间 `date "+%Y-%m-%d %H:%M:%S"` 结束执行" >> $FILENAME --清空数据表 truncate table db_stat.snapshot_pg_stat_all_indexes ; truncate table db_stat.snapshot_pg_stat_all_tables ; truncate table db_stat.snapshot_pg_statio_all_indexes ; truncate table db_stat.snapshot_pg_statio_all_tables ; --手动执行shell脚本 ./snap_stat.sh -d mydb -p 5432 -m SLAVE -u postgres -H 192.168.174.10 --定时任务,每天8点开始执行 8 8 * * * /db/pgsql/snap_stat.sh -d mydb -p 5435 -m SLAVE -u postgres -H 192.168.174.10 --查看使用比较少的索引 select * from ( SELECT t.relname,t.indexrelname,max(idx_scan)-min(idx_scan) AS diff_idx_scan,max(idx_tup_read)-min(idx_tup_read) AS diff_idx_tup_read FROM db_stat.snapshot_pg_stat_all_indexes t --WHERE snap_create_time BETWEEN '2015-12-11' AND '2016-03-11' GROUP BY t.relname,t.indexrelname) t1 order by diff_idx_scan,indexrelname ; --查看索引使用率趋势图 select relname,snap_day,diff_idx_scan,case when sum(diff_idx_scan) over w1 >0 then diff_idx_scan*100/sum(diff_idx_scan) over w1 else 0 end as diff_idx_scan_percent,diff_idx_tup_read,case when sum(diff_idx_tup_read) over w1 >0 then diff_idx_tup_read*100/sum(diff_idx_tup_read) over w1 else 0 end as diff_idx_tup_read_percent from ( SELECT t.relname,date_trunc('hour',snap_create_time) snap_day,t.idx_scan-lag(t.idx_scan,1) over w AS diff_idx_scan,t.idx_tup_read - lag(t.idx_tup_read,1) over w AS diff_idx_tup_read from db_stat.snapshot_pg_stat_all_indexes t --where indexrelname in ('','') WINDOW w AS (PARTITION BY t.relname,t.indexrelname ORDER BY date_trunc('hour',t.snap_create_time)) ) t1 where diff_idx_scan is not null WINDOW w1 as (PARTITION BY t1.relname,t1.indexrelname) order by relname,snap_day;