[ { "msg_contents": "Group by using CHAR columns takes abnormally big time.\n\nHow to speed it ?\n\nAndrus.\n\n8.1.4, cluster locale is en-us, db encoding is utf-8\n\nset search_path to firma2,public;\nexplain analyze SELECT\n CASE WHEN bilkaib.raha='EEK' THEN 0 ELSE bilkaib.id END,\n bilkaib.DB,\n CASE WHEN dbkonto.objekt1='+' THEN bilkaib.DBOBJEKT ELSE null END:: \nCHAR(10) AS dbobjekt,\n CASE WHEN dbkonto.objekt2='+' THEN bilkaib.DB2OBJEKT ELSE null END:: \nCHAR(10) AS db2objekt,\n CASE WHEN dbkonto.objekt3='+' THEN bilkaib.DB3OBJEKT ELSE null END:: \nCHAR(10) AS db3objekt,\n CASE WHEN dbkonto.objekt4='+' THEN bilkaib.DB4OBJEKT ELSE null END:: \nCHAR(10) AS db4objekt,\n CASE WHEN dbkonto.objekt5='+' THEN bilkaib.DB5OBJEKT ELSE null END:: \nCHAR(10) AS db5objekt,\n CASE WHEN dbkonto.objekt6='+' THEN bilkaib.DB6OBJEKT ELSE null END:: \nCHAR(10) AS db6objekt,\n CASE WHEN dbkonto.objekt7='+' THEN bilkaib.DB7OBJEKT ELSE null END:: \nCHAR(10) AS db7objekt,\n CASE WHEN dbkonto.objekt8='+' THEN bilkaib.DB8OBJEKT ELSE null END:: \nCHAR(10) AS db8objekt,\n CASE WHEN dbkonto.objekt9='+' THEN bilkaib.DB9OBJEKT ELSE null END:: \nCHAR(10) AS db9objekt,\n bilkaib.CR,\n CASE WHEN crkonto.objekt1='+' THEN bilkaib.crOBJEKT ELSE null END:: \nCHAR(10) AS crobjekt,\n CASE WHEN crkonto.objekt2='+' THEN bilkaib.cr2OBJEKT ELSE null END:: \nCHAR(10) AS cr2objekt,\n CASE WHEN crkonto.objekt3='+' THEN bilkaib.cr3OBJEKT ELSE null END:: \nCHAR(10) AS cr3objekt,\n CASE WHEN crkonto.objekt4='+' THEN bilkaib.cr4OBJEKT ELSE null END:: \nCHAR(10) AS cr4objekt,\n CASE WHEN crkonto.objekt5='+' THEN bilkaib.cr5OBJEKT ELSE null END:: \nCHAR(10) AS cr5objekt,\n CASE WHEN crkonto.objekt6='+' THEN bilkaib.cr6OBJEKT ELSE null END:: \nCHAR(10) AS cr6objekt,\n CASE WHEN crkonto.objekt7='+' THEN bilkaib.cr7OBJEKT ELSE null END:: \nCHAR(10) AS cr7objekt,\n CASE WHEN crkonto.objekt8='+' THEN bilkaib.cr8OBJEKT ELSE null END:: \nCHAR(10) AS cr8objekt,\n CASE WHEN crkonto.objekt9='+' THEN bilkaib.cr9OBJEKT ELSE null END:: \nCHAR(10) AS cr9objekt,\n bilkaib.RAHA,\n CASE WHEN crkonto.klienkaupa OR dbkonto.klienkaupa OR crkonto.tyyp IN \n('K','I') OR dbkonto.tyyp IN ('K','I')\n THEN bilkaib.KLIENT ELSE NULL END AS klient,\n\n bilkaib.EXCHRATE,\n\n CASE WHEN crkonto.klienkaupa OR dbkonto.klienkaupa\n OR crkonto.tyyp IN ('K','I') OR dbkonto.tyyp IN ('K','I')\n THEN\n klient.nimi ELSE NULL END AS kliendinim, -- 24.\n\n CAST(CASE WHEN crkonto.arvekaupa OR dbkonto.arvekaupa\n OR (bilkaib.cr<>'00' AND crkonto.tyyp='K')\n OR (bilkaib.db<>'00' AND dbkonto.tyyp='K')\n THEN bilkaib.doknr ELSE NULL END AS CHAR(25)) AS doknr\n\n ,bilkaib.ratediffer\n ,CASE WHEN bilkaib.raha='EEK' THEN DATE'20070101' ELSE bilkaib.kuupaev END \nAS kuupaev\n\n ,SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n where ( bilkaib.cr LIKE '112'||'%' OR bilkaib.db LIKE '112'||'%' ) AND \nbilkaib.kuupaev BETWEEN '2007-01-01' AND '2008-11-26'\n GROUP BY \n1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28\n\n\"GroupAggregate (cost=52316.23..61434.48 rows=41923 width=838) (actual \ntime=10771.337..11372.135 rows=577 loops=1)\"\n\" -> Sort (cost=52316.23..52421.03 rows=41923 width=838) (actual \ntime=10770.529..11012.651 rows=52156 loops=1)\"\n\" Sort Key: CASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN 0 ELSE \nbilkaib.id END, bilkaib.db, (CASE WHEN 
(dbkonto.objekt1 = '+'::bpchar) THEN \nbilkaib.dbobjekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(dbkonto.objekt2 = '+'::bpchar) THEN bilkaib.db2objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (dbkonto.objekt3 = '+'::bpchar) THEN \nbilkaib.db3objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(dbkonto.objekt4 = '+'::bpchar) THEN bilkaib.db4objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (dbkonto.objekt5 = '+'::bpchar) THEN \nbilkaib.db5objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(dbkonto.objekt6 = '+'::bpchar) THEN bilkaib.db6objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (dbkonto.objekt7 = '+'::bpchar) THEN \nbilkaib.db7objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(dbkonto.objekt8 = '+'::bpchar) THEN bilkaib.db8objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (dbkonto.objekt9 = '+'::bpchar) THEN \nbilkaib.db9objekt ELSE NULL::bpchar END)::character(10), bilkaib.cr, (CASE \nWHEN (crkonto.objekt1 = '+'::bpchar) THEN bilkaib.crobjekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (crkonto.objekt2 = '+'::bpchar) THEN \nbilkaib.cr2objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(crkonto.objekt3 = '+'::bpchar) THEN bilkaib.cr3objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (crkonto.objekt4 = '+'::bpchar) THEN \nbilkaib.cr4objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(crkonto.objekt5 = '+'::bpchar) THEN bilkaib.cr5objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (crkonto.objekt6 = '+'::bpchar) THEN \nbilkaib.cr6objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(crkonto.objekt7 = '+'::bpchar) THEN bilkaib.cr7objekt ELSE NULL::bpchar \nEND)::character(10), (CASE WHEN (crkonto.objekt8 = '+'::bpchar) THEN \nbilkaib.cr8objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN \n(crkonto.objekt9 = '+'::bpchar) THEN bilkaib.cr9objekt ELSE NULL::bpchar \nEND)::character(10), bilkaib.raha, CASE WHEN ((crkonto.klienkaupa)::boolean \nOR (dbkonto.klienkaupa)::boolean OR (crkonto.tyyp = 'K'::bpchar) OR \n(crkonto.tyyp = 'I'::bpchar) OR (dbkonto.tyyp = 'K'::bpchar) OR \n(dbkonto.tyyp = 'I'::bpchar)) THEN bilkaib.klient ELSE NULL::bpchar END, \nbilkaib.exchrate, CASE WHEN ((crkonto.klienkaupa)::boolean OR \n(dbkonto.klienkaupa)::boolean OR (crkonto.tyyp = 'K'::bpchar) OR \n(crkonto.tyyp = 'I'::bpchar) OR (dbkonto.tyyp = 'K'::bpchar) OR \n(dbkonto.tyyp = 'I'::bpchar)) THEN klient.nimi ELSE NULL::bpchar END, (CASE \nWHEN ((crkonto.arvekaupa)::boolean OR (dbkonto.arvekaupa)::boolean OR \n((bilkaib.cr <> '00'::bpchar) AND (crkonto.tyyp = 'K'::bpchar)) OR \n((bilkaib.db <> '00'::bpchar) AND (dbkonto.tyyp = 'K'::bpchar))) THEN \nbilkaib.doknr ELSE NULL::bpchar END)::character(25), bilkaib.ratediffer, \nCASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN '2007-01-01'::date ELSE \nbilkaib.kuupaev END\"\n\" -> Hash Left Join (cost=936.48..40184.64 rows=41923 width=838) \n(actual time=46.000..2820.944 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=785.35..34086.74 rows=41923 width=764) \n(actual time=34.547..1563.790 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join (cost=764.26..33403.76 rows=48533 \nwidth=712) (actual time=32.069..1082.505 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Bitmap Heap Scan on bilkaib \n(cost=743.17..32616.41 rows=56185 width=660) (actual time=29.652..518.289 \nrows=52156 loops=1)\"\n\" Recheck Cond: ((cr ~~ '112%'::text) 
OR (db \n~~ '112%'::text))\"\n\" Filter: (((cr ~~ '112%'::text) OR (db ~~ \n'112%'::text)) AND (kuupaev >= '2007-01-01'::date) AND (kuupaev <= \n'2008-11-26'::date))\"\n\" -> BitmapOr (cost=743.17..743.17 \nrows=65862 width=0) (actual time=26.539..26.539 rows=0 loops=1)\"\n\" -> Bitmap Index Scan on \nbilkaib_cr_pattern_idx (cost=0.00..236.63 rows=20939 width=0) (actual \ntime=8.510..8.510 rows=21028 loops=1)\"\n\" Index Cond: ((cr ~>=~ \n'112'::bpchar) AND (cr ~<~ '113'::bpchar))\"\n\" -> Bitmap Index Scan on \nbilkaib_db_pattern_idx (cost=0.00..506.54 rows=44923 width=0) (actual \ntime=18.013..18.013 rows=45426 loops=1)\"\n\" Index Cond: ((db ~>=~ \n'112'::bpchar) AND (db ~<~ '113'::bpchar))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=66) \n(actual time=2.375..2.375 rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto \n(cost=0.00..20.49 rows=241 width=66) (actual time=0.011..1.207 rows=241 \nloops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=66) (actual \ntime=2.451..2.451 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49 \nrows=241 width=66) (actual time=0.022..1.259 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=90) (actual \ntime=11.371..11.371 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290 \nwidth=90) (actual time=0.009..5.587 rows=1290 loops=1)\"\n\"Total runtime: 11380.437 ms\"\n\n\nIf group by is removed same query runs 8 times (!) faster:\n\nset search_path to firma2,public;\nexplain analyze SELECT\n SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n where ( bilkaib.cr LIKE '112'||'%' OR bilkaib.db LIKE '112'||'%' ) AND \nbilkaib.kuupaev BETWEEN '2007-01-01' AND '2008-11-26'\n\n\"Aggregate (cost=34944.27..34944.28 rows=1 width=11) (actual \ntime=1781.456..1781.460 rows=1 loops=1)\"\n\" -> Hash Left Join (cost=936.48..34839.46 rows=41923 width=11) (actual \ntime=41.194..1545.105 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=785.35..34086.74 rows=41923 width=27) (actual \ntime=30.372..1120.431 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join (cost=764.26..33403.76 rows=48533 width=41) \n(actual time=28.168..710.336 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Bitmap Heap Scan on bilkaib (cost=743.17..32616.41 \nrows=56185 width=55) (actual time=25.970..294.638 rows=52156 loops=1)\"\n\" Recheck Cond: ((cr ~~ '112%'::text) OR (db ~~ \n'112%'::text))\"\n\" Filter: (((cr ~~ '112%'::text) OR (db ~~ \n'112%'::text)) AND (kuupaev >= '2007-01-01'::date) AND (kuupaev <= \n'2008-11-26'::date))\"\n\" -> BitmapOr (cost=743.17..743.17 rows=65862 \nwidth=0) (actual time=23.056..23.056 rows=0 loops=1)\"\n\" -> Bitmap Index Scan on \nbilkaib_cr_pattern_idx (cost=0.00..236.63 rows=20939 width=0) (actual \ntime=7.414..7.414 rows=21028 loops=1)\"\n\" Index Cond: ((cr ~>=~ '112'::bpchar) \nAND (cr ~<~ '113'::bpchar))\"\n\" -> Bitmap Index Scan on \nbilkaib_db_pattern_idx (cost=0.00..506.54 rows=44923 width=0) (actual \ntime=15.627..15.627 rows=45426 loops=1)\"\n\" Index Cond: ((db ~>=~ '112'::bpchar) \nAND (db ~<~ '113'::bpchar))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual \ntime=2.164..2.164 
rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto (cost=0.00..20.49 \nrows=241 width=14) (actual time=0.012..1.205 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual \ntime=2.177..2.177 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49 \nrows=241 width=14) (actual time=0.019..1.203 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=16) (actual \ntime=10.782..10.782 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290 \nwidth=16) (actual time=0.009..5.597 rows=1290 loops=1)\"\n\"Total runtime: 1781.673 ms\"\n\n", "msg_date": "Fri, 28 Nov 2008 17:04:50 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "The below query is spending most of its time in the sort, or perhaps the complicated check condition before it.\nThe explain has a 8 second gap in time between the 2.8 seconds after the Hash Left Join and before the Sort. I'm guessing its hidden in the sort.\n\nYou can get the planner to switch from a sort to a hash aggregate with a large work_mem. Try calling\n\nSET work_mem = '100MB';\n\nbefore this query first.\n\nIt may not help that much if the check time is as expensive as it looks in the plan below, but its very easy to try.\nIf it does help, you may want to temporarily increase that value only for this query rather than making it a default in the config file.\n________________________________________\nFrom: [email protected] [[email protected]] On Behalf Of Andrus [[email protected]]\nSent: Friday, November 28, 2008 7:04 AM\nTo: [email protected]\nSubject: [PERFORM] Increasing GROUP BY CHAR columns speed\n\nGroup by using CHAR columns takes abnormally big time.\n\nHow to speed it ?\n\nAndrus.\n\n8.1.4, cluster locale is en-us, db encoding is utf-8\n\nset search_path to firma2,public;\nexplain analyze SELECT\n CASE WHEN bilkaib.raha='EEK' THEN 0 ELSE bilkaib.id END,\n bilkaib.DB,\n CASE WHEN dbkonto.objekt1='+' THEN bilkaib.DBOBJEKT ELSE null END::\nCHAR(10) AS dbobjekt,\n CASE WHEN dbkonto.objekt2='+' THEN bilkaib.DB2OBJEKT ELSE null END::\nCHAR(10) AS db2objekt,\n CASE WHEN dbkonto.objekt3='+' THEN bilkaib.DB3OBJEKT ELSE null END::\nCHAR(10) AS db3objekt,\n CASE WHEN dbkonto.objekt4='+' THEN bilkaib.DB4OBJEKT ELSE null END::\nCHAR(10) AS db4objekt,\n CASE WHEN dbkonto.objekt5='+' THEN bilkaib.DB5OBJEKT ELSE null END::\nCHAR(10) AS db5objekt,\n CASE WHEN dbkonto.objekt6='+' THEN bilkaib.DB6OBJEKT ELSE null END::\nCHAR(10) AS db6objekt,\n CASE WHEN dbkonto.objekt7='+' THEN bilkaib.DB7OBJEKT ELSE null END::\nCHAR(10) AS db7objekt,\n CASE WHEN dbkonto.objekt8='+' THEN bilkaib.DB8OBJEKT ELSE null END::\nCHAR(10) AS db8objekt,\n CASE WHEN dbkonto.objekt9='+' THEN bilkaib.DB9OBJEKT ELSE null END::\nCHAR(10) AS db9objekt,\n bilkaib.CR,\n CASE WHEN crkonto.objekt1='+' THEN bilkaib.crOBJEKT ELSE null END::\nCHAR(10) AS crobjekt,\n CASE WHEN crkonto.objekt2='+' THEN bilkaib.cr2OBJEKT ELSE null END::\nCHAR(10) AS cr2objekt,\n CASE WHEN crkonto.objekt3='+' THEN bilkaib.cr3OBJEKT ELSE null END::\nCHAR(10) AS cr3objekt,\n CASE WHEN crkonto.objekt4='+' THEN bilkaib.cr4OBJEKT ELSE null END::\nCHAR(10) AS cr4objekt,\n CASE WHEN crkonto.objekt5='+' THEN bilkaib.cr5OBJEKT ELSE null END::\nCHAR(10) AS cr5objekt,\n CASE WHEN crkonto.objekt6='+' THEN bilkaib.cr6OBJEKT ELSE null END::\nCHAR(10) AS cr6objekt,\n CASE WHEN crkonto.objekt7='+' 
THEN bilkaib.cr7OBJEKT ELSE null END::\nCHAR(10) AS cr7objekt,\n CASE WHEN crkonto.objekt8='+' THEN bilkaib.cr8OBJEKT ELSE null END::\nCHAR(10) AS cr8objekt,\n CASE WHEN crkonto.objekt9='+' THEN bilkaib.cr9OBJEKT ELSE null END::\nCHAR(10) AS cr9objekt,\n bilkaib.RAHA,\n CASE WHEN crkonto.klienkaupa OR dbkonto.klienkaupa OR crkonto.tyyp IN\n('K','I') OR dbkonto.tyyp IN ('K','I')\n THEN bilkaib.KLIENT ELSE NULL END AS klient,\n\n bilkaib.EXCHRATE,\n\n CASE WHEN crkonto.klienkaupa OR dbkonto.klienkaupa\n OR crkonto.tyyp IN ('K','I') OR dbkonto.tyyp IN ('K','I')\n THEN\n klient.nimi ELSE NULL END AS kliendinim, -- 24.\n\n CAST(CASE WHEN crkonto.arvekaupa OR dbkonto.arvekaupa\n OR (bilkaib.cr<>'00' AND crkonto.tyyp='K')\n OR (bilkaib.db<>'00' AND dbkonto.tyyp='K')\n THEN bilkaib.doknr ELSE NULL END AS CHAR(25)) AS doknr\n\n ,bilkaib.ratediffer\n ,CASE WHEN bilkaib.raha='EEK' THEN DATE'20070101' ELSE bilkaib.kuupaev END\nAS kuupaev\n\n ,SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n where ( bilkaib.cr LIKE '112'||'%' OR bilkaib.db LIKE '112'||'%' ) AND\nbilkaib.kuupaev BETWEEN '2007-01-01' AND '2008-11-26'\n GROUP BY\n1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28\n\n\"GroupAggregate (cost=52316.23..61434.48 rows=41923 width=838) (actual\ntime=10771.337..11372.135 rows=577 loops=1)\"\n\" -> Sort (cost=52316.23..52421.03 rows=41923 width=838) (actual\ntime=10770.529..11012.651 rows=52156 loops=1)\"\n\" Sort Key: CASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN 0 ELSE\nbilkaib.id END, bilkaib.db, (CASE WHEN (dbkonto.objekt1 = '+'::bpchar) THEN\nbilkaib.dbobjekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt2 = '+'::bpchar) THEN bilkaib.db2objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt3 = '+'::bpchar) THEN\nbilkaib.db3objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt4 = '+'::bpchar) THEN bilkaib.db4objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt5 = '+'::bpchar) THEN\nbilkaib.db5objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt6 = '+'::bpchar) THEN bilkaib.db6objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt7 = '+'::bpchar) THEN\nbilkaib.db7objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt8 = '+'::bpchar) THEN bilkaib.db8objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt9 = '+'::bpchar) THEN\nbilkaib.db9objekt ELSE NULL::bpchar END)::character(10), bilkaib.cr, (CASE\nWHEN (crkonto.objekt1 = '+'::bpchar) THEN bilkaib.crobjekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt2 = '+'::bpchar) THEN\nbilkaib.cr2objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt3 = '+'::bpchar) THEN bilkaib.cr3objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt4 = '+'::bpchar) THEN\nbilkaib.cr4objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt5 = '+'::bpchar) THEN bilkaib.cr5objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt6 = '+'::bpchar) THEN\nbilkaib.cr6objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt7 = '+'::bpchar) THEN bilkaib.cr7objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt8 = '+'::bpchar) THEN\nbilkaib.cr8objekt ELSE NULL::bpchar 
END)::character(10), (CASE WHEN\n(crkonto.objekt9 = '+'::bpchar) THEN bilkaib.cr9objekt ELSE NULL::bpchar\nEND)::character(10), bilkaib.raha, CASE WHEN ((crkonto.klienkaupa)::boolean\nOR (dbkonto.klienkaupa)::boolean OR (crkonto.tyyp = 'K'::bpchar) OR\n(crkonto.tyyp = 'I'::bpchar) OR (dbkonto.tyyp = 'K'::bpchar) OR\n(dbkonto.tyyp = 'I'::bpchar)) THEN bilkaib.klient ELSE NULL::bpchar END,\nbilkaib.exchrate, CASE WHEN ((crkonto.klienkaupa)::boolean OR\n(dbkonto.klienkaupa)::boolean OR (crkonto.tyyp = 'K'::bpchar) OR\n(crkonto.tyyp = 'I'::bpchar) OR (dbkonto.tyyp = 'K'::bpchar) OR\n(dbkonto.tyyp = 'I'::bpchar)) THEN klient.nimi ELSE NULL::bpchar END, (CASE\nWHEN ((crkonto.arvekaupa)::boolean OR (dbkonto.arvekaupa)::boolean OR\n((bilkaib.cr <> '00'::bpchar) AND (crkonto.tyyp = 'K'::bpchar)) OR\n((bilkaib.db <> '00'::bpchar) AND (dbkonto.tyyp = 'K'::bpchar))) THEN\nbilkaib.doknr ELSE NULL::bpchar END)::character(25), bilkaib.ratediffer,\nCASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN '2007-01-01'::date ELSE\nbilkaib.kuupaev END\"\n\" -> Hash Left Join (cost=936.48..40184.64 rows=41923 width=838)\n(actual time=46.000..2820.944 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=785.35..34086.74 rows=41923 width=764)\n(actual time=34.547..1563.790 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join (cost=764.26..33403.76 rows=48533\nwidth=712) (actual time=32.069..1082.505 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Bitmap Heap Scan on bilkaib\n(cost=743.17..32616.41 rows=56185 width=660) (actual time=29.652..518.289\nrows=52156 loops=1)\"\n\" Recheck Cond: ((cr ~~ '112%'::text) OR (db\n~~ '112%'::text))\"\n\" Filter: (((cr ~~ '112%'::text) OR (db ~~\n'112%'::text)) AND (kuupaev >= '2007-01-01'::date) AND (kuupaev <=\n'2008-11-26'::date))\"\n\" -> BitmapOr (cost=743.17..743.17\nrows=65862 width=0) (actual time=26.539..26.539 rows=0 loops=1)\"\n\" -> Bitmap Index Scan on\nbilkaib_cr_pattern_idx (cost=0.00..236.63 rows=20939 width=0) (actual\ntime=8.510..8.510 rows=21028 loops=1)\"\n\" Index Cond: ((cr ~>=~\n'112'::bpchar) AND (cr ~<~ '113'::bpchar))\"\n\" -> Bitmap Index Scan on\nbilkaib_db_pattern_idx (cost=0.00..506.54 rows=44923 width=0) (actual\ntime=18.013..18.013 rows=45426 loops=1)\"\n\" Index Cond: ((db ~>=~\n'112'::bpchar) AND (db ~<~ '113'::bpchar))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=66)\n(actual time=2.375..2.375 rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto\n(cost=0.00..20.49 rows=241 width=66) (actual time=0.011..1.207 rows=241\nloops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=66) (actual\ntime=2.451..2.451 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49\nrows=241 width=66) (actual time=0.022..1.259 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=90) (actual\ntime=11.371..11.371 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290\nwidth=90) (actual time=0.009..5.587 rows=1290 loops=1)\"\n\"Total runtime: 11380.437 ms\"\n\n\nIf group by is removed same query runs 8 times (!) 
faster:\n\nset search_path to firma2,public;\nexplain analyze SELECT\n SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n where ( bilkaib.cr LIKE '112'||'%' OR bilkaib.db LIKE '112'||'%' ) AND\nbilkaib.kuupaev BETWEEN '2007-01-01' AND '2008-11-26'\n\n\"Aggregate (cost=34944.27..34944.28 rows=1 width=11) (actual\ntime=1781.456..1781.460 rows=1 loops=1)\"\n\" -> Hash Left Join (cost=936.48..34839.46 rows=41923 width=11) (actual\ntime=41.194..1545.105 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=785.35..34086.74 rows=41923 width=27) (actual\ntime=30.372..1120.431 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join (cost=764.26..33403.76 rows=48533 width=41)\n(actual time=28.168..710.336 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Bitmap Heap Scan on bilkaib (cost=743.17..32616.41\nrows=56185 width=55) (actual time=25.970..294.638 rows=52156 loops=1)\"\n\" Recheck Cond: ((cr ~~ '112%'::text) OR (db ~~\n'112%'::text))\"\n\" Filter: (((cr ~~ '112%'::text) OR (db ~~\n'112%'::text)) AND (kuupaev >= '2007-01-01'::date) AND (kuupaev <=\n'2008-11-26'::date))\"\n\" -> BitmapOr (cost=743.17..743.17 rows=65862\nwidth=0) (actual time=23.056..23.056 rows=0 loops=1)\"\n\" -> Bitmap Index Scan on\nbilkaib_cr_pattern_idx (cost=0.00..236.63 rows=20939 width=0) (actual\ntime=7.414..7.414 rows=21028 loops=1)\"\n\" Index Cond: ((cr ~>=~ '112'::bpchar)\nAND (cr ~<~ '113'::bpchar))\"\n\" -> Bitmap Index Scan on\nbilkaib_db_pattern_idx (cost=0.00..506.54 rows=44923 width=0) (actual\ntime=15.627..15.627 rows=45426 loops=1)\"\n\" Index Cond: ((db ~>=~ '112'::bpchar)\nAND (db ~<~ '113'::bpchar))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual\ntime=2.164..2.164 rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto (cost=0.00..20.49\nrows=241 width=14) (actual time=0.012..1.205 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual\ntime=2.177..2.177 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49\nrows=241 width=14) (actual time=0.019..1.203 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=16) (actual\ntime=10.782..10.782 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290\nwidth=16) (actual time=0.009..5.597 rows=1290 loops=1)\"\n\"Total runtime: 1781.673 ms\"\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n", "msg_date": "Fri, 28 Nov 2008 09:07:37 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "Scott,\n\nThank you.\n\n>The below query is spending most of its time in the sort, or perhaps the\n>complicated check condition before it.\n>The explain has a 8 second gap in time between the 2.8 seconds after the\n>Hash Left Join and before the Sort. I'm guessing its hidden in the sort.\n>You can get the planner to switch from a sort to a hash aggregate with a\n>large work_mem. 
Try calling\n>SET work_mem = '100MB';\n>before this query first.\n>It may not help that much if the check time is as expensive as it looks in\n>the plan below, but its very easy to try.\n>If it does help, you may want to temporarily increase that value only for\n>this query rather than making it a default in the config file.\n\nSET work_mem = 2097151 (this is max allowed value) or SET work_mem = 97151\ndecreases query time from 12 seconds to 9 seconds.\n\nMy application may ran in servers with 1 GB RAM only. I'm afraid than in\nthose servers 2097151 will cause error and abort query.\n\nIs it reasonable to add\n\nSET work_mem = 97151\n\nbefore this query and\n\nSET work_mem TO DEFAULT\n\nafter this query ?\nOr should I use max value in cases where there are much more data ? This\nquery may return a much more data for longer period and more accounts.\n\nCASE WHEN dbkonto.objekt1='+' THEN bilkaib.DBOBJEKT ELSE null END::\nCHAR(10) AS dbobjekt\n\nis ugly. I tried to rewrite it using NullIfNot(dbkonto.objekt1, '+') AS\ndbobjekt, bot got error\nERROR: function nullifnot(character, \"unknown\") does not exist\n\nHow to re-write this in nicer and faster way ?\n\nFor most of rows checks\n\nWHEN objektn='+'\n\nwill fail: objektn values are usually rarely equal to '+': they are empty\nor null mostly.\n\nMaybe this can be used to optimize the query.\n\nBtw.\nTom Lane's reply from earlier discussion about this query speed (then there\nwere '' instead of NULL in group columns) some years ago:\n\n\"I think the problem is probably that you're sorting two dozen CHAR\ncolumns, and that in many of the rows all these entries are '' forcing\nthe sort code to compare all two dozen columns (not so)? So the sort\nends up doing lots and lots and lots of CHAR comparisons. Which can\nbe slow, especially in non-C locales.\"\n\nlocale specific check is not nessecary for those CHAR(10) columns. How to\nforce PostgreSql to use binary check for grouping ?\nSome dbms allow to mark columns as C locale. 
I havent found this nor\nchartobin() function in PostgreSql.\nWill creating BinaryNullIfNot(dbkonto.objekt1, '+') function solve this ?\n\nAndrus.\n\nNew testcase:\n\nset search_path to firma2,public;\nSET work_mem = 2097151; -- 9 seconds\n-- SET work_mem = 1097151; -- 9 seconds\n--SET work_mem to default; -- 12 seconds\n\nexplain analyze SELECT\n CASE WHEN bilkaib.raha='EEK' THEN 0 ELSE bilkaib.id END,\n bilkaib.DB,\n CASE WHEN dbkonto.objekt1='+' THEN bilkaib.DBOBJEKT ELSE null END::\nCHAR(10) AS dbobjekt,\n CASE WHEN dbkonto.objekt2='+' THEN bilkaib.DB2OBJEKT ELSE null END::\nCHAR(10) AS db2objekt,\n CASE WHEN dbkonto.objekt3='+' THEN bilkaib.DB3OBJEKT ELSE null END::\nCHAR(10) AS db3objekt,\n CASE WHEN dbkonto.objekt4='+' THEN bilkaib.DB4OBJEKT ELSE null END::\nCHAR(10) AS db4objekt,\n CASE WHEN dbkonto.objekt5='+' THEN bilkaib.DB5OBJEKT ELSE null END::\nCHAR(10) AS db5objekt,\n CASE WHEN dbkonto.objekt6='+' THEN bilkaib.DB6OBJEKT ELSE null END::\nCHAR(10) AS db6objekt,\n CASE WHEN dbkonto.objekt7='+' THEN bilkaib.DB7OBJEKT ELSE null END::\nCHAR(10) AS db7objekt,\n CASE WHEN dbkonto.objekt8='+' THEN bilkaib.DB8OBJEKT ELSE null END::\nCHAR(10) AS db8objekt,\n CASE WHEN dbkonto.objekt9='+' THEN bilkaib.DB9OBJEKT ELSE null END::\nCHAR(10) AS db9objekt,\n bilkaib.CR,\n CASE WHEN crkonto.objekt1='+' THEN bilkaib.crOBJEKT ELSE null END::\nCHAR(10) AS crobjekt,\n CASE WHEN crkonto.objekt2='+' THEN bilkaib.cr2OBJEKT ELSE null END::\nCHAR(10) AS cr2objekt,\n CASE WHEN crkonto.objekt3='+' THEN bilkaib.cr3OBJEKT ELSE null END::\nCHAR(10) AS cr3objekt,\n CASE WHEN crkonto.objekt4='+' THEN bilkaib.cr4OBJEKT ELSE null END::\nCHAR(10) AS cr4objekt,\n CASE WHEN crkonto.objekt5='+' THEN bilkaib.cr5OBJEKT ELSE null END::\nCHAR(10) AS cr5objekt,\n CASE WHEN crkonto.objekt6='+' THEN bilkaib.cr6OBJEKT ELSE null END::\nCHAR(10) AS cr6objekt,\n CASE WHEN crkonto.objekt7='+' THEN bilkaib.cr7OBJEKT ELSE null END::\nCHAR(10) AS cr7objekt,\n CASE WHEN crkonto.objekt8='+' THEN bilkaib.cr8OBJEKT ELSE null END::\nCHAR(10) AS cr8objekt,\n CASE WHEN crkonto.objekt9='+' THEN bilkaib.cr9OBJEKT ELSE null END::\nCHAR(10) AS cr9objekt,\n bilkaib.RAHA,\n CASE WHEN crkonto.klienkaupa OR dbkonto.klienkaupa OR crkonto.tyyp IN\n('K','I') OR dbkonto.tyyp IN ('K','I')\n THEN bilkaib.KLIENT ELSE NULL END AS klient,\n\n bilkaib.EXCHRATE,\n\n CASE WHEN crkonto.klienkaupa OR dbkonto.klienkaupa\n OR crkonto.tyyp IN ('K','I') OR dbkonto.tyyp IN ('K','I')\n THEN\n klient.nimi ELSE NULL END AS kliendinim, -- 24.\n\n CAST(CASE WHEN crkonto.arvekaupa OR dbkonto.arvekaupa\n OR (bilkaib.cr<>'00' AND crkonto.tyyp='K')\n OR (bilkaib.db<>'00' AND dbkonto.tyyp='K')\n THEN bilkaib.doknr ELSE NULL END AS CHAR(25)) AS doknr\n\n ,bilkaib.ratediffer\n ,CASE WHEN bilkaib.raha='EEK' THEN DATE'20070101' ELSE bilkaib.kuupaev END\nAS kuupaev\n\n ,SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n where ( bilkaib.cr LIKE '112'||'%' OR bilkaib.db LIKE '112'||'%' ) AND\nbilkaib.kuupaev BETWEEN '2007-01-01' AND '2008-11-26'\n GROUP BY\n1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28\n\n\"GroupAggregate (cost=43403.38..52521.63 rows=41923 width=838) (actual\ntime=8083.171..8620.908 rows=577 loops=1)\"\n\" -> Sort (cost=43403.38..43508.19 rows=41923 width=838) (actual\ntime=8082.456..8273.259 rows=52156 loops=1)\"\n\" Sort Key: 
CASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN 0 ELSE\nbilkaib.id END, bilkaib.db, (CASE WHEN (dbkonto.objekt1 = '+'::bpchar) THEN\nbilkaib.dbobjekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt2 = '+'::bpchar) THEN bilkaib.db2objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt3 = '+'::bpchar) THEN\nbilkaib.db3objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt4 = '+'::bpchar) THEN bilkaib.db4objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt5 = '+'::bpchar) THEN\nbilkaib.db5objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt6 = '+'::bpchar) THEN bilkaib.db6objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt7 = '+'::bpchar) THEN\nbilkaib.db7objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(dbkonto.objekt8 = '+'::bpchar) THEN bilkaib.db8objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (dbkonto.objekt9 = '+'::bpchar) THEN\nbilkaib.db9objekt ELSE NULL::bpchar END)::character(10), bilkaib.cr, (CASE\nWHEN (crkonto.objekt1 = '+'::bpchar) THEN bilkaib.crobjekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt2 = '+'::bpchar) THEN\nbilkaib.cr2objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt3 = '+'::bpchar) THEN bilkaib.cr3objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt4 = '+'::bpchar) THEN\nbilkaib.cr4objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt5 = '+'::bpchar) THEN bilkaib.cr5objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt6 = '+'::bpchar) THEN\nbilkaib.cr6objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt7 = '+'::bpchar) THEN bilkaib.cr7objekt ELSE NULL::bpchar\nEND)::character(10), (CASE WHEN (crkonto.objekt8 = '+'::bpchar) THEN\nbilkaib.cr8objekt ELSE NULL::bpchar END)::character(10), (CASE WHEN\n(crkonto.objekt9 = '+'::bpchar) THEN bilkaib.cr9objekt ELSE NULL::bpchar\nEND)::character(10), bilkaib.raha, CASE WHEN ((crkonto.klienkaupa)::boolean\nOR (dbkonto.klienkaupa)::boolean OR (crkonto.tyyp = 'K'::bpchar) OR\n(crkonto.tyyp = 'I'::bpchar) OR (dbkonto.tyyp = 'K'::bpchar) OR\n(dbkonto.tyyp = 'I'::bpchar)) THEN bilkaib.klient ELSE NULL::bpchar END,\nbilkaib.exchrate, CASE WHEN ((crkonto.klienkaupa)::boolean OR\n(dbkonto.klienkaupa)::boolean OR (crkonto.tyyp = 'K'::bpchar) OR\n(crkonto.tyyp = 'I'::bpchar) OR (dbkonto.tyyp = 'K'::bpchar) OR\n(dbkonto.tyyp = 'I'::bpchar)) THEN klient.nimi ELSE NULL::bpchar END, (CASE\nWHEN ((crkonto.arvekaupa)::boolean OR (dbkonto.arvekaupa)::boolean OR\n((bilkaib.cr <> '00'::bpchar) AND (crkonto.tyyp = 'K'::bpchar)) OR\n((bilkaib.db <> '00'::bpchar) AND (dbkonto.tyyp = 'K'::bpchar))) THEN\nbilkaib.doknr ELSE NULL::bpchar END)::character(25), bilkaib.ratediffer,\nCASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN '2007-01-01'::date ELSE\nbilkaib.kuupaev END\"\n\" -> Hash Left Join (cost=936.48..40184.64 rows=41923 width=838)\n(actual time=47.409..2427.059 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=785.35..34086.74 rows=41923 width=764)\n(actual time=35.669..1414.794 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join (cost=764.26..33403.76 rows=48533\nwidth=712) (actual time=32.839..954.784 rows=52156 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Bitmap Heap Scan on bilkaib\n(cost=743.17..32616.41 rows=56185 width=660) (actual time=30.337..448.153\nrows=52156 
loops=1)\"\n\" Recheck Cond: ((cr ~~ '112%'::text) OR (db\n~~ '112%'::text))\"\n\" Filter: (((cr ~~ '112%'::text) OR (db ~~\n'112%'::text)) AND (kuupaev >= '2007-01-01'::date) AND (kuupaev <=\n'2008-11-26'::date))\"\n\" -> BitmapOr (cost=743.17..743.17\nrows=65862 width=0) (actual time=27.194..27.194 rows=0 loops=1)\"\n\" -> Bitmap Index Scan on\nbilkaib_cr_pattern_idx (cost=0.00..236.63 rows=20939 width=0) (actual\ntime=8.833..8.833 rows=21028 loops=1)\"\n\" Index Cond: ((cr ~>=~\n'112'::bpchar) AND (cr ~<~ '113'::bpchar))\"\n\" -> Bitmap Index Scan on\nbilkaib_db_pattern_idx (cost=0.00..506.54 rows=44923 width=0) (actual\ntime=18.345..18.345 rows=45426 loops=1)\"\n\" Index Cond: ((db ~>=~\n'112'::bpchar) AND (db ~<~ '113'::bpchar))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=66)\n(actual time=2.450..2.450 rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto\n(cost=0.00..20.49 rows=241 width=66) (actual time=0.014..1.232 rows=241\nloops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=66) (actual\ntime=2.799..2.799 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49\nrows=241 width=66) (actual time=0.029..1.536 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=90) (actual\ntime=11.661..11.661 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290\nwidth=90) (actual time=0.014..5.808 rows=1290 loops=1)\"\n\"Total runtime: 8634.630 ms\"\n\n", "msg_date": "Fri, 28 Nov 2008 19:58:59 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "On Fri, Nov 28, 2008 at 10:58 AM, Andrus <[email protected]> wrote:\n>\n> SET work_mem = 2097151 (this is max allowed value) or SET work_mem = 97151\n> decreases query time from 12 seconds to 9 seconds.\n\nSetting work_mem so high that it allocates memory that isn't there\nWILL slow down your queries, because the OS will then wind up swapping\nout one part of the swap to make room for another part. There are\nvalues between 100M and 2G. Run it with increasing work_mem from 100\nto 500 or so Meg and see if that helps. Keep an eye on vmstat 1 or\nsomething to see if your machine starts swapping out while running the\nquery. If it does you've overallocated memory and things will start\nto slow down a LOT.\n", "msg_date": "Fri, 28 Nov 2008 11:24:35 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "I'm not sure that postgres allocates the whole work_mem each time, and in any event, the OS won't try and allocate to RAM until the page is actually used. My experience is that oversized work_mem doesn't hurt until it is actually used. Else, my configuration with 1000 concurrent connections and work_mem = 1GB would have blown up. I don't have that much RAM + SWAP * overcommit. Of the 1000 connections, only 8 run queries that would ever need more than 2 or 3 MB of space to execute. 
Of course, one has to be certain what the client connections do for it to be very over sized, so I would not recommend the above in general.\n\n----------\nBack to this query:\n\nIn the query case shown, the explain analyze shows:\n\"GroupAggregate (cost=43403.38..52521.63 rows=41923 width=838) (actual\ntime=8083.171..8620.908 rows=577 loops=1)\"\n\nThus, the planner thought that it needed ~40K ish rows for results of ~800 bytes in size, hence an approximation of the required hash space is 80M. However, it returns only 577 rows, so the actual needed hash space is much smaller. This is a common problem I've seen -- the query planner has very conservative estimates for result row counts from any non-trivial filter condition / aggregate which leads to poor query plans.\n\nI'd be suprised if this query used more than 1MB total work_mem in reality for that last step if it used a hash. As it stands, sorting will actually use much more.\n\nI'm still not sure why the planner chose to sort rather than hash with oversized work_mem (is there an implied order in the query results I missed?). My guess is that this query can still get much faster if a hash is possible on the last part. It looks like the gain so far has more to do with sorting purely in memory which reduced the number of compares required. But that is just a guess.\n\n________________________________________\nFrom: Scott Marlowe [[email protected]]\nSent: Friday, November 28, 2008 10:24 AM\nTo: Andrus\nCc: Scott Carey; [email protected]\nSubject: Re: [PERFORM] Increasing GROUP BY CHAR columns speed\n\nOn Fri, Nov 28, 2008 at 10:58 AM, Andrus <[email protected]> wrote:\n>\n> SET work_mem = 2097151 (this is max allowed value) or SET work_mem = 97151\n> decreases query time from 12 seconds to 9 seconds.\n\nSetting work_mem so high that it allocates memory that isn't there\nWILL slow down your queries, because the OS will then wind up swapping\nout one part of the swap to make room for another part. There are\nvalues between 100M and 2G. Run it with increasing work_mem from 100\nto 500 or so Meg and see if that helps. Keep an eye on vmstat 1 or\nsomething to see if your machine starts swapping out while running the\nquery. If it does you've overallocated memory and things will start\nto slow down a LOT.\n", "msg_date": "Fri, 28 Nov 2008 11:51:07 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "Application should work with any server starting at 8.1 with any RAM size \n(probably starting at 1 GB).\n\nHow to find safe value which does not cause error in SET work_mem command ?\n\nIf I use 2 GB maybe this can cause excaption when running in server with 1 \nGB RAM where this setting may be not allowed.\n\nAndrus. \n\n", "msg_date": "Fri, 28 Nov 2008 21:57:43 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "I it seems that slowness is caused by grouping by column\n\nexchrate numeric(13,8)\n\nif this column is excluded, query takes 12 seconds\nif this column in present, query takes 27 (!) 
seconds.\nHow to fix this ?\n\nAndrus.\n\nset search_path to firma2,public;\nSET work_mem = 2097151 ;\nexplain analyze SELECT\n CASE WHEN bilkaib.raha='EEK' THEN 0 ELSE bilkaib.id END,\n bilkaib.DB,\n bilkaib.CR,\n bilkaib.RAHA, -- 12 sek\n bilkaib.EXCHRATE, -- 27 sec\n SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n WHERE --(bilkaib.cr LIKE ''||'%' OR bilkaib.db LIKE ''||'%')\n bilkaib.kuupaev BETWEEN '2008-01-01' AND '2008-12-31'\n GROUP BY 1,2,3,4,5\n\n\"GroupAggregate (cost=71338.72..79761.05 rows=240638 width=58) (actual \ntime=24570.085..27382.022 rows=217 loops=1)\"\n\" -> Sort (cost=71338.72..71940.31 rows=240638 width=58) (actual \ntime=24566.700..25744.006 rows=322202 loops=1)\"\n\" Sort Key: CASE WHEN (bilkaib.raha = 'EEK'::bpchar) THEN 0 ELSE \nbilkaib.id END, bilkaib.db, bilkaib.cr, bilkaib.raha, bilkaib.exchrate\"\n\" -> Hash Left Join (cost=193.31..49829.89 rows=240638 width=58) \n(actual time=17.072..9901.578 rows=322202 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=42.18..45624.00 rows=240638 width=74) \n(actual time=4.715..7151.111 rows=322202 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join (cost=21.09..41803.63 rows=278581 \nwidth=74) (actual time=2.306..4598.703 rows=322202 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Seq Scan on bilkaib (cost=0.00..37384.19 \nrows=322507 width=74) (actual time=0.075..1895.027 rows=322202 loops=1)\"\n\" Filter: ((kuupaev >= '2008-01-01'::date) \nAND (kuupaev <= '2008-12-31'::date))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) \n(actual time=2.193..2.193 rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto \n(cost=0.00..20.49 rows=241 width=14) (actual time=0.011..1.189 rows=241 \nloops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual \ntime=2.386..2.386 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49 \nrows=241 width=14) (actual time=0.020..1.394 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=16) (actual \ntime=12.319..12.319 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290 \nwidth=16) (actual time=0.032..6.979 rows=1290 loops=1)\"\n\"Total runtime: 27434.724 ms\"\n\n\nset search_path to firma2,public;\nSET work_mem = 2097151 ;\nexplain analyze SELECT\n CASE WHEN bilkaib.raha='EEK' THEN 0 ELSE bilkaib.id END,\n bilkaib.DB,\n bilkaib.CR,\n bilkaib.RAHA,\n SUM(bilkaib.summa)::numeric(14,2) AS summa\n from BILKAIB join KONTO CRKONTO ON bilkaib.cr=crkonto.kontonr AND\n crkonto.iseloom='A'\n join KONTO DBKONTO ON bilkaib.db=dbkonto.kontonr AND\n dbkonto.iseloom='A'\n left join klient on bilkaib.klient=klient.kood\n WHERE\n bilkaib.kuupaev BETWEEN '2008-01-01' AND '2008-12-31'\n GROUP BY 1,2,3,4\n\n\"HashAggregate (cost=52837.86..57049.03 rows=240638 width=50) (actual \ntime=11744.137..11745.578 rows=215 loops=1)\"\n\" -> Hash Left Join (cost=193.31..49829.89 rows=240638 width=50) (actual \ntime=17.330..9826.549 rows=322202 loops=1)\"\n\" Hash Cond: (\"outer\".klient = \"inner\".kood)\"\n\" -> Hash Join (cost=42.18..45624.00 rows=240638 width=66) (actual \ntime=4.804..7141.983 rows=322202 loops=1)\"\n\" Hash Cond: (\"outer\".cr = \"inner\".kontonr)\"\n\" -> Hash Join 
(cost=21.09..41803.63 rows=278581 width=66) \n(actual time=2.343..4600.683 rows=322202 loops=1)\"\n\" Hash Cond: (\"outer\".db = \"inner\".kontonr)\"\n\" -> Seq Scan on bilkaib (cost=0.00..37384.19 \nrows=322507 width=66) (actual time=0.081..1939.376 rows=322202 loops=1)\"\n\" Filter: ((kuupaev >= '2008-01-01'::date) AND \n(kuupaev <= '2008-12-31'::date))\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual \ntime=2.207..2.207 rows=241 loops=1)\"\n\" -> Seq Scan on konto dbkonto (cost=0.00..20.49 \nrows=241 width=14) (actual time=0.014..1.179 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=20.49..20.49 rows=241 width=14) (actual \ntime=2.426..2.426 rows=241 loops=1)\"\n\" -> Seq Scan on konto crkonto (cost=0.00..20.49 \nrows=241 width=14) (actual time=0.029..1.444 rows=241 loops=1)\"\n\" Filter: (iseloom = 'A'::bpchar)\"\n\" -> Hash (cost=147.90..147.90 rows=1290 width=16) (actual \ntime=12.477..12.477 rows=1290 loops=1)\"\n\" -> Seq Scan on klient (cost=0.00..147.90 rows=1290 \nwidth=16) (actual time=0.034..7.081 rows=1290 loops=1)\"\n\"Total runtime: 11748.066 ms\"\n\n", "msg_date": "Fri, 28 Nov 2008 22:30:53 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "On Fri, Nov 28, 2008 at 12:51 PM, Scott Carey <[email protected]> wrote:\n> I'm not sure that postgres allocates the whole work_mem each time, and in any event, the OS won't try and allocate to RAM until the page is actually used. My experience is that oversized work_mem doesn't hurt until it is actually used. Else, my configuration with 1000 concurrent connections and work_mem = 1GB would have blown up. I don't have that much RAM + SWAP * overcommit. Of the 1000 connections, only 8 run queries that would ever need more than 2 or 3 MB of space to execute. Of course, one has to be certain what the client connections do for it to be very over sized, so I would not recommend the above in general.\n\nThat's kinda like saying I have a revolver with 100 chambers and only\none bullet, and it seems perfectly safe to put it to my head and keep\npulling the trigger.\n\nOf course pg doesn't allocate the whole amount every time. It\nallocates what it needs, up to the max you allow. by setting it to 1G\nit's quite possible that eventually enough queries will be running\nthat need a fair bit of work_mem and suddenly bang, your server is in\na swap storm and goes non-responsive.\n\nIt's far better to set it to something reasonable, like 4 or 8 Meg,\nthen for the individual queries that need more set it higher at run\ntime.\n", "msg_date": "Fri, 28 Nov 2008 13:34:29 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": ">I'm still not sure why the planner chose to sort rather than hash with \n>oversized work_mem (is there an implied order in the query results I \n>missed?).\n\nGroup by contains decimal column exchrate. Maybe pg is not capable to use \nhash with numeric datatype.\n\n> My guess is that this query can still get much faster if a hash is \n> possible on the last part. It looks like the gain so far has more to do \n> with sorting purely in memory which reduced the number of compares \n> required. 
But that is just a guess.\n\nI fixed this by adding cast to :::float\n\nbilkaib.exchrate:::float\n\nIn this case query is much faster.\nHopefully this will not affect to result since numeric(13,8) can casted to \nfloat without data loss.\n\nAndrus. \n\n", "msg_date": "Sat, 29 Nov 2008 19:43:50 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" }, { "msg_contents": "On Sat, Nov 29, 2008 at 6:43 PM, Andrus <[email protected]> wrote:\n>> I'm still not sure why the planner chose to sort rather than hash with\n>> oversized work_mem (is there an implied order in the query results I\n>> missed?).\n>\n> Group by contains decimal column exchrate. Maybe pg is not capable to use\n> hash with numeric datatype.\n\nIt is in 8.3. I think sorting was improved dramatically since 8.1 as well.\n\n> I fixed this by adding cast to :::float\n>\n> bilkaib.exchrate:::float\n>\n> In this case query is much faster.\n> Hopefully this will not affect to result since numeric(13,8) can casted to\n> float without data loss.\n\nThat's not true. Even pretty simple values like 1.1 cannot be\nrepresented precisely in a float. It would display properly though\nwhich might be all you're concerned with here. I'm not sure whether\nthat's true for all values in numeric(13,8) though\n\nDo you really need to be grouping on so many columns? If they're\nnormally all the same perhaps you can do two queries, one which\nfetches the common values without any group by, just a simple\naggregate, and a second which groups by all these columns but only for\nthe few exceptional records.\n\nYou could avoid the collation support on the char() columns by casting\nthem to bytea first. That might be a bit of a headache though.\n\n-- \ngreg\n", "msg_date": "Sun, 30 Nov 2008 14:02:51 +0100", "msg_from": "\"Greg Stark\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" } ]
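Scott Carey's advice above — raise work_mem only for this query rather than in the config file — and Andrus's follow-up question about issuing SET work_mem before the query and SET work_mem TO DEFAULT afterwards are usually handled with SET LOCAL inside a transaction, which reverts automatically at COMMIT or ROLLBACK. A minimal sketch, assuming the schema from this thread and reusing the simplified five-column query posted above; the 102400 (kB) value is purely illustrative and, per Scott Marlowe's warning, should be sized to the machine's free RAM rather than to the 2 GB maximum:

BEGIN;
SET LOCAL work_mem = 102400;  -- about 100 MB, given in kB as 8.1 expects; reverts when the transaction ends

SELECT CASE WHEN bilkaib.raha = 'EEK' THEN 0 ELSE bilkaib.id END,
       bilkaib.db,
       bilkaib.cr,
       bilkaib.raha,
       bilkaib.exchrate,
       SUM(bilkaib.summa)::numeric(14,2) AS summa
  FROM bilkaib
  JOIN konto crkonto ON bilkaib.cr = crkonto.kontonr AND crkonto.iseloom = 'A'
  JOIN konto dbkonto ON bilkaib.db = dbkonto.kontonr AND dbkonto.iseloom = 'A'
  LEFT JOIN klient ON bilkaib.klient = klient.kood
 WHERE bilkaib.kuupaev BETWEEN '2008-01-01' AND '2008-12-31'
 GROUP BY 1, 2, 3, 4, 5;

COMMIT;

Outside an explicit transaction, SET work_mem = 102400 before the query followed by RESET work_mem afterwards has much the same effect, RESET restoring whatever the session's configured default is.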
[ { "msg_contents": ">I it seems that slowness is caused by grouping by column\n> \n> exchrate numeric(13,8)\n\nexchrate has different values in few rows.\nIt has usually value 0\nIn this sample query it is always 0.\n\nI tried not change exchrate with \n\nnullif( bilkaib.EXCHRATE,0) \n\nbut this does not up speed query, no idea why.\n\nAndrus.\n", "msg_date": "Fri, 28 Nov 2008 22:43:57 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Increasing GROUP BY CHAR columns speed" } ]
[ { "msg_contents": "I am struggeling with the following query which fetches a random subset \nof 200 questions that matches certain tags within certain languages. \nHowever, the query takes forever to evaluate, even though I have a \n\"limit 200\" appended. Any ideas on how to optimize it?\n\nQUERY: ================================================\n\nSELECT distinct q.question_id\n FROM question_tags qt, questions q\n WHERE q.question_id = qt.question_id\n AND q.STATUS = 1\n AND not q.author_id = 105\n AND ((qt.language_id = 5 and qt.tag_id in \n(1101,917,734,550,367,183)) or (qt.language_id = 4 and qt.tag_id in \n(856,428)) or (qt.language_id =\n 3 and qt.tag_id in (1156,1028,899,771,642,514,385,257,128)) or \n(qt.language_id = 2 and qt.tag_id in \n(1193,1101,1009,917,826,734,642,550,458,367,275,183,91)))\n and q.question_id not in (413)\n LIMIT 200\n\nEXPLAIN ANALYZE: =========================================\n\n Limit (cost=1.50..1267.27 rows=200 width=4) (actual \ntime=278.169..880.934 rows=200 loops=1)\n -> Unique (cost=1.50..317614.50 rows=50185 width=4) (actual \ntime=278.165..880.843 rows=200 loops=1)\n -> Merge Join (cost=1.50..317489.04 rows=50185 width=4) \n(actual time=278.162..880.579 rows=441 loops=1)\n Merge Cond: (qt.question_id = q.question_id)\n -> Index Scan using question_tags_question_id on \nquestion_tags qt (cost=0.00..301256.96 rows=82051 width=4) (actual \ntime=24.171..146.811 rows=6067 loops=1)\n Filter: (((language_id = 5) AND (tag_id = ANY \n('{1101,917,734,550,367,183}'::integer[]))) OR ((language_id = 4) AND \n(tag_id = ANY ('{856,428}'::integer[]))) OR ((language_id = 3) AND \n(tag_id = ANY ('{1156,1028,899,771,642,514,385,257,128}'::integer[]))) \nOR ((language_id = 2) AND (tag_id = ANY \n('{1193,1101,1009,917,826,734,642,550,458,367,275,183,91}'::integer[]))))\n -> Index Scan using questions_pkey on questions q \n(cost=0.00..15464.12 rows=83488 width=4) (actual time=222.956..731.737 \nrows=1000 loops=1)\n Filter: ((q.author_id <> 105) AND (q.question_id \n<> 413) AND (q.status = 1))\n Total runtime: 881.152 ms\n(9 rows)\n\n", "msg_date": "Sun, 30 Nov 2008 19:45:11 +0100", "msg_from": "tmp <[email protected]>", "msg_from_op": true, "msg_subject": "Query optimization" }, { "msg_contents": "Le Sunday 30 November 2008 19:45:11 tmp, vous avez écrit :\n> I am struggeling with the following query which fetches a random subset\n> of 200 questions that matches certain tags within certain languages.\n> However, the query takes forever to evaluate, even though I have a\n> \"limit 200\" appended. 
Any ideas on how to optimize it?\n>\n> QUERY: ================================================\n>\n> SELECT distinct q.question_id\n> FROM question_tags qt, questions q\n> WHERE q.question_id = qt.question_id\n> AND q.STATUS = 1\n> AND not q.author_id = 105\n> AND ((qt.language_id = 5 and qt.tag_id in\n> (1101,917,734,550,367,183)) or (qt.language_id = 4 and qt.tag_id in\n> (856,428)) or (qt.language_id =\n> 3 and qt.tag_id in (1156,1028,899,771,642,514,385,257,128)) or\n> (qt.language_id = 2 and qt.tag_id in\n> (1193,1101,1009,917,826,734,642,550,458,367,275,183,91)))\n> and q.question_id not in (413)\n> LIMIT 200\n>\n> EXPLAIN ANALYZE: =========================================\n>\n> Limit (cost=1.50..1267.27 rows=200 width=4) (actual\n> time=278.169..880.934 rows=200 loops=1)\n> -> Unique (cost=1.50..317614.50 rows=50185 width=4) (actual\n> time=278.165..880.843 rows=200 loops=1)\n> -> Merge Join (cost=1.50..317489.04 rows=50185 width=4)\n> (actual time=278.162..880.579 rows=441 loops=1)\n> Merge Cond: (qt.question_id = q.question_id)\n> -> Index Scan using question_tags_question_id on\n> question_tags qt (cost=0.00..301256.96 rows=82051 width=4) (actual\n> time=24.171..146.811 rows=6067 loops=1)\n> Filter: (((language_id = 5) AND (tag_id = ANY\n> ('{1101,917,734,550,367,183}'::integer[]))) OR ((language_id = 4) AND\n> (tag_id = ANY ('{856,428}'::integer[]))) OR ((language_id = 3) AND\n> (tag_id = ANY ('{1156,1028,899,771,642,514,385,257,128}'::integer[])))\n> OR ((language_id = 2) AND (tag_id = ANY\n> ('{1193,1101,1009,917,826,734,642,550,458,367,275,183,91}'::integer[]))))\n> -> Index Scan using questions_pkey on questions q\n> (cost=0.00..15464.12 rows=83488 width=4) (actual time=222.956..731.737\n> rows=1000 loops=1)\n> Filter: ((q.author_id <> 105) AND (q.question_id\n> <> 413) AND (q.status = 1))\n> Total runtime: 881.152 ms\n> (9 rows)\n\nFirst, because of the distinct, the limit 200 wont reduce the work to be done \na lot : it will still have to sort for the unique. Only when everything is \nsorted will it take only the first 200 records. And anyhow it seems there are \nonly 441 rows before doing the distinct, so, at least for this query, it \nwon't change a lot the times.\n\nThen it seems to me that you may try to create composed indexes, because there \nis a lot of filtering after the index scans (that is if you want the query to \nbe faster). \nMaybe (q.author_id,q.status).\n\nFor qt you may try (qt.language_id,qt.tag_id)...\n\nHope it helps\n\nCheers\n", "msg_date": "Sun, 30 Nov 2008 20:37:39 +0100", "msg_from": "Marc Cousin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Query optimization" }, { "msg_contents": "\n> I am struggeling with the following query which fetches a random subset \n> of 200 questions that matches certain tags within certain languages. \n> However, the query takes forever to evaluate, even though I have a \n> \"limit 200\" appended. 
Any ideas on how to optimize it?\n>\n> QUERY: ================================================\n>\n> SELECT distinct q.question_id\n> FROM question_tags qt, questions q\n> WHERE q.question_id = qt.question_id\n> AND q.STATUS = 1\n> AND not q.author_id = 105\n> AND ((qt.language_id = 5 and qt.tag_id in \n> (1101,917,734,550,367,183)) or (qt.language_id = 4 and qt.tag_id in \n> (856,428)) or (qt.language_id =\n> 3 and qt.tag_id in (1156,1028,899,771,642,514,385,257,128)) or \n> (qt.language_id = 2 and qt.tag_id in \n> (1193,1101,1009,917,826,734,642,550,458,367,275,183,91)))\n> and q.question_id not in (413)\n> LIMIT 200\n>\n> EXPLAIN ANALYZE: =========================================\n>\n> Limit (cost=1.50..1267.27 rows=200 width=4) (actual \n> time=278.169..880.934 rows=200 loops=1)\n> -> Unique (cost=1.50..317614.50 rows=50185 width=4) (actual \n> time=278.165..880.843 rows=200 loops=1)\n> -> Merge Join (cost=1.50..317489.04 rows=50185 width=4) \n> (actual time=278.162..880.579 rows=441 loops=1)\n> Merge Cond: (qt.question_id = q.question_id)\n> -> Index Scan using question_tags_question_id on \n> question_tags qt (cost=0.00..301256.96 rows=82051 width=4) (actual \n> time=24.171..146.811 rows=6067 loops=1)\n> Filter: (((language_id = 5) AND (tag_id = ANY \n> ('{1101,917,734,550,367,183}'::integer[]))) OR ((language_id = 4) AND \n> (tag_id = ANY ('{856,428}'::integer[]))) OR ((language_id = 3) AND \n> (tag_id = ANY ('{1156,1028,899,771,642,514,385,257,128}'::integer[]))) \n> OR ((language_id = 2) AND (tag_id = ANY \n> ('{1193,1101,1009,917,826,734,642,550,458,367,275,183,91}'::integer[]))))\n> -> Index Scan using questions_pkey on questions q \n> (cost=0.00..15464.12 rows=83488 width=4) (actual time=222.956..731.737 \n> rows=1000 loops=1)\n> Filter: ((q.author_id <> 105) AND (q.question_id \n> <> 413) AND (q.status = 1))\n> Total runtime: 881.152 ms\n> (9 rows)\n\n\tAn index on (language_id,tag_id) should be the first thing to try.\n\tOr perhaps even (status,language_id,tag_id) or (language_id, tad_id, \nstatus) (but that depends on the stats on \"status\" column).\n\n\tAn index on author_id will probably not be useful for this particular \nquery because your condition is \"author_id != constant\".\n\n\tAlso CLUSTER question_tags on (language_id, tad_id).\n\n\tWhat is the database size versus RAM ? You must have a hell of a lot of \nquestions to make this slow... (or bloat ?)\n\n\n", "msg_date": "Mon, 01 Dec 2008 14:24:38 +0100", "msg_from": "PFC <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Query optimization" } ]
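Both replies point to the same first step — a composite index on question_tags covering (language_id, tag_id), so the per-language tag lists stop being applied as a filter after the index scan — with CLUSTER as an optional follow-up. A minimal sketch; the index name is made up, the column names come from the query above, and the ANALYZE is simply good practice after reorganizing:

-- Composite index suggested by both Marc Cousin and PFC.
CREATE INDEX question_tags_lang_tag_idx ON question_tags (language_id, tag_id);

-- PFC's optional follow-up: physically order the table to match.
-- CLUSTER rewrites the table under an exclusive lock, so schedule it accordingly.
CLUSTER question_tags USING question_tags_lang_tag_idx;   -- 8.3+ syntax
-- CLUSTER question_tags_lang_tag_idx ON question_tags;   -- pre-8.3 syntax

ANALYZE question_tags;

As PFC notes, an extra index on questions.author_id is unlikely to help because the condition is author_id <> 105; Marc Cousin's (author_id, status) suggestion is worth testing but may well be skipped by the planner for this query.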
[ { "msg_contents": "explain analyze SELECT sum(1)\n FROM dok JOIN rid USING (dokumnr)\n WHERE dok.kuupaev>='2008-05-01'\n and\n ( (\n dok.doktyyp IN\n('V','G','Y','K','I','T','D','N','H','M','E','B','A','R','C','F','J','Q')\n AND CASE WHEN NOT dok.objrealt OR dok.doktyyp='I' THEN dok.yksus\nELSE rid.kuluobjekt END LIKE 'AEGVIIDU%'\n )\n OR\n ( dok.doktyyp IN ('O','S','I','U','D','P')\n AND CASE WHEN dok.objrealt THEN rid.kuluobjekt ELSE dok.sihtyksus\nEND LIKE 'AEGVIIDU%'\n )\n )\n\n\"Aggregate (cost=369240.67..369240.68 rows=1 width=0) (actual\ntime=41135.557..41135.560 rows=1 loops=1)\"\n\" -> Hash Join (cost=96614.24..369229.39 rows=4508 width=0) (actual\ntime=5859.704..40912.979 rows=59390 loops=1)\"\n\" Hash Cond: (\"outer\".dokumnr = \"inner\".dokumnr)\"\n\" Join Filter: ((((\"inner\".doktyyp = 'V'::bpchar) OR (\"inner\".doktyyp\n= 'G'::bpchar) OR (\"inner\".doktyyp = 'Y'::bpchar) OR (\"inner\".doktyyp =\n'K'::bpchar) OR (\"inner\".doktyyp = 'I'::bpchar) OR (\"inner\".doktyyp =\n'T'::bpchar) OR (\"inner\".doktyyp = 'D'::bpchar) OR (\"inner\".doktyyp =\n'N'::bpchar) OR (\"inner\".doktyyp = 'H'::bpchar) OR (\"inner\".doktyyp =\n'M'::bpchar) OR (\"inner\".doktyyp = 'E'::bpchar) OR (\"inner\".doktyyp =\n'B'::bpchar) OR (\"inner\".doktyyp = 'A'::bpchar) OR (\"inner\".doktyyp =\n'R'::bpchar) OR (\"inner\".doktyyp = 'C'::bpchar) OR (\"inner\".doktyyp =\n'F'::bpchar) OR (\"inner\".doktyyp = 'J'::bpchar) OR (\"inner\".doktyyp =\n'Q'::bpchar)) AND (CASE WHEN ((NOT (\"inner\".objrealt)::boolean) OR\n(\"inner\".doktyyp = 'I'::bpchar)) THEN \"inner\".yksus ELSE \"outer\".kuluobjekt\nEND ~~ 'AEGVIIDU%'::text)) OR (((\"inner\".doktyyp = 'O'::bpchar) OR\n(\"inner\".doktyyp = 'S'::bpchar) OR (\"inner\".doktyyp = 'I'::bpchar) OR\n(\"inner\".doktyyp = 'U'::bpchar) OR (\"inner\".doktyyp = 'D'::bpchar) OR\n(\"inner\".doktyyp = 'P'::bpchar)) AND (CASE WHEN (\"inner\".objrealt)::boolean\nTHEN \"outer\".kuluobjekt ELSE \"inner\".sihtyksus END ~~ 'AEGVIIDU%'::text)))\"\n\" -> Seq Scan on rid (cost=0.00..129911.53 rows=3299853 width=18)\n(actual time=0.039..17277.888 rows=3299777 loops=1)\"\n\" -> Hash (cost=92983.97..92983.97 rows=336110 width=38) (actual\ntime=3965.478..3965.478 rows=337455 loops=1)\"\n\" -> Bitmap Heap Scan on dok (cost=1993.66..92983.97\nrows=336110 width=38) (actual time=135.810..2389.703 rows=337455 loops=1)\"\n\" Recheck Cond: (kuupaev >= '2008-05-01'::date)\"\n\" Filter: ((doktyyp = 'V'::bpchar) OR (doktyyp =\n'G'::bpchar) OR (doktyyp = 'Y'::bpchar) OR (doktyyp = 'K'::bpchar) OR\n(doktyyp = 'I'::bpchar) OR (doktyyp = 'T'::bpchar) OR (doktyyp =\n'D'::bpchar) OR (doktyyp = 'N'::bpchar) OR (doktyyp = 'H'::bpchar) OR\n(doktyyp = 'M'::bpchar) OR (doktyyp = 'E'::bpchar) OR (doktyyp =\n'B'::bpchar) OR (doktyyp = 'A'::bpchar) OR (doktyyp = 'R'::bpchar) OR\n(doktyyp = 'C'::bpchar) OR (doktyyp = 'F'::bpchar) OR (doktyyp =\n'J'::bpchar) OR (doktyyp = 'Q'::bpchar) OR (doktyyp = 'O'::bpchar) OR\n(doktyyp = 'S'::bpchar) OR (doktyyp = 'I'::bpchar) OR (doktyyp =\n'U'::bpchar) OR (doktyyp = 'D'::bpchar) OR (doktyyp = 'P'::bpchar))\"\n\" -> Bitmap Index Scan on dok_kuupaev_idx\n(cost=0.00..1993.66 rows=347618 width=0) (actual time=97.881..97.881\nrows=337770 loops=1)\"\n\" Index Cond: (kuupaev >= '2008-05-01'::date)\"\n\"Total runtime: 41136.348 ms\"\n\n8.1.4 Db is analyzed, default_statistics_target is 40.\nPostgreSql still choices seq scan over rid.\n\nThis query can optimized as follows:\n\n1. kuupaev >= '2008-05-01' index can reduce number of scanned rows 10 times\n(to 330000)\n2. 
AEGVIIDU% can reduce number of rows 6 times (to 60000)\n\nHow to force pg to use indexes for those conditions ?\n\nThis query can be executed against different shops groups (int this case\nthere is other value than AEGVIIDU) and for different date.\n\nThere are 6 different shop groups containing roughly same number or records\neach.\nSo using index on AEGVIIDU% can decrease number of scanned rows 6 times.\nUsually 90% of dok records contain 'Y' in dok.doktyyp column and\ndok.objrealt is false for those records.\n\nIs it possible to use come functional index or other method to speed it ?\n\nrid.kuluobjekt, dok.yksus and dok.sihtyksus types are char(10).\n\nThere are indexes\n\ndok(yksus bpchar_pattern_ops)\ndok(sihtyksus bpchar_pattern_ops)\n\nIs it possible to re-write query that it uses those indexes or create some\nother indexes?\nUsing 8.1.4, us-en locale, utf-8 db encoding.\nselect column list is removed from sample.\n\nAndrus. \n\n", "msg_date": "Sun, 30 Nov 2008 22:17:34 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Seq scan over 3.3 millions of rows instead of using date and pattern\n\tindexes" } ]
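As a short illustration of why the existing pattern-ops indexes are not used by the query above: the planner can only match an index against a predicate written directly on the indexed column, and the CASE expressions here mix columns from dok and rid, so neither index on dok.yksus or dok.sihtyksus (nor any single-table expression index) can serve them. A hedged sketch, with invented index names mirroring the indexes the post says already exist, of the kind of predicate those indexes can serve:

-- The pattern-ops indexes described above (names invented here):
CREATE INDEX dok_yksus_pattern_idx
    ON dok (yksus bpchar_pattern_ops);

CREATE INDEX dok_sihtyksus_pattern_idx
    ON dok (sihtyksus bpchar_pattern_ops);

-- With bpchar_pattern_ops a left-anchored LIKE is index-assisted even
-- under a non-C locale, as long as the column appears directly:
EXPLAIN ANALYZE
SELECT count(*)
  FROM dok
 WHERE kuupaev >= '2008-05-01'
   AND yksus LIKE 'AEGVIIDU%';

A common workaround for the original form is therefore to split the OR of CASE expressions into a UNION ALL of branches whose WHERE clauses reference dok.yksus, dok.sihtyksus and rid.kuluobjekt directly, so each branch can use its own index.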
[ { "msg_contents": "I've tryied 4 times to post this message to pgsql-performance without\nsuccess... No return, even an error...\n\nBelow is my problem; a query that perform bad when using a filter almost equal.\n\nThe problem (8.2.11):\nEXPLAIN ANALYZE\nSELECT\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth,\nSUM(hours)\nFROM\n(\n SELECT\n r.name AS resource,\n ARRAY(SELECT ca.cat_name FROM egw_categories ca WHERE\nca.cat_id::text = ANY (string_to_array(c.cal_category, ','))) AS\ncategory,\n cu.cal_user_id AS userid,\n c.cal_title AS title,\n EXTRACT(year FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start *\nINTERVAL '1 second')) AS year,\n EXTRACT(month FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start\n* INTERVAL '1 second')) AS month,\n (cd.cal_end - cd.cal_start) * INTERVAL '1 second' AS hours\n FROM egw_cal_user cr\n JOIN egw_cal_dates cd\n ON\n cd.cal_id = cr.cal_id AND\n cr.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n JOIN egw_resources r\n ON\n r.res_id = cr.cal_user_id AND\n cr.cal_user_type = 'r'\n JOIN egw_categories ct\n ON ct.cat_id = r.cat_id\n JOIN egw_cal c\n ON c.cal_id = cd.cal_id\n LEFT JOIN egw_cal_user cu\n ON\n cu.cal_id = cr.cal_id AND\n cu.cal_user_type = 'u' AND\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n WHERE\n ct.cat_main = 133 AND\n r.res_id = 8522 AND\n cu.cal_user_id = 278827 AND\n EXTRACT(year FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) = 2008 AND\n EXTRACT(month FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) IN (10,11,12)\n) foo\nGROUP BY\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth\nORDER BY\nyear,\nmonth,\nresource,\ncategory,\nuserid,\ntitle;\n\n\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nGroupAggregate (cost=125.20..183.67 rows=1 width=180) (actual\ntime=475276.902..475277.130 rows=4 loops=1)\n -> Sort (cost=125.20..125.20 rows=1 width=180) (actual\ntime=475276.822..475276.920 rows=64 loops=1)\n Sort Key: date_part('year'::text, ('1969-12-31\n21:00:00-03'::timestamp with time zone + ((cd.cal_start)::double\nprecision * '00:00:01'::interval))), date_part('month'::text,\n('1969-12-31 21:00:00-03'::timestamp with time zone +\n((cd.cal_start)::double precision * '00:00:01'::interval))), r.name,\n(subplan), cu.cal_user_id, c.cal_title\n -> Nested Loop (cost=0.00..125.19 rows=1 width=180) (actual\ntime=22188.889..475275.364 rows=64 loops=1)\n Join Filter: ((cr.cal_recur_date = COALESCE((subplan),\n0::bigint)) AND (cu.cal_recur_date = COALESCE((subplan), 0::bigint)))\n -> Nested Loop (cost=0.00..42.00 rows=1 width=192)\n(actual time=0.535..2788.339 rows=511222 loops=1)\n -> Nested Loop (cost=0.00..38.71 rows=1\nwidth=171) (actual time=0.121..90.021 rows=1105 loops=1)\n -> Nested Loop (cost=0.00..35.84 rows=1\nwidth=120) (actual time=0.104..18.855 rows=1105 loops=1)\n -> Nested Loop (cost=0.00..16.55\nrows=1 width=112) (actual time=0.046..0.058 rows=1 loops=1)\n -> Index Scan using\negw_resources_pkey on egw_resources r 
(cost=0.00..8.27 rows=1\nwidth=116) (actual time=0.025..0.029 rows=1 loops=1)\n Index Cond: (res_id = 8522)\n -> Index Scan using\negw_categories_pkey on egw_categories ct (cost=0.00..8.27 rows=1\nwidth=4) (actual time=0.012..0.015 rows=1 loops=1)\n Index Cond: (ct.cat_id = r.cat_id)\n Filter: (cat_main = 133)\n -> Index Scan using idx_egw_0001 on\negw_cal_user cr (cost=0.00..19.23 rows=4 width=21) (actual\ntime=0.044..14.742 rows=1105 loops=1)\n Index Cond: (((r.res_id)::text =\n(cr.cal_user_id)::text) AND ((cr.cal_user_type)::text = 'r'::text))\n -> Index Scan using egw_cal_pkey on egw_cal\nc (cost=0.00..2.86 rows=1 width=51) (actual time=0.053..0.056 rows=1\nloops=1105)\n Index Cond: (cr.cal_id = c.cal_id)\n -> Index Scan using egw_cal_user_pkey on\negw_cal_user cu (cost=0.00..3.28 rows=1 width=21) (actual\ntime=0.017..0.978 rows=463 loops=1105)\n Index Cond: ((cu.cal_id = cr.cal_id) AND\n((cu.cal_user_type)::text = 'u'::text) AND ((cu.cal_user_id)::text =\n'278827'::text))\n -> Index Scan using egw_cal_dates_pkey on egw_cal_dates\ncd (cost=0.00..8.18 rows=1 width=20) (actual time=0.014..0.570\nrows=30 loops=511222)\n Index Cond: (cd.cal_id = cr.cal_id)\n Filter: ((date_part('year'::text, ('1970-01-01\n00:00:00'::timestamp without time zone + ((cal_start)::double\nprecision * '00:00:01'::interval))) = 2008::double precision) AND\n(date_part('month'::text, ('1970-01-01 00:00:00'::timestamp without\ntime zone + ((cal_start)::double precision * '00:00:01'::interval))) =\nANY ('{10,11,12}'::double precision[])))\n SubPlan\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.004..0.006 rows=1 loops=29520)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.005..0.006 rows=1 loops=15158976)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Bitmap Heap Scan on egw_categories ca\n(cost=30.59..58.41 rows=10 width=50) (actual time=0.024..0.025 rows=1\nloops=64)\n Recheck Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\n -> Bitmap Index Scan on idx_egw_0005\n(cost=0.00..30.59 rows=10 width=0) (actual time=0.016..0.016 rows=1\nloops=64)\n Index Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\nTotal runtime: 475277.646 ms\n(35 registros)\n\nGood one:\nEXPLAIN ANALYZE\nSELECT\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth,\nSUM(hours)\nFROM\n(\n SELECT\n r.name AS resource,\n ARRAY(SELECT ca.cat_name FROM egw_categories ca WHERE\nca.cat_id::text = ANY (string_to_array(c.cal_category, ','))) AS\ncategory,\n cu.cal_user_id AS userid,\n c.cal_title AS title,\n EXTRACT(year FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start *\nINTERVAL '1 second')) AS year,\n EXTRACT(month FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start\n* INTERVAL '1 second')) AS month,\n (cd.cal_end - cd.cal_start) * INTERVAL '1 second' AS hours\n FROM egw_cal_user cr\n JOIN egw_cal_dates cd\n ON\n cd.cal_id = cr.cal_id AND\n cr.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n JOIN egw_resources r\n ON\n r.res_id = cr.cal_user_id AND\n cr.cal_user_type = 'r'\n JOIN egw_categories ct\n ON ct.cat_id = r.cat_id\n JOIN egw_cal c\n ON c.cal_id = cd.cal_id\n LEFT JOIN egw_cal_user cu\n ON\n cu.cal_id = cr.cal_id AND\n cu.cal_user_type = 'u' 
AND\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n WHERE\n ct.cat_main = 133 AND\n cu.cal_user_id = 278827 AND\n EXTRACT(year FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) = 2008 AND\n EXTRACT(month FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) IN (10,11,12)\n) foo\nGROUP BY\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth\nORDER BY\nyear,\nmonth,\nresource,\ncategory,\nuserid,\ntitle;\n\n\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nGroupAggregate (cost=257.62..316.09 rows=1 width=180) (actual\ntime=135.885..136.112 rows=4 loops=1)\n -> Sort (cost=257.62..257.62 rows=1 width=180) (actual\ntime=135.822..135.923 rows=64 loops=1)\n Sort Key: date_part('year'::text, ('1969-12-31\n21:00:00-03'::timestamp with time zone + ((cd.cal_start)::double\nprecision * '00:00:01'::interval))), date_part('month'::text,\n('1969-12-31 21:00:00-03'::timestamp with time zone +\n((cd.cal_start)::double precision * '00:00:01'::interval))), r.name,\n(subplan), cu.cal_user_id, c.cal_title\n -> Nested Loop (cost=40.72..257.61 rows=1 width=180) (actual\ntime=91.067..134.580 rows=64 loops=1)\n -> Nested Loop (cost=40.72..190.89 rows=1 width=145)\n(actual time=90.951..131.857 rows=64 loops=1)\n -> Nested Loop (cost=32.44..166.02 rows=1\nwidth=132) (actual time=33.022..127.492 rows=196 loops=1)\n Join Filter: ((r.res_id)::text =\n(cr.cal_user_id)::text)\n -> Nested Loop (cost=32.44..110.02 rows=1\nwidth=33) (actual time=32.927..82.418 rows=196 loops=1)\n -> Bitmap Heap Scan on egw_cal_dates\ncd (cost=24.16..35.42 rows=3 width=20) (actual time=3.834..10.337\nrows=3871 loops=1)\n Recheck Cond:\n((date_part('year'::text, ('1970-01-01 00:00:00'::timestamp without\ntime zone + ((cal_start)::double precision * '00:00:01'::interval))) =\n2008::double precision) AND (date_part('month'::text, ('1970-01-01\n00:00:00'::timestamp without time zone + ((cal_start)::double\nprecision * '00:00:01'::interval))) = ANY ('{10,11,12}'::double\nprecision[])))\n -> BitmapAnd\n(cost=24.16..24.16 rows=3 width=0) (actual time=3.795..3.795 rows=0\nloops=1)\n -> Bitmap Index Scan on\nidx_egw_0002 (cost=0.00..5.99 rows=230 width=0) (actual\ntime=1.972..1.972 rows=14118 loops=1)\n Index Cond:\n(date_part('year'::text, ('1970-01-01 00:00:00'::timestamp without\ntime zone + ((cal_start)::double precision * '00:00:01'::interval))) =\n2008::double precision)\n -> Bitmap Index Scan on\nidx_egw_0006 (cost=0.00..17.92 rows=687 width=0) (actual\ntime=1.789..1.789 rows=12799 loops=1)\n Index Cond:\n(date_part('month'::text, ('1970-01-01 00:00:00'::timestamp without\ntime zone + ((cal_start)::double precision * '00:00:01'::interval))) =\nANY ('{10,11,12}'::double precision[]))\n -> Index Scan using egw_cal_user_pkey\non egw_cal_user cr (cost=8.28..16.57 rows=1 width=21) (actual\ntime=0.005..0.005 rows=0 loops=3871)\n Index Cond: ((cd.cal_id =\ncr.cal_id) AND (cr.cal_recur_date = COALESCE((subplan), 0::bigint))\nAND ((cr.cal_user_type)::text = 'r'::text))\n SubPlan\n -> Index Scan using\negw_cal_dates_pkey on egw_cal_dates icd 
(cost=0.00..8.28 rows=1\nwidth=0) (actual time=0.005..0.006 rows=1 loops=3871)\n Index Cond: ((cal_id =\n$2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using\negw_cal_dates_pkey on egw_cal_dates icd (cost=0.00..8.28 rows=1\nwidth=0) (never executed)\n Index Cond: ((cal_id =\n$2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Nested Loop (cost=0.00..55.80 rows=13\nwidth=112) (actual time=0.013..0.197 rows=13 loops=196)\n -> Index Scan using idx_egw_0003 on\negw_categories ct (cost=0.00..13.16 rows=3 width=4) (actual\ntime=0.004..0.032 rows=15 loops=196)\n Index Cond: (cat_main = 133)\n -> Index Scan using idx_egw_0004 on\negw_resources r (cost=0.00..14.08 rows=11 width=116) (actual\ntime=0.003..0.005 rows=1 loops=2940)\n Index Cond: (ct.cat_id = r.cat_id)\n -> Index Scan using egw_cal_user_pkey on\negw_cal_user cu (cost=8.28..16.58 rows=1 width=21) (actual\ntime=0.007..0.008 rows=0 loops=196)\n Index Cond: ((cu.cal_id = cr.cal_id) AND\n(cu.cal_recur_date = COALESCE((subplan), 0::bigint)) AND\n((cu.cal_user_type)::text = 'u'::text) AND ((cu.cal_user_id)::text =\n'278827'::text))\n SubPlan\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.005..0.007 rows=1 loops=196)\n Index Cond: ((cal_id = $2) AND\n(cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (never executed)\n Index Cond: ((cal_id = $2) AND\n(cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using egw_cal_pkey on egw_cal c\n(cost=0.00..8.27 rows=1 width=51) (actual time=0.004..0.006 rows=1\nloops=64)\n Index Cond: (c.cal_id = cd.cal_id)\n SubPlan\n -> Bitmap Heap Scan on egw_categories ca\n(cost=30.59..58.41 rows=10 width=50) (actual time=0.013..0.015 rows=1\nloops=64)\n Recheck Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\n -> Bitmap Index Scan on idx_egw_0005\n(cost=0.00..30.59 rows=10 width=0) (actual time=0.008..0.008 rows=1\nloops=64)\n Index Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\nTotal runtime: 136.702 ms\n(46 registros)\n\nAnother good one:\nEXPLAIN ANALYZE\nSELECT\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth,\nSUM(hours)\nFROM\n(\n SELECT\n r.name AS resource,\n ARRAY(SELECT ca.cat_name FROM egw_categories ca WHERE\nca.cat_id::text = ANY (string_to_array(c.cal_category, ','))) AS\ncategory,\n cu.cal_user_id AS userid,\n c.cal_title AS title,\n EXTRACT(year FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start *\nINTERVAL '1 second')) AS year,\n EXTRACT(month FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start\n* INTERVAL '1 second')) AS month,\n (cd.cal_end - cd.cal_start) * INTERVAL '1 second' AS hours\n FROM egw_cal_user cr\n JOIN egw_cal_dates cd\n ON\n cd.cal_id = cr.cal_id AND\n cr.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n JOIN egw_resources r\n ON\n r.res_id = cr.cal_user_id AND\n cr.cal_user_type = 'r'\n JOIN egw_categories ct\n ON ct.cat_id = r.cat_id\n JOIN egw_cal c\n ON c.cal_id = cd.cal_id\n LEFT JOIN egw_cal_user cu\n ON\n cu.cal_id = cr.cal_id AND\n cu.cal_user_type = 'u' AND\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n WHERE\n ct.cat_main = 133 AND\n r.res_id = 8522 AND\n EXTRACT(year FROM (TIMESTAMP 'epoch' + 
cd.cal_start * INTERVAL '1\nsecond')) = 2008 AND\n EXTRACT(month FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) IN (10,11,12)\n) foo\nGROUP BY\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth\nORDER BY\nyear,\nmonth,\nresource,\ncategory,\nuserid,\ntitle;\n\n\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nGroupAggregate (cost=132.17..190.64 rows=1 width=180) (actual\ntime=978.502..979.174 rows=18 loops=1)\n -> Sort (cost=132.17..132.17 rows=1 width=180) (actual\ntime=978.448..978.686 rows=160 loops=1)\n Sort Key: date_part('year'::text, ('1969-12-31\n21:00:00-03'::timestamp with time zone + ((cd.cal_start)::double\nprecision * '00:00:01'::interval))), date_part('month'::text,\n('1969-12-31 21:00:00-03'::timestamp with time zone +\n((cd.cal_start)::double precision * '00:00:01'::interval))), r.name,\n(subplan), cu.cal_user_id, c.cal_title\n -> Nested Loop Left Join (cost=8.28..132.16 rows=1\nwidth=180) (actual time=0.441..974.424 rows=160 loops=1)\n -> Nested Loop (cost=0.00..48.86 rows=1 width=179)\n(actual time=0.341..966.004 rows=149 loops=1)\n Join Filter: (cr.cal_recur_date =\nCOALESCE((subplan), 0::bigint))\n -> Nested Loop (cost=0.00..38.71 rows=1\nwidth=171) (actual time=0.123..19.732 rows=1105 loops=1)\n -> Nested Loop (cost=0.00..35.84 rows=1\nwidth=120) (actual time=0.106..5.790 rows=1105 loops=1)\n -> Nested Loop (cost=0.00..16.55\nrows=1 width=112) (actual time=0.046..0.060 rows=1 loops=1)\n -> Index Scan using\negw_resources_pkey on egw_resources r (cost=0.00..8.27 rows=1\nwidth=116) (actual time=0.025..0.029 rows=1 loops=1)\n Index Cond: (res_id = 8522)\n -> Index Scan using\negw_categories_pkey on egw_categories ct (cost=0.00..8.27 rows=1\nwidth=4) (actual time=0.012..0.016 rows=1 loops=1)\n Index Cond: (ct.cat_id = r.cat_id)\n Filter: (cat_main = 133)\n -> Index Scan using idx_egw_0001 on\negw_cal_user cr (cost=0.00..19.23 rows=4 width=21) (actual\ntime=0.044..2.297 rows=1105 loops=1)\n Index Cond: (((r.res_id)::text =\n(cr.cal_user_id)::text) AND ((cr.cal_user_type)::text = 'r'::text))\n -> Index Scan using egw_cal_pkey on egw_cal\nc (cost=0.00..2.86 rows=1 width=51) (actual time=0.004..0.006 rows=1\nloops=1105)\n Index Cond: (c.cal_id = cr.cal_id)\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates cd (cost=0.00..1.86 rows=1 width=20) (actual\ntime=0.013..0.526 rows=28 loops=1105)\n Index Cond: (cd.cal_id = cr.cal_id)\n Filter: ((date_part('year'::text,\n('1970-01-01 00:00:00'::timestamp without time zone +\n((cal_start)::double precision * '00:00:01'::interval))) =\n2008::double precision) AND (date_part('month'::text, ('1970-01-01\n00:00:00'::timestamp without time zone + ((cal_start)::double\nprecision * '00:00:01'::interval))) = ANY ('{10,11,12}'::double\nprecision[])))\n SubPlan\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.005..0.006 rows=1 loops=30929)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using egw_cal_user_pkey on egw_cal_user\ncu (cost=8.28..16.57 rows=1 width=21) (actual time=0.007..0.009\nrows=1 loops=149)\n Index Cond: ((cu.cal_id = cr.cal_id) AND\n(cu.cal_recur_date 
= COALESCE((subplan), 0::bigint)) AND\n((cu.cal_user_type)::text = 'u'::text))\n SubPlan\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.004..0.006 rows=1 loops=149)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (never executed)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n SubPlan\n -> Bitmap Heap Scan on egw_categories ca\n(cost=30.59..58.41 rows=10 width=50) (actual time=0.013..0.014 rows=1\nloops=160)\n Recheck Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\n -> Bitmap Index Scan on idx_egw_0005\n(cost=0.00..30.59 rows=10 width=0) (actual time=0.008..0.008 rows=1\nloops=160)\n Index Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\nTotal runtime: 979.685 ms\n(40 registros)\n\nAlmost the same thing on 8.3.5 (another hardware):\nEXPLAIN ANALYZE\nSELECT\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth,\nSUM(hours)\nFROM\n(\n SELECT\n r.name AS resource,\n ARRAY(SELECT ca.cat_name FROM egw_categories ca WHERE\nca.cat_id::text = ANY (string_to_array(c.cal_category, ','))) AS\ncategory,\n cu.cal_user_id AS userid,\n c.cal_title AS title,\n EXTRACT(year FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start *\nINTERVAL '1 second')) AS year,\n EXTRACT(month FROM (TIMESTAMP WITH TIME ZONE 'epoch' + cd.cal_start\n* INTERVAL '1 second')) AS month,\n (cd.cal_end - cd.cal_start) * INTERVAL '1 second' AS hours\n FROM egw_cal_user cr\n JOIN egw_cal_dates cd\n ON\n cd.cal_id = cr.cal_id AND\n cr.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n JOIN egw_resources r\n ON\n r.res_id::text = cr.cal_user_id AND\n cr.cal_user_type = 'r'\n JOIN egw_categories ct\n ON ct.cat_id = r.cat_id\n JOIN egw_cal c\n ON c.cal_id = cd.cal_id\n LEFT JOIN egw_cal_user cu\n ON\n cu.cal_id = cr.cal_id AND\n cu.cal_user_type = 'u' AND\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM\negw_cal_dates icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start =\ncd.cal_start AND icd.cal_start <> 0), 0)\n WHERE\n ct.cat_main = 133 AND\n r.res_id = 8522 AND\n cu.cal_user_id = '278827' AND\n EXTRACT(year FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) = 2008 AND\n EXTRACT(month FROM (TIMESTAMP 'epoch' + cd.cal_start * INTERVAL '1\nsecond')) IN (10,11,12)\n) foo\nGROUP BY\nresource,\ncategory,\nuserid,\ntitle,\nyear,\nmonth\nORDER BY\nyear,\nmonth,\nresource,\ncategory,\nuserid,\ntitle;\n\n\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nGroupAggregate (cost=131.17..215.53 rows=1 width=175) (actual\ntime=3179750.197..3179750.747 rows=4 loops=1)\n -> Sort (cost=131.17..131.18 rows=1 width=175) (actual\ntime=3179749.897..3179750.007 rows=64 loops=1)\n Sort Key: (date_part('year'::text, ('1969-12-31\n21:00:00-03'::timestamp with time zone + ((cd.cal_start)::double\nprecision * '00:00:01'::interval)))), (date_part('month'::text,\n('1969-12-31 21:00:00-03'::timestamp with time zone 
+\n((cd.cal_start)::double precision * '00:00:01'::interval)))), r.name,\n((subplan)), cu.cal_user_id, c.cal_title\n Sort Method: quicksort Memory: 33kB\n -> Nested Loop (cost=0.01..131.16 rows=1 width=175) (actual\ntime=155178.467..3179744.010 rows=64 loops=1)\n Join Filter: ((cr.cal_recur_date = COALESCE((subplan),\n0::bigint)) AND (cu.cal_recur_date = COALESCE((subplan), 0::bigint)))\n -> Nested Loop (cost=0.01..29.25 rows=1 width=187)\n(actual time=0.274..55246.217 rows=511222 loops=1)\n -> Nested Loop (cost=0.01..27.36 rows=1\nwidth=140) (actual time=0.255..13234.191 rows=511222 loops=1)\n -> Nested Loop (cost=0.01..24.85 rows=1\nwidth=122) (actual time=0.226..126.224 rows=1093 loops=1)\n -> Nested Loop (cost=0.01..16.57\nrows=1 width=126) (actual time=0.192..20.013 rows=1093 loops=1)\n -> Index Scan using\negw_resources_pkey on egw_resources r (cost=0.00..8.27 rows=1\nwidth=118) (actual time=0.055..0.061 rows=1 loops=1)\n Index Cond: (res_id = 8522)\n -> Index Scan using\nidx_egw_0001 on egw_cal_user cr (cost=0.01..8.28 rows=1 width=18)\n(actual time=0.093..9.802 rows=1093 loops=1)\n Index Cond:\n(((cr.cal_user_id)::text = (r.res_id)::text) AND\n((cr.cal_user_type)::text = 'r'::text))\n -> Index Scan using\negw_categories_pkey on egw_categories ct (cost=0.00..8.27 rows=1\nwidth=4) (actual time=0.064..0.077 rows=1 loops=1093)\n Index Cond: (ct.cat_id = r.cat_id)\n Filter: (ct.cat_main = 133)\n -> Index Scan using egw_cal_user_pkey on\negw_cal_user cu (cost=0.00..2.49 rows=1 width=18) (actual\ntime=0.138..5.611 rows=468 loops=1093)\n Index Cond: ((cu.cal_id = cr.cal_id)\nAND ((cu.cal_user_type)::text = 'u'::text) AND ((cu.cal_user_id)::text\n= '278827'::text))\n -> Index Scan using egw_cal_pkey on egw_cal c\n(cost=0.00..1.89 rows=1 width=47) (actual time=0.044..0.052 rows=1\nloops=511222)\n Index Cond: (c.cal_id = cr.cal_id)\n -> Index Scan using egw_cal_dates_pkey on egw_cal_dates\ncd (cost=0.00..1.02 rows=1 width=20) (actual time=0.108..4.281\nrows=30 loops=511222)\n Index Cond: (cd.cal_id = cr.cal_id)\n Filter: ((date_part('year'::text, ('1970-01-01\n00:00:00'::timestamp without time zone + ((cd.cal_start)::double\nprecision * '00:00:01'::interval))) = 2008::double precision) AND\n(date_part('month'::text, ('1970-01-01 00:00:00'::timestamp without\ntime zone + ((cd.cal_start)::double precision *\n'00:00:01'::interval))) = ANY ('{10,11,12}'::double precision[])))\n SubPlan\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.029..0.035 rows=1 loops=29520)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Index Scan using egw_cal_dates_pkey on\negw_cal_dates icd (cost=0.00..8.28 rows=1 width=0) (actual\ntime=0.034..0.040 rows=1 loops=15158976)\n Index Cond: ((cal_id = $2) AND (cal_start = $1))\n Filter: (cal_start <> 0)\n -> Bitmap Heap Scan on egw_categories ca\n(cost=31.29..84.29 rows=100 width=48) (actual time=0.127..0.131 rows=1\nloops=64)\n Recheck Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\n -> Bitmap Index Scan on idx_egw_0005\n(cost=0.00..31.27 rows=100 width=0) (actual time=0.097..0.097 rows=1\nloops=64)\n Index Cond: ((cat_id)::text = ANY\n(string_to_array(($0)::text, ','::text)))\nTotal runtime: 3179751.510 ms\n(36 registros)\n\nAny suggestions? 
Is it a BUG?\n\nRegards,\n\n--\nDaniel Cristian Cruz\nクルズ クリスチアン ダニエル\n", "msg_date": "Mon, 1 Dec 2008 11:26:42 -0200", "msg_from": "\"Daniel Cristian Cruz\" <[email protected]>", "msg_from_op": true, "msg_subject": "Not so simple query and a half million loop" }, { "msg_contents": "Maybe someone would like to see it without broken lines (I do).\n\nRegards,\n\n2008/12/1 Daniel Cristian Cruz <[email protected]>:\n> I've tryied 4 times to post this message to pgsql-performance without\n> success... No return, even an error...\n>\n> Below is my problem; a query that perform bad when using a filter almost equal.\n>\n> ...\n\n-- \nDaniel Cristian Cruz\nクルズ クリスチアン ダニエル", "msg_date": "Mon, 1 Dec 2008 13:21:15 -0200", "msg_from": "\"Daniel Cristian Cruz\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Not so simple query and a half million loop" }, { "msg_contents": "No one knows why I got \"actual time=0.014..0.570 rows=30 loops=511222\"\nand \"actual time=0.005..0.006 rows=1 loops=15158976\"?\n\nWith:\n cu.cal_user_type = 'u' AND\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM egw_cal_dates\nicd WHERE icd.cal_id = cd.cal_id AND icd.cal_start = cd.cal_start AND\nicd.cal_start <> 0), 0)\n ~ 450 seconds\n\nWith\n cu.cal_user_type = 'u'\n ~ 130 ms\n\nWith\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM egw_cal_dates\nicd WHERE icd.cal_id = cd.cal_id AND icd.cal_start = cd.cal_start AND\nicd.cal_start <> 0), 0)\n ~ 1 second\n\nI've tryied to increase statistics to maximum, with no success.\n\nThis is the eGroupware database schema, with a few records.\n\nRegards,\n\n2008/12/1 Daniel Cristian Cruz <[email protected]>:\n> Maybe someone would like to see it without broken lines (I do).\n>\n> Regards,\n>\n> 2008/12/1 Daniel Cristian Cruz <[email protected]>:\n>> I've tryied 4 times to post this message to pgsql-performance without\n>> success... No return, even an error...\n>>\n>> Below is my problem; a query that perform bad when using a filter almost equal.\n>>\n>> ...\n>\n> --\n> Daniel Cristian Cruz\n> クルズ クリスチアン ダニエル\n>\n\n\n\n-- \nDaniel Cristian Cruz\nクルズ クリスチアン ダニエル\n", "msg_date": "Tue, 2 Dec 2008 08:10:14 -0200", "msg_from": "\"Daniel Cristian Cruz\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Not so simple query and a half million loop" }, { "msg_contents": "Hi, all!\n\nNo one knows why I got \"actual time=0.014..0.570 rows=30 loops=511222\"\nand \"actual time=0.005..0.006 rows=1 loops=15158976\"?\n\nWith:\n cu.cal_user_type = 'u' AND\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM egw_cal_dates\nicd WHERE icd.cal_id = cd.cal_id AND icd.cal_start = cd.cal_start AND\nicd.cal_start <> 0), 0)\n ~ 450 seconds\n\nWith\n cu.cal_user_type = 'u'\n ~ 130 ms\n\nWith\n cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM egw_cal_dates\nicd WHERE icd.cal_id = cd.cal_id AND icd.cal_start = cd.cal_start AND\nicd.cal_start <> 0), 0)\n ~ 1 second\n\nI've tryied to increase statistics to maximum, with no success.\n\nThis is the eGroupware database schema, with a few records.\n\nThe complete queries and plan folows.\n\n---------- Forwarded message ----------\nFrom: Daniel Cristian Cruz <[email protected]>\nDate: 2008/12/1\nSubject: Not so simple query and a half million loop\nTo: pgsql-admin <[email protected]>\n\n\nI've tryied 4 times to post this message to pgsql-performance without\nsuccess... 
No return, even an error...\n\n-- \nDaniel Cristian Cruz\nクルズ クリスチアン ダニエル", "msg_date": "Tue, 2 Dec 2008 14:02:35 -0200", "msg_from": "\"Daniel Cristian Cruz\" <[email protected]>", "msg_from_op": true, "msg_subject": "Fwd: Not so simple query and a half million loop" }, { "msg_contents": "Daniel Cristian Cruz escribi�:\n> No one knows why I got \"actual time=0.014..0.570 rows=30 loops=511222\"\n> and \"actual time=0.005..0.006 rows=1 loops=15158976\"?\n> \n> With:\n> cu.cal_user_type = 'u' AND\n> cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM egw_cal_dates\n> icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start = cd.cal_start AND\n> icd.cal_start <> 0), 0)\n> ~ 450 seconds\n\nI don't know, but why are you using such an expression instead of an\nouter join?\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nPostgreSQL Replication, Consulting, Custom Development, 24x7 support\n", "msg_date": "Tue, 2 Dec 2008 13:07:37 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Not so simple query and a half million loop" }, { "msg_contents": "Because of a Bad Model...\n\nWhen it is a single event egw_cal_dates always has a record with cal_start =\n0.\n\nWhen it is a recurrent event, egw_cal_dates has a record with cal_start = 0\nand a record with cal_start equal to recurrent start.\n\nWith this construction it returns correctly calendar exceptions...\n\nIt's somewhat complex but it returns slowly ok.\n\nRegards,\n\n\"Alvaro Herrera\" <[email protected]> escreveu:\n> Daniel Cristian Cruz escribió:\n>> No one knows why I got \"actual time=0.014..0.570 rows=30 loops=511222\"\n>> and \"actual time=0.005..0.006 rows=1 loops=15158976\"?\n>> \n>> With:\n>> cu.cal_user_type = 'u' AND\n>> cu.cal_recur_date = COALESCE((SELECT cd.cal_start FROM egw_cal_dates\n>> icd WHERE icd.cal_id = cd.cal_id AND icd.cal_start = cd.cal_start AND\n>> icd.cal_start <> 0), 0)\n>> ~ 450 seconds\n> \n> I don't know, but why are you using such an expression instead of an\n> outer join?\n> \n> -- \n> Alvaro Herrera \nhttp://www.CommandPrompt.com/\n> PostgreSQL Replication, Consulting, Custom Development, 24x7 support\n> \n> -- \n> Sent via pgsql-admin mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-admin\n> \n\n--\n<span style=\"color: #000080\">Daniel Cristian Cruz\n</span>Administrador de Banco de Dados\nDireção Regional - Núcleo de Tecnologia da Informação\nSENAI - SC\nTelefone: 48-3239-1422 (ramal 1422)\n\n\n\n", "msg_date": "Tue, 2 Dec 2008 14:36:51 -0200", "msg_from": "DANIEL CRISTIAN CRUZ <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Not so simple query and a half million loop" }, { "msg_contents": "Daniel Cristian Cruz wrote:\n\n> No one knows why I got \"actual time=0.014..0.570 rows=30 loops=511222\"\n> and \"actual time=0.005..0.006 rows=1 loops=15158976\"?\n\nYou've already tried increasing the stats targets and ANALYZEing, so:\n\nPlease post the output of VACUUM ANALYZE VERBOSE on the tables of interest.\n\n> I've tryied 4 times to post this message to pgsql-performance without\n> success... No return, even an error...\n\nWell, the message arrives. 
However, many people ignore anything with\n\"Fwd:\" in it on a mailing list, since it's someone lazily resending a\nmessage by clicking on the forward button, so they've probably seen the\nmessage before.\n\n--\nCraig Ringer\n", "msg_date": "Fri, 19 Dec 2008 10:25:29 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Fwd: Not so simple query and a half million loop" } ]
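A minimal sketch of the outer-join rewrite Alvaro is hinting at, using the aliases from the posted query. It assumes (cal_id, cal_start) is unique in egw_cal_dates, which the egw_cal_dates_pkey scans in the plans suggest, so the LEFT JOIN returns the same value as the correlated COALESCE subselect:

SELECT cr.cal_id,
       cr.cal_user_id,
       cd.cal_start
  FROM egw_cal_user cr
  JOIN egw_cal_dates cd
    ON cd.cal_id = cr.cal_id
  LEFT JOIN egw_cal_dates icd          -- replaces the correlated subselect
    ON icd.cal_id = cd.cal_id
   AND icd.cal_start = cd.cal_start
   AND icd.cal_start <> 0
 WHERE cr.cal_recur_date = COALESCE(icd.cal_start, 0);

The same join could also replace the second COALESCE subselect used in the LEFT JOIN to cu, which is the condition behind the inner index scan executed 15,158,976 times in the slow plans.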
[ { "msg_contents": "Hi there,\n\nWe've recently started seeing some context switch storm issues on our\nprimary Postgres database, and I was wondering if anyone had\nencountered similar issues or had any ideas as to what could be\ncausing these issues.\n\nThe machine configuration is:\n\n8xIntel Xeon Harpertown 5430 (2.66GHz)\n32Gb of RAM\n10xRAID10 (15k RPM, SAS) for Postgres\nRHEL 5.2 (2.6.18-92.1.10.el5) + deadline scheduler\n\nWe're running Postgres 8.3.4.\n\nSome postgresql.conf settings:\n\nshared_buffers = 10922MB\neffective_cache_size = 19200MB\ndefault_statistics_target = 100\n\nThe database is about 150Gb in size (according to pg_database_size.)\n\nOur workload is probably something like 98% reads / 2% writes. Most of\nour queries are fast, short-lived SELECTs across 5 tables.\n\nSome performance numbers during \"normal\" times and when the CS storm\nis in progress:\n\nNormal load:\n\nReq/s - 3326 - 3630\nAverage query runtime - 4729 us - 7415 us\ncount(*) from pg_locks - 10 - 200\nContext switches/s - 12k - 21k\n\nDuring CS storm:\n\nReq/s - 2362 - 3054\nAverage query runtime - 20731 us - 103387 us\ncount(*) from pg_locks - 1000 - 1400\nContext switches/s - 38k - 55k\n\nDuring the CS storm period, 99% of locks are granted = t, mode =\nAccessShareLock across the 5 most commonly read tables (and their\nindexes). In one sample of pg_locks, there was only one\nRowExclusiveLock on a less frequently read table.\n\nWe used to use a 16-way Intel Xeon Tigerton in this machine, but\nPostgres would basically become unresponsive under 120k - 200k context\nswitches/s, so we switched to the 8-way Harpertown. The problems\ndisappeared, but they've now come back. :)\n\nI asked Gavin S and Neil C about this issue before mailing the list --\nGavin said this was a known issue which was hard to reproduce, and\nNeil said that most (all known?) context switch issues were fixed in\n8.2+.\n\nWe can pretty much reproduce this consistently, though it doesn't\nhappen *all* the time (maybe 2-4 hours every week).\n\nPostgres is the only thing running on the machine -- and at the time\nof these CS spikes autovacuum is not running and there was no\ncheckpoint in progress.\n\nPlease let me know if any further information is needed, or if there's\nanything I can do to try and gain more insight into the cause of these\nCS storms.\n\nThanks!\n\nRegards,\nOmar\n", "msg_date": "Tue, 2 Dec 2008 19:51:06 +1100", "msg_from": "\"Omar Kilani\" <[email protected]>", "msg_from_op": true, "msg_subject": "Context switch storms" }, { "msg_contents": ">>> \"Omar Kilani\" <[email protected]> wrote: \n \n> During CS storm:\n> count(*) from pg_locks - 1000 - 1400\n \nWhat do you have for max_connections?\n \nWith the hardware and load you describe, I would guess you would limit\ncontext switching and see best performance with a connection pool that\nqueues requests, keeping the actual connections to the database around\n30. You only have so many resources available; having a large number\nof queries all contending for them is less efficient than having just\nenough queries active to keep them all busy. I have typically seen\nthe plateau fall off to degradation at or before (CPU count * 2) +\n(spindle count).\n \n-Kevin\n", "msg_date": "Fri, 05 Dec 2008 16:42:03 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Context switch storms" } ]
[ { "msg_contents": "Has anyone benchmarked this controller (PCIe/4x, 512 MB BBC)? We try to \nuse it with 8x SATA 1TB drives in RAID-5 mode under Linux, and measure \nstrange values. An individual drive is capable of delivering 91 MB/sec \nsequential read performance, and we get values ~102MB/sec out of a \n8-drive RAID5, seems to be ridiculous slow. Write performance seems to \nbe much better, ~300 MB /sec - seems ok to me.\n\nI guess I must be doing something wrong, I cannot believe that a 500 ᅵ \ncontroller is delivering such poor performance.\n\n\n", "msg_date": "Tue, 02 Dec 2008 10:22:40 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": ">>> Mario Weilguni <[email protected]> wrote: \n> Has anyone benchmarked this controller (PCIe/4x, 512 MB BBC)? We try\nto \n> use it with 8x SATA 1TB drives in RAID-5 mode under Linux, and\nmeasure \n> strange values. An individual drive is capable of delivering 91\nMB/sec \n> sequential read performance, and we get values ~102MB/sec out of a \n> 8-drive RAID5, seems to be ridiculous slow. Write performance seems\nto \n> be much better, ~300 MB /sec - seems ok to me.\n \nWhat's your stripe size?\n \n-Kevin\n", "msg_date": "Fri, 05 Dec 2008 16:48:23 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA\n\tdrives?" }, { "msg_contents": "On Tue, Dec 2, 2008 at 2:22 AM, Mario Weilguni <[email protected]> wrote:\n> Has anyone benchmarked this controller (PCIe/4x, 512 MB BBC)? We try to use\n> it with 8x SATA 1TB drives in RAID-5 mode under Linux, and measure strange\n> values. An individual drive is capable of delivering 91 MB/sec sequential\n> read performance, and we get values ~102MB/sec out of a 8-drive RAID5, seems\n> to be ridiculous slow. Write performance seems to be much better, ~300 MB\n> /sec - seems ok to me.\n>\n> I guess I must be doing something wrong, I cannot believe that a 500 €\n> controller is delivering such poor performance.\n\nA few suggestions... Try to find the latest driver for your card, try\nusing the card as nothing but a caching controller and run your RAID\non software in linux (or whatever IS you're on). Test a 2 drive\nRAID-0 to see what kind of performance increase you get. If you can't\ndd a big file off of a RAID-0 at about 2x the rate of a single drive\nthen something IS wrong with it. Try RAID 10. Try RAID-1 sets on the\ncontroller and RAID 0 over that in software.\n", "msg_date": "Fri, 5 Dec 2008 16:00:53 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "> >>> Mario Weilguni <[email protected]> wrote:\n> > strange values. An individual drive is capable of delivering 91\n> > MB/sec\n>\n> > sequential read performance, and we get values ~102MB/sec out of a\n> > 8-drive RAID5, seems to be ridiculous slow. 
\n\n\nWhat command are you using to test the reads?\n\nSome recommendations to try:\n\n1) /sbin/blockdev --setra 2048 device (where device is the partition or LVM \nvolume)\n\n2) Use XFS, and make sure your stripe settings match the RAID.\n\nHaving said that, 102MB/sec sounds really low for any modern controller with \n8 drives, regardless of tuning or filesystem choice.\n\n-- \nCorporations will ingest natural resources and defecate garbage until all \nresources are depleted, debt can no longer be repaid and our money becomes \nworthless - Jay Hanson\n", "msg_date": "Fri, 5 Dec 2008 15:30:01 -0800", "msg_from": "Alan Hodgson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Fri, 5 Dec 2008, Alan Hodgson wrote:\n\n> 1) /sbin/blockdev --setra 2048 device (where device is the partition or \n> LVM volume)\n\nNormally, when I see write speed dramatically faster than write, it does \nmean that something about the read-ahead is set wrong. While I don't have \none to check, it looks to me like the P400 does its own read-ahead though, \nwhich may be more effective to tweak than what Linux does. I'd suggest \ntaking a look at the settings with HP's admin utility and see how it's set \nfor that and for allocation of RAM to the read cache. I've seen some \nRAID5 configs that put too much caching on the write side by default to \ncompensate for the deficiencies of that RAID level, and you have to push \nsome of that back toward the read side to balance it out right.\n\n> 2) Use XFS, and make sure your stripe settings match the RAID.\n\nXFS has good performance, but I can't get over how many system failure \ncorruption reports I hear about it. In any case, there's no reason this \nsystem shouldn't perform fine on ext3.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Fri, 5 Dec 2008 19:29:49 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Fri, Dec 5, 2008 at 5:29 PM, Greg Smith <[email protected]> wrote:\n\n> XFS has good performance, but I can't get over how many system failure\n> corruption reports I hear about it. In any case, there's no reason this\n> system shouldn't perform fine on ext3.\n\nFor simple testing you can take the file system out of the equation by\njust using dd to / from the raw partition. I have a feeling this HP\nRAID controller is just a low end piece o crap is the problem. I seem\nto remember references to the P800 series controller being quite a bit\nbetter than this one. But that was a while back.\n", "msg_date": "Fri, 5 Dec 2008 18:39:34 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Scott Marlowe schrieb:\n> On Tue, Dec 2, 2008 at 2:22 AM, Mario Weilguni <[email protected]> wrote:\n> \n>> Has anyone benchmarked this controller (PCIe/4x, 512 MB BBC)? We try to use\n>> it with 8x SATA 1TB drives in RAID-5 mode under Linux, and measure strange\n>> values. An individual drive is capable of delivering 91 MB/sec sequential\n>> read performance, and we get values ~102MB/sec out of a 8-drive RAID5, seems\n>> to be ridiculous slow. 
Write performance seems to be much better, ~300 MB\n>> /sec - seems ok to me.\n>>\n>> I guess I must be doing something wrong, I cannot believe that a 500 �\n>> controller is delivering such poor performance.\n>> \n>\n> A few suggestions... Try to find the latest driver for your card, try\n> using the card as nothing but a caching controller and run your RAID\n> on software in linux (or whatever IS you're on). Test a 2 drive\n> RAID-0 to see what kind of performance increase you get. If you can't\n> dd a big file off of a RAID-0 at about 2x the rate of a single drive\n> then something IS wrong with it. Try RAID 10. Try RAID-1 sets on the\n> controller and RAID 0 over that in software.\n>\n> \n\nI've already tried Softraid with individual drives, performs much \nbetter. However, it's no option to use softraid, so I'm stuck. The card \nhas the latest firmware installed, and there are no drivers needed, \nthey're already included in the linux kernel.\n\nI still think we must be doing something wrong here, I googled the \ncontroller and Linux, and did not find anything indicating a problem. \nThe HP SmartArray series is quite common, so a lot of users would have \nthe same problem.\n\nThanks!\n", "msg_date": "Tue, 09 Dec 2008 13:10:11 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Kevin Grittner schrieb:\n>>>> Mario Weilguni <[email protected]> wrote: \n>>>> \n>> Has anyone benchmarked this controller (PCIe/4x, 512 MB BBC)? We try\n>> \n> to \n> \n>> use it with 8x SATA 1TB drives in RAID-5 mode under Linux, and\n>> \n> measure \n> \n>> strange values. An individual drive is capable of delivering 91\n>> \n> MB/sec \n> \n>> sequential read performance, and we get values ~102MB/sec out of a \n>> 8-drive RAID5, seems to be ridiculous slow. Write performance seems\n>> \n> to \n> \n>> be much better, ~300 MB /sec - seems ok to me.\n>> \n> \n> What's your stripe size?\n> \n> -Kevin\n> \nWe used the default settings, it's 64k. Might a bigger value help here?\n", "msg_date": "Tue, 09 Dec 2008 13:15:21 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA\tdrives?" }, { "msg_contents": "Alan Hodgson schrieb:\n>>>>> Mario Weilguni <[email protected]> wrote:\n>>>>> \n>>> strange values. An individual drive is capable of delivering 91\n>>> MB/sec\n>>> \n>>> sequential read performance, and we get values ~102MB/sec out of a\n>>> 8-drive RAID5, seems to be ridiculous slow. \n>>> \n>\n>\n> What command are you using to test the reads?\n>\n> Some recommendations to try:\n>\n> 1) /sbin/blockdev --setra 2048 device (where device is the partition or LVM \n> volume)\n>\n> 2) Use XFS, and make sure your stripe settings match the RAID.\n>\n> Having said that, 102MB/sec sounds really low for any modern controller with \n> 8 drives, regardless of tuning or filesystem choice.\n>\n> \n\nFirst, thanks alot for this and all the other answers.\n\nI measured the raw device performance:\ndd if=/dev/cciss/c0d0 bs=64k count=100000 of=/dev/null\n\nI get poor performance when all 8 drives are configured as one, large \nRAID-5, and slightly poorer performance when configured as JBOD. 
In \nproduction, we use XFS as FS, but I doubt this has anything to do with \nFS tuning.\n\n\n", "msg_date": "Tue, 09 Dec 2008 13:17:37 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Tue, Dec 9, 2008 at 5:17 AM, Mario Weilguni <[email protected]> wrote:\n> Alan Hodgson schrieb:\n>>>>>>\n>>>>>> Mario Weilguni <[email protected]> wrote:\n>>>>>>\n>>>>\n>>>> strange values. An individual drive is capable of delivering 91\n>>>> MB/sec\n>>>> sequential read performance, and we get values ~102MB/sec out of a\n>>>> 8-drive RAID5, seems to be ridiculous slow.\n>>\n>>\n>> What command are you using to test the reads?\n>>\n>> Some recommendations to try:\n>>\n>> 1) /sbin/blockdev --setra 2048 device (where device is the partition or\n>> LVM volume)\n>>\n>> 2) Use XFS, and make sure your stripe settings match the RAID.\n>>\n>> Having said that, 102MB/sec sounds really low for any modern controller\n>> with 8 drives, regardless of tuning or filesystem choice.\n>>\n>>\n>\n> First, thanks alot for this and all the other answers.\n>\n> I measured the raw device performance:\n> dd if=/dev/cciss/c0d0 bs=64k count=100000 of=/dev/null\n>\n> I get poor performance when all 8 drives are configured as one, large\n> RAID-5, and slightly poorer performance when configured as JBOD. In\n> production, we use XFS as FS, but I doubt this has anything to do with FS\n> tuning.\n\nYeah, having just trawled the pgsql-performance archives, there are\nplenty of instances of people having terrible performance from HP\nsmart array controllers before the P800. Is it possible for you to\ntrade up to a better RAID controller? Whichever salesman sold you the\nP400 should take one for the team and make this right for you.\n", "msg_date": "Tue, 9 Dec 2008 08:30:38 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Tue, 2008-12-09 at 13:10 +0100, Mario Weilguni wrote:\n> Scott Marlowe schrieb:\n> > On Tue, Dec 2, 2008 at 2:22 AM, Mario Weilguni <[email protected]> wrote:\n\n> I still think we must be doing something wrong here, I googled the \n> controller and Linux, and did not find anything indicating a problem. \n> The HP SmartArray series is quite common, so a lot of users would have \n> the same problem.\n\nYes the SmartArray series is quite common and actually know to perform\nreasonably well, in RAID 10. You still appear to be trying RAID 5.\n\nJoshua D. Drake\n\n\n> \n> Thanks!\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 07:58:28 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "We reached a fairly good performance on a P400 controller (8 SATA 146GB \n2,5\" 10k rpm) with raid5 or raid6 Linux software raid: the writing \nbandwidth reached about 140 MB/s sustained throughput (the hardware \nraid5 gave a sustained 20 MB/s...). With a second, equal controller (16 \ndisks) we reached (raid6 spanning all 16 disks) about 200 MB/s sustained.\n\nThe CPU load is negligible. 
Reading performance is about 20% better.\n\nBest regards and my apologies for my bad English.\n\nGT\n\nP.S.: on a P800, 12 SATA 750GB 3,5\" 7200 rpm, the hardware raid5 writing \nperformance was about 30 MB/s, software raid5 is between 60 and 80 MB/s.\n\n\n\nScott Marlowe ha scritto:\n> On Tue, Dec 9, 2008 at 5:17 AM, Mario Weilguni <[email protected]> wrote:\n>> Alan Hodgson schrieb:\n>>>>>>> Mario Weilguni <[email protected]> wrote:\n>>>>>>>\n>>>>> strange values. An individual drive is capable of delivering 91\n>>>>> MB/sec\n>>>>> sequential read performance, and we get values ~102MB/sec out of a\n>>>>> 8-drive RAID5, seems to be ridiculous slow.\n>>>\n>>> What command are you using to test the reads?\n>>>\n>>> Some recommendations to try:\n>>>\n>>> 1) /sbin/blockdev --setra 2048 device (where device is the partition or\n>>> LVM volume)\n>>>\n>>> 2) Use XFS, and make sure your stripe settings match the RAID.\n>>>\n>>> Having said that, 102MB/sec sounds really low for any modern controller\n>>> with 8 drives, regardless of tuning or filesystem choice.\n>>>\n>>>\n>> First, thanks alot for this and all the other answers.\n>>\n>> I measured the raw device performance:\n>> dd if=/dev/cciss/c0d0 bs=64k count=100000 of=/dev/null\n>>\n>> I get poor performance when all 8 drives are configured as one, large\n>> RAID-5, and slightly poorer performance when configured as JBOD. In\n>> production, we use XFS as FS, but I doubt this has anything to do with FS\n>> tuning.\n> \n> Yeah, having just trawled the pgsql-performance archives, there are\n> plenty of instances of people having terrible performance from HP\n> smart array controllers before the P800. Is it possible for you to\n> trade up to a better RAID controller? Whichever salesman sold you the\n> P400 should take one for the team and make this right for you.\n>", "msg_date": "Tue, 09 Dec 2008 17:03:48 +0100", "msg_from": "Gabriele Turchi <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "* Joshua D. Drake <[email protected]> [081209 11:01]:\n \n> Yes the SmartArray series is quite common and actually know to perform\n> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n\n*boggle* \n\nAre people *still* using raid5?\n\n/me gives up!\n\n-- \nAidan Van Dyk Create like a god,\[email protected] command like a king,\nhttp://www.highrise.ca/ work like a slave.", "msg_date": "Tue, 9 Dec 2008 11:17:50 -0500", "msg_from": "Aidan Van Dyk <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Tue, Dec 9, 2008 at 9:03 AM, Gabriele Turchi\n<[email protected]> wrote:\n> We reached a fairly good performance on a P400 controller (8 SATA 146GB 2,5\"\n> 10k rpm) with raid5 or raid6 Linux software raid: the writing bandwidth\n> reached about 140 MB/s sustained throughput (the hardware raid5 gave a\n> sustained 20 MB/s...). With a second, equal controller (16 disks) we reached\n> (raid6 spanning all 16 disks) about 200 MB/s sustained.\n\nThat's better than you were getting but still quite slow. 
I was\nbothered that my 12x15k4 SAS RAID-10 array could only sustain about\n350Megs/second sequential read, thinking that each drive should be\nable to approach 80 or so megs/second and I was only getting about\n60...\n\nThis sounds more and more like HP is trying to undercompete along with\nDell in the RAID controller market or at least the raid controller\ndriver market.\n", "msg_date": "Tue, 9 Dec 2008 09:25:15 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Aidan Van Dyk wrote:\n> * Joshua D. Drake <[email protected]> [081209 11:01]:\n> \n>> Yes the SmartArray series is quite common and actually know to perform\n>> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n> \n> *boggle* \n> \n> Are people *still* using raid5?\n> \n> /me gives up!\n\nWhat do you suggest when there is not enough room for a RAID 10?\n", "msg_date": "Tue, 09 Dec 2008 18:27:00 +0200", "msg_from": "Peter Eisentraut <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "* Peter Eisentraut <[email protected]> [081209 11:28]:\n\n> What do you suggest when there is not enough room for a RAID 10?\n\nMore disks ;-)\n\nBut if you've given up on performance and reliability in favour of\ncheaper storage, I guess raid5 is ok. But then I'm not sure what the\npoint of asking about it's poor performance is...\n\na.\n\n-- \nAidan Van Dyk Create like a god,\[email protected] command like a king,\nhttp://www.highrise.ca/ work like a slave.", "msg_date": "Tue, 9 Dec 2008 11:35:55 -0500", "msg_from": "Aidan Van Dyk <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Tue, 2008-12-09 at 18:27 +0200, Peter Eisentraut wrote:\n> Aidan Van Dyk wrote:\n> > * Joshua D. Drake <[email protected]> [081209 11:01]:\n> > \n> >> Yes the SmartArray series is quite common and actually know to perform\n> >> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n> > \n> > *boggle* \n> > \n> > Are people *still* using raid5?\n> > \n> > /me gives up!\n> \n> What do you suggest when there is not enough room for a RAID 10?\n\nRAID 1.\n\nJoshua D. Drake\n\n\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 08:43:08 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Tue, 2008-12-09 at 09:25 -0700, Scott Marlowe wrote:\n> On Tue, Dec 9, 2008 at 9:03 AM, Gabriele Turchi\n> <[email protected]> wrote:\n> > We reached a fairly good performance on a P400 controller (8 SATA 146GB 2,5\"\n> > 10k rpm) with raid5 or raid6 Linux software raid: the writing bandwidth\n> > reached about 140 MB/s sustained throughput (the hardware raid5 gave a\n> > sustained 20 MB/s...). With a second, equal controller (16 disks) we reached\n> > (raid6 spanning all 16 disks) about 200 MB/s sustained.\n> \n> That's better than you were getting but still quite slow. 
I was\n> bothered that my 12x15k4 SAS RAID-10 array could only sustain about\n> 350Megs/second sequential read, thinking that each drive should be\n> able to approach 80 or so megs/second and I was only getting about\n> 60...\n> \n> This sounds more and more like HP is trying to undercompete along with\n> Dell in the RAID controller market or at least the raid controller\n> driver market.\n\nIt is certainly possible. The 400 is the higher end of the lower end\nwith HP... 200, 400, 600, 800 (800 is a nice controller).\n\nJoshua D. Drake\n\n\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 08:43:46 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Scott Marlowe schrieb:\n> On Tue, Dec 9, 2008 at 5:17 AM, Mario Weilguni <[email protected]> wrote:\n> \n>> Alan Hodgson schrieb:\n>> \n>>>>>>> Mario Weilguni <[email protected]> wrote:\n>>>>>>>\n>>>>>>> \n>>>>> strange values. An individual drive is capable of delivering 91\n>>>>> MB/sec\n>>>>> sequential read performance, and we get values ~102MB/sec out of a\n>>>>> 8-drive RAID5, seems to be ridiculous slow.\n>>>>> \n>>> What command are you using to test the reads?\n>>>\n>>> Some recommendations to try:\n>>>\n>>> 1) /sbin/blockdev --setra 2048 device (where device is the partition or\n>>> LVM volume)\n>>>\n>>> 2) Use XFS, and make sure your stripe settings match the RAID.\n>>>\n>>> Having said that, 102MB/sec sounds really low for any modern controller\n>>> with 8 drives, regardless of tuning or filesystem choice.\n>>>\n>>>\n>>> \n>> First, thanks alot for this and all the other answers.\n>>\n>> I measured the raw device performance:\n>> dd if=/dev/cciss/c0d0 bs=64k count=100000 of=/dev/null\n>>\n>> I get poor performance when all 8 drives are configured as one, large\n>> RAID-5, and slightly poorer performance when configured as JBOD. In\n>> production, we use XFS as FS, but I doubt this has anything to do with FS\n>> tuning.\n>> \n>\n> Yeah, having just trawled the pgsql-performance archives, there are\n> plenty of instances of people having terrible performance from HP\n> smart array controllers before the P800. Is it possible for you to\n> trade up to a better RAID controller? Whichever salesman sold you the\n> P400 should take one for the team and make this right for you.\n>\n> \nA customer of us uses the P400 on a different machine, 8 SAS drives \n(Raid 5 as well), and the performance is very, very good. So we thought \nit's a good choice. Maybe the SATA drives are the root of this problem?\n\n\n", "msg_date": "Wed, 10 Dec 2008 08:45:32 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "It could be the drives, it could be a particular interaction between them and the drivers or firmware.\n\nDo you know if NCQ is activated for them?\nCan you test a single drive JBOD through the array to the same drive through something else, perhaps the motherboard's SATA port?\n\nYou may also have better luck with software raid-0 on top of 2 4 disk raid 5's or raid 10s. But not if a single disk JBOD still performs well under par. You may need new drivers for the card, or firmware for the drive and or card. 
Or, the card may simply be incompatible with those drives. I've seen several hard drive - raid card incompatibilities before.\n\n\n\nOn 12/9/08 11:45 PM, \"Mario Weilguni\" <[email protected]> wrote:\n\nScott Marlowe schrieb:\n> On Tue, Dec 9, 2008 at 5:17 AM, Mario Weilguni <[email protected]> wrote:\n>\n>> Alan Hodgson schrieb:\n>>\n>>>>>>> Mario Weilguni <[email protected]> wrote:\n>>>>>>>\n>>>>>>>\n>>>>> strange values. An individual drive is capable of delivering 91\n>>>>> MB/sec\n>>>>> sequential read performance, and we get values ~102MB/sec out of a\n>>>>> 8-drive RAID5, seems to be ridiculous slow.\n>>>>>\n>>> What command are you using to test the reads?\n>>>\n>>> Some recommendations to try:\n>>>\n>>> 1) /sbin/blockdev --setra 2048 device (where device is the partition or\n>>> LVM volume)\n>>>\n>>> 2) Use XFS, and make sure your stripe settings match the RAID.\n>>>\n>>> Having said that, 102MB/sec sounds really low for any modern controller\n>>> with 8 drives, regardless of tuning or filesystem choice.\n>>>\n>>>\n>>>\n>> First, thanks alot for this and all the other answers.\n>>\n>> I measured the raw device performance:\n>> dd if=/dev/cciss/c0d0 bs=64k count=100000 of=/dev/null\n>>\n>> I get poor performance when all 8 drives are configured as one, large\n>> RAID-5, and slightly poorer performance when configured as JBOD. In\n>> production, we use XFS as FS, but I doubt this has anything to do with FS\n>> tuning.\n>>\n>\n> Yeah, having just trawled the pgsql-performance archives, there are\n> plenty of instances of people having terrible performance from HP\n> smart array controllers before the P800. Is it possible for you to\n> trade up to a better RAID controller? Whichever salesman sold you the\n> P400 should take one for the team and make this right for you.\n>\n>\nA customer of us uses the P400 on a different machine, 8 SAS drives\n(Raid 5 as well), and the performance is very, very good. So we thought\nit's a good choice. Maybe the SATA drives are the root of this problem?\n\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\nRe: [PERFORM] Experience with HP Smart Array P400 and SATA drives?\n\n\nIt could be the drives, it could be a particular interaction between them and the drivers or firmware.\n\nDo you know if NCQ is activated for them?\nCan you test a single drive JBOD through the array to the same drive through something else, perhaps the motherboard’s SATA port?\n\nYou may also have better luck with software raid-0 on top of 2 4 disk raid 5’s or raid 10s.  But not if a single disk JBOD still performs well under par.  You may need new drivers for the card, or firmware for the drive  and or card.  Or, the card may simply be incompatible with those drives.  I’ve seen several hard drive – raid card incompatibilities before. \n\n\n\nOn 12/9/08 11:45 PM, \"Mario Weilguni\" <[email protected]> wrote:\n\nScott Marlowe schrieb:\n> On Tue, Dec 9, 2008 at 5:17 AM, Mario Weilguni <[email protected]> wrote:\n>\n>> Alan Hodgson schrieb:\n>>\n>>>>>>> Mario Weilguni <[email protected]> wrote:\n>>>>>>>\n>>>>>>>\n>>>>> strange values. 
An individual drive is capable of delivering 91\n>>>>> MB/sec\n>>>>>      sequential read performance, and we get values ~102MB/sec out of a\n>>>>> 8-drive RAID5, seems to be ridiculous slow.\n>>>>>\n>>> What command are you using to test the reads?\n>>>\n>>> Some recommendations to try:\n>>>\n>>> 1) /sbin/blockdev --setra 2048 device (where device is the partition or\n>>> LVM volume)\n>>>\n>>> 2) Use XFS, and make sure your stripe settings match the RAID.\n>>>\n>>> Having said that, 102MB/sec sounds really low for any modern controller\n>>> with 8 drives, regardless of tuning or filesystem choice.\n>>>\n>>>\n>>>\n>> First, thanks alot for this and all the other answers.\n>>\n>> I measured the raw device performance:\n>> dd if=/dev/cciss/c0d0 bs=64k count=100000 of=/dev/null\n>>\n>> I get poor performance when all 8 drives are configured as one, large\n>> RAID-5, and slightly poorer performance when configured as JBOD. In\n>> production, we use XFS as FS, but I doubt this has anything to do with FS\n>> tuning.\n>>\n>\n> Yeah, having just trawled the pgsql-performance archives, there are\n> plenty of instances of people having terrible performance from HP\n> smart array controllers before the P800.  Is it possible for you to\n> trade up to a better RAID controller?  Whichever salesman sold you the\n> P400 should take one for the team and make this right for you.\n>\n>\nA customer of us uses the P400 on a different machine, 8 SAS drives\n(Raid 5 as well), and the performance is very, very good. So we thought\nit's a good choice. Maybe the SATA drives are the root of this problem?\n\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Tue, 9 Dec 2008 23:58:57 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Aidan Van Dyk schrieb:\n> * Joshua D. Drake <[email protected]> [081209 11:01]:\n> \n> \n>> Yes the SmartArray series is quite common and actually know to perform\n>> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n>> \n>\n> *boggle* \n>\n> Are people *still* using raid5?\n>\n> /me gives up!\n>\n> \n\nWhy not? I know it's not performing as good as RAID-10, but it does not \nwaste 50% diskspace. RAID-6 is no option, because the performance is \neven worse. And, on another system with RAID-5 + spare and SAS drives, \nthe same controller is working very well.\n\n", "msg_date": "Wed, 10 Dec 2008 13:29:13 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, Dec 10, 2008 at 5:29 AM, Mario Weilguni <[email protected]> wrote:\n> Aidan Van Dyk schrieb:\n>>\n>> * Joshua D. Drake <[email protected]> [081209 11:01]:\n>>\n>>>\n>>> Yes the SmartArray series is quite common and actually know to perform\n>>> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n>>>\n>>\n>> *boggle*\n>> Are people *still* using raid5?\n>>\n>> /me gives up!\n>>\n>>\n>\n> Why not? I know it's not performing as good as RAID-10, but it does not\n> waste 50% diskspace. RAID-6 is no option, because the performance is even\n> worse. And, on another system with RAID-5 + spare and SAS drives, the same\n> controller is working very well.\n\nI wouldn't refer to it as \"waste\". It's a tradeoff. 
With RAID-10 you\nget good performance at a cost of having 1/2 the storage capacity of\nyour drives combined. RAID-5 says you're more worried about your\nbudget than performance, and sometimes that's the case. The\nproduction servers where I work run a 25G database on a 12 disk\nRAID-10. The fact that we get less than a terabyte from 12 147G scsi\ndisks is no great loss to us, we're interested in having a disk array\nthat can handle a few 100 transactions per second even if a disk dies.\n\nAlso, RAID-6 is faster, IF one of your disks has died (and you're on a\ngood RAID controller). RAID-5 degraded performance is abysmal even on\ngood controllers. RAID-10 with a lost disk is about the same as\nRAID-10 with all its disks in terms of performance. If you're running\na production 24/7 database server, you can't afford to lose 80%+ of\nyour throughput when a single drive fails.\n\nBut this has all been covered before (many times) both on this list\nand over the internet. RAID-5 is useful for large data stores that\ncan afford downtime. And there are plenty of apps like that. If your\napp isn't like that, i.e. needs to be up and performing well 24/7, or\nclose to it, then RAID5 is a tragic mistake to make.\n", "msg_date": "Wed, 10 Dec 2008 06:34:17 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, Dec 10, 2008 at 12:45 AM, Mario Weilguni <[email protected]> wrote:\n>\n> A customer of us uses the P400 on a different machine, 8 SAS drives (Raid 5\n> as well), and the performance is very, very good. So we thought it's a good\n> choice. Maybe the SATA drives are the root of this problem?\n\nWhat tests have you or the customer done to confirm that performance\nis very very good? A lot of times the system is not as fast as the\ncustomer thinks, it's just faster than it was before and they're\nhappy. Also, there could be problems in the driver or firmware on\nyour P400 versus the customer one. I'd look for those differences as\nwell. I doubt SATA versus SAS is the problem, but who knows...\n", "msg_date": "Wed, 10 Dec 2008 07:17:52 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Scott Marlowe schrieb:\n> On Wed, Dec 10, 2008 at 12:45 AM, Mario Weilguni <[email protected]> wrote:\n> \n>> A customer of us uses the P400 on a different machine, 8 SAS drives (Raid 5\n>> as well), and the performance is very, very good. So we thought it's a good\n>> choice. Maybe the SATA drives are the root of this problem?\n>> \n>\n> What tests have you or the customer done to confirm that performance\n> is very very good? A lot of times the system is not as fast as the\n> customer thinks, it's just faster than it was before and they're\n> happy. Also, there could be problems in the driver or firmware on\n> your P400 versus the customer one. I'd look for those differences as\n> well. 
I doubt SATA versus SAS is the problem, but who knows...\n>\n> \nWell, I cannot take the box offline to make usefull tests like tiobench \nor bonnie, but even with the current service running I get from a simple \ndd between 270 and 340 MB/sec sustained read over 30% of the disk.\n\nIt also performed extremly good when I put the box into production, \npg_bench values were impressing, but I do not have them at hand.\n\nHowever, currently we are seriously considering dropping RAID5 in favor \nof RAID10, we will test this week if this performs better.\n\nRegards\nMario\n", "msg_date": "Wed, 10 Dec 2008 15:30:20 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Me, I'd *never) allow RAID-5 for data I cared abut.\n\nAmong other references, see <http://www.miracleas.com/BAARF/RAID5_versus_RAID10.txt>\n\n(sorry for top-posting -- challenged reader and no other content to add)\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)\n\n\n\n-----Original Message-----\nFrom: [email protected] on behalf of Mario Weilguni\nSent: Wed 12/10/2008 4:29 AM\nTo: Aidan Van Dyk\nCc: Joshua D. Drake; Scott Marlowe; [email protected]\nSubject: Re: [PERFORM] Experience with HP Smart Array P400 and SATA drives?\n \nAidan Van Dyk schrieb:\n> * Joshua D. Drake <[email protected]> [081209 11:01]:\n> \n> \n>> Yes the SmartArray series is quite common and actually know to perform\n>> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n>> \n>\n> *boggle* \n>\n> Are people *still* using raid5?\n>\n> /me gives up!\n>\n> \n\nWhy not? I know it's not performing as good as RAID-10, but it does not \nwaste 50% diskspace. RAID-6 is no option, because the performance is \neven worse. And, on another system with RAID-5 + spare and SAS drives, \nthe same controller is working very well.\n\n\n-- \nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\n\n\nRE: [PERFORM] Experience with HP Smart Array P400 and SATA drives?\n\n\n\nMe, I'd *never) allow RAID-5 for data I cared abut.\n\nAmong other references, see <http://www.miracleas.com/BAARF/RAID5_versus_RAID10.txt>\n\n(sorry for top-posting -- challenged reader and no other content to add)\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)\n\n\n\n-----Original Message-----\nFrom: [email protected] on behalf of Mario Weilguni\nSent: Wed 12/10/2008 4:29 AM\nTo: Aidan Van Dyk\nCc: Joshua D. 
Drake; Scott Marlowe; [email protected]\nSubject: Re: [PERFORM] Experience with HP Smart Array P400 and SATA drives?\n\nAidan Van Dyk schrieb:\n> * Joshua D. Drake <[email protected]> [081209 11:01]:\n> \n>  \n>> Yes the SmartArray series is quite common and actually know to perform\n>> reasonably well, in RAID 10. You still appear to be trying RAID 5.\n>>    \n>\n> *boggle*\n>\n> Are people *still* using raid5?\n>\n> /me gives up!\n>\n>  \n\nWhy not? I know it's not performing as good as RAID-10, but it does not\nwaste 50% diskspace. RAID-6 is no option, because the performance is\neven worse. And, on another system with RAID-5 + spare and SAS drives,\nthe same controller is working very well.\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Wed, 10 Dec 2008 08:21:30 -0700", "msg_from": "\"Gregory Williamson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "* Mario Weilguni <[email protected]> [081210 07:31]:\n\n> Why not? I know it's not performing as good as RAID-10, but it does not \n> waste 50% diskspace. RAID-6 is no option, because the performance is \n> even worse. And, on another system with RAID-5 + spare and SAS drives, \n> the same controller is working very well.\n\nLike Scott said, it's all about trade-offs.\n\nWith raid5, you get abysmal write performance, \"make me not sleep at\nnight\" inconsistent parity issues, and a degraded mode that will a\nnightmare ...\n\n... and as a trade-off you save a little money, and get good \"read only\"\nperformance ...\n\n... as long as you don't ever have a disk or system crash ...\n\n... or can afford to rebuild if you do ...\n\n... etc ...\n\n-- \nAidan Van Dyk Create like a god,\[email protected] command like a king,\nhttp://www.highrise.ca/ work like a slave.", "msg_date": "Wed, 10 Dec 2008 10:52:08 -0500", "msg_from": "Aidan Van Dyk <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "Aidan Van Dyk schrieb:\n> * Mario Weilguni <[email protected]> [081210 07:31]:\n>\n> \n>> Why not? I know it's not performing as good as RAID-10, but it does not \n>> waste 50% diskspace. RAID-6 is no option, because the performance is \n>> even worse. And, on another system with RAID-5 + spare and SAS drives, \n>> the same controller is working very well.\n>> \n>\n> Like Scott said, it's all about trade-offs.\n>\n> With raid5, you get abysmal write performance, \"make me not sleep at\n> night\" inconsistent parity issues, and a degraded mode that will a\n> nightmare ...\n>\n> ... and as a trade-off you save a little money, and get good \"read only\"\n> performance ...\n>\n> ... as long as you don't ever have a disk or system crash ...\n>\n> ... or can afford to rebuild if you do ...\n>\n> ... etc ...\n> \n\nIn fact, for this system we're currently going to RAID10, I'm convinced \nnow. With other systems we have, RAID5 is a safe option for one reason, \nthe machines are clusters, so we have (sort of) RAID50 here:\nMachine A/RAID5 <-- DRBD --> Machine B/RAID5\n\nSeems reliable enough for me. 
But in this case, the machine will be \nstandalone, and so RAID5 might really not be the best choice.\n\n\nHowever, I'm pretty sure we'll have the same problems with RAID10, the \nproblem seems to have to do with P400 and/or SATA drives.\n\n", "msg_date": "Wed, 10 Dec 2008 17:05:06 +0100", "msg_from": "Mario Weilguni <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, Dec 10, 2008 at 9:05 AM, Mario Weilguni <[email protected]> wrote:\n> In fact, for this system we're currently going to RAID10, I'm convinced now.\n> With other systems we have, RAID5 is a safe option for one reason, the\n> machines are clusters, so we have (sort of) RAID50 here:\n> Machine A/RAID5 <-- DRBD --> Machine B/RAID5\n>\n> Seems reliable enough for me. But in this case, the machine will be\n> standalone, and so RAID5 might really not be the best choice.\n>\n> However, I'm pretty sure we'll have the same problems with RAID10, the\n> problem seems to have to do with P400 and/or SATA drives.\n\nYeah, I'm thinking there's something off in your system and until you\nresolve it you're going to have issues. I'd check the following:\n\nfirmware on RAID controller\nhow it runs with a couple of SAS drives in RAID-1 or RAID-0 (just for testing)\nOS version / kernel version / driver version. especially compared to\nyour customer's machine. See how much of his environment you can\nclone until performance goes up where it should be. Then change one\nthing at a time until you break it again. I'm sure everyone here would\nlike to know what makes a P400 fast or slow.\n\nOr, if you don't have time to mess with it, just order an escalade or\nareca card and be done with it. :)\n", "msg_date": "Wed, 10 Dec 2008 09:17:28 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, 10 Dec 2008, Scott Marlowe wrote:\n> Or, if you don't have time to mess with it, just order an escalade or\n> areca card and be done with it. :)\n\nI'd be interested in recommendations for RAID cards for small SATA \nsystems. It's not anything to do with Postgres - I'm just intending to set \nup a little four-drive array for my home computer, with cheap 1TB SATA \ndrives.\n\nWhat PCI-Express or motherboard built-in SATA RAID controllers for about \nfour drives are there out there that are good, and well supported by \nLinux? What level of support is there for monitoring and reporting of RAID \nstatus?\n\nAlso, is it possible to set the drives in a hardware RAID into \nauto-spindown mode? I'm going to be putting a SSD in as the main system \ndrive, with the RAID array to hold my large stuff which I only work on \npart of the time.\n\nMatthew\n\n-- \n Failure is not an option. It comes bundled with your Microsoft product. \n -- Ferenc Mantfeld\n", "msg_date": "Wed, 10 Dec 2008 18:08:01 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, 10 Dec 2008, Matthew Wakeling wrote:\n\n> I'd be interested in recommendations for RAID cards for small SATA systems. \n> It's not anything to do with Postgres - I'm just intending to set up a little \n> four-drive array for my home computer, with cheap 1TB SATA drives.\n\nThen why are you thinking of RAID cards? 
On a Linux only host, you might \nas well just get a standard cheap multi-port SATA card that's compatible \nwith the OS, plug the four drives in, and run software RAID. Anything \nelse you put in the middle is going to add complications in terms of \nthings like getting SMART error data from the drives, and the SW RAID will \nprobably be faster too.\n\nA great source for checking Linux compatibility is \nhttp://linux-ata.org/driver-status.html\n\nThe only reason I have a good controller card in my system at home is \nbecause it lets me do realistic performance tests with a write cache, so \nfsync is accelerated when working with PostgreSQL. If just putting a \nbunch of drives in there was my only concern, I'd have just bought a cheap \nSilicon Image 3124 PCI-Express board.\n\n> What PCI-Express or motherboard built-in SATA RAID controllers for about four \n> drives are there out there that are good, and well supported by Linux? What \n> level of support is there for monitoring and reporting of RAID status?\n\n3ware 9650SE-4LPML is what I'd buy today if I wanted hardware SATA RAID. \nWhen I made a similar decision some time ago, I bought an Areca 1210 \ninstead, but two things have changed since then. One, I've become \nincreasingly unsatisfied with the limitations of the closed-source \ncontroller management tool Areca supplies. And the performance of 3ware's \nearlier 9550 model really lagged relative to Areca, while the newer 9650 \nis quite competative. More details on all of that on the blog entry I \nwrote after my last disk failure: \nhttp://notemagnet.blogspot.com/2008/08/linux-disk-failures-areca-is-not-so.html\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Wed, 10 Dec 2008 14:13:03 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, Dec 10, 2008 at 12:13 PM, Greg Smith <[email protected]> wrote:\n> On Wed, 10 Dec 2008, Matthew Wakeling wrote:\n>\n>> I'd be interested in recommendations for RAID cards for small SATA\n>> systems. It's not anything to do with Postgres - I'm just intending to set\n>> up a little four-drive array for my home computer, with cheap 1TB SATA\n>> drives.\n>\n> Then why are you thinking of RAID cards? On a Linux only host, you might as\n> well just get a standard cheap multi-port SATA card that's compatible with\n> the OS, plug the four drives in, and run software RAID. Anything else you\n> put in the middle is going to add complications in terms of things like\n> getting SMART error data from the drives, and the SW RAID will probably be\n> faster too.\n\nNote that you could combine the two and use a caching controller in\njbod mode and do the raid in linux kernel sw mode. Just a thought.\nNot sure about the smart stuff though.\n\n>> What PCI-Express or motherboard built-in SATA RAID controllers for about\n>> four drives are there out there that are good, and well supported by Linux?\n>> What level of support is there for monitoring and reporting of RAID status?\n>\n> 3ware 9650SE-4LPML is what I'd buy today if I wanted hardware SATA RAID.\n> When I made a similar decision some time ago, I bought an Areca 1210\n> instead, but two things have changed since then. 
One, I've become\n> increasingly unsatisfied with the limitations of the closed-source\n> controller management tool Areca supplies.\n\nNote that Areca's newest controllers, the 1680 series, have a separate\nethernet port with snmp traps so you don't have to use any special\nclosed source software to monitor them. Just FYI for anyone\nconsidering them.\n", "msg_date": "Wed, 10 Dec 2008 12:41:37 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, Dec 10, 2008 at 2:13 PM, Greg Smith <[email protected]> wrote:\n\n> 3ware 9650SE-4LPML is what I'd buy today if I wanted hardware SATA RAID.\n\nFWIW, I just put together a system with exactly that (4 320g drives in\nraid 10) and have been pleased with the results. I won't have any\ndowntime to be able to get performance benchmarks until the current\ncompute/write pass finishes in a week and a half or so, but I don't\nhave any complaints with how it's performing for our app.\n\nDB size is ~120gb now and add ~7gb/day during the current phase, at\nwhich point it'll move to a light-write, high-read data warehouse\nstyle usage pattern.\n\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Wed, 10 Dec 2008 16:37:27 -0500", "msg_from": "\"David Wilson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" }, { "msg_contents": "On Wed, 10 Dec 2008, Greg Smith wrote:\n>> I'd be interested in recommendations for RAID cards for small SATA systems. \n>> It's not anything to do with Postgres - I'm just intending to set up a \n>> little four-drive array for my home computer, with cheap 1TB SATA drives.\n>\n> Then why are you thinking of RAID cards? On a Linux only host, you might as \n> well just get a standard cheap multi-port SATA card that's compatible with \n> the OS, plug the four drives in, and run software RAID. Anything else you \n> put in the middle is going to add complications in terms of things like \n> getting SMART error data from the drives, and the SW RAID will probably be \n> faster too.\n>\n> A great source for checking Linux compatibility is \n> http://linux-ata.org/driver-status.html\n\nThanks, that is the kind of info I was looking for. It looks like most \nsensible SATA controller manufacturers are converging towards the open \nahci controller standard, which is useful.\n\nMatthew\n\n-- \n The best way to accelerate a Microsoft product is at 9.8 metres per second\n per second.\n - Anonymous\n", "msg_date": "Thu, 11 Dec 2008 13:47:19 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Experience with HP Smart Array P400 and SATA drives?" } ]
[ { "msg_contents": "I noticed that query\n\nSELECT dok.*\n FROM dok\nJOIN (SELECT DISTINCT dokumnr FROM temptbl ) x USING(dokumnr);\n\nis slow in 8.1.4\nI cannot use explain analyze since this query uses results from temporary \ntable temptbl which is not available.\n\nSometimes innter table returns only 1 row so maybe seq scan is selected \ninstead of single row index access becauses expected count is 1000\n\nAs I understand, PostgreSql requires manually running ANALYZE for temporary \ntables if their row count is different from 1000\n\nHow to force PostgreSql to analyze inner table in this query or use other \nway to get index using query plan if inner query returns single row ?\n\nHow \n\n", "msg_date": "Tue, 2 Dec 2008 14:06:36 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "analyzing intermediate query" }, { "msg_contents": "> I noticed that query\n>\n> SELECT dok.*\n> FROM dok\n> JOIN (SELECT DISTINCT dokumnr FROM temptbl ) x USING(dokumnr);\n>\n> is slow in 8.1.4\n> I cannot use explain analyze since this query uses results from \n> temporary table temptbl which is not available.\n\n\tGenerally if you know your temptbl will always contains a few rows (say, \ngenerally a few and never more than a few thousands) it is better to use \nsomething like that :\n\n\t- get list of items\n\t- SELECT * FROM table WHERE id IN (...)\n\n\tOf course you must be pretty damn sure that the list isn't gonna contain \n10 million items. Or else you'll have a little problem. But it generally \nworks pretty well. The overhead of generating and parsing the IN() is \nlower than the overhead of temptables...\n\n\n\tBy the way, sometime ago there was talk about adding estimation of number \nof rows returned to set-returning functions. What's the status of this ? \nIt doesn't seem to have survived...\n\n8.3> EXPLAIN SELECT * FROM generate_series( 1,10 );\n QUERY PLAN\n------------------------------------------------------------------------\n Function Scan on generate_series (cost=0.00..12.50 rows=1000 width=4)\n\n\n> Sometimes innter table returns only 1 row so maybe seq scan is selected \n> instead of single row index access becauses expected count is 1000\n>\n> As I understand, PostgreSql requires manually running ANALYZE for \n> temporary tables if their row count is different from 1000\n>\n> How to force PostgreSql to analyze inner table in this query or use \n> other way to get index using query plan if inner query returns single \n> row ?\n>\n> How\n\n\n", "msg_date": "Tue, 02 Dec 2008 14:48:36 +0100", "msg_from": "PFC <[email protected]>", "msg_from_op": false, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "> Generally if you know your temptbl will always contains a few rows (say, \n> generally a few and never more than a few thousands) it is better to use \n> something like that :\n>\n> - get list of items\n> - SELECT * FROM table WHERE id IN (...)\n\nMy list can contain 1 .. 100000 records and table contains 3000000 records \nand is growing.\n\nAs discussed here few time ago, IN (...) forces seq scan over 3000000 rows \nand maybe stack overflow exception also occurs (stack overflow occurs in \n8.0, maybe it is fixed in 8.1).\n\nUsing temp table + ANALYZE enables bitmap index scan for this query and is \nthus a lot faster.\n\nI formerly used IN (...) 
but changed this to use temp table + primary key on \ntemp table + analyze this temp table.\n\nUsing 8.1.4\n\nI can switch this to temp table also if it helps.\nThis requires some special logic to generate temp table name since there may \nbe a number of such tables in single transaction, so is would be major appl \nrewrite.\n\nAndrus. \n\n", "msg_date": "Tue, 2 Dec 2008 16:00:48 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "\n> My list can contain 1 .. 100000 records and table contains 3000000 \n> records and is growing.\n\n\tAh. No IN(), then ;)\n\tTemp table + ANALYZE seems your only option...\n\n", "msg_date": "Tue, 02 Dec 2008 15:13:50 +0100", "msg_from": "PFC <[email protected]>", "msg_from_op": false, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": ">> My list can contain 1 .. 100000 records and table contains 3000000 \n>> records and is growing.\n>\n> Ah. No IN(), then ;)\n> Temp table + ANALYZE seems your only option...\n\nIn 8.3 or 8.4 I think that IN() or temp table produce exactly the same \nresult.\n\nAndrus. \n\n", "msg_date": "Tue, 2 Dec 2008 16:37:37 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "\n>>> My list can contain 1 .. 100000 records and table contains 3000000 \n>>> records and is growing.\n>>\n>> Ah. No IN(), then ;)\n>> Temp table + ANALYZE seems your only option...\n>\n> In 8.3 or 8.4 I think that IN() or temp table produce exactly the same \n> result.\n>\n> Andrus.\n\n\tOh, I just thought about something, I don't remember in which version it \nwas added, but :\n\nEXPLAIN ANALYZE SELECT sum(column1) FROM (VALUES ...a million integers... \n) AS v\n\n\tPostgres is perfectly happy with that ; it's either a bit slow (about 1 \nsecond) or very fast depending on how you view things...\n\nAggregate (cost=15000.00..15000.01 rows=1 width=4) (actual \ntime=1060.253..1060.253 rows=1 loops=1)\n-> Values Scan on \"*VALUES*\" (cost=0.00..12500.00 rows=1000000 width=4) \n(actual time=0.009..634.728 rows=1000000 loops=1)\nTotal runtime: 1091.420 ms\n\n\tThe most interesting thing, of course, is that the statistics are exact.\n\tYou can use VALUES like a table (Join, whatever).\n\tOf course it's always slightly annoying to juggle around with result sets \nand stuff them in comma-separated strings, but it works.\n\n\tHere it knows there's few rows ===> nested loop\n\nEXPLAIN SELECT a.* FROM annonces a JOIN (VALUES \n(0),(1),(2),(3),(4),(5),(6),(7)) AS v ON (a.id=v.column1);\n QUERY PLAN\n----------------------------------------------------------------------------------------\n Nested Loop (cost=0.00..66.73 rows=8 width=943)\n -> Values Scan on \"*VALUES*\" (cost=0.00..0.10 rows=8 width=4)\n -> Index Scan using annonces_pkey on annonces a (cost=0.00..8.32 \nrows=1 width=943)\n Index Cond: (a.id = \"*VALUES*\".column1)\n\n\tWith a million values it goes hash of course, etc.\n", "msg_date": "Tue, 02 Dec 2008 16:14:58 +0100", "msg_from": "PFC <[email protected]>", "msg_from_op": false, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "> Oh, I just thought about something, I don't remember in which version it \n> was added, but :\n>\n> EXPLAIN ANALYZE SELECT sum(column1) FROM (VALUES ...a million \n> ntegers... 
) AS v\n>\n> Postgres is perfectly happy with that ; it's either a bit slow (about 1 \n> second) or very fast depending on how you view things...\n\nI tried in 8.1.4\n\nselect * from (values (0)) xx\n\nbut got\n\nERROR: syntax error at or near \")\"\nSQL state: 42601\nCharacter: 26\n\nEven if this works this may be not solution: I need to apply distinct to \ntemporary table. Temporary table may contain duplicate values and without \nDISTINCT join produces invalid result.\nTemporary table itself is created from data from server tables, it is not \ngenerated from list.\n\nI can use\n\nSELECT dok.*\n FROM dok\nWHERE dokumnr IN (SELECT dokumnr FROM temptbl)\n\nbut this seems never use bitmap index scan in 8.1.4\n\nSadly, creating second temporary table from first temporary table specially \nfor this query seems to be only solution.\n\nWhen materialized row count will be added so that statistics is exact and \nselect count(*) from tbl runs fast ?\n\nAndrus. \n\n", "msg_date": "Tue, 2 Dec 2008 17:50:53 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "Often times, switching an inner subselect that requires a distinct to a group by on that column yields better results. In this case, the IN should be equivalent, so it probably will not help. This would look like:\n\nSELECT dok.*\n FROM dok\nJOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr ) x USING(dokumnr);\n\nWhether that hepls depends on how big dokumnr is and where the query bottleneck is. Note there are subtle differences between DISTINCT and GROUP BY with respect to nulls.\n\n________________________________________\nFrom: [email protected] [[email protected]] On Behalf Of Andrus [[email protected]]\nSent: Tuesday, December 02, 2008 7:50 AM\nTo: [email protected]; PFC\nSubject: Re: [PERFORM] analyzing intermediate query\n\n> Oh, I just thought about something, I don't remember in which version it\n> was added, but :\n>\n> EXPLAIN ANALYZE SELECT sum(column1) FROM (VALUES ...a million\n> ntegers... ) AS v\n>\n> Postgres is perfectly happy with that ; it's either a bit slow (about 1\n> second) or very fast depending on how you view things...\n\nI tried in 8.1.4\n\nselect * from (values (0)) xx\n\nbut got\n\nERROR: syntax error at or near \")\"\nSQL state: 42601\nCharacter: 26\n\nEven if this works this may be not solution: I need to apply distinct to\ntemporary table. Temporary table may contain duplicate values and without\nDISTINCT join produces invalid result.\nTemporary table itself is created from data from server tables, it is not\ngenerated from list.\n\nI can use\n\nSELECT dok.*\n FROM dok\nWHERE dokumnr IN (SELECT dokumnr FROM temptbl)\n\nbut this seems never use bitmap index scan in 8.1.4\n\nSadly, creating second temporary table from first temporary table specially\nfor this query seems to be only solution.\n\nWhen materialized row count will be added so that statistics is exact and\nselect count(*) from tbl runs fast ?\n\nAndrus.\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n", "msg_date": "Tue, 2 Dec 2008 09:50:03 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "Scott,\n\n>Often times, switching an inner subselect that requires a distinct to a \n>group by on that column yields better results. 
In this case, the IN should \n>be equivalent, so it probably will not help. This would look like:\n\nSELECT dok.*\n FROM dok\nJOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr ) x USING(dokumnr);\n\nThank you. This may be great idea.\nI changed my query to use GROUP BY instead of DISTINCT\n\n>Whether that hepls depends on how big dokumnr is and where the query \n>bottleneck is.\n\nI'm wondering how this can solve the issue when there is single or few \ndokumnr columns.\nPlanner still thinks that temptbl projection contains 1000 rows and uses seq \nscan instead of using bitmap index on dok table.\n\nI tried\n\nSELECT dok.*\n FROM dok\nJOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr ANALYZE ) x \nUSING(dokumnr);\n\nbut got error.\n\n> Note there are subtle differences between DISTINCT and GROUP BY with \n> respect to nulls.\n\ndokumnr is int type and is not null always.\n\nAndrus. \n\n", "msg_date": "Tue, 2 Dec 2008 20:58:43 +0200", "msg_from": "\"Andrus\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: analyzing intermediate query" }, { "msg_contents": "Have you tried running ANALYZE on the temp table before selecting out of it? That should give it the statistics necessary to correctly guess the output of a group by on a single column.\n\nANALYZE temptbl;\nSELECT dok.*\n FROM dok\nJOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr ) x USING(dokumnr);\n\n-----Original Message-----\nFrom: Andrus [mailto:[email protected]]\nSent: Tuesday, December 02, 2008 10:59 AM\nTo: Scott Carey; [email protected]; PFC\nSubject: Re: [PERFORM] analyzing intermediate query\n\nScott,\n\n>Often times, switching an inner subselect that requires a distinct to a\n>group by on that column yields better results. In this case, the IN should\n>be equivalent, so it probably will not help. This would look like:\n\nSELECT dok.*\n FROM dok\nJOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr ) x USING(dokumnr);\n\nThank you. This may be great idea.\nI changed my query to use GROUP BY instead of DISTINCT\n\n>Whether that hepls depends on how big dokumnr is and where the query\n>bottleneck is.\n\nI'm wondering how this can solve the issue when there is single or few\ndokumnr columns.\nPlanner still thinks that temptbl projection contains 1000 rows and uses seq\nscan instead of using bitmap index on dok table.\n\nI tried\n\nSELECT dok.*\n FROM dok\nJOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr ANALYZE ) x\nUSING(dokumnr);\n\nbut got error.\n\n> Note there are subtle differences between DISTINCT and GROUP BY with\n> respect to nulls.\n\ndokumnr is int type and is not null always.\n\nAndrus.\n\n", "msg_date": "Tue, 2 Dec 2008 13:21:57 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: analyzing intermediate query" } ]
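A consolidated sketch of the fix suggested in the thread above: materialize the key list in a temp table, ANALYZE it so the planner sees its real row count, and join through a de-duplicating subquery instead of IN (...). Table and column names follow the thread (dok, temptbl, dokumnr as int); the CREATE TEMP TABLE definition itself is assumed for illustration.

    -- assumed definition; in the thread the temp table is filled by the application
    CREATE TEMP TABLE temptbl (dokumnr int NOT NULL);
    -- ... INSERT the candidate dokumnr values here ...

    -- temp tables are not analyzed automatically, so give the planner real statistics
    ANALYZE temptbl;

    -- GROUP BY de-duplicates the keys, so the join stays correct even with duplicates
    SELECT dok.*
      FROM dok
      JOIN (SELECT dokumnr FROM temptbl GROUP BY dokumnr) x USING (dokumnr);

As noted later in the thread, on 8.3+ an IN (...) subquery or a VALUES list may plan just as well; the temp-table-plus-ANALYZE form is mainly a workaround for the 8.1-era planner.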
[ { "msg_contents": "\nHi. I have a problem on one of our production servers. A fairly \ncomplicated query is running, and the backend process is using 30 GB of \nRAM. The machine only has 32GB, and is understandably swapping like crazy. \nMy colleague is creating swap files as quickly as it can use them up.\n\nThe work_mem setting on this machine is 1000MB, running Postgres 8.3.0.\n\nHere is an excerpt from top:\n\ntop - 15:54:17 up 57 days, 6:49, 3 users, load average: 20.17, 21.29, 16.31\nTasks: 250 total, 2 running, 248 sleeping, 0 stopped, 0 zombie\nCpu(s): 3.1%us, 2.5%sy, 0.0%ni, 15.2%id, 78.7%wa, 0.0%hi, 0.5%si, 0.0%st\nMem: 32961364k total, 32898588k used, 62776k free, 22440k buffers\nSwap: 8096344k total, 8096344k used, 0k free, 6056472k cached\n\n PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND\n27192 postgres 18 0 30.6g 22g 1984 R 31 71.7 32:20.09 postgres: flymine production-release-15.0 192.168.128.84(33736) INSERT\n 650 root 10 -5 0 0 0 S 5 0.0 13:56.10 [kswapd2]\n 5513 postgres 15 0 130m 19m 364 S 4 0.1 1067:04 postgres: stats collector process\n 957 root 10 -5 0 0 0 D 1 0.0 1:39.13 [md2_raid1]\n 649 root 10 -5 0 0 0 D 1 0.0 14:14.95 [kswapd1]\n28599 root 15 0 0 0 0 D 1 0.0 0:01.25 [pdflush]\n 648 root 10 -5 0 0 0 S 0 0.0 15:10.68 [kswapd0]\n 2585 root 10 -5 0 0 0 D 0 0.0 67:15.89 [kjournald]\n\nThe query that is being run is an INSERT INTO table SELECT a fairly \ncomplex query.\n\nAny ideas why this is going so badly, and what I can do to solve it?\n\nMatthew\n\n-- \n First law of computing: Anything can go wro\n sig: Segmentation fault. core dumped.\n", "msg_date": "Wed, 3 Dec 2008 16:01:48 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": true, "msg_subject": "Postgres using more memory than it should" }, { "msg_contents": "In response to Matthew Wakeling <[email protected]>:\n> \n> Hi. I have a problem on one of our production servers. A fairly \n> complicated query is running, and the backend process is using 30 GB of \n> RAM. The machine only has 32GB, and is understandably swapping like crazy. \n> My colleague is creating swap files as quickly as it can use them up.\n> \n> The work_mem setting on this machine is 1000MB, running Postgres 8.3.0.\n\nIf your query it dealing with a lot of data, it could easily use 1G per\nsort operation. If there are a lot of sorts (more than 32) you'll end\nup with this problem.\n\n1G is probably too much memory to allocate for work_mem.\n\n-- \nBill Moran\nCollaborative Fusion Inc.\nhttp://people.collaborativefusion.com/~wmoran/\n\[email protected]\nPhone: 412-422-3463x4023\n\n****************************************************************\nIMPORTANT: This message contains confidential information and is\nintended only for the individual named. If the reader of this\nmessage is not an intended recipient (or the individual\nresponsible for the delivery of this message to an intended\nrecipient), please be advised that any re-use, dissemination,\ndistribution or copying of this message is prohibited. Please\nnotify the sender immediately by e-mail if you have received\nthis e-mail by mistake and delete this e-mail from your system.\nE-mail transmission cannot be guaranteed to be secure or\nerror-free as information could be intercepted, corrupted, lost,\ndestroyed, arrive late or incomplete, or contain viruses. 
The\nsender therefore does not accept liability for any errors or\nomissions in the contents of this message, which arise as a\nresult of e-mail transmission.\n****************************************************************\n", "msg_date": "Wed, 3 Dec 2008 11:14:30 -0500", "msg_from": "Bill Moran <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": ">\n> Hi. I have a problem on one of our production servers. A fairly\n> complicated query is running, and the backend process is using 30 GB of\n> RAM. The machine only has 32GB, and is understandably swapping like crazy.\n> My colleague is creating swap files as quickly as it can use them up.\n>\n> The work_mem setting on this machine is 1000MB, running Postgres 8.3.0.\n\nAre you aware that this is a per-session / per-sort settings? That means,\nif you have 10 sessions, each of them running query with 2 sort steps in\nthe plan, it may occupy up to 20 GB of RAM (if both sorts use the whole\n1GB of RAM).\n\nregards\nTomas\n\n", "msg_date": "Wed, 3 Dec 2008 17:25:12 +0100 (CET)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Wed, 3 Dec 2008, [email protected] wrote:\n>> Hi. I have a problem on one of our production servers. A fairly\n>> complicated query is running, and the backend process is using 30 GB of\n>> RAM. The machine only has 32GB, and is understandably swapping like crazy.\n>> My colleague is creating swap files as quickly as it can use them up.\n>>\n>> The work_mem setting on this machine is 1000MB, running Postgres 8.3.0.\n>\n> Are you aware that this is a per-session / per-sort settings? That means,\n> if you have 10 sessions, each of them running query with 2 sort steps in\n> the plan, it may occupy up to 20 GB of RAM (if both sorts use the whole\n> 1GB of RAM).\n\nQuite aware, thanks.\n\nHaving sent the process a SIGINT and inspected the logs, I now have a \nquery to explain. Looking at it, there is one single sort, and ten hash \noperations, which would equate to 10GB, not 30GB. What is more worrying is \nthat now that the query has been stopped, the backend process is still \nhanging onto the RAM.\n\nMatthew\n\n-- \n Failure is not an option. It comes bundled with your Microsoft product. \n -- Ferenc Mantfeld\n", "msg_date": "Wed, 3 Dec 2008 16:34:12 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Wed, Dec 3, 2008 at 9:34 AM, Matthew Wakeling <[email protected]> wrote:\n> On Wed, 3 Dec 2008, [email protected] wrote:\n>>>\n>>> Hi. I have a problem on one of our production servers. A fairly\n>>> complicated query is running, and the backend process is using 30 GB of\n>>> RAM. The machine only has 32GB, and is understandably swapping like\n>>> crazy.\n>>> My colleague is creating swap files as quickly as it can use them up.\n>>>\n>>> The work_mem setting on this machine is 1000MB, running Postgres 8.3.0.\n>>\n>> Are you aware that this is a per-session / per-sort settings? That means,\n>> if you have 10 sessions, each of them running query with 2 sort steps in\n>> the plan, it may occupy up to 20 GB of RAM (if both sorts use the whole\n>> 1GB of RAM).\n>\n> Quite aware, thanks.\n>\n> Having sent the process a SIGINT and inspected the logs, I now have a query\n> to explain. 
Looking at it, there is one single sort, and ten hash\n> operations, which would equate to 10GB, not 30GB. What is more worrying is\n> that now that the query has been stopped, the backend process is still\n> hanging onto the RAM.\n\nWhat's your setting for share_buffers, as that's likely what the\nbackend is holding onto.\n\nAlso, you should REALLY update to 8.3.5 as there are some nasty bugs\nfixed from 8.3.0 you don't want to run into. Who knows, you might be\nbeing bitten by one right now. Unlike other bits of software floating\naround, pgsql updates are bug fix / security fix only, with no major\ncode changes allowed, since those go into the next release which is\nusually ~1 year later anyway.\n", "msg_date": "Wed, 3 Dec 2008 09:48:14 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Wed, 3 Dec 2008, Scott Marlowe wrote:\n>> Having sent the process a SIGINT and inspected the logs, I now have a query\n>> to explain. Looking at it, there is one single sort, and ten hash\n>> operations, which would equate to 10GB, not 30GB. What is more worrying is\n>> that now that the query has been stopped, the backend process is still\n>> hanging onto the RAM.\n>\n> What's your setting for share_buffers, as that's likely what the\n> backend is holding onto.\n\nShared buffers are set at 500MB, which is what all the other backends are \nholding onto. It's just the one backend that is using 30GB. At the moment, \nit is being swapped out, but the system seems responsive. We'll restart \nthe whole lot some time in the middle of the night when noone minds.\n\n> Also, you should REALLY update to 8.3.5 as there are some nasty bugs\n> fixed from 8.3.0 you don't want to run into. Who knows, you might be\n> being bitten by one right now. Unlike other bits of software floating\n> around, pgsql updates are bug fix / security fix only, with no major\n> code changes allowed, since those go into the next release which is\n> usually ~1 year later anyway.\n\nIt's possible, although I didn't see any relevant memory leaks in the \nrelease notes. This is one of the only machines we have that has not been \nupgraded, and it is on our schedule. 
Because it is running a slightly old \nversion of RedHat Fedora, upgrading involves more horribleness than our \nsysadmin is willing to do on the fly with the server up.\n\nMatthew\n\n-- \n The email of the species is more deadly than the mail.\n", "msg_date": "Wed, 3 Dec 2008 16:59:43 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Wed, Dec 03, 2008 at 04:01:48PM +0000, Matthew Wakeling wrote:\n> The work_mem setting on this machine is 1000MB, running Postgres 8.3.0.\n\nCheck bug report from 2008-11-28, by Grzegorz Jaskiewicz:\nquery failed, not enough memory on 8.3.5\n\nhttp://archives.postgresql.org/pgsql-bugs/2008-11/msg00180.php\n\ndepesz\n\n-- \nLinkedin: http://www.linkedin.com/in/depesz / blog: http://www.depesz.com/\njid/gtalk: [email protected] / aim:depeszhdl / skype:depesz_hdl / gg:6749007\n", "msg_date": "Wed, 3 Dec 2008 18:53:47 +0100", "msg_from": "hubert depesz lubaczewski <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Wed, Dec 3, 2008 at 9:59 AM, Matthew Wakeling <[email protected]> wrote:\n> On Wed, 3 Dec 2008, Scott Marlowe wrote:\n>> Also, you should REALLY update to 8.3.5 as there are some nasty bugs\n>> fixed from 8.3.0 you don't want to run into. Who knows, you might be\n>> being bitten by one right now. Unlike other bits of software floating\n>> around, pgsql updates are bug fix / security fix only, with no major\n>> code changes allowed, since those go into the next release which is\n>> usually ~1 year later anyway.\n>\n> It's possible, although I didn't see any relevant memory leaks in the\n> release notes. This is one of the only machines we have that has not been\n> upgraded, and it is on our schedule. Because it is running a slightly old\n> version of RedHat Fedora, upgrading involves more horribleness than our\n> sysadmin is willing to do on the fly with the server up.\n\nThat makes absolutely no sense. If it's an in house built rpm, you\njust create a new one with the same .spec file, if it was built from\nsource it's a simple ./configure --youroptionshere ;make;make install.\n You need a new sysadmin.\n", "msg_date": "Wed, 3 Dec 2008 12:14:43 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Wed, 3 Dec 2008, hubert depesz lubaczewski wrote:\n> Check bug report from 2008-11-28, by Grzegorz Jaskiewicz:\n> query failed, not enough memory on 8.3.5\n>\n> http://archives.postgresql.org/pgsql-bugs/2008-11/msg00180.php\n\nThanks, that does explain everything. So workmem is not a hard limit on \nthe amount of memory used per hash. Once the planner has committed to \nusing a hash (and it can only use estimates to work out whether it will \nfit in workmem), then the execution will blindly go ahead and try and fit \nabsolutely everything in a hash in memory even if it doesn't fit in \nworkmem.\n\nI agree it would be nice to fix this, but I'm not sure how at the moment.\n\nMatthew\n\n-- \n To most people, solutions mean finding the answers. 
But to chemists,\n solutions are things that are still all mixed up.\n", "msg_date": "Thu, 4 Dec 2008 11:35:49 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres using more memory than it should" }, { "msg_contents": "On Thu, 4 Dec 2008, Matthew Wakeling wrote:\n>> http://archives.postgresql.org/pgsql-bugs/2008-11/msg00180.php\n>\n> Thanks, that does explain everything.\n\nOh right, yes. It explains everything *except* the fact that the backend \nis still holding onto all the RAM after the query is finished. Could the \nfact that we SIGINTed it in the middle of the query explain that at all?\n\nMatthew\n\n-- \n I'd try being be a pessimist, but it probably wouldn't work anyway.\n", "msg_date": "Thu, 4 Dec 2008 13:06:34 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres using more memory than it should" } ]
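The thread above hinges on work_mem being a per-sort/per-hash allowance rather than a per-backend cap, so one complex plan (or several concurrent sessions) can consume many multiples of it. A hedged sketch of the usual mitigation, keeping the server-wide default modest and raising it only around the single large statement; the table names here are hypothetical, not taken from the thread:

    -- conservative session/server default
    SET work_mem = '64MB';

    BEGIN;
    -- applies only within this transaction and reverts at COMMIT/ROLLBACK
    SET LOCAL work_mem = '512MB';
    INSERT INTO report_summary
    SELECT customer_id, sum(amount) AS total
      FROM big_fact_table
     GROUP BY customer_id;
    COMMIT;

A lower default also makes the planner less willing to choose an in-memory hash for a large input, which limits the damage when row estimates are badly off (the failure mode described in the bug report linked in the thread).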
[ { "msg_contents": "Hi. I have a longish collection of SQL statements stored in a file that I\nrun periodically via cron. Running this \"script\" takes a bit too long, even\nfor a cron job, and I would like to streamline it.\nIs there a way to tell Postgres to print out, after each SQL statement is\nexecuted, how long it took to execute?\n\nThanks!\n\nKynn\n\nHi.  I have a longish collection of SQL statements stored in a file that I run periodically via cron.  Running this \"script\" takes a bit too long, even for a cron job, and I would like to streamline it.\nIs there a way to tell Postgres to print out, after each SQL statement is executed, how long it took to execute?Thanks!\nKynn", "msg_date": "Wed, 3 Dec 2008 14:31:02 -0500", "msg_from": "\"Kynn Jones\" <[email protected]>", "msg_from_op": true, "msg_subject": "How to profile an \"SQL script\"?" }, { "msg_contents": "Kynn Jones <[email protected]> schrieb:\n\n> Hi. I have a longish collection of SQL statements stored in a file that I run\n> periodically via cron. Running this \"script\" takes a bit too long, even for a\n> cron job, and I would like to streamline it.\n> \n> Is there a way to tell Postgres to print out, after each SQL statement is\n> executed, how long it took to execute?\n\nDo you run this with psql? You can switch on timing-output, with\n\\timing. It displays after each statement the run-time for this\nstatement.\n\n\nAndreas\n-- \nReally, I'm not out to destroy Microsoft. That will just be a completely\nunintentional side effect. (Linus Torvalds)\n\"If I was god, I would recompile penguin with --enable-fly.\" (unknown)\nKaufbach, Saxony, Germany, Europe. N 51.05082�, E 13.56889�\n", "msg_date": "Wed, 3 Dec 2008 20:40:23 +0100", "msg_from": "Andreas Kretschmer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to profile an \"SQL script\"?" }, { "msg_contents": "Andreas Kretschmer wrote:\n> Kynn Jones <[email protected]> schrieb:\n> \n>> Hi. I have a longish collection of SQL statements stored in a file that I run\n>> periodically via cron. Running this \"script\" takes a bit too long, even for a\n>> cron job, and I would like to streamline it.\n>>\n>> Is there a way to tell Postgres to print out, after each SQL statement is\n>> executed, how long it took to execute?\n> \n> Do you run this with psql? You can switch on timing-output, with\n> \\timing. It displays after each statement the run-time for this\n> statement.\n\nSee also log_duration and log_min_duration settings.\n\n-- \n Heikki Linnakangas\n EnterpriseDB http://www.enterprisedb.com\n", "msg_date": "Wed, 03 Dec 2008 22:46:54 +0200", "msg_from": "Heikki Linnakangas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to profile an \"SQL script\"?" }, { "msg_contents": "Andreas, Heikki:\n\nThanks!\nKynn\n\nAndreas, Heikki:Thanks!Kynn", "msg_date": "Wed, 3 Dec 2008 19:03:00 -0500", "msg_from": "\"Kynn Jones\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: How to profile an \"SQL script\"?" } ]
[ { "msg_contents": "Hello,\n\nI'm having problems with the following bad performing select-statement\nin a trigger-function (on update before):\n\n ...\n for c in\n select id_country, sum(cost) as sum_cost\n from costs\n where id_user = p_id_user\n and id_state = 1\n and date(request) between p_begin and p_until\n group by id_country;\n loop\n ...\n end loop;\n ...\n\nExplain shows that the following existing partial index isn't used:\n\n CREATE INDEX ix_costs_user_state_date_0701\n ON costs\n USING btree(id_user, id_state, date(request))\n WHERE id_state = 1 AND date(request) >= '2007-01-01'::date AND\ndate(request) <= '2007-01-31'::date;\n\n\nThe funny thing is, that while executing the statement with type-casted\nstring-literals the index is used as expected:\n\n ...\n for c in\n select id_country, sum(cost) as sum_cost\n from costs\n where id_user = p_id_user\n and id_state = 1\n and date(request) between '2007-01-01'::date AND '2007-01-31'::date\n group by id_country;\n loop\n ...\n end loop;\n ...\n\nAny ideas?\n\n\nBest regards\n\nRainer Rogatzki (mailto:[email protected])\n", "msg_date": "Thu, 4 Dec 2008 14:32:44 +0100", "msg_from": "\"Rogatzki Rainer\" <[email protected]>", "msg_from_op": true, "msg_subject": "Trigger function, bad performance " }, { "msg_contents": "Rogatzki Rainer wrote:\n> I'm having problems with the following bad performing select-statement\n> in a trigger-function (on update before):\n> \n> ...\n> for c in\n> select id_country, sum(cost) as sum_cost\n> from costs\n> where id_user = p_id_user\n> and id_state = 1\n> and date(request) between p_begin and p_until\n> group by id_country;\n> loop\n> ...\n> end loop;\n> ...\n> \n> Explain shows that the following existing partial index isn't used:\n> \n> CREATE INDEX ix_costs_user_state_date_0701\n> ON costs\n> USING btree(id_user, id_state, date(request))\n> WHERE id_state = 1 AND date(request) >= '2007-01-01'::date AND\n> date(request) <= '2007-01-31'::date;\n> \n> \n> The funny thing is, that while executing the statement with \n> type-casted\n> string-literals the index is used as expected:\n> \n> ...\n> for c in\n> select id_country, sum(cost) as sum_cost\n> from costs\n> where id_user = p_id_user\n> and id_state = 1\n> and date(request) between '2007-01-01'::date AND \n> '2007-01-31'::date\n> group by id_country;\n> loop\n> ...\n> end loop;\n> ...\n> \n> Any ideas?\n\nThe problem is that \"p_begin\" and \"p_until\" are variables. 
Consequently PostgreSQL,\nwhen the function is run the first time, will prepare this statement:\n\n select id_country, sum(cost) as sum_cost\n from costs\n where id_user = $1\n and id_state = 1\n and date(request) between $2 and $3\n group by id_country;\n\nThat prepared statement will be reused for subsequent invocations of the trigger\nfunction, whiere the parameters will probably have different values.\n\nSo it cannot use the partial index.\n\nIf you want the index to be used, don't include \"date(request)\" in the WHERE clause.\n\nYours,\nLaurenz Albe\n", "msg_date": "Fri, 5 Dec 2008 09:24:10 +0100", "msg_from": "\"Albe Laurenz\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Trigger function, bad performance " }, { "msg_contents": "> Rogatzki Rainer wrote:\n> > I'm having problems with the following bad performing\nselect-statement \n> > in a trigger-function (on update before):\n> > \n> > ...\n> > for c in\n> > select id_country, sum(cost) as sum_cost\n> > from costs\n> > where id_user = p_id_user\n> > and id_state = 1\n> > and date(request) between p_begin and p_until\n> > group by id_country;\n> > loop\n> > ...\n> > end loop;\n> > ...\n> > \n> > Explain shows that the following existing partial index isn't used:\n> > \n> > CREATE INDEX ix_costs_user_state_date_0701\n> > ON costs\n> > USING btree(id_user, id_state, date(request))\n> > WHERE id_state = 1 AND date(request) >= '2007-01-01'::date AND\n> > date(request) <= '2007-01-31'::date;\n> > \n> > \n> > The funny thing is, that while executing the statement with \n> > type-casted string-literals the index is used as expected:\n> > \n> > ...\n> > for c in\n> > select id_country, sum(cost) as sum_cost\n> > from costs\n> > where id_user = p_id_user\n> > and id_state = 1\n> > and date(request) between '2007-01-01'::date AND \n> > '2007-01-31'::date\n> > group by id_country;\n> > loop\n> > ...\n> > end loop;\n> > ...\n> > \n> > Any ideas?\n\nAlbe Laurenz wrote:\n> The problem is that \"p_begin\" and \"p_until\" are variables.\nConsequently PostgreSQL, when the function is run the first time, will\nprepare this statement:\n> \n> select id_country, sum(cost) as sum_cost\n> from costs\n> where id_user = $1\n> and id_state = 1\n> and date(request) between $2 and $3\n> group by id_country;\n> \n> That prepared statement will be reused for subsequent invocations of\nthe trigger function, whiere the parameters will probably have different\nvalues.\n> \n> So it cannot use the partial index.\n> \n> If you want the index to be used, don't include \"date(request)\" in the\nWHERE clause.\n> \n> Yours,\n> Laurenz Albe\n\nHello Laurenz,\n\nthank you for your analysis!\n\nUnfortunately your proposal is no option for me, since I do have to\ninclude the WHERE clause in both index and procedure.\n\nBy the way: The parameters in the WHERE clause (p_begin,p_until) come\nfrom a RECORD which is filled before via SELECT INTO with begin and end\ntablefields of the type date like the following:\n\n my_record RECORD;\n ...\n select into my_record p_begin, p_until\n from accounting_interval\n where id = 1;\n\nI omitted this information in my first posting to make it easier to\nread.\n\nIn fact I extracted the bad performing statement to let pgadmin explain\nand the same effect shows:\n\n -- Example with bad performance since index isn't used\n explain \n select c.id_country, sum(c.cost) as sum_cost\n from costs c, accounting_interval a\n where c.id_user = 123\n and c.id_state = 1\n and a.id = 1\n and date(c.request) between a.p_begin and 
a.p_until\n group by id_country;\n\n -- Example with invoked index (100 times better performance)\n explain \n select c.id_country, sum(c.cost) as sum_cost\n from costs c\n where c.id_user = 123\n and c.id_state = 1\n and date(c.request) between '2007-01-01'::date and '2007-01-31'::date\n group by id_country;\n\nHere I cannot see why statement preparation has an effect at all.\n\nApart from this I don't really understand why statement preparation\ncombined with parameters in functions prevent index invocation.\nEspecially since p_id_user is a parameter as well which doesn't prevent\nthe usage of another existing index on costs.id_user and costs.id_state.\n\n\nSo you see me still clueless :O)\n\n\nBest regards\nRainer Rogatzki\n", "msg_date": "Fri, 5 Dec 2008 11:41:11 +0100", "msg_from": "\"Rogatzki Rainer\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Trigger function, bad performance " }, { "msg_contents": "am Fri, dem 05.12.2008, um 11:41:11 +0100 mailte Rogatzki Rainer folgendes:\n> -- Example with bad performance since index isn't used\n> explain \n> select c.id_country, sum(c.cost) as sum_cost\n> from costs c, accounting_interval a\n> where c.id_user = 123\n> and c.id_state = 1\n> and a.id = 1\n> and date(c.request) between a.p_begin and a.p_until\n> group by id_country;\n> \n> -- Example with invoked index (100 times better performance)\n> explain \n> select c.id_country, sum(c.cost) as sum_cost\n> from costs c\n> where c.id_user = 123\n> and c.id_state = 1\n> and date(c.request) between '2007-01-01'::date and '2007-01-31'::date\n> group by id_country;\n> \n> Here I cannot see why statement preparation has an effect at all.\n\nThe planner don't know the parameters at compile-time. Because of this\nfakt, the planner choose a other plan (a seq-scan).\n\nYou can try to use execute 'your query'. In this case the planner\ninvestigate a new plan, and (maybe) with the index.\n\n\nAndreas\n-- \nAndreas Kretschmer\nKontakt: Heynitz: 035242/47150, D1: 0160/7141639 (mehr: -> Header)\nGnuPG-ID: 0x3FFF606C, privat 0x7F4584DA http://wwwkeys.de.pgp.net\n", "msg_date": "Fri, 5 Dec 2008 12:08:02 +0100", "msg_from": "\"A. Kretschmer\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Trigger function, bad performance" }, { "msg_contents": "> > Rogatzki wrote:\n> > -- Example with bad performance since index isn't used\n> > explain \n> > select c.id_country, sum(c.cost) as sum_cost\n> > from costs c, accounting_interval a\n> > where c.id_user = 123\n> > and c.id_state = 1\n> > and a.id = 1\n> > and date(c.request) between a.p_begin and a.p_until\n> > group by id_country;\n> > \n> > -- Example with invoked index (100 times better performance)\n> > explain \n> > select c.id_country, sum(c.cost) as sum_cost\n> > from costs c\n> > where c.id_user = 123\n> > and c.id_state = 1\n> > and date(c.request) between '2007-01-01'::date and\n'2007-01-31'::date\n> > group by id_country;\n> > \n> > Here I cannot see why statement preparation has an effect at all.\n> \n> Andreas Kretschmer wrote:\n> The planner don't know the parameters at compile-time. Because of this\nfakt, the planner choose a other plan (a seq-scan).\n> \n> You can try to use execute 'your query'. 
In this case the planner\ninvestigate a new plan, and (maybe) with the index.\n> \n> \n> Andreas\n> --\n> Andreas Kretschmer\n> Kontakt: Heynitz: 035242/47150, D1: 0160/7141639 (mehr: -> Header)\n> GnuPG-ID: 0x3FFF606C, privat 0x7F4584DA http://wwwkeys.de.pgp.net\nHello Andreas,\n\nyour hint did the trick - thank you very much!\n\nAfter using \"execute 'my query'\" the index is used as expected.\n\nThough I still wonder why the poor performance occurred since november,\nwithout having done any relevant changes to neither postgres nor db\nmodel (including index, procedures, ...) as far as I know.\n\nAnyway - I'm deeply content with your solution.\n\n\nBest regards\n\nRainer Rogatzki (mailto:[email protected])\n", "msg_date": "Fri, 5 Dec 2008 14:23:33 +0100", "msg_from": "\"Rogatzki Rainer\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Trigger function, bad performance" }, { "msg_contents": "am Fri, dem 05.12.2008, um 14:23:33 +0100 mailte Rogatzki Rainer folgendes:\n> Hello Andreas,\n> \n> your hint did the trick - thank you very much!\n\nglad to help you...\n\n\nAndreas\n-- \nAndreas Kretschmer\nKontakt: Heynitz: 035242/47150, D1: 0160/7141639 (mehr: -> Header)\nGnuPG-ID: 0x3FFF606C, privat 0x7F4584DA http://wwwkeys.de.pgp.net\n", "msg_date": "Fri, 5 Dec 2008 14:36:47 +0100", "msg_from": "\"A. Kretschmer\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Trigger function, bad performance" }, { "msg_contents": "Rogatzki Rainer wrote:\n> > > in a trigger-function (on update before):\n> > > \n> > > ...\n> > > for c in\n> > > select id_country, sum(cost) as sum_cost\n> > > from costs\n> > > where id_user = p_id_user\n> > > and id_state = 1\n> > > and date(request) between p_begin and p_until\n> > > group by id_country;\n> > > loop\n> > > ...\n> > > end loop;\n> > > ...\n> > > \n> > > Explain shows that the following existing partial index isn't used:\n> > > \n> > > CREATE INDEX ix_costs_user_state_date_0701\n> > > ON costs\n> > > USING btree(id_user, id_state, date(request))\n> > > WHERE id_state = 1 AND date(request) >= '2007-01-01'::date AND\n> > > date(request) <= '2007-01-31'::date;\n> > > \n> >\n> > The problem is that \"p_begin\" and \"p_until\" are variables.\n> > \n> > So it cannot use the partial index.\n> > \n> > If you want the index to be used, don't include \"date(request)\" in the\n> > WHERE clause.\n> \n> Unfortunately your proposal is no option for me, since I do have to\n> include the WHERE clause in both index and procedure.\n\nYou have been ordered to use a partial index?\n\n> Apart from this I don't really understand why statement preparation\n> combined with parameters in functions prevent index invocation.\n> Especially since p_id_user is a parameter as well which doesn't prevent\n> the usage of another existing index on costs.id_user and costs.id_state.\n\nThe connection with parameters is by chance.\n\nThe main thing is that both \"p_begin\" and \"p_until\" are variables.\n\nAndreas Kretschmer gave you the advice you'll want: use dynamic SQL.\n\nYours,\nLaurenz Albe\n", "msg_date": "Fri, 5 Dec 2008 16:16:37 +0100", "msg_from": "\"Albe Laurenz\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Trigger function, bad performance " }, { "msg_contents": "Laurenz Albe wrote:\n> You have been ordered to use a partial index?\n> \n> > Apart from this I don't really understand why statement preparation \n> > combined with parameters in functions prevent index invocation.\n> > Especially since p_id_user is a parameter as well which 
doesn't \n> > prevent the usage of another existing index on costs.id_user and\ncosts.id_state.\n> \n> The connection with parameters is by chance.\n> \n> The main thing is that both \"p_begin\" and \"p_until\" are variables.\n> \n> Andreas Kretschmer gave you the advice you'll want: use dynamic SQL.\n> \n> Yours,\n> Laurenz Albe\nWell it was no 'order' to use a partial index but it was necessary,\nsince the table is vastly filled with log-entries that have to persist\nover 4 years for documentation. Since old entries are accessed and\nchanged less often we decided to introduce partial indexes for a better\nperformance. This prooved to speed up especially frequently used monthly\nreports.\n\nNow I understand (thanks to Andreas Kretschmer and you) that at\nexecution planning time postgres cannot decide what partial index to use\nfor following procedure calls and thus doesn't invoke it.\n\nAfter implementing Andreas' proposal (execute 'my statement') the\nexpected index is used and performance is 125 time better :O))\n\n\nBest regards\nRainer Rogatzki\n\n", "msg_date": "Fri, 5 Dec 2008 16:41:14 +0100", "msg_from": "\"Rogatzki Rainer\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Trigger function, bad performance " } ]
[ { "msg_contents": "Hi,\n\nI am trying to restore a table out of a dump, and I get an 'out of\nmemory' error.\n\nThe table I want to restore is 5GB big.\n\nHere is the exact message :\n\nadmaxg@goules:/home/backup-sas$ pg_restore -F c -a -d axabas -t cabmnt \naxabas.dmp\npg_restore: [archiver (db)] Error while PROCESSING TOC:\npg_restore: [archiver (db)] Error from TOC entry 5492; 0 43701 TABLE \nDATA cabmnt axabas\npg_restore: [archiver (db)] COPY failed: ERROR: out of memory\nDETAIL: Failed on request of size 40.\nCONTEXT: COPY cabmnt, line 9038995: \"FHSJ CPTGEN RE \n200806_004 6.842725E7 6.842725E7 \\N 7321100 1101\n\\N \n00016 \\N \\N \\N \\N \\N \\N -1278.620...\"\nWARNING: errors ignored on restore: 1\n\nLooking at the os level, the process is effectively eating all memory\n(incl. swap), that is around 24 GB...\n\nSo, here is my question : is pg_restore supposed to eat all memory ? and\nis there something I can do to prevent that ?\n\nThanks,\n\nFranck\n\n\n", "msg_date": "Thu, 04 Dec 2008 15:08:35 +0100", "msg_from": "Franck Routier <[email protected]>", "msg_from_op": true, "msg_subject": "pg_restore : out of memory" }, { "msg_contents": "Franck Routier wrote:\n> Hi,\n> \n> I am trying to restore a table out of a dump, and I get an 'out of\n> memory' error.\n\n- Operating system?\n- PostgreSQL version?\n- PostgreSQL configuration - work_mem, shared_buffers, etc?\n\n> So, here is my question : is pg_restore supposed to eat all memory ?\n\nNo, but PostgreSQL's backends will if you tell them there's more memory\navailable than there really is.\n\n> and\n> is there something I can do to prevent that ?\n\nAdjust your PostgreSQL configuration to ensure that shared_buffers,\nwork_mem, etc are appropriate for the system and don't tell Pg to use\nmore memory than is actually available.\n\npg_restore isn't using up your memory. The PostgreSQL backend is.\n\n--\nCraig Ringer\n", "msg_date": "Fri, 19 Dec 2008 10:50:43 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: pg_restore : out of memory" }, { "msg_contents": "On Thu, Dec 4, 2008 at 7:38 PM, Franck Routier <[email protected]>wrote:\n\n> Hi,\n>\n> I am trying to restore a table out of a dump, and I get an 'out of\n> memory' error.\n>\n> The table I want to restore is 5GB big.\n>\n> Here is the exact message :\n>\n> admaxg@goules:/home/backup-sas$ pg_restore -F c -a -d axabas -t cabmnt\n> axabas.dmp\n> pg_restore: [archiver (db)] Error while PROCESSING TOC:\n> pg_restore: [archiver (db)] Error from TOC entry 5492; 0 43701 TABLE\n> DATA cabmnt axabas\n> pg_restore: [archiver (db)] COPY failed: ERROR: out of memory\n> DETAIL: Failed on request of size 40.\n> CONTEXT: COPY cabmnt, line 9038995: \"FHSJ CPTGEN RE\n> 200806_004 6.842725E7 6.842725E7 \\N 7321100 1101\n> \\N\n> 00016 \\N \\N \\N \\N \\N \\N -1278.620...\"\n> WARNING: errors ignored on restore: 1\n>\n> Looking at the os level, the process is effectively eating all memory\n> (incl. swap), that is around 24 GB...\n>\nhow are you ensuring that it eats up all memory..\n\npost those outputs ?\n\n>\n> So, here is my question : is pg_restore supposed to eat all memory ? 
and\n> is there something I can do to prevent that ?\n>\n> Thanks,\n>\n> Franck\n>\n>\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n\nOn Thu, Dec 4, 2008 at 7:38 PM, Franck Routier <[email protected]> wrote:\nHi,\n\nI am trying to restore a table out of a dump, and I get an 'out of\nmemory' error.\n\nThe table I want to restore is 5GB big.\n\nHere is the exact message :\n\nadmaxg@goules:/home/backup-sas$ pg_restore -F c -a -d axabas -t cabmnt\naxabas.dmp\npg_restore: [archiver (db)] Error while PROCESSING TOC:\npg_restore: [archiver (db)] Error from TOC entry 5492; 0 43701 TABLE\nDATA cabmnt axabas\npg_restore: [archiver (db)] COPY failed: ERROR:  out of memory\nDETAIL:  Failed on request of size 40.\nCONTEXT:  COPY cabmnt, line 9038995: \"FHSJ    CPTGEN    RE\n200806_004    6.842725E7    6.842725E7    \\N    7321100    1101\n\\N\n00016    \\N    \\N    \\N    \\N    \\N    \\N    -1278.620...\"\nWARNING: errors ignored on restore: 1\n\nLooking at the os level, the process is effectively eating all memory\n(incl. swap), that is around 24 GB...\nhow are you ensuring that it eats up all memory..post those outputs ? \n\nSo, here is my question : is pg_restore supposed to eat all memory ? and\nis there something I can do to prevent that ?\n\nThanks,\n\nFranck\n\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Fri, 19 Dec 2008 13:30:36 +0530", "msg_from": "\"sathiya psql\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: pg_restore : out of memory" } ]
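As the replies note, the first thing to rule out is a configuration that promises the backends more memory than the 24 GB machine really has. A quick way to see what the restoring session is allowed to use (a sketch; the sensible values depend on the box, so none are suggested here):

    SELECT name, setting, unit
      FROM pg_settings
     WHERE name IN ('shared_buffers', 'work_mem',
                    'maintenance_work_mem', 'max_connections');

If those look reasonable, a common next suspect for an out-of-memory failure during the COPY phase of a data-only restore (not confirmed in this thread) is the table's own foreign keys or AFTER triggers, since each restored row queues a pending trigger event in backend memory; dropping or disabling them for the load and re-adding them afterwards avoids that growth.
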
[ { "msg_contents": "Database performance geeks,\n\nWe have a number of patches pending for 8.4 designed to improve database \nperformance in a variety of circumstances. We need as many users as possible \nto build test versions of PostgreSQL with these patches, and test how well \nthey perform, and report back in some detail.\n\nParticularly, users with unusual hardware architectures (16 or more cores, \nARM, Power, SSD, NFS-mounted data) or operating systems (Solaris, OSX, \nWindows-64) are really helpful. Testers need to be familiar with building \nPostgreSQL from source and patching it, as well as basic PostgreSQL Tuning \n(except for the Wizard Patch) and have some kind of performance test \navailable, ideally something based on your own application use.\n\nIf you are going to use pgbench to test, *please* read Greg Smith's notes \nfirst: \nhttp://www.westnet.com/~gsmith/gregsmith/content/postgresql/pgbench-scaling.htm\n\nThe Wiki (http://wiki.postgresql.org/wiki/CommitFest_2008-11) has a full list \nof patches, but below are the ones in particular we could use help with.\n\nYou *do* need to read the entire mail threads which I link to below to \nunderstand the patches. Thanks for your help!\n\nProposal of PITR performance improvement (Koichi Suzuki):\nhttp://archives.postgresql.org/message-id/[email protected]\nhttp://archives.postgresql.org/message-id/[email protected]\n\nSimple postgresql.conf wizard\nhttp://archives.postgresql.org/message-id/[email protected]\nhttp://archives.postgresql.org/message-id/[email protected]\n\nImprove Performance of Multi-Batch Hash Join for Skewed Data Sets\nhttp://archives.postgresql.org/message-id/6EEA43D22289484890D119821101B1DF2C1683@exchange20.mercury.ad.ubc.ca\nhttp://archives.postgresql.org/message-id/[email protected]\nhttp://archives.postgresql.org/message-id/[email protected]\n\nWindow Functions\nhttp://archives.postgresql.org/message-id/[email protected]\nhttp://archives.postgresql.org/message-id/[email protected]\nhttp://archives.postgresql.org/message-id/839FB90FF49D4120B7107ED0D7B3E5B6@amd64\n\nparallel restore\n(especially need to test on 16+ cores)\nhttp://archives.postgresql.org/message-id/[email protected]\n\nB-Tree emulation for GIN\nhttp://archives.postgresql.org/message-id/[email protected]\nhttp://archives.postgresql.org/message-id/[email protected]\n\nAlso, the following patches currently still have bugs, but when the bugs are \nfixed I'll be looking for performance testers, so please either watch the \nwiki or watch this space:\n\n-- Block-level CRC checks (Alvaro Herrera)\n-- Auto Partitioning Patch (Nikhil Sontakke)\n-- posix_fadvise (Gregory Stark)\n-- Hash Join-Filter Pruning using Bloom Filters\n-- On-disk bitmap indexes\n\nPlease report your results, with the patchname in the subject line, on this \nmailing list or on -hackers. Thank you, and your help will get a better 8.4 \nout sooner.\n\n-- \nJosh Berkus\nPostgreSQL\nSan Francisco\n", "msg_date": "Sun, 7 Dec 2008 11:38:01 -0800", "msg_from": "Josh Berkus <[email protected]>", "msg_from_op": true, "msg_subject": "Need help with 8.4 Performance Testing" }, { "msg_contents": "Josh,\n\nSince a number of these performance patches use our hash function, would\nit make sense to apply the last patch to upgrade the hash function mix()\nto the two function mix()/final()? Since the additional changes increases\nthe performance of the hash function by another 50% or so. 
My two cents.\n\nRegards,\nKen\n\nOn Sun, Dec 07, 2008 at 11:38:01AM -0800, Josh Berkus wrote:\n> Database performance geeks,\n> \n> We have a number of patches pending for 8.4 designed to improve database \n> performance in a variety of circumstances. We need as many users as possible \n> to build test versions of PostgreSQL with these patches, and test how well \n> they perform, and report back in some detail.\n> \n> Particularly, users with unusual hardware architectures (16 or more cores, \n> ARM, Power, SSD, NFS-mounted data) or operating systems (Solaris, OSX, \n> Windows-64) are really helpful. Testers need to be familiar with building \n> PostgreSQL from source and patching it, as well as basic PostgreSQL Tuning \n> (except for the Wizard Patch) and have some kind of performance test \n> available, ideally something based on your own application use.\n> \n> If you are going to use pgbench to test, *please* read Greg Smith's notes \n> first: \n> http://www.westnet.com/~gsmith/gregsmith/content/postgresql/pgbench-scaling.htm\n> \n> The Wiki (http://wiki.postgresql.org/wiki/CommitFest_2008-11) has a full list \n> of patches, but below are the ones in particular we could use help with.\n> \n> You *do* need to read the entire mail threads which I link to below to \n> understand the patches. Thanks for your help!\n> \n> Proposal of PITR performance improvement (Koichi Suzuki):\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n> \n> Simple postgresql.conf wizard\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n> \n> Improve Performance of Multi-Batch Hash Join for Skewed Data Sets\n> http://archives.postgresql.org/message-id/6EEA43D22289484890D119821101B1DF2C1683@exchange20.mercury.ad.ubc.ca\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n> \n> Window Functions\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/839FB90FF49D4120B7107ED0D7B3E5B6@amd64\n> \n> parallel restore\n> (especially need to test on 16+ cores)\n> http://archives.postgresql.org/message-id/[email protected]\n> \n> B-Tree emulation for GIN\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n> \n> Also, the following patches currently still have bugs, but when the bugs are \n> fixed I'll be looking for performance testers, so please either watch the \n> wiki or watch this space:\n> \n> -- Block-level CRC checks (Alvaro Herrera)\n> -- Auto Partitioning Patch (Nikhil Sontakke)\n> -- posix_fadvise (Gregory Stark)\n> -- Hash Join-Filter Pruning using Bloom Filters\n> -- On-disk bitmap indexes\n> \n> Please report your results, with the patchname in the subject line, on this \n> mailing list or on -hackers. 
Thank you, and your help will get a better 8.4 \n> out sooner.\n> \n> -- \n> Josh Berkus\n> PostgreSQL\n> San Francisco\n> \n> -- \n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n> \n", "msg_date": "Sun, 7 Dec 2008 20:08:56 -0600", "msg_from": "Kenneth Marshall <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sun, Dec 7, 2008 at 12:38 PM, Josh Berkus <[email protected]> wrote:\n\nI've got a pair of 8 core opteron 16 drive machines I would like to\ntest it on. If nothing else I'll just take queries from the log to\nrun against an 8.4 install. It'll have to be late at night though...\n\n> If you are going to use pgbench to test, *please* read Greg Smith's notes\n> first:\n> http://www.westnet.com/~gsmith/gregsmith/content/postgresql/pgbench-scaling.htm\n\nWhen I last used pgbench I wanted to test it with an extremely large\ndataset, but it maxes out at -s 4xxx or so, and that's only in the\n40Gigabyte range. Is the limit raised for the pgbench included in\ncontrib in 8.4? I'm guessing it's an arbitrary limit.\n", "msg_date": "Sun, 7 Dec 2008 22:40:17 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "\nI'll be glad to test the patches using pgbench on my POWER4 box \nrunning AIX 5.3 and an IA64 that runs HP-UX 11.31.\n\nDerek\n\nOn Dec 7, 2008, at 2:38 PM, Josh Berkus <[email protected]> wrote:\n\n> Database performance geeks,\n>\n> We have a number of patches pending for 8.4 designed to improve \n> database\n> performance in a variety of circumstances. We need as many users as \n> possible\n> to build test versions of PostgreSQL with these patches, and test \n> how well\n> they perform, and report back in some detail.\n>\n> Particularly, users with unusual hardware architectures (16 or more \n> cores,\n> ARM, Power, SSD, NFS-mounted data) or operating systems (Solaris, OSX,\n> Windows-64) are really helpful. Testers need to be familiar with \n> building\n> PostgreSQL from source and patching it, as well as basic PostgreSQL \n> Tuning\n> (except for the Wizard Patch) and have some kind of performance test\n> available, ideally something based on your own application use.\n>\n> If you are going to use pgbench to test, *please* read Greg Smith's \n> notes\n> first:\n> http://www.westnet.com/~gsmith/gregsmith/content/postgresql/pgbench-scaling.htm\n>\n> The Wiki (http://wiki.postgresql.org/wiki/CommitFest_2008-11) has a \n> full list\n> of patches, but below are the ones in particular we could use help \n> with.\n>\n> You *do* need to read the entire mail threads which I link to below to\n> understand the patches. 
Thanks for your help!\n>\n> Proposal of PITR performance improvement (Koichi Suzuki):\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n>\n> Simple postgresql.conf wizard\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n>\n> Improve Performance of Multi-Batch Hash Join for Skewed Data Sets\n> http://archives.postgresql.org/message-id/6EEA43D22289484890D119821101B1DF2C1683@exchange20.mercury.ad.ubc.ca\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n>\n> Window Functions\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/839FB90FF49D4120B7107ED0D7B3E5B6@amd64\n>\n> parallel restore\n> (especially need to test on 16+ cores)\n> http://archives.postgresql.org/message-id/[email protected]\n>\n> B-Tree emulation for GIN\n> http://archives.postgresql.org/message-id/[email protected]\n> http://archives.postgresql.org/message-id/[email protected]\n>\n> Also, the following patches currently still have bugs, but when the \n> bugs are\n> fixed I'll be looking for performance testers, so please either \n> watch the\n> wiki or watch this space:\n>\n> -- Block-level CRC checks (Alvaro Herrera)\n> -- Auto Partitioning Patch (Nikhil Sontakke)\n> -- posix_fadvise (Gregory Stark)\n> -- Hash Join-Filter Pruning using Bloom Filters\n> -- On-disk bitmap indexes\n>\n> Please report your results, with the patchname in the subject line, \n> on this\n> mailing list or on -hackers. Thank you, and your help will get a \n> better 8.4\n> out sooner.\n>\n> -- \n> Josh Berkus\n> PostgreSQL\n> San Francisco\n>\n> -- \n> Sent via pgsql-performance mailing list ([email protected] \n> )\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n", "msg_date": "Mon, 8 Dec 2008 01:45:10 -0500", "msg_from": "Derek Lewis <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sun, 7 Dec 2008, Scott Marlowe wrote:\n\n> When I last used pgbench I wanted to test it with an extremely large\n> dataset, but it maxes out at -s 4xxx or so, and that's only in the\n> 40Gigabyte range. Is the limit raised for the pgbench included in\n> contrib in 8.4? I'm guessing it's an arbitrary limit.\n\nThere's no artificial limit, just ones that result from things like \ninteger overflow. I don't think has been an issue so far because pgbench \nbecomes seek limited and stops producing interesting results once the \ndatabase exceeds the sum of all available caching, which means you'd need \nmore than 32GB of RAM in the system running pgbench before this is an \nissue. Which happens to be the largest size system I've ever ran it on...\n\nI'd expect this statement around line 1060 of pgbench.c \nto overflow first:\n\n for (i = 0; i < naccounts * scale; i++)\n\nWhere i is an integer, naccounts=100,000 , and scale is what you input. \nThat will overflow a signed 32-bit integer at a scale of 2147. 
If you had \ntests that failed at twice that, I wonder if you were actually executing \nagainst a weird data set for scales of (2148..4294).\n\nIt's not completely trivial to fix (I just tried), the whole loop needs to \nbe restructured to run against scale and 100,000 separately while keeping \nthe current progress report intact. I'll take a shot at fixing it \ncorrectly, this is a bug that should be corrected before 8.4 goes out. I \nguarantee that version will be used on systems with 64GB of RAM where this \nmatters.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 8 Dec 2008 15:15:36 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Mon, Dec 8, 2008 at 1:15 PM, Greg Smith <[email protected]> wrote:\n> On Sun, 7 Dec 2008, Scott Marlowe wrote:\n>\n>> When I last used pgbench I wanted to test it with an extremely large\n>> dataset, but it maxes out at -s 4xxx or so, and that's only in the\n>> 40Gigabyte range. Is the limit raised for the pgbench included in\n>> contrib in 8.4? I'm guessing it's an arbitrary limit.\n>\n> There's no artificial limit, just ones that result from things like integer\n> overflow. I don't think has been an issue so far because pgbench becomes\n> seek limited and stops producing interesting results once the database\n> exceeds the sum of all available caching, which means you'd need more than\n> 32GB of RAM in the system running pgbench before this is an issue. Which\n> happens to be the largest size system I've ever ran it on...\n\nWell, I have 32 Gig of ram and wanted to test it against a database\nthat was at least twice as big as memory. I'm not sure why you'd\nconsider the results uninteresting though, I'd think knowing how the\ndb will perform with a very large transactional store that is twice or\nmore the size of memory would be when it starts getting interesting.\n", "msg_date": "Mon, 8 Dec 2008 13:31:29 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Mon, 8 Dec 2008, Scott Marlowe wrote:\n\n> Well, I have 32 Gig of ram and wanted to test it against a database\n> that was at least twice as big as memory. I'm not sure why you'd\n> consider the results uninteresting though, I'd think knowing how the\n> db will perform with a very large transactional store that is twice or\n> more the size of memory would be when it starts getting interesting.\n\nIf you refer back to the picture associated with the link Josh suggested:\n\nhttp://www.westnet.com/~gsmith/gregsmith/content/postgresql/scaling.png\n\nYou'll see that pgbench results hit a big drop once you clear the amount \nof memory being used to cache the accounts table. This curve isn't unique \nto what I did; I've seen the same basic shape traced out by multiple other \ntesters on different hardware, independent of me. It just expands to the \nright based on the amount of RAM available.\n\nAll I was trying to suggest was that even if you've got 32GB of RAM, you \nmay already be into the more flat right section of that curve even with a \n40GB database. That was a little system with 1GB of RAM+256MB of disk \ncache, and it was already toast at 750MB of database. 
Once you've gotten \na database big enough to reach that point, results degrade to something \nrelated to database seeks/second rather than anything else, and further \nincreases don't give you that much more info. This is why I'm not sure if \nthe current limit really matters with 32GB of RAM, but it sure will be \nimportant if you want any sort of useful pgbench results at 64GB.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 8 Dec 2008 17:52:30 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Mon, Dec 8, 2008 at 5:52 PM, Greg Smith <[email protected]> wrote:\n> On Mon, 8 Dec 2008, Scott Marlowe wrote:\n>\n>> Well, I have 32 Gig of ram and wanted to test it against a database\n>> that was at least twice as big as memory. I'm not sure why you'd\n>> consider the results uninteresting though, I'd think knowing how the\n>> db will perform with a very large transactional store that is twice or\n>> more the size of memory would be when it starts getting interesting.\n>\n> If you refer back to the picture associated with the link Josh suggested:\n>\n> http://www.westnet.com/~gsmith/gregsmith/content/postgresql/scaling.png\n>\n> You'll see that pgbench results hit a big drop once you clear the amount of\n> memory being used to cache the accounts table. This curve isn't unique to\n> what I did; I've seen the same basic shape traced out by multiple other\n> testers on different hardware, independent of me. It just expands to the\n> right based on the amount of RAM available.\n>\n> All I was trying to suggest was that even if you've got 32GB of RAM, you may\n> already be into the more flat right section of that curve even with a 40GB\n> database. That was a little system with 1GB of RAM+256MB of disk cache, and\n> it was already toast at 750MB of database. Once you've gotten a database\n> big enough to reach that point, results degrade to something related to\n> database seeks/second rather than anything else, and further increases don't\n> give you that much more info. This is why I'm not sure if the current limit\n> really matters with 32GB of RAM, but it sure will be important if you want\n> any sort of useful pgbench results at 64GB.\n\n\nI wonder if shared_buffers has any effect on how far you can go before\nyou hit the 'tipping point'.\n\nmerlin\n", "msg_date": "Mon, 8 Dec 2008 20:23:16 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Mon, 8 Dec 2008, Merlin Moncure wrote:\n\n> I wonder if shared_buffers has any effect on how far you can go before\n> you hit the 'tipping point'.\n\nIf your operating system has any reasonable caching itself, not so much at \nfirst. As long as the index on the account table fits in shared_buffers, \neven the basic sort of caching logic an OS uses is perfectly functional \nfor swapping the individual pages of the account table in and out, the \nmain limiting factor on pgbench performance.\n\nThere is a further out tipping point I've theorized about but not really \nexplored: the point where even the database indexes stop fitting in \nmemory usefully. 
As you get closer to that, I'd expect that the clock \nsweep algorithm used by shared_buffers should make it a bit more likely \nthat those important blocks would hang around usefully if you put them \nthere, rather than giving most of the memory to the OS to manage.\n\nSince the data is about 7.5X as large as the indexes, that point is way \nfurther out than the basic bottlenecks. And if you graph pgbench results \non a scale that usefully shows the results for in-memory TPS scores, you \ncan barely see that part of the chart a well. One day I may get to \nmapping that out better, and if I do it will be interesting to see if the \nbalance of shared_buffers to OS cache works the way I expect. I was \nwaiting until I finished the pgtune program for that, that's building some \nof the guts I wanted to make it easier to tweak postgresql.conf settings \nprogrammatically in between pgbench runs.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 8 Dec 2008 23:00:38 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "\nGreg Smith <[email protected]> writes:\n\n> On Mon, 8 Dec 2008, Merlin Moncure wrote:\n>\n>> I wonder if shared_buffers has any effect on how far you can go before\n>> you hit the 'tipping point'.\n>\n> If your operating system has any reasonable caching itself, not so much at\n> first. As long as the index on the account table fits in shared_buffers, even\n> the basic sort of caching logic an OS uses is perfectly functional for swapping\n> the individual pages of the account table in and out, the main limiting factor\n> on pgbench performance.\n\nI would expect higher shared_buffers to raise the curve before the first\nbreakpoint but after the first breakpoint make the drop steeper and deeper.\nThe equilibrium where the curve becomes flatter should be lower.\n\nThat is, as long as the database fits entirely in RAM having more of the\nbuffers be immediately in shared buffers is better. Once there's contention\nfor the precious cache stealing some of it for duplicated buffers will only\nhurt.\n\n> There is a further out tipping point I've theorized about but not really\n> explored: the point where even the database indexes stop fitting in memory\n> usefully. As you get closer to that, I'd expect that the clock sweep algorithm\n> used by shared_buffers should make it a bit more likely that those important\n> blocks would hang around usefully if you put them there, rather than giving\n> most of the memory to the OS to manage.\n\nHm, sounds logical. At that point the slow drop-off should become even\nshallower and possibly become completely flat. Greater shared_buffers might\nstart helping again at that point.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's RemoteDBA services!\n", "msg_date": "Tue, 09 Dec 2008 13:02:02 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "-----BEGIN PGP SIGNED MESSAGE-----\nHash: SHA1\n\nGreg Smith wrote:\n| On Mon, 8 Dec 2008, Merlin Moncure wrote:\n|\n|> I wonder if shared_buffers has any effect on how far you can go before\n|> you hit the 'tipping point'.\n|\n| If your operating system has any reasonable caching itself, not so much at\n| first. 
As long as the index on the account table fits in shared_buffers,\n| even the basic sort of caching logic an OS uses is perfectly functional\n| for swapping the individual pages of the account table in and out, the\n| main limiting factor on pgbench performance.\n|\n| There is a further out tipping point I've theorized about but not really\n| explored: the point where even the database indexes stop fitting in\n| memory usefully. As you get closer to that, I'd expect that the clock\n| sweep algorithm used by shared_buffers should make it a bit more likely\n| that those important blocks would hang around usefully if you put them\n| there, rather than giving most of the memory to the OS to manage.\n\nI am by no means an expert at this.\n\nBut one thing that can matter is whether you want to improve just the\nperformance of the dbms, or the performance of the entire system, on which\nthe dbms runs. Because if you want to improve the whole system, you would\nwant as much of the caching to take place in the system's buffers so the use\nof the memory could be optimized over the entire workload, not just the load\nof the dbms itself. I suppose on a dedicated system with only one dbms\nrunning with only one database open (at a time, anyway), this might be moot,\nbut not otherwise.\n\nNow I agree that it would be good to get the entire index (or at least the\nworking set of the index) into the memory of the computer. But does it\nreally matter if it is in the system's cache, or the postgres cache? Is it\nany more likely to be in postgres's cache than in the system cache if the\nsystem is hurting for memory? I would think the system would be equally\nlikely to page out \"idle\" pages no matter where they are unless they are\nlocked to memory, and I do not know if all operating systems can do this,\nand even if they can, I do not know if postgres uses that ability. I doubt\nit, since I believe (at least in Linux) a process can do that only if run as\nroot, which I imagine few (if any) users do.\n|\n| Since the data is about 7.5X as large as the indexes, that point is way\n| further out than the basic bottlenecks. And if you graph pgbench results\n| on a scale that usefully shows the results for in-memory TPS scores, you\n| can barely see that part of the chart a well. One day I may get to\n| mapping that out better, and if I do it will be interesting to see if the\n| balance of shared_buffers to OS cache works the way I expect. I was\n| waiting until I finished the pgtune program for that, that's building some\n| of the guts I wanted to make it easier to tweak postgresql.conf settings\n| programmatically in between pgbench runs.\n|\n| --\n| * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n|\n\n\n- --\n~ .~. 
Jean-David Beyer Registered Linux User 85642.\n~ /V\\ PGP-Key: 9A2FC99A Registered Machine 241939.\n~ /( )\\ Shrewsbury, New Jersey http://counter.li.org\n~ ^^-^^ 07:55:02 up 5 days, 18:13, 4 users, load average: 4.18, 4.17, 4.11\n-----BEGIN PGP SIGNATURE-----\nVersion: GnuPG v1.4.5 (GNU/Linux)\nComment: Using GnuPG with CentOS - http://enigmail.mozdev.org\n\niD8DBQFJPm2+Ptu2XpovyZoRAlcJAKCIN098quZKZ7MfAs3MOkuL3WWxrQCdHCVl\nsUQoIVleRWVLvcMZoihztpE=\n=n6uO\n-----END PGP SIGNATURE-----\n", "msg_date": "Tue, 09 Dec 2008 08:08:14 -0500", "msg_from": "Jean-David Beyer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tuesday 09 December 2008 13:08:14 Jean-David Beyer wrote:\n> \n> and even if they can, I do not know if postgres uses that ability. I doubt\n> it, since I believe (at least in Linux) a process can do that only if run\n> as root, which I imagine few (if any) users do.\n\nDisclaimer: I'm not a system programmer... \n\nI believe that at Linux kernel revision 2.6.8 and before processes need Posix \ncapability CAP_IPC_LOCK, and 2.6.9 and after they need CAP_IPC_LOCK to lock \nmore than RLIMIT_MEMLOCK.\n\nIt is a capability, so a process can run as any user assuming it is started \nwith or gained the capability.\n\nNo idea if Postgres uses any of this, other than to protect security of \ncertain password operations there is probably not much point. If key parts of \nyour database are being paged out, get more RAM, if idle parts of your \ndatabase are paged out, you probably could more usefully apply that RAM for \nsomething else.\n\nThe Varnish cache design is the place to look for enlightenment on relying on \nthe kernel paging (using memory mapped files) rather than trying to do it \nyourself, but then a proxy server is a lot simpler than a RDBMS. That said, \nVarnish is fast at what it does (reverse HTTP proxy) !\n", "msg_date": "Tue, 9 Dec 2008 13:58:43 +0000", "msg_from": "Simon Waters <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sun, Dec 7, 2008 at 7:38 PM, Josh Berkus <[email protected]> wrote:\n>\n> Also, the following patches currently still have bugs, but when the bugs are\n> fixed I'll be looking for performance testers, so please either watch the\n> wiki or watch this space:\n>...\n> -- posix_fadvise (Gregory Stark)\n\nEh? Quite possibly but none that I'm aware of. The only problem is a\ncouple of trivial bits of bitrot. I'll a post an update now if you\nwant.\n\n\n\n\n\n-- \ngreg\n", "msg_date": "Tue, 9 Dec 2008 14:56:28 +0000", "msg_from": "\"Greg Stark\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "> ________________________________________\n> From: [email protected] [[email protected]] On Behalf Of > Jean-David Beyer [[email protected]]\n> Sent: Tuesday, December 09, 2008 5:08 AM\n> To: [email protected]\n> Subject: Re: [PERFORM] Need help with 8.4 Performance Testing\n> -----BEGIN PGP SIGNED MESSAGE-----\n> Hash: SHA1\n\n> But one thing that can matter is whether you want to improve just the\n> performance of the dbms, or the performance of the entire system, on which\n> the dbms runs. Because if you want to improve the whole system, you would\n> want as much of the caching to take place in the system's buffers so the use\n> of the memory could be optimized over the entire workload, not just the load\n> of the dbms itself. 
I suppose on a dedicated system with only one dbms\n> running with only one database open (at a time, anyway), this might be moot,\n> but not otherwise.\n\nYes, the OS is in better position to arbitrate between multiple things. Of course, we aren't talking about the highest performance databases if we are talking about mixed use systems though.\nAdditionally, the OS can never really get it right, with a DB or other apps. Any app can behave badly and grab too much RAM and access it regularly enough for it to not be 'idle' much but give the OS VM fits trying to figure out if its important or not versus other processes.\n\n> Now I agree that it would be good to get the entire index (or at least the\n> working set of the index) into the memory of the computer. But does it\n> really matter if it is in the system's cache, or the postgres cache? Is it\n> any more likely to be in postgres's cache than in the system cache if the\n> system is hurting for memory? I would think the system would be equally\n> likely to page out \"idle\" pages no matter where they are unless they are\n> locked to memory, and I do not know if all operating systems can do this,\n> and even if they can, I do not know if postgres uses that ability. I doubt\n> it, since I believe (at least in Linux) a process can do that only if run as\n> root, which I imagine few (if any) users do.\n\nThe problem, is when none of them are really 'idle'. When the OS has to decide which pages, all of which have been accessed recently, to evict. Most OS's will make bad choices if the load is mixed random and sequential access, as they treat all pages equally with respect to freshness versus eviction.\nAnother problem is that there IS a difference between being in postgres' cache and the OS cache. One is more expensive to retrieve than the other. Significantly.\n\nAaccessing buffers in shared_buffers, in process, uses a good chunk less CPU (and data copy and shared buffer eviction overhead) than going over the sys call to the OS.\n\nAnd as far as I can tell, even after the 8.4 fadvise patch, all I/O is in block_size chunks. (hopefully I am wrong)\nMy system is now CPU bound, the I/O can do sequential reads of more than 1.2GB/sec but Postgres can't do a seqscan 30% as fast because it eats up CPU like crazy just reading and identifying tuples. It does seqscans ~ 25% faster if its from shared_buffers than from the OS's page cache though. Seqscans are between 250MB/sec and 400MB/sec peak, from mem or disk, typically showing no more than 35% iostat utilization of the array if off disk -- so we run a few concurrently where we can.\n\nIn addition to the fadvise patch, postgres needs to merge adjacent I/O's into larger ones to reduce the overhead. It only really needs to merge up to sizes of about 128k or 256k, and gain a 8x to 16x drop in syscall overhead, and additionally potentially save code trips down the shared buffer management code paths. At lest, thats my guess I haven't looked at any code and could be wrong.\n\n\nAdditionally, the \"If your operating system has any reasonable caching itself\" comment earlier in this conversation --- Linux (2.6.18, Centos 5.2) does NOT. I can easily make it spend 100% CPU in system time trying to figure out what to do with the system cache for an hour. Just do large seqscans with memory pressure from work_mem or other forces that the OS will not deem 'idle'. Once the requested memory is ~75% of the system total, it will freak out. 
Linux simply will not give up that last 25% or so of the RAM for anything but page cache, even though the disk subsustem is very fast and most of the access is sequential, marginalizing the benefit of the cache. Depending on how you tune it, it will either spin system cpu or swap storm, but the system cpu spin times for the same work load are a lot shorter than an equivalent swap storm.\nMount the data drive in O_DIRECT and the problem vanishes. I've been told that this problem may be gone in some of the latest kernels. I have seriously considered bumping shared_buffers up a lot and mounting the thing direct -- but then we lose the useful scheduler and readahead algorithms. The other way around (small shared_buffers, let the OS do it) hurts performance overall quite a bit -- randomly accessed pages get pushed out to the OS cache more often, and the OS tosses thouse out when a big seqscan occurs, resulting in a lot more random access from disk and more disk bound periods of time. Great wonder, this operating system caching, eh?\n\nIn any event, don't hold up these OS page cache things as if they're the best thing in the world for a database, they have serious flaws themselves and typically are difficult or impossible to tune to be ideal for a database.\n\nIts one thing to propose that a database build its own file system (hard, and why bother?) versus have a database manage its own page cache intelligently and access the OS file system as optimally as it can. In both of the latter, the DB knows much more about what data is really important than the OS (and could for example, prioritize cache versus work_mem intelligently while the OS can get that one horribly wrong in my experience, and knows when a huge seqscan occurs to make caching those results low priority). No matter how you do it using the OS cache, you cache twice and copy twice. O_DIRECT isn't usually an option for other reasons, the OS disk scheduler, readahead, and other benefits of a file system are real and substantial. If you are caching twice, you might as well have the \"closer\" copy of that data be the larger, more efficient pool.\n\nAs for tipping points and pg_bench -- It doesn't seem to reflect the kind of workload we use postgres for at all, though my workload does a lot of big hashes and seqscans, and I'm curious how much improved those may be due to the hash improvements. 32GB RAM and 3TB data (about 250GB scanned regularly) here. And yes, we are almost completely CPU bound now except for a few tasks. Iostat only reports above 65% disk utilization for about 5% of the workload duty-cycle, and is regularly < 20%. COPY doesn't get anywhere near platter speeds, on indexless bulk transfer. The highest disk usage spikes occur when some of our radom-access data/indexes get shoved out of cache. These aren't too large, but high enough seqscan load will cause postgres and the OS to dump them from cache. 
If we put these on some SSD's the disk utilization % would drop a lot further.\n\nI feel confident in saying that in about a year, I could spec out a medium sized budget for hardware ($25k) for almost any postgres setup and make it almost pure CPU bound.\nSSDs and hybrid tech such as ZFS L2ARC make this possible with easy access to 10k+ iops, and it it will take no more than 12 SATA drives in raid 10 next year (and a good controller or software raid) to get 1GB/sec sequential reads.\n", "msg_date": "Tue, 9 Dec 2008 08:37:13 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, Dec 9, 2008 at 9:37 AM, Scott Carey <[email protected]> wrote:\n\n> As for tipping points and pg_bench -- It doesn't seem to reflect the kind of workload we use postgres for at all, though my workload does a lot of big hashes and seqscans, and I'm curious how much improved those may be due to the hash improvements. 32GB RAM and 3TB data (about 250GB scanned regularly) here. And yes, we are almost completely CPU bound now except for a few tasks. Iostat only reports above 65% disk utilization for about 5% of the workload duty-cycle, and is regularly < 20%. COPY doesn't get anywhere near platter speeds, on indexless bulk transfer. The highest disk usage spikes occur when some of our radom-access data/indexes get shoved out of cache. These aren't too large, but high enough seqscan load will cause postgres and the OS to dump them from cache. If we put these on some SSD's the disk utilization % would drop a lot further.\n\nIt definitely reflects our usage pattern, which is very random and\ninvolves tiny bits of data scattered throughout the database. Our\ncurrent database is about 20-25 Gig, which means it's quickly reaching\nthe point where it will not fit in our 32G of ram, and it's likely to\ngrow too big for 64Gig before a year or two is out.\n\n> I feel confident in saying that in about a year, I could spec out a medium sized budget for hardware ($25k) for almost any postgres setup and make it almost pure CPU bound.\n> SSDs and hybrid tech such as ZFS L2ARC make this possible with easy access to 10k+ iops, and it it will take no more than 12 SATA drives in raid 10 next year (and a good controller or software raid) to get 1GB/sec sequential reads.\n\nLucky you, having needs that are fulfilled by sequential reads. :)\n\nI wonder how many hard drives it would take to be CPU bound on random\naccess patterns? About 40 to 60? And probably 15k / SAS drives to\nboot. Cause that's what we're looking at in the next few years where\nI work.\n", "msg_date": "Tue, 9 Dec 2008 10:21:39 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 2008-12-09 at 10:21 -0700, Scott Marlowe wrote:\n> On Tue, Dec 9, 2008 at 9:37 AM, Scott Carey <[email protected]> wrote:\n\n> Lucky you, having needs that are fulfilled by sequential reads. :)\n> \n> I wonder how many hard drives it would take to be CPU bound on random\n> access patterns? About 40 to 60? And probably 15k / SAS drives to\n> boot. 
Cause that's what we're looking at in the next few years where\n> I work.\n\nI was able to achieve only 10-20% IO/Wait even after beating the heck\nout of the machine with 50 spindles (of course it does have 16 CPUs):\n\nhttp://www.commandprompt.com/blogs/joshua_drake/2008/04/is_that_performance_i_smell_ext2_vs_ext3_on_50_spindles_testing_for_postgresql/\n\n\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 09:26:12 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "> Lucky you, having needs that are fulfilled by sequential reads. :)\n\n> I wonder how many hard drives it would take to be CPU bound on random\n> access patterns? About 40 to 60? And probably 15k / SAS drives to\n> boot. Cause that's what we're looking at in the next few years where\n> I work.\n\nAbout $3000 worth of Intel --- mainstream SSD's = 240GB space (6 in raid 10) today, 2x to 3x that storage area in 1 year.\n\nRandom reads are even easier, provided you don't need more than 500GB or so.\n\nAnd with something like ZFS + L2ARC you can back your data with large slow iops disks and have cache access to data without requiring mirrors on the cache ($3k of ssds for that covers 2x the area, then).\n", "msg_date": "Tue, 9 Dec 2008 09:28:02 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 9 Dec 2008, Scott Marlowe wrote:\n> I wonder how many hard drives it would take to be CPU bound on random\n> access patterns? About 40 to 60? And probably 15k / SAS drives to\n> boot. Cause that's what we're looking at in the next few years where\n> I work.\n\nThere's a problem with that thinking. That is, in order to exercise many \nspindles, you will need to have just as many (if not more) concurrent \nrequests. And if you have many concurrent requests, then you can spread \nthem over multiple CPUs. So it's more a case of \"How many hard drives PER \nCPU\". It also becomes a matter of whether Postgres can scale that well.\n\nMatthew\n\n-- \n Those who do not understand Unix are condemned to reinvent it, poorly.\n -- Henry Spencer\n", "msg_date": "Tue, 9 Dec 2008 17:35:16 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, Dec 9, 2008 at 10:35 AM, Matthew Wakeling <[email protected]> wrote:\n> On Tue, 9 Dec 2008, Scott Marlowe wrote:\n>>\n>> I wonder how many hard drives it would take to be CPU bound on random\n>> access patterns? About 40 to 60? And probably 15k / SAS drives to\n>> boot. Cause that's what we're looking at in the next few years where\n>> I work.\n>\n> There's a problem with that thinking. That is, in order to exercise many\n> spindles, you will need to have just as many (if not more) concurrent\n> requests. And if you have many concurrent requests, then you can spread them\n> over multiple CPUs. So it's more a case of \"How many hard drives PER CPU\".\n> It also becomes a matter of whether Postgres can scale that well.\n\nFor us, all that is true. We typically have a dozen or more\nconcurrent requests running at once. We'll likely see that increase\nlinearly with our increase in users over the next year or so. 
We\nbought the machines with dual quad core opterons knowing the 6,8 and\n12 core opterons were due out on the same socket design in the next\nyear or so and we could upgrade those too if needed. PostgreSQL seems\nto scale well in most tests I've seen to at least 16 cores, and after\nthat it's anyone's guess. The Sparc Niagra seems capable of scaling\nto 32 threads on 8 cores with pgsql 8.2 quite well.\n\nI worry about the linux kernel scaling that well, and we might have to\nlook at open solaris or something like the solaris kernel under ubuntu\ndistro to get better scaling.\n", "msg_date": "Tue, 9 Dec 2008 10:46:37 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Let me re-phrase this.\n\nFor today, at 200GB or less of required space, and 500GB or less next year.\n\n\"Where we're going, we don't NEED spindles.\"\n\n\nSeriously, go down to the store and get 6 X25-M's, they're as cheap as $550 each and will be sub $500 soon. These are more than sufficient for all but heavy write workloads (each can withstand ~600+ TB of writes in a lifetime, and SMART will tell you before they go). 6 in a RAID 10 will give you 750MB/sec read, and equivalent MB/sec in random reads. I've tested them. Random writes are very very fast too, faster than any SAS drive.\nPut this in your current system, and you won't need to upgrade the RAM unless you need items in cache to reduce CPU load or need it for the work_mem space.\n\nSpindles will soon be only for capacity and sequential access performance requirements. Solid state will be for IOPS, and I would argue that for most Postgres installations, already is (now that the Intel SSD drive, which does random writes and read/write concurrency well, has arrived - more such next gen drives are on the way).\n\n\nOn 12/9/08 9:28 AM, \"Scott Carey\" <[email protected]> wrote:\n\n> Lucky you, having needs that are fulfilled by sequential reads. :)\n\n> I wonder how many hard drives it would take to be CPU bound on random\n> access patterns? About 40 to 60? And probably 15k / SAS drives to\n> boot. Cause that's what we're looking at in the next few years where\n> I work.\n\nAbout $3000 worth of Intel --- mainstream SSD's = 240GB space (6 in raid 10) today, 2x to 3x that storage area in 1 year.\n\nRandom reads are even easier, provided you don't need more than 500GB or so.\n\nAnd with something like ZFS + L2ARC you can back your data with large slow iops disks and have cache access to data without requiring mirrors on the cache ($3k of ssds for that covers 2x the area, then).\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\nRe: [PERFORM] Need help with 8.4 Performance Testing\n\n\nLet me re-phrase this.\n\nFor today, at 200GB or less of required space, and 500GB or less next year.\n\n“Where we’re going, we don’t NEED spindles.”\n\n\nSeriously, go down to the store and get 6 X25-M’s, they’re as cheap as $550 each and will be sub $500 soon.  These are more than sufficient for all but heavy write workloads (each can withstand ~600+ TB of writes in a lifetime, and SMART will tell you before they go).  6 in a RAID 10 will give you 750MB/sec read, and equivalent MB/sec in random reads.  I’ve tested them.  
Random writes are very very fast too, faster than any SAS drive.\nPut this in your current system, and you won’t need to upgrade the RAM unless you need items in cache to reduce CPU load or need it for the work_mem space.\n\nSpindles will soon be only for capacity and sequential access performance requirements.  Solid state will be for IOPS, and I would argue that for most Postgres installations, already is (now that the Intel SSD drive, which does random writes and read/write concurrency well, has arrived — more such next gen drives are on the way).\n\n\nOn 12/9/08 9:28 AM, \"Scott Carey\" <[email protected]> wrote:\n\n> Lucky you, having needs that are fulfilled by sequential reads.  :)\n\n> I wonder how many hard drives it would take to be CPU bound on random\n> access patterns?  About 40 to 60?  And probably 15k / SAS drives to\n> boot.  Cause that's what we're looking at in the next few years where\n> I work.\n\nAbout $3000 worth of Intel --- mainstream SSD's = 240GB space (6 in raid 10) today, 2x to 3x that storage area in 1 year.\n\nRandom reads are even easier, provided you don't need more than 500GB or so.\n\nAnd with something like ZFS + L2ARC you can back your data with large slow iops disks and have cache access to data without requiring mirrors on the cache ($3k of ssds for that covers 2x the area, then).\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Tue, 9 Dec 2008 10:01:32 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, Dec 9, 2008 at 11:01 AM, Scott Carey <[email protected]> wrote:\n> Let me re-phrase this.\n>\n> For today, at 200GB or less of required space, and 500GB or less next year.\n>\n> \"Where we're going, we don't NEED spindles.\"\n\nThose intel SSDs sound compelling. I've been waiting for SSDs to get\ncompetitive price and performance wise for a while, and when the\nintels came out and I read the first benchmarks I immediately began\nscheming. Sadly, that was right after we're ordered our new 16 drive\nservers, and I didn't have time to try something new and hope it would\nwork. Now that the servers are up and running, we'll probably look at\nadding the SSDs next summer before our high load period begins.\n", "msg_date": "Tue, 9 Dec 2008 11:08:03 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 2008-12-09 at 11:08 -0700, Scott Marlowe wrote:\n> On Tue, Dec 9, 2008 at 11:01 AM, Scott Carey <[email protected]> wrote:\n> > Let me re-phrase this.\n> >\n> > For today, at 200GB or less of required space, and 500GB or less next year.\n> >\n> > \"Where we're going, we don't NEED spindles.\"\n> \n> Those intel SSDs sound compelling. I've been waiting for SSDs to get\n> competitive price and performance wise for a while, and when the\n> intels came out and I read the first benchmarks I immediately began\n> scheming. Sadly, that was right after we're ordered our new 16 drive\n> servers, and I didn't have time to try something new and hope it would\n> work. Now that the servers are up and running, we'll probably look at\n> adding the SSDs next summer before our high load period begins.\n> \n\nThe idea of SSDs is interesting. 
However I think I will wait for all the\nother early adopters to figure out the problems before I start\nsuggesting them to clients.\n\nHard drives work, their cheap and fast. I can get 25 spindles, 15k in a\n3U with controller and battery backed cache for <$10k.\n\nJoshua D. Drake\n\n\n\n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 10:11:25 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, Dec 9, 2008 at 1:11 PM, Joshua D. Drake <[email protected]> wrote:\n> Hard drives work, their cheap and fast. I can get 25 spindles, 15k in a\n> 3U with controller and battery backed cache for <$10k.\n\nWhile I agree with your general sentiments about early adoption, etc\n(the intel ssd products are the first flash drives that appear to have\nreal promise in the enterprise), the numbers tell a different story.\nA *single* X25-E will give similar sustained write IOPS as your tray\nfor far less price and a much better worst case read latency. All\nthis without the 25 sets of whizzing ball bearings, painful spin-up\ntimes, fanning, RAID controller firmware, and various other sundry\ntechnologies to make the whole thing work.\n\nThe main issue that I see with flash SSD is if the promised wear\nlifetimes are believable in high load environments and the mechanism\nof failure (slowly degrade into read only) is accurate.\n\nSo, at least in relative terms, 15k sas drives are not 'fast'. They\nare terribly, awfully, painfully slow. They are also not cheap in\nterms of $/IOPS. The end is near.\n\nmerlin\n", "msg_date": "Tue, 9 Dec 2008 15:07:04 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 2008-12-09 at 15:07 -0500, Merlin Moncure wrote:\n> On Tue, Dec 9, 2008 at 1:11 PM, Joshua D. Drake <[email protected]> wrote:\n> > Hard drives work, their cheap and fast. I can get 25 spindles, 15k in a\n> > 3U with controller and battery backed cache for <$10k.\n> \n> While I agree with your general sentiments about early adoption, etc\n> (the intel ssd products are the first flash drives that appear to have\n> real promise in the enterprise), the numbers tell a different story.\n\nOh I have read about them and I am excited. I am just saying that there\nare plenty of people who can take advantage of the unknown without the\nworry of the pain that can cause. My client, can't.\n\n> \n> The main issue that I see with flash SSD is if the promised wear\n> lifetimes are believable in high load environments and the mechanism\n> of failure (slowly degrade into read only) is accurate.\n> \n\nRight.\n\n> So, at least in relative terms, 15k sas drives are not 'fast'. They\n> are terribly, awfully, painfully slow. They are also not cheap in\n> terms of $/IOPS. The end is near.\n> \n\nNo doubt about it. I give it 24 months tops.\n\nJoshua D. Drake\n\n\n> merlin\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 12:20:42 -0800", "msg_from": "\"Joshua D. 
Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Which brings this back around to the point I care the most about:\n\nI/O per second will diminish as the most common database performance limiting factor in Postgres 8.4's lifetime, and become almost irrelevant in 8.5's.\nBecoming more CPU efficient will become very important, and for some, already is. The community needs to be proactive on this front.\nThis turns a lot of old assumptions on their head, from the database down through the OS and filesystem. We're bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n\n> On 12/9/08 12:20 PM, \"Joshua D. Drake\" <[email protected]> wrote:\n\n> > So, at least in relative terms, 15k sas drives are not 'fast'. They\n> > are terribly, awfully, painfully slow. They are also not cheap in\n> > terms of $/IOPS. The end is near.\n> >\n\n> No doubt about it. I give it 24 months tops.\n>\n> Joshua D. Drake\n\n\n\n\n\nRe: [PERFORM] Need help with 8.4 Performance Testing\n\n\nWhich brings this back around to the point I care the most about:\n\nI/O per second will diminish as the most common database performance limiting factor in Postgres 8.4’s lifetime, and become almost irrelevant in 8.5’s.\nBecoming more CPU efficient will become very important, and for some, already is.  The community needs to be proactive on this front.  \nThis turns a lot of old assumptions on their head, from the database down through the OS and filesystem.  We’re bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n\n> On 12/9/08 12:20 PM, \"Joshua D. Drake\" <[email protected]> wrote:\n\n> > So, at least in relative terms, 15k sas drives are not 'fast'. They\n> > are terribly, awfully, painfully slow.  They are also not cheap in\n> > terms of $/IOPS.  The end is near.\n> >\n\n> No doubt about it. I give it 24 months tops.\n> \n> Joshua D. Drake", "msg_date": "Tue, 9 Dec 2008 14:05:18 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Scott Carey <[email protected]> writes:\n> Which brings this back around to the point I care the most about:\n> I/O per second will diminish as the most common database performance limiting factor in Postgres 8.4's lifetime, and become almost irrelevant in 8.5's.\n> Becoming more CPU efficient will become very important, and for some, already is. The community needs to be proactive on this front.\n> This turns a lot of old assumptions on their head, from the database down through the OS and filesystem. We're bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n\nHmm ... I wonder whether this means that the current work on\nparallelizing I/O (the posix_fadvise patch in particular) is a dead\nend. Because what that is basically going to do is expend more CPU\nto improve I/O efficiency. If you believe this thesis then that's\nnot the road we want to go down.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 09 Dec 2008 17:38:35 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing " }, { "msg_contents": "> Hmm ... 
I wonder whether this means that the current work on\n> parallelizing I/O (the posix_fadvise patch in particular) is a dead\n> end. Because what that is basically going to do is expend more CPU\n> to improve I/O efficiency. If you believe this thesis then that's\n> not the road we want to go down.\n\nI don't believe the thesis. The gap between disk speeds and memory\nspeeds may narrow over time, but I doubt it's likely to disappear\naltogether any time soon, and certainly not for all users.\n\nBesides which, I believe the CPU overhead of that patch is pretty darn\nsmall when the feature is not enabled.\n\n...Robert\n", "msg_date": "Tue, 9 Dec 2008 17:58:44 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 2008-12-09 at 17:38 -0500, Tom Lane wrote:\n> Scott Carey <[email protected]> writes:\n> > Which brings this back around to the point I care the most about:\n> > I/O per second will diminish as the most common database performance limiting factor in Postgres 8.4's lifetime, and become almost irrelevant in 8.5's.\n> > Becoming more CPU efficient will become very important, and for some, already is. The community needs to be proactive on this front.\n> > This turns a lot of old assumptions on their head, from the database down through the OS and filesystem. We're bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n> \n> Hmm ... I wonder whether this means that the current work on\n> parallelizing I/O (the posix_fadvise patch in particular) is a dead\n> end. Because what that is basically going to do is expend more CPU\n> to improve I/O efficiency. If you believe this thesis then that's\n> not the road we want to go down.\n\nThe per cpu performance increase against the per I/O system increase\nline is going to be vastly different. Anything that reduces overall I/O\nis going to help (remember, you can never go too fast).\n\nThe idea that somehow I/O per second will diminish as the most common\ndatabase performance factor is IMO a pipe dream. Even as good as SSDs\nare getting, they still have to go through the bus. Something CPUs are\nbetter at (especially those CPUs that connect to memory directly without\nthe bus).\n\nIn 5 years maybe, in the next two postgresql releases, not likely. Not\nto mention all of this is around the idea of a different class of\nhardware than 99% of our community will be running.\n\n\nSincerely,\n\nJoshua D. Drake\n\n\n> \n> \t\t\tregards, tom lane\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Tue, 09 Dec 2008 15:05:13 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 9 Dec 2008, Robert Haas wrote:\n\n> I don't believe the thesis. The gap between disk speeds and memory\n> speeds may narrow over time, but I doubt it's likely to disappear\n> altogether any time soon, and certainly not for all users.\n\nI think the \"not for all users\" is the critical part. 
In 2 years, we may \n(or may not) start using SSD instead of traditional drives for new \ninstalls, but we certainly won't be throwing out our existing servers any \ntime soon just because something (much) better is now available.\n", "msg_date": "Tue, 9 Dec 2008 15:07:42 -0800 (PST)", "msg_from": "Ben Chobot <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Tom Lane wrote:\n> Scott Carey <[email protected]> writes:\n> \n>> Which brings this back around to the point I care the most about:\n>> I/O per second will diminish as the most common database performance limiting factor in Postgres 8.4's lifetime, and become almost irrelevant in 8.5's.\n>> Becoming more CPU efficient will become very important, and for some, already is. The community needs to be proactive on this front.\n>> This turns a lot of old assumptions on their head, from the database down through the OS and filesystem. We're bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n>> \n>\n> Hmm ... I wonder whether this means that the current work on\n> parallelizing I/O (the posix_fadvise patch in particular) is a dead\n> end. Because what that is basically going to do is expend more CPU\n> to improve I/O efficiency. If you believe this thesis then that's\n> not the road we want to go down.\n>\n> \t\t\tregards, tom lane\n> \n\nWhat does the CPU/ Memory/Bus performance road map look like?\n\nIs the IO performance for storage device for what ever it be, going to \nbe on par with the above to cause this problem?\n\nOnce IO performance numbers start jumping up I think DBA will have the \ntemptation start leaving more and more data in the production database \ninstead of moving it out of the production database. Or start \nconsolidating databases onto fewer servers . Again pushing more load \nonto the IO. \n\n\n\n\n\n\nTom Lane wrote:\n\nScott Carey <[email protected]> writes:\n \n\nWhich brings this back around to the point I care the most about:\nI/O per second will diminish as the most common database performance limiting factor in Postgres 8.4's lifetime, and become almost irrelevant in 8.5's.\nBecoming more CPU efficient will become very important, and for some, already is. The community needs to be proactive on this front.\nThis turns a lot of old assumptions on their head, from the database down through the OS and filesystem. We're bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n \n\n\nHmm ... I wonder whether this means that the current work on\nparallelizing I/O (the posix_fadvise patch in particular) is a dead\nend. Because what that is basically going to do is expend more CPU\nto improve I/O efficiency. If you believe this thesis then that's\nnot the road we want to go down.\n\n\t\t\tregards, tom lane\n \n\n\nWhat does the CPU/ Memory/Bus performance road map look like?\n\nIs the IO performance for storage device for what ever it be, going to\nbe on par with the above to cause this problem?\n\nOnce IO performance numbers start jumping up I think DBA will have the\ntemptation start leaving more and more data in the production database \ninstead of moving it out of the production database. Or start\nconsolidating databases onto fewer servers .  
Again pushing more load\nonto the IO.", "msg_date": "Tue, 09 Dec 2008 18:19:35 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Prefetch CPU cost should be rather low in the grand scheme of things, and does help performance even for very fast I/O. I would not expect a very large CPU use increase from that sort of patch in the grand scheme of things - there is a lot that is more expensive to do on a per block basis.\n\nThere are two ways to look at non-I/O bound performance:\n* Aggregate performance across many concurrent activities - here you want the least CPU used possible per action, and the least collisions on locks or shared data structures. Using resources for as short of an interval as possible also helps a lot here.\n* Single query performance, where you want to shorten the query time, perhaps at the cost of more average CPU. Here, something like the fadvise stuff helps - as would any thread parallelism. Perhaps less efficient in aggregate, but more efficient for a single query.\n\nOverall CPU cost of accessing and reading data. If this comes from disk, the big gains will be along the whole chain: Driver to file system cache, file system cache to process, process specific tasks (cache eviction, placement, tracking), examining page tuples, locating tuples within pages, etc. Anything that currently occurs on a per-block basis that could be done in a larger batch or set of blocks may be a big gain. Another place that commonly consumes CPU in larger software projects is memory allocation if more advanced allocation techniques are not used. I have no idea what Postgres uses here however. I do know that commercial databases have extensive work in this area for performance, as well as reliability (harder to cause a leak, or easier to detect) and ease of use (don't have to even bother to free in certain contexts).\n\n> On 12/9/08 2:58 PM, \"Robert Haas\" <[email protected]> wrote:\n\n> I don't believe the thesis. The gap between disk speeds and memory\n> speeds may narrow over time, but I doubt it's likely to disappear\n> altogether any time soon, and certainly not for all users.\n\nWell, when select count(1) reads pages slower than my disk, its 16x + slower than my RAM. Until one can demonstrate that the system can even read pages in RAM faster than what disks will do next year, it doesn't matter much that RAM is faster. It does matter that RAM is faster for sorts, hashes, and other operations, but at the current time it does not for the raw pages themselves, from what I can measure.\n\nThis is in fact, central to my point. Things will be CPU bound, not I/O bound. It is mentioned that we still have to access things over the bus, and memory is faster, etc. But Postgres is too CPU bound on page access to take advantage of the fact that memory is faster (for reading data pages).\n\nThe biggest change is not just that disks are getting closer to RAM, but that the random I/O penalty is diminishing significantly. Low latencies makes seek-driven queries that used to consume mostly disk time consume CPU time instead. High CPU costs for accessing pages makes a fast disk surprisingly close to RAM speed.\n\n> Besides which, I believe the CPU overhead of that patch is pretty darn\n> small when the feature is not enabled.\n\n> ...Robert\n\nI doubt it is much CPU, on or off. 
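For anyone who hasn't looked at it, the prefetch being talked about here comes down to a loop shaped roughly like the sketch below: hint the kernel about blocks you will want a little ahead of where you are processing, so the device works on block N+k while the CPU works on block N. This is only an illustration -- the block size, prefetch distance and block-number array are made-up stand-ins, not the actual patch:

#define _XOPEN_SOURCE 600       /* for posix_fadvise */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define BLCKSZ            8192
#define PREFETCH_DISTANCE 8     /* how far ahead to hint; arbitrary */

/* Read a list of block numbers from an open file, hinting a few blocks
   ahead of the one currently being processed. */
static void read_with_prefetch(int fd, const long *blocks, int nblocks)
{
    char buf[BLCKSZ];

    for (int i = 0; i < nblocks; i++) {
        /* cheap, non-blocking hint: the kernel can start the I/O now
           instead of waiting until we stall on the pread below */
        if (i + PREFETCH_DISTANCE < nblocks)
            posix_fadvise(fd, blocks[i + PREFETCH_DISTANCE] * (off_t) BLCKSZ,
                          BLCKSZ, POSIX_FADV_WILLNEED);

        if (pread(fd, buf, BLCKSZ, blocks[i] * (off_t) BLCKSZ) != BLCKSZ)
            perror("pread");

        /* ... examine the tuples in buf here ... */
    }
}

The hint itself is just another syscall that queues readahead into the page cache, which is why the cost is essentially zero when the feature is off and small per block when it is on.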
It will help with SSD's when optimizing a single query, it may not help much if a system has enough 'natural' parallelism from other concurrent queries. However there is a clear CPU benefit for getting individual queries out of the way faster, and occupying precious work_mem or other resources for a shorter time. Occupying resources for a shorter period always translates to some CPU savings on a machine running at its limit with high concurrency.\n\n\n\nRe: [PERFORM] Need help with 8.4 Performance Testing\n\n\nPrefetch CPU cost should be rather low in the grand scheme of things, and does help performance even for very fast I/O.  I would not expect a very large CPU use increase from that sort of patch in the grand scheme of things — there is a lot that is more expensive to do on a per block basis.\n\nThere are two ways to look at non-I/O bound performance:\n* Aggregate performance across many concurrent activities — here you want the least CPU used possible per action, and the least collisions on locks or shared data structures.  Using resources for as short of an interval as possible also helps a lot here.\n* Single query performance, where you want to shorten the query time, perhaps at the cost of more average CPU.  Here, something like the fadvise stuff helps — as would any thread parallelism.  Perhaps less efficient in aggregate, but more efficient for a single query.\n\nOverall CPU cost of accessing and reading data.  If this comes from disk, the big gains will be along the whole chain:  Driver to file system cache, file system cache to process, process specific tasks (cache eviction, placement, tracking), examining page tuples, locating tuples within pages, etc.   Anything that currently occurs on a per-block basis that could be done in a larger batch or set of blocks may be a big gain.  Another place that commonly consumes CPU in larger software projects is memory allocation if more advanced allocation techniques are not used.  I have no idea what Postgres uses here however.  I do know that commercial databases have extensive work in this area for performance, as well as reliability (harder to cause a leak, or easier to detect) and ease of use (don’t have to even bother to free in certain contexts).  \n\n> On 12/9/08 2:58 PM, \"Robert Haas\" <[email protected]> wrote:\n\n> I don't believe the thesis.  The gap between disk speeds and memory\n> speeds may narrow over time, but I doubt it's likely to disappear\n> altogether any time soon, and certainly not for all users.\n\nWell, when select count(1) reads pages slower than my disk, its 16x + slower than my RAM.  Until one can demonstrate that the system can even read pages in RAM faster than what disks will do next year, it doesn’t matter much that RAM is faster.   It does matter that RAM is faster for sorts, hashes, and other operations, but at the current time it does not for the raw pages themselves, from what I can measure.\n\nThis is in fact, central to my point.  Things will be CPU bound, not I/O bound.  It is mentioned that we still have to access things over the bus, and memory is faster, etc.  But Postgres is too CPU bound on page access to take advantage of the fact that memory is faster (for reading data pages).\n\nThe biggest change is not just that disks are getting closer to RAM, but that the random I/O penalty is diminishing significantly.  Low latencies makes seek-driven queries that used to consume mostly disk time consume CPU time instead.  
High CPU costs for accessing pages makes a fast disk surprisingly close to RAM speed.\n\n> Besides which, I believe the CPU overhead of that patch is pretty darn\n> small when the feature is not enabled.\n\n> ...Robert\n\nI doubt it is much CPU, on or off.  It will help with SSD’s when optimizing a single query, it may not help much if a system has enough ‘natural’ parallelism from other concurrent queries.  However there is a clear CPU benefit for getting individual queries out of the way faster, and occupying precious work_mem or other resources for a shorter time.  Occupying resources for a shorter period always translates to some CPU savings on a machine running at its limit with high concurrency.", "msg_date": "Tue, 9 Dec 2008 15:34:49 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Scott Carey <[email protected]> writes:\n\n> And as far as I can tell, even after the 8.4 fadvise patch, all I/O is in\n> block_size chunks. (hopefully I am wrong) \n>...\n> In addition to the fadvise patch, postgres needs to merge adjacent I/O's\n> into larger ones to reduce the overhead. It only really needs to merge up to\n> sizes of about 128k or 256k, and gain a 8x to 16x drop in syscall overhead,\n> and additionally potentially save code trips down the shared buffer\n> management code paths. At lest, thats my guess I haven't looked at any code\n> and could be wrong.\n\nThere are a lot of assumptions here that I would be interested in seeing\nexperiments to back up.\n\nFWIW when I was doing testing of posix_fadvise I did a *lot* of experiments\nthough only with a couple systems. One had a 3-drive array and one with a\n15-drive array, both running Linux. I sometimes could speed up the sequential\nscan by about 10% but not consistently. It was never more than about 15% shy\nof the highest throughput from dd. And incidentally the throughput from dd\ndidn't seem to depend much at all on the blocksize.\n\nOn your system does \"dd bs=8k\" and \"dd bs=128k\" really have an 8x performance\ndifference?\n\nIn short, at least from the evidence available, this all seems like it might\nbe holdover beliefs from the olden days of sysadmining where syscalls were\nmuch slower and OS filesystem caches much dumber.\n\nI'm still interested in looking into it but I'll have to see actual vmstat or\niostat output while it's happening, preferably some oprofile results too. And\nhow many drives do you actually need to get into this situation. Also, what is\nthe output of \"vacuum verbose\" on the table?\n\n\n> Additionally, the \"If your operating system has any reasonable caching\n> itself\" comment earlier in this conversation --- Linux (2.6.18, Centos 5.2)\n> does NOT. I can easily make it spend 100% CPU in system time trying to\n> figure out what to do with the system cache for an hour. Just do large\n> seqscans with memory pressure from work_mem or other forces that the OS will\n> not deem 'idle'. Once the requested memory is ~75% of the system total, it\n> will freak out. Linux simply will not give up that last 25% or so of the RAM\n> for anything but page cache\n\nThis seems like just a misconfigured system. Linux and most Unixen definitely\nexpect to have a substantial portion of RAM dedicated to disk cache. Keep in\nmind all your executable pages count towards this page cache too. 
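Back on the dd bs=8k vs bs=128k question: if someone wants to separate raw syscall overhead from real disk throughput, a harness along these lines does it. Rough sketch only -- point it at a test file that is already in the page cache so the only variable is the per-read() cost; the path is just a placeholder, and on older glibc you may need -lrt for clock_gettime.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

/* Time reading an (ideally already-cached) file start to finish with a
   given read() size, to isolate per-syscall cost from disk throughput. */
static double time_reads(const char *path, size_t bufsz)
{
    char *buf = malloc(bufsz);
    int fd = open(path, O_RDONLY);
    struct timespec t0, t1;

    if (fd < 0 || buf == NULL) { perror("setup"); exit(1); }

    clock_gettime(CLOCK_MONOTONIC, &t0);
    while (read(fd, buf, bufsz) > 0)
        ;                                   /* pull the data, do no work */
    clock_gettime(CLOCK_MONOTONIC, &t1);

    close(fd);
    free(buf);
    return (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
}

int main(void)
{
    const char *path = "/tmp/testfile";     /* placeholder; pre-read it so it is cached */
    printf("8k reads:   %.3f s\n", time_reads(path, 8192));
    printf("128k reads: %.3f s\n", time_reads(path, 131072));
    return 0;
}

That only captures the kernel-crossing cost, of course -- it says nothing about what the backend spends per block once the data arrives, which is the other half of the claim. To get back to the caching point: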
You can\nadjust this to some extent with the \"swappiness\" variable in Linux -- but I\ndoubt you'll be happy with the results regardless.\n\n> The other way around (small shared_buffers, let the OS do it) hurts\n> performance overall quite a bit -- randomly accessed pages get pushed out to\n> the OS cache more often, and the OS tosses thouse out when a big seqscan\n> occurs, resulting in a lot more random access from disk and more disk bound\n> periods of time. Great wonder, this operating system caching, eh?\n\nHow do you observe this?\n\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Get trained by Bruce Momjian - ask me about EnterpriseDB's PostgreSQL training!\n", "msg_date": "Wed, 10 Dec 2008 00:45:24 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "\nMatthew Wakeling <[email protected]> writes:\n\n> On Tue, 9 Dec 2008, Scott Marlowe wrote:\n>> I wonder how many hard drives it would take to be CPU bound on random\n>> access patterns? About 40 to 60? And probably 15k / SAS drives to\n>> boot. Cause that's what we're looking at in the next few years where\n>> I work.\n>\n> There's a problem with that thinking. That is, in order to exercise many\n> spindles, you will need to have just as many (if not more) concurrent requests.\n> And if you have many concurrent requests, then you can spread them over\n> multiple CPUs. So it's more a case of \"How many hard drives PER CPU\". It also\n> becomes a matter of whether Postgres can scale that well.\n\nWell:\n\n$ units\n2445 units, 71 prefixes, 33 nonlinear units\nYou have: 8192 byte/5ms\nYou want: MB/s\n\t* 1.6384\n\t/ 0.61035156\n\nAt 1.6MB/s per drive if find Postgres is cpu-bound doing sequential scans at\n1GB/s you'll need about 640 drives to keep one cpu satisfied doing random I/O\n-- assuming you have perfect read-ahead and the read-ahead itself doesn't add\ncpu overhead. Both of which are false of course, but at least in theory that's\nwhat it'll take.\n\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's On-Demand Production Tuning\n", "msg_date": "Wed, 10 Dec 2008 00:54:37 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Tom Lane wrote:\n> Scott Carey <[email protected]> writes:\n>> Which brings this back around to the point I care the most about:\n>> I/O per second will diminish as the most common database performance limiting factor in Postgres 8.4's lifetime, and become almost irrelevant in 8.5's.\n>> Becoming more CPU efficient will become very important, and for some, already is. The community needs to be proactive on this front.\n>> This turns a lot of old assumptions on their head, from the database down through the OS and filesystem. We're bound to run into many surprises due to this major shift in something that has had its performance characteristics taken for granted for decades.\n> \n> Hmm ... I wonder whether this means that the current work on\n> parallelizing I/O (the posix_fadvise patch in particular) is a dead\n> end. Because what that is basically going to do is expend more CPU\n> to improve I/O efficiency. 
If you believe this thesis then that's\n> not the road we want to go down.\n> \n\nI imagine the larger postgres installations will still benefit from\nthis patch - because I imagine they will stay on hard disks for\nquite some time; simply because the cost of 70TB of disks seems like\nit'll be lower than RAM for at least the intermediate term.\n\nI imagine the smaller postgres installations will also still benefit\nfrom this patch - because my postgres installations with the most\npainful I/O bottlenecks are small virtual machines without dedicated\ndrives where I/O (I guess emulated by the virtual machine software)\nis very painful.\n\nPerhaps there's a mid-sized system that won't benefit from fadvise()\nin the intermediate term -- where the size of the database is about\nthe same size as a cost-effective flash drive -- but I don't have\nany databases in that range now.\n\n", "msg_date": "Tue, 09 Dec 2008 17:37:41 -0800", "msg_from": "Ron Mayer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "justin wrote:\n> Tom Lane wrote:\n>> Hmm ... I wonder whether this means that the current work on\n>> parallelizing I/O (the posix_fadvise patch in particular) is a dead\n>> end. Because what that is basically going to do is expend more CPU\n>> to improve I/O efficiency. If you believe this thesis then that's\n>> not the road we want to go down.\n>>\n>> \t\t\tregards, tom lane\n>>\n> What does the CPU/ Memory/Bus performance road map look like?\n> \n> Is the IO performance for storage device for what ever it be, going to \n> be on par with the above to cause this problem?\n\nFlash memory will become just a fourth layer in the memory caching system (on-board CPU, high-speed secondary cache, main memory, and persistent memory). The idea of external storage will probably disappear altogether -- computers will just have memory, and won't forget anything when you turn them off. Since most computers are 64 bits these days, all data and programs will just hang out in memory at all times, and be directly addressable by the CPU.\n\nThe distinction between disk and memory arose from the fact that disks were large, slow devices relative to \"core\" memory and had to be connected by long wires, hence the need for I/O subsystems. As flash memory becomes mainstream, I expect this distinction to disappear.\n\nCraig\n", "msg_date": "Tue, 09 Dec 2008 17:56:57 -0800", "msg_from": "Craig James <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "> Well, when select count(1) reads pages slower than my disk, its 16x + slower\n> than my RAM. Until one can demonstrate that the system can even read pages\n> in RAM faster than what disks will do next year, it doesn't matter much that\n> RAM is faster. It does matter that RAM is faster for sorts, hashes, and\n> other operations, but at the current time it does not for the raw pages\n> themselves, from what I can measure.\n>\n> This is in fact, central to my point. Things will be CPU bound, not I/O\n> bound. It is mentioned that we still have to access things over the bus,\n> and memory is faster, etc. But Postgres is too CPU bound on page access to\n> take advantage of the fact that memory is faster (for reading data pages).\n\nAs I understand it, a big part of the reason for the posix_fadvise\npatch is that the current system doesn't do a good job leveraging many\nspindles in the service of a single query. 
So the problem is not that\nthe CPU overhead is too large in some general sense but that the disk\nand CPU operations get serialized, leading to an overall loss of\nperformance. On the other hand, there are certainly cases (such as a\ndatabase which is entirely in RAM, or all the commonly used parts are\nin RAM) where there really isn't very much I/O, and in those cases of\ncourse the CPU cost will dominate.\n\n...Robert\n", "msg_date": "Tue, 9 Dec 2008 22:06:31 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Just to clarify, I'm not talking about random I/O bound loads today, on hard drives, targetted by the fadvise stuff - these aren't CPU bound, and they will be helped by it.\n\nFor sequential scans, this situation is different, since the OS has sufficient read-ahead prefetching algorithms of its own for sequential reads, and the CPU work and I/O work ends up happening in parallel due to that.\n\nFor what it is worth, you can roughly double to triple the iops of an Intel X-25M on pure random reads if you queue up multiple concurrent reads rather than serialize them. But it is not due to spindles, it is due to the latency of the SATA interface and the ability of the controller chip to issue reads to flash devices on different banks concurrently to some extent.\n\n\nOn 12/9/08 7:06 PM, \"Robert Haas\" <[email protected]> wrote:\n\n> Well, when select count(1) reads pages slower than my disk, its 16x + slower\n> than my RAM. Until one can demonstrate that the system can even read pages\n> in RAM faster than what disks will do next year, it doesn't matter much that\n> RAM is faster. It does matter that RAM is faster for sorts, hashes, and\n> other operations, but at the current time it does not for the raw pages\n> themselves, from what I can measure.\n>\n> This is in fact, central to my point. Things will be CPU bound, not I/O\n> bound. It is mentioned that we still have to access things over the bus,\n> and memory is faster, etc. But Postgres is too CPU bound on page access to\n> take advantage of the fact that memory is faster (for reading data pages).\n\nAs I understand it, a big part of the reason for the posix_fadvise\npatch is that the current system doesn't do a good job leveraging many\nspindles in the service of a single query. So the problem is not that\nthe CPU overhead is too large in some general sense but that the disk\nand CPU operations get serialized, leading to an overall loss of\nperformance. On the other hand, there are certainly cases (such as a\ndatabase which is entirely in RAM, or all the commonly used parts are\nin RAM) where there really isn't very much I/O, and in those cases of\ncourse the CPU cost will dominate.\n\n...Robert\n\n\n\n\nRe: [PERFORM] Need help with 8.4 Performance Testing\n\n\nJust to clarify, I’m not talking about random I/O bound loads today, on hard drives, targetted by the fadvise stuff — these aren’t CPU bound, and they will be helped by it.\n\nFor sequential scans, this situation is different, since the OS has sufficient read-ahead prefetching algorithms of its own for sequential reads, and the CPU work and I/O work ends up happening in parallel due to that.\n\nFor what it is worth, you can roughly double to triple the iops of an Intel X-25M on pure random reads if you queue up multiple concurrent reads rather than serialize them.  
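"Queue up multiple concurrent reads" just means keeping more than one request outstanding at a time. The crudest way to see the effect is a few threads each doing random pread()s against the same file, as in the sketch below -- thread count, block size, file path and file size are all arbitrary placeholders, and a real benchmark would time it and randomize more carefully:

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NTHREADS  4                          /* effective queue depth; arbitrary */
#define NREADS    10000
#define BLKSZ     8192
#define FILESIZE  (1024L * 1024 * 1024)      /* pretend 1GB test file */

static const char *path = "/mnt/ssd/testfile";   /* placeholder */

static void *reader(void *arg)
{
    char buf[BLKSZ];
    unsigned int seed = (unsigned int) (long) arg;
    int fd = open(path, O_RDONLY);

    if (fd < 0) { perror("open"); return NULL; }
    for (int i = 0; i < NREADS; i++) {
        off_t off = (off_t) (rand_r(&seed) % (FILESIZE / BLKSZ)) * BLKSZ;
        if (pread(fd, buf, BLKSZ, off) < 0)
            perror("pread");
    }
    close(fd);
    return NULL;
}

int main(void)
{
    pthread_t tid[NTHREADS];

    /* each thread always has one read in flight, so the drive and the OS
       scheduler see NTHREADS outstanding requests to overlap and reorder */
    for (long i = 0; i < NTHREADS; i++)
        pthread_create(&tid[i], NULL, reader, (void *) i);
    for (int i = 0; i < NTHREADS; i++)
        pthread_join(tid[i], NULL);
    return 0;
}

Time it at one thread and again at four (build with -pthread); on the X25-M the IOPS jump shows up immediately, while a single spindle gains far less.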
But it is not due to spindles, it is due to the latency of the SATA interface and the ability of the controller chip to issue reads to flash devices on different banks concurrently to some extent.  \n\n\nOn 12/9/08 7:06 PM, \"Robert Haas\" <[email protected]> wrote:\n\n> Well, when select count(1) reads pages slower than my disk, its 16x + slower\n> than my RAM.  Until one can demonstrate that the system can even read pages\n> in RAM faster than what disks will do next year, it doesn't matter much that\n> RAM is faster.   It does matter that RAM is faster for sorts, hashes, and\n> other operations, but at the current time it does not for the raw pages\n> themselves, from what I can measure.\n>\n> This is in fact, central to my point.  Things will be CPU bound, not I/O\n> bound.  It is mentioned that we still have to access things over the bus,\n> and memory is faster, etc.  But Postgres is too CPU bound on page access to\n> take advantage of the fact that memory is faster (for reading data pages).\n\nAs I understand it, a big part of the reason for the posix_fadvise\npatch is that the current system doesn't do a good job leveraging many\nspindles in the service of a single query.  So the problem is not that\nthe CPU overhead is too large in some general sense but that the disk\nand CPU operations get serialized, leading to an overall loss of\nperformance.  On the other hand, there are certainly cases (such as a\ndatabase which is entirely in RAM, or all the commonly used parts are\nin RAM) where there really isn't very much I/O, and in those cases of\ncourse the CPU cost will dominate.\n\n...Robert", "msg_date": "Tue, 9 Dec 2008 21:48:02 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "I did some further tests, that alter some of my statements below.\n\nMainly:\n\n* I can do select count(1) queries at closer to disk speeds than my older tests (on a different machine) indicated. I can get ~800MB/sec where the disks can do 1200MB/sec and other single process tasks can go 1100MB/sec. It is mostly CPU bound during these tests.\nAccessing from within shared_buffers rather than from the OS cache is faster 20% ish, but it is hard to measure consistently (hard for me to be certain its in that cache).\nSlightly more complicated scan queries (simple group by aggregates, returning < 1000 rows) slow down to ~1/3 disk speed, depending on various factors that are not worth getting into. So, postgres is a less CPU bound currently than I thought.\n* The linux caching 100% system CPU spin issue seems related to something pinning memory on my machine. Not sure what it is yet, the only thing running on the machine is postgres, and the size of the pinned memory is roughly equal to shared_buffers.\n* I have some tests that conflict with prior results, that seem to depend on whether postgres is on or off when I run the disk benchmark. This may be related to the pinned memory above. Specifically, larger block size reads reduce overall CPU usage more if there isn't anything else running on the system than if there is - attributable to more time spent in kswapd when postgres is on (and mostly idle). 
Further investigation is needed here.\n\nOn 12/9/08 8:37 AM, \"Scott Carey\" <[email protected]> wrote:\n\n> ________________________________________\n> From: [email protected] [[email protected]] On Behalf Of > Jean-David Beyer [[email protected]]\n> Sent: Tuesday, December 09, 2008 5:08 AM\n> To: [email protected]\n> Subject: Re: [PERFORM] Need help with 8.4 Performance Testing\n> -----BEGIN PGP SIGNED MESSAGE-----\n> Hash: SHA1\n\n> But one thing that can matter is whether you want to improve just the\n> performance of the dbms, or the performance of the entire system, on which\n> the dbms runs. Because if you want to improve the whole system, you would\n> want as much of the caching to take place in the system's buffers so the use\n> of the memory could be optimized over the entire workload, not just the load\n> of the dbms itself. I suppose on a dedicated system with only one dbms\n> running with only one database open (at a time, anyway), this might be moot,\n> but not otherwise.\n\nYes, the OS is in better position to arbitrate between multiple things. Of course, we aren't talking about the highest performance databases if we are talking about mixed use systems though.\nAdditionally, the OS can never really get it right, with a DB or other apps. Any app can behave badly and grab too much RAM and access it regularly enough for it to not be 'idle' much but give the OS VM fits trying to figure out if its important or not versus other processes.\n\n> Now I agree that it would be good to get the entire index (or at least the\n> working set of the index) into the memory of the computer. But does it\n> really matter if it is in the system's cache, or the postgres cache? Is it\n> any more likely to be in postgres's cache than in the system cache if the\n> system is hurting for memory? I would think the system would be equally\n> likely to page out \"idle\" pages no matter where they are unless they are\n> locked to memory, and I do not know if all operating systems can do this,\n> and even if they can, I do not know if postgres uses that ability. I doubt\n> it, since I believe (at least in Linux) a process can do that only if run as\n> root, which I imagine few (if any) users do.\n\nThe problem, is when none of them are really 'idle'. When the OS has to decide which pages, all of which have been accessed recently, to evict. Most OS's will make bad choices if the load is mixed random and sequential access, as they treat all pages equally with respect to freshness versus eviction.\nAnother problem is that there IS a difference between being in postgres' cache and the OS cache. One is more expensive to retrieve than the other. Significantly.\n\nAaccessing buffers in shared_buffers, in process, uses a good chunk less CPU (and data copy and shared buffer eviction overhead) than going over the sys call to the OS.\n\nAnd as far as I can tell, even after the 8.4 fadvise patch, all I/O is in block_size chunks. (hopefully I am wrong)\nMy system is now CPU bound, the I/O can do sequential reads of more than 1.2GB/sec but Postgres can't do a seqscan 30% as fast because it eats up CPU like crazy just reading and identifying tuples. It does seqscans ~ 25% faster if its from shared_buffers than from the OS's page cache though. 
Seqscans are between 250MB/sec and 400MB/sec peak, from mem or disk, typically showing no more than 35% iostat utilization of the array if off disk -- so we run a few concurrently where we can.\n\nIn addition to the fadvise patch, postgres needs to merge adjacent I/O's into larger ones to reduce the overhead. It only really needs to merge up to sizes of about 128k or 256k, and gain a 8x to 16x drop in syscall overhead, and additionally potentially save code trips down the shared buffer management code paths. At lest, thats my guess I haven't looked at any code and could be wrong.\n\n\nAdditionally, the \"If your operating system has any reasonable caching itself\" comment earlier in this conversation --- Linux (2.6.18, Centos 5.2) does NOT. I can easily make it spend 100% CPU in system time trying to figure out what to do with the system cache for an hour. Just do large seqscans with memory pressure from work_mem or other forces that the OS will not deem 'idle'. Once the requested memory is ~75% of the system total, it will freak out. Linux simply will not give up that last 25% or so of the RAM for anything but page cache, even though the disk subsustem is very fast and most of the access is sequential, marginalizing the benefit of the cache. Depending on how you tune it, it will either spin system cpu or swap storm, but the system cpu spin times for the same work load are a lot shorter than an equivalent swap storm.\nMount the data drive in O_DIRECT and the problem vanishes. I've been told that this problem may be gone in some of the latest kernels. I have seriously considered bumping shared_buffers up a lot and mounting the thing direct -- but then we lose the useful scheduler and readahead algorithms. The other way around (small shared_buffers, let the OS do it) hurts performance overall quite a bit -- randomly accessed pages get pushed out to the OS cache more often, and the OS tosses thouse out when a big seqscan occurs, resulting in a lot more random access from disk and more disk bound periods of time. Great wonder, this operating system caching, eh?\n\nIn any event, don't hold up these OS page cache things as if they're the best thing in the world for a database, they have serious flaws themselves and typically are difficult or impossible to tune to be ideal for a database.\n\nIts one thing to propose that a database build its own file system (hard, and why bother?) versus have a database manage its own page cache intelligently and access the OS file system as optimally as it can. In both of the latter, the DB knows much more about what data is really important than the OS (and could for example, prioritize cache versus work_mem intelligently while the OS can get that one horribly wrong in my experience, and knows when a huge seqscan occurs to make caching those results low priority). No matter how you do it using the OS cache, you cache twice and copy twice. O_DIRECT isn't usually an option for other reasons, the OS disk scheduler, readahead, and other benefits of a file system are real and substantial. If you are caching twice, you might as well have the \"closer\" copy of that data be the larger, more efficient pool.\n\nAs for tipping points and pg_bench -- It doesn't seem to reflect the kind of workload we use postgres for at all, though my workload does a lot of big hashes and seqscans, and I'm curious how much improved those may be due to the hash improvements. 32GB RAM and 3TB data (about 250GB scanned regularly) here. 
And yes, we are almost completely CPU bound now except for a few tasks. Iostat only reports above 65% disk utilization for about 5% of the workload duty-cycle, and is regularly < 20%. COPY doesn't get anywhere near platter speeds, on indexless bulk transfer. The highest disk usage spikes occur when some of our radom-access data/indexes get shoved out of cache. These aren't too large, but high enough seqscan load will cause postgres and the OS to dump them from cache. If we put these on some SSD's the disk utilization % would drop a lot further.\n\nI feel confident in saying that in about a year, I could spec out a medium sized budget for hardware ($25k) for almost any postgres setup and make it almost pure CPU bound.\nSSDs and hybrid tech such as ZFS L2ARC make this possible with easy access to 10k+ iops, and it it will take no more than 12 SATA drives in raid 10 next year (and a good controller or software raid) to get 1GB/sec sequential reads.\n\n\n\n\nRe: [PERFORM] Need help with 8.4 Performance Testing\n\n\nI did some further tests, that alter some of my statements below.  \n\nMainly:\n\n* I can do select count(1) queries at closer to disk speeds than my older tests (on a different machine) indicated.  I can get ~800MB/sec where the disks can do 1200MB/sec and other single process tasks can go 1100MB/sec.  It is mostly CPU bound during these tests.\nAccessing from within shared_buffers rather than from the OS cache is faster 20% ish, but it is hard to measure consistently (hard for me to be certain its in that cache).\nSlightly more complicated scan queries (simple group by aggregates, returning < 1000 rows) slow down to ~1/3 disk speed, depending on various factors that are not worth getting into.  So, postgres is a less CPU bound currently than I thought.\n* The linux caching 100% system CPU spin issue seems related to something pinning memory on my machine.  Not sure what it is yet, the only thing running on the machine is postgres, and the size of the pinned memory is roughly equal to shared_buffers.\n* I have some tests that conflict with prior results, that seem to depend on whether postgres is on or off when I run the disk benchmark.  This may be related to the pinned memory above.  Specifically, larger block size reads reduce overall CPU usage more if there isn’t anything else running on the system than if there is — attributable to more time spent in kswapd when postgres is on (and mostly idle).  Further investigation is needed here.\n\nOn 12/9/08 8:37 AM, \"Scott Carey\" <[email protected]> wrote:\n\n> ________________________________________\n> From: [email protected] [[email protected]] On Behalf Of > Jean-David Beyer [[email protected]]\n> Sent: Tuesday, December 09, 2008 5:08 AM\n> To: [email protected]\n> Subject: Re: [PERFORM] Need help with 8.4 Performance Testing\n> -----BEGIN PGP SIGNED MESSAGE-----\n> Hash: SHA1\n\n> But one thing that can matter is whether you want to improve just the\n> performance of the dbms, or the performance of the entire system, on which\n> the dbms runs. Because if you want to improve the whole system, you would\n> want as much of the caching to take place in the system's buffers so the use\n> of the memory could be optimized over the entire workload, not just the load\n> of the dbms itself. I suppose on a dedicated system with only one dbms\n> running with only one database open (at a time, anyway), this might be moot,\n> but not otherwise.\n\nYes, the OS is in better position to arbitrate between multiple things.  
Of course, we aren't talking about the highest performance databases if we are talking about mixed use systems though.\nAdditionally, the OS can never really get it right, with a DB or other apps.  Any app can behave badly and grab too much RAM and access it regularly enough for it to not be 'idle' much but give the OS VM fits trying to figure out if its important or not versus other processes.\n\n> Now I agree that it would be good to get the entire index (or at least the\n> working set of the index) into the memory of the computer. But does it\n> really matter if it is in the system's cache, or the postgres cache? Is it\n> any more likely to be in postgres's cache than in the system cache if the\n> system is hurting for memory? I would think the system would be equally\n> likely to page out \"idle\" pages no matter where they are unless they are\n> locked to memory, and I do not know if all operating systems can do this,\n> and even if they can, I do not know if postgres uses that ability. I doubt\n> it, since I believe (at least in Linux) a process can do that only if run as\n> root, which I imagine few (if any) users do.\n\nThe problem, is when none of them are really 'idle'.  When the OS has to decide which pages, all of which have been accessed recently, to evict.  Most OS's will make bad choices if the load is mixed random and sequential access, as they treat all pages equally with respect to freshness versus eviction.\nAnother problem is that there IS a difference between being in postgres' cache and the OS cache.  One is more expensive to retrieve than the other.  Significantly.\n\nAaccessing buffers in shared_buffers, in process, uses a good chunk less CPU (and data copy and shared buffer eviction overhead) than going over the sys call to the OS.\n\nAnd as far as I can tell, even after the 8.4 fadvise patch, all I/O is in block_size chunks.  (hopefully I am wrong)\nMy system is now CPU bound, the I/O can do sequential reads of more than 1.2GB/sec but Postgres can't do a seqscan 30% as fast because it eats up CPU like crazy just reading and identifying tuples.  It does seqscans ~ 25% faster if its from shared_buffers than from the OS's page cache though.   Seqscans are between 250MB/sec and 400MB/sec peak, from mem or disk, typically showing no more than 35% iostat utilization of the array if off disk -- so we run a few concurrently where we can.\n\nIn addition to the fadvise patch, postgres needs to merge adjacent I/O's into larger ones to reduce the overhead.  It only really needs to merge up to sizes of about 128k or 256k, and gain a 8x to 16x drop in syscall overhead, and additionally potentially save code trips down the shared buffer management code paths.  At lest, thats my guess I haven't looked at any code and could be wrong.\n\n\nAdditionally, the \"If your operating system has any reasonable caching itself\" comment earlier in this conversation ---  Linux (2.6.18, Centos 5.2) does NOT.  I can easily make it spend 100% CPU in system time trying to figure out what to do with the system cache for an hour.  Just do large seqscans with memory pressure from work_mem or other forces that the OS will not deem 'idle'.  Once the requested memory is ~75% of the system total, it will freak out.  Linux simply will not give up that last 25% or so of the RAM for anything but page cache, even though the disk subsustem is very fast and most of the access is sequential, marginalizing the benefit of the cache.  
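The symptom is easy to watch for while it is happening. A small shell sketch -- nothing PostgreSQL-specific, and the 5-second interval is arbitrary:

#!/bin/sh
# Sample the CPU breakdown and the kernel's dirty/writeback page counters
# every 5 seconds. A high, climbing "sy" column from vmstat together with
# large, stuck Dirty/Writeback values in /proc/meminfo is the sort of
# page-cache thrash being described here.
while true
do
    date
    grep -E '^(Dirty|Writeback):' /proc/meminfo
    vmstat 1 2 | tail -n 1    # second sample = current one-second average
    sleep 5
done

The Writeback figure in /proc/meminfo is the same number Greg Smith asks about further down in this thread.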
Depending on how you tune it, it will either spin system cpu or swap storm, but the system cpu spin times for the same work load are a lot shorter than an equivalent swap storm.\nMount the data drive in O_DIRECT and the problem vanishes.  I've been told that this problem may be gone in some of the latest kernels.  I have seriously considered bumping shared_buffers up a lot and mounting the thing direct -- but then we lose the useful scheduler and readahead algorithms.  The other way around (small shared_buffers, let the OS do it) hurts performance overall quite a bit -- randomly accessed pages get pushed out to the OS cache more often, and the OS tosses thouse out when a big seqscan occurs, resulting in a lot more random access from disk and more disk bound periods of time. Great wonder, this operating system caching, eh?\n\nIn any event, don't hold up these OS page cache things as if they're the best thing in the world for a database, they have serious flaws themselves and typically are difficult or impossible to tune to be ideal for a database.\n\nIts one thing to propose that a database build its own file system (hard, and why bother?) versus have a database manage its own page cache intelligently and access the OS file system as optimally as it can.  In both of the latter, the DB knows much more about what data is really important than the OS (and could for example, prioritize cache versus work_mem intelligently while the OS can get that one horribly wrong in my experience, and knows when a huge seqscan occurs to make caching those results low priority).  No matter how you do it using the OS cache, you cache twice and copy twice.  O_DIRECT isn't usually an option for other reasons, the OS disk scheduler, readahead, and other benefits of a file system are real and substantial.  If you are caching twice, you might as well have the \"closer\" copy of that data be the larger, more efficient pool.\n\nAs for tipping points and pg_bench -- It doesn't seem to reflect the kind of workload we use postgres for at all, though my workload does a lot of big hashes and seqscans, and I'm curious how much improved those may be due to the hash improvements.  32GB RAM and 3TB data (about 250GB scanned regularly) here.  And yes, we are almost completely CPU bound now except for a few tasks.  Iostat only reports above 65% disk utilization for about 5% of the workload duty-cycle, and is regularly < 20%.  COPY doesn't get anywhere near platter speeds, on indexless bulk transfer.  The highest disk usage spikes occur when some of our radom-access data/indexes get shoved out of cache.  These aren't too large, but high enough seqscan load will cause postgres and the OS to dump them from cache.  
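For reference, utilization figures like the ones quoted here are normally read off iostat's extended output; a sketch, with the interval being incidental:

# Extended per-device statistics every 5 seconds. %util is the percentage of
# time the device was busy servicing requests; await is the average time in
# milliseconds a request spent queued plus being serviced.
iostat -x 5

One caveat: on a many-spindle array presented to the OS as a single device, %util can read high while the array still has plenty of headroom, so a low-looking number is, if anything, conservative.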
If we put these on some SSD's the disk utilization % would drop a lot further.\n\nI feel confident in saying that in about a year, I could spec out a medium sized budget for hardware ($25k) for almost any postgres setup and make it almost pure CPU bound.\nSSDs and hybrid tech such as ZFS L2ARC make this possible with easy access to 10k+ iops, and it it will take no more than 12 SATA drives in raid 10 next year (and a good controller or software raid) to get 1GB/sec sequential reads.", "msg_date": "Wed, 10 Dec 2008 00:20:12 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 9 Dec 2008, Scott Carey wrote:\n> For what it is worth, you can roughly double to triple the iops of an Intel X-25M on pure random reads if you queue up\n> multiple concurrent reads rather than serialize them.  But it is not due to spindles, it is due to the latency of the\n> SATA interface and the ability of the controller chip to issue reads to flash devices on different banks concurrently\n> to some extent.  \n\nSpindles, banks. What's the difference? The fact is that you have multiple \n\"things\", and queuing up requests in the controller means that more than \none at a time can be active. The X-25M is basically a RAID controller in \nits own right, connected to ten flash devices.\n\nMatthew\n\n-- \n Don't worry! The world can't end today because it's already tomorrow\n in Australia.", "msg_date": "Wed, 10 Dec 2008 13:41:23 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "\n> I would expect higher shared_buffers to raise the curve before the first\n> breakpoint but after the first breakpoint make the drop steeper and deeper.\n> The equilibrium where the curve becomes flatter should be lower.\n\nOn SpecJAppserver specifically, I remember seeing a drop when the \ndatabase size grew beyond the size of shared_buffers.\n\n--Josh\n", "msg_date": "Thu, 11 Dec 2008 12:05:08 -0800", "msg_from": "Josh Berkus <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Greg Stark wrote:\n> On Sun, Dec 7, 2008 at 7:38 PM, Josh Berkus <[email protected]> wrote:\n>> Also, the following patches currently still have bugs, but when the bugs are\n>> fixed I'll be looking for performance testers, so please either watch the\n>> wiki or watch this space:\n>> ...\n>> -- posix_fadvise (Gregory Stark)\n> \n> Eh? Quite possibly but none that I'm aware of. The only problem is a\n> couple of trivial bits of bitrot. I'll a post an update now if you\n> want.\n\nI'm just going off the status on hackers archives. I didn't actually \ntry to build it before posting that.\n\nIf you have an updated patch, link on CommitFest page? Thanks.\n\n--Josh\n", "msg_date": "Thu, 11 Dec 2008 12:07:15 -0800", "msg_from": "Josh Berkus <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Tom,\n\n> Hmm ... I wonder whether this means that the current work on\n> parallelizing I/O (the posix_fadvise patch in particular) is a dead\n> end. Because what that is basically going to do is expend more CPU\n> to improve I/O efficiency. If you believe this thesis then that's\n> not the road we want to go down.\n\nNope. 
People who adminster small databases keep forgetting that there \nis another class of users with multiple terabytes of data. Those users \naren't getting away from spinning disk anytime in the next 5 years.\n\nAdditionally, but making PostgreSQL work better with OS-based FS \noptimization, we are well positioned to take advantage of any special \nfeatures which Linux, Solaris, BSD etc. add to utilize new hardware like \nSSDs. posix_fadvise is a great example of this.\n\n--Josh\n", "msg_date": "Thu, 11 Dec 2008 12:15:00 -0800", "msg_from": "Josh Berkus <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "Scott Marlowe wrote:\n> involves tiny bits of data scattered throughout the database. Our\n> current database is about 20-25 Gig, which means it's quickly reaching\n> the point where it will not fit in our 32G of ram, and it's likely to\n> grow too big for 64Gig before a year or two is out.\n> \n...\n> I wonder how many hard drives it would take to be CPU bound on random\n> access patterns? About 40 to 60? And probably 15k / SAS drives to\n> \nWell, its not a very big database and you're seek bound - so what's \nwrong with the latest\ngeneration flash drives? They're perfect for what you want to do it \nseems, and you can\nprobably get what you need using the new ARC cache on flash stuff in ZFS.\n\n\n", "msg_date": "Thu, 11 Dec 2008 23:09:35 +0000", "msg_from": "James Mansion <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, 9 Dec 2008, Scott Carey wrote:\n\n> My system is now CPU bound, the I/O can do sequential reads of more than \n> 1.2GB/sec but Postgres can't do a seqscan 30% as fast because it eats up \n> CPU like crazy just reading and identifying tuples... In addition to the \n> fadvise patch, postgres needs to merge adjacent I/O's into larger ones \n> to reduce the overhead.\n\nDo you have any profile data to back that up? I think it's more likely \nthat bottlenecks are on the tuple processing side of things as you also \nsuggested. There's really no sense guessing; one quick session with \nsomething like oprofile would be more informative than any amount of \nspeculation on what's going on.\n\n> Additionally, the \"If your operating system has any reasonable caching \n> itself\" comment earlier in this conversation --- Linux (2.6.18, Centos \n> 5.2) does NOT. I can easily make it spend 100% CPU in system time \n> trying to figure out what to do with the system cache for an hour.\n\nHave you ever looked into how much memory ends up showing up as \n\"Writeback\" in /proc/meminfo when this happens? The biggest problem with \nthat kernel out of the box on the kind of workload you're describing is \nthat it will try and buffer way too much on the write side by default, \nwhich can easily get you into the sort of ugly situations you describe. \nI regularly adjust that kernel to lower dirty_ratio in particular \ndramatically from the default of 40 to keep that from happening. 
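Concretely, that adjustment looks something like this on a 2.6.18-era kernel. The exact values are illustrative rather than a recommendation, since the right numbers depend on RAM size and how much battery-backed write cache sits below the OS:

# Check the current settings as root (RHEL5 / 2.6.18 ships with 40 and 10).
sysctl vm.dirty_ratio vm.dirty_background_ratio

# Lower them for the running kernel...
sysctl -w vm.dirty_ratio=10
sysctl -w vm.dirty_background_ratio=5

# ...and persist the change across reboots.
cat >> /etc/sysctl.conf <<'EOF'
vm.dirty_ratio = 10
vm.dirty_background_ratio = 5
EOF

These two knobs cap how much dirty data the kernel will accumulate before it starts throttling writers and flushing aggressively, which is exactly the buildup being described.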
I did a \nwhole blog entry on one of those if you're not familiar with this \nparticular bit of painful defaults already: \nhttp://notemagnet.blogspot.com/2008/08/linux-write-cache-mystery.html\n\n> I feel confident in saying that in about a year, I could spec out a \n> medium sized budget for hardware ($25k) for almost any postgres setup \n> and make it almost pure CPU bound.\n\nThe largest database I manage is running on a Sun X4500, which is right at \nthat price point. I've never seen it not be CPU bound. Even though \npeople are pulling data that's spread across a few TB of disk, the only \ntime I ever see it straining to keep up with something there's always a \nsingle CPU pegged.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Fri, 12 Dec 2008 02:55:52 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Tue, Dec 9, 2008 at 1:11 PM, Joshua D. Drake <[email protected]> wrote:\n>> Those intel SSDs sound compelling. I've been waiting for SSDs to get\n>> competitive price and performance wise for a while, and when the\n>> intels came out and I read the first benchmarks I immediately began\n>> scheming. Sadly, that was right after we're ordered our new 16 drive\n>> servers, and I didn't have time to try something new and hope it would\n>> work. Now that the servers are up and running, we'll probably look at\n>> adding the SSDs next summer before our high load period begins.\n>\n> The idea of SSDs is interesting. However I think I will wait for all the\n> other early adopters to figure out the problems before I start\n> suggesting them to clients.\n>\n> Hard drives work, their cheap and fast. I can get 25 spindles, 15k in a\n> 3U with controller and battery backed cache for <$10k.\n\nThis may be a little off-topic, but I'd be interested in hearing more\ndetails about how you (or others) would do this... manufacturer,\nmodel, configuration? How many hard drives do you need to get 25\nspindles? And where can you get that many 15K hard drives for under\n$10K? My lack of experience in this area is showing here, but,\nseriously, any suggestions appreciated.\n\n...Robert\n", "msg_date": "Sat, 13 Dec 2008 08:22:54 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, Dec 13, 2008 at 6:22 AM, Robert Haas <[email protected]> wrote:\n> On Tue, Dec 9, 2008 at 1:11 PM, Joshua D. Drake <[email protected]> wrote:\n>>> Those intel SSDs sound compelling. I've been waiting for SSDs to get\n>>> competitive price and performance wise for a while, and when the\n>>> intels came out and I read the first benchmarks I immediately began\n>>> scheming. Sadly, that was right after we're ordered our new 16 drive\n>>> servers, and I didn't have time to try something new and hope it would\n>>> work. Now that the servers are up and running, we'll probably look at\n>>> adding the SSDs next summer before our high load period begins.\n>>\n>> The idea of SSDs is interesting. However I think I will wait for all the\n>> other early adopters to figure out the problems before I start\n>> suggesting them to clients.\n>>\n>> Hard drives work, their cheap and fast. 
I can get 25 spindles, 15k in a\n>> 3U with controller and battery backed cache for <$10k.\n>\n> This may be a little off-topic, but I'd be interested in hearing more\n> details about how you (or others) would do this... manufacturer,\n> model, configuration? How many hard drives do you need to get 25\n> spindles? And where can you get that many 15K hard drives for under\n> $10K? My lack of experience in this area is showing here, but,\n> seriously, any suggestions appreciated.\n\nI don't know about fitting in a 3U, but you can get a 24 15k 73G SAS\ndrive machine with 8 gig ram and a single quad core CPU from\naberdeeninc.com for $10,055.00.\n", "msg_date": "Sat, 13 Dec 2008 08:15:36 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, 13 Dec 2008, Robert Haas wrote:\n\n> On Tue, Dec 9, 2008 at 1:11 PM, Joshua D. Drake <[email protected]> wrote:\n>>> Those intel SSDs sound compelling. I've been waiting for SSDs to get\n>>> competitive price and performance wise for a while, and when the\n>>> intels came out and I read the first benchmarks I immediately began\n>>> scheming. Sadly, that was right after we're ordered our new 16 drive\n>>> servers, and I didn't have time to try something new and hope it would\n>>> work. Now that the servers are up and running, we'll probably look at\n>>> adding the SSDs next summer before our high load period begins.\n>>\n>> The idea of SSDs is interesting. However I think I will wait for all the\n>> other early adopters to figure out the problems before I start\n>> suggesting them to clients.\n>>\n>> Hard drives work, their cheap and fast. I can get 25 spindles, 15k in a\n>> 3U with controller and battery backed cache for <$10k.\n>\n> This may be a little off-topic, but I'd be interested in hearing more\n> details about how you (or others) would do this... manufacturer,\n> model, configuration? How many hard drives do you need to get 25\n> spindles? And where can you get that many 15K hard drives for under\n> $10K? My lack of experience in this area is showing here, but,\n> seriously, any suggestions appreciated.\n\n1 spindle == 1 hard drive\n\nDavid Lang\n", "msg_date": "Sat, 13 Dec 2008 07:44:42 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n> On Sat, 13 Dec 2008, Robert Haas wrote:\n\n> > This may be a little off-topic, but I'd be interested in hearing more\n> > details about how you (or others) would do this... manufacturer,\n> > model, configuration? How many hard drives do you need to get 25\n> > spindles? And where can you get that many 15K hard drives for under\n> > $10K? My lack of experience in this area is showing here, but,\n> > seriously, any suggestions appreciated.\n> \n\nhttp://h71016.www7.hp.com/ctoBases.asp?oi=E9CED&BEID=19701&SBLID=&ProductLineId=450&FamilyId=2570&LowBaseId=15222&LowPrice=$1,899.00&familyviewgroup=757&viewtype=Matrix\n\nRetail cost, 15k, 36GB drives, ~ 12k. A phone call and threat of buying\ndell gets its for ~ 10k.\n\nJoshua D. Drake\n\n\n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Sat, 13 Dec 2008 10:37:43 -0800", "msg_from": "\"Joshua D. 
Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, Dec 13, 2008 at 11:37 AM, Joshua D. Drake <[email protected]> wrote:\n> On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n>> On Sat, 13 Dec 2008, Robert Haas wrote:\n>\n>> > This may be a little off-topic, but I'd be interested in hearing more\n>> > details about how you (or others) would do this... manufacturer,\n>> > model, configuration? How many hard drives do you need to get 25\n>> > spindles? And where can you get that many 15K hard drives for under\n>> > $10K? My lack of experience in this area is showing here, but,\n>> > seriously, any suggestions appreciated.\n>>\n>\n> http://h71016.www7.hp.com/ctoBases.asp?oi=E9CED&BEID=19701&SBLID=&ProductLineId=450&FamilyId=2570&LowBaseId=15222&LowPrice=$1,899.00&familyviewgroup=757&viewtype=Matrix\n>\n> Retail cost, 15k, 36GB drives, ~ 12k. A phone call and threat of buying\n> dell gets its for ~ 10k.\n\nI prefer to deal with companies that I don't have to horse trade with\nto get a good deal. You can threaten dell and get good deals, but if\nyou're not watching, and let your boss handle an order suddenly you've\ngot an $800 Perc 5e on your hands.\n", "msg_date": "Sat, 13 Dec 2008 12:45:46 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, 2008-12-13 at 12:45 -0700, Scott Marlowe wrote:\n> On Sat, Dec 13, 2008 at 11:37 AM, Joshua D. Drake <[email protected]> wrote:\n> > On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n> >> On Sat, 13 Dec 2008, Robert Haas wrote:\n\n> > http://h71016.www7.hp.com/ctoBases.asp?oi=E9CED&BEID=19701&SBLID=&ProductLineId=450&FamilyId=2570&LowBaseId=15222&LowPrice=$1,899.00&familyviewgroup=757&viewtype=Matrix\n> >\n> > Retail cost, 15k, 36GB drives, ~ 12k. A phone call and threat of buying\n> > dell gets its for ~ 10k.\n> \n> I prefer to deal with companies that I don't have to horse trade with\n> to get a good deal. You can threaten dell and get good deals, but if\n\nAnd what company would that be? There is zero major server manufacturer\nthat doesn't do the, \"Oh you have a competitive bid... let's just lower\nthat quote for you\"\n\nNote: HP can beat Dell, every time on an apples to apples quote. At\nleast when I have done it.\n\nJoshua D. Drake\n\n\n\n\n\n> \n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Sat, 13 Dec 2008 11:47:39 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, Dec 13, 2008 at 12:47 PM, Joshua D. Drake <[email protected]> wrote:\n> On Sat, 2008-12-13 at 12:45 -0700, Scott Marlowe wrote:\n>> On Sat, Dec 13, 2008 at 11:37 AM, Joshua D. Drake <[email protected]> wrote:\n>> > On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n>> >> On Sat, 13 Dec 2008, Robert Haas wrote:\n>\n>> > http://h71016.www7.hp.com/ctoBases.asp?oi=E9CED&BEID=19701&SBLID=&ProductLineId=450&FamilyId=2570&LowBaseId=15222&LowPrice=$1,899.00&familyviewgroup=757&viewtype=Matrix\n>> >\n>> > Retail cost, 15k, 36GB drives, ~ 12k. A phone call and threat of buying\n>> > dell gets its for ~ 10k.\n>>\n>> I prefer to deal with companies that I don't have to horse trade with\n>> to get a good deal. 
You can threaten dell and get good deals, but if\n>\n> And what company would that be? There is zero major server manufacturer\n> that doesn't do the, \"Oh you have a competitive bid... let's just lower\n> that quote for you\"\n>\n> Note: HP can beat Dell, every time on an apples to apples quote. At\n> least when I have done it.\n\nAberdeen inc, the one I listed in my previous response. Their on site\nprice for a single quad core xeon, 8 gig 800MHZ ram and 24 15k5 SAS\ndrives is $10,080 or so. I've never had to tell them I was getting a\nbetter price anywhere else. They just give me a great quote each\ntime, they have very fast and efficient customer service, and they\ngive a 5 year warranty on everything they custom build for you. I'm a\nvery satisfied customer.\n\nBTW, they seem to have saved a fair bit of money and passed the\nsavings along to you and me, by designing their website in one whole\nafternoon. :) Yeah, it's ugly, but it works well enough to get an\nidea what you want and then ask for a quote.\n\nWe got our 8 core opteron 32 Gig ram areca 1680i and 16 15k4 Seagates\nfor $11,000 or so at the end of summer. When we first plugged them\nin, one machine started hanging after 24-48 hours of steady load\ntesting. CAlled them, they said it sounded like a RAID card and\nthey'd ship me a new one next day business. Let me make it clear, I\ndidn't tell them I thought it was the RAID card, or that I needed it\nthe next day, they decided that's what it sounded like, and they\nshipped it to me, FOC. Arrived the next business day. Which was a\nmonday. Between friday and monday I swapped the RAID controllers\nbetween the two machines, and after 36 hours of testing, the problem\nfollowed the RAID controller and the other machine locked up. Got the\nnew controller, put it in and ran a week long test, no failures. We\nhave since had one drive fail (each machine has two hot spares) and\nthey cross shipped the replacement and we got it in 3 days.\n\nFew places have made me so happy with a decision to spend $22k on\nservers as these guys have.\n", "msg_date": "Sat, 13 Dec 2008 12:57:38 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, 2008-12-13 at 12:57 -0700, Scott Marlowe wrote:\n> On Sat, Dec 13, 2008 at 12:47 PM, Joshua D. Drake <[email protected]> wrote:\n> > On Sat, 2008-12-13 at 12:45 -0700, Scott Marlowe wrote:\n> >> On Sat, Dec 13, 2008 at 11:37 AM, Joshua D. Drake <[email protected]> wrote:\n> >> > On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n> >> >> On Sat, 13 Dec 2008, Robert Haas wrote:\n\n> > Note: HP can beat Dell, every time on an apples to apples quote. At\n> > least when I have done it.\n> \n> Aberdeen inc, the one I listed in my previous response. \n\nSorry didn't see it.\n\n> Their on site\n> price for a single quad core xeon, 8 gig 800MHZ ram and 24 15k5 SAS\n> drives is $10,080 or so. I've never had to tell them I was getting a\n> better price anywhere else. They just give me a great quote each\n> time, they have very fast and efficient customer service, and they\n> give a 5 year warranty on everything they custom build for you. I'm a\n> very satisfied customer.\n\nWell that would work for CMD but CMDs customer's would say, \"Who the\nheck is Aberdeen?\"\n\n\n> Few places have made me so happy with a decision to spend $22k on\n> servers as these guys have.\n> \n\nWell that is definitely a solid testimony. 
I don't have any experience\nwith them but I know that the smaller companies always provide better\nservice so I have no reason to doubt you. The larger the company gets\nthe harder it is to get through the muck of bureaucracy.\n\nI have to be honest though, unless the customer explicitly states we\ndon't have a problem with white box, its going to be HP, DELL or IBM,\noccasionally SUN but once they see how ridiculous the initial quotes\nfrom Sun are they generally don't want to deal with them anymore.\n\nSincerely,\n\nJoshua D. Drake\n\n\n\n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Sat, 13 Dec 2008 12:03:49 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "El Sábado, 13 de Diciembre de 2008 Scott Marlowe escribió:\n> On Sat, Dec 13, 2008 at 11:37 AM, Joshua D. Drake <[email protected]> wrote:\n> > On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n> >> On Sat, 13 Dec 2008, Robert Haas wrote:\n> >\n> >> > This may be a little off-topic, but I'd be interested in hearing more\n> >> > details about how you (or others) would do this... manufacturer,\n> >> > model, configuration? How many hard drives do you need to get 25\n> >> > spindles? And where can you get that many 15K hard drives for under\n> >> > $10K? My lack of experience in this area is showing here, but,\n> >> > seriously, any suggestions appreciated.\n> >>\n> >\n> > http://h71016.www7.hp.com/ctoBases.asp?oi=E9CED&BEID=19701&SBLID=&ProductLineId=450&FamilyId=2570&LowBaseId=15222&LowPrice=$1,899.00&familyviewgroup=757&viewtype=Matrix\n> >\n> > Retail cost, 15k, 36GB drives, ~ 12k. A phone call and threat of buying\n> > dell gets its for ~ 10k.\n> \n> I prefer to deal with companies that I don't have to horse trade with\n> to get a good deal. You can threaten dell and get good deals, but if\n> you're not watching, and let your boss handle an order suddenly you've\n> got an $800 Perc 5e on your hands.\n> \nare Perc 5 bad cards? is that for being sata?\n\nAnd what about perc 4 ? they are scsi\n\n\n\nThanks in advance\n\n\n-- \nNo imprima este correo si no es necesario, necesitará el dinero del papel para pagar las charlas del señor Gore.\n->>-----------------------------------------------\n Clist UAH a.k.a Angel\n---------------------------------[www.uah.es]-<<--\n\"Los politicos y los pañales han de cambiarse cada cierto tiempo.\"\n", "msg_date": "Sat, 13 Dec 2008 22:00:38 +0100", "msg_from": "Angel Alvarez <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, Dec 13, 2008 at 1:03 PM, Joshua D. Drake <[email protected]> wrote:\n> On Sat, 2008-12-13 at 12:57 -0700, Scott Marlowe wrote:\n>> On Sat, Dec 13, 2008 at 12:47 PM, Joshua D. Drake <[email protected]> wrote:\n>> > On Sat, 2008-12-13 at 12:45 -0700, Scott Marlowe wrote:\n>> >> On Sat, Dec 13, 2008 at 11:37 AM, Joshua D. Drake <[email protected]> wrote:\n>> >> > On Sat, 2008-12-13 at 07:44 -0800, [email protected] wrote:\n>> >> >> On Sat, 13 Dec 2008, Robert Haas wrote:\n>\n>> > Note: HP can beat Dell, every time on an apples to apples quote. 
At\n>> > least when I have done it.\n>>\n>> Aberdeen inc, the one I listed in my previous response.\n>\n> Sorry didn't see it.\n>\n>> Their on site\n>> price for a single quad core xeon, 8 gig 800MHZ ram and 24 15k5 SAS\n>> drives is $10,080 or so. I've never had to tell them I was getting a\n>> better price anywhere else. They just give me a great quote each\n>> time, they have very fast and efficient customer service, and they\n>> give a 5 year warranty on everything they custom build for you. I'm a\n>> very satisfied customer.\n>\n> Well that would work for CMD but CMDs customer's would say, \"Who the\n> heck is Aberdeen?\"\n\nFor me, it's the company I recommend.\n\nHere's the point I make. If there's any problem, whatsoever, with\naberdeen equipment when I'm putting it in, I'll come fix it and\ninterface. If you pick someone else, YOU get to interface and replace\nparts, I'm not doing it. Their time versus my time. The same is true\nwith anything else I that I recommend.\n\n>> Few places have made me so happy with a decision to spend $22k on\n>> servers as these guys have.\n>\n> Well that is definitely a solid testimony. I don't have any experience\n> with them but I know that the smaller companies always provide better\n> service so I have no reason to doubt you. The larger the company gets\n> the harder it is to get through the muck of bureaucracy.\n>\n> I have to be honest though, unless the customer explicitly states we\n> don't have a problem with white box, its going to be HP, DELL or IBM,\n> occasionally SUN but once they see how ridiculous the initial quotes\n> from Sun are they generally don't want to deal with them anymore.\n\nI've made it quite clear to bosses in the past that if they stick me\nwith dud hardware (i.e. dell 2650s with adaptec RAID controllers, et.\nal.) that I will not be handling the long, drudging fight with the\nvendor. I'll just sell it on ebay if it won't work and we'll order\nwhat I said the first time. One or two times of that happening and\nthey don't second guess me much anymore. My current boss is much more\nprogressive, and while he had his doubts, he too is now sold.\n\nIsn't it amazing how many small businesses won't buy from other small\nbusinesses? They'd much rather give their money to a company they\ndon't like because \"they'll be around a while\" (the big company).\n", "msg_date": "Sat, 13 Dec 2008 19:16:22 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "On Sat, 2008-12-13 at 19:16 -0700, Scott Marlowe wrote:\n\n> Isn't it amazing how many small businesses won't buy from other small\n> businesses? They'd much rather give their money to a company they\n> don't like because \"they'll be around a while\" (the big company).\n> \n\nTrue enough!\n\nJoshua D. Drake\n\n\n-- \nPostgreSQL\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Sat, 13 Dec 2008 19:09:01 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing" }, { "msg_contents": "\"Scott Marlowe\" <[email protected]> writes:\n> Isn't it amazing how many small businesses won't buy from other small\n> businesses?\n\nIt's entertaining that Dell is considered one of the \"safe choices\"\nin this thread. 
They were a pretty small business not so long ago\n(and remain a lot smaller than IBM or HP) ...\n\n\t\t\tregards, tom lane\n", "msg_date": "Sun, 14 Dec 2008 00:48:44 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Need help with 8.4 Performance Testing " } ]
[ { "msg_contents": "Hi,\n\nI've discovered a peculiarity with using btrim in an index and was \nwondering if anyone has any input.\n\nMy table is like this:\n Table \"public.m_object_paper\"\n Column | Type | Modifiers\n---------------------+------------------------+------------------------\n id | integer | not null\n title | character varying(200) | not null\n x_firstname | character varying(50) |\n x_lastname | character varying(50) |\n<...snip...>\n page_count | smallint |\n compare_to_database | bit varying | not null\nIndexes:\n \"m_object_paper_pkey\" PRIMARY KEY, btree (id)\n \"last_name_fnc_idx\" btree (lower(btrim(x_lastname::text)))\n \"m_object_paper_assignment_idx\" btree (assignment)\n \"m_object_paper_owner_idx\" btree (owner) CLUSTER\n<...snip to end...>\n\nMy query is like this:\nSELECT m_object_paper.id FROM m_object_paper, m_assignment WHERE \nm_object_paper.assignment = m_assignment.id AND \nm_object_paper.owner=-1 AND m_assignment.class = 2450798 AND \nlower(btrim(x_firstname)) = lower(btrim($FIRSTNAME)) and \nlower(btrim(x_lastname)) = lower(btrim($LASTNAME));\n\nStrangely, if $LASTNAME is 5 chars, the query plan looks like this:\ntii=# explain SELECT m_object_paper.id FROM m_object_paper, \nm_assignment WHERE m_object_paper.assignment = m_assignment.id AND \nm_object_paper.owner=-1 AND m_assignment.class = 2450798 AND \nlower(btrim(x_firstname)) = lower(btrim('Jordan')) and \nlower(btrim(x_lastname)) = lower(btrim('Smith'));\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------\n Hash Join (cost=181704.85..291551.77 rows=1 width=4)\n Hash Cond: (m_object_paper.assignment = m_assignment.id)\n -> Bitmap Heap Scan on m_object_paper (cost=181524.86..291369.66 \nrows=562 width=8)\n Recheck Cond: ((lower(btrim((x_lastname)::text)) = \n'smith'::text) AND (owner = (-1)))\n Filter: (lower(btrim((x_firstname)::text)) = 'jordan'::text)\n -> BitmapAnd (cost=181524.86..181524.86 rows=112429 width=0)\n -> Bitmap Index Scan on last_name_fnc_idx \n(cost=0.00..5468.29 rows=496740 width=0)\n Index Cond: (lower(btrim((x_lastname)::text)) = \n'smith'::text)\n -> Bitmap Index Scan on m_object_paper_owner_idx \n(cost=0.00..176056.04 rows=16061244 width=0)\n Index Cond: (owner = (-1))\n -> Hash (cost=177.82..177.82 rows=174 width=4)\n -> Index Scan using m_assignment_class_idx on m_assignment \n(cost=0.00..177.82 rows=174 width=4)\n Index Cond: (class = 2450798)\n(13 rows)\n\nHowever, if $LASTNAME is != 5 chars (1 char, 100 chars, doesn't make a \ndifference), the query plan looks like this:\ntii=# explain SELECT m_object_paper.id FROM m_object_paper, \nm_assignment WHERE m_object_paper.assignment = m_assignment.id AND \nm_object_paper.owner=-1 AND m_assignment.class = 2450798 AND \nlower(btrim(x_firstname)) = lower(btrim('Jordan')) and \nlower(btrim(x_lastname)) = lower(btrim('Smithers'));\n QUERY PLAN\n---------------------------------------------------------------------------------------------------\n Nested Loop (cost=0.00..10141.06 rows=1 width=4)\n -> Index Scan using last_name_fnc_idx on m_object_paper \n(cost=0.00..10114.24 rows=11 width=8)\n Index Cond: (lower(btrim((x_lastname)::text)) = \n'smithers'::text)\n Filter: ((owner = (-1)) AND \n(lower(btrim((x_firstname)::text)) = 'jordan'::text))\n -> Index Scan using m_assignment_pkey on m_assignment \n(cost=0.00..2.43 rows=1 width=4)\n Index Cond: (m_assignment.id = m_object_paper.assignment)\n Filter: (m_assignment.class = 2450798)\n(7 rows)\n\nIn practice, the 
difference is 300+ seconds when $LASTNAME == 5 chars \nand <1 second when $LASTNAME != 5 chars.\n\nWould anyone know what's going on here? Is there something about the \nway btrim works, or perhaps with the way indexes are created? It's \nstrange that the query plan would change for just one case (\"Jones,\" \n\"Smith,\" \"Brown,\" etc., all cause the query plan to use that extra \nheap scan).\n\nThanks for any help!\n--Richard\n", "msg_date": "Tue, 9 Dec 2008 11:56:45 -0800", "msg_from": "Richard Yen <[email protected]>", "msg_from_op": true, "msg_subject": "query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "On Tue, Dec 9, 2008 at 2:56 PM, Richard Yen <[email protected]> wrote:\n\n> In practice, the difference is 300+ seconds when $LASTNAME == 5 chars and <1\n> second when $LASTNAME != 5 chars.\n>\n> Would anyone know what's going on here? Is there something about the way\n> btrim works, or perhaps with the way indexes are created? It's strange that\n> the query plan would change for just one case (\"Jones,\" \"Smith,\" \"Brown,\"\n> etc., all cause the query plan to use that extra heap scan).\n\nThose are likely common names, and may be showing up in the table\nstats as common values, causing the planner to change things around.\nDoes this hold even for non-existent 5-character lastname strings?\n\nSpeaking of table statistics, might be worth upping the statistics\ntarget on that table/column, analyzing, and seeing if you get\ndifferent results.\n\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Tue, 9 Dec 2008 17:41:33 -0500", "msg_from": "\"David Wilson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "Richard Yen <[email protected]> writes:\n> I've discovered a peculiarity with using btrim in an index and was \n> wondering if anyone has any input.\n\nWhat PG version is this?\n\nIn particular, I'm wondering if it's one of the early 8.2.x releases,\nwhich had some bugs in and around choose_bitmap_and() that caused\nthem to sometimes make weird choices of indexes in a BitmapAnd plan\nstructure ...\n\nLike David, I'm pretty dubious that the behavior has anything to do with\nstrings being 5 characters long exactly; but it could very much depend\non whether the string you choose is a common last name or not. 
That\nwould change the estimated number of matching rows and hence affect the\napparent relative attractiveness of different indexes.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 09 Dec 2008 18:27:23 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "On Dec 9, 2008, at 3:27 PM, Tom Lane wrote:\n\n> Richard Yen <[email protected]> writes:\n>> I've discovered a peculiarity with using btrim in an index and was\n>> wondering if anyone has any input.\n>\n> What PG version is this?\n\nThis is running on 8.3.3\n\n> In particular, I'm wondering if it's one of the early 8.2.x releases,\n> which had some bugs in and around choose_bitmap_and() that caused\n> them to sometimes make weird choices of indexes in a BitmapAnd plan\n> structure ...\n>\n> Like David, I'm pretty dubious that the behavior has anything to do \n> with\n> strings being 5 characters long exactly; but it could very much depend\n> on whether the string you choose is a common last name or not. That\n> would change the estimated number of matching rows and hence affect \n> the\n> apparent relative attractiveness of different indexes.\n\n\nYou guys are right. I tried \"Miller\" and gave me the same result. Is \nthere any way to tune this so that for the common last names, the \nquery run time doesn't jump from <1s to >300s?\nThanks for the help!\n--Richard \n", "msg_date": "Wed, 10 Dec 2008 11:28:43 -0800", "msg_from": "Richard Yen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "Richard Yen <[email protected]> writes:\n> You guys are right. I tried \"Miller\" and gave me the same result. Is \n> there any way to tune this so that for the common last names, the \n> query run time doesn't jump from <1s to >300s?\n\nIf the planner's estimation is that far off then there must be something\nvery weird about the table statistics, but you haven't given us any clue\nwhat.\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 10 Dec 2008 14:34:33 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "> You guys are right. I tried \"Miller\" and gave me the same result. Is there\n> any way to tune this so that for the common last names, the query run time\n> doesn't jump from <1s to >300s?\n> Thanks for the help!\n\nCan you send the output of EXPLAIN ANALYZE for both cases?\n\n...Robert\n", "msg_date": "Wed, 10 Dec 2008 14:39:42 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "BTW, if your queries typically constrain both lastname and firstname,\nit'd likely be worthwhile to make a 2-column index on\n\tlower(btrim(x_lastname)), lower(btrim(x_firstname))\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 10 Dec 2008 15:04:54 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "On Dec 10, 2008, at 11:34 AM, Tom Lane wrote:\n\n> Richard Yen <[email protected]> writes:\n>> You guys are right. 
I tried \"Miller\" and gave me the same result. \n>> Is\n>> there any way to tune this so that for the common last names, the\n>> query run time doesn't jump from <1s to >300s?\n>\n> If the planner's estimation is that far off then there must be \n> something\n> very weird about the table statistics, but you haven't given us any \n> clue\n> what.\n\nWow, thanks for helping me out here. I don't have much experience \nwith deconstructing queries and working with stats, so here's what I \ncould gather. If you need more information, please let me know.\n\ntii=# select * from pg_stat_all_tables where relname = \n'm_object_paper' or relname = 'm_assignment';\n-[ RECORD 1 ]----+------------------------------\nrelid | 17516\nschemaname | public\nrelname | m_assignment\nseq_scan | 274\nseq_tup_read | 1039457272\nidx_scan | 372379230\nidx_tup_fetch | 2365235708\nn_tup_ins | 5641638\nn_tup_upd | 520684\nn_tup_del | 30339\nn_tup_hot_upd | 406929\nn_live_tup | 5611665\nn_dead_tup | 11877\nlast_vacuum |\nlast_autovacuum | 2008-12-04 17:44:57.309717-08\nlast_analyze | 2008-10-20 15:09:50.943652-07\nlast_autoanalyze | 2008-08-15 17:16:14.588153-07\n-[ RECORD 2 ]----+------------------------------\nrelid | 17792\nschemaname | public\nrelname | m_object_paper\nseq_scan | 83613\nseq_tup_read | 184330159906\nidx_scan | 685219945\nidx_tup_fetch | 222892138627\nn_tup_ins | 71564825\nn_tup_upd | 27558792\nn_tup_del | 3058\nn_tup_hot_upd | 22410985\nn_live_tup | 71559627\nn_dead_tup | 585467\nlast_vacuum | 2008-10-24 14:36:45.134936-07\nlast_autovacuum | 2008-12-05 07:02:40.52712-08\nlast_analyze | 2008-11-25 14:42:04.185798-08\nlast_autoanalyze | 2008-08-15 17:20:28.42811-07\n\ntii=# select * from pg_statio_all_tables where relname = \n'm_object_paper' or relname = 'm_assignment';\n-[ RECORD 1 ]---+---------------\nrelid | 17516\nschemaname | public\nrelname | m_assignment\nheap_blks_read | 22896372\nheap_blks_hit | 1753777105\nidx_blks_read | 7879634\nidx_blks_hit | 1157729592\ntoast_blks_read | 0\ntoast_blks_hit | 0\ntidx_blks_read | 0\ntidx_blks_hit | 0\n-[ RECORD 2 ]---+---------------\nrelid | 17792\nschemaname | public\nrelname | m_object_paper\nheap_blks_read | 2604944369\nheap_blks_hit | 116307527781\nidx_blks_read | 133534908\nidx_blks_hit | 3601637440\ntoast_blks_read | 0\ntoast_blks_hit | 0\ntidx_blks_read | 0\ntidx_blks_hit | 0\n\nAlso, yes, we've kicked around the idea of doing an index on the \nconcatenation of the first and last names--that would definitely be \nmore unique, and I think we're actually going to move to that. Just \nthought I'd dig deeper here to learn more.\n\nThanks!\n--Richard\n", "msg_date": "Wed, 10 Dec 2008 13:54:50 -0800", "msg_from": "Richard Yen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "On Dec 10, 2008, at 11:39 AM, Robert Haas wrote:\n\n>> You guys are right. I tried \"Miller\" and gave me the same result. 
\n>> Is there\n>> any way to tune this so that for the common last names, the query \n>> run time\n>> doesn't jump from <1s to >300s?\n>> Thanks for the help!\n>\n> Can you send the output of EXPLAIN ANALYZE for both cases?\n\n\ntii=# explain analyze SELECT m_object_paper.id FROM m_object_paper, \nm_assignment WHERE m_object_paper.assignment = m_assignment.id AND \nm_object_paper.owner=-1 AND m_assignment.class = 2450798 AND \nlower(btrim(x_firstname)) = lower(btrim('Jordan')) and \nlower(btrim(x_lastname)) = lower(btrim('Smithers'));\n \nQUERY PLAN\n-----------------------------------------------------------------------------------------------------------------------------------------------\n Nested Loop (cost=0.00..10141.07 rows=1 width=4) (actual \ntime=33.004..33.004 rows=0 loops=1)\n -> Index Scan using last_name_fnc_idx on m_object_paper \n(cost=0.00..10114.25 rows=11 width=8) (actual time=33.003..33.003 \nrows=0 loops=1)\n Index Cond: (lower(btrim((x_lastname)::text)) = \n'smithers'::text)\n Filter: ((owner = (-1)) AND \n(lower(btrim((x_firstname)::text)) = 'jordan'::text))\n -> Index Scan using m_assignment_pkey on m_assignment \n(cost=0.00..2.43 rows=1 width=4) (never executed)\n Index Cond: (m_assignment.id = m_object_paper.assignment)\n Filter: (m_assignment.class = 2450798)\n Total runtime: 33.070 ms\n(8 rows)\n\ntii=# explain analyze SELECT m_object_paper.id FROM m_object_paper, \nm_assignment WHERE m_object_paper.assignment = m_assignment.id AND \nm_object_paper.owner=-1 AND m_assignment.class = 2450798 AND \nlower(btrim(x_firstname)) = lower(btrim('Jordan')) and \nlower(btrim(x_lastname)) = lower(btrim('Smith'));\n QUERY \n PLAN\n--------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Hash Join (cost=181867.87..291714.78 rows=1 width=4) (actual \ntime=124746.426..139252.850 rows=1 loops=1)\n Hash Cond: (m_object_paper.assignment = m_assignment.id)\n -> Bitmap Heap Scan on m_object_paper (cost=181687.88..291532.67 \nrows=562 width=8) (actual time=124672.328..139248.919 rows=58 loops=1)\n Recheck Cond: ((lower(btrim((x_lastname)::text)) = \n'smith'::text) AND (owner = (-1)))\n Filter: (lower(btrim((x_firstname)::text)) = 'jordan'::text)\n -> BitmapAnd (cost=181687.88..181687.88 rows=112429 \nwidth=0) (actual time=124245.890..124245.890 rows=0 loops=1)\n -> Bitmap Index Scan on last_name_fnc_idx \n(cost=0.00..5476.30 rows=496740 width=0) (actual \ntime=16194.803..16194.803 rows=521382 loops=1)\n Index Cond: (lower(btrim((x_lastname)::text)) = \n'smith'::text)\n -> Bitmap Index Scan on m_object_paper_owner_idx \n(cost=0.00..176211.04 rows=16061244 width=0) (actual \ntime=107928.054..107928.054 rows=15494737 loops=1)\n Index Cond: (owner = (-1))\n -> Hash (cost=177.82..177.82 rows=174 width=4) (actual \ntime=3.764..3.764 rows=5 loops=1)\n -> Index Scan using m_assignment_class_idx on m_assignment \n(cost=0.00..177.82 rows=174 width=4) (actual time=0.039..3.756 rows=5 \nloops=1)\n Index Cond: (class = 2450798)\n Total runtime: 139255.109 ms\n(14 rows)\n\nThis example doesn't have a > 300s run time, but there are a few in my \nlog that are. 
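A quick way to check the "common last name" theory raised earlier is to look at what the planner actually has in its statistics. A sketch -- the catalog view is standard, and note that an expression index keeps its own statistics entry, keyed by the index name rather than the table name:

-- Most-common values for the raw column.
SELECT attname, n_distinct, most_common_vals, most_common_freqs
  FROM pg_stats
 WHERE tablename = 'm_object_paper'
   AND attname = 'x_lastname';

-- Statistics gathered for the expression in last_name_fnc_idx.
SELECT attname, n_distinct, most_common_vals, most_common_freqs
  FROM pg_stats
 WHERE tablename = 'last_name_fnc_idx';

If 'smith' turns up in most_common_vals with a large frequency, the very different row estimates for 'smith' versus 'smithers' are expected behaviour rather than a planner bug.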
In either case, I guess you get the picture.\n\nThanks for the help!\n--Richard\n", "msg_date": "Wed, 10 Dec 2008 14:04:57 -0800", "msg_from": "Richard Yen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": ">\n> tii=# explain analyze SELECT m_object_paper.id FROM m_object_paper,\n> m_assignment WHERE m_object_paper.assignment = m_assignment.id AND\n> m_object_paper.owner=-1 AND m_assignment.class = 2450798 AND\n> lower(btrim(x_firstname)) = lower(btrim('Jordan')) and\n> lower(btrim(x_lastname)) = lower(btrim('Smith'));\n\nIs there an index on \"m_object_paper.assignment\"? It could solve the\nproblem.\n\nWith current indices on \"btrim(last_name)\" and \"owner\" you are just throwing\nthe rows away (you have 521382 rows with \"smith\", 15494737 with owner=-1 and\nonly 58 of them have both \"smith\"/\"jordan\" and -1).\n\nConsider creating index on m_object_paper using\nbtree(lower(btrim(x_lastname))) where owner=-1; (it might add firstname\ncolumn there as per Tom's suggestion)\n\nOr just index on (owner, lower(...)) if you have other queries with\ndifferent values for owner.\n\nOne more point that could improve bitmap scans is greater value for\nwork_mem. You'll need 8*15494737 ~ 130Mb == 130000 for work_mem (however,\nthat is way too high unless you have lots of RAM and just couple of active\ndatabase sessions)\n\n\nRegards,\nVladimir Sitnikov\n\n\ntii=# explain analyze SELECT m_object_paper.id FROM m_object_paper, m_assignment WHERE m_object_paper.assignment = m_assignment.id AND m_object_paper.owner=-1 AND m_assignment.class = 2450798 AND lower(btrim(x_firstname)) = lower(btrim('Jordan')) and lower(btrim(x_lastname)) = lower(btrim('Smith'));\nIs there an index on \"m_object_paper.assignment\"? It could solve the problem.With current indices on \"btrim(last_name)\" and \"owner\" you are just throwing the rows away (you have 521382 rows with \"smith\", 15494737 with owner=-1 and only 58 of them have both \"smith\"/\"jordan\" and -1).\nConsider creating index on m_object_paper using btree(lower(btrim(x_lastname))) where owner=-1; (it might add firstname column there as per Tom's suggestion)Or just index on (owner, lower(...)) if you have other queries with different values for owner.\nOne more point that could improve bitmap scans is greater value for work_mem. You'll need 8*15494737 ~ 130Mb == 130000 for work_mem (however, that is way too high unless you have lots of RAM and just couple of active database sessions)\nRegards,Vladimir Sitnikov", "msg_date": "Thu, 11 Dec 2008 01:36:51 +0300", "msg_from": "\"Vladimir Sitnikov\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "Richard Yen <[email protected]> writes:\n> Is there any way to tune this so that for the common last names, the query \n> run time doesn't jump from <1s to >300s?\n\nWell, as near as I can tell there's factor of a couple hundred\ndifference between the frequencies of 'smith' and 'smithers', so\nyou shouldn't really expect similar runtimes for the two cases.\n\nHaving said that, I still think you should try to index both first\nand last name. Also I wonder whether the index on owner is worth\nhaving at all. 
It definitely doesn't seem worthwhile to index the\nentries with owner = -1, since there are so many; so maybe you could\nmake it a partial index that excludes those entries, in order to prevent\nthe planner from trying to use it for this case.\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 10 Dec 2008 19:08:28 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" }, { "msg_contents": "On Dec 10, 2008, at 4:08 PM, Tom Lane wrote:\n\n> Richard Yen <[email protected]> writes:\n>> Is there any way to tune this so that for the common last names, \n>> the query\n>> run time doesn't jump from <1s to >300s?\n>\n> Well, as near as I can tell there's factor of a couple hundred\n> difference between the frequencies of 'smith' and 'smithers', so\n> you shouldn't really expect similar runtimes for the two cases.\n>\n> Having said that, I still think you should try to index both first\n> and last name. Also I wonder whether the index on owner is worth\n> having at all. It definitely doesn't seem worthwhile to index the\n> entries with owner = -1, since there are so many; so maybe you could\n> make it a partial index that excludes those entries, in order to \n> prevent\n> the planner from trying to use it for this case.\n\nCreated the 2-col index, and the query runs much faster on all \npermutations.\n\nThanks much for all your help,\n--Richard\n", "msg_date": "Wed, 10 Dec 2008 17:44:23 -0800", "msg_from": "Richard Yen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: query plan with index having a btrim is different for strings of\n\tdifferent length" } ]
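For reference, the fixes discussed in this thread come out to DDL along these lines. The index names are made up, and which partial-index predicate makes sense depends on how the owner column is really queried:

-- The two-column functional index Tom suggested (the one Richard created):
CREATE INDEX m_object_paper_name_fnc_idx
    ON m_object_paper (lower(btrim(x_lastname)), lower(btrim(x_firstname)));

-- Tom's other suggestion: stop indexing the extremely common owner = -1 rows,
-- as a replacement for the existing m_object_paper_owner_idx.
CREATE INDEX m_object_paper_owner_partial_idx
    ON m_object_paper (owner)
 WHERE owner <> -1;

-- Vladimir's variant, targeted at exactly this query shape:
CREATE INDEX m_object_paper_lastname_orphan_idx
    ON m_object_paper (lower(btrim(x_lastname)))
 WHERE owner = -1;

-- David's earlier suggestion, if the row estimates themselves are the problem:
ALTER TABLE m_object_paper ALTER COLUMN x_lastname SET STATISTICS 1000;
ANALYZE m_object_paper;

Either partial index keeps the planner from being tempted into the fifteen-million-row owner = -1 bitmap scan that made the 'smith' plan so expensive.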
[ { "msg_contents": "I have postgresql 8.3.5 installed on MacOS X / Darwin. I remember \nsetting shared memory buffer parameters and that solved the initial \nperformance problem, but after running several tests, the performance \ngoes way, way down. Restarting the server doesn't seem to help.\n\nI'm using pqxx to access the database, if that makes any difference.\n\n-- \nVincent\n\n\n\n\n\nI have postgresql 8.3.5 installed on MacOS X / Darwin.  I remember setting shared memory buffer parameters and that solved the initial performance problem, but after running several tests, the performance goes way, way down. Restarting the server doesn't seem to help.I'm using pqxx to access the database, if that makes any difference.   -- Vincent", "msg_date": "Tue, 9 Dec 2008 21:16:43 -0600", "msg_from": "Vincent Predoehl <[email protected]>", "msg_from_op": true, "msg_subject": "Degenerate Performance Problem" }, { "msg_contents": "On Tue, Dec 9, 2008 at 8:16 PM, Vincent Predoehl\n<[email protected]> wrote:\n> I have postgresql 8.3.5 installed on MacOS X / Darwin. I remember setting\n> shared memory buffer parameters and that solved the initial performance\n> problem, but after running several tests, the performance goes way, way\n> down. Restarting the server doesn't seem to help.\n> I'm using pqxx to access the database, if that makes any difference.\n\nCould be a vacuuming issue.\n\nWhat does\n\nvacuum verbose;\n\non the database say?\n", "msg_date": "Tue, 9 Dec 2008 20:47:23 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Degenerate Performance Problem" } ]
[ { "msg_contents": "Hi All,\nI would like to ask to you, how many connections a db server can handle. \nI know the question is not so easy, and actually I don't want to known a \n\"number\" but something like:\n- up to 100 connections: small load, low entry server is enough\n- up to 200 connections: the db server starts to sweat, you need a \ndedicated medium server\n- up to 300 connections: hard work, dedicated server\n- up to 500 connections: hard work, dedicated high level server\n\nI would like just to understand when we can talk about small/medium/high \nload.\n\nAt the moment I'm using a quad-proc system with a 6 disk 1+0 RAID array \nand 2 separate disks for the OS and write-ahead logs.\n\nThanks\nste\n", "msg_date": "Fri, 12 Dec 2008 11:07:08 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "db server load" }, { "msg_contents": "On Fri, Dec 12, 2008 at 3:07 AM, Stefano Nichele\n<[email protected]> wrote:\n> Hi All,\n> I would like to ask to you, how many connections a db server can handle. I\n> know the question is not so easy, and actually I don't want to known a\n> \"number\" but something like:\n> - up to 100 connections: small load, low entry server is enough\n> - up to 200 connections: the db server starts to sweat, you need a dedicated\n> medium server\n> - up to 300 connections: hard work, dedicated server\n> - up to 500 connections: hard work, dedicated high level server\n>\n> I would like just to understand when we can talk about small/medium/high\n> load.\n\nWell, it's of course more than just how many connections you have.\nWhat percentage of the connections are idle? Are you running small\ntight multi-statement transactions, or huge reporting queries? The db\nserver we have at work routinely has 100+ connections, but of those,\nthere are only a dozen or so actively running, and they are small and\ntransactional in nature. The machine handling this is very\noverpowered, with 8 opteron cores and 12 disks in a RAID-10 for data\nand 2 in another RAID-10 for everything else (pg_xlog, logging, etc)\nunder a very nice hardware RAID card with battery backed cache. We've\ntested it to much higher loads and it's held up quite well.\n\nWith the current load, and handling a hundred or so transactions per\nsecond, the top of top looks like this:\n\ntop - 14:40:55 up 123 days, 2:24, 1 user, load average: 1.08, 0.97, 1.04\nTasks: 284 total, 1 running, 283 sleeping, 0 stopped, 0 zombie\nCpu0 : 2.8%us, 0.4%sy, 0.0%ni, 96.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st\nCpu1 : 2.5%us, 0.3%sy, 0.0%ni, 97.2%id, 0.1%wa, 0.0%hi, 0.0%si, 0.0%st\nCpu2 : 2.5%us, 0.2%sy, 0.0%ni, 97.1%id, 0.1%wa, 0.1%hi, 0.0%si, 0.0%st\nCpu3 : 10.0%us, 0.7%sy, 0.0%ni, 89.0%id, 0.1%wa, 0.0%hi, 0.2%si, 0.0%st\nCpu4 : 13.0%us, 0.9%sy, 0.0%ni, 85.9%id, 0.1%wa, 0.0%hi, 0.1%si, 0.0%st\nCpu5 : 13.5%us, 0.9%sy, 0.0%ni, 85.3%id, 0.1%wa, 0.0%hi, 0.1%si, 0.0%st\nCpu6 : 16.2%us, 1.1%sy, 0.0%ni, 82.2%id, 0.3%wa, 0.0%hi, 0.2%si, 0.0%st\nCpu7 : 34.3%us, 2.4%sy, 0.0%ni, 61.3%id, 0.1%wa, 0.4%hi, 1.5%si, 0.0%st\n\nsingle line cpu looks like this:\n\nCpu(s): 6.1%us, 0.8%sy, 0.0%ni, 92.9%id, 0.0%wa, 0.0%hi, 0.1%si, 0.0%st\n\na line from vmstat 30 looks like this:\n\n 1 0 12548 2636232 588964 27689652 0 0 0 3089 3096 4138\n9 2 89 0 0\n\nwhich shows us writing out at ~3M/sec. This machine, running pgbench\non a db twice the size of the one it currently runs on, can get\nthroughput of 30 to 50 megabytes per second. 
peaks at about 60,\nrandom access.\n\n> At the moment I'm using a quad-proc system with a 6 disk 1+0 RAID array and\n> 2 separate disks for the OS and write-ahead logs.\n\nRun some realistic load tests and monitor the machine with vmstat and\ntop and iostat, etc... then compare those numbers to your day to day\nnumbers to get an idea how close to max performance you're running to\nsee how much headroom you have.\n", "msg_date": "Fri, 12 Dec 2008 14:44:51 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: db server load" }, { "msg_contents": "Hi Scott,\nas you know since the other thread, I performed some tests:\n\n-bash-3.1$ pgbench -c 50 -t 1000\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 50\nnumber of transactions per client: 1000\nnumber of transactions actually processed: 50000/50000\ntps = 377.351354 (including connections establishing)\ntps = 377.788377 (excluding connections establishing)\n\nSome vmstat samplings in the meantime:\n\nprocs -----------memory---------- ---swap-- -----io---- --system-- \n-----cpu------\n r b swpd free buff cache si so bi bo in cs us sy \nid wa st\n0 4 92 127880 8252 3294512 0 0 458 12399 2441 14903 22 9 \n34 35 0\n11 49 92 125336 8288 3297016 0 0 392 11071 2390 11568 17 \n7 51 24 0\n 0 2 92 124548 8304 3297764 0 0 126 8249 2291 3829 5 3 \n64 28 0\n 0 1 92 127268 7796 3295672 0 0 493 11387 2323 14221 23 \n9 47 21 0\n 0 2 92 127256 7848 3295492 0 0 501 10654 2215 14599 24 \n9 42 24 0\n 0 2 92 125772 7892 3295656 0 0 34 7541 2311 327 0 1 \n59 40 0\n 0 1 92 127188 7952 3294084 0 0 537 11039 2274 15460 23 \n10 43 24 0\n 7 4 92 123816 7996 3298620 0 0 253 8946 2284 7310 11 5 \n52 32 0\n 0 2 92 126652 8536 3294220 0 0 440 9563 2307 9036 13 6 \n56 25 0\n 0 10 92 125268 8584 3296116 0 0 426 10696 2285 11034 20 \n9 39 32 0\n 0 2 92 124168 8604 3297252 0 0 104 8385 2319 4162 3 3 \n40 54 0\n 0 8 92 123780 8648 3296456 0 0 542 11498 2298 16613 25 \n10 16 48 0\n \n\n-bash-3.1$ pgbench -t 10000 -c 50\nstarting vacuum...end.\ntransaction type: SELECT only\nscaling factor: 100\nnumber of clients: 50\nnumber of transactions per client: 10000\nnumber of transactions actually processed: 500000/500000\ntps = 8571.573651 (including connections establishing)\ntps = 8594.357138 (excluding connections establishing)\n\n\n-bash-3.1$ pgbench -t 10000 -c 50 -S\nstarting vacuum...end.\ntransaction type: SELECT only\nscaling factor: 100\nnumber of clients: 50\nnumber of transactions per client: 10000\nnumber of transactions actually processed: 500000/500000\ntps = 8571.573651 (including connections establishing)\ntps = 8594.357138 (excluding connections establishing)\n\n\n(next test is with scaling factor 1)\n\n-bash-3.1$ pgbench -t 20000 -c 8 -S pgbench\nstarting vacuum...end.\ntransaction type: SELECT only\nscaling factor: 1\nnumber of clients: 8\nnumber of transactions per client: 20000\nnumber of transactions actually processed: 160000/160000\ntps = 11695.895318 (including connections establishing)\ntps = 11715.603720 (excluding connections establishing)\n\nAny comment ?\n\nI can give you also some details about database usage of my application:\n- number of active connections: about 60\n- number of idle connections: about 60\n\nHere some number from a mine old pgfouine report:\n- query peak: 378 queries/s\n- select: 53,1%, insert 3,8%, update 2,2 %, delete 2,8 %\n\nThe application is basically a web application and the db size is 37 GB.\n\nHow would you classify the load ? 
small/medium/high ?\n\nCheers,\nste\n\nScott Marlowe wrote:\n> On Fri, Dec 12, 2008 at 3:07 AM, Stefano Nichele\n> <[email protected]> wrote:\n> \n>> Hi All,\n>> I would like to ask to you, how many connections a db server can handle. I\n>> know the question is not so easy, and actually I don't want to known a\n>> \"number\" but something like:\n>> - up to 100 connections: small load, low entry server is enough\n>> - up to 200 connections: the db server starts to sweat, you need a dedicated\n>> medium server\n>> - up to 300 connections: hard work, dedicated server\n>> - up to 500 connections: hard work, dedicated high level server\n>>\n>> I would like just to understand when we can talk about small/medium/high\n>> load.\n>> \n>\n> Well, it's of course more than just how many connections you have.\n> What percentage of the connections are idle? Are you running small\n> tight multi-statement transactions, or huge reporting queries? The db\n> server we have at work routinely has 100+ connections, but of those,\n> there are only a dozen or so actively running, and they are small and\n> transactional in nature. The machine handling this is very\n> overpowered, with 8 opteron cores and 12 disks in a RAID-10 for data\n> and 2 in another RAID-10 for everything else (pg_xlog, logging, etc)\n> under a very nice hardware RAID card with battery backed cache. We've\n> tested it to much higher loads and it's held up quite well.\n>\n> With the current load, and handling a hundred or so transactions per\n> second, the top of top looks like this:\n>\n> top - 14:40:55 up 123 days, 2:24, 1 user, load average: 1.08, 0.97, 1.04\n> Tasks: 284 total, 1 running, 283 sleeping, 0 stopped, 0 zombie\n> Cpu0 : 2.8%us, 0.4%sy, 0.0%ni, 96.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st\n> Cpu1 : 2.5%us, 0.3%sy, 0.0%ni, 97.2%id, 0.1%wa, 0.0%hi, 0.0%si, 0.0%st\n> Cpu2 : 2.5%us, 0.2%sy, 0.0%ni, 97.1%id, 0.1%wa, 0.1%hi, 0.0%si, 0.0%st\n> Cpu3 : 10.0%us, 0.7%sy, 0.0%ni, 89.0%id, 0.1%wa, 0.0%hi, 0.2%si, 0.0%st\n> Cpu4 : 13.0%us, 0.9%sy, 0.0%ni, 85.9%id, 0.1%wa, 0.0%hi, 0.1%si, 0.0%st\n> Cpu5 : 13.5%us, 0.9%sy, 0.0%ni, 85.3%id, 0.1%wa, 0.0%hi, 0.1%si, 0.0%st\n> Cpu6 : 16.2%us, 1.1%sy, 0.0%ni, 82.2%id, 0.3%wa, 0.0%hi, 0.2%si, 0.0%st\n> Cpu7 : 34.3%us, 2.4%sy, 0.0%ni, 61.3%id, 0.1%wa, 0.4%hi, 1.5%si, 0.0%st\n>\n> single line cpu looks like this:\n>\n> Cpu(s): 6.1%us, 0.8%sy, 0.0%ni, 92.9%id, 0.0%wa, 0.0%hi, 0.1%si, 0.0%st\n>\n> a line from vmstat 30 looks like this:\n>\n> 1 0 12548 2636232 588964 27689652 0 0 0 3089 3096 4138\n> 9 2 89 0 0\n>\n> which shows us writing out at ~3M/sec. This machine, running pgbench\n> on a db twice the size of the one it currently runs on, can get\n> throughput of 30 to 50 megabytes per second. peaks at about 60,\n> random access.\n>\n> \n>> At the moment I'm using a quad-proc system with a 6 disk 1+0 RAID array and\n>> 2 separate disks for the OS and write-ahead logs.\n>> \n>\n> Run some realistic load tests and monitor the machine with vmstat and\n> top and iostat, etc... then compare those numbers to your day to day\n> numbers to get an idea how close to max performance you're running to\n> see how much headroom you have.\n>\n> \n\n", "msg_date": "Mon, 12 Jan 2009 11:07:33 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "Re: db server load" } ]
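As that thread notes, the raw connection count matters less than how many backends are actually working at once. On 8.3 the split can be read straight from pg_stat_activity, for example:

-- Idle vs. active backends; '<IDLE>' is how 8.3 marks idle connections.
SELECT CASE WHEN current_query = '<IDLE>' THEN 'idle'
            WHEN current_query LIKE '<IDLE> in transaction%' THEN 'idle in transaction'
            ELSE 'active'
       END AS state,
       count(*)
FROM pg_stat_activity
GROUP BY 1
ORDER BY 2 DESC;

Sampling this alongside vmstat and iostat during peak hours gives a more useful picture of headroom than classifying the load by total connections alone.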
[ { "msg_contents": "Hi\n\nI'm trying to compare different filesystems for postgres using pgbench. \n The problem I've seen is that my IO wait is very very low. Is there a \nway I can get pgbench to do more to push that up a little?\n\n-- \nAdrian Moisey\nActing Systems Designer | CareerJunction | Your Future Starts Here.\nWeb: www.careerjunction.co.za | Email: [email protected]\nPhone: +27 21 818 8621 | Mobile: +27 82 858 7830 | Fax: +27 21 818 8855\n", "msg_date": "Mon, 15 Dec 2008 08:46:48 +0200", "msg_from": "Adrian Moisey <[email protected]>", "msg_from_op": true, "msg_subject": "filesystems benchmark" }, { "msg_contents": ">\n> I'm trying to compare different filesystems for postgres using pgbench.\n> The problem I've seen is that my IO wait is very very low. Is there a way\n> I can get pgbench to do more to push that up a little?\n\nWhy do you expect to see high IO wait?\n\nDoes your database fit in system memory? If you do not modify the data and\nit is not large enough, it is unlikely you will hit IO wait.\n\nDo you have write cache enabled for the file system/device? Even if you have\nsome DML statements, it would not be that easy to saturate write cache\nenabled storage (especially, when it fits in memory).\n\n\nRegards,\nVladimir Sitnikov\n\n\nI'm trying to compare different filesystems for postgres using pgbench.  The problem I've seen is that my IO wait is very very low.  Is there a way I can get pgbench to do more to push that up a little?\nWhy do you expect to see high IO wait? Does your database fit in system memory? If you do not modify the data and it is not large enough, it is unlikely you will hit IO wait.\nDo you have write cache enabled for the file system/device? Even if you have some DML statements, it would not be that easy to saturate write cache enabled storage (especially, when it fits in memory).\nRegards,Vladimir Sitnikov", "msg_date": "Mon, 15 Dec 2008 15:02:39 +0300", "msg_from": "\"Vladimir Sitnikov\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: filesystems benchmark" }, { "msg_contents": "On Mon, 15 Dec 2008, Adrian Moisey wrote:\n\n> I'm trying to compare different filesystems for postgres using pgbench. The \n> problem I've seen is that my IO wait is very very low. Is there a way I can \n> get pgbench to do more to push that up a little?\n\nIncrease the database scale until the wait is what you expect. See \nhttp://www.westnet.com/~gsmith/gregsmith/content/postgresql/pgbench-scaling.htm \nfor details.\n\nThe latest filesystem performance comparison I'm aware of is at \nhttp://www.baconandtech.com/files/filesystem_io.pdf\n\nYou might also take a look at that to make sure you're not re-mapping \nterritory that's already been explored.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 15 Dec 2008 12:31:51 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: filesystems benchmark" } ]
[ { "msg_contents": "PostgreSQL 8.3.5, the system is now stable (uptime > 10 days). \nPostgreSQL stats collector uses 100% CPU forever:\n\nOn Thursday:\n\nlast pid: 29509; load averages: 2.36, 2.01, \n2.03 \nup 5+17:28:56 04:02:53\n196 processes: 3 running, 184 sleeping, 9 zombie\nCPU states: 5.3% user, 0.0% nice, 15.3% system, 0.0% interrupt, 79.4% \nidle\nMem: 1009M Active, 5995M Inact, 528M Wired, 354M Cache, 214M Buf, 12M Free\nSwap: 16G Total, 500K Used, 16G Free\n\n PID USERNAME THR PRI NICE SIZE RES STATE C TIME WCPU \nCOMMAND\n26709 pgsql 1 106 0 22400K 6832K CPU6 6 973:52 99.02% \npostgres\n\nToday:\n\nlast pid: 71326; load averages: 2.69, 3.15, \n2.92 \nup 10+20:13:44 06:47:41\n176 processes: 3 running, 166 sleeping, 7 zombie\nCPU states: % user, % nice, % system, % interrupt, % \nidle\nMem: 928M Active, 5868M Inact, 557M Wired, 380M Cache, 214M Buf, 172M Free\nSwap: 16G Total, 620K Used, 16G Free\n\n PID USERNAME THR PRI NICE SIZE RES STATE C TIME WCPU \nCOMMAND\n44689 pgsql 1 107 0 22400K 7060K CPU6 6 748:07 99.02% \npostgres\n64221 www 1 96 0 158M 27628K select 5 0:18 4.20% httpd\n68567 www 1 20 0 151M 23092K lockf 0 0:03 2.39% httpd\n\n...\n\nshopzeus# uptime\n 6:48AM up 10 days, 20:14, 1 user, load averages: 2.21, 3.01, 2.87\n\nMore than 10 hours on a dual-quad core Xeon 5420??? We have two \ndatabases, total database size is about 15GB.\n(The stats collector also uses significant disk I/O.)\n\nThursday:\n\n# date\nThu Dec 11 04:05:00 EST 2008\n# ls -l ~pgsql/data/\n# ls -l ~pgsql/data/global/pgstat.stat\n-rw------- 1 pgsql pgsql 231673 Dec 10 12:01 \n/usr/local/pgsql/data/global/pgstat.stat\n\nToday:\n\n#date\nTue Dec 16 06:48:27 EST 2008\n# cd ~pgsql/data\n# ls -l global/pgstat.stat\n-rw------- 1 pgsql pgsql 232358 Dec 15 18:45 global/pgstat.stat\n\nLooks like the pgstat.stat was not updated since the pg stats collector \n(re)started.\n\n#uname -a\nFreeBSD shopzeus.com 7.0-RELEASE-p5 FreeBSD 7.0-RELEASE-p5 #0: Mon Nov \n17 21:37:25 EST 2008 \[email protected]:/usr/obj/usr/src/sys/SHOPZEUS amd64\n\nAfter restarting the postmaster, the process disappeares for a while \n(some hours, sometimes for one day), then it start updating the stat \nfile correctly.\n\nPlease advise.\n\nThanks,\n\n Laszlo\n\n", "msg_date": "Tue, 16 Dec 2008 13:00:54 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "rebellious pg stats collector (reopened case)" }, { "msg_contents": "Laszlo Nagy wrote:\n> PostgreSQL 8.3.5, the system is now stable (uptime > 10 days). \n> PostgreSQL stats collector uses 100% CPU forever:\n\nCould you grab a few stack traces from it and post them? Also possibly\nuseful, leave strace running on the pgstat process for a while and post\nthe output somewhere.\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nPostgreSQL Replication, Consulting, Custom Development, 24x7 support\n", "msg_date": "Tue, 16 Dec 2008 10:45:42 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Alvaro Herrera wrote:\n> Laszlo Nagy wrote:\n> \n>> PostgreSQL 8.3.5, the system is now stable (uptime > 10 days). \n>> PostgreSQL stats collector uses 100% CPU forever:\n>> \n>\n> Could you grab a few stack traces from it and post them? Also possibly\n> useful, leave strace running on the pgstat process for a while and post\n> the output somewhere.\n> \nVery interesting results. 
Before I used 'truss' on the process, it was:\n\n78816 pgsql 1 106 0 22400K 7100K CPU6 6 24.2H 99.02% \npostgres\n\nAfter I started \"truss -p 78815\" the first message I got was:\n\nSIGNAL 17 (SIGSTOP)\n\nand the it waited for some seconds. Then my screen was filled with \nmessages like this:\n\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\^C\\0\\0\\M^T\\f\\r\\0\\n\"...,1000,0x0,NULL,0x0) = \n984 (0x3d8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\^C\\0\\0\\M^T\\f\\r\\0\\n\"...,1000,0x0,NULL,0x0) = \n984 (0x3d8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M^X\\^A\\0\\0\\M^T\\f\\r\\0\"...,1000,0x0,NULL,0x0) = 408 \n(0x198)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\b\\0\\0\\0@\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,1000,0x0,NULL,0x0) = 64 \n(0x40)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\b\\0\\0\\0@\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,1000,0x0,NULL,0x0) = 64 \n(0x40)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\0\\0\\0\\M^O\\v\\b\\0\\^B\"...,1000,0x0,NULL,0x0) = \n216 (0xd8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-x\\^A\\0\\0\\0\\0\\0\\0\\^E\"...,1000,0x0,NULL,0x0) = 504 \n(0x1f8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\0\\0\\0\\M^T\\f\\r\\0\\^B\"...,1000,0x0,NULL,0x0) = \n216 (0xd8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-x\\^A\\0\\0\\0\\0\\0\\0\\^E\"...,1000,0x0,NULL,0x0) = 504 \n(0x1f8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\0\\0\\0\\M^T\\f\\r\\0\\^B\"...,1000,0x0,NULL,0x0) = \n216 (0xd8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-x\\^A\\0\\0\\0\\0\\0\\0\\^E\"...,1000,0x0,NULL,0x0) = 504 \n(0x1f8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\b\\0\\0\\0@\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,1000,0x0,NULL,0x0) = 64 \n(0x40)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\n\nI terminated the debugger with Ctrl+C. 
Then I started it again:\n\n\n#truss -p 78815\nSIGNAL 17 (SIGSTOP)\npoll({7/POLLIN|POLLERR},1,2000) = 0 (0x0)\ngetppid(0x1,0x1,0x7d0,0x8014606cc,0xffffffff80a579c0,0x7fffffffceb8) = \n95860 (0x17674)\nopen(\"global/pgstat.tmp\",O_WRONLY|O_CREAT|O_TRUNC,0666) = 4 (0x4)\nfstat(4,{mode=-rw------- ,inode=73223196,size=0,blksize=4096}) = 0 (0x0)\nwrite(4,\"\\M^W\\M-<\\M-%\\^A\\^F\\n\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\M-Z\\^C\\0\\0\\0\\0\\0\\0\\M-d\\M^E\\M-0u\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0+\\0\\0\\0\\0\\0\\0\\0\\^N\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\^C\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0T\\^T\\^N\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\^R\\M-(\\M^[\\v\\M-^\\M-0A\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\^N\\0\\0\\0\\0\\0\\0\\0\\\\n\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0T\\M-A\\n\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0~\\M-d\\M-fp\\n\\M-^\\M-0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\^E\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"$\\0\\0\\0\\0\\0\\0+\\^V\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0Z\\M-F%\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0|\\^B\\0\\0\\0\\0\\0\\0<\\M-I\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0T\\M-f\\t\\^V\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\M-H'\\M-4\\M-N\\n\\M-^\\M-0A\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\^N\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\b\\0\\0\\0\\0\\0L\\^B\\0\\0\\0\\0\\0\\0\\M^^\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0dD\\0\\0\\0\\0\"...,4096) = 4096 
(0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\09\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0>\\M-h\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0T%;\\r\\0\\0\\0\\0\\0 \\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\M-I\\\\j\\n\\M-^\\M-0A\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0Y\\240\\^A\\0\\0\\0\\0\\0\\^C\\M-3\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\^N\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\^A\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0P\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\^A\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\^P\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\M-P\\^R\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,3119) = 3119 (0xc2f)\nclose(4) = 0 (0x0)\nrename(\"global/pgstat.tmp\",\"global/pgstat.stat\") = 0 (0x0)\nprocess exit, rval = 0\n\nI did NOT send the SIGSTOP signal to the process. The pgstat collector \nwas running at 100%CPU for 24 hours, when I attached the debugger, and \nattaching the debugger somehow caused the process to get a SIGSTOP, but \nthe process did not exit. When I started the debugger for the second \ntime, it got a SIGSTOP again, and this made the collector exiting the \nendless loop (instead of stopping it).\n\nWhat is the next step?\n\n Laszlo\n\n", "msg_date": "Fri, 19 Dec 2008 09:54:36 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Laszlo Nagy wrote:\n> Alvaro Herrera wrote:\n>> Laszlo Nagy wrote:\n>> \n>>> PostgreSQL 8.3.5, the system is now stable (uptime > 10 days). \n>>> PostgreSQL stats collector uses 100% CPU forever:\n>>> \n>>\n>> Could you grab a few stack traces from it and post them? Also possibly\n>> useful, leave strace running on the pgstat process for a while and post\n>> the output somewhere.\n>> \n> Very interesting results. Before I used 'truss' on the process, it was:\n>\n> 78816 pgsql 1 106 0 22400K 7100K CPU6 6 24.2H 99.02% \n> postgres\n>\n> After I started \"truss -p 78815\" the first message I got was:\n\nIt was 78816 and you traced 78815? Are you sure the process with 24h of\nCPU was pgstat?\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nThe PostgreSQL Company - Command Prompt, Inc.\n", "msg_date": "Fri, 19 Dec 2008 10:04:46 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "\n> It was 78816 and you traced 78815? 
Are you sure the process with 24h of\n> CPU was pgstat?\n> \nI'm sorry that was a typo. Of course I traced the good process (proof is \nthat at the end it renamed a file to \"global/pgstat.stat\".\n\nAnd yes, \"top\" showed 24H in the TIME column and 99% in the WCPU column. \nAlso this process used 50-90% of disk I/O continuously, slowing down the \ncomputer.\n\nI'm sure that it will happen again, then I'm going to post another trace \n(and make sure that I trace the good process).\n\nBest,\n\n Laszlo\n\n\n", "msg_date": "Fri, 19 Dec 2008 14:13:38 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "OK, here is the new test.\n\nshopzeus# ps l -p 39766 -p 39767 -p 39769\n UID PID PPID CPU PRI NI VSZ RSS MWCHAN STAT TT TIME COMMAND\n 70 39766 78806 0 96 0 451960 423896 select Ss ?? 8:41.85 \npostgres: writer process (postgres)\n 70 39767 78806 0 96 0 451960 7184 select Ss ?? 0:58.75 \npostgres: wal writer process (postgres)\n 70 39769 78806 0 96 0 22400 7008 select Ss ?? 14:42.39 \npostgres: stats collector process (postgres)\n\n\ntop -otime 10\n\n\nlast pid: 7984; load averages: 1.38, 1.51, \n1.49 \nup 16+18:18:22 05:02:20\n170 processes: 3 running, 161 sleeping, 1 stopped, 5 zombie\nCPU states: % user, % nice, % system, % interrupt, % \nidle\nMem: 959M Active, 5947M Inact, 562M Wired, 275M Cache, 214M Buf, 164M Free\nSwap: 16G Total, 1088K Used, 16G Free\n\n PID USERNAME THR PRI NICE SIZE RES STATE C TIME WCPU \nCOMMAND\n78806 pgsql 1 98 0 441M 6960K select 4 67:14 0.39% \npostgres\n39769 pgsql 1 96 0 22400K 7008K select 7 14:43 0.00% \npostgres\n39766 pgsql 1 96 0 441M 414M select 3 8:42 0.00% \npostgres\n10602 root 1 96 0 149M 18916K select 4 5:30 0.00% httpd\n67535 root 1 4 0 3720K 1336K kqread 0 3:54 0.00% \ndovecot\n 795 root 1 4 0 8188K 1452K kqread 4 3:21 0.00% \nmaster\n95791 shopzeus 3 116 20 59892K 17440K select 3 2:22 0.00% \npython\n32397 postfix 1 4 0 8280K 1980K kqread 6 1:23 0.00% qmgr\n67536 root 1 4 0 5996K 1448K kqread 4 1:07 0.00% \ndovecot-auth\n 525 root 1 96 0 4684K 1156K select 3 1:03 0.00% \nsyslogd\n\nIn other words, the stats collector is running since 14 hours.\n\nshopzeus# date\nMon Dec 22 05:03:24 EST 2008\nshopzeus# ls -l ~pgsql/data/global/pgstat.stat\n-rw------- 1 pgsql pgsql 232084 Dec 22 05:03 \n/usr/local/pgsql/data/global/pgstat.stat\n\nLooks like it is updating the stats file continuously. 
Now I try to trace:\n\n\nshopzeus# truss -p 39769\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\^C\\0\\0\\M^T\\f\\r\\0\\n\"...,1000,0x0,NULL,0x0) = \n984 (0x3d8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\^C\\0\\0\\M^T\\f\\r\\0\\n\"...,1000,0x0,NULL,0x0) = \n984 (0x3d8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0\\M-X\\^C\\0\\0\\M^T\\f\\r\\0\\n\"...,1000,0x0,NULL,0x0) = \n984 (0x3d8)\npoll({7/POLLIN|POLLERR},1,2000) = 1 (0x1)\nrecvfrom(7,\"\\^A\\0\\0\\0X\\^B\\0\\0\\M^T\\f\\r\\0\\^F\\0\"...,1000,0x0,NULL,0x0) = \n600 (0x258)\npoll({7/POLLIN|POLLERR},1,2000) ERR#4 'Interrupted \nsystem call'\nSIGNAL 14 (SIGALRM)\nsigreturn(0x7fffffffca70,0x0,0x7fffffffca70,0xffffffff809fb5c0,0x7fffffffffc0,0x7fffffffca60) \nERR#4 'Interrupted system call'\ngetppid(0x1,0x1,0x801901120,0x4,0x7fffffffd39f,0x6) = 11204 (0x2bc4)\nopen(\"global/pgstat.tmp\",O_WRONLY|O_CREAT|O_TRUNC,0666) = 4 (0x4)\nfstat(4,{mode=-rw------- ,inode=73223197,size=0,blksize=4096}) = 0 (0x0)\nwrite(4,\"\\M^W\\M-<\\M-%\\^A`\\^B\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"l\\0\\0\\0\\0\\0\\0\\0\\M-T\\M-s.\\M-`\\M-r\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\t\\0\\0\\0\\0\\0\\0\\0\\^C\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\^C\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0T\\^T\\^N\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\M-aKo\\M-s\\M-a\\M-0A\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\^C\\0\\0\\0\\0\\0\\0\\0\\M-&\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0T\\M-A\\n\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\M-_o\\M-T\\M-u\\M-p\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\^E\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\^A\\0\\0\\0\\0\\0\\0\\^?\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0X\\M-<\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\b\\^D@f\\0\\0\\0\\0\\0\\0\\0F \\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0T\\M-2\\f\\b\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 
(0x1000)\nwrite(4,\"\\0\\0;\\0\\0\\0\\0\\0\\0\\0\\^R\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\n\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\r\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0T_\\n\\0\\0\\0\\0\\0\\0\\M-*R\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"r\\M^W\\M-`\\M-p\\M-a\\M-0A\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\M-a\\M-p\\M-a\\M-0A\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0c\\^Q\\^A\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\^F\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0Z\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\^D\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,4096) = 4096 (0x1000)\nwrite(4,\"\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0\"...,2708) = 2708 (0xa94)\nclose(4) = 0 (0x0)\nrename(\"global/pgstat.tmp\",\"global/pgstat.stat\") = 0 (0x0)\nprocess exit, rval = 0\n\nAnd the stats collector exited! It cannot be by accident. Last time I \nchecked, it had the same problem: tried to poll something. It was in an \ninifinte loop, polling something but always got POLERR. Then because I \nstarted to trace it, it exited with rval=0 or rval=1.\n\nI do not understand why it exited after I started to trace it? Is this \nsome kind of bug?\n\nThanks,\n\n Laszlo\n\n", "msg_date": "Mon, 22 Dec 2008 11:11:30 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Laszlo Nagy <[email protected]> writes:\n> And the stats collector exited! It cannot be by accident. Last time I \n> checked, it had the same problem: tried to poll something. It was in an \n> inifinte loop, polling something but always got POLERR. Then because I \n> started to trace it, it exited with rval=0 or rval=1.\n\n> I do not understand why it exited after I started to trace it? 
Is this \n> some kind of bug?\n\nAFAICS, the only ways to reach the exit(0) in PgstatCollectorMain are\n(1) need_exit becomes set (and the only way for that to happen is\nto receive a SIGQUIT signal); or (2) PostmasterIsAlive(true) fails,\nwhich implies that getppid() is returning something different than it\nused to.\n\nI wonder whether your tracing tool is affecting the result of\ngetppid(). Most people would consider that a bug in the tracing tool.\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 22 Dec 2008 08:23:48 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: rebellious pg stats collector (reopened case) " }, { "msg_contents": "Tom Lane wrote:\n\n> I wonder whether your tracing tool is affecting the result of\n> getppid(). Most people would consider that a bug in the tracing tool.\n\nProbably having a close look at the PPID column in ps or top during the\ntruss run would prove this.\n\nStill, the actual problem being debugged is something else.\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nThe PostgreSQL Company - Command Prompt, Inc.\n", "msg_date": "Mon, 22 Dec 2008 10:27:21 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Alvaro Herrera <[email protected]> writes:\n> Tom Lane wrote:\n>> I wonder whether your tracing tool is affecting the result of\n>> getppid(). Most people would consider that a bug in the tracing tool.\n\n> Probably having a close look at the PPID column in ps or top during the\n> truss run would prove this.\n\nIt's at least conceivable that the result being returned inside the\nprocess is different from what the rest of the world sees. What'd prove\nthis one way or the other is to write a trivial test program along the\nlines of\n\n\twhile(1) {\n\t\tsleep(1);\n\t\tprintf(\"ppid = %d\\n\", getppid());\n\t}\n\nand see if its output changes when you start to trace it.\n\n> Still, the actual problem being debugged is something else.\n\nAgreed, but we need to understand what the tools being used to\ninvestigate the problem are doing ...\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 22 Dec 2008 08:36:10 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: rebellious pg stats collector (reopened case) " }, { "msg_contents": "\n> and see if its output changes when you start to trace it.\n> \n%cat test.c\n#include <stdio.h>\n\nint main() {\n while(1) {\n sleep(5);\n printf(\"ppid = %d\\n\", getppid());\n }\n}\n\n%gcc -o test test.c\n%./test\nppid = 47653\nppid = 47653\nppid = 47653 # Started \"truss -p 48864\" here!\nppid = 49073\nppid = 49073\nppid = 49073\n\n> Agreed, but we need to understand what the tools being used to\n> investigate the problem are doing ...\n> \nUnfortunately, I'm not able to install strace:\n\n# pwd\n/usr/ports/devel/strace\n# make\n===> strace-4.5.7 is only for i386, while you are running amd64.\n*** Error code 1\n\nStop in /usr/ports/devel/strace.\n\nI'll happily install any trace tool, but have no clue which one would help.\n\n\n\n", "msg_date": "Mon, 22 Dec 2008 14:57:33 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [ADMIN] rebellious pg stats collector (reopened case)" }, { "msg_contents": "\n> and see if its output changes when you start to trace it.\n> \n%cat test.c\n#include <stdio.h>\n\nint main() {\n while(1) {\n sleep(5);\n printf(\"ppid = %d\\n\", getppid());\n 
}\n}\n\n%gcc -o test test.c\n%./test\nppid = 47653\nppid = 47653\nppid = 47653 # Started \"truss -p 48864\" here!\nppid = 49073\nppid = 49073\nppid = 49073\n\n> Agreed, but we need to understand what the tools being used to\n> investigate the problem are doing ...\n> \nUnfortunately, I'm not able to install strace:\n\n# pwd\n/usr/ports/devel/strace\n# make\n===> strace-4.5.7 is only for i386, while you are running amd64.\n*** Error code 1\n\nStop in /usr/ports/devel/strace.\n\nI'll happily install any trace tool, but have no clue which one would help.\n\n\n\n", "msg_date": "Mon, 22 Dec 2008 14:58:59 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Posted to the wrong list by mistake. Sorry.\n", "msg_date": "Mon, 22 Dec 2008 14:59:32 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [ADMIN] rebellious pg stats collector (reopened case)" }, { "msg_contents": "Laszlo Nagy wrote:\n\n> %gcc -o test test.c\n> %./test\n> ppid = 47653\n> ppid = 47653\n> ppid = 47653 # Started \"truss -p 48864\" here!\n> ppid = 49073\n> ppid = 49073\n> ppid = 49073\n\nI think you should report that as a bug to Sun.\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nThe PostgreSQL Company - Command Prompt, Inc.\n", "msg_date": "Mon, 22 Dec 2008 11:00:06 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [ADMIN] rebellious pg stats collector (reopened case)" }, { "msg_contents": "Tom Lane wrote:\n> Alvaro Herrera <[email protected]> writes:\n> \n>> Tom Lane wrote:\n>> \n>>> I wonder whether your tracing tool is affecting the result of\n>>> getppid(). Most people would consider that a bug in the tracing tool.\n>>> \n>\n> \nI wrote to an official the FreeBSD list about this getppid() problem but \ngot no answer other than that \"this behaviour is documented\". :-(\n\nThe problem is still there:\n\n\n PID USERNAME THR PRI NICE SIZE RES STATE C TIME WCPU \nCOMMAND\n11205 pgsql 1 104 0 22400K 7112K CPU5 5 159.7H 99.02% \npostgres\n\n\n100% CPU since 159 hours! What can I do? Instead of tracing system \ncalls, is there a way to start the stats collector in debug mode? Or \nmaybe is it possible to change the source code, and disable the \"is \npostmaster alive\" check for testing?\n\nThanks\n\n", "msg_date": "Mon, 29 Dec 2008 09:06:51 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Laszlo Nagy <[email protected]> writes:\n> maybe is it possible to change the source code, and disable the \"is \n> postmaster alive\" check for testing?\n\nRather than disabling it, it'd probably be more convenient to make\nany getppid value except 1 (the init process) be treated as \"it's\nalive\". Otherwise you'll have trouble with the collector not stopping\nwhen you want it to. Look into src/backend/storage/ipc/pmsignal.c:\n\n-\t\treturn (getppid() == PostmasterPid);\n+\t\treturn (getppid() != 1);\n\n(Obviously this is a hack, but it should work on any Unixish system)\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 29 Dec 2008 10:26:33 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: rebellious pg stats collector (reopened case) " }, { "msg_contents": "\n> alive\". Otherwise you'll have trouble with the collector not stopping\n> when you want it to. 
Look into src/backend/storage/ipc/pmsignal.c:\n>\n> -\t\treturn (getppid() == PostmasterPid);\n> +\t\treturn (getppid() != 1);\n>\n> (Obviously this is a hack, but it should work on any Unixish system)\n> \nThanks. I'm going to try this next weekend (when nobody works on the \ndatabase) and come back with the results.\n", "msg_date": "Tue, 30 Dec 2008 14:17:01 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" }, { "msg_contents": "Laszlo Nagy ďż˝rta:\n>\n>> alive\". Otherwise you'll have trouble with the collector not stopping\n>> when you want it to. Look into src/backend/storage/ipc/pmsignal.c:\n>>\n>> - return (getppid() == PostmasterPid);\n>> + return (getppid() != 1);\n>>\n>> (Obviously this is a hack, but it should work on any Unixish system)\n>> \nSorry for coming back so late. I changed this, and reinstalled \npostgresql but it won't start. Some messages from the log:\n\n\nJan 8 00:40:29 shopzeus postgres[80522]: [3814-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:30 shopzeus postgres[80559]: [3849-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:30 shopzeus postgres[80632]: [3917-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:31 shopzeus postgres[80662]: [3946-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:31 shopzeus postgres[80700]: [3981-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:31 shopzeus postgres[80734]: [4014-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:31 shopzeus postgres[80738]: [4017-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:32 shopzeus postgres[80768]: [4046-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:32 shopzeus postgres[80802]: [4078-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:32 shopzeus postgres[80806]: [4079-1] WARNING: terminating \nconnection because of crash of another server process\nJan 8 00:40:32 shopzeus postgres[80806]: [4079-2] DETAIL: The \npostmaster has commanded this server process to roll back the current \ntransaction and exit, because another server\nJan 8 00:40:32 shopzeus postgres[80806]: [4079-3] process exited \nabnormally and possibly corrupted shared memory.\nJan 8 00:40:32 shopzeus postgres[80806]: [4079-4] HINT: In a moment \nyou should be able to reconnect to the database and repeat your command.\nJan 8 00:40:32 shopzeus postgres[80835]: [4109-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:32 shopzeus postgres[80837]: [4110-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:33 shopzeus postgres[80870]: [4141-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:33 shopzeus postgres[80900]: [4170-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:34 shopzeus postgres[80938]: [4205-1] FATAL: the database \nsystem is in recovery mode\nJan 8 00:40:34 shopzeus postgres[80967]: [4233-1] FATAL: the database \nsystem is in recovery mode\n\n(I had to reinstall the original version because it will be used today.)\n\n", "msg_date": "Thu, 08 Jan 2009 06:50:25 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: rebellious pg stats collector (reopened case)" } ]
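Independent of the OS-level tracing in that thread, it can help to confirm from SQL whether the collector is still refreshing the statistics snapshot at all. One rough test on 8.3 is to sample a collector-fed view twice, in separate sessions a minute or two apart:

-- Collector-maintained counters; if these never move while the server is
-- busy, the stats file is no longer being rewritten.
SELECT checkpoints_timed, checkpoints_req, buffers_checkpoint,
       buffers_backend, buffers_alloc
FROM pg_stat_bgwriter;

Frozen counters combined with an old pgstat.stat modification time point at the collector loop itself rather than at backends failing to send statistics.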
[ { "msg_contents": "Hi,\n\nwe have a some bad queries (developers are working on that), some of\nthem run in 17 secs and that is the average but when analyzing logs i\nfound that from time to time some of them took upto 3 mins (the same\nquery that normally runs in 17secs).\n\nso my question is: how could i look for contention problems?\n\n-- \nAtentamente,\nJaime Casanova\nSoporte y capacitación de PostgreSQL\nAsesoría y desarrollo de sistemas\nGuayaquil - Ecuador\nCel. +59387171157\n", "msg_date": "Tue, 16 Dec 2008 14:32:01 -0500", "msg_from": "\"Jaime Casanova\" <[email protected]>", "msg_from_op": true, "msg_subject": "measure database contention" }, { "msg_contents": "Jaime Casanova wrote:\n> we have a some bad queries (developers are working on that), some of\n> them run in 17 secs and that is the average but when analyzing logs i\n> found that from time to time some of them took upto 3 mins (the same\n> query that normally runs in 17secs).\n> \n> so my question is: how could i look for contention problems?\n\nA good first step is to identify the bottleneck.\n\nFrequently, but not always, this is I/O.\nDo you see a lot of I/O wait? Are the disks busy?\n\nI don't know anything about your system, but I once experienced a\nsimilar problem with a 2.6 Linux system where things improved considerably\nafter changing the I/O-scheduler to \"elevator=deadline\".\n\nYours,\nLaurenz Albe\n", "msg_date": "Wed, 17 Dec 2008 08:34:23 +0100", "msg_from": "\"Albe Laurenz\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: measure database contention" }, { "msg_contents": "On Tue, Dec 16, 2008 at 2:32 PM, Jaime Casanova\n<[email protected]> wrote:\n> we have a some bad queries (developers are working on that), some of\n> them run in 17 secs and that is the average but when analyzing logs i\n> found that from time to time some of them took upto 3 mins (the same\n> query that normally runs in 17secs).\n>\n> so my question is: how could i look for contention problems?\n\nIs it the exact same query? Sometimes you might find that the query\nplan changes depending on the particular values you have in there; it\nis worth running \"EXPLAIN ANALYZE\" to look for such cases.\n\nYou might also want to look at pg_locks.\n\n...Robert\n", "msg_date": "Wed, 17 Dec 2008 09:18:54 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: measure database contention" }, { "msg_contents": "On Tue, Dec 16, 2008 at 2:32 PM, Jaime Casanova\n<[email protected]> wrote:\n> Hi,\n>\n> we have a some bad queries (developers are working on that), some of\n> them run in 17 secs and that is the average but when analyzing logs i\n> found that from time to time some of them took upto 3 mins (the same\n> query that normally runs in 17secs).\n>\n> so my question is: how could i look for contention problems?\n\nSometimes queries can have fluctuating plans. For example this can\nhappen if you have sorts or hashes that are very near the allowed\nlimit set in work_mem. 
so you want to catch it both ways via explain\nanalyze.\n\nmerlin\n", "msg_date": "Wed, 17 Dec 2008 09:49:26 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: measure database contention" }, { "msg_contents": "On Wed, Dec 17, 2008 at 2:34 AM, Albe Laurenz <[email protected]> wrote:\n> Jaime Casanova wrote:\n>> we have a some bad queries (developers are working on that), some of\n>> them run in 17 secs and that is the average but when analyzing logs i\n>> found that from time to time some of them took upto 3 mins (the same\n>> query that normally runs in 17secs).\n>>\n>> so my question is: how could i look for contention problems?\n>\n> A good first step is to identify the bottleneck.\n>\n> Frequently, but not always, this is I/O.\n> Do you see a lot of I/O wait? Are the disks busy?\n>\n\nthe disks are an RAID 10 with 4 sata disks of 15000rpm\nand nop iostat reports avg of 0.12 iowait\n\n> I don't know anything about your system, but I once experienced a\n> similar problem with a 2.6 Linux system where things improved considerably\n> after changing the I/O-scheduler to \"elevator=deadline\".\n>\n\ni don't understand I/O-schedulers at all... anyone knows what is the\nrecommended for postgres?\n\n-- \nAtentamente,\nJaime Casanova\nSoporte y capacitación de PostgreSQL\nAsesoría y desarrollo de sistemas\nGuayaquil - Ecuador\nCel. +59387171157\n", "msg_date": "Wed, 17 Dec 2008 10:22:04 -0500", "msg_from": "\"Jaime Casanova\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: measure database contention" }, { "msg_contents": "On Wed, Dec 17, 2008 at 9:18 AM, Robert Haas <[email protected]> wrote:\n> On Tue, Dec 16, 2008 at 2:32 PM, Jaime Casanova\n> <[email protected]> wrote:\n>> we have a some bad queries (developers are working on that), some of\n>> them run in 17 secs and that is the average but when analyzing logs i\n>> found that from time to time some of them took upto 3 mins (the same\n>> query that normally runs in 17secs).\n>>\n>> so my question is: how could i look for contention problems?\n>\n> Is it the exact same query?\n\nis the exact query... i think it will be removed later today because\nis a bad query anyway... but my fear is that something like happens\neven with good ones...\n\nmaybe chekpoints could be the problem?\ni have 8.3.5 and condigured checkpoint_timeout in 15 minutes,\nchekpoint_segments 6 and checkpoint_completion_target to 0.5\n\ni'm putting log_checkpoints to on, but should be good if there is way\nto analyze them better than looking through the log\n\n> Sometimes you might find that the query\n> plan changes depending on the particular values you have in there; it\n> is worth running \"EXPLAIN ANALYZE\" to look for such cases.\n>\n\ndon't think that could happen in this query, because there is no way\nit will choose something better than seqscan\n\n> You might also want to look at pg_locks.\n>\n\nOnly Shared ones...\n\n\nPS: more info about my system (sorry for don't giving it in the first post)\n\n2 PROCESSORS Xeon(R) CPU E5430 @ 2.66GHz with 4 cores each\n18Gb in Ram (4gb in shared_buffers, 4mb in work_mem)\nthe db size is 2gb (reported by pg_database_size)\n\nmax. concurrent connections seen until now: 256\n\n\n-- \nAtentamente,\nJaime Casanova\nSoporte y capacitación de PostgreSQL\nAsesoría y desarrollo de sistemas\nGuayaquil - Ecuador\nCel. 
+59387171157\n", "msg_date": "Wed, 17 Dec 2008 10:38:29 -0500", "msg_from": "\"Jaime Casanova\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: measure database contention" }, { "msg_contents": "> is the exact query... i think it will be removed later today because\n> is a bad query anyway... but my fear is that something like happens\n> even with good ones...\n>\n> maybe chekpoints could be the problem?\n> i have 8.3.5 and condigured checkpoint_timeout in 15 minutes,\n> chekpoint_segments 6 and checkpoint_completion_target to 0.5\n\nWell, it might help if you could provide the query, and the EXPLAIN output.\n\nUnless the query is updating data (rather than just retrieving it),\ncheckpoints shouldn't be involved (I think).\n\n...Robert\n", "msg_date": "Wed, 17 Dec 2008 11:56:56 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: measure database contention" }, { "msg_contents": "On Wed, Dec 17, 2008 at 11:56 AM, Robert Haas <[email protected]> wrote:\n>> is the exact query... i think it will be removed later today because\n>> is a bad query anyway... but my fear is that something like happens\n>> even with good ones...\n>>\n>> maybe chekpoints could be the problem?\n>> i have 8.3.5 and condigured checkpoint_timeout in 15 minutes,\n>> chekpoint_segments 6 and checkpoint_completion_target to 0.5\n>\n> Well, it might help if you could provide the query, and the EXPLAIN output.\n>\n\nok... remember i say it's a bad query ;)\nactually, seems there's a suitable index for that query (i guess it is\nusing it because of the order by)\n\nmic=# explain analyze\nmic-# SELECT * FROM tgen_persona ORDER BY empresa_id, persona_id ASC;\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------------------------------------\n Index Scan using pk_tgen_persona on tgen_persona (cost=0.00..8534.09\nrows=86547 width=884) (actual time=0.096..129.980 rows=86596 loops=1)\n Total runtime: 175.952 ms\n(2 rows)\n\nas you see, explain analyze says it will execute in 175.952ms and\nbecause of network transfer of data executing this from pgadmin in\nanother machine it runs for 17s... but from time to time pgFouine is\nshown upto 345.11 sec\n\n-- \nAtentamente,\nJaime Casanova\nSoporte y capacitación de PostgreSQL\nAsesoría y desarrollo de sistemas\nGuayaquil - Ecuador\nCel. +59387171157\n", "msg_date": "Wed, 17 Dec 2008 13:19:36 -0500", "msg_from": "\"Jaime Casanova\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: measure database contention" }, { "msg_contents": "On Wed, Dec 17, 2008 at 11:19 AM, Jaime Casanova\n<[email protected]> wrote:\n> On Wed, Dec 17, 2008 at 11:56 AM, Robert Haas <[email protected]> wrote:\n>>> is the exact query... i think it will be removed later today because\n>>> is a bad query anyway... but my fear is that something like happens\n>>> even with good ones...\n>>>\n>>> maybe chekpoints could be the problem?\n>>> i have 8.3.5 and condigured checkpoint_timeout in 15 minutes,\n>>> chekpoint_segments 6 and checkpoint_completion_target to 0.5\n>>\n>> Well, it might help if you could provide the query, and the EXPLAIN output.\n>>\n>\n> ok... 
remember i say it's a bad query ;)\n> actually, seems there's a suitable index for that query (i guess it is\n> using it because of the order by)\n>\n> mic=# explain analyze\n> mic-# SELECT * FROM tgen_persona ORDER BY empresa_id, persona_id ASC;\n> QUERY PLAN\n> ---------------------------------------------------------------------------------------------------------------------------------------------\n> Index Scan using pk_tgen_persona on tgen_persona (cost=0.00..8534.09\n> rows=86547 width=884) (actual time=0.096..129.980 rows=86596 loops=1)\n> Total runtime: 175.952 ms\n> (2 rows)\n>\n> as you see, explain analyze says it will execute in 175.952ms and\n> because of network transfer of data executing this from pgadmin in\n> another machine it runs for 17s... but from time to time pgFouine is\n> shown upto 345.11 sec\n\nI know it's a bad query but did you try clustering on that index?\nThen a seq scan followed by a sort would likely be cheaper and faster.\n 85k rows aren't that many really.\n", "msg_date": "Wed, 17 Dec 2008 11:57:33 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: measure database contention" }, { "msg_contents": "> as you see, explain analyze says it will execute in 175.952ms and\n> because of network transfer of data executing this from pgadmin in\n> another machine it runs for 17s... but from time to time pgFouine is\n> shown upto 345.11 sec\n\nWell, 86000 rows is not enough to give PostgreSQL a headache, even on\nmediocre hardware. So I think that most likely culprit is the\napplication that is reading the data - pgadmin or pgFouine.\n\n...Robert\n", "msg_date": "Wed, 17 Dec 2008 16:14:52 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: measure database contention" } ]
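To make the pg_locks suggestion from that thread concrete, a sketch of the usual lock-wait query on 8.3 (where pg_stat_activity exposes the backend PID as procpid) is:

-- Sessions waiting on ungranted locks, with what they are running and for how long.
SELECT l.pid, l.locktype, l.relation::regclass AS relation, l.mode,
       a.usename, now() - a.query_start AS waited, a.current_query
FROM pg_locks l
JOIN pg_stat_activity a ON a.procpid = l.pid
WHERE NOT l.granted;

If this comes back empty while the query is in one of its multi-minute episodes, lock contention is effectively ruled out, and the checkpoint and I/O theories (log_checkpoints output plus iostat during the slow window) become the more likely explanations.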
[ { "msg_contents": "Hi,\n\nWe are trying to implement slony as a replication tool for one of our\ndatabase. The Insert and updates have increased by approximately double\nmaking some of our important script slow.\n\nThe database in concern is a warehouse and we have added additional primary\nkey to support slony by using default vale as SERIAL for the primary key.\n\nCan you any one let us know what we can do to speed up the queries and if it\nis a good idea to use Slony for db warehouse?\n\nLet me know if you need any further informatiom.\n\nRegards,\nNimesh.\n\nHi,\n \nWe are trying to implement slony as a replication tool for one of our database. The Insert and updates have increased by approximately double making some of our important script slow. \n \nThe database in concern is a warehouse and we have added additional primary key to support slony by using default vale as SERIAL for the primary key.\n \nCan you any one let us know what we can do to speed up the queries and if it is a good idea to use Slony for db warehouse?\n \nLet me know if you need any further informatiom.\n \nRegards,\nNimesh.", "msg_date": "Wed, 17 Dec 2008 09:33:21 +0530", "msg_from": "\"Nimesh Satam\" <[email protected]>", "msg_from_op": true, "msg_subject": "insert and Update slow after implementing slony." }, { "msg_contents": "On Tue, Dec 16, 2008 at 8:03 PM, Nimesh Satam <[email protected]> wrote:\n> We are trying to implement slony as a replication tool for one of our\n> database. The Insert and updates have increased by approximately double\n> making some of our important script slow.\n\nWhat version of PostgreSQL are you running and on what type of hardware?\n\nI suspect that moving pg_log onto a separate spindle and/or upgrading\nyour RAID controller to something with a BBU and configured in\nwrite-back mode would get most of your performance back.\n\nIf you aren't running PostgreSQL 8.3, that might also help update\nperformance significantly as well.\n\n-Dave\n", "msg_date": "Wed, 17 Dec 2008 11:59:14 -0800", "msg_from": "\"David Rees\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: insert and Update slow after implementing slony." } ]
[ { "msg_contents": "Hi all,\n\nSo after a long hiatus after running this OLTP workload at the OSDL,\nmany of you know the community has had some equipment donated by HP: a\nDL380 G5 and an MSA70 disk array. We are currently using the hardware\nto do some tuning exercises to show the effects of various GUC\nparameters. I wanted to share what I've started with for input for\nwhat is realistic to tune an OLTP database on a single large LUN. The\ninitial goal is to show how much can (or can't) be tuned on an OLTP\ntype workload with just database and kernel parameters before\nphysically partitioning the database. I hope this is actually a\nuseful exercise (it was certainly helped get the kit updated a little\nbit.)\n\nTo recap, dbt2 is a fair-use derivative of the TPC-C benchmark. We\nare using a 1000 warehouse database, which amounts to about 100GB of\nraw text data. The DL380 G5 is an 8 core Xeon E5405 with 32GB of\nmemory. The MSA70 is a 25-disk 15,000 RPM SAS array, currently\nconfigured as a 25-disk RAID-0 array. More specific hardware details\ncan be found here:\n\nhttp://wiki.postgresql.org/wiki/HP_ProLiant_DL380_G5_Tuning_Guide#Hardware_Details\n\nSo first task is to show the confidence of the results, here are a\nlink to a few repeated runs using all default GUC values except the\nnumber of connections (250):\n\nhttp://pugs.postgresql.org/node/502\n\nHere are links to how the throughput changes when increasing shared_buffers:\n\nhttp://pugs.postgresql.org/node/505\n\nAnd another series of tests to show how throughput changes when\ncheckpoint_segments are increased:\n\nhttp://pugs.postgresql.org/node/503\n\nThe links go to a graphical summary and raw data. Note that the\nmaximum theoretical throughput at this scale factor is approximately\n12000 notpm.\n\nMy first glance takes tells me that the system performance is quite\nerratic when increasing the shared_buffers. I'm also not what to\ngather from increasing the checkpoint_segments. Is it simply that the\nmore checkpoint segments you have, the more time the database spends\nfsyncing when at a checkpoint?\n\nMoving forward, what other parameters (or combinations of) do people\nfeel would be valuable to illustrate with this workload?\n\nRegards,\nMark\n", "msg_date": "Sat, 20 Dec 2008 16:54:39 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "\"Mark Wong\" <[email protected]> writes:\n\n> To recap, dbt2 is a fair-use derivative of the TPC-C benchmark. We\n> are using a 1000 warehouse database, which amounts to about 100GB of\n> raw text data. \n\nReally? Do you get conforming results with 1,000 warehouses? What's the 95th\npercentile response time?\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's On-Demand Production Tuning\n", "msg_date": "Sun, 21 Dec 2008 01:33:02 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "\nOn Dec 20, 2008, at 5:33 PM, Gregory Stark wrote:\n\n> \"Mark Wong\" <[email protected]> writes:\n>\n>> To recap, dbt2 is a fair-use derivative of the TPC-C benchmark. We\n>> are using a 1000 warehouse database, which amounts to about 100GB of\n>> raw text data.\n>\n> Really? Do you get conforming results with 1,000 warehouses? What's \n> the 95th\n> percentile response time?\n\nNo, the results are not conforming. 
You and others have pointed that \nout already. The 95th percentile response time are calculated on each \npage of the previous links.\n\nI find your questions a little odd for the input I'm asking for. Are \nyou under the impression we are trying to publish benchmarking \nresults? Perhaps this is a simple misunderstanding?\n\nRegards,\nMark\n", "msg_date": "Sun, 21 Dec 2008 01:29:46 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "Mark Wong <[email protected]> writes:\n\n> On Dec 20, 2008, at 5:33 PM, Gregory Stark wrote:\n>\n>> \"Mark Wong\" <[email protected]> writes:\n>>\n>>> To recap, dbt2 is a fair-use derivative of the TPC-C benchmark. We\n>>> are using a 1000 warehouse database, which amounts to about 100GB of\n>>> raw text data.\n>>\n>> Really? Do you get conforming results with 1,000 warehouses? What's the 95th\n>> percentile response time?\n>\n> No, the results are not conforming. You and others have pointed that out\n> already. The 95th percentile response time are calculated on each page of the\n> previous links.\n\nWhere exactly? Maybe I'm blind but I don't see them.\n\n>\n> I find your questions a little odd for the input I'm asking for. Are you\n> under the impression we are trying to publish benchmarking results? Perhaps\n> this is a simple misunderstanding?\n\nHm, perhaps. The \"conventional\" way to run TPC-C is to run it with larger and\nlarger scale factors until you find out the largest scale factor you can get a\nconformant result at. In other words the scale factor is an output, not an\ninput variable.\n\nYou're using TPC-C just as an example workload and looking to see how to\nmaximize the TPM for a given scale factor. I guess there's nothing wrong with\nthat as long as everyone realizes it's not a TPC-C benchmark.\n\nExcept that if the 95th percentile response times are well above a second I\nhave to wonder whether the situation reflects an actual production OLTP system\nwell. It implies there are so many concurrent sessions that any given query is\nbeing context switched out for seconds at a time.\n\nI have to imagine that a real production system would consider the system\noverloaded as soon as queries start taking significantly longer than they take\non an unloaded system. People monitor the service wait times and queue depths\nfor i/o systems closely and having several seconds of wait time is a highly\nabnormal situation.\n\nI'm not sure how bad that is for the benchmarks. 
The only effect that comes to\nmind is that it might exaggerate the effects of some i/o intensive operations\nthat under normal conditions might not cause any noticeable impact like wal\nlog file switches or even checkpoints.\n\nIf you have a good i/o controller it might confuse your results a bit when\nyou're comparing random and sequential i/o because the controller might be\nable to sort requests by physical position better than in a typical oltp\nenvironment where the wait queues are too short to effectively do that.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's Slony Replication support!\n", "msg_date": "Mon, 22 Dec 2008 06:56:24 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Sun, Dec 21, 2008 at 10:56 PM, Gregory Stark <[email protected]> wrote:\n> Mark Wong <[email protected]> writes:\n>\n>> On Dec 20, 2008, at 5:33 PM, Gregory Stark wrote:\n>>\n>>> \"Mark Wong\" <[email protected]> writes:\n>>>\n>>>> To recap, dbt2 is a fair-use derivative of the TPC-C benchmark. We\n>>>> are using a 1000 warehouse database, which amounts to about 100GB of\n>>>> raw text data.\n>>>\n>>> Really? Do you get conforming results with 1,000 warehouses? What's the 95th\n>>> percentile response time?\n>>\n>> No, the results are not conforming. You and others have pointed that out\n>> already. The 95th percentile response time are calculated on each page of the\n>> previous links.\n>\n> Where exactly? Maybe I'm blind but I don't see them.\n\nHere's an example:\n\nhttp://207.173.203.223/~markwkm/community6/dbt2/baseline.1000.1/report/\n\nThe links on the blog entries should be pointing to their respective\nreports. I spot checked a few and it seems I got some right. I\nprobably didn't make it clear you needed to click on the results to\nsee the reports.\n\n>> I find your questions a little odd for the input I'm asking for. Are you\n>> under the impression we are trying to publish benchmarking results? Perhaps\n>> this is a simple misunderstanding?\n>\n> Hm, perhaps. The \"conventional\" way to run TPC-C is to run it with larger and\n> larger scale factors until you find out the largest scale factor you can get a\n> conformant result at. In other words the scale factor is an output, not an\n> input variable.\n>\n> You're using TPC-C just as an example workload and looking to see how to\n> maximize the TPM for a given scale factor. I guess there's nothing wrong with\n> that as long as everyone realizes it's not a TPC-C benchmark.\n\nPerhaps, but we're not trying to run a TPC-C benchmark. We're trying\nto illustrate how performance changes with an understood OLTP\nworkload. The purpose is to show how the system bahaves more so than\nwhat the maximum transactions are. We try to advertise the kit the\nand work for self learning, we never try to pass dbt-2 off as a\nbenchmarking kit.\n\n> Except that if the 95th percentile response times are well above a second I\n> have to wonder whether the situation reflects an actual production OLTP system\n> well. It implies there are so many concurrent sessions that any given query is\n> being context switched out for seconds at a time.\n>\n> I have to imagine that a real production system would consider the system\n> overloaded as soon as queries start taking significantly longer than they take\n> on an unloaded system. 
People monitor the service wait times and queue depths\n> for i/o systems closely and having several seconds of wait time is a highly\n> abnormal situation.\n\nWe attempt to illustrate the response times on the reports. For\nexample, there is a histogram (drawn as a scatter plot) illustrating\nthe number of transactions vs. the response time for each transaction.\n This is for the New Order transaction:\n\nhttp://207.173.203.223/~markwkm/community6/dbt2/baseline.1000.1/report/dist_n.png\n\nWe also plot the response time for a transaction vs the elapsed time\n(also as a scatter plot). Again, this is for the New Order\ntransaction:\n\nhttp://207.173.203.223/~markwkm/community6/dbt2/baseline.1000.1/report/rt_n.png\n\n> I'm not sure how bad that is for the benchmarks. The only effect that comes to\n> mind is that it might exaggerate the effects of some i/o intensive operations\n> that under normal conditions might not cause any noticeable impact like wal\n> log file switches or even checkpoints.\n\nI'm not sure I'm following. Is this something than can be shown by\nany stats collection or profiling? This vaguely reminds me of the the\nsignificant spikes in system time (and dips everywhere else) when the\noperating system is fsyncing during a checkpoint that we've always\nobserved when running this in the past.\n\n> If you have a good i/o controller it might confuse your results a bit when\n> you're comparing random and sequential i/o because the controller might be\n> able to sort requests by physical position better than in a typical oltp\n> environment where the wait queues are too short to effectively do that.\n\nThanks for the input.\n\nRegards,\nMark\n", "msg_date": "Mon, 22 Dec 2008 00:50:20 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Sat, 20 Dec 2008, Mark Wong wrote:\n\n> Here are links to how the throughput changes when increasing \n> shared_buffers: http://pugs.postgresql.org/node/505 My first glance \n> takes tells me that the system performance is quite erratic when \n> increasing the shared_buffers.\n\nIf you smooth that curve out a bit, you have to throw out the 22528MB \nfigure as meaningless--particularly since it's way too close to the cliff \nwhere performance dives hard. The sweet spot looks to me like 11264MB to \n17408MB. I'd say 14336MB is the best performing setting that's in the \nmiddle of a stable area.\n\n> And another series of tests to show how throughput changes when \n> checkpoint_segments are increased: http://pugs.postgresql.org/node/503 \n> I'm also not what to gather from increasing the checkpoint_segments.\n\nWhat was shared_buffers set to here? Those two settings are not \ncompletely independent, for example at a tiny buffer size it's not as \nobvious there's a win in spreading the checkpoints out more. It's \nactually a 3-D graph, with shared_buffers and checkpoint_segments as two \naxes and the throughput as the Z value.\n\nSince that's quite time consuming to map out in its entirety, the way I'd \nsuggest navigating the territory more efficiently is to ignore the \ndefaults altogether. Start with a configuration that someone familiar \nwith tuning the database would pick for this hardware: 8192MB for \nshared_buffers and 100 checkpoint segments would be a reasonable base \npoint. 
Run the same tests you did here, but with the value you're not \nchanging set to those much larger values rather than the database \ndefaults, and then I think you'd end with something more interesting. \nAlso, I think the checkpoint_segments values >500 are a bit much, given \nwhat level of recovery time would come with a crash at that setting. \nSmaller steps from a smaller range would be better there I think.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 22 Dec 2008 03:59:42 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "\"Mark Wong\" <[email protected]> writes:\n\n>> I'm not sure how bad that is for the benchmarks. The only effect that comes to\n>> mind is that it might exaggerate the effects of some i/o intensive operations\n>> that under normal conditions might not cause any noticeable impact like wal\n>> log file switches or even checkpoints.\n>\n> I'm not sure I'm following. \n\nAll I'm saying is that the performance characteristics won't be the same when\nthe service wait times are 1-10 seconds rather than the 20-30ms at which alarm\nbells would start to ring on a real production system.\n\nI'm not exactly sure what changes it might make though.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's RemoteDBA services!\n", "msg_date": "Mon, 22 Dec 2008 10:35:35 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "\"Mark Wong\" <[email protected]> writes:\n\n> Thanks for the input.\n\nIn a more constructive vein:\n\n1) autovacuum doesn't seem to be properly tracked. It looks like you're just\n tracking the autovacuum process and not the actual vacuum subprocesses\n which it spawns.\n\n2) The response time graphs would be more informative if you excluded the\n ramp-up portion of the test. As it is there are big spikes at the low end\n but it's not clear whether they're really part of the curve or due to\n ramp-up. This is especially visible in the stock-level graph where it\n throws off the whole y scale.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's On-Demand Production Tuning\n", "msg_date": "Mon, 22 Dec 2008 10:56:50 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": ">>> \"Mark Wong\" <[email protected]> wrote: \n \n> The DL380 G5 is an 8 core Xeon E5405 with 32GB of\n> memory. The MSA70 is a 25-disk 15,000 RPM SAS array, currently\n> configured as a 25-disk RAID-0 array.\n \n> number of connections (250):\n \n> Moving forward, what other parameters (or combinations of) do people\n> feel would be valuable to illustrate with this workload?\n \nTo configure PostgreSQL for OLTP on that hardware, I would strongly\nrecommend the use of a connection pool which queues requests above\nsome limit on concurrent queries. My guess is that you'll see best\nresults with a limit somewhere aound 40, based on my tests indicating\nthat performance drops off above (cpucount * 2) + spindlecount.\n \nI wouldn't consider tests of the other parameters as being very useful\nbefore tuning this. 
This is more or less equivalent to the \"engines\"\nconfiguration in Sybase, for example.\n \n-Kevin\n", "msg_date": "Mon, 22 Dec 2008 09:27:29 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Mon, Dec 22, 2008 at 12:59 AM, Greg Smith <[email protected]> wrote:\n> On Sat, 20 Dec 2008, Mark Wong wrote:\n>\n>> Here are links to how the throughput changes when increasing\n>> shared_buffers: http://pugs.postgresql.org/node/505 My first glance takes\n>> tells me that the system performance is quite erratic when increasing the\n>> shared_buffers.\n>\n> If you smooth that curve out a bit, you have to throw out the 22528MB figure\n> as meaningless--particularly since it's way too close to the cliff where\n> performance dives hard. The sweet spot looks to me like 11264MB to 17408MB.\n> I'd say 14336MB is the best performing setting that's in the middle of a\n> stable area.\n>\n>> And another series of tests to show how throughput changes when\n>> checkpoint_segments are increased: http://pugs.postgresql.org/node/503 I'm\n>> also not what to gather from increasing the checkpoint_segments.\n>\n> What was shared_buffers set to here? Those two settings are not completely\n> independent, for example at a tiny buffer size it's not as obvious there's a\n> win in spreading the checkpoints out more. It's actually a 3-D graph, with\n> shared_buffers and checkpoint_segments as two axes and the throughput as the\n> Z value.\n\nThe shared_buffers are the default, 24MB. The database parameters are\nsaved, probably unclearly, here's an example link:\n\nhttp://207.173.203.223/~markwkm/community6/dbt2/baseline.1000.1/db/param.out\n\n> Since that's quite time consuming to map out in its entirety, the way I'd\n> suggest navigating the territory more efficiently is to ignore the defaults\n> altogether. Start with a configuration that someone familiar with tuning\n> the database would pick for this hardware: 8192MB for shared_buffers and\n> 100 checkpoint segments would be a reasonable base point. Run the same\n> tests you did here, but with the value you're not changing set to those much\n> larger values rather than the database defaults, and then I think you'd end\n> with something more interesting. Also, I think the checkpoint_segments\n> values >500 are a bit much, given what level of recovery time would come\n> with a crash at that setting. Smaller steps from a smaller range would be\n> better there I think.\n\nI should probably run your pgtune script, huh?\n\nRegards,\nMark\n", "msg_date": "Mon, 22 Dec 2008 19:30:05 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Mon, Dec 22, 2008 at 2:56 AM, Gregory Stark <[email protected]> wrote:\n> \"Mark Wong\" <[email protected]> writes:\n>\n>> Thanks for the input.\n>\n> In a more constructive vein:\n>\n> 1) autovacuum doesn't seem to be properly tracked. It looks like you're just\n> tracking the autovacuum process and not the actual vacuum subprocesses\n> which it spawns.\n\nHrm, tracking just the launcher process certainly doesn't help. Are\nthe spawned processed short lived? I take a snapshot of\n/proc/<pid>/io data every 60 seconds. The only thing I see named\nautovacuum is the launcher process. Or perhaps I can't read? 
Here is\nthe raw data of the /proc/<pid>/io captures:\n\nhttp://207.173.203.223/~markwkm/community6/dbt2/baseline.1000.1/db/iopp.out\n\n> 2) The response time graphs would be more informative if you excluded the\n> ramp-up portion of the test. As it is there are big spikes at the low end\n> but it's not clear whether they're really part of the curve or due to\n> ramp-up. This is especially visible in the stock-level graph where it\n> throws off the whole y scale.\n\nOk, we'll take note and see what we can do.\n\nRegards,\nMark\n", "msg_date": "Mon, 22 Dec 2008 19:36:05 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Mon, Dec 22, 2008 at 7:27 AM, Kevin Grittner\n<[email protected]> wrote:\n>>>> \"Mark Wong\" <[email protected]> wrote:\n>\n>> The DL380 G5 is an 8 core Xeon E5405 with 32GB of\n>> memory. The MSA70 is a 25-disk 15,000 RPM SAS array, currently\n>> configured as a 25-disk RAID-0 array.\n>\n>> number of connections (250):\n>\n>> Moving forward, what other parameters (or combinations of) do people\n>> feel would be valuable to illustrate with this workload?\n>\n> To configure PostgreSQL for OLTP on that hardware, I would strongly\n> recommend the use of a connection pool which queues requests above\n> some limit on concurrent queries. My guess is that you'll see best\n> results with a limit somewhere aound 40, based on my tests indicating\n> that performance drops off above (cpucount * 2) + spindlecount.\n\nYeah, we are using a homegrown connection concentrator as part of the\ntest kit, but it's not very intelligent.\n\n> I wouldn't consider tests of the other parameters as being very useful\n> before tuning this. This is more or less equivalent to the \"engines\"\n> configuration in Sybase, for example.\n\nRight, I have the database configured for 250 connections but I'm\nusing 200 of them. I'm pretty sure for this scale factor 200 is more\nthan enough. Nevertheless I should go through the exercise.\n\nRegards,\nMark\n", "msg_date": "Mon, 22 Dec 2008 19:48:57 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Mon, 22 Dec 2008, Mark Wong wrote:\n\n> The shared_buffers are the default, 24MB. The database parameters are\n> saved, probably unclearly, here's an example link:\n>\n> http://207.173.203.223/~markwkm/community6/dbt2/baseline.1000.1/db/param.out\n\nThat's a bit painful to slog through to find what was changed from the \ndefaults. How about saving the output from this query instead, or in \naddition to the version sorted by name:\n\nselect name,setting,source,short_desc from pg_settings order by \nsource,name;\n\nMakes it easier to ignore everything that isn't set.\n\n> I should probably run your pgtune script, huh?\n\nThat's basically where the suggestions for center points I made came from. \nThe only other thing that does that might be interesting to examine is \nthat it bumps up checkpoint_completion_target to 0.9 once you've got a \nlarge number of checkpoint_segments.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 22 Dec 2008 22:51:08 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "Mark Wong escribi�:\n\n> Hrm, tracking just the launcher process certainly doesn't help. 
Are\n> the spawned processed short lived? I take a snapshot of\n> /proc/<pid>/io data every 60 seconds.\n\nThe worker processes can be short-lived, but if they are, obviously they\nare not vacuuming the large tables. If you want to track all autovacuum\nactions, change autovacuum_log_min_messages to 0.\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nPostgreSQL Replication, Consulting, Custom Development, 24x7 support\n", "msg_date": "Tue, 23 Dec 2008 09:01:06 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "Hi Mark,\n\nGood to see you producing results again.\n\n\nOn Sat, 2008-12-20 at 16:54 -0800, Mark Wong wrote:\n> Here are links to how the throughput changes when increasing shared_buffers:\n> \n> http://pugs.postgresql.org/node/505\n\nOnly starnge thing here is the result at 22528MB. It's the only normal\none there. Seems to be a freeze occurring on most tests around the 30\nminute mark, which delays many backends and reduces writes. \n\nReduction in performance as shared_buffers increases looks normal.\n\nIncrease wal_buffers, but look for something else as well. Try to get a\nbacktrace from when the lock up happens. It may not be Postgres?\n\n> And another series of tests to show how throughput changes when\n> checkpoint_segments are increased:\n> \n> http://pugs.postgresql.org/node/503\n> \n> The links go to a graphical summary and raw data. Note that the\n> maximum theoretical throughput at this scale factor is approximately\n> 12000 notpm.\n> \n> My first glance takes tells me that the system performance is quite\n> erratic when increasing the shared_buffers. I'm also not what to\n> gather from increasing the checkpoint_segments. Is it simply that the\n> more checkpoint segments you have, the more time the database spends\n> fsyncing when at a checkpoint?\n\nI would ignore the checkpoint_segment tests because you aren't using a\nrealistic value of shared_buffers. I doubt any such effect is noticeable\nwhen you use a realistic value determined from set of tests 505.\n\n-- \n Simon Riggs www.2ndQuadrant.com\n PostgreSQL Training, Services and Support\n\n", "msg_date": "Wed, 24 Dec 2008 14:14:06 +0000", "msg_from": "Simon Riggs <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Mon, Dec 22, 2008 at 7:27 AM, Kevin Grittner\n<[email protected]> wrote:\n>>>> \"Mark Wong\" <[email protected]> wrote:\n>\n>> The DL380 G5 is an 8 core Xeon E5405 with 32GB of\n>> memory. The MSA70 is a 25-disk 15,000 RPM SAS array, currently\n>> configured as a 25-disk RAID-0 array.\n>\n>> number of connections (250):\n>\n>> Moving forward, what other parameters (or combinations of) do people\n>> feel would be valuable to illustrate with this workload?\n>\n> To configure PostgreSQL for OLTP on that hardware, I would strongly\n> recommend the use of a connection pool which queues requests above\n> some limit on concurrent queries. My guess is that you'll see best\n> results with a limit somewhere aound 40, based on my tests indicating\n> that performance drops off above (cpucount * 2) + spindlecount.\n\nIt appears to peak around 220 database connections:\n\nhttp://pugs.postgresql.org/node/514\n\nOf course the system still isn't really tuned all that much... 
I\nwouldn't be surprised if the workload peaked at a different number of\nconnections as it is tuned more.\n\n> I wouldn't consider tests of the other parameters as being very useful\n> before tuning this. This is more or less equivalent to the \"engines\"\n> configuration in Sybase, for example.\n\nRegards,\nMark\n", "msg_date": "Mon, 12 Jan 2009 21:42:15 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": ">>> \"Mark Wong\" <[email protected]> wrote: \n \n> It appears to peak around 220 database connections:\n> \n> http://pugs.postgresql.org/node/514\n \nInteresting. What did you use for connection pooling?\n \nMy tests have never stayed that flat as the connections in use\nclimbed. I'm curious why we're seeing such different results.\n \n-Kevin\n", "msg_date": "Tue, 13 Jan 2009 09:40:35 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Tue, Jan 13, 2009 at 7:40 AM, Kevin Grittner\n<[email protected]> wrote:\n>>>> \"Mark Wong\" <[email protected]> wrote:\n>\n>> It appears to peak around 220 database connections:\n>>\n>> http://pugs.postgresql.org/node/514\n>\n> Interesting. What did you use for connection pooling?\n\nIt's a fairly dumb but custom built C program for the test kit:\n\nhttp://git.postgresql.org/?p=~markwkm/dbt2.git;a=summary\n\nI think the bulk of the logic is in src/client.c, src/db_threadpool.c,\nand src/transaction_queue.c.\n\n> My tests have never stayed that flat as the connections in use\n> climbed. I'm curious why we're seeing such different results.\n\nI'm sure the difference in workloads makes a difference. Like you\nimplied earlier, I think we have to figure out what works best in for\nour own workloads.\n\nRegards,\nMark\n", "msg_date": "Tue, 13 Jan 2009 19:49:25 -0800", "msg_from": "\"Mark Wong\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Mon, Dec 22, 2008 at 12:59 AM, Greg Smith <[email protected]> wrote:\n> On Sat, 20 Dec 2008, Mark Wong wrote:\n>\n>> Here are links to how the throughput changes when increasing\n>> shared_buffers: http://pugs.postgresql.org/node/505 My first glance takes\n>> tells me that the system performance is quite erratic when increasing the\n>> shared_buffers.\n>\n> If you smooth that curve out a bit, you have to throw out the 22528MB figure\n> as meaningless--particularly since it's way too close to the cliff where\n> performance dives hard. The sweet spot looks to me like 11264MB to 17408MB.\n> I'd say 14336MB is the best performing setting that's in the middle of a\n> stable area.\n>\n>> And another series of tests to show how throughput changes when\n>> checkpoint_segments are increased: http://pugs.postgresql.org/node/503 I'm\n>> also not what to gather from increasing the checkpoint_segments.\n>\n> What was shared_buffers set to here? Those two settings are not completely\n> independent, for example at a tiny buffer size it's not as obvious there's a\n> win in spreading the checkpoints out more. It's actually a 3-D graph, with\n> shared_buffers and checkpoint_segments as two axes and the throughput as the\n> Z value.\n>\n> Since that's quite time consuming to map out in its entirety, the way I'd\n> suggest navigating the territory more efficiently is to ignore the defaults\n> altogether. 
Start with a configuration that someone familiar with tuning\n> the database would pick for this hardware: 8192MB for shared_buffers and\n> 100 checkpoint segments would be a reasonable base point. Run the same\n> tests you did here, but with the value you're not changing set to those much\n> larger values rather than the database defaults, and then I think you'd end\n> with something more interesting. Also, I think the checkpoint_segments\n> values >500 are a bit much, given what level of recovery time would come\n> with a crash at that setting. Smaller steps from a smaller range would be\n> better there I think.\n\nSorry for the long delay. I have a trio of results (that I actually\nran about four weeks ago) setting the shared_buffers to 7680MB (I\ndon't know remember why it wasn't set to 8192MB :( ) and\ncheckpoint_segments to 100:\n\nhttp://pugs.postgresql.org/node/517\n\nI'm also capturing the PostgreSQL parameters as suggested so we can\nsee what's set in the config file, default, command line etc. It's\nthe \"Settings\" link in the \"System Summary\" section on the report web\npage.\n\nSo about a 7% change for this particular workload:\n\nhttp://pugs.postgresql.org/node/502\n\nWe're re-running some filesystem tests for an upcoming conference, so\nwe'll get back to it shortly...\n\nRegards,\nMark\n", "msg_date": "Thu, 22 Jan 2009 17:44:47 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Thu, 22 Jan 2009, Mark Wong wrote:\n\n> I'm also capturing the PostgreSQL parameters as suggested so we can\n> see what's set in the config file, default, command line etc. It's\n> the \"Settings\" link in the \"System Summary\" section on the report web\n> page.\n\nThose look good, much easier to pick out the stuff that's been customized. \nI note that the Linux \"Settings\" links seems to be broken though.\n\nTo recap a summary here, what you had before were:\n\nshared_buffers=24MB checkpoint_segments=100 notpm=7527\nshared_buffers=8192MB checkpoint_segments=3 notpm=7996\n\nAnd the new spots show:\nshared_buffers=7680MB checkpoint_segments=100 notpm=9178\n\nWhat's neat about your graphs now is that I think you can see the \ncheckpoints happening in the response time graphs. For example, if you \nlook at \nhttp://207.173.203.223/~markwkm/community6/dbt2/pgtune.1000.100.1/report/rt_d.png \nand you focus on what happens just before each 10 minute mark, I'm \nguessing that response time spike is the fsync phase at the end of the \ncheckpoint. That's followed by a period where response time is really \nfast. That's because those writes are all pooling into the now cleared \nout Linux buffer cache, but pdflush isn't really being aggressive about \nwriting them out yet. On your server that can absorb quite a few writes \nbefore clients start blocking on them, which is when response time climbs \nback up.\n\nA particularly interesting bit is to compare against the result with the \npeak notpm you had in your earlier tests, where shared_buffers=15360MB: \nhttp://207.173.203.223/~markwkm/community6/dbt2/shared_buffers/shared_buffers.15360MB/report/rt_d.png\n\nWhile the average speed was faster on that one, the worst-case response \ntime was much worse. 
You can really see this by comparing the response \ntime distribution.\n\nBig shared buffers but low checkpoint_segments:\nhttp://207.173.203.223/~markwkm/community6/dbt2/shared_buffers/shared_buffers.15360MB/report/dist_d.png\n\nMedium shared buffers and medium checkpoint_segments:\nhttp://207.173.203.223/~markwkm/community6/dbt2/pgtune.1000.100.1/report/dist_d.png\n\nThe checkpoint spreading logic is making a lot more transactions suffer \nmoderate write delays in order to get a big improvement in worst-case \nbehavior.\n\nThe next fine-tuning bit I'd normally apply in this situation is to see if \nincreasing checkpoint_completion_target from the default (0.5) to 0.9 does \nanything to flatten out that response time graph. I've seen a modest \nincrease in wal_buffers (from the default to, say, 1MB) help smooth out \nthe rough spots too.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Thu, 22 Jan 2009 22:44:51 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Thu, Jan 22, 2009 at 7:44 PM, Greg Smith <[email protected]> wrote:\n> On Thu, 22 Jan 2009, Mark Wong wrote:\n>\n>> I'm also capturing the PostgreSQL parameters as suggested so we can\n>> see what's set in the config file, default, command line etc. It's\n>> the \"Settings\" link in the \"System Summary\" section on the report web\n>> page.\n>\n> Those look good, much easier to pick out the stuff that's been customized. I\n> note that the Linux \"Settings\" links seems to be broken though.\n\nOh fudge, I think I see where my scripts are broken. We're running\nwith a different Linux kernel now than before so I don't want to grab\nthe parameters yet. I'll switch to the previous kernel to get the\nparameters after the current testing is done, and fix the scripts in\nthe meantime.\n\nRegards,\nMark\n", "msg_date": "Thu, 22 Jan 2009 22:10:15 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Thu, Jan 22, 2009 at 10:10 PM, Mark Wong <[email protected]> wrote:\n> On Thu, Jan 22, 2009 at 7:44 PM, Greg Smith <[email protected]> wrote:\n>> On Thu, 22 Jan 2009, Mark Wong wrote:\n>>\n>>> I'm also capturing the PostgreSQL parameters as suggested so we can\n>>> see what's set in the config file, default, command line etc. It's\n>>> the \"Settings\" link in the \"System Summary\" section on the report web\n>>> page.\n>>\n>> Those look good, much easier to pick out the stuff that's been customized. I\n>> note that the Linux \"Settings\" links seems to be broken though.\n>\n> Oh fudge, I think I see where my scripts are broken. We're running\n> with a different Linux kernel now than before so I don't want to grab\n> the parameters yet. I'll switch to the previous kernel to get the\n> parameters after the current testing is done, and fix the scripts in\n> the meantime.\n\nSorry for the continuing delays. I have to make more time to spend on\nthis part. One of the problems is that my scripts are listing the OS\nof the main driver system, as opposed to the db system. 
Meanwhile,\nI've attached the sysctl output from the kernel running on the database\nsystem, 2.6.27-gentoo-r2.\n\nRegards,\nMark", "msg_date": "Thu, 5 Feb 2009 21:04:46 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Thu, 5 Feb 2009, Mark Wong wrote:\n\n> One of the problems is that my scripts are listing the OS of the main \n> driver system, as opposed to the db system.\n\nThat's not a fun problem to deal with. Last time I ran into it, I ended \nup writing a little PL/PerlU function that gathered all the kernel-level \ninfo I wanted, and then just pulled the info over by running that on the \ndriver system.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Fri, 6 Feb 2009 03:22:00 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" }, { "msg_contents": "On Thu, Jan 22, 2009 at 7:44 PM, Greg Smith <[email protected]> wrote:\n> The next fine-tuning bit I'd normally apply in this situation is to see if\n> increasing checkpoint_completion_target from the default (0.5) to 0.9 does\n> anything to flatten out that response time graph. I've seen a modest\n> increase in wal_buffers (from the default to, say, 1MB) help smooth out the\n> rough spots too.\n\nHi all,\n\nAfter yet another delay, I have .6 to .9 (I forgot .5. :():\n\nhttp://pugs.postgresql.org/node/526\n\nI don't think the effects of the checkpoint_completion_target are\nsignificant, and I sort of feel it's because the entire database is on\na single device. I've started doing some runs with the database log\non a separate device, so I'll be trying some of these parameters\nagain.\n\nRegards,\nMark\n", "msg_date": "Sun, 22 Feb 2009 15:03:28 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dbt-2 tuning results with postgresql-8.3.5" } ]
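One practical takeaway from this thread is to record the handful of GUCs being varied alongside every run. Building on the pg_settings query suggested earlier in the thread, here is a short sketch that captures just the parameters discussed above (the particular name list is my selection, not something taken from the kit):

SELECT name, setting, unit, source, short_desc
FROM pg_settings
WHERE name IN ('shared_buffers', 'checkpoint_segments',
               'checkpoint_completion_target', 'wal_buffers',
               'max_connections')
ORDER BY name;

Saving that output with each result makes later comparisons (for example, the 24MB versus 7680MB shared_buffers runs) much easier to reconstruct.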
[ { "msg_contents": "Hi,\n\nI am looking for some recent and hopefully genuine comparisons between\nOracle and PostgreSQL regarding their performance in large scale\napplications. Tests from real world applications would be preferable\nbut not required. Also differentiations in different areas (i.e.\ndifferent data types, query structures, clusters, hardware, etc.)\nmight be helpful as well.\n\nI don't trust the results that Google gives me.\n\nRegards,\nVictor Nawothnig\n", "msg_date": "Sun, 21 Dec 2008 09:17:46 +0100", "msg_from": "\"Victor Nawothnig\" <[email protected]>", "msg_from_op": true, "msg_subject": "PostgreSQL vs Oracle" }, { "msg_contents": "On Sun, Dec 21, 2008 at 1:17 AM, Victor Nawothnig\n<[email protected]> wrote:\n> Hi,\n>\n> I am looking for some recent and hopefully genuine comparisons between\n> Oracle and PostgreSQL regarding their performance in large scale\n> applications. Tests from real world applications would be preferable\n> but not required. Also differentiations in different areas (i.e.\n> different data types, query structures, clusters, hardware, etc.)\n> might be helpful as well.\n\nDue to the terms of the license for Oracle, no one can publish\nbenchmarks without their permission.\n\nHaving used both Oracle 9 and pgsql from 6.5 to 8.3, I can say that\nPostgreSQL is competitive for most small to medium loads I've thrown\nat it, and given the high cost of licensing for oracle, you can throw\na LOT of hardware at PostgreSQL to catch up the last 10 or 20%\nslowdown you might see in some apps.\n\nMost of Oracle's advantages are in the more advanced features like\npartitioning and reporting functions.\n\nThat said, I find PostgreSQL takes a LOT less administration to keep\nit happy. Oracle doesn't just allow a wider range of performance\ntuning, it demands it. If you don't demonstrably need Oracle's\nadvanced features then PostgreSQL is usually a better choice.\n\nLast place we worked we developed on pgsql and migrated to oracle in\nproduction (this was in the 7.4 era, when Oracle 9 was noticeably\nfaster and better than pgsql for transactional loads.) It was very\neasy to write for pgsql and migrate to oracle as most SQL queries\ndidn't need any serious changes from one db to the other.\n\nPostgreSQL generally tries to follow the SQL specs a little closer,\noracle has more crufty legacy stuff in it.\n\nSo, when you say large scale applications, are you talking OLTP or\nOLAP type workloads? My experience has been that very few OLTP apps\nget very large, as they get partitioned before they get over a few\ndozen gigabytes. OLAP, OTOH, often run into hundreds of Gigs or\nterabytes. I've found pgsql competitive in both really.\n", "msg_date": "Sun, 21 Dec 2008 01:51:19 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL vs Oracle" }, { "msg_contents": "One year ago a Postgres teacher pointed me there:\n\nhttp://it.toolbox.com/blogs/database-soup/postgresql-publishes-first-real-benchmark-17470\nhttp://www.spec.org/jAppServer2004/results/res2007q3/jAppServer2004-20070606-00065.html\n\nthat would be just like what you're looking for.\n\nRegards,\nStefano\n\nOn Sun, Dec 21, 2008 at 9:17 AM, Victor Nawothnig\n<[email protected]> wrote:\n> Hi,\n>\n> I am looking for some recent and hopefully genuine comparisons between\n> Oracle and PostgreSQL regarding their performance in large scale\n> applications. Tests from real world applications would be preferable\n> but not required. 
Also differentiations in different areas (i.e.\n> different data types, query structures, clusters, hardware, etc.)\n> might be helpful as well.\n>\n> I don't trust the results that Google gives me.\n>\n> Regards,\n> Victor Nawothnig\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Sun, 21 Dec 2008 11:15:12 +0100", "msg_from": "\"Stefano Dal Pra\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL vs Oracle" }, { "msg_contents": "Victor Nawothnig wrote:\n> Hi,\n> \n> I am looking for some recent and hopefully genuine comparisons between\n> Oracle and PostgreSQL regarding their performance in large scale\n> applications. Tests from real world applications would be preferable\n> but not required. Also differentiations in different areas (i.e.\n> different data types, query structures, clusters, hardware, etc.)\n> might be helpful as well.\n\nVictor, Oracle expressly forbids, in their license agreement, anyone \nfrom publishing performance comparisons between Oracle and any other \nproduct. So you will rarely find anyone willing to publicly provide you \nany performance numbers.\n\nDifference in data structures, etc, are fairly easy to determine. \nAnyone can read the Oracle documentation.\n\n-- \nGuy Rouillier\n", "msg_date": "Sun, 21 Dec 2008 22:31:33 -0500", "msg_from": "Guy Rouillier <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PostgreSQL vs Oracle" } ]
[ { "msg_contents": "Here is a query on a partitioned schema that produces a very bad query plan. The tables are fully vacuumed, analyzed with stats target 40, and no bloat (created with pure inserts, no updates or deletes).\n\nI already know of at least three bugs with the query planner and partitions listed at the end of this message.\n\nBelow are explain analyze examples on just one partition. Expanding the date range to be multiple partitions only makes this worse.\n\nFirst, the query written to go against the direct partitions, which is fast and does the right plan:\n(although, wouldn't it use less memory if it hashed the small table, and scanned the larger one against that rather than vice-versa? Or am I reading the explain wrong?)\n\nrr=> explain analyze select pl.id, sl.terms from p_pp_logs_026_2008_12_17 pl, p_s_logs_026_2008_12_17 sl where sl.session = pl.session and sl.terms <> 'null' and sl.terms is not null; QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------------------------------------\n Hash Join (cost=31470.89..37199.26 rows=38597 width=27) (actual time=743.782..772.705 rows=29907 loops=1)\n Hash Cond: ((pl.session)::text = (sl.session)::text)\n -> Seq Scan on pp_logs_026_2008_12_17 pl (cost=0.00..357.49 rows=8449 width=46) (actual time=0.007..3.064 rows=8449 loops=1)\n -> Hash (cost=24822.35..24822.35 rows=531883 width=57) (actual time=743.597..743.597 rows=531453 loops=1)\n -> Seq Scan on s_logs_026_2008_12_17 sl (cost=0.00..24822.35 rows=531883 width=57) (actual time=0.011..392.242 rows=532067 loops=1)\n Filter: ((terms IS NOT NULL) AND ((terms)::text <> 'null'::text))\n Total runtime: 779.431 ms\n\nIn the above, the estimated and actual rows are about right, only a bit off after the join.\n\nWe are partitioned by s_id and date, the general form of the query used for scanning more than one combination of these values fails.\nIn the below, which restricts the data to the same tables, the planner is extremely wrong (plus the parent tables which have one irrelevant row of dummy data so that analyze works on them):\n\nexplain analyze select pl.id, sl.terms from pp_logs pl, s_logs sl where pl.s_id = 26 and sl.s_id = 26 and sl.session = pl.session and sl.terms <> 'null' and sl.terms is not null and pl.date = '2008-12-17' and sl.date = '2008-12-17';\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------------------------------------------------------\n Merge Join (cost=79036.43..416160.16 rows=22472099 width=27) (actual time=7089.961..7839.665 rows=29907 loops=1)\n Merge Cond: ((pl.session)::text = (sl.session)::text)\n -> Sort (cost=968.98..990.10 rows=8450 width=46) (actual time=193.778..196.690 rows=8449 loops=1)\n Sort Key: pl.session\n Sort Method: quicksort Memory: 1043kB\n -> Append (cost=0.00..417.84 rows=8450 width=46) (actual time=36.824..133.157 rows=8449 loops=1)\n -> Seq Scan on pp_logs pl (cost=0.00..18.10 rows=1 width=21) (actual time=0.056..0.056 rows=0 loops=1)\n Filter: ((s_id = 26) AND (date = '2008-12-17'::date))\n -> Seq Scan on pp_logs_026_2008_12_17 pl (cost=0.00..399.74 rows=8449 width=46) (actual time=36.766..129.317 rows=8449 loops=1)\n Filter: ((s_id = 26) AND (date = '2008-12-17'::date))\n -> Sort (cost=78067.45..79397.16 rows=531884 width=57) (actual time=6895.648..7094.701 rows=552412 loops=1)\n Sort Key: sl.session\n Sort Method: quicksort Memory: 92276kB\n -> Append (cost=0.00..27483.28 
rows=531884 width=57) (actual time=33.759..1146.936 rows=532067 loops=1)\n -> Seq Scan on s_logs sl (cost=0.00..1.19 rows=1 width=22) (actual time=0.036..0.036 rows=0 loops=1)\n Filter: ((terms IS NOT NULL) AND ((terms)::text <> 'null'::text) AND (s_id = 26) AND (date = '2008-12-17'::date))\n -> Seq Scan on s_logs_026_2008_12_17 sl (cost=0.00..27482.09 rows=531883 width=57) (actual time=33.721..903.607 rows=532067 loops=1)\n Filter: ((terms IS NOT NULL) AND ((terms)::text <> 'null'::text) AND (s_id = 26) AND (date = '2008-12-17'::date))\n Total runtime: 7861.723 ms\n\nSome partitions have about 10x the data of the above, resulting in ~30x longer times to do the sort, but only 10x to hash. The sort also uses far, far more memory than the hash should if it were to hash the small one and hash-join the large table against that.\n\nAs you can see in the above example, this inner join, which the planner knows has two arms of row count ~530K and ~8.5K, is predicted to have 22M output rows. What?\nDid it lose its ability to even roughly know n_distinct? Can it use n_distinct for each table and combine them for a (weighted) worst case estimate and arrive at something less bizarre?\nIf I scan across more tables, the actual output grows roughly linearly, but the estimate looks like it's A*ln(A*B) where A is the size of the larger arm and B is the size of the smaller one. Having an inner join result in more rows than one of the arms is difficult with n_distinct numbers like these, as evidenced by the direct-to-table query estimate.\nPerhaps the parent table (with one row) is getting in the way?\n\nAny suggestions on how to get a good plan when accessing this via the parent table to scan multiple dates?\n\n\nKnown (to me) partition bugs related to the query planner / stats:\n* Row width estimates for scans across multiple tables are wrong (just max(each table width) rather than a weighted average - this could have been fixed in 8.3.4+ without me noticing though).\n* Column selectivity not combined rationally for aggregate statistics on multiple table scans in an arm. Example: two partitions may have an int column that has only 30 distinct values over 2M rows in each partition. The planner will decide that the selectivity is 10% rather than the worst case (30+30)/(total rows) or best case (30/(total rows)). There are many variations on this theme, not just the above type of error.\n* An empty partition (usually the parent table) will propagate default statistics through the errors above, leading to even more strange query plans. Empty table default statistics are generally poor, but worse in the cases above since they override good stats. Furthermore, ANALYZE won't write stats for an empty table, so unless you put dummy rows in a table, you are guaranteed to get bad plans.\n\nI have put a single dummy row in each parent table with minimum column width on text columns and analyzed it to help out on the above. All partitions are fully vacuumed with no bloat and fully analyzed. After a day, they become read only.\n\nThe only time I wish I had hints is when I'm working on partitioned tables; most other times minor query modifications plus good statistics get a good plan (but perhaps a less natural query syntax). The planner is not adequate on partitioned tables.", "msg_date": "Sun, 21 Dec 2008 12:08:36 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": true, "msg_subject": "Query planner plus partitions equals very bad plans, again" } ]
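Until the planner does better with statistics over append plans, one workaround consistent with the fast per-partition query at the top of that message is to spell out the UNION ALL over the needed child tables instead of querying the parent. A rough sketch for two dates, assuming a 2008-12-18 pair of child tables following the same naming pattern (those names are a guess, not taken from the message):

SELECT pl.id, sl.terms
FROM p_pp_logs_026_2008_12_17 pl
JOIN p_s_logs_026_2008_12_17 sl ON sl.session = pl.session
WHERE sl.terms IS NOT NULL AND sl.terms <> 'null'
UNION ALL
SELECT pl.id, sl.terms
FROM p_pp_logs_026_2008_12_18 pl
JOIN p_s_logs_026_2008_12_18 sl ON sl.session = pl.session
WHERE sl.terms IS NOT NULL AND sl.terms <> 'null';

Each arm should then get the per-partition hash join shown in the first plan, at the cost of generating the child table names in the application.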
[ { "msg_contents": "SQL:\n\nupdate product set sz_category_id=null where am_style_kw1 is not null \nand sz_category_id is not null\n\nquery plan:\n\n\"Seq Scan on product (cost=0.00..647053.30 rows=580224 width=1609)\"\n\" Filter: ((am_style_kw1 IS NOT NULL) AND (sz_category_id IS NOT NULL))\"\n\nInformation on the table:\n\nrow count ~ 2 million\ntable size: 4841 MB\ntoast table size: 277mb\nindexes size: 4434 MB\n\nComputer: FreeBSD 7.0 stable, Dual Xeon Quad code 5420 2.5GHZ, 8GB \nmemory, 6 ES SATA disks in hw RAID 6 (+2GB write back cache) for the \ndatabase.\n\nAutovacuum is enabled. We also perform \"vacuum analyze\" on the database, \neach day.\n\nHere are some non-default values from postgresql.conf:\n\nshared_buffers=400MB\nmaintenance_work_mem = 256MB\nmax_fsm_pages = 1000000\n\nThere was almost no load on the machine (CPU: mostly idle, IO: approx. \n5% total) when we started this update.\n\nMaybe I'm wrong with this, but here is a quick calculation: the RAID \narray should do at least 100MB/sec. Reading the whole table should not \ntake more than 1 min. I think about 20% of the rows should have been \nupdated. Writting out all changes should not take too much time. I \nbelieve that this update should have been completed within 2-3 minutes.\n\nIn reality, after 2600 seconds I have cancelled the query. We monitored \ndisk I/O and it was near 100% all the time.\n\nWhat is wrong?\n\nThank you,\n\n Laszlo\n\n", "msg_date": "Mon, 22 Dec 2008 12:06:02 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Slow table update" }, { "msg_contents": "Laszlo Nagy wrote:\n> SQL:\n>\n> update product set sz_category_id=null where am_style_kw1 is not null \n> and sz_category_id is not null\nHmm, this query:\n\nselect count(*) from product where am_style_kw1 is not null and \nsz_category_id is not null and sz_category_id<>4809\n\nopens in 10 seconds. The update would not finish in 2600 seconds. I \ndon't understand.\n\nL\n\n", "msg_date": "Mon, 22 Dec 2008 12:34:32 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow table update" }, { "msg_contents": "Laszlo Nagy wrote:\n\n> \n> Laszlo Nagy wrote:\n> > SQL:\n> >\n> > update product set sz_category_id=null where am_style_kw1 is not null \n> > and sz_category_id is not null\n> Hmm, this query:\n> \n> ?select count(*) from product where am_style_kw1 is not null and \n> sz_category_id is not null and sz_category_id<>4809\n> \n> opens in 10 seconds. The update would not finish in 2600 seconds. I \n> don't understand.\n\nIf the table has some sort of FK relations it might be being slowed by the need to check a row meant to be deleted has any children.\n\nPerhaps triggers ? \n\nIf the table is very bloated with lots of dead rows (but you did say you vacuum frequently and check the results to make sure they are effective?) that would slow it down.\n\nA long running transaction elsewhere that is blocking the delete ? Did you check the locks ?\n\nHTH,\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. 
If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)\n\n\n\n\n\n\n\nRE: [PERFORM] Slow table update\n\n\n\nLaszlo Nagy wrote:\n\n>\n> Laszlo Nagy wrote:\n> > SQL:\n> >\n> > update product set sz_category_id=null where am_style_kw1 is not null\n> > and sz_category_id is not null\n> Hmm, this query:\n>\n> ?select count(*) from product where am_style_kw1 is not null and\n> sz_category_id is not null and sz_category_id<>4809\n>\n> opens in 10 seconds. The update would not finish in 2600 seconds. I\n> don't understand.\n\nIf the table has some sort of FK relations it might be being slowed by the need to check a row meant to be deleted has any children.\n\nPerhaps triggers ?\n\nIf the table is very bloated with lots of dead rows (but you did say you vacuum frequently and check the results to make sure they are effective?) that would slow it down.\n\nA long running transaction elsewhere that is blocking the delete ? Did you check the locks ?\n\nHTH,\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)", "msg_date": "Mon, 22 Dec 2008 04:45:11 -0700", "msg_from": "\"Gregory Williamson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow table update" }, { "msg_contents": "\n>\n> If the table has some sort of FK relations it might be being slowed by \n> the need to check a row meant to be deleted has any children.\n>\nIf you look at my SQL, there is only one column to be updated. That \ncolumn has no foreign key constraint. (It should have, but we did not \nwant to add that constraint in order to speed up updates.)\n>\n>\n> Perhaps triggers ?\n>\nTable \"product\" has no triggers.\n>\n>\n> If the table is very bloated with lots of dead rows (but you did say \n> you vacuum frequently and check the results to make sure they are \n> effective?) that would slow it down.\n>\nI'm not sure how to check if the vacuum was effective. But we have \nmax_fsm_pages=1000000 in postgresql.conf, and I do not get any errors \nfrom the daily vacuum script, so I presume that the table hasn't got too \nmany dead rows.\n\nAnyway, the table size is only 4GB. Even if half of the rows are dead, \nthe update should run quite quickly. Another argument is that when I \n\"select count(*)\" instead of \"UPDATE\", then I get the result in 10 \nseconds. I don't think that dead rows can make such a big difference \nbetween reading and writing.\n\nMy other idea was that there are so many indexes on this table, maybe \nthe update is slow because of the indexes? The column being updated has \nonly one index on it, and that is 200MB. But I have heard somewhere that \nbecause of PostgreSQL's multi version system, sometimes the system needs \nto update indexes with columns that are not being updated. I'm not sure. \nMight this be the problem?\n>\n>\n> A long running transaction elsewhere that is blocking the delete ? Did \n> you check the locks ?\n>\nSorry, this was an update. 
A blocking transaction would never explain \nwhy the disk I/O went up to 100% for 2600 seconds.\n\n L\n\n", "msg_date": "Mon, 22 Dec 2008 13:54:20 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow table update" }, { "msg_contents": "I just tested the same on a test machine. It only has one processor 1GB \nmemory, and one SATA disk. The same \"select count(*)\" was 58 seconds. I \nstarted the same UPDATE with EXPLAIN ANALYZE. It is running since 1000 \nseconds. I'm now 100% sure that the problem is with the database, \nbecause this machine has nothing but a postgresql server running on it. \nI'll post the output of explain analyze later.\n\n", "msg_date": "Mon, 22 Dec 2008 14:27:58 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow table update" }, { "msg_contents": "Laszlo Nagy <[email protected]> writes:\n>> If the table has some sort of FK relations it might be being slowed by \n>> the need to check a row meant to be deleted has any children.\n>> \n> If you look at my SQL, there is only one column to be updated. That \n> column has no foreign key constraint.\n\nThat was not the question that was asked.\n\n> My other idea was that there are so many indexes on this table, maybe \n> the update is slow because of the indexes?\n\nUpdating indexes is certainly very far from being free. How many is\n\"many\"?\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 22 Dec 2008 08:30:22 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow table update " }, { "msg_contents": "The thing to keep in mind is that every update creates a new row version\nthat has to be indexed for all indexes on the table, not just the indexes on\nthe column updated. You can test the weight of indexes by copying the table\nthen trying your query again.\n\nI've heard tell that if you have a table that updates frequently but needs\nto be indexed you can get some performance by breaking it into two tables\nwith the same primary key. One table with the stuff you index and another\ntable with the stuff you update.\n\nI hope this helps.\n\n\nOn Mon, Dec 22, 2008 at 8:30 AM, Tom Lane <[email protected]> wrote:\n\n> Laszlo Nagy <[email protected]> writes:\n> >> If the table has some sort of FK relations it might be being slowed by\n> >> the need to check a row meant to be deleted has any children.\n> >>\n> > If you look at my SQL, there is only one column to be updated. That\n> > column has no foreign key constraint.\n>\n> That was not the question that was asked.\n>\n> > My other idea was that there are so many indexes on this table, maybe\n> > the update is slow because of the indexes?\n>\n> Updating indexes is certainly very far from being free. How many is\n> \"many\"?\n>\n> regards, tom lane\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n\nThe thing to keep in mind is that every update creates a new row version that has to be indexed for all indexes on the table, not just the indexes on the column updated.  You can test the weight of indexes by copying the table then trying your query again.\nI've heard tell that if you have a table that updates frequently but needs to be indexed you can get some performance by breaking it into two tables with the same primary key.  
One table with the stuff you index and another table with the stuff you update.\nI hope this helps.On Mon, Dec 22, 2008 at 8:30 AM, Tom Lane <[email protected]> wrote:\nLaszlo Nagy <[email protected]> writes:\n>> If the table has some sort of FK relations it might be being slowed by\n>> the need to check a row meant to be deleted has any children.\n>>\n> If you look at my SQL, there is only one column to be updated. That\n> column has no foreign key constraint.\n\nThat was not the question that was asked.\n\n> My other idea was that there are so many indexes on this table, maybe\n> the update is slow because of the indexes?\n\nUpdating indexes is certainly very far from being free.  How many is\n\"many\"?\n\n                        regards, tom lane\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Fri, 26 Dec 2008 09:34:09 -0500", "msg_from": "\"Nikolas Everett\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow table update" }, { "msg_contents": "\n>> My other idea was that there are so many indexes on this table, maybe \n>> the update is slow because of the indexes?\n>> \n>\n> Updating indexes is certainly very far from being free. How many is\n> \"many\"?\n> \nNumber of indexes = 15.\n\n3 indexex are on \"text\" type column, 500MB in size each.\nOther are on int8 and timestamp columns, cca. 200MB each.\n\n\n", "msg_date": "Mon, 29 Dec 2008 10:43:42 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow table update" }, { "msg_contents": "Laszlo Nagy wrote:\n> >> My other idea was that there are so many indexes on this table, maybe \n> >> the update is slow because of the indexes?\n> >> \n> >\n> > Updating indexes is certainly very far from being free. How many is\n> > \"many\"?\n> > \n> Number of indexes = 15.\n> \n> 3 indexex are on \"text\" type column, 500MB in size each.\n> Other are on int8 and timestamp columns, cca. 200MB each.\n\nTo me, that's \"many\" ;-)\n\nThat's a lot when you think about what happens when indexed columns are changed, deleted or inserted -- a lot of background work that the database has to do.\n\nInf 8.3 the HOT feature may help if the columns being updated are indexed ... what version of PostgreSQL is this again ? (Forgive my lack of memory -- the last few days I've forgotten a lot, heh heh.)\n\nAny chances to reduce those to a bare minimum, perhaps using conditional index strategies or even some form of replication, so the primary uses indexes related to the updates and the mirror uses indexes related to the read-only / reporting needs ? Perhaps some form of staging table with no indexes to load, check data, etc. and then insert.\n\nAny way to reduce those ? Check the usage via the system stats on table/index use and try removing some and testing to see what makes a difference.\n\nHTH\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. 
If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)\n\n\n\n\n\n\nRE: [PERFORM] Slow table update\n\n\n\n\nLaszlo Nagy wrote:\n> >> My other idea was that there are so many indexes on this table, maybe\n> >> the update is slow because of the indexes?\n> >>    \n> >\n> > Updating indexes is certainly very far from being free.  How many is\n> > \"many\"?\n> >  \n> Number of indexes = 15.\n>\n> 3 indexex are on \"text\" type column, 500MB in size each.\n> Other are on int8 and timestamp columns, cca. 200MB each.\n\nTo me, that's \"many\" ;-)\n\nThat's a lot when you think about what happens when indexed columns are changed, deleted or inserted -- a lot of background work that the database has to do.\n\nInf 8.3 the HOT feature may help if the columns being updated are indexed ... what version of PostgreSQL is this again ? (Forgive my lack of memory -- the last few days I've forgotten a lot, heh heh.)\n\nAny chances to reduce those to a bare minimum, perhaps using conditional index strategies or even some form of replication, so the primary uses indexes related to the updates and the mirror uses indexes related to the read-only / reporting needs ? Perhaps some form of staging table with no indexes to load, check data, etc. and then insert.\n\nAny way to reduce those ? Check the usage via the system stats on table/index use and try removing some and testing to see what makes a difference.\n\nHTH\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)", "msg_date": "Mon, 29 Dec 2008 04:00:44 -0700", "msg_from": "\"Gregory Williamson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow table update" }, { "msg_contents": "\n>\n> Inf 8.3 the HOT feature may help if the columns being updated are \n> indexed ... what version of PostgreSQL is this again ? (Forgive my \n> lack of memory -- the last few days I've forgotten a lot, heh heh.)\n>\n8.3.5.\n\nThe colum that was being updated is part of one small index only.\n\n>\n> Any chances to reduce those to a bare minimum, perhaps using \n> conditional index strategies or even some form of replication, so the \n> primary uses indexes related to the updates and the mirror uses \n> indexes related to the read-only / reporting needs ? Perhaps some form \n> of staging table with no indexes to load, check data, etc. and then \n> insert.\n>\n> Any way to reduce those ? Check the usage via the system stats on \n> table/index use and try removing some and testing to see what makes a \n> difference.\n>\nWe tried to remove all indexes on a test system and the update was \nspeedy. We are going to try to reduce the row size also move static \ndescription/name/textual data into a separate table, and leave \nfrequently updated data in the original one. We tested this theoretical \nversion:\n\nQuery returned successfully: 182752 rows affected, 56885 ms execution time.\n\nThis is much faster. However, this table is used by hundreds of \nprograms. 
Anyway, I got the answer to my question.\n\nThank you!\n\n Laszlo\n\n", "msg_date": "Mon, 29 Dec 2008 14:11:48 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow table update - SOLVED!" } ]
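A rough sketch of the split suggested in this thread: keep the heavily indexed static columns apart from the frequently updated ones, so an UPDATE creates a new row version, and new index entries, only for the narrow table. Table, column and index names below are hypothetical, and the view is optional glue for existing applications:

-- Check which of the 15 indexes are actually used before moving anything.
SELECT indexrelname, idx_scan
FROM pg_stat_user_indexes
WHERE relname = 'product'
ORDER BY idx_scan;

CREATE TABLE product_static (
    id           int8 PRIMARY KEY,
    name         text,
    description  text,
    am_style_kw1 text
);

CREATE TABLE product_dynamic (
    id             int8 PRIMARY KEY REFERENCES product_static (id),
    sz_category_id int8
);

CREATE INDEX product_dynamic_szcat_idx ON product_dynamic (sz_category_id);

-- Compatibility view so existing programs can keep reading one relation.
CREATE VIEW product_v AS
SELECT s.id, s.name, s.description, s.am_style_kw1, d.sz_category_id
FROM product_static s JOIN product_dynamic d USING (id);

-- The problem UPDATE now touches only the narrow table and its one index.
UPDATE product_dynamic d
SET sz_category_id = NULL
FROM product_static s
WHERE s.id = d.id
  AND s.am_style_kw1 IS NOT NULL
  AND d.sz_category_id IS NOT NULL;

Writes through the view would need rules or application changes; the sketch glosses over that, which is the "hundreds of programs" cost Laszlo mentions.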
[ { "msg_contents": "\nHello,\n\nTo improve performances, I would like to try moving the temp_tablespaces\nlocations outside of our RAID system.\nIs it a good practice ?\n\n\nThanks,\n\nMarc Mamin\n", "msg_date": "Mon, 22 Dec 2008 15:40:10 +0100", "msg_from": "\"Marc Mamin\" <[email protected]>", "msg_from_op": true, "msg_subject": "temp_tablespaces and RAID" }, { "msg_contents": "On Mon, Dec 22, 2008 at 7:40 AM, Marc Mamin <[email protected]> wrote:\n>\n> Hello,\n>\n> To improve performances, I would like to try moving the temp_tablespaces\n> locations outside of our RAID system.\n> Is it a good practice ?\n\nMaybe yes, maybe no. If you move it to a single slow drive, then it\ncould well slow things down a fair bit when the system needs temp\nspace. OTOH, if the queries that would need temp space are few and far\nbetween, and they're slowing down the rest of the system in weird\nways, it might be the right thing to do.\n\nI'm afraid we don't have enough information to say if it's the right\nthing to do right now, but there are reasons to do it (and not).\n", "msg_date": "Mon, 22 Dec 2008 11:07:24 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: temp_tablespaces and RAID" } ]
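For reference, the mechanics of what Marc is asking about are just a tablespace on the other disk plus temp_tablespaces pointing at it. A minimal sketch, assuming 8.3 (where temp_tablespaces was introduced) and a hypothetical mount point:

-- Directory must already exist and be owned by the postgres OS user.
CREATE TABLESPACE scratch LOCATION '/mnt/single_disk/pg_temp';

-- Per session, or per database/user with ALTER DATABASE/ROLE ... SET,
-- or globally in postgresql.conf:
SET temp_tablespaces = 'scratch';

Whether it actually helps still comes down to how fast that disk is relative to the RAID set, as Scott notes.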
[ { "msg_contents": "Hi everyone!\nI have a very large 2-column table (about 500M records) from which I want to\nremove duplicate records.\n\nI have tried many approaches, but they all take forever.\n\nThe table's definition consists of two short TEXT columns. It is a\ntemporary table generated from a query:\n\nCREATE TEMP TABLE huge_table AS SELECT x, y FROM ... ;\n\nInitially I tried\n\nCREATE TEMP TABLE huge_table AS SELECT DISTINCT x, y FROM ... ;\n\nbut after waiting for nearly an hour I aborted the query, and repeated it\nafter getting rid of the DISTINCT clause.\n\nEverything takes forever with this monster! It's uncanny. Even printing it\nout to a file takes forever, let alone creating an index for it.\n\nAny words of wisdom on how to speed this up would be appreciated.\n\nTIA!\n\nKynn\n\nHi everyone!I have a very large 2-column table (about 500M records) from which I want to remove duplicate records.\nI have tried many approaches, but they all take forever.The table's definition consists of two short TEXT columns.  It is a temporary table generated from a query:\nCREATE TEMP TABLE huge_table AS SELECT x, y FROM ... ;Initially I tried\nCREATE TEMP TABLE huge_table AS SELECT DISTINCT x, y FROM ... ;but after waiting for nearly an hour I aborted the query, and repeated it after getting rid of the DISTINCT clause.\nEverything takes forever with this monster!  It's uncanny.  Even printing it out to a file takes forever, let alone creating an index for it.\nAny words of wisdom on how to speed this up would be appreciated.TIA!Kynn", "msg_date": "Tue, 23 Dec 2008 12:25:48 -0500", "msg_from": "\"Kynn Jones\" <[email protected]>", "msg_from_op": true, "msg_subject": "How to \"unique-ify\" HUGE table?" }, { "msg_contents": "On Tue, Dec 23, 2008 at 10:25 AM, Kynn Jones <[email protected]> wrote:\n> Hi everyone!\n> I have a very large 2-column table (about 500M records) from which I want to\n> remove duplicate records.\n> I have tried many approaches, but they all take forever.\n> The table's definition consists of two short TEXT columns. It is a\n> temporary table generated from a query:\n>\n> CREATE TEMP TABLE huge_table AS SELECT x, y FROM ... ;\n> Initially I tried\n> CREATE TEMP TABLE huge_table AS SELECT DISTINCT x, y FROM ... ;\n> but after waiting for nearly an hour I aborted the query, and repeated it\n> after getting rid of the DISTINCT clause.\n> Everything takes forever with this monster! It's uncanny. Even printing it\n> out to a file takes forever, let alone creating an index for it.\n> Any words of wisdom on how to speed this up would be appreciated.\n\nDid you try cranking up work_mem to something that's a large\npercentage (25 to 50%) of total memory?\n", "msg_date": "Tue, 23 Dec 2008 10:34:28 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to \"unique-ify\" HUGE table?" }, { "msg_contents": "On Tue, 23 Dec 2008 12:25:48 -0500\n\"Kynn Jones\" <[email protected]> wrote:\n> Hi everyone!\n> I have a very large 2-column table (about 500M records) from which I want to\n> remove duplicate records.\n> \n> I have tried many approaches, but they all take forever.\n> \n> The table's definition consists of two short TEXT columns. It is a\n> temporary table generated from a query:\n> \n> CREATE TEMP TABLE huge_table AS SELECT x, y FROM ... ;\n> \n> Initially I tried\n> \n> CREATE TEMP TABLE huge_table AS SELECT DISTINCT x, y FROM ... 
;\n> \n> but after waiting for nearly an hour I aborted the query, and repeated it\n\nDo you have an index on x and y? Also, does this work better?\n\nCREATE TEMP TABLE huge_table AS SELECT x, y FROM ... GROUP BY x, y;\n\nWhat does ANALYZE EXPLAIN have to say?\n\n-- \nD'Arcy J.M. Cain <[email protected]> | Democracy is three wolves\nhttp://www.druid.net/darcy/ | and a sheep voting on\n+1 416 425 1212 (DoD#0082) (eNTP) | what's for dinner.\n", "msg_date": "Tue, 23 Dec 2008 12:39:17 -0500", "msg_from": "\"D'Arcy J.M. Cain\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to \"unique-ify\" HUGE table?" }, { "msg_contents": "You don't say what PG version you are on, but just for kicks you may try\nusing GROUP BY instead of DISTINCT. Yes, the two should perform the\nsame, but with 8.1 (or maybe 8.0) I had seen situations where GROUP BY\nwas faster (admittedly this happened with more complex queries). So, try\nthis:\n\n CREATE TEMP TABLE huge_table AS SELECT x, y FROM foo GROUP BY 1, 2;\n\nNote that you may be tempted to add an index on foo(x,y), but I don't\nthink that helps (or at least I have not been able to hit the index in\nsimilar situations).\n\n\n> -----Original Message-----\n> From: [email protected]\n[mailto:pgsql-performance-\n> [email protected]] On Behalf Of Kynn Jones\n> Sent: Tuesday, December 23, 2008 9:26 AM\n> To: [email protected]\n> Subject: [PERFORM] How to \"unique-ify\" HUGE table?\n> \n> Hi everyone!\n> \n> I have a very large 2-column table (about 500M records) from which I\n> want to remove duplicate records.\n> \n> I have tried many approaches, but they all take forever.\n> \n> The table's definition consists of two short TEXT columns. It is a\n> temporary table generated from a query:\n> \n> \n> CREATE TEMP TABLE huge_table AS SELECT x, y FROM ... ;\n> \n> Initially I tried\n> \n> CREATE TEMP TABLE huge_table AS SELECT DISTINCT x, y FROM ... ;\n> \n> but after waiting for nearly an hour I aborted the query, and repeated\n> it after getting rid of the DISTINCT clause.\n> \n> Everything takes forever with this monster! It's uncanny. Even\n> printing it out to a file takes forever, let alone creating an index\n> for it.\n> \n> Any words of wisdom on how to speed this up would be appreciated.\n> \n> TIA!\n> \n> Kynn\n> \n> \n\n", "msg_date": "Tue, 23 Dec 2008 10:14:24 -0800", "msg_from": "\"George Pavlov\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to \"unique-ify\" HUGE table?" }, { "msg_contents": "On Tue, Dec 23, 2008 at 11:14 AM, George Pavlov <[email protected]> wrote:\n> You don't say what PG version you are on, but just for kicks you may try\n> using GROUP BY instead of DISTINCT. Yes, the two should perform the\n> same, but with 8.1 (or maybe 8.0) I had seen situations where GROUP BY\n> was faster (admittedly this happened with more complex queries). So, try\n> this:\n\nEven in 8.3 it looks like group by is faster. Tested it on a decent\nsized table and group by used a hash agg and ran in ~600 ms, while\ndistinct used a sort and ran in 1300 ms. That was on 500k rows. On a\nmuch larger table, one with about 10M rows, a similar statement runs\nin 1500 ms with group by and in 2390 ms when run with distinct.\n", "msg_date": "Tue, 23 Dec 2008 11:37:07 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to \"unique-ify\" HUGE table?" 
}, { "msg_contents": "Scott Marlowe wrote:\n> On Tue, Dec 23, 2008 at 11:14 AM, George Pavlov <[email protected]> wrote:\n>> You don't say what PG version you are on, but just for kicks you may try\n>> using GROUP BY instead of DISTINCT. Yes, the two should perform the\n>> same, but with 8.1 (or maybe 8.0) I had seen situations where GROUP BY\n>> was faster (admittedly this happened with more complex queries). So, try\n>> this:\n> \n> Even in 8.3 it looks like group by is faster. Tested it on a decent\n> sized table and group by used a hash agg and ran in ~600 ms, while\n> distinct used a sort and ran in 1300 ms. That was on 500k rows. On a\n> much larger table, one with about 10M rows, a similar statement runs\n> in 1500 ms with group by and in 2390 ms when run with distinct.\n\nNot surprising - this is a known limitation in all released versions of \npostgresql (GROUP BY can use hashing and sorting - DISTINCT only \nsorting). 8.4 is going to improve that though.\n\n\nStefan\n", "msg_date": "Tue, 23 Dec 2008 20:47:36 +0100", "msg_from": "Stefan Kaltenbrunner <[email protected]>", "msg_from_op": false, "msg_subject": "Re: How to \"unique-ify\" HUGE table?" }, { "msg_contents": "Thank you all for the very helpful advice. Upping work_mem made it possible\nfor me to generate the table within this century without bringing the server\nto a near standstill. I have not yet experimented with GROUP BY, but I'll\ndo this next.\n\nCheers,\n\nKynn\n\nThank you all for the very helpful advice.  Upping work_mem made it possible for me to generate the table within this century without bringing the server to a near standstill.  I have not yet experimented with GROUP BY, but I'll do this next.\nCheers,Kynn", "msg_date": "Wed, 24 Dec 2008 13:56:28 -0500", "msg_from": "\"Kynn Jones\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: How to \"unique-ify\" HUGE table?" } ]
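Pulling the two working suggestions together (a per-session work_mem bump so the aggregation fits in memory, and GROUP BY so a pre-8.4 planner can use a hash aggregate instead of a sort), the statement ends up looking roughly like this; raw_pairs is a hypothetical stand-in for the original "SELECT x, y FROM ..." source:

SET work_mem = '1GB';        -- session-local, not a server-wide change

CREATE TEMP TABLE huge_table AS
SELECT x, y
FROM raw_pairs
GROUP BY x, y;               -- hashable; DISTINCT before 8.4 always sorts

Comparing EXPLAIN output for the GROUP BY and DISTINCT variants should show HashAggregate versus Sort plus Unique, which is where the difference in the timings above comes from.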
[ { "msg_contents": "(NOTE: I tried sending this email from my excite account and it appears \nto have been blocked for whatever reason. But if the message does get \ndouble posted, sorry for the inconvenience.)\n\nHey all,\n\nMerry Christmas Eve, Happy Holidays, and all that good stuff. At my \nwork, I'm trying to upgrade my system from a 8.1 to 8.3 and I'm dumping \na few large static tables ahead of time to limit the amount of downtime \nduring the upgrade. The trouble is, when I dump the largest table, \nwhich is 1.1 Tb with indexes, I keep getting the following error at the \nsame point in the dump.\n\npg_dump: SQL command failed\npg_dump: Error message from server: ERROR: invalid string enlargement \nrequest size 1\npg_dump: The command was: COPY public.large_table (id, data) TO stdout;\n\nAs you can see, the table is two columns, one column is an integer, and \nthe other is bytea. Each cell in the data column can be as large as \n600mb (we had bigger rows as well but we thought they were the source of \nthe trouble and moved them elsewhere to be dealt with separately.)\n\nWe are dumping the table using this command.\n\n/var/lib/pgsql-8.3.5/bin/pg_dump -O -x -t large_table mydb | gzip -c \n > large_table.pgsql.gz\n\nOriginally we tried dumping the table with \n'/var/lib/pgsql-8.3.5/bin/pg_dump -O -x -t -F c > large_table.dump' but \nthat was to cpu intensive and slowed down other db processes too much. \nIt failed using that command as well, but I believe it is because we did \nnot have enough postgres temp hard drive space. We have since symlinked \nthe postgres temp space to a much bigger file system.\n\nThe stats of the db server is as follows,\n\nProcessors: 4x Opteron 2.4 Ghz cores\nMemory: 16 GB \nDisks: 42x 15K SCSI 146 GB disks.\n\nAlso, the large table has been vacuumed recently. Lastly, we are dumping the table over nfs to very large sata array.\n\n\nThanks again and Happy Holidays,\nTed\n\n", "msg_date": "Wed, 24 Dec 2008 12:31:23 -0500", "msg_from": "Ted Allen <[email protected]>", "msg_from_op": true, "msg_subject": "Troubles dumping a very large table. " }, { "msg_contents": "Ted Allen <[email protected]> writes:\n> during the upgrade. The trouble is, when I dump the largest table, \n> which is 1.1 Tb with indexes, I keep getting the following error at the \n> same point in the dump.\n\n> pg_dump: SQL command failed\n> pg_dump: Error message from server: ERROR: invalid string enlargement \n> request size 1\n> pg_dump: The command was: COPY public.large_table (id, data) TO stdout;\n\n> As you can see, the table is two columns, one column is an integer, and \n> the other is bytea. Each cell in the data column can be as large as \n> 600mb (we had bigger rows as well but we thought they were the source of \n> the trouble and moved them elsewhere to be dealt with separately.)\n\n600mb measured how? I have a feeling the problem is that the value\nexceeds 1Gb when converted to text form...\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 24 Dec 2008 12:49:56 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Troubles dumping a very large table. " }, { "msg_contents": "600mb measured by get_octet_length on data. If there is a better way to measure the row/cell size, please let me know because we thought it was the >1Gb problem too. 
We thought we were being conservative by getting rid of the larger rows but I guess we need to get rid of even more.\n\nThanks,\nTed\n________________________________________\nFrom: Tom Lane [[email protected]]\nSent: Wednesday, December 24, 2008 12:49 PM\nTo: Ted Allen\nCc: [email protected]\nSubject: Re: [PERFORM] Troubles dumping a very large table.\n\nTed Allen <[email protected]> writes:\n> during the upgrade. The trouble is, when I dump the largest table,\n> which is 1.1 Tb with indexes, I keep getting the following error at the\n> same point in the dump.\n\n> pg_dump: SQL command failed\n> pg_dump: Error message from server: ERROR: invalid string enlargement\n> request size 1\n> pg_dump: The command was: COPY public.large_table (id, data) TO stdout;\n\n> As you can see, the table is two columns, one column is an integer, and\n> the other is bytea. Each cell in the data column can be as large as\n> 600mb (we had bigger rows as well but we thought they were the source of\n> the trouble and moved them elsewhere to be dealt with separately.)\n\n600mb measured how? I have a feeling the problem is that the value\nexceeds 1Gb when converted to text form...\n\n regards, tom lane\n", "msg_date": "Fri, 26 Dec 2008 11:02:55 -0500", "msg_from": "Ted Allen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Troubles dumping a very large table. " }, { "msg_contents": "Ted Allen <[email protected]> writes:\n> 600mb measured by get_octet_length on data. If there is a better way to measure the row/cell size, please let me know because we thought it was the >1Gb problem too. We thought we were being conservative by getting rid of the larger rows but I guess we need to get rid of even more.\n\nYeah, the average expansion of bytea data in COPY format is about 3X :-(\nSo you need to get the max row length down to around 300mb. I'm curious\nhow you got the data in to start with --- were the values assembled on\nthe server side?\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 26 Dec 2008 12:38:38 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Troubles dumping a very large table. " }, { "msg_contents": "On Fri, Dec 26, 2008 at 12:38 PM, Tom Lane <[email protected]> wrote:\n> Ted Allen <[email protected]> writes:\n>> 600mb measured by get_octet_length on data. If there is a better way to measure the row/cell size, please let me know because we thought it was the >1Gb problem too. We thought we were being conservative by getting rid of the larger rows but I guess we need to get rid of even more.\n>\n> Yeah, the average expansion of bytea data in COPY format is about 3X :-(\n> So you need to get the max row length down to around 300mb. I'm curious\n> how you got the data in to start with --- were the values assembled on\n> the server side?\n\nWouldn't binary style COPY be more forgiving in this regard? (if so,\nthe OP might have better luck running COPY BINARY)...\n\nThis also goes for libpq traffic..large (>1mb) bytea definately want\nto be passed using the binary switch in the protocol.\n\nmerlin\n", "msg_date": "Fri, 26 Dec 2008 14:50:32 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Troubles dumping a very large table." 
}, { "msg_contents": "\"Merlin Moncure\" <[email protected]> writes:\n> On Fri, Dec 26, 2008 at 12:38 PM, Tom Lane <[email protected]> wrote:\n>> Yeah, the average expansion of bytea data in COPY format is about 3X :-(\n>> So you need to get the max row length down to around 300mb. I'm curious\n>> how you got the data in to start with --- were the values assembled on\n>> the server side?\n\n> Wouldn't binary style COPY be more forgiving in this regard? (if so,\n> the OP might have better luck running COPY BINARY)...\n\nYeah, if he's willing to use COPY BINARY directly. AFAIR there is not\nan option to get pg_dump to use it. But maybe \"pg_dump -s\" together\nwith a manual dump of the table data is the right answer. It probably\nbeats shoving some of the rows aside as he's doing now...\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 26 Dec 2008 15:18:50 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Troubles dumping a very large table. " }, { "msg_contents": "I was hoping use pg_dump and not to have to do a manual dump but if that \nlatest solution (moving rows >300mb elsewhere and dealing with them \nlater) does not work I'll try that. \n\nThanks everyone.\n\nMerlin Moncure wrote:\n> On Fri, Dec 26, 2008 at 12:38 PM, Tom Lane <[email protected]> wrote:\n> \n>> Ted Allen <[email protected]> writes:\n>> \n>>> 600mb measured by get_octet_length on data. If there is a better way to measure the row/cell size, please let me know because we thought it was the >1Gb problem too. We thought we were being conservative by getting rid of the larger rows but I guess we need to get rid of even more.\n>>> \n>> Yeah, the average expansion of bytea data in COPY format is about 3X :-(\n>> So you need to get the max row length down to around 300mb. I'm curious\n>> how you got the data in to start with --- were the values assembled on\n>> the server side?\n>> \n>\n> Wouldn't binary style COPY be more forgiving in this regard? (if so,\n> the OP might have better luck running COPY BINARY)...\n>\n> This also goes for libpq traffic..large (>1mb) bytea definately want\n> to be passed using the binary switch in the protocol.\n>\n> merlin\n> \n\n", "msg_date": "Sun, 28 Dec 2008 22:11:47 -0500", "msg_from": "Ted Allen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Troubles dumping a very large table." }, { "msg_contents": "Hi,\n\nLe vendredi 26 décembre 2008, Tom Lane a écrit :\n> Yeah, if he's willing to use COPY BINARY directly. AFAIR there is not\n> an option to get pg_dump to use it. \n\nWould it be possible to consider such an additional switch to pg_dump?\n\nOf course the DBA has to know when to use it safely, but if the plan is to be \nable to restore later dump on the same machine to recover from some human \nerror (oops, forgot the WHERE clause to this DELETE statement), it seems it \nwould be a good idea.\n\nRegards,\n-- \ndim", "msg_date": "Mon, 29 Dec 2008 13:48:37 +0100", "msg_from": "Dimitri Fontaine <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Troubles dumping a very large table." } ]
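An outline of the manual route discussed above, assuming 8.x psql; the 300 MB threshold follows from Tom's point that bytea expands roughly 3x in text-format COPY, so anything much larger can push a single row past the 1 GB limit:

-- Find the rows that are still too large for a text-format dump.
SELECT id, octet_length(data) AS bytes
FROM public.large_table
WHERE octet_length(data) > 300 * 1024 * 1024;

-- Schema only, via the shell:  pg_dump -s -t large_table mydb > large_table_schema.sql

-- Data in binary COPY format (no 3x expansion), streamed through psql:
\copy public.large_table to 'large_table.copyb' with binary

-- On the 8.3 side, after restoring the schema dump:
\copy public.large_table from 'large_table.copyb' with binary

This keeps pg_dump for everything else and only special-cases the one huge table. Binary COPY output is more version- and architecture-sensitive than text, so it is worth testing a restore of a small sample first.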
[ { "msg_contents": "Hello.\n\nI am trying to tune PostgreSQL write parameters to make writing operation\nfast on a heavy-loaded database (a lot of inserts/updates).\nAfter resetting the pg_stat_bgwriter statistics (I do it by deleting\nglobal/pgstat.stat file and restarting PostgreSQL) I monitor the following:\n\n# select * from pg_stat_bgwriter;\n checkpoints_timed | checkpoints_req | buffers_checkpoint | buffers_clean |\nmaxwritten_clean | buffers_backend | buffers_alloc\n-------------------+-----------------+--------------------+---------------+------------------+-----------------+---------------\n 0 | 0 | 0 | 0\n| 0 | 164 | 6653\n\nSo, you see that just after resetting the statistics we have a large value\nin buffers_backend. Why?\nDocumentation:\nhttp://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htm\nsays that \"buffers_backend\n\nHello.I am trying to tune PostgreSQL write parameters to make writing operation fast on a heavy-loaded database (a lot of inserts/updates).After resetting the pg_stat_bgwriter statistics (I do it by deleting global/pgstat.stat file and restarting PostgreSQL) I monitor the following:\n# select * from pg_stat_bgwriter; checkpoints_timed | checkpoints_req | buffers_checkpoint | buffers_clean | maxwritten_clean | buffers_backend | buffers_alloc-------------------+-----------------+--------------------+---------------+------------------+-----------------+---------------\n                 0 |               0 |                  0 |             0 |                0 |             164 |          6653So, you see that just after resetting the statistics we have a large value in buffers_backend. Why?\nDocumentation: http://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htmsays that \"buffers_backend", "msg_date": "Fri, 26 Dec 2008 00:37:08 +0300", "msg_from": "\"Dmitry Koterov\" <[email protected]>", "msg_from_op": true, "msg_subject": "Bgwriter and pg_stat_bgwriter.buffers_clean aspects" }, { "msg_contents": "Hello.\n\nI am trying to tune PostgreSQL write parameters to make writing operation\nfast on a heavy-loaded database (a lot of inserts/updates).\nAfter resetting the pg_stat_bgwriter statistics (I do it by deleting\nglobal/pgstat.stat file and restarting PostgreSQL) I monitor the following:\n\n# select * from pg_stat_bgwriter;\n checkpoints_timed | checkpoints_req | buffers_checkpoint | buffers_clean |\nmaxwritten_clean | buffers_backend | buffers_alloc\n-------------------+-----------------+--------------------+---------------+------------------+-----------------+---------------\n 8 | 0 | 19092 | 0\n| 0 | 2285 | 30148\n\nSo, you see that some time after resetting the statistics we have:\n- a large value in buffers_backend;\n- a zero buffers_clean.\n\nWhy?\n\nDocumentation:\nhttp://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htm<http://www.westnet.com/%7Egsmith/content/postgresql/chkp-bgw-83.htm>\nsays that \"buffers_backend ... [is a number of] times a database backend\n(probably the client itself) had to write a page in order to make space for\nthe new allocation\", and \"buffers_clean ... [means that] the background\nwriter cleaned ... 
buffers (cleaned=wrote out dirty ones) during that time\".\n\nWhat I am trying to achieve is that all writing operation are performed\nasynchronously and mostly flushed to the disk before a CHECKPOINT occurred,\nso CHECKPOINT is cheap thanks to bgwiter work.\n\nCould you please explain what happened and what large buffers_backend and\nzero buffers_clean mean?\n\n\nRelated parameters:\n\nshared_buffers = 512MB\nfsync = on\nsynchronous_commit = off\nwal_writer_delay = 2000ms\ncheckpoint_segments = 20\ncheckpoint_timeout = 1min\ncheckpoint_completion_target = 0.8\ncheckpoint_warning = 1min\nbgwriter_delay = 10ms\nbgwriter_lru_maxpages = 1000\nbgwriter_lru_multiplier = 10\n\nHello.I am trying to tune PostgreSQL write parameters to make writing operation fast on a heavy-loaded database (a lot of inserts/updates).After resetting the pg_stat_bgwriter statistics (I do it by deleting global/pgstat.stat file and restarting PostgreSQL) I monitor the following:\n# select * from pg_stat_bgwriter; checkpoints_timed | checkpoints_req | buffers_checkpoint | buffers_clean | maxwritten_clean | buffers_backend | buffers_alloc-------------------+-----------------+--------------------+---------------+------------------+-----------------+---------------\n                 8 |               0 |              19092 |             0 |                0 |            2285 |         30148So, you see that some time after resetting the statistics we have:- a large value in buffers_backend;\n- a zero buffers_clean.Why?\nDocumentation: http://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htmsays that \"buffers_backend ... [is a number of]  times a database backend\n(probably the client itself) had to write a page in order to make space\nfor the new allocation\", and \"buffers_clean ... [means that] the background writer cleaned ... buffers (cleaned=wrote out dirty\nones) during that time\".What I am trying to achieve is that all writing operation are performed asynchronously and mostly flushed to the disk before a CHECKPOINT occurred, so CHECKPOINT is cheap thanks to bgwiter work.\nCould you please explain what happened and what large buffers_backend and zero buffers_clean mean?Related parameters:shared_buffers = 512MBfsync = on              synchronous_commit = off\nwal_writer_delay = 2000mscheckpoint_segments = 20          checkpoint_timeout = 1min         checkpoint_completion_target = 0.8checkpoint_warning = 1min         bgwriter_delay = 10ms       bgwriter_lru_maxpages = 1000\nbgwriter_lru_multiplier = 10", "msg_date": "Fri, 26 Dec 2008 00:43:44 +0300", "msg_from": "\"Dmitry Koterov\" <[email protected]>", "msg_from_op": true, "msg_subject": "Bgwriter and pg_stat_bgwriter.buffers_clean aspects" }, { "msg_contents": "Hello.\n\n(Sorry, I have sent this letter to pgsql-general@ first and only then -\nnoticed that there is a special performance mailing list. 
So I post it here\nnow.)\n\nI am trying to tune PostgreSQL write parameters to make writing operation\nfast on a heavy-loaded database (a lot of inserts/updates).\nAfter resetting the pg_stat_bgwriter statistics (I do it by deleting\nglobal/pgstat.stat file and restarting PostgreSQL) I monitor the following:\n\n# select * from pg_stat_bgwriter;\n checkpoints_timed | checkpoints_req | buffers_checkpoint | buffers_clean |\nmaxwritten_clean | buffers_backend | buffers_alloc\n-------------------+-----------------+--------------------+---------------+------------------+-----------------+---------------\n 8 | 0 | 19092 | 0\n| 0 | 2285 | 30148\n\nSo, you see that some time after resetting the statistics we have:\n- a large value in buffers_backend;\n- a zero buffers_clean.\n\nWhy?\n\nDocumentation:\nhttp://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htm<http://www.westnet.com/%7Egsmith/content/postgresql/chkp-bgw-83.htm>\nsays that \"buffers_backend ... [is a number of] times a database backend\n(probably the client itself) had to write a page in order to make space for\nthe new allocation\", and \"buffers_clean ... [means that] the background\nwriter cleaned ... buffers (cleaned=wrote out dirty ones) during that time\".\n\nWhat I am trying to achieve is that all writing operation are performed\nasynchronously and mostly flushed to the disk before a CHECKPOINT occurred,\nso CHECKPOINT is cheap thanks to bgwiter work.\n\nCould you please explain what happened and what large buffers_backend and\nzero buffers_clean mean?\n\n\nRelated parameters:\n\nshared_buffers = 512MB\nfsync = on\nsynchronous_commit = off\nwal_writer_delay = 2000ms\ncheckpoint_segments = 20\ncheckpoint_timeout = 1min\ncheckpoint_completion_target = 0.8\ncheckpoint_warning = 1min\nbgwriter_delay = 10ms\nbgwriter_lru_maxpages = 1000\nbgwriter_lru_multiplier = 10\n\nHello.(Sorry, I have sent this letter to pgsql-general@ first and only then - noticed that there is a special performance mailing list. So I post it here now.)\nI am trying to tune PostgreSQL write parameters to make writing operation fast on a heavy-loaded database (a lot of inserts/updates).After resetting the pg_stat_bgwriter statistics (I do it by deleting global/pgstat.stat file and restarting PostgreSQL) I monitor the following:\n# select * from pg_stat_bgwriter; checkpoints_timed | checkpoints_req | buffers_checkpoint | buffers_clean | maxwritten_clean | buffers_backend | buffers_alloc-------------------+-----------------+--------------------+---------------+------------------+-----------------+---------------\n\n                 8 |               0 |              19092 |             0 |                0 |            2285 |         30148So, you see that some time after resetting the statistics we have:- a large value in buffers_backend;\n\n- a zero buffers_clean.Why?\nDocumentation: http://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htmsays that \"buffers_backend ... [is a number of]  times a database backend\n(probably the client itself) had to write a page in order to make space\nfor the new allocation\", and \"buffers_clean ... [means that] the background writer cleaned ... 
buffers (cleaned=wrote out dirty\nones) during that time\".What I am trying to achieve is that all writing operation are performed asynchronously and mostly flushed to the disk before a CHECKPOINT occurred, so CHECKPOINT is cheap thanks to bgwiter work.\nCould you please explain what happened and what large buffers_backend and zero buffers_clean mean?Related parameters:shared_buffers = 512MBfsync = on              synchronous_commit = off\n\nwal_writer_delay = 2000mscheckpoint_segments = 20          checkpoint_timeout = 1min         checkpoint_completion_target = 0.8checkpoint_warning = 1min         bgwriter_delay = 10ms       bgwriter_lru_maxpages = 1000\n\nbgwriter_lru_multiplier = 10", "msg_date": "Fri, 26 Dec 2008 02:08:16 +0300", "msg_from": "\"Dmitry Koterov\" <[email protected]>", "msg_from_op": true, "msg_subject": "Bgwriter and pg_stat_bgwriter.buffers_clean aspects" }, { "msg_contents": "On Fri, 26 Dec 2008, Dmitry Koterov wrote:\n\n> checkpoint_timeout = 1min\n\nYour system is having a checkpoint every minute. You can't do that and \nexpect the background writer to do anything useful. As shown in your \nstats, all the dirty buffers are getting written out by those constant \ncheckpoints.\n\n> What I am trying to achieve is that all writing operation are performed \n> asynchronously and mostly flushed to the disk before a CHECKPOINT \n> occurred, so CHECKPOINT is cheap thanks to bgwiter work.\n\nThe background writer only tries to write out things that haven't been \naccessed recently, because the tests we did suggested the duplicated \nwrites from any other approach negated the benefits from writing them \nearlier. So it's not possible to get all the buffers clean before the \ncheckpoint starts, the ones that have been recently used can't get written \nexcept during a checkpoint.\n\nWhat does work instead is to spread the checkpoint writes over a long \nperiod, such that they are an asynchronous trickle of smaller writes. \nFor that to work, you need to set checkpoint_timeout to a fairly long \nperiod (at least the default of 5 minutes if not longer) and \ncheckpoint_segments to something fairly large. You can know the segments \nare large enough when most of the checkpoints show up in the \ncheckpoints_timed count.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Sun, 28 Dec 2008 17:04:47 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Bgwriter and pg_stat_bgwriter.buffers_clean aspects" }, { "msg_contents": "Thank you.\n\nBut why buffers_backend is so high? As I understood from your article,\nbuffers_backend shows the number of writes immediately caused by any write\noperations, e.g. when an INSERT has to flush something on disk, because it\nhas no space left for a new data in shared buffers. I suppose these flushes\nslow down operating greatly, and I realy see this: in my environment INSERT\nis usually performed in 1-2 ms, but sometimes it is executed in 5-6 seconds\nor even more (10 seconds), which touches statement_timeout barrier and fails\nthe whole transaction.\n\nThe main purpose is to minimize INSERT/UPDATE time or, at least, make it\nmore predictable.\n\nCould you please give an advice how to achieve this?\n\n\nOn Mon, Dec 29, 2008 at 1:04 AM, Greg Smith <[email protected]> wrote:\n\n> On Fri, 26 Dec 2008, Dmitry Koterov wrote:\n>\n> checkpoint_timeout = 1min\n>>\n>\n> Your system is having a checkpoint every minute. 
You can't do that and\n> expect the background writer to do anything useful. As shown in your stats,\n> all the dirty buffers are getting written out by those constant checkpoints.\n>\n> What I am trying to achieve is that all writing operation are performed\n>> asynchronously and mostly flushed to the disk before a CHECKPOINT occurred,\n>> so CHECKPOINT is cheap thanks to bgwiter work.\n>>\n>\n> The background writer only tries to write out things that haven't been\n> accessed recently, because the tests we did suggested the duplicated writes\n> from any other approach negated the benefits from writing them earlier. So\n> it's not possible to get all the buffers clean before the checkpoint starts,\n> the ones that have been recently used can't get written except during a\n> checkpoint.\n>\n> What does work instead is to spread the checkpoint writes over a long\n> period, such that they are an asynchronous trickle of smaller writes. For\n> that to work, you need to set checkpoint_timeout to a fairly long period (at\n> least the default of 5 minutes if not longer) and checkpoint_segments to\n> something fairly large. You can know the segments are large enough when\n> most of the checkpoints show up in the checkpoints_timed count.\n>\n> --\n> * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n\nThank you.But why buffers_backend is so high? As I understood from your article, buffers_backend shows the number of writes immediately caused by any write operations, e.g. when an INSERT has to flush something on disk, because it has no space left for a new data in shared buffers. I suppose these flushes slow down operating greatly, and I realy see this: in my environment INSERT is usually performed in 1-2 ms, but sometimes it is executed in 5-6 seconds or even more (10 seconds), which touches statement_timeout barrier and fails the whole transaction.\nThe main purpose is to minimize INSERT/UPDATE time or, at least, make it more predictable.Could you please give an advice how to achieve this?On Mon, Dec 29, 2008 at 1:04 AM, Greg Smith <[email protected]> wrote:\nOn Fri, 26 Dec 2008, Dmitry Koterov wrote:\n\n\ncheckpoint_timeout = 1min\n\n\nYour system is having a checkpoint every minute.  You can't do that and expect the background writer to do anything useful.  As shown in your stats, all the dirty buffers are getting written out by those constant checkpoints.\n\n\n\nWhat I am trying to achieve is that all writing operation are performed asynchronously and mostly flushed to the disk before a CHECKPOINT occurred, so CHECKPOINT is cheap thanks to bgwiter work.\n\n\nThe background writer only tries to write out things that haven't been accessed recently, because the tests we did suggested the duplicated writes from any other approach negated the benefits from writing them earlier.  So it's not possible to get all the buffers clean before the checkpoint starts, the ones that have been recently used can't get written except during a checkpoint.\n\nWhat does work instead is to spread the checkpoint writes over a long period, such that they are an asynchronous trickle of smaller writes. For that to work, you need to set checkpoint_timeout to a fairly long period (at least the default of 5 minutes if not longer) and checkpoint_segments to something fairly large.  
You can know the segments are large enough when most of the checkpoints show up in the checkpoints_timed count.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n\n-- \nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Tue, 6 Jan 2009 14:23:16 +0300", "msg_from": "\"Dmitry Koterov\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Bgwriter and pg_stat_bgwriter.buffers_clean aspects" }, { "msg_contents": "On Tue, 6 Jan 2009, Dmitry Koterov wrote:\n\n> But why buffers_backend is so high? As I understood from your article,\n> buffers_backend shows the number of writes immediately caused by any write\n> operations, e.g. when an INSERT has to flush something on disk, because it\n> has no space left for a new data in shared buffers. I suppose these flushes\n> slow down operating greatly\n\nIn normal operation, those writes are cached by the operating system, such \nthat most backend writes will return very quickly.\n\n> I realy see this: in my environment INSERT is usually performed in 1-2 \n> ms, but sometimes it is executed in 5-6 seconds or even more (10 \n> seconds)\n\nWhen activity blocks like this, the most likely cause is because \neverything is blocked waiting for the fsync at the end of a checkpoint \nthat forces all writes out to disk. The only good way to make that go \naway is to spread the checkpoint over a long period of time. Your \nconfiguration is forcing such a syncronization every minute, which makes \nthat sort of blocking more likely to happen, merely because there so many \nchances for it.\n\nStandard good practice here for 8.3 is to set checkpoint_timeout and \ncheckpoint_segments to as high as you can stand, where the downsides to \nincreasing them is that more disk space is wasted and recovery time goes \nup. I think you're chasing after the wrong cause here and ignoring the \nobvious one. Backend writes should not cause a long stall, and tuning up \nthe background writer to the extreme you have is counterproductive (all \nyour bgwriter_* parameters would be far better off at the default than the \nextremely aggressive ones you've set them to). Meanwhile, reducing \ncheckpoint_timeout can absolutely cause what you're seeing.\n\nOne other thing: if this is a Linux system running a kernel before 2.6.22 \nand you have a lot of RAM, there's a known problem with that combination \nthat can cause writes to hang for a long time. I've got a long article \nabout it http://www.westnet.com/~gsmith/content/linux-pdflush.htm and a \nquicker run through identifying if you're running into issue and resolving \nit at \nhttp://notemagnet.blogspot.com/2008/08/linux-write-cache-mystery.html\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Wed, 7 Jan 2009 01:23:27 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Bgwriter and pg_stat_bgwriter.buffers_clean aspects" } ]
[ { "msg_contents": "We have serveral table where the index size is much bigger than the \ntable size.\n\nExample:\n\nselect count(*) from product_price -- 2234244\n\nTable size: 400 MB\nIndex size: 600 MB\n\nAfter executing \"reindex table product_price\", index size reduced to 269MB.\n\nI believe this affects performance.\n\nVacuuming a table does not rebuild the indexes, am I right? I'm not sure \nif I need to do this manually, or is this the result of another problem? \n(For example, too many open transactions, frequent updates?)\n\n\nThanks\n\n", "msg_date": "Tue, 30 Dec 2008 10:05:59 +0100", "msg_from": "Laszlo Nagy <[email protected]>", "msg_from_op": true, "msg_subject": "Big index sizes" }, { "msg_contents": "Laszlo Nagy a �crit :\n> We have serveral table where the index size is much bigger than the\n> table size.\n> \n> Example:\n> \n> select count(*) from product_price -- 2234244\n> \n> Table size: 400 MB\n> Index size: 600 MB\n> \n> After executing \"reindex table product_price\", index size reduced to 269MB.\n> \n> I believe this affects performance.\n> \n> Vacuuming a table does not rebuild the indexes, am I right?\n\nNeither VACUUM nor VACUUM FULL rebuild the indexes. CLUSTER and REINDEX do.\n\n> I'm not sure\n> if I need to do this manually, or is this the result of another problem?\n\nYou need to do this manually.\n\n> (For example, too many open transactions, frequent updates?)\n> \n\nRegards.\n\n\n-- \nGuillaume.\n http://www.postgresqlfr.org\n http://dalibo.com\n", "msg_date": "Tue, 30 Dec 2008 10:21:11 +0100", "msg_from": "Guillaume Lelarge <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Big index sizes" }, { "msg_contents": "On Tue, Dec 30, 2008 at 4:05 AM, Laszlo Nagy <[email protected]> wrote:\n> We have serveral table where the index size is much bigger than the table\n> size.\n\nYou'll usually get index bloat in roughly the same measure that you\nget table bloat. If you always (auto)vacuum regularly, then the\namount of bloat in your indexes probably reflects the amount of bloat\nthat your tables typically accumulate between vacuums, so reindexing\nwon't help much. The indexes will just re-bloat back to about the\nsame point over the next vacuum cycle or two.\n\nOn the other hand, if your table has shrunk considerably, or if you've\njust removed a lot of bloat by vacuuming, REINDEX is often warranted.\n\nIt would be nice if the system could automatically notice and correct\nsituations that currently require VACUUM FULL or REINDEX, but it\ndoesn't.\n\n...Robert\n", "msg_date": "Tue, 30 Dec 2008 09:43:28 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Big index sizes" }, { "msg_contents": "Guillaume Lelarge <[email protected]> writes:\n> Laszlo Nagy a �crit :\n>> We have serveral table where the index size is much bigger than the\n>> table size.\n>> ...\n>> Vacuuming a table does not rebuild the indexes, am I right?\n\n> Neither VACUUM nor VACUUM FULL rebuild the indexes. CLUSTER and REINDEX do.\n\nIn fact, VACUUM FULL tends to make indexes *more* bloated not less so.\nOne fairly likely explanation for how you got into this situation is\novereager use of VACUUM FULL.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 30 Dec 2008 10:39:21 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Big index sizes " } ]
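To see how much of the 600 MB is bloat and to rebuild without the exclusive lock a plain REINDEX takes, something along these lines works on 8.2 and later; the index and column names in the last step are hypothetical:

-- Table size versus everything else (indexes plus TOAST).
SELECT pg_size_pretty(pg_relation_size('product_price')) AS table_size,
       pg_size_pretty(pg_total_relation_size('product_price')
                      - pg_relation_size('product_price')) AS indexes_and_toast;

-- Per-index sizes.
SELECT indexrelname,
       pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
WHERE relname = 'product_price';

-- Online rebuild: build a fresh copy concurrently, drop the bloated one, rename.
CREATE INDEX CONCURRENTLY product_price_product_id_idx2
    ON product_price (product_id);
DROP INDEX product_price_product_id_idx;
ALTER INDEX product_price_product_id_idx2 RENAME TO product_price_product_id_idx;

As noted above, if the table keeps bloating between vacuums the index will drift back toward its old size, so this mostly pays off after a one-off cleanup.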
[ { "msg_contents": "I have to insert rows to table with 95% primary key unique_violation.\n\nI've tested 2 examples below:\n\n1)\nBEGIN\n INSERT INTO main (name, created) VALUES (i_name, CURRENT_TIMESTAMP\nAT TIME ZONE 'GMT');\n EXCEPTION WHEN UNIQUE_VIOLATION THEN\n RETURN 'error: already exists';\nEND;\nRETURN 'ok: store';\n\n2)\nPERFORM 1 FROM main WHERE name = i_name;\nIF NOT FOUND THEN\n INSERT INTO main (name, created) VALUES (i_name, CURRENT_TIMESTAMP\nAT TIME ZONE 'GMT');\n RETURN 'ok: stored';\nELSE\n RETURN 'error: already exists';\nEND IF;\n\nThe first one performs about 20% slower, have 5 times more disk i/o\nwrite operations.\nThe second one uses 20% more cpu.\nIs it because of raid1 and slow writes?\nWhat is the better solution to fit best performance?\nPg version 8.3, table size will probably grow to 100M rows\n\n\n", "msg_date": "Tue, 30 Dec 2008 13:41:42 +0300", "msg_from": "Anton Bogdanovitch <[email protected]>", "msg_from_op": true, "msg_subject": "perform 1 check vs exception when unique_violation" }, { "msg_contents": "On Tue, Dec 30, 2008 at 5:41 AM, Anton Bogdanovitch\n<[email protected]> wrote:\n> I have to insert rows to table with 95% primary key unique_violation.\n\nIf you're inserting a lot of rows at once, I think you're probably\nbetter off loading all of the data into a side table that does not\nhave a primary key, and then writing one statement to remove the\nduplicates and do all the inserts at once.\n\nINSERT INTO main (name, created)\nSELECT\n s.name, CURRENT_TIMESTAMP\nFROM\n (SELECT DISTINCT ON (name) FROM sidetable) s -- nuke duplicate\nnames within sidetable\n LEFT JOIN main m ON s.name = m.name\n WHERE m.name IS NULL; -- nuke names in sidetable that are already in main\n\nI've usually found that any kind of loop in PL/pgsql is mind-numbingly\nslow compared to doing the same thing as a single query.\n\n...Robert\n", "msg_date": "Tue, 30 Dec 2008 09:50:55 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: perform 1 check vs exception when unique_violation" } ]
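For the bulk-load case, the side-table approach sketched above can be written out as below (the inner SELECT in the earlier message was missing its column list); staging is a hypothetical load table, and the target follows the thread's main(name, created):

CREATE TEMP TABLE staging (name text);
-- COPY staging FROM ...;   -- load the incoming batch with no constraints

INSERT INTO main (name, created)
SELECT s.name, CURRENT_TIMESTAMP AT TIME ZONE 'GMT'
FROM (SELECT DISTINCT name FROM staging) s        -- de-duplicate within the batch
LEFT JOIN main m ON m.name = s.name
WHERE m.name IS NULL;                             -- skip names already stored

This is still racy against concurrent inserters, so the per-row path still needs either the UNIQUE_VIOLATION handler or a lock during the load as a safety net.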
[ { "msg_contents": "Hi, I am re-posting my question here after trying to find a solution\nin the PHP pgsql list with no luck.\n\nI am experiencing some performance issues that I think are stemming\nfrom prepared statements. I have a pretty simple query:\n\nSELECT cl.idOffer,cl.idaffiliate ,cl.subid,cl.datetime\nFROM click AS cl LEFT JOIN conversion AS co ON cl.clickgenerated =\nco.clickgenerated\nWHERE cl.\"date\" >= '2008-12-01'\nAND cl.\"date\" <= '2008-12-23'\nAND cl.idAffiliate = 49978\nLIMIT 10;\n\nRun times:\n\n- sub-second when issued from the command line (not prepared)\n- runs sub-second from our PHP application if I prepend the query with\n\"EXPLAIN ANALYZE\", and looking at the resulting plan, it shows the\nsame plan as when it runs quickly from the command line.\n- runs sub-second if I remove the \"prepare\" function from the PHP\napplication and execute the query with the variables in-line.\n\n- takes 200+ seconds when run from the command line inside a prepared\nstatement (see query below)\n- takes over 200s when run from our application, within the PHP PDO\nprepared function (same as preparing from the command line)\n\nI would like to continue to use bind variables to prevent sql\ninjection, but I'd like to force a plan re-parse for every single\nquery (if necessary?)\n\nDoes anyone have any ideas?\n\npostgresql 8.2.11, freshly loaded db, vacuumed and analyzed.\nAll indices in the plans are regular b-trees.\n\n\n-- good plan, from non-prepared statement\n--\ndev=# EXPLAIN ANALYZE SELECT cl.idOffer,cl.idaffiliate ,cl.subid,cl.datetime\ndev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickgenerated\n= co.clickgenerated\ndev-# WHERE cl.\"date\" >= '2008-12-01'\ndev-# AND cl.\"date\" <= '2008-12-23'\ndev-# AND cl.idAffiliate = 49978\ndev-# LIMIT 10;\n\n Limit (cost=0.00..30.52 rows=10 width=48) (actual time=0.253..0.429\nrows=10 loops=1)\n -> Nested Loop Left Join (cost=0.00..2613524.29 rows=856328\nwidth=48) (actual time=0.251..0.416 rows=10 loops=1)\n -> Append (cost=0.00..77406.15 rows=19179 width=80) (actual\ntime=0.226..0.256 rows=10 loops=1)\n -> Seq Scan on click cl (cost=0.00..15.07 rows=1\nwidth=80) (actual time=0.001..0.001 rows=0 loops=1)\n Filter: ((date >= '2008-12-01'::date) AND (date\n<= '2008-12-23'::date) AND (idaffiliate = 49978))\n -> Bitmap Heap Scan on click_00030 cl\n(cost=406.48..77391.08 rows=19178 width=46) (actual time=0.222..0.241\nrows=10 loops=1)\n Recheck Cond: (idaffiliate = 49978)\n Filter: ((date >= '2008-12-01'::date) AND (date\n<= '2008-12-23'::date))\n -> Bitmap Index Scan on\nclick_00030_idaffiliate_idx (cost=0.00..401.68 rows=21355 width=0)\n(actual time=0.138..0.138 rows=484 loops=1)\n Index Cond: (idaffiliate = 49978)\n -> Index Scan using conversion_clickgenerated_idx on\n\"conversion\" co (cost=0.00..131.67 rows=45 width=12) (actual\ntime=0.014..0.014 rows=0 loops=10)\n Index Cond: ((cl.clickgenerated)::text =\n(co.clickgenerated)::text)\n Total runtime: 0.495 ms\n\n\n-- bad plan, from prepared statement\n--\ndev=# prepare fooplan (date,date,int,int) as\ndev-# SELECT cl.idOffer AS campaign, cl.idAffiliate AS affiliate,\ncl.idCreative AS creative, cl.subid, cl.datetime\ndev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickGenerated\n= co.clickGenerated\ndev-# WHERE cl.\"date\" >= $1\ndev-# AND cl.\"date\" <= $2\ndev-# AND cl.idAffiliate = $3\ndev-# LIMIT $4;\nERROR: prepared statement \"fooplan\" already exists\ndev=# explain analyze execute fooplan ('2008-12-01','2008-12-23',49978,10);\n\n\n QUERY 
PLAN\n------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Limit (cost=0.00..15275.74 rows=3277 width=227) (actual\ntime=201350.494..201350.765 rows=10 loops=1)\n -> Nested Loop Left Join (cost=0.00..152771.39 rows=32773\nwidth=227) (actual time=201350.489..201350.748 rows=10 loops=1)\n -> Append (cost=0.00..16031.56 rows=734 width=180) (actual\ntime=201350.438..201350.542 rows=10 loops=1)\n -> Seq Scan on click cl (cost=0.00..15.07 rows=1\nwidth=180) (actual time=0.001..0.001 rows=0 loops=1)\n Filter: ((date >= $1) AND (date <= $2) AND\n(idaffiliate = $3))\n -> Bitmap Heap Scan on click_00028 cl\n(cost=6701.97..8179.43 rows=372 width=177) (actual time=0.060..0.060\nrows=0 loops=1)\n Recheck Cond: ((idaffiliate = $3) AND (date >=\n$1) AND (date <= $2))\n -> BitmapAnd (cost=6701.97..6701.97 rows=372\nwidth=0) (actual time=0.058..0.058 rows=0 loops=1)\n -> Bitmap Index Scan on\nclick_00028_idaffiliate_idx (cost=0.00..1384.22 rows=74456 width=0)\n(actual time=0.038..0.038 rows=86 loops=1)\n Index Cond: (idaffiliate = $3)\n -> Bitmap Index Scan on\nclick_00028_date_idx (cost=0.00..5317.31 rows=253151 width=0) (actual\ntime=0.010..0.010 rows=0 loops=1)\n Index Cond: ((date >= $1) AND (date <= $2))\n -> Bitmap Heap Scan on click_00030 cl\n(cost=6383.03..7811.35 rows=360 width=141) (actual\ntime=201350.373..201350.466 rows=10 loops=1)\n Recheck Cond: ((idaffiliate = $3) AND (date >=\n$1) AND (date <= $2))\n -> BitmapAnd (cost=6383.03..6383.03 rows=360\nwidth=0) (actual time=201350.280..201350.280 rows=0 loops=1)\n -> Bitmap Index Scan on\nclick_00030_idaffiliate_idx (cost=0.00..1337.96 rows=72058 width=0)\n(actual time=0.101..0.101 rows=484 loops=1)\n Index Cond: (idaffiliate = $3)\n -> Bitmap Index Scan on\nclick_00030_date_idx (cost=0.00..5044.64 rows=240312 width=0) (actual\ntime=201347.064..201347.064 rows=43352606 loops=1)\n Index Cond: ((date >= $1) AND (date <= $2))\n -> Index Scan using click_current_massive_idx on\nclick_current cl (cost=0.00..25.70 rows=1 width=152) (never executed)\n Index Cond: ((date >= $1) AND (date <= $2) AND\n(idaffiliate = $3))\n -> Index Scan using conversion_clickgenerated_idx on\n\"conversion\" co (cost=0.00..185.51 rows=45 width=16) (actual\ntime=0.015..0.016 rows=0 loops=10)\n Index Cond: ((cl.clickgenerated)::text =\n(co.clickgenerated)::text)\n Total runtime: 201350.887 ms\n(24 rows)\n\nTime: 201351.556 ms\ndev=# deallocate fooplan;\nDEALLOCATE\n\n\nThanks!\n", "msg_date": "Tue, 30 Dec 2008 10:59:23 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": true, "msg_subject": "Poor plan choice in prepared statement" }, { "msg_contents": "On Tue, Dec 30, 2008 at 1:59 PM, bricklen <[email protected]> wrote:\n> Hi, I am re-posting my question here after trying to find a solution\n> in the PHP pgsql list with no luck.\n>\n> I am experiencing some performance issues that I think are stemming\n> from prepared statements. I have a pretty simple query:\n> -- bad plan, from prepared statement\n> --\n> dev=# prepare fooplan (date,date,int,int) as\n> dev-# SELECT cl.idOffer AS campaign, cl.idAffiliate AS affiliate,\n> cl.idCreative AS creative, cl.subid, cl.datetime\n> dev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickGenerated\n> = co.clickGenerated\n> dev-# WHERE cl.\"date\" >= $1\n> dev-# AND cl.\"date\" <= $2\n> dev-# AND cl.idAffiliate = $3\n> dev-# LIMIT $4;\n\nYour problem is that the query as written is hard to plan. 
The\ndatabase has no idea what you pass in, it has to guess. (IMO, It\nalmost always guesses wrong...I think it should assume 1 row\nreturned). Also, the db has no idea what you want to pass in at plan\ntime for date.\n\nwhat indexes do you have on click?\n\nmerlin\n", "msg_date": "Tue, 30 Dec 2008 14:42:49 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "Hi Merlin,\n\nOn Tue, Dec 30, 2008 at 11:42 AM, Merlin Moncure <[email protected]> wrote:\n> On Tue, Dec 30, 2008 at 1:59 PM, bricklen <[email protected]> wrote:\n>> Hi, I am re-posting my question here after trying to find a solution\n>> in the PHP pgsql list with no luck.\n>>\n>> I am experiencing some performance issues that I think are stemming\n>> from prepared statements. I have a pretty simple query:\n>> -- bad plan, from prepared statement\n>> --\n>> dev=# prepare fooplan (date,date,int,int) as\n>> dev-# SELECT cl.idOffer AS campaign, cl.idAffiliate AS affiliate,\n>> cl.idCreative AS creative, cl.subid, cl.datetime\n>> dev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickGenerated\n>> = co.clickGenerated\n>> dev-# WHERE cl.\"date\" >= $1\n>> dev-# AND cl.\"date\" <= $2\n>> dev-# AND cl.idAffiliate = $3\n>> dev-# LIMIT $4;\n>\n> Your problem is that the query as written is hard to plan. The\n> database has no idea what you pass in, it has to guess. (IMO, It\n> almost always guesses wrong...I think it should assume 1 row\n> returned). Also, the db has no idea what you want to pass in at plan\n> time for date.\n>\n> what indexes do you have on click?\n>\n> merlin\n>\n\n\"click\" is a partitioned table, but the child tables are all the same.\nHere is the current partition:\n\ndev=# \\d click_current\n Column | Type | Modifiers\n----------------+-----------------------------+----------------------------------------------------\n id | bigint | not null default\nnextval('click_id_seq'::regclass)\n idaffiliate | integer | not null\n idsite | integer | not null\n idoffer | integer | not null\n idcreative | integer | not null\n idoptimizer | integer |\n clickgenerated | character varying | not null\n subid | character varying |\n datetime | timestamp without time zone | not null\n date | date |\n ip | inet | not null\n xip | inet |\n referrer | text |\n countrycode | character varying |\n timestamp | timestamp without time zone | not null\nIndexes:\n \"click_current_pk\" PRIMARY KEY, btree (id)\n \"click_current_clickgenerated_idx\" btree (clickgenerated)\n \"click_current_date_idx\" btree (date)\n \"click_current_idoffer_idx\" btree (idoffer)\n \"click_current_massive_idx\" btree (date, idaffiliate, idsite,\nidoffer, idcreative, idoptimizer, subid)\nCheck constraints:\n \"click_current_date_chk\" CHECK (date > '2008-12-29'::date)\n", "msg_date": "Tue, 30 Dec 2008 11:55:15 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Tue, Dec 30, 2008 at 12:42 PM, Merlin Moncure <[email protected]> wrote:\n> On Tue, Dec 30, 2008 at 1:59 PM, bricklen <[email protected]> wrote:\n>> Hi, I am re-posting my question here after trying to find a solution\n>> in the PHP pgsql list with no luck.\n>>\n>> I am experiencing some performance issues that I think are stemming\n>> from prepared statements. 
I have a pretty simple query:\n>> -- bad plan, from prepared statement\n>> --\n>> dev=# prepare fooplan (date,date,int,int) as\n>> dev-# SELECT cl.idOffer AS campaign, cl.idAffiliate AS affiliate,\n>> cl.idCreative AS creative, cl.subid, cl.datetime\n>> dev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickGenerated\n>> = co.clickGenerated\n>> dev-# WHERE cl.\"date\" >= $1\n>> dev-# AND cl.\"date\" <= $2\n>> dev-# AND cl.idAffiliate = $3\n>> dev-# LIMIT $4;\n>\n> Your problem is that the query as written is hard to plan. The\n> database has no idea what you pass in, it has to guess. (IMO, It\n> almost always guesses wrong...I think it should assume 1 row\n> returned). Also, the db has no idea what you want to pass in at plan\n> time for date.\n\nOne of the things you can try here is to build your query then execute\nit so it has to be planned each time.\n", "msg_date": "Tue, 30 Dec 2008 13:09:55 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "Hi Scott,\n\nOn Tue, Dec 30, 2008 at 12:09 PM, Scott Marlowe <[email protected]> wrote:\n> On Tue, Dec 30, 2008 at 12:42 PM, Merlin Moncure <[email protected]> wrote:\n>> On Tue, Dec 30, 2008 at 1:59 PM, bricklen <[email protected]> wrote:\n>>> Hi, I am re-posting my question here after trying to find a solution\n>>> in the PHP pgsql list with no luck.\n>>>\n>>> I am experiencing some performance issues that I think are stemming\n>>> from prepared statements. I have a pretty simple query:\n>>> -- bad plan, from prepared statement\n>>> --\n>>> dev=# prepare fooplan (date,date,int,int) as\n>>> dev-# SELECT cl.idOffer AS campaign, cl.idAffiliate AS affiliate,\n>>> cl.idCreative AS creative, cl.subid, cl.datetime\n>>> dev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickGenerated\n>>> = co.clickGenerated\n>>> dev-# WHERE cl.\"date\" >= $1\n>>> dev-# AND cl.\"date\" <= $2\n>>> dev-# AND cl.idAffiliate = $3\n>>> dev-# LIMIT $4;\n>>\n>> Your problem is that the query as written is hard to plan. The\n>> database has no idea what you pass in, it has to guess. (IMO, It\n>> almost always guesses wrong...I think it should assume 1 row\n>> returned). Also, the db has no idea what you want to pass in at plan\n>> time for date.\n>\n> One of the things you can try here is to build your query then execute\n> it so it has to be planned each time.\n>\n\nYeah, I've tested that in the application itself and it worked\ncorrectly. I am trying to discover a way to use bind variables in PHP\nwithout using the prepare function (to block sql injection), or if I\nmust use the prepare function, then force it to replan each time\nsomehow. That's part of where I'm stuck (and I'm no php guy).\n", "msg_date": "Tue, 30 Dec 2008 12:14:34 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "There is no way to force Postgres to re-plan a prepared statement. In many cases, this would be a hugely beneficial feature (perhaps part of the definition of the statement?).\n\nI have had similar issues, and had to code the application to prevent SQL injection (Postgres $ quotes and other stuff is helpful, but not always adequate or easy). 
With the current state of things, you'll have to often do your SQL injection defense in your application due to this weakness in prepared statements.\n\nI have also had a case where one query would take a couple hundred ms to parse, but was fairly fast to plan and execute (1/3 the parse cost) -- yet another case where a prepared statement that re-plans each execution would be helpful. At least you can prevent SQL injection and cut the parse cost. Its not all about the cost of planning the query.\n\n________________________________________\nFrom: [email protected] [[email protected]] On Behalf Of bricklen [[email protected]]\nSent: Tuesday, December 30, 2008 12:14 PM\nTo: Scott Marlowe\nCc: Merlin Moncure; [email protected]\nSubject: Re: [PERFORM] Poor plan choice in prepared statement\n\nHi Scott,\n\nOn Tue, Dec 30, 2008 at 12:09 PM, Scott Marlowe <[email protected]> wrote:\n> On Tue, Dec 30, 2008 at 12:42 PM, Merlin Moncure <[email protected]> wrote:\n>> On Tue, Dec 30, 2008 at 1:59 PM, bricklen <[email protected]> wrote:\n>>> Hi, I am re-posting my question here after trying to find a solution\n>>> in the PHP pgsql list with no luck.\n>>>\n>>> I am experiencing some performance issues that I think are stemming\n>>> from prepared statements. I have a pretty simple query:\n>>> -- bad plan, from prepared statement\n>>> --\n>>> dev=# prepare fooplan (date,date,int,int) as\n>>> dev-# SELECT cl.idOffer AS campaign, cl.idAffiliate AS affiliate,\n>>> cl.idCreative AS creative, cl.subid, cl.datetime\n>>> dev-# FROM click AS cl LEFT JOIN conversion AS co ON cl.clickGenerated\n>>> = co.clickGenerated\n>>> dev-# WHERE cl.\"date\" >= $1\n>>> dev-# AND cl.\"date\" <= $2\n>>> dev-# AND cl.idAffiliate = $3\n>>> dev-# LIMIT $4;\n>>\n>> Your problem is that the query as written is hard to plan. The\n>> database has no idea what you pass in, it has to guess. (IMO, It\n>> almost always guesses wrong...I think it should assume 1 row\n>> returned). Also, the db has no idea what you want to pass in at plan\n>> time for date.\n>\n> One of the things you can try here is to build your query then execute\n> it so it has to be planned each time.\n>\n\nYeah, I've tested that in the application itself and it worked\ncorrectly. I am trying to discover a way to use bind variables in PHP\nwithout using the prepare function (to block sql injection), or if I\nmust use the prepare function, then force it to replan each time\nsomehow. That's part of where I'm stuck (and I'm no php guy).\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n", "msg_date": "Tue, 30 Dec 2008 13:09:24 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Tue, Dec 30, 2008 at 1:09 PM, Scott Carey <[email protected]> wrote:\n> There is no way to force Postgres to re-plan a prepared statement. In many cases, this would be a hugely beneficial feature (perhaps part of the definition of the statement?).\n>\n> I have had similar issues, and had to code the application to prevent SQL injection (Postgres $ quotes and other stuff is helpful, but not always adequate or easy). 
With the current state of things, you'll have to often do your SQL injection defense in your application due to this weakness in prepared statements.\n>\n> I have also had a case where one query would take a couple hundred ms to parse, but was fairly fast to plan and execute (1/3 the parse cost) -- yet another case where a prepared statement that re-plans each execution would be helpful. At least you can prevent SQL injection and cut the parse cost. Its not all about the cost of planning the query.\n>\n\nCan you point out any standard ways of preventing sql injection\n(within or without php)? I would really rather not have to roll my own\ninput sanitation code if there are standard methods available.\nFor example, addslashes is apparently not recommended (according to a\nwarning in the postgresql docs at\nhttp://wiki.postgresql.org/wiki/8.1.4_et._al._Security_Release_FAQ).\n\n\nThanks!\n\nBricklen\n", "msg_date": "Tue, 30 Dec 2008 13:40:26 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "Scott Carey <[email protected]> writes:\n> I have also had a case where one query would take a couple hundred ms to parse, but was fairly fast to plan and execute (1/3 the parse cost) -- yet another case where a prepared statement that re-plans each execution would be helpful. At least you can prevent SQL injection and cut the parse cost. Its not all about the cost of planning the query.\n\nThe point of a prepared statement IMHO is to do the planning only once.\nThere's necessarily a tradeoff between that and having a plan that's\nperfectly adapted to specific parameter values.\n\nReasonable client-side APIs should provide the option to use out-of-line\nparameters, which is what you want to prevent SQL injection, without\nhard-wiring that to the orthogonal concept of statements whose plan is\nprepared in advance. In libpq, for instance, PQexecParams() does that.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 30 Dec 2008 18:02:29 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement " }, { "msg_contents": "Hi Tom,\n\nOn Tue, Dec 30, 2008 at 3:02 PM, Tom Lane <[email protected]> wrote:\n> The point of a prepared statement IMHO is to do the planning only once.\n> There's necessarily a tradeoff between that and having a plan that's\n> perfectly adapted to specific parameter values.\n\nI agree, and normally it wouldn't be an issue. In this particular\ncase, we are seeing response time to go from sub-second with\nnon-prepared queries, to over 200 seconds w/ prepared queries. Note\nthat is not an isolated case in our application, merely the numbers\nfrom this particular example.\n\n>\n> Reasonable client-side APIs should provide the option to use out-of-line\n> parameters, which is what you want to prevent SQL injection, without\n> hard-wiring that to the orthogonal concept of statements whose plan is\n> prepared in advance. In libpq, for instance, PQexecParams() does that.\n>\n> regards, tom lane\n>\n\nAgain, I agree completely. What I am after I guess are some pointers\non where to look for that, with regards to PHP. 
Whatever I turn up, I\nwill turn over to our developers, but before I do that I want to be\nsure I am giving them the correct advice.\n\nThanks,\n\nBricklen\n", "msg_date": "Tue, 30 Dec 2008 15:11:47 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Tue, 30 Dec 2008, Tom Lane wrote:\n\n> Scott Carey <[email protected]> writes:\n>> I have also had a case where one query would take a couple hundred ms to parse, but was fairly fast to plan and execute (1/3 the parse cost) -- yet another case where a prepared statement that re-plans each execution would be helpful. At least you can prevent SQL injection and cut the parse cost. Its not all about the cost of planning the query.\n>\n> The point of a prepared statement IMHO is to do the planning only once.\n> There's necessarily a tradeoff between that and having a plan that's\n> perfectly adapted to specific parameter values.\n\nactually, it does two things\n\n1. planning only once\n\n2. parsing only once.\n\nI suspect that when this was initially setup the expectation was that the \nplanning was the expensive thing that should be avoided.\n\nin this case a post earlier in the thread identified parsing of the query \nas being the expensive thing (planning + execution was 1/3 the cost of the \nparsing)\n\nsince there is not a pre-parsed interface for queries, it may make sense \nto setup a way to have the query pre-parsed, but not pre-planned for cases \nlike this.\n\nDavid Lang\n", "msg_date": "Tue, 30 Dec 2008 16:19:01 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement " }, { "msg_contents": "[email protected] writes:\n\n> since there is not a pre-parsed interface for queries, it may make sense to\n> setup a way to have the query pre-parsed, but not pre-planned for cases like\n> this.\n\nWhat would be more interesting would be to have plans that take into account\nthe outlier values and have alternative plans for those values.\n\n\nOne aspect that hasn't been discussed here is whether it's only certain\noutlier arguments that cause Postgres to choose the poor plan for you or\nwhether it always chooses it for all the sets of arguments you actually use.\n\nIf it's the former then it's possible you're only being bitten if the first\ntime you prepare the query happens to have one of these outlier set of\nparameters. 
I forget what version this went in but I think it was before 8.2\nthat Postgres started using the first set of arguments seen to plan the query.\nThis is usually an improvement over just guessing but if that first set is\nunusual it could lead to strange results.\n\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's On-Demand Production Tuning\n", "msg_date": "Tue, 30 Dec 2008 23:23:41 -0500", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Tue, Dec 30, 2008 at 7:59 PM, bricklen <[email protected]> wrote:\n> I would like to continue to use bind variables to prevent sql\n> injection, but I'd like to force a plan re-parse for every single\n> query (if necessary?)\n\nAs far as I understand your problem, you don't care about using\nprepared statements.\n\nIf so, you can:\n- either use pg_query_params():\nhttp://www.php.net/manual/en/function.pg-query-params.php\n- or use an unnamed prepared statements when you don't want a prepared\nstatement if, for some reason, you really need to use prepared\nstatements in a few cases: you can specify an empty string as plan\nname. The condition for this one is that you need to upgrade to a\nrecent version of 8.3 as postponed planning of unnamed prepared\nstatements is a new feature of 8.3 and was broken in 8.3.0 and 8.3.1.\n\n-- \nGuillaume\n", "msg_date": "Wed, 31 Dec 2008 10:12:47 +0100", "msg_from": "\"Guillaume Smet\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "Hi Guillaume,\n\nOn Wed, Dec 31, 2008 at 1:12 AM, Guillaume Smet\n<[email protected]> wrote:\n> On Tue, Dec 30, 2008 at 7:59 PM, bricklen <[email protected]> wrote:\n>> I would like to continue to use bind variables to prevent sql\n>> injection, but I'd like to force a plan re-parse for every single\n>> query (if necessary?)\n>\n> As far as I understand your problem, you don't care about using\n> prepared statements.\n>\n> If so, you can:\n> - either use pg_query_params():\n> http://www.php.net/manual/en/function.pg-query-params.php\n> - or use an unnamed prepared statements when you don't want a prepared\n> statement if, for some reason, you really need to use prepared\n> statements in a few cases: you can specify an empty string as plan\n> name. The condition for this one is that you need to upgrade to a\n> recent version of 8.3 as postponed planning of unnamed prepared\n> statements is a new feature of 8.3 and was broken in 8.3.0 and 8.3.1.\n>\n> --\n> Guillaume\n>\n\nI will look into the pg_query_params() function to see if it works for\nus. I don't think your second suggestion is going to work for us,\nsince we are stuck on 8.2 for the foreseeable future.\n\nThanks for the tips though, I appreciate it!\n\nBricklen\n", "msg_date": "Wed, 31 Dec 2008 07:11:17 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "Tom Lane escribi�:\n> Scott Carey <[email protected]> writes:\n> > I have also had a case where one query would take a couple hundred\n> > ms to parse, but was fairly fast to plan and execute (1/3 the parse\n> > cost) -- yet another case where a prepared statement that re-plans\n> > each execution would be helpful. At least you can prevent SQL\n> > injection and cut the parse cost. 
Its not all about the cost of\n> > planning the query.\n> \n> The point of a prepared statement IMHO is to do the planning only once.\n> There's necessarily a tradeoff between that and having a plan that's\n> perfectly adapted to specific parameter values.\n\nI think it has been shown enough times that the performance drop caused\nby a worse plan can be orders of magnitudes worse than what's gained by\nproducing the plan only once. It does not seem a bad idea to provide a\nway to carry out only the parse phase, and postpone planning until the\nparameters have been received.\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nThe PostgreSQL Company - Command Prompt, Inc.\n", "msg_date": "Wed, 31 Dec 2008 13:01:18 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Wed, Dec 31, 2008 at 11:01 AM, Alvaro Herrera\n<[email protected]> wrote:\n>> The point of a prepared statement IMHO is to do the planning only once.\n>> There's necessarily a tradeoff between that and having a plan that's\n>> perfectly adapted to specific parameter values.\n>\n> I think it has been shown enough times that the performance drop caused\n> by a worse plan can be orders of magnitudes worse than what's gained by\n> producing the plan only once. It does not seem a bad idea to provide a\n> way to carry out only the parse phase, and postpone planning until the\n> parameters have been received.\n\n+1.\n\n...Robert\n", "msg_date": "Thu, 1 Jan 2009 08:44:50 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Wed, Dec 31, 2008 at 5:01 PM, Alvaro Herrera\n<[email protected]> wrote:\n> I think it has been shown enough times that the performance drop caused\n> by a worse plan can be orders of magnitudes worse than what's gained by\n> producing the plan only once. It does not seem a bad idea to provide a\n> way to carry out only the parse phase, and postpone planning until the\n> parameters have been received.\n\nIt's already done in 8.3 for unnamed plans, isn't it?\n\n-- \nGuillaume\n", "msg_date": "Thu, 1 Jan 2009 19:22:56 +0100", "msg_from": "\"Guillaume Smet\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Thu, Jan 1, 2009 at 9:24 PM, <[email protected]> wrote:\n> forgive my ignorance here, but if it's unnamed how can you reference it\n> later to take advantage of the parsing?\n\nYou can't. 
That's what unnamed prepared statements are for.\n\nIt's not obvious to me that the parsing phase is worth any \"caching\".\n>From my experience, the planning phase takes far much time on complex\nqueries.\n\n-- \nGuillaume\n", "msg_date": "Thu, 1 Jan 2009 20:29:43 +0100", "msg_from": "\"Guillaume Smet\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "[email protected] writes:\n> the poster who started this thread had a query where the parsing phase \n> took significantly longer than the planning stage.\n\nThat was an anecdote utterly unsupported by evidence.\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 01 Jan 2009 15:24:12 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement " }, { "msg_contents": "On Thu, 1 Jan 2009, Guillaume Smet wrote:\n\n> On Wed, Dec 31, 2008 at 5:01 PM, Alvaro Herrera\n> <[email protected]> wrote:\n>> I think it has been shown enough times that the performance drop caused\n>> by a worse plan can be orders of magnitudes worse than what's gained by\n>> producing the plan only once. It does not seem a bad idea to provide a\n>> way to carry out only the parse phase, and postpone planning until the\n>> parameters have been received.\n>\n> It's already done in 8.3 for unnamed plans, isn't it?\n\nforgive my ignorance here, but if it's unnamed how can you reference it \nlater to take advantage of the parsing?\n\nI may just be not understanding the terms being used here.\n\nDavid Lang\n", "msg_date": "Thu, 1 Jan 2009 12:24:52 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" }, { "msg_contents": "On Thu, 1 Jan 2009, Guillaume Smet wrote:\n\n> On Thu, Jan 1, 2009 at 9:24 PM, <[email protected]> wrote:\n>> forgive my ignorance here, but if it's unnamed how can you reference it\n>> later to take advantage of the parsing?\n>\n> You can't. That's what unnamed prepared statements are for.\n>\n> It's not obvious to me that the parsing phase is worth any \"caching\".\n> From my experience, the planning phase takes far much time on complex\n> queries.\n\nthe poster who started this thread had a query where the parsing phase \ntook significantly longer than the planning stage.\n\nDavid Lang\n", "msg_date": "Thu, 1 Jan 2009 12:40:11 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Poor plan choice in prepared statement" } ]
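For completeness, a server-side sketch of Scott Marlowe's suggestion to "build your query then execute it so it has to be planned each time", written for the 8.2 server discussed above. The function name, the LIMIT parameter, and the omission of the join to the conversion table are simplifications, not from the thread; quote_literal() keeps the interpolated values safe from injection, and because the statement is assembled and EXECUTEd with the real values, it is planned per call rather than once with generic parameters. (The thread's own conclusion, pg_query_params() on the PHP side, is the simpler fix when the application can be changed.)

CREATE OR REPLACE FUNCTION click_by_affiliate(p_from date, p_to date,
                                              p_aff integer, p_lim integer)
RETURNS SETOF click AS $$
DECLARE
    r click%ROWTYPE;
BEGIN
    -- The SQL text is rebuilt on every call, so the planner sees literal values.
    FOR r IN EXECUTE
        'SELECT cl.* FROM click cl'
        || ' WHERE cl."date" BETWEEN ' || quote_literal(p_from::text)
        || ' AND '                     || quote_literal(p_to::text)
        || ' AND cl.idaffiliate = '    || p_aff::text
        || ' LIMIT '                   || p_lim::text
    LOOP
        RETURN NEXT r;
    END LOOP;
    RETURN;
END;
$$ LANGUAGE plpgsql;

-- e.g.: SELECT * FROM click_by_affiliate(date '2008-12-01', date '2008-12-23', 49978, 10);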
[ { "msg_contents": "While looking at a complex query that is being poorly planned by\nPostgreSQL 8.2.9, I discovered that any non-trivial CASE...WHEN\nexpression seems to produce a selectivity estimate of 0.005. This\nalso happens on HEAD.\n\npsql (8.4devel)\nType \"help\" for help.\n\nhead=# create table tenk (c) as select generate_series(1,10000);\nSELECT\nhead=# alter table tenk alter column c set statistics 100;\nALTER TABLE\nhead=# analyze tenk;\nANALYZE\nhead=# explain select * from tenk where c in (1,2,3,4);\n QUERY PLAN\n------------------------------------------------------\n Seq Scan on tenk (cost=0.00..190.00 rows=4 width=4)\n Filter: (c = ANY ('{1,2,3,4}'::integer[]))\n(2 rows)\n\nhead=# explain select * from tenk where case when c in (1,2,3,4) then 1 end = 1;\n QUERY PLAN\n\n--------------------------------------------------------------------------------\n------------\n Seq Scan on tenk (cost=0.00..215.00 rows=50 width=4)\n Filter: (CASE WHEN (c = ANY ('{1,2,3,4}'::integer[])) THEN 1 ELSE NULL::integ\ner END = 1)\n(2 rows)\n\nhead=# explain select * from tenk where case when c in (1,2,3,4) then 2 end = 1;\n QUERY PLAN\n\n--------------------------------------------------------------------------------\n------------\n Seq Scan on tenk (cost=0.00..215.00 rows=50 width=4)\n Filter: (CASE WHEN (c = ANY ('{1,2,3,4}'::integer[])) THEN 2 ELSE NULL::integ\ner END = 1)\n(2 rows)\n\nhead=# \\q\n\nThe last example is particularly egregious, since it can never return\ntrue, but the previous example is not much better, since in my actual\nquery the actual selectivity (against a CASE with multiple WHEN\nbranches) can be as high as ~0.8, so a value of 0.005 isn't close. It\nends up causing a very expensive nested loop plan when something else\nwould be better.\n\nAny suggestions would be appreciated.\n\n...Robert\n", "msg_date": "Mon, 5 Jan 2009 22:15:29 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": true, "msg_subject": "bad selectivity estimates for CASE" }, { "msg_contents": "\"Robert Haas\" <[email protected]> writes:\n> While looking at a complex query that is being poorly planned by\n> PostgreSQL 8.2.9, I discovered that any non-trivial CASE...WHEN\n> expression seems to produce a selectivity estimate of 0.005.\n\nIf you have an idea for a non-silly estimate, feel free to enlighten\nus...\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 05 Jan 2009 23:40:43 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: bad selectivity estimates for CASE " }, { "msg_contents": "On Mon, Jan 5, 2009 at 11:40 PM, Tom Lane <[email protected]> wrote:\n> \"Robert Haas\" <[email protected]> writes:\n>> While looking at a complex query that is being poorly planned by\n>> PostgreSQL 8.2.9, I discovered that any non-trivial CASE...WHEN\n>> expression seems to produce a selectivity estimate of 0.005.\n>\n> If you have an idea for a non-silly estimate, feel free to enlighten\n> us...\n\nWell, presumably CASE WHEN <expr1> THEN <constant1> WHEN <expr2> THEN\n<constant2> WHEN <expr3> THEN <constant3> ... END = <constantn> could\nbe simplified to <exprn>. 
But that's not going to happen in time to\ndo me any good on this query, if it ever happens (and might not be\nsufficient anyway since the selectivity estimates of <expr1> may not\nbe very good either), so I was more looking for suggestions on coping\nwith the situation, since I'm sure that I'm not the first person to\nhave this type of problem.\n\n...Robert\n", "msg_date": "Tue, 6 Jan 2009 07:25:29 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: bad selectivity estimates for CASE" }, { "msg_contents": "\"Robert Haas\" <[email protected]> writes:\n> Well, presumably CASE WHEN <expr1> THEN <constant1> WHEN <expr2> THEN\n> <constant2> WHEN <expr3> THEN <constant3> ... END = <constantn> could\n> be simplified to <exprn>.\n\nNot without breaking the order-of-evaluation guarantees. Consider\n\n\tcase when x=0 then 0 when 1/x = 42 then 1 end = 1\n\nThis expression should not suffer a divide-by-zero failure but your\nproposal would allow it to do so.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 06 Jan 2009 08:22:00 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: bad selectivity estimates for CASE " } ]
[ { "msg_contents": "Hi list,\nI would like to ask your help in order to understand if my postgresql \nserver (ver. 8.2.9) is well configured.\nIt's a quad-proc system (32 bit) with a 6 disk 1+0 RAID array and 2 \nseparate disks for the OS and write-ahead logs with 4GB of RAM.\n\nI don't know what is the best info to help me and so I start with some \nvmstat information:\n\n > vmstat -n 30\nprocs -----------memory---------- ---swap-- -----io---- --system-- \n-----cpu------\n r b swpd free buff cache si so bi bo in cs us sy \nid wa st\n 0 23 84 129968 25060 3247860 0 0 78 50 0 2 17 5 \n33 45 0\n 1 24 84 124204 25136 3257100 0 0 3037 1154 3359 7253 25 \n7 6 62 0\n 2 30 84 124704 25136 3256344 0 0 3004 1269 3553 7906 33 \n9 7 51 0\n 0 25 84 125784 24956 3253344 0 0 3357 773 3163 6454 17 4 \n10 68 0\n 0 2 84 125744 25236 3258996 0 0 3186 567 3125 6425 24 6 \n21 50 0\n 3 7 84 124948 25500 3260088 0 0 1829 535 2706 4625 18 3 \n54 25 0\n 5 0 84 124976 25624 3259112 0 0 2067 647 3050 6163 26 6 \n41 27 0\n 0 7 84 123836 25644 3260760 0 0 2239 1065 3289 8654 27 7 \n38 28 0\n\nThese are gathered loadavg info for the same period:\n29.57 29.53 33.52 1/231 12641\n29.54 29.63 33.31 1/226 12678\n24.43 28.45 32.69 1/223 12696\n12.31 24.17 30.95 4/223 12706\n\nAt the moment as average there are about 120/150 connections and this is \nmy postgresql.conf file\n\nlisten_addresses = '*' # what IP address(es) to listen on;\nport = 5432 # (change requires restart)\nmax_connections = 400 # (change requires restart)\nssl = off # (change requires restart)\npassword_encryption = on\nshared_buffers = 32MB # min 128kB or max_connections*16kB\nmax_prepared_transactions = 0 # can be 0 or more\nwork_mem = 1MB # min 64kB\nmaintenance_work_mem = 256MB # min 1MB\nmax_fsm_pages = 204800 # min max_fsm_relations*16, 6 \nbytes each\nfsync = on # turns forced synchronization \non or off\nfull_page_writes = on # recover from partial page writes\nwal_buffers = 8MB # min 32kB\ncheckpoint_segments = 56 # in logfile segments, min 1, \n16MB each\nenable_bitmapscan = on\nenable_hashagg = on\nenable_hashjoin = on\nenable_indexscan = on\nenable_mergejoin = on\nenable_nestloop = on\nenable_seqscan = on\nenable_sort = on\nenable_tidscan = on\ncpu_tuple_cost = 0.003 # same scale as above\ncpu_index_tuple_cost = 0.001 # same scale as above\ncpu_operator_cost = 0.0005 # same scale as above\neffective_cache_size = 3GB\ngeqo_threshold = 14\nlog_destination = 'stderr' # Valid values are combinations of\nredirect_stderr = on # Enable capturing of stderr \ninto log\nlog_directory = 'pg_log' # Directory where log files are \nwritten\nlog_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # Log file name pattern.\nlog_truncate_on_rotation = on # If on, any existing log file \nof the same\nlog_rotation_age = 1d # Automatic rotation of logfiles \nwill\nlog_rotation_size = 0 # Automatic rotation of logfiles \nwill\nlog_min_duration_statement = -1 # -1 is disabled, 0 logs all \nstatements\nlog_statement = 'none' # none, ddl, mod, all\n\nautovacuum = on # enable autovacuum subprocess?\nstats_start_collector = on # must be 'on' for autovacuum\nstats_row_level = on # must be 'on' for autovacuum\n\nstatement_timeout = 150000\ndatestyle = 'iso, mdy'\nlc_messages = 'en_US.UTF-8' # locale for system \nerror message\nlc_monetary = 'en_US.UTF-8' # locale for monetary \nformatting\nlc_numeric = 'en_US.UTF-8' # locale for number \nformatting\nlc_time = 'en_US.UTF-8' # locale for time formatting\ndeadlock_timeout = 5s\nescape_string_warning = 
off\nstandard_conforming_strings = on\n\n\nCheers and thanks a lot in advance.\nLet me know if other info is useful.\nSte\n\n-- \nStefano Nichele\n\nFunambol Chief Architect\nFunambol :: Open Source Mobile'We' for the Mass Market :: http://www.funambol.com\n\n", "msg_date": "Tue, 06 Jan 2009 17:50:49 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "understanding postgres issues/bottlenecks" }, { "msg_contents": "On Tue, Jan 6, 2009 at 11:50 AM, Stefano Nichele\n<[email protected]> wrote:\n> Hi list,\n> I would like to ask your help in order to understand if my postgresql server\n> (ver. 8.2.9) is well configured.\n> It's a quad-proc system (32 bit) with a 6 disk 1+0 RAID array and 2 separate\n> disks for the OS and write-ahead logs with 4GB of RAM.\n>\n> I don't know what is the best info to help me and so I start with some\n> vmstat information:\n>\n>> vmstat -n 30\n> procs -----------memory---------- ---swap-- -----io---- --system--\n> -----cpu------\n> r b swpd free buff cache si so bi bo in cs us sy id wa\n> st\n> 0 23 84 129968 25060 3247860 0 0 78 50 0 2 17 5 33\n> 45 0\n\nclearly i/o bound. can you throw an iostat to give some more detailed info?\nalso,\nneed o/s version, etc\ndisk info (speed, raid controller, etc)\nmy guess is your disk system is underpowered for transaction load you\nare giving.\nhow many tps is the database doing?\n\nmerlin\n", "msg_date": "Tue, 6 Jan 2009 12:16:46 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "I concur with Merlin you're I/O bound.\n\nAdding to his post, what RAID controller are you running, does it have\ncache, does the cache have battery backup, is the cache set to write\nback or write through?\n\nAlso, what do you get for this (need contrib module pgbench installed)\n\npgbench -i -s 100\npgbench -c 50 -n 10000\n\n? Specifically transactions per second?\n", "msg_date": "Tue, 6 Jan 2009 11:45:45 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "I got this bounce message from your account,\[email protected]. I'm on gmail too, but don't get a lot of\n-perform messages into my spam folder.\n\nJust in case you've got eliminatecc turned on on the mailing list\nserver, I'm resending it through the mail server without your email\naddress in it.\n\n\nI concur with Merlin you're I/O bound.\n\nAdding to his post, what RAID controller are you running, does it have\ncache, does the cache have battery backup, is the cache set to write\nback or write through?\n\nAlso, what do you get for this (need contrib module pgbench installed)\n\npgbench -i -s 100\npgbench -c 50 -n 10000\n\n? Specifically transactions per second?\n", "msg_date": "Tue, 6 Jan 2009 11:59:02 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: failure notice" }, { "msg_contents": "Thanks for your help. I'll give you the info you asked as soon as I'll \nhave it (i have also to install iostat but I don't have enough privilege \nto do that).\n\nBTW, why did you said I/O bound ? Which are the parameters that \nhighlight that ? 
Sorry for my ignorance....\n\nste\n\nMerlin Moncure wrote:\n> On Tue, Jan 6, 2009 at 11:50 AM, Stefano Nichele\n> <[email protected]> wrote:\n> \n>> Hi list,\n>> I would like to ask your help in order to understand if my postgresql server\n>> (ver. 8.2.9) is well configured.\n>> It's a quad-proc system (32 bit) with a 6 disk 1+0 RAID array and 2 separate\n>> disks for the OS and write-ahead logs with 4GB of RAM.\n>>\n>> I don't know what is the best info to help me and so I start with some\n>> vmstat information:\n>>\n>> \n>>> vmstat -n 30\n>>> \n>> procs -----------memory---------- ---swap-- -----io---- --system--\n>> -----cpu------\n>> r b swpd free buff cache si so bi bo in cs us sy id wa\n>> st\n>> 0 23 84 129968 25060 3247860 0 0 78 50 0 2 17 5 33\n>> 45 0\n>> \n>\n> clearly i/o bound. can you throw an iostat to give some more detailed info?\n> also,\n> need o/s version, etc\n> disk info (speed, raid controller, etc)\n> my guess is your disk system is underpowered for transaction load you\n> are giving.\n> how many tps is the database doing?\n>\n> merlin\n>\n> \n\n\n-- \nStefano Nichele\n\nFunambol Chief Architect\nFunambol :: Open Source Mobile'We' for the Mass Market :: http://www.funambol.com\n\n", "msg_date": "Tue, 06 Jan 2009 20:02:39 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Tue, Jan 6, 2009 at 12:02 PM, Stefano Nichele\n<[email protected]> wrote:\n> Thanks for your help. I'll give you the info you asked as soon as I'll have\n> it (i have also to install iostat but I don't have enough privilege to do\n> that).\n>\n> BTW, why did you said I/O bound ? Which are the parameters that highlight\n> that ? Sorry for my ignorance....\n\nIn this output, the second to the last column is wa, which stands for\nwait, the third to last is id, which is CPU idle.\n\nr b swpd free buff cache si so bi bo in cs us sy id wa st\n1 24 84 124204 25136 3257100 0 0 3037 1154 3359 7253 25 7 6 62 0\n2 30 84 124704 25136 3256344 0 0 3004 1269 3553 7906 33 9 7 51 0\n0 25 84 125784 24956 3253344 0 0 3357 773 3163 6454 17 4 10 68 0\n\nSo here the first line reports\n25% user\n7% system\n6% idle\n62% wait\n\nSo it's mostly waiting.\n", "msg_date": "Tue, 6 Jan 2009 13:50:09 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Tue, Jan 6, 2009 at 11:02 AM, Stefano Nichele\n<[email protected]> wrote:\n> BTW, why did you said I/O bound ? Which are the parameters that highlight\n> that ? Sorry for my ignorance....\n\nIn addition to the percentage of time spent in wait as Scott said, you\ncan also see the number of processes which are blocked (b column on\nthe left). Those are the processes which would like to run but are\nwaiting for IO to complete.\n\n From your earlier vmstat output, you can see that often quite a large\nnumber of processes were waiting on IO - 20-30 of them. This is\nconfirmed by your load average. Since you only have 6 spindles, during\nthose periods, request service latency was likely very high during\nthose periods.\n\nAnyway, I would start with significantly increasing the amount of\nmemory you have allocated to shared_buffers.\n\n32MB is _way_ too low. 
For a dedicated PostgreSQL machine with 4GB\nRAM, 1GB going to be a lot closer to optimal, with the optimal setting\nprobably somewhere between 500MB and 2GB.\n\nAnd please post the rest of the information that the others have asked for...\n\n-Dave\n", "msg_date": "Tue, 6 Jan 2009 15:33:09 -0800", "msg_from": "\"David Rees\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "David Rees wrote:\n> On Tue, Jan 6, 2009 at 11:02 AM, Stefano Nichele\n> <[email protected]> wrote:\n>> BTW, why did you said I/O bound ? Which are the parameters that highlight\n>> that ? Sorry for my ignorance....\n> \n> In addition to the percentage of time spent in wait as Scott said, you\n> can also see the number of processes which are blocked (b column on\n> the left). Those are the processes which would like to run but are\n> waiting for IO to complete.\n> \n>>From your earlier vmstat output, you can see that often quite a large\n> number of processes were waiting on IO - 20-30 of them. This is\n> confirmed by your load average. Since you only have 6 spindles, during\n> those periods, request service latency was likely very high during\n> those periods.\n> \n> Anyway, I would start with significantly increasing the amount of\n> memory you have allocated to shared_buffers.\n> \n> 32MB is _way_ too low. For a dedicated PostgreSQL machine with 4GB\n> RAM, 1GB going to be a lot closer to optimal, with the optimal setting\n> probably somewhere between 500MB and 2GB.\n> \n> And please post the rest of the information that the others have asked for...\n> \n> -Dave\n> \n\nA couple of notes on \"iostat\":\n\n1. The package it lives in is called \"sysstat\". Most Linux distros do\n*not* install \"sysstat\" by default. Somebody should beat up on them\nabout that. :)\n\n2. It does not require admin privileges to install or execute if you\nwant to have your own personal copy. You will, however, need the Gnu C\ncompiler, \"make\", and some other things that also may not be installed.\nIn any event, you can build \"sysstat\" from source in your home directory\nif you can't get the sysadmin to install it globally.\n\n3. Once you do get it installed, the command line to run it that gives\nthe most information is\n\n$ iostat -cdmtx <sampling interval> <number of samples>\n\nThat will give you CPU utilization, throughputs in megabytes, time\nstamps and extended statistics like device utilizations, queue lengths\nand average service times. And if you're running 2.6.25 or later, it\nshould give you all the statistics for all partitions, not just devices!\n\nSomewhere buried in one of my open source projects is a Perl script that\nwill parse the \"iostat\" output into a CSV file. By the way, if you need\nto, you can be fairly aggressive with small sampling intervals. \"iostat\"\nis very efficient, and chances are, you're I/O bound anyhow so your\nprocessors will have plenty of bandwidth to format numbers out of /proc.\n:) I normally run with samples every ten seconds, but you can probably\ngo even smaller if you need to.\n", "msg_date": "Tue, 06 Jan 2009 20:17:10 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wednesday 07 January 2009 04:17:10 M. Edward (Ed) Borasky wrote:\n> \n> 1. The package it lives in is called \"sysstat\". Most Linux distros do\n> *not* install \"sysstat\" by default. 
Somebody should beat up on them\n> about that. :)\n\nHehe, although sysstat and friends did have issues on Linux for a long time. \nNothing worse than misleading stats, so I suspect it lost a lot of friends \nback then. It is a lot better these days when most of the \"Unix\" software \ntargets Linux first, and other kernels second.\n\nAside from all the advice here about system tuning, as a system admin I'd also \nask is the box doing the job you need? And are you looking at the Postgres \nlog (with logging of slow queries) to see that queries perform in a sensible \ntime? I'd assume with the current performance figure there is an issue \nsomewhere, but I've been to places where it was as simple as adding one \nindex, or even modifying an index so it does what the application developer \nintended instead of what they ask for ;)\n", "msg_date": "Wed, 7 Jan 2009 09:31:25 +0000", "msg_from": "Simon Waters <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Simon Waters wrote:\n> On Wednesday 07 January 2009 04:17:10 M. Edward (Ed) Borasky wrote:\n>> 1. The package it lives in is called \"sysstat\". Most Linux distros do\n>> *not* install \"sysstat\" by default. Somebody should beat up on them\n>> about that. :)\n> \n> Hehe, although sysstat and friends did have issues on Linux for a long time. \n> Nothing worse than misleading stats, so I suspect it lost a lot of friends \n> back then. It is a lot better these days when most of the \"Unix\" software \n> targets Linux first, and other kernels second.\n\nI'm unfortunately familiar with that \"episode\", which turned out to be\nbugs in the Red Hat Linux kernels / drivers. I don't remember the exact\nversions, although there was at least one of them shipped at one time as\npart of RHEL 3. I may be wrong, but I don't think Sebastien ever had to\nchange any of *his* code in \"sysstat\" -- I think he had to wait for Red\nHat. :)\n\n> \n> Aside from all the advice here about system tuning, as a system admin I'd also \n> ask is the box doing the job you need? And are you looking at the Postgres \n> log (with logging of slow queries) to see that queries perform in a sensible \n> time? I'd assume with the current performance figure there is an issue \n> somewhere, but I've been to places where it was as simple as adding one \n> index, or even modifying an index so it does what the application developer \n> intended instead of what they ask for ;)\n> \n\nYeah ... the issues of application response time and throughput, cost of\nadding hardware to the box(es) vs. cost of changing the application,\nchanges in demand for the server over time, etc., are what make capacity\nplanning \"fun\". Squeezing the last megabyte per second out of a RAID\narray is the easy stuff. :)\n", "msg_date": "Wed, 07 Jan 2009 07:35:23 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>wrote:\n\n> I concur with Merlin you're I/O bound.\n>\n> Adding to his post, what RAID controller are you running, does it have\n> cache, does the cache have battery backup, is the cache set to write\n> back or write through?\n\n\nAt the moment I don't have such information. It's a \"standard\" RAID\ncontroller coming with a DELL server. 
Is there any information I can have\nasking to the SO ?\n\n\n>\n> Also, what do you get for this (need contrib module pgbench installed)\n>\n> pgbench -i -s 100\n> pgbench -c 50 -n 10000\n>\n> ? Specifically transactions per second?\n\n\nI'll run pgbench in the next days.\n\nCheers,\nste\n\nOn Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]> wrote:\nI concur with Merlin you're I/O bound.\n\nAdding to his post, what RAID controller are you running, does it have\ncache, does the cache have battery backup, is the cache set to write\nback or write through? At the moment I don't have such information. It's a \"standard\" RAID controller coming with a DELL server. Is there any information I can have asking to the SO ?\n\n\nAlso, what do you get for this (need contrib module pgbench installed)\n\npgbench -i -s 100\npgbench -c 50 -n 10000\n\n? Specifically transactions per second?I'll run pgbench in the next days.Cheers,ste", "msg_date": "Wed, 7 Jan 2009 22:02:05 +0100", "msg_from": "\"Stefano Nichele\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wed, Jan 7, 2009 at 2:02 PM, Stefano Nichele\n<[email protected]> wrote:\n>\n> On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>\n> wrote:\n>>\n>> I concur with Merlin you're I/O bound.\n>>\n>> Adding to his post, what RAID controller are you running, does it have\n>> cache, does the cache have battery backup, is the cache set to write\n>> back or write through?\n>\n>\n> At the moment I don't have such information. It's a \"standard\" RAID\n> controller coming with a DELL server. Is there any information I can have\n> asking to the SO ?\n\nYou can run lshw to see what flavor controller it is. Dell RAID\ncontrollers are pretty much either total crap, or mediocre at best.\nThe latest one, the Perc 6 series are squarely in the same performance\nrealm as a 4 or 5 year old LSI megaraid. The perc 5 series and before\nare total performance dogs. The really bad news is that you can't\ngenerally plug in a real RAID controller on a Dell. We put an Areca\n168-LP PCI-x8 in one of our 1950s and it wouldn't even turn on, got a\nCPU Error.\n\nDells are fine for web servers and such. For database servers they're\na total loss. The best you can do with one is to put a generic SCSI\ncard in it and connect to an external array with its own controller.\n\nWe have a perc6e and a perc5e in two different servers, and no matter\nhow we configure them, we can't get even 1/10th the performance of an\nAreca controller with the same number of drives on another machine of\nthe same basic class as the 1950s.\n\n>> Also, what do you get for this (need contrib module pgbench installed)\n>>\n>> pgbench -i -s 100\n>> pgbench -c 50 -n 10000\n>>\n>> ? Specifically transactions per second?\n>\n> I'll run pgbench in the next days.\n\nCool. That pgbench is a \"best case scenario\" benchmark. Lots of\nsmall transactions on a db that should fit into memory. If you can't\npull off a decent number there (at least a few hundred tps) then can't\nexpect better performance from real world usage.\n\nOh, and that should be:\n\npgbench -c 50 -t 10000\n\nnot -n... not enough sleep I guess.\n", "msg_date": "Wed, 7 Jan 2009 14:31:10 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Just to elaborate on the horror that is a Dell perc5e. 
We have one in\na 1950 with battery backed cache (256 Meg I think). It has an 8 disk\n500Gig SATA drive RAID-10 array and 4 1.6GHz cpus and 10 Gigs ram.\nThis server currently serves as a mnogo search server. Here's what\nvmstat 1 looks like during the day:\n\nprocs -----------memory---------- ---swap-- -----io---- -system-- ----cpu----\n r b swpd free buff cache si so bi bo in cs us sy id wa\n 1 5 38748 60400 43172 9227632 0 0 5676 0 668 703 4 2 12 82\n 0 5 38748 58888 43176 9229360 0 0 5548 0 672 792 2 0 15 83\n 0 4 38748 64460 43184 9230476 0 0 5964 72 773 947 1 0 31 67\n 0 5 38748 61884 43272 9241564 0 0 5896 1112 674 1028 1 2 23 74\n 0 5 38748 56612 43276 9247376 0 0 5660 0 603 795 0 0 21 79\n 0 5 38748 56472 43268 9247480 0 0 5700 0 603 790 0 0 22 77\n\nNote 4 or 5 blocking, and reading in data at 5M/sec and bursting small writes.\n75 to 80% wait state\nuser and sys time around 1 or 2%\nrest is idle.\n\nThis is without adding any load from pgbench. When I add pgbench, the\nnumbers from vmstat look the same pretty much, slight increase of\nmaybe 20% bi and bo and a rise in the blocked processes.\n\nRunning vacuum on the pgbench db on this machine takes well over a\nminute. Even a small pgbench test runs slowly, getting 20 to 30 tps\nduring the day. During off hours I can get a max of about 80 tps.\n\n----------------------------------------------------------------------------------------------\n\nNow, here's my primary production server. It is NOT a Dell, or an HP,\nor an IBM. It is a high end white box machine with a Tyan mobo.\nPrimary difference here is 12 disk SAS RAID-10 and a much faster RAID\ncontroller. It's got 8 opteron 2.1GHz cores but honestly, it hardly\nuses any of them. This is it before I run pgbench. It almost looks\nlike it's sleeping. It is handling about 250 queries per second right\nnow, most serving from memory.\n\nprocs -----------memory---------- ---swap-- -----io---- --system--\n-----cpu------\n r b swpd free buff cache si so bi bo in cs us sy id wa st\n 0 0 2464 614156 626448 28658476 0 0 0 688 2515 2615 12\n 1 87 0 0\n 2 0 2464 638972 626448 28658484 0 0 8 656 2401 2454 16\n 2 82 0 0\n 1 0 2464 631852 626448 28658516 0 0 0 552 1939 1984 14\n 1 85 0 0\n 4 0 2464 617900 626448 28658532 0 0 32 500 4925 5276 19\n 2 78 0 0\n 1 0 2464 617656 626448 28658560 0 0 0 492 3363 3428 14\n 1 85 0 0\n 1 0 2464 618648 626448 28658560 0 0 0 752 3391 3579 12\n 2 85 0 0\n\nIt's not reading anything in because it fits in memory (it's got 32\nGig ram) and it's writing out a small amount. 
I'll run pgbench to\ngive it a heavier load and force more writes.\n\npgbench -c 10 -t 25000\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 10\nnumber of transactions per client: 25000\nnumber of transactions actually processed: 250000/250000\ntps = 2866.549792 (including connections establishing)\ntps = 2868.010947 (excluding connections establishing)\n\nHere's vmstat during that period:\n\nprocs -----------memory---------- ---swap-- -----io---- --system--\n-----cpu------\n r b swpd free buff cache si so bi bo in cs us sy id wa st\n 1 2 2408 190548 626584 28770448 0 0 0 66200 8210 84391\n44 14 35 6 0\n 0 2 2408 207156 626584 28770900 0 0 0 23832 6688 6426 20\n 4 58 18 0\n 7 0 2408 295792 626588 28771904 0 0 0 34372 6831 62873\n35 12 43 11 0\n 3 2 2408 308904 626588 28773128 0 0 0 60040 6711 78235\n44 13 40 3 0\n 4 0 2408 310408 626588 28773660 0 0 0 54780 7779 37399\n28 8 50 15 0\n 6 1 2408 325808 626592 28775912 0 0 16 43588 5345 105348\n43 15 39 3 0\n10 0 2408 324304 626592 28778188 0 0 16 60984 6582 125105\n52 18 29 1 0\n 8 1 2408 339204 626596 28780248 0 0 8 47956 5203 113799\n48 18 33 1 0\n 4 2 2408 337856 626596 28782840 0 0 0 108096 12132 90391\n46 16 33 4 0\n\nNote that wait is generally around 5 to 10% on this machine.\n\nNow, I know it's a much more powerful machine, but the difference in\nperformance is not some percentage faster. It's many many factors\nfaster. Twenty or more times as faster as the dell. That dell\nmachine cost us somewhere in the range of $11,000. The MD-1000 was a\nlarge part of that cost.\n\nThe Aberdeen white box that is replacing it cost us about $11,500.\n\nI cannot understand how Dell stays in business. It certainly isn't on merit.\n", "msg_date": "Wed, 7 Jan 2009 15:06:17 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wed, 7 Jan 2009, Scott Marlowe wrote:\n\n> I cannot understand how Dell stays in business.\n\nThere's a continuous stream of people who expect RAID5 to perform well, \ntoo, yet this surprises you?\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Wed, 7 Jan 2009 17:34:09 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wed, Jan 7, 2009 at 3:34 PM, Greg Smith <[email protected]> wrote:\n> On Wed, 7 Jan 2009, Scott Marlowe wrote:\n>\n>> I cannot understand how Dell stays in business.\n>\n> There's a continuous stream of people who expect RAID5 to perform well, too,\n> yet this surprises you?\n\nI guess I've underestimated the human capacity for stupidity yet again.\n\n-- An optimist sees the glass as half full, a pessimist as half empty,\nand engineer as having redundant storage capacity.\n", "msg_date": "Wed, 7 Jan 2009 15:37:02 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "--- On Wed, 7/1/09, Scott Marlowe <[email protected]> wrote:\n\n > The really bad news is that\n> you can't\n> generally plug in a real RAID controller on a Dell. 
We put\n> an Areca\n> 168-LP PCI-x8 in one of our 1950s and it wouldn't even\n> turn on, got a\n> CPU Error.\n> \n\nHmm, I had to pull the perc5i's out of our dell servers to get them to boot off of our Adaptec 5805's\n\nAnyway that reminds me, I must remember to bring my lump hammer into work at some point...\n\n\n \n", "msg_date": "Wed, 7 Jan 2009 23:28:13 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "--- On Wed, 7/1/09, Scott Marlowe <[email protected]> wrote:\n\n> Just to elaborate on the horror that is a Dell perc5e. We\n> have one in\n> a 1950 with battery backed cache (256 Meg I think). It has\n> an 8 disk\n> 500Gig SATA drive RAID-10 array and 4 1.6GHz cpus and 10\n> Gigs ram.\n\nOur perc5i controllers performed better in raid 5 that 10. Sounds like the comment you made when I was wasting my time with that perc3 fits all dell cards perfectly; \"brain damaged\"\n\n\n \n", "msg_date": "Wed, 7 Jan 2009 23:36:51 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wed, Jan 7, 2009 at 4:36 PM, Glyn Astill <[email protected]> wrote:\n> --- On Wed, 7/1/09, Scott Marlowe <[email protected]> wrote:\n>\n>> Just to elaborate on the horror that is a Dell perc5e. We\n>> have one in\n>> a 1950 with battery backed cache (256 Meg I think). It has\n>> an 8 disk\n>> 500Gig SATA drive RAID-10 array and 4 1.6GHz cpus and 10\n>> Gigs ram.\n>\n> Our perc5i controllers performed better in raid 5 that 10. Sounds like the comment you made when I was wasting my time with that perc3 fits all dell cards perfectly; \"brain damaged\"\n\nOne of the beauties of the whole Dell RAID card naming scheme is that\na Perc5i and a Perc5e can be made by different manufacturers and have\ncompletely different performance characteristics. The only common\nfactor seems to be the high level of suck they managed to generate.\n", "msg_date": "Wed, 7 Jan 2009 16:43:15 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Since the discussion involves Dell PERC controllers, does anyone know if \nthe performance of LSI cards (those with the same chipsets as Dell) also \nhave similarly poor performance?\n\nI have a LSI 8888ELP card, so would like to know what other people's \nexperiences are...\n\n-bborie\n\nScott Marlowe wrote:\n> On Wed, Jan 7, 2009 at 2:02 PM, Stefano Nichele\n> <[email protected]> wrote:\n>> On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>\n>> wrote:\n>>> I concur with Merlin you're I/O bound.\n>>>\n>>> Adding to his post, what RAID controller are you running, does it have\n>>> cache, does the cache have battery backup, is the cache set to write\n>>> back or write through?\n>>\n>> At the moment I don't have such information. It's a \"standard\" RAID\n>> controller coming with a DELL server. Is there any information I can have\n>> asking to the SO ?\n> \n> You can run lshw to see what flavor controller it is. Dell RAID\n> controllers are pretty much either total crap, or mediocre at best.\n> The latest one, the Perc 6 series are squarely in the same performance\n> realm as a 4 or 5 year old LSI megaraid. The perc 5 series and before\n> are total performance dogs. 
The really bad news is that you can't\n> generally plug in a real RAID controller on a Dell. We put an Areca\n> 168-LP PCI-x8 in one of our 1950s and it wouldn't even turn on, got a\n> CPU Error.\n> \n> Dells are fine for web servers and such. For database servers they're\n> a total loss. The best you can do with one is to put a generic SCSI\n> card in it and connect to an external array with its own controller.\n> \n> We have a perc6e and a perc5e in two different servers, and no matter\n> how we configure them, we can't get even 1/10th the performance of an\n> Areca controller with the same number of drives on another machine of\n> the same basic class as the 1950s.\n> \n>>> Also, what do you get for this (need contrib module pgbench installed)\n>>>\n>>> pgbench -i -s 100\n>>> pgbench -c 50 -n 10000\n>>>\n>>> ? Specifically transactions per second?\n>> I'll run pgbench in the next days.\n> \n> Cool. That pgbench is a \"best case scenario\" benchmark. Lots of\n> small transactions on a db that should fit into memory. If you can't\n> pull off a decent number there (at least a few hundred tps) then can't\n> expect better performance from real world usage.\n> \n> Oh, and that should be:\n> \n> pgbench -c 50 -t 10000\n> \n> not -n... not enough sleep I guess.\n> \n\n-- \nBborie Park\nProgrammer\nCenter for Vectorborne Diseases\nUC Davis\n530-752-8380\[email protected]\n", "msg_date": "Wed, 07 Jan 2009 15:51:29 -0800", "msg_from": "Bborie Park <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "If you're stuck with a Dell, the Adaptec 5 series works, I'm using 5085's in a pair and get 1200 MB/sec streaming reads best case with 20 SATA drives in RAID 10 (2 sets of 10, software raid 0 on top). Of course, Dell doesn't like you putting in somebody else's RAID card, but they support the rest of the system when you add in a third party PCIe card.\nSure, they are a bit pricey, but they are also very good performers with drivers for a lot of stuff, including OpenSolaris. I tested a PERC 6 versus this with the same drives, but only 10 total. The Adaptec was 70% faster out of the box, and still 35% faster after tuning the linux OS read-ahead and other parameters.\n\nPERC 5 / 6 cards are LSI re-badged MegaRaid cards, for those interested. With special firmware modifications that make them 'interesting'.\n\n\nOn 1/7/09 1:31 PM, \"Scott Marlowe\" <[email protected]> wrote:\n\nOn Wed, Jan 7, 2009 at 2:02 PM, Stefano Nichele\n<[email protected]> wrote:\n>\n> On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>\n> wrote:\n>>\n>> I concur with Merlin you're I/O bound.\n>>\n>> Adding to his post, what RAID controller are you running, does it have\n>> cache, does the cache have battery backup, is the cache set to write\n>> back or write through?\n>\n>\n> At the moment I don't have such information. It's a \"standard\" RAID\n> controller coming with a DELL server. Is there any information I can have\n> asking to the SO ?\n\nYou can run lshw to see what flavor controller it is. Dell RAID\ncontrollers are pretty much either total crap, or mediocre at best.\nThe latest one, the Perc 6 series are squarely in the same performance\nrealm as a 4 or 5 year old LSI megaraid. The perc 5 series and before\nare total performance dogs. The really bad news is that you can't\ngenerally plug in a real RAID controller on a Dell. 
We put an Areca\n168-LP PCI-x8 in one of our 1950s and it wouldn't even turn on, got a\nCPU Error.\n\nDells are fine for web servers and such. For database servers they're\na total loss. The best you can do with one is to put a generic SCSI\ncard in it and connect to an external array with its own controller.\n\nWe have a perc6e and a perc5e in two different servers, and no matter\nhow we configure them, we can't get even 1/10th the performance of an\nAreca controller with the same number of drives on another machine of\nthe same basic class as the 1950s.\n\n>> Also, what do you get for this (need contrib module pgbench installed)\n>>\n>> pgbench -i -s 100\n>> pgbench -c 50 -n 10000\n>>\n>> ? Specifically transactions per second?\n>\n> I'll run pgbench in the next days.\n\nCool. That pgbench is a \"best case scenario\" benchmark. Lots of\nsmall transactions on a db that should fit into memory. If you can't\npull off a decent number there (at least a few hundred tps) then can't\nexpect better performance from real world usage.\n\nOh, and that should be:\n\npgbench -c 50 -t 10000\n\nnot -n... not enough sleep I guess.\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\nRe: [PERFORM] understanding postgres issues/bottlenecks\n\n\nIf you’re stuck with a Dell, the Adaptec 5 series works, I’m using 5085’s in a pair and get 1200 MB/sec streaming reads best case with 20 SATA drives in RAID 10 (2 sets of 10, software raid 0 on top).   Of course, Dell doesn’t like you putting in somebody else’s RAID card, but they support the rest of the system when you add in a third party PCIe card.\nSure, they are a bit pricey, but they are also very good performers with drivers for a lot of stuff, including OpenSolaris.  I tested a PERC 6 versus this with the same drives, but only 10 total.  The Adaptec was 70% faster out of the box, and still 35% faster after tuning the linux OS read-ahead and other parameters.\n\nPERC 5 / 6 cards are LSI re-badged MegaRaid cards, for those interested.  With special firmware modifications that make them ‘interesting’.\n\n\nOn 1/7/09 1:31 PM, \"Scott Marlowe\" <[email protected]> wrote:\n\nOn Wed, Jan 7, 2009 at 2:02 PM, Stefano Nichele\n<[email protected]> wrote:\n>\n> On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>\n> wrote:\n>>\n>> I concur with Merlin you're I/O bound.\n>>\n>> Adding to his post, what RAID controller are you running, does it have\n>> cache, does the cache have battery backup, is the cache set to write\n>> back or write through?\n>\n>\n> At the moment I don't have such information. It's a \"standard\" RAID\n> controller coming with a DELL server. Is there any information I can have\n> asking to the SO ?\n\nYou can run lshw to see what flavor controller it is.  Dell RAID\ncontrollers are pretty much either total crap, or mediocre at best.\nThe latest one, the Perc 6 series are squarely in the same performance\nrealm as a 4 or 5 year old LSI megaraid.  The perc 5 series and before\nare total performance dogs.  The really bad news is that you can't\ngenerally plug in a real RAID controller on a Dell.  We put an Areca\n168-LP PCI-x8 in one of our 1950s and it wouldn't even turn on, got a\nCPU Error.\n\nDells are fine for web servers and such.  For database servers they're\na total loss.  
The best you can do with one is to put a generic SCSI\ncard in it and connect to an external array with its own controller.\n\nWe have a perc6e and a perc5e in two different servers, and no matter\nhow we configure them, we can't get even 1/10th the performance of an\nAreca controller with the same number of drives on another machine of\nthe same basic class as the 1950s.\n\n>> Also, what do you get for this (need contrib module pgbench installed)\n>>\n>> pgbench -i -s 100\n>> pgbench -c 50 -n 10000\n>>\n>> ? Specifically transactions per second?\n>\n> I'll run pgbench in the next days.\n\nCool.  That pgbench is a \"best case scenario\" benchmark.  Lots of\nsmall transactions on a db that should fit into memory.  If you can't\npull off a decent number there (at least a few hundred tps) then can't\nexpect better performance from real world usage.\n\nOh, and that should be:\n\npgbench -c 50 -t 10000\n\nnot -n... not enough sleep I guess.\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Wed, 7 Jan 2009 16:11:34 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wed, Jan 7, 2009 at 7:11 PM, Scott Carey <[email protected]> wrote:\n> If you're stuck with a Dell, the Adaptec 5 series works, I'm using 5085's in\n> a pair and get 1200 MB/sec streaming reads best case with 20 SATA drives in\n> RAID 10 (2 sets of 10, software raid 0 on top). Of course, Dell doesn't\n> like you putting in somebody else's RAID card, but they support the rest of\n> the system when you add in a third party PCIe card.\n> Sure, they are a bit pricey, but they are also very good performers with\n> drivers for a lot of stuff, including OpenSolaris. I tested a PERC 6 versus\n> this with the same drives, but only 10 total. The Adaptec was 70% faster\n> out of the box, and still 35% faster after tuning the linux OS read-ahead\n> and other parameters.\n\nSequential read performance means precisely squat for most database\nloads. The dell stuff is ok....decent RAID 5 performance and mediocre\nraid 10. Unfortunately switching the disks to jbod and going software\nraid doesn't seem to help much. The biggest problem with dell\nhardware that I see is that overflowing the raid cache causes the\nwhole system to spectacularly grind to a halt, causing random delays.\n\nTo the OP, it looks like you are getting about 300 or so tps out of\nsdc (80% read), which is where I'm assuming the data is. I'm guessing\nmost of that is random traffic. Here's the bad news: while this is on\nthe low side for a 6 disk raid 10 7200 rpm, it's probably about what\nyour particular hardware can do. I have some general suggestions for\nyou:\n*) upgrade hardware: more/faster disks, etc\n*) disable fsync (dangerous!) can risk data loss, but maybe you have\nredundancy built in a different place. This will let linux reorganize\ni/o on top of what the hardware is doing.\n*) upgrade to postgres 8.3. 
Numerous efficiency advantages, and has\nthe synchronous_commit setting, which is 'fsync lite'...most of the\nadvantages and a lot less risk.\n*) tune the app\n\nmerlin\n", "msg_date": "Wed, 7 Jan 2009 20:19:36 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Wed, Jan 7, 2009 at 6:19 PM, Merlin Moncure <[email protected]> wrote:\n\nRE: Perc raid controllers\n> Unfortunately switching the disks to jbod and going software\n> raid doesn't seem to help much. The biggest problem with dell\n\nYeah, I noticed that too when I was trying to get a good config from\nthe perc 5e. Also, running software RAID0 over hardware RAID1 sets\ngave no appreciable speed boost.\n", "msg_date": "Wed, 7 Jan 2009 18:28:54 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "> Sequential read performance means precisely squat for most database\n> loads.\n\nDepends on the database workload. Many queries for me may scan 50GB of data for aggregation.\nBesides, it is a good test for making sure your RAID card doesn't suck. Especially running tests with sequential access CONCURRENT with random access.\nA good tuned raid setup will be able to handle a good chunk of sequential access while doing random reads concurrently. A bad one will grind to a halt.\nThe same can be said for concurrent writes and fsyncs with concurrent reads. Bad cards tend to struggle with this, good ones don't.\n\n$ sar -b\n12:00:01 AM tps rtps wtps bread/s bwrtn/s\n01:10:01 AM 1913.22 1903.74 9.48 561406.70 326.67\n01:20:02 AM 2447.71 2439.97 7.74 930357.08 148.86\n01:30:01 AM 1769.77 1740.41 29.35 581015.86 3729.37\n01:40:01 AM 1762.05 1659.06 102.99 477730.70 26137.96\n\nAnd disk utilization did not go past 85% or so during the peak load, usually much less (8 cores, 12.5% would reflect a CPU).\n\n12:00:01 AM CPU %user %nice %system %iowait %steal %idle\n01:10:01 AM all 47.92 0.00 12.92 10.22 0.00 28.94\n01:20:02 AM all 67.97 0.00 17.93 3.47 0.00 10.63\n01:30:01 AM all 46.67 0.00 10.60 7.43 0.00 35.29\n01:40:01 AM all 59.22 0.03 9.88 5.67 0.00 25.21\n\nThe workload regularly bursts to 900MB/sec with concurrent sequential scans.\n\n\n> The dell stuff is ok....decent RAID 5 performance and mediocre\n> raid 10. Unfortunately switching the disks to jbod and going software\n> raid doesn't seem to help much. The biggest problem with dell\n> hardware that I see is that overflowing the raid cache causes the\n> whole system to spectacularly grind to a halt, causing random delays.\n\nThe Adaptec stuff doesn't have the issues with cache overflow. For pure random access stuff the Dell Perc 6 is pretty good, but mix read/write it freaks out and has inconsistent performance. A PERC 6 does perform better than a 3Ware 9650 for me though. Those are both on my crap list, with 3Ware 9550 and PERC 5 both much worse.\nBoth got about 200 iops per drive on random access.\n\n\n> To the OP, it looks like you are getting about 300 or so tps out of\n> sdc (80% read), which is where I'm assuming the data is. I'm guessing\n> most of that is random traffic. Here's the bad news: while this is on\n> the low side for a 6 disk raid 10 7200 rpm, it's probably about what\n> your particular hardware can do. I have some general suggestions for\n> you:\n> *) upgrade hardware: more/faster disks, etc\n> *) disable fsync (dangerous!) 
can risk data loss, but maybe you have\n> redundancy built in a different place. This will let linux reorganize\n> i/o on top of what the hardware is doing.\n> *) upgrade to postgres 8.3. Numerous efficiency advantages, and has\n> the synchronous_commit setting, which is 'fsync lite'...most of the\n> advantages and a lot less risk.\n> *) tune the app\n>\n> merlin\n\nAgree with all of the above.\nThe xlogs are on sdb, which is not I/O bound, so I am not sure how much changing fsync will help.\nI second upgrading to 8.3 which is generally faster and will reduce random i/o if any sequential scans are kicking out random access data from shared_buffers.\n\nIf there is budget for an upgrade, how big is the data set, and how much will it grow?\nFor $1200 get two Intel X25-M SSD's and all random iops issues will be gone (8k iops in raid 1). Double that if 4 drives in raid 10. Unfortunately, each pair only stores 80GB. But for many, that is plenty.\n\n\n\nRe: [PERFORM] understanding postgres issues/bottlenecks\n\n\n\n> Sequential read performance means precisely squat for most database\n> loads.  \n\nDepends on the database workload.  Many queries for me may scan 50GB of data for aggregation.\nBesides, it is a good test for making sure your RAID card doesn’t suck.  Especially running tests with sequential access CONCURRENT with random access.\nA good tuned raid setup will be able to handle a good chunk of sequential access while doing random reads  concurrently.  A bad one will grind to a halt.\nThe same can be said for concurrent writes and fsyncs with concurrent reads.  Bad cards tend to struggle with this, good ones don’t.\n\n$ sar -b\n12:00:01 AM       tps      rtps      wtps   bread/s   bwrtn/s\n01:10:01 AM   1913.22   1903.74      9.48 561406.70    326.67\n01:20:02 AM   2447.71   2439.97      7.74 930357.08    148.86\n01:30:01 AM   1769.77   1740.41     29.35 581015.86   3729.37\n01:40:01 AM   1762.05   1659.06    102.99 477730.70  26137.96\n\nAnd disk utilization did not go past 85% or so during the peak load, usually much less (8 cores, 12.5% would reflect a CPU).\n\n12:00:01 AM       CPU     %user     %nice   %system   %iowait    %steal     %idle\n01:10:01 AM       all     47.92      0.00     12.92     10.22      0.00     28.94\n01:20:02 AM       all     67.97      0.00     17.93      3.47      0.00     10.63\n01:30:01 AM       all     46.67      0.00     10.60      7.43      0.00     35.29\n01:40:01 AM       all     59.22      0.03      9.88      5.67      0.00     25.21\n\nThe workload regularly bursts to 900MB/sec with concurrent sequential scans.\n\n\n> The dell stuff is ok....decent RAID 5 performance and mediocre\n> raid 10.  Unfortunately switching the disks to jbod and going software\n> raid doesn't seem to help much.  The biggest problem with dell\n> hardware that I see is that overflowing the raid cache causes the\n> whole system to spectacularly grind to a halt, causing random delays.\n\nThe Adaptec stuff doesn’t have the issues with cache overflow.  For pure random access stuff the Dell Perc 6 is pretty good, but mix read/write it freaks out and has inconsistent performance.  A PERC 6 does perform better than a 3Ware 9650 for me though.  Those are both on my crap list, with 3Ware 9550 and PERC 5 both much worse.\nBoth got about 200 iops per drive on random access.\n\n\n> To the OP, it looks like you are getting about 300 or so tps out of\n> sdc (80% read), which is where I'm assuming the data is.  I'm guessing\n> most of that is random traffic.  
Here's the bad news: while this is on\n> the low side for a 6 disk raid 10 7200 rpm, it's probably about what\n> your particular hardware can do.  I have some general suggestions for\n> you:\n> *) upgrade hardware: more/faster disks, etc\n> *) disable fsync (dangerous!) can risk data loss, but maybe you have\n> redundancy built in a different place.  This will let linux reorganize\n> i/o on top of what the hardware is doing.\n> *) upgrade to postgres 8.3.  Numerous efficiency advantages, and has\n> the synchronous_commit setting, which is 'fsync lite'...most of the\n> advantages and a lot less risk.\n> *) tune the app\n> \n> merlin\n\nAgree with all of the above.\nThe xlogs are on sdb, which is not I/O bound, so I am not sure how much changing fsync will help.\nI second upgrading to 8.3 which is generally faster and will reduce random i/o if any sequential scans are kicking out random access data from shared_buffers.\n\nIf there is budget for an upgrade, how big is the data set, and how much will it grow?\nFor  $1200 get two Intel X25-M SSD’s and all random iops issues will be gone (8k iops in raid 1).  Double that if 4 drives in raid 10.  Unfortunately, each pair only stores 80GB.  But for many, that is plenty.", "msg_date": "Wed, 7 Jan 2009 18:36:05 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Scott Marlowe wrote:\n> On Wed, Jan 7, 2009 at 3:34 PM, Greg Smith <[email protected]> wrote:\n>> On Wed, 7 Jan 2009, Scott Marlowe wrote:\n>>\n>>> I cannot understand how Dell stays in business.\n>> There's a continuous stream of people who expect RAID5 to perform well, too,\n>> yet this surprises you?\n> \n> I guess I've underestimated the human capacity for stupidity yet again.\n> \n> -- An optimist sees the glass as half full, a pessimist as half empty,\n> and engineer as having redundant storage capacity.\n\nAh, but the half-empty glass has a fly in it. -- Kehlog Albran :)\n\n\n", "msg_date": "Wed, 07 Jan 2009 19:11:15 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Find !\n\nDell CERC SATA RAID 2 PCI SATA 6ch\n\nRunning lspci -v:\n\n03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch (DellCorsair)\n Flags: bus master, 66MHz, slow devsel, latency 32, IRQ 209\n Memory at f8000000 (32-bit, prefetchable) [size=64M]\n Expansion ROM at fe800000 [disabled] [size=32K]\n Capabilities: [80] Power Management version 2\n\n\nAny consideration looking at iostat output ?\n\nCheers and thanks to all!\n\nste\n\n\nOn Wed, Jan 7, 2009 at 10:31 PM, Scott Marlowe <[email protected]>wrote:\n\n> On Wed, Jan 7, 2009 at 2:02 PM, Stefano Nichele\n> <[email protected]> wrote:\n> >\n> > On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>\n> > wrote:\n> >>\n> >> I concur with Merlin you're I/O bound.\n> >>\n> >> Adding to his post, what RAID controller are you running, does it have\n> >> cache, does the cache have battery backup, is the cache set to write\n> >> back or write through?\n> >\n> >\n> > At the moment I don't have such information. It's a \"standard\" RAID\n> > controller coming with a DELL server. Is there any information I can have\n> > asking to the SO ?\n>\n> You can run lshw to see what flavor controller it is. 
Dell RAID\n> controllers are pretty much either total crap, or mediocre at best.\n> The latest one, the Perc 6 series are squarely in the same performance\n> realm as a 4 or 5 year old LSI megaraid. The perc 5 series and before\n> are total performance dogs. The really bad news is that you can't\n> generally plug in a real RAID controller on a Dell. We put an Areca\n> 168-LP PCI-x8 in one of our 1950s and it wouldn't even turn on, got a\n> CPU Error.\n>\n> Dells are fine for web servers and such. For database servers they're\n> a total loss. The best you can do with one is to put a generic SCSI\n> card in it and connect to an external array with its own controller.\n>\n> We have a perc6e and a perc5e in two different servers, and no matter\n> how we configure them, we can't get even 1/10th the performance of an\n> Areca controller with the same number of drives on another machine of\n> the same basic class as the 1950s.\n>\n> >> Also, what do you get for this (need contrib module pgbench installed)\n> >>\n> >> pgbench -i -s 100\n> >> pgbench -c 50 -n 10000\n> >>\n> >> ? Specifically transactions per second?\n> >\n> > I'll run pgbench in the next days.\n>\n> Cool. That pgbench is a \"best case scenario\" benchmark. Lots of\n> small transactions on a db that should fit into memory. If you can't\n> pull off a decent number there (at least a few hundred tps) then can't\n> expect better performance from real world usage.\n>\n> Oh, and that should be:\n>\n> pgbench -c 50 -t 10000\n>\n> not -n... not enough sleep I guess.\n>\n\nFind !Dell CERC SATA RAID 2 PCI SATA 6chRunning lspci -v:03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)        Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch (DellCorsair)        Flags: bus master, 66MHz, slow devsel, latency 32, IRQ 209\n        Memory at f8000000 (32-bit, prefetchable) [size=64M]        Expansion ROM at fe800000 [disabled] [size=32K]        Capabilities: [80] Power Management version 2Any consideration looking at iostat output ?\nCheers and thanks to all!steOn Wed, Jan 7, 2009 at 10:31 PM, Scott Marlowe <[email protected]> wrote:\nOn Wed, Jan 7, 2009 at 2:02 PM, Stefano Nichele\n<[email protected]> wrote:\n>\n> On Tue, Jan 6, 2009 at 7:45 PM, Scott Marlowe <[email protected]>\n> wrote:\n>>\n>> I concur with Merlin you're I/O bound.\n>>\n>> Adding to his post, what RAID controller are you running, does it have\n>> cache, does the cache have battery backup, is the cache set to write\n>> back or write through?\n>\n>\n> At the moment I don't have such information. It's a \"standard\" RAID\n> controller coming with a DELL server. Is there any information I can have\n> asking to the SO ?\n\nYou can run lshw to see what flavor controller it is.  Dell RAID\ncontrollers are pretty much either total crap, or mediocre at best.\nThe latest one, the Perc 6 series are squarely in the same performance\nrealm as a 4 or 5 year old LSI megaraid.  The perc 5 series and before\nare total performance dogs.  The really bad news is that you can't\ngenerally plug in a real RAID controller on a Dell.  We put an Areca\n168-LP PCI-x8 in one of our 1950s and it wouldn't even turn on, got a\nCPU Error.\n\nDells are fine for web servers and such.  For database servers they're\na total loss.  
The best you can do with one is to put a generic SCSI\ncard in it and connect to an external array with its own controller.\n\nWe have a perc6e and a perc5e in two different servers, and no matter\nhow we configure them, we can't get even 1/10th the performance of an\nAreca controller with the same number of drives on another machine of\nthe same basic class as the 1950s.\n\n>> Also, what do you get for this (need contrib module pgbench installed)\n>>\n>> pgbench -i -s 100\n>> pgbench -c 50 -n 10000\n>>\n>> ? Specifically transactions per second?\n>\n> I'll run pgbench in the next days.\n\nCool.  That pgbench is a \"best case scenario\" benchmark.  Lots of\nsmall transactions on a db that should fit into memory.  If you can't\npull off a decent number there (at least a few hundred tps) then can't\nexpect better performance from real world usage.\n\nOh, and that should be:\n\npgbench -c 50 -t 10000\n\nnot -n... not enough sleep I guess.", "msg_date": "Thu, 8 Jan 2009 09:36:13 +0100", "msg_from": "\"Stefano Nichele\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Thu, Jan 8, 2009 at 3:36 AM, Stefano Nichele\n<[email protected]> wrote:\n> Find !\n>\n> Dell CERC SATA RAID 2 PCI SATA 6ch\n>\n> Running lspci -v:\n>\n> 03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n> Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch (DellCorsair)\n\nIIRC that's the 'perc 6ir' card...no write caching. You are getting\nkilled with syncs. If you can restart the database, you can test with\nfsync=off comparing load to confirm this. (another way is to compare\nselect only vs regular transactions on pgbench).\n\nmerlin\n", "msg_date": "Thu, 8 Jan 2009 07:45:48 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "\n--- On Thu, 8/1/09, Stefano Nichele <[email protected]> wrote:\n\n> From: Stefano Nichele <[email protected]>\n> Subject: Re: [PERFORM] understanding postgres issues/bottlenecks\n> To: \"Scott Marlowe\" <[email protected]>\n> Cc: [email protected]\n> Date: Thursday, 8 January, 2009, 8:36 AM\n> Find !\n> \n> Dell CERC SATA RAID 2 PCI SATA 6ch\n> \n> Running lspci -v:\n> \n> 03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n> Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch\n> (DellCorsair)\n> Flags: bus master, 66MHz, slow devsel, latency 32,\n> IRQ 209\n> Memory at f8000000 (32-bit, prefetchable)\n> [size=64M]\n> Expansion ROM at fe800000 [disabled] [size=32K]\n> Capabilities: [80] Power Management version 2\n> \n\nHmm, the 64M / 6ch makes it sound like this card\n\nhttp://accessories.us.dell.com/sna/products/Controllers/productdetail.aspx?c=us&l=en&s=bsd&cs=04&sku=310-5975\n\nWhich is a 6ch dell version of\n\nhttp://www.adaptec.com/en-US/support/raid/sata/AAR-2410SA/\n\nI have one on my smash pile. 
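If there's any doubt about which Adaptec variant it really is, the numeric PCI
IDs are more telling than the marketing name. A suggestion only (exact output
varies by kernel and driver):

   lspci -nn | grep -i raid     # shows [vendor:device] IDs you can look up
   cat /proc/scsi/scsi          # what the kernel thinks the device is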
The only thing that makes me think otherwise is the 2 in \"CERC SATA RAID 2\" ...\n\n\n\n \n", "msg_date": "Thu, 8 Jan 2009 14:26:13 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Glyn Astill wrote:\n> --- On Thu, 8/1/09, Stefano Nichele <[email protected]> wrote:\n>\n> \n>> From: Stefano Nichele <[email protected]>\n>> Subject: Re: [PERFORM] understanding postgres issues/bottlenecks\n>> To: \"Scott Marlowe\" <[email protected]>\n>> Cc: [email protected]\n>> Date: Thursday, 8 January, 2009, 8:36 AM\n>> Find !\n>>\n>> Dell CERC SATA RAID 2 PCI SATA 6ch\n>>\n>> Running lspci -v:\n>>\n>> 03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n>> Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch\n>> (DellCorsair)\n>> Flags: bus master, 66MHz, slow devsel, latency 32,\n>> IRQ 209\n>> Memory at f8000000 (32-bit, prefetchable)\n>> [size=64M]\n>> Expansion ROM at fe800000 [disabled] [size=32K]\n>> Capabilities: [80] Power Management version 2\n>>\n>> \n>\n> Hmm, the 64M / 6ch makes it sound like this card\n>\n> http://accessories.us.dell.com/sna/products/Controllers/productdetail.aspx?c=us&l=en&s=bsd&cs=04&sku=310-5975\n> \n\nIndeed I think it's this one. The server is a Power Edge 1800 Server.\n\n> Which is a 6ch dell version of\n>\n> http://www.adaptec.com/en-US/support/raid/sata/AAR-2410SA/\n>\n> I have one on my smash pile. The only thing that makes me think otherwise is the 2 in \"CERC SATA RAID 2\" ...\n>\n>\n>\n> \n>\n> \n\n\n", "msg_date": "Thu, 08 Jan 2009 15:41:21 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Merlin Moncure wrote:\n> On Thu, Jan 8, 2009 at 3:36 AM, Stefano Nichele\n> <[email protected]> wrote:\n> \n>> Find !\n>>\n>> Dell CERC SATA RAID 2 PCI SATA 6ch\n>>\n>> Running lspci -v:\n>>\n>> 03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n>> Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch (DellCorsair)\n>> \n>\n> IIRC that's the 'perc 6ir' card...no write caching. You are getting\n> killed with syncs. If you can restart the database, you can test with\n> fsync=off comparing load to confirm this. 
(another way is to compare\n> select only vs regular transactions on pgbench).\n>\n> merlin\n>\n> \n\nI'll try next Saturday.\n\nthanks\nste\n", "msg_date": "Thu, 08 Jan 2009 15:42:20 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "If it is PowerEdge 1800 with a low end non-write cache controller, then 100 iops per SATA drive isn't too far off from what you should expect.\n\nContrary to what others are saying, I don't think that turning fsync off is killing you, you might get 25% more throughput at best.\n\nThe iostat output would indicate that you are getting just about all you can get out of it without getting faster drives or controllers.\n\nSince it is mostly random read bound, more RAM in the system would certainly help reduce the load, as would an I/O subsystem with high random i/o throughput (quality flash drives, the best quality ones currently being Intel X-25 M until the next gen Micron, Samsung, Mtron, and SanDisk ones come out - do NOT use any that don't have high random write perf).\n\nI'm guessing that the cheapest thing to do is increase the RAM on the server - I think you can fit 16GB in an 1800, if not at least 8GB, and increase shared_buffers to between 10% and 25% of that. Upgrading to 8.3.x will further help.\n\n\nOn 1/8/09 6:41 AM, \"Stefano Nichele\" <[email protected]> wrote:\n\nGlyn Astill wrote:\n> --- On Thu, 8/1/09, Stefano Nichele <[email protected]> wrote:\n>\n>\n>> From: Stefano Nichele <[email protected]>\n>> Subject: Re: [PERFORM] understanding postgres issues/bottlenecks\n>> To: \"Scott Marlowe\" <[email protected]>\n>> Cc: [email protected]\n>> Date: Thursday, 8 January, 2009, 8:36 AM\n>> Find !\n>>\n>> Dell CERC SATA RAID 2 PCI SATA 6ch\n>>\n>> Running lspci -v:\n>>\n>> 03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n>> Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch\n>> (DellCorsair)\n>> Flags: bus master, 66MHz, slow devsel, latency 32,\n>> IRQ 209\n>> Memory at f8000000 (32-bit, prefetchable)\n>> [size=64M]\n>> Expansion ROM at fe800000 [disabled] [size=32K]\n>> Capabilities: [80] Power Management version 2\n>>\n>>\n>\n> Hmm, the 64M / 6ch makes it sound like this card\n>\n> http://accessories.us.dell.com/sna/products/Controllers/productdetail.aspx?c=us&l=en&s=bsd&cs=04&sku=310-5975\n>\n\nIndeed I think it's this one. The server is a Power Edge 1800 Server.\n\n> Which is a 6ch dell version of\n>\n> http://www.adaptec.com/en-US/support/raid/sata/AAR-2410SA/\n>\n> I have one on my smash pile. 
The only thing that makes me think otherwise is the 2 in \"CERC SATA RAID 2\" ...\n>\n>\n>\n>\n>\n>\n\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\nRe: [PERFORM] understanding postgres issues/bottlenecks\n\n\nIf it is PowerEdge 1800 with a low end non-write cache controller, then 100 iops per SATA drive isn’t too far off from what you should expect.\n\nContrary to what others are saying, I don’t think that turning fsync off is killing you, you might get 25% more throughput at best.\n\nThe iostat output would indicate that you are getting just about all you can get out of it without getting faster drives or controllers.\n\nSince it is mostly random read bound, more RAM in the system would certainly help reduce the load, as would an I/O subsystem with high random i/o throughput (quality flash drives, the best quality ones currently being Intel X-25 M until the next gen Micron, Samsung, Mtron, and SanDisk ones come out — do NOT use any that don’t have high random write perf).\n\nI’m guessing that the cheapest thing to do is increase the RAM on the server — I think you can fit 16GB in an 1800, if not at least 8GB, and increase shared_buffers to between 10% and 25% of that.  Upgrading to 8.3.x will further help.\n\n\nOn 1/8/09 6:41 AM, \"Stefano Nichele\" <[email protected]> wrote:\n\nGlyn Astill wrote:\n> --- On Thu, 8/1/09, Stefano Nichele <[email protected]> wrote:\n>\n>\n>> From: Stefano Nichele <[email protected]>\n>> Subject: Re: [PERFORM] understanding postgres issues/bottlenecks\n>> To: \"Scott Marlowe\" <[email protected]>\n>> Cc: [email protected]\n>> Date: Thursday, 8 January, 2009, 8:36 AM\n>> Find !\n>>\n>> Dell CERC SATA RAID 2 PCI SATA 6ch\n>>\n>> Running lspci -v:\n>>\n>> 03:09.0 RAID bus controller: Adaptec AAC-RAID (rev 01)\n>>         Subsystem: Dell CERC SATA RAID 2 PCI SATA 6ch\n>> (DellCorsair)\n>>         Flags: bus master, 66MHz, slow devsel, latency 32,\n>> IRQ 209\n>>         Memory at f8000000 (32-bit, prefetchable)\n>> [size=64M]\n>>         Expansion ROM at fe800000 [disabled] [size=32K]\n>>         Capabilities: [80] Power Management version 2\n>>\n>>\n>\n> Hmm, the 64M / 6ch makes it sound like this card\n>\n> http://accessories.us.dell.com/sna/products/Controllers/productdetail.aspx?c=us&l=en&s=bsd&cs=04&sku=310-5975\n>\n\nIndeed I think it's this one. The server is a Power Edge 1800 Server.\n\n> Which is a 6ch dell version of\n>\n> http://www.adaptec.com/en-US/support/raid/sata/AAR-2410SA/\n>\n> I have one on my smash pile.  The only thing that makes me think otherwise is the 2 in \"CERC SATA RAID 2\" ...\n>\n>\n>\n>\n>\n>\n\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Thu, 8 Jan 2009 09:52:23 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Thu, Jan 8, 2009 at 9:42 AM, Stefano Nichele\n<[email protected]> wrote:\n> Merlin Moncure wrote:\n>> IIRC that's the 'perc 6ir' card...no write caching. You are getting\n>> killed with syncs. If you can restart the database, you can test with\n>> fsync=off comparing load to confirm this. (another way is to compare\n>> select only vs regular transactions on pgbench).\n>\n> I'll try next Saturday.\n>\n\njust be aware of the danger . 
hard reset (power off) class of failure\nwhen fsync = off means you are loading from backups.\n\nmerlin\n", "msg_date": "Thu, 8 Jan 2009 15:28:44 -0500", "msg_from": "\"Merlin Moncure\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "At 03:28 PM 1/8/2009, Merlin Moncure wrote:\n>On Thu, Jan 8, 2009 at 9:42 AM, Stefano Nichele\n><[email protected]> wrote:\n> > Merlin Moncure wrote:\n> >> IIRC that's the 'perc 6ir' card...no write caching. You are getting\n> >> killed with syncs. If you can restart the database, you can test with\n> >> fsync=off comparing load to confirm this. (another way is to compare\n> >> select only vs regular transactions on pgbench).\n> >\n> > I'll try next Saturday.\n> >\n>\n>just be aware of the danger . hard reset (power off) class of failure\n>when fsync = off means you are loading from backups.\n>\n>merlin\nThat's what redundant power conditioning UPS's are supposed to help prevent ;-)\n\nMerlin is of course absolutely correct that you are taking a bigger \nrisk if you turn fsync off.\n\nI would not recommend fysnc = off if you do not have other safety \nmeasures in place to protect against data loss because of a power event..\n(At least for most DB applications.)\n\n...and of course, those lucky few with bigger budgets can use SSD's \nand not care what fsync is set to.\n\nRon \n\n", "msg_date": "Sat, 10 Jan 2009 07:40:37 -0500", "msg_from": "Ron <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, Jan 10, 2009 at 5:40 AM, Ron <[email protected]> wrote:\n> At 03:28 PM 1/8/2009, Merlin Moncure wrote:\n>> just be aware of the danger . hard reset (power off) class of failure\n>> when fsync = off means you are loading from backups.\n>\n> That's what redundant power conditioning UPS's are supposed to help prevent\n> ;-)\n\nBut of course, they can't prevent them, but only reduce the likelihood\nof their occurrance. Everyone who's working in large hosting\nenvironments has at least one horror story to tell about a power\noutage that never should have happened.\n\n> I would not recommend fysnc = off if you do not have other safety measures\n> in place to protect against data loss because of a power event..\n> (At least for most DB applications.)\n\nAgreed. Keep in mind that you'll be losing whatever wasn't\ntransferred to the backup machines.\n\n> ...and of course, those lucky few with bigger budgets can use SSD's and not\n> care what fsync is set to.\n\nWould that prevent any corruption if the writes got out of order\nbecause of lack of fsync? Or partial writes? Or wouldn't fsync still\nneed to be turned on to keep the data safe.\n", "msg_date": "Sat, 10 Jan 2009 05:53:36 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "\"Scott Marlowe\" <[email protected]> writes:\n\n> On Sat, Jan 10, 2009 at 5:40 AM, Ron <[email protected]> wrote:\n>> At 03:28 PM 1/8/2009, Merlin Moncure wrote:\n>>> just be aware of the danger . hard reset (power off) class of failure\n>>> when fsync = off means you are loading from backups.\n>>\n>> That's what redundant power conditioning UPS's are supposed to help prevent\n>> ;-)\n>\n> But of course, they can't prevent them, but only reduce the likelihood\n> of their occurrance. 
Everyone who's working in large hosting\n> environments has at least one horror story to tell about a power\n> outage that never should have happened.\n\nOr a system crash. If the kernel panics for any reason when it has dirty\nbuffers in memory the database will need to be restored.\n\n>> ...and of course, those lucky few with bigger budgets can use SSD's and not\n>> care what fsync is set to.\n>\n> Would that prevent any corruption if the writes got out of order\n> because of lack of fsync? Or partial writes? Or wouldn't fsync still\n> need to be turned on to keep the data safe.\n\nI think the idea is that with SSDs or a RAID with a battery backed cache you\ncan leave fsync on and not have any significant performance hit since the seek\ntimes are very fast for SSD. They have limited bandwidth but bandwidth to the\nWAL is rarely an issue -- just latency.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's RemoteDBA services!\n", "msg_date": "Sat, 10 Jan 2009 10:36:10 -0500", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Hi,\n\[email protected] wrote:\n> On Sat, 10 Jan 2009, Gregory Stark wrote:\n>> I think the idea is that with SSDs or a RAID with a battery backed\n>> cache you\n>> can leave fsync on and not have any significant performance hit since\n>> the seek\n>> times are very fast for SSD. They have limited bandwidth but bandwidth\n>> to the\n>> WAL is rarely an issue -- just latency.\n\nThat's also my understanding.\n\n> with SSDs having extremely good read speeds, but poor (at least by\n> comparison) write speeds I wonder if any of the RAID controllers are\n> going to get a mode where they cache writes, but don't cache reads,\n> leaving all ofyour cache to handle writes.\n\nMy understanding of SSDs so far is, that they are not that bad at\nwriting *on average*, but to perform wear-leveling, they sometimes have\nto shuffle around multiple blocks at once. So there are pretty awful\nspikes for writing latency (IIRC more than 100ms has been measured on\ncheaper disks).\n\nA battery backed cache could theoretically flatten those, as long as\nyour avg. WAL throughput is below the SSDs avg. writing throughput.\n\nRegards\n\nMarkus Wanner\n", "msg_date": "Sat, 10 Jan 2009 20:00:59 +0100", "msg_from": "Markus Wanner <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, Jan 10, 2009 at 12:00 PM, Markus Wanner <[email protected]> wrote:\n> Hi,\n>\n> [email protected] wrote:\n>> On Sat, 10 Jan 2009, Gregory Stark wrote:\n>>> I think the idea is that with SSDs or a RAID with a battery backed\n>>> cache you\n>>> can leave fsync on and not have any significant performance hit since\n>>> the seek\n>>> times are very fast for SSD. They have limited bandwidth but bandwidth\n>>> to the\n>>> WAL is rarely an issue -- just latency.\n>\n> That's also my understanding.\n>\n>> with SSDs having extremely good read speeds, but poor (at least by\n>> comparison) write speeds I wonder if any of the RAID controllers are\n>> going to get a mode where they cache writes, but don't cache reads,\n>> leaving all ofyour cache to handle writes.\n>\n> My understanding of SSDs so far is, that they are not that bad at\n> writing *on average*, but to perform wear-leveling, they sometimes have\n> to shuffle around multiple blocks at once. 
So there are pretty awful\n> spikes for writing latency (IIRC more than 100ms has been measured on\n> cheaper disks).\n\nMultiply it by 10 and apply to both reads and writes for most cheap\nSSDs when doing random writes and reads mixed together. Which is why\nso many discussions specificall mention the intel XM series, because\nthey don't suck like that. They keep good access times even under\nseveral random read / write threads.\n\nSome review of the others was posted here a while back and it was\nastounding how slow the others became in a mixed read / write\nbenchmark.\n", "msg_date": "Sat, 10 Jan 2009 12:10:44 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, 10 Jan 2009, Gregory Stark wrote:\n\n>>> ...and of course, those lucky few with bigger budgets can use SSD's and not\n>>> care what fsync is set to.\n>>\n>> Would that prevent any corruption if the writes got out of order\n>> because of lack of fsync? Or partial writes? Or wouldn't fsync still\n>> need to be turned on to keep the data safe.\n>\n> I think the idea is that with SSDs or a RAID with a battery backed cache you\n> can leave fsync on and not have any significant performance hit since the seek\n> times are very fast for SSD. They have limited bandwidth but bandwidth to the\n> WAL is rarely an issue -- just latency.\n\nI don't think that this is true, even if your SSD is battery backed RAM \n(as opposed to the flash based devices that have slower writes than \nhigh-end hard drives) you can complete 'writes' to the system RAM faster \nthan the OS can get the data to the drive, so if you don't do a fsync you \ncan still loose a lot in a power outage.\n\nraid controllers with battery backed ram cache will make the fsyncs very \ncheap (until the cache fills up anyway)\n\nwith SSDs having extremely good read speeds, but poor (at least by \ncomparison) write speeds I wonder if any of the RAID controllers are going \nto get a mode where they cache writes, but don't cache reads, leaving all \nofyour cache to handle writes.\n\nDavid Lang\n", "msg_date": "Sat, 10 Jan 2009 11:48:35 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "[email protected] writes:\n\n> On Sat, 10 Jan 2009, Markus Wanner wrote:\n>\n>> My understanding of SSDs so far is, that they are not that bad at\n>> writing *on average*, but to perform wear-leveling, they sometimes have\n>> to shuffle around multiple blocks at once. So there are pretty awful\n>> spikes for writing latency (IIRC more than 100ms has been measured on\n>> cheaper disks).\n\nThat would be fascinating. And frightening. A lot of people have been\nrecommending these for WAL disks and this would be make them actually *worse*\nthan regular drives.\n\n> well, I have one of those cheap disks.\n>\n> brand new out of the box, format the 32G drive, then copy large files to it\n> (~1G per file). this should do almost no wear-leveling, but it's write\n> performance is still poor and it has occasional 1 second pauses.\n\nThis isn't similar to the way WAL behaves though. What you're testing is the\nbehaviour when the bandwidth to the SSD is saturated. At that point some point\nin the stack, whether in the SSD, the USB hardware or driver, or OS buffer\ncache can start to queue up writes. 
The stalls you see could be the behaviour\nwhen that queue fills up and it needs to push back to higher layers.\n\nTo simulate WAL you want to transfer smaller volumes of data, well below the\nbandwidth limit of the drive, fsync the data, then pause a bit repeat. Time\neach fsync and see whether the time they take is proportional to the amount of\ndata written in the meantime or whether they randomly spike upwards.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's Slony Replication support!\n", "msg_date": "Sat, 10 Jan 2009 16:19:23 -0500", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, 10 Jan 2009, Markus Wanner wrote:\n\n> [email protected] wrote:\n>> On Sat, 10 Jan 2009, Gregory Stark wrote:\n>>> I think the idea is that with SSDs or a RAID with a battery backed\n>>> cache you\n>>> can leave fsync on and not have any significant performance hit since\n>>> the seek\n>>> times are very fast for SSD. They have limited bandwidth but bandwidth\n>>> to the\n>>> WAL is rarely an issue -- just latency.\n>\n> That's also my understanding.\n>\n>> with SSDs having extremely good read speeds, but poor (at least by\n>> comparison) write speeds I wonder if any of the RAID controllers are\n>> going to get a mode where they cache writes, but don't cache reads,\n>> leaving all ofyour cache to handle writes.\n>\n> My understanding of SSDs so far is, that they are not that bad at\n> writing *on average*, but to perform wear-leveling, they sometimes have\n> to shuffle around multiple blocks at once. So there are pretty awful\n> spikes for writing latency (IIRC more than 100ms has been measured on\n> cheaper disks).\n\nwell, I have one of those cheap disks.\n\nbrand new out of the box, format the 32G drive, then copy large files to \nit (~1G per file). this should do almost no wear-leveling, but it's write \nperformance is still poor and it has occasional 1 second pauses.\n\nI for my initial tests I hooked it up to a USB->SATA adapter and the write \nspeed is showing about half of what I can get on a 1.5TB SATA drive hooked \nto the same system.\n\nthe write speed is fairly comparable to what you can do with slow laptop \ndrives (even ignoring the pauses)\n\nread speed is much better (and I think limited by the USB)\n\nthe key thing with any new storage technology (including RAID controller) \nis that you need to do your own testing, treat the manufacturers specs as \nideal conditions or 'we guarentee that the product will never do better \nthan this' specs\n\nImation has a white paper on their site about solid state drive \nperformance that is interesting. 
among other things it shows that \nhigh-speed SCSI drives are still a significant win in \nrandom-write workloads\n\n\nat this point, if I was specing out a new high-end system I would be \nlooking at and testing somthing like the following\n\nSSD for read-mostly items (OS, possibly some indexes)\n15K SCSI drives for heavy writing (WAL, indexes, temp tables, etc)\nSATA drives for storage capacity (table contents)\n\nDavid Lang\n", "msg_date": "Sat, 10 Jan 2009 13:28:11 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "At 10:36 AM 1/10/2009, Gregory Stark wrote:\n>\"Scott Marlowe\" <[email protected]> writes:\n>\n> > On Sat, Jan 10, 2009 at 5:40 AM, Ron <[email protected]> wrote:\n> >> At 03:28 PM 1/8/2009, Merlin Moncure wrote:\n> >>> just be aware of the danger . hard reset (power off) class of failure\n> >>> when fsync = off means you are loading from backups.\n> >>\n> >> That's what redundant power conditioning UPS's are supposed to \n> help prevent\n> >> ;-)\n> >\n> > But of course, they can't prevent them, but only reduce the likelihood\n> > of their occurrance. Everyone who's working in large hosting\n> > environments has at least one horror story to tell about a power\n> > outage that never should have happened.\n>\n>Or a system crash. If the kernel panics for any reason when it has dirty\n>buffers in memory the database will need to be restored.\nA power conditioning UPS should prevent a building wide or circuit \nlevel bad power event, caused by either dirty power or a power loss, \nfrom affecting the host. Within the design limits of the UPS in \nquestion of course.\n\nSo the real worry with fsync = off in a environment with redundant \ndecent UPS's is pretty much limited to host level HW failures, SW \ncrashes, and unlikely catastrophes like building collapses, lightning \nstrikes, floods, etc.\nNot that your fsync setting is going to matter much in the event of \ncatastrophes in the physical environment...\n\nLike anything else, there is usually more than one way to reduce risk \nwhile at the same time meeting (realistic) performance goals.\nIf you need the performance implied by fsync off, then you have to \ntake other steps to reduce the risk of data corruption down to about \nthe same statistical level as running with fsync on. Or you have to \ndecide that you are willing to life with the increased risk (NOT my \nrecommendation for most DB hosting scenarios.)\n\n\n> >> ...and of course, those lucky few with bigger budgets can use \n> SSD's and not\n> >> care what fsync is set to.\n> >\n> > Would that prevent any corruption if the writes got out of order\n> > because of lack of fsync? Or partial writes? Or wouldn't fsync still\n> > need to be turned on to keep the data safe.\n>\n>I think the idea is that with SSDs or a RAID with a battery backed cache you\n>can leave fsync on and not have any significant performance hit since the seek\n>times are very fast for SSD. They have limited bandwidth but bandwidth to the\n>WAL is rarely an issue -- just latency.\nYes, Greg understands what I meant here. In the case of SSDs, the \nperformance hit of fsync = on is essentially zero. 
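To put the knobs being argued about in one place -- a sketch only, not a
recommendation, and note synchronous_commit does not exist before 8.3:

   # postgresql.conf
   fsync = on                   # leave on; let the BBU cache or SSD absorb the cost
   #fsync = off                 # only as a throwaway test of how much the syncs hurt
   synchronous_commit = off     # 8.3+ "fsync lite": can lose the last few commits in a crash, but no corruption
   wal_sync_method = fdatasync  # platform dependent; worth testing the alternatives

The quick way to see what the syncs cost under load, per Merlin's earlier
suggestion, is to compare select-only against the regular read/write mix:

   pgbench -S -c 10 -t 10000    # select-only, nothing to sync
   pgbench -c 10 -t 10000       # regular TPC-B-ish transactions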
In the case of \nbattery backed RAM caches for RAID arrays, the efficacy is dependent \non how the size of the cache compares with the working set of the \ndisk access pattern.\n\nRon \n\n", "msg_date": "Sat, 10 Jan 2009 16:56:56 -0500", "msg_from": "Ron <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Ron <[email protected]> writes:\n\n> At 10:36 AM 1/10/2009, Gregory Stark wrote:\n>>\n>> Or a system crash. If the kernel panics for any reason when it has dirty\n>> buffers in memory the database will need to be restored.\n>\n> A power conditioning UPS should prevent a building wide or circuit level bad\n> power event\n\nExcept of course those caused *by* a faulty UPS. Or for that matter by the\npower supply in the computer or drive array, or someone just accidentally\nhitting the wrong power button.\n\nI'm surprised people are so confident in their kernels though. I know some\ncomputers with uptimes measured in years but I know far more which don't.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's RemoteDBA services!\n", "msg_date": "Sat, 10 Jan 2009 17:09:55 -0500", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, 10 Jan 2009, Gregory Stark wrote:\n\n> [email protected] writes:\n>\n>> On Sat, 10 Jan 2009, Markus Wanner wrote:\n>>\n>>> My understanding of SSDs so far is, that they are not that bad at\n>>> writing *on average*, but to perform wear-leveling, they sometimes have\n>>> to shuffle around multiple blocks at once. So there are pretty awful\n>>> spikes for writing latency (IIRC more than 100ms has been measured on\n>>> cheaper disks).\n>\n> That would be fascinating. And frightening. A lot of people have been\n> recommending these for WAL disks and this would be make them actually *worse*\n> than regular drives.\n>\n>> well, I have one of those cheap disks.\n>>\n>> brand new out of the box, format the 32G drive, then copy large files to it\n>> (~1G per file). this should do almost no wear-leveling, but it's write\n>> performance is still poor and it has occasional 1 second pauses.\n>\n> This isn't similar to the way WAL behaves though. What you're testing is the\n> behaviour when the bandwidth to the SSD is saturated. At that point some point\n> in the stack, whether in the SSD, the USB hardware or driver, or OS buffer\n> cache can start to queue up writes. The stalls you see could be the behaviour\n> when that queue fills up and it needs to push back to higher layers.\n>\n> To simulate WAL you want to transfer smaller volumes of data, well below the\n> bandwidth limit of the drive, fsync the data, then pause a bit repeat. Time\n> each fsync and see whether the time they take is proportional to the amount of\n> data written in the meantime or whether they randomly spike upwards.\n\nif you have a specific benchmark for me to test I would be happy to do \nthis.\n\nthe test that I did is basicly the best-case for the SSD (more-or-less \nsequential writes where the vendors claim that the drives match or \nslightly outperform the traditional disks). 
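One rough way to approximate the per-fsync timing test suggested above, without writing a custom program, is to force every small write to disk and watch the individual call times. This is only a sketch: it assumes a dd new enough to understand oflag=dsync, and /mnt/ssd/testfile is just a placeholder path on the drive under test.

    # each write() blocks until the data reaches the media (O_DSYNC), so the
    # per-call times that strace -T prints approximate per-write sync latency;
    # /mnt/ssd/testfile is a placeholder for wherever the test drive is mounted
    strace -T -e trace=write \
        dd if=/dev/zero of=/mnt/ssd/testfile bs=8k count=200 oflag=dsync

If the reported times stay flat at a few milliseconds the drive is keeping up; occasional calls that jump to hundreds of milliseconds would match the wear-leveling stalls mentioned earlier in the thread.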
for random writes the vendors \nput SSDs at fewer IOPS than 5400 rpm drives, let along 15K rpm drives.\n\ntake a look at this paper \nhttp://www.imation.com/PageFiles/83/Imation-SSD-Performance-White-Paper.pdf\n\nthis is not one of the low-performance drives, they include a sandisk \ndrive in the paper that shows significantly less performance (but the same \nbasic pattern) than the imation drives.\n\nDavid Lang\n", "msg_date": "Sat, 10 Jan 2009 14:40:51 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, 10 Jan 2009, Ron wrote:\n\n> At 10:36 AM 1/10/2009, Gregory Stark wrote:\n>> \"Scott Marlowe\" <[email protected]> writes:\n>> \n>> > On Sat, Jan 10, 2009 at 5:40 AM, Ron <[email protected]> wrote:\n>> >> At 03:28 PM 1/8/2009, Merlin Moncure wrote:\n>> >>> just be aware of the danger . hard reset (power off) class of failure\n>> >>> when fsync = off means you are loading from backups.\n>> >>\n>> >> That's what redundant power conditioning UPS's are supposed to help \n>> prevent\n>> >> ;-)\n>> >\n>> > But of course, they can't prevent them, but only reduce the likelihood\n>> > of their occurrance. Everyone who's working in large hosting\n>> > environments has at least one horror story to tell about a power\n>> > outage that never should have happened.\n>> \n>> Or a system crash. If the kernel panics for any reason when it has dirty\n>> buffers in memory the database will need to be restored.\n> A power conditioning UPS should prevent a building wide or circuit level bad \n> power event, caused by either dirty power or a power loss, from affecting the \n> host. Within the design limits of the UPS in question of course.\n>\n> So the real worry with fsync = off in a environment with redundant decent \n> UPS's is pretty much limited to host level HW failures, SW crashes, and \n> unlikely catastrophes like building collapses, lightning strikes, floods, \n> etc.\n\nI've seen datacenters with redundant UPSs go dark unexpectedly. it's less \ncommon, but it does happen.\n\n> Not that your fsync setting is going to matter much in the event of \n> catastrophes in the physical environment...\n\nquestionable, but sometimes true. in the physical environment disasters \nyou will loose access to your data for a while, but after the drives are \ndug out of the rubble (or dried out from the flood) the data can probably \nbe recovered.\n\nfor crying out loud, they were able to recover most of the data from the \nhard drives in the latest shuttle disaster.\n\n> Like anything else, there is usually more than one way to reduce risk while \n> at the same time meeting (realistic) performance goals.\n\nvery true.\n\n>> >> ...and of course, those lucky few with bigger budgets can use SSD's and \n>> not\n>> >> care what fsync is set to.\n>> >\n>> > Would that prevent any corruption if the writes got out of order\n>> > because of lack of fsync? Or partial writes? Or wouldn't fsync still\n>> > need to be turned on to keep the data safe.\n>> \n>> I think the idea is that with SSDs or a RAID with a battery backed cache \n>> you\n>> can leave fsync on and not have any significant performance hit since the \n>> seek\n>> times are very fast for SSD. They have limited bandwidth but bandwidth to \n>> the\n>> WAL is rarely an issue -- just latency.\n> Yes, Greg understands what I meant here. 
In the case of SSDs, the \n> performance hit of fsync = on is essentially zero.\n\nthis is definantly not the case.\n\nfsync off the data stays in memory and may never end up being sent to the \ndrive. RAM speeds are several orders of magnatude faster than the \ninterfaces to the drives (or even to the RAID controllers in high-speed \nslots)\n\nit may be that it's fast enough (see the other posts disputing that), but \ndon't think that it's the same.\n\nDavid Lang\n\n> In the case of battery \n> backed RAM caches for RAID arrays, the efficacy is dependent on how the size \n> of the cache compares with the working set of the disk access pattern.\n>\n> Ron \n>\n>\n", "msg_date": "Sat, 10 Jan 2009 15:09:00 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Ron wrote:\n>> I think the idea is that with SSDs or a RAID with a battery backed \n>> cache you\n>> can leave fsync on and not have any significant performance hit since \n>> the seek\n>> times are very fast for SSD. They have limited bandwidth but \n>> bandwidth to the\n>> WAL is rarely an issue -- just latency.\n> Yes, Greg understands what I meant here. In the case of SSDs, the \n> performance hit of fsync = on is essentially zero. In the case of \n> battery backed RAM caches for RAID arrays, the efficacy is dependent \n> on how the size of the cache compares with the working set of the disk \n> access pattern.\nOut of interest, if we take a scenario where the working set of updates \nexceeds the size\nof the RAID card cache, has anyone tested the relative performance of \nusing the battery\nbacked RAID on WAL only and non-cached access to other drives?\n\nAnd perhaps the similar scenario with (hot) indices and WAL on a \nbattery-backed device\non the data on uncached devices?\n\nIt seems to me that if you're going to thrash the cache from data \nupdates (presumably\ncourtesy of full-page-write), then you might be better to partition the \ncache - and a\nthrashed cache can be hardly any better than no cache (so why have one?).\n\n", "msg_date": "Sun, 11 Jan 2009 08:42:01 +0000", "msg_from": "James Mansion <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, Jan 10, 2009 at 2:56 PM, Ron <[email protected]> wrote:\n> At 10:36 AM 1/10/2009, Gregory Stark wrote:\n>>\n>> \"Scott Marlowe\" <[email protected]> writes:\n>>\n>> > On Sat, Jan 10, 2009 at 5:40 AM, Ron <[email protected]> wrote:\n>> >> At 03:28 PM 1/8/2009, Merlin Moncure wrote:\n>> >>> just be aware of the danger . hard reset (power off) class of failure\n>> >>> when fsync = off means you are loading from backups.\n>> >>\n>> >> That's what redundant power conditioning UPS's are supposed to help\n>> >> prevent\n>> >> ;-)\n>> >\n>> > But of course, they can't prevent them, but only reduce the likelihood\n>> > of their occurrance. Everyone who's working in large hosting\n>> > environments has at least one horror story to tell about a power\n>> > outage that never should have happened.\n>>\n>> Or a system crash. If the kernel panics for any reason when it has dirty\n>> buffers in memory the database will need to be restored.\n>\n> A power conditioning UPS should prevent a building wide or circuit level bad\n> power event, caused by either dirty power or a power loss, from affecting\n> the host. 
Within the design limits of the UPS in question of course.\n\nWe had an electrician working who was supposed to have a tray\nunderneath their work. They didn't. A tiny bit of copper flew into a\npower conditioner. The power conditioner blew out, fed back to the\nother two power conditionsers, which blew, they fed back to the UPSs\nand blew them up, the power surge blew out the switch to allow the\ndiesel generator to take over. We were running 100ft extension cables\nfrom dirty wall power sockets all over the building to get the hosting\ncenter back up. There were about 12 or so database servers. The only\none that came back up without data loss or corruption was mine,\nrunning pgsql. The others, running Oracle, db2, Ingress and a few\nother databases all came back up with corrupted data on their drives\nand forced nearly day long restores.\n\nThere is no protection against a kernel crash or a power loss that is\nabsolute. And don't ever believe there is. Human error is always a\npossibility you have to be prepared to deal with.\n\n> So the real worry with fsync = off in a environment with redundant decent\n> UPS's is pretty much limited to host level HW failures, SW crashes, and\n> unlikely catastrophes like building collapses, lightning strikes, floods,\n> etc.\n> Not that your fsync setting is going to matter much in the event of\n> catastrophes in the physical environment...\n\nSure it will. SCSI cable gets pulled out, power supply fails, mobo\njust dies outright, the above mentioned situation with the power being\nlost to the data center. Meteor strikes, not so much.\n", "msg_date": "Sun, 11 Jan 2009 11:07:19 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, Jan 11, 2009 at 11:07 AM, Scott Marlowe <[email protected]> wrote:\n> running pgsql. The others, running Oracle, db2, Ingress and a few\n> other databases all came back up with corrupted data on their drives\n> and forced nearly day long restores.\n\nBefore anyone thinks I'm slagging all other databases here, the\nproblem wasn't the db software, but the sysadmin / dbas who\nadministered those machines. They stared in horror when I tested my\nserver by pulling the power cords out the back during acceptance\ntesting.\n\nThey also told me we could never lose power in the hosting center\nbecause it was so wonder and redundant and that I was wasting my time.\n We lost power on a Friday afternoon. I went skiing that weekend with\na friend, my system was up and running when it got power again. I'm\npretty sure the other dbas at that company weren't skiing that\nweekend. :)\n", "msg_date": "Sun, 11 Jan 2009 11:13:09 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "--- On Sun, 11/1/09, Scott Marlowe <[email protected]> wrote:\n\n> They also told me we could never lose power in the hosting\n> center\n> because it was so wonder and redundant and that I was\n> wasting my time.\n\nWe'll that's just plain silly, at the very least there's always going to be some breakers / fuzes in between the power and the machines.\n\nIn fact in our building there's quite a few breaker points between our comms room on the 3rd floor and the ups / generator in the basement. 
It's a crappy implementation actually.\n\n\n\n \n", "msg_date": "Sun, 11 Jan 2009 20:59:58 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, 11 Jan 2009, Glyn Astill wrote:\n\n> --- On Sun, 11/1/09, Scott Marlowe <[email protected]> wrote:\n>\n>> They also told me we could never lose power in the hosting\n>> center\n>> because it was so wonder and redundant and that I was\n>> wasting my time.\n>\n> We'll that's just plain silly, at the very least there's always going to \n> be some breakers / fuzes in between the power and the machines.\n>\n> In fact in our building there's quite a few breaker points between our \n> comms room on the 3rd floor and the ups / generator in the basement. \n> It's a crappy implementation actually.\n\nthe response I get from people is that they give their servers redundant \npoewr supplies and put them on seperate circuits so they must be safe from \nthat.\n\nbut as commented, that's not enough in the real world.\n\nDavid Lang\n", "msg_date": "Sun, 11 Jan 2009 15:35:22 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" } ]
[ { "msg_contents": "Tom Lane wrote:\n> \"Ryan Hansen\" <[email protected]> writes:\n[...]\n>> but when I set the shared buffer in PG and restart\n>> the service, it fails if it's above about 8 GB.\n>\n> Fails how? And what PG version is that?\n\nThe thread seems to end here as far as the specific question was\nconcerned. I just ran into the same issue though, also on Ubuntu Hardy\nwith PG 8.2.7 - if I set shared buffers to 8 GB, starting the server\nfails with\n\n2009-01-06 17:15:09.367 PST 6804 DETAIL: Failed system call was\nshmget(key=5432001, size=8810725376, 03600).\n\nthen I take the request size value from the error and do\n\necho 8810725376 > /proc/sys/kernel/shmmax\n\nand get the same error again. If I try that with shared_buffers = 7 GB\n(setting shmmax to 7706542080), it works. Even if I double the value\nfor 8 GB and set shmmax to 17621450752, I get the same error. There\nseems to be a ceiling.\n\nEarlier in this thread somebody mentioned they had set shared buffers\nto 24 GB on CentOS, so it seems to be a platform issue.\n\nI also tried to double SHMMNI, from 4096 -> 8192, as the PG error\nsuggests, but to no avail.\n\nThis is a new 16-core Dell box with 64 GB of RAM and a mid-range\ncontroller with 8 spindles in RAID 0+1, one big filesystem. The\ndatabase is currently 55 GB in size with a web application type OLTP\nload, doing ~6000 tps at peak time (and growing fast).\n\nThe problem surfaced here because we just upgraded from an 8-core\nserver with 16 GB RAM with very disappointing results initially. The\nnew server would go inexplicably slow near peak time, with context\nswitches ~100k and locks going ballistic. It seemed worse than on the\nsmaller machine.\n\nUntil we revised the configuration which I'd just copied over from the\nold box, and adjusted shared_buffers from 2 GB -> 4 GB. Now it seem to\nperform well. I found that surprising given that 2 GB is quite a lot\nalready and since I'd gathered that the benefits of cranking up shared\nbuffers are not scientifically proven, or that often if not most of\nthe time the OS's caching mechanisms are adequate or even superior to\nwhat you might achieve by fiddling with the PG configuration and\nsetting shared buffers very high.\n\nRegards,\n\nFrank\n", "msg_date": "Wed, 7 Jan 2009 03:20:08 +0100", "msg_from": "\"Frank Joerdens\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Memory Allocation (8 GB shared buffer limit on Ubuntu Hardy)" }, { "msg_contents": "\"Frank Joerdens\" <[email protected]> writes:\n> then I take the request size value from the error and do\n> echo 8810725376 > /proc/sys/kernel/shmmax\n> and get the same error again.\n\nWhat about shmall?\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 06 Jan 2009 21:23:13 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Memory Allocation (8 GB shared buffer limit on Ubuntu Hardy) " }, { "msg_contents": "On Wed, Jan 7, 2009 at 3:23 AM, Tom Lane <[email protected]> wrote:\n> \"Frank Joerdens\" <[email protected]> writes:\n>> then I take the request size value from the error and do\n>> echo 8810725376 > /proc/sys/kernel/shmmax\n>> and get the same error again.\n>\n> What about shmall?\n\nYes that works, it was set to\n\nroot@db04:~# cat /proc/sys/kernel/shmall\n2097152\nroot@db04:~# getconf PAGE_SIZE\n4096\n\nwhich is 2097152 * 4096 = 85899345920 (presumably a Ubuntu default),\ni.e. slightly less than the required shmmax, which explains why 7 GB\nworks but 8 doesn't. 
8810725376 / 4096 = 2151056 would appear to be\nright, and indeed after doing\n\nroot@db04:~# echo 2151056 > /proc/sys/kernel/shmall\n\nit works.\n\nThanks!\n\nFrank\n", "msg_date": "Wed, 7 Jan 2009 04:46:22 +0100", "msg_from": "\"Frank Joerdens\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Memory Allocation (8 GB shared buffer limit on Ubuntu Hardy)" } ]
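For anyone hitting the same ceiling: the writes to /proc above only last until the next reboot. A sketch of making both limits permanent via sysctl follows; the numbers are illustrative round values comfortably above the 8810725376-byte request from the error message, not tuned recommendations.

    # /etc/sysctl.conf -- shmmax is in bytes, shmall is in PAGE_SIZE pages (4096 here);
    # both values below are illustrative, just keep them above the requested segment size
    kernel.shmmax = 9000000000
    kernel.shmall = 2200000

    # load the new values without rebooting
    sysctl -p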
[ { "msg_contents": "Hi,\n\nWe have recently installed slony and tsrted replication on one of our test\nmachines. When we start inserting data in to the replicated database, the\nreplication is taking properly. Over a period of time the lag increases\nbetween the two database.\n\nLooking further we found that, sl_log_1 and sl_log2 are not getting\ntruncated. Following are the versions which we are using. Slony has been\ninstalled for the first time\n\nPostgres version: 8.3.5\nslony version: 2.0.0\n\nCan someone let me know what we should be looking at?\n\nFollowing is the error which we get:\n\nNOTICE: Slony-I: cleanup stale sl_nodelock entry for pid=17961\nCONTEXT: SQL statement \"SELECT \"_inventorycluster\".cleanupNodelock()\"\nPL/pgSQL function \"cleanupevent\" line 83 at PERFORM\nNOTICE: Slony-I: Logswitch to sl_log_1 initiated\nCONTEXT: SQL statement \"SELECT \"_inventorycluster\".logswitch_start()\"\nPL/pgSQL function \"cleanupevent\" line 101 at PERFORM\n2009-01-06 22:10:43 PST INFO cleanupThread: 0.149 seconds for\ncleanupEvent()\n2009-01-06 22:10:43 PST ERROR cleanupThread: \"vacuum analyze\n\"_inventorycluster\".sl_event;\" - 2009-01-06 22:10:43 PST ERROR\ncleanupThread: \"vacuum analyze \"_inventorycluster\".sl_confirm;\" -\n2009-01-06 22:10:43 PST ERROR cleanupThread: \"vacuum analyze\n\"_inventorycluster\".sl_setsync;\" - 2009-01-06 22:10:43 PST ERROR\ncleanupThread: \"vacuum analyze \"_inventorycluster\".sl_log_1;\" - 2009-01-06\n22:10:43 PST ERROR cleanupThread: \"vacuum analyze\n\"_inventorycluster\".sl_log_2;\" - 2009-01-06 22:10:43 PST ERROR\ncleanupThread: \"vacuum analyze \"_inventorycluster\".sl_seqlog;\" - 2009-01-06\n22:10:43 PST ERROR cleanupThread: \"vacuum analyze\n\"_inventorycluster\".sl_archive_counter;\" - 2009-01-06 22:10:43 PST ERROR\ncleanupThread: \"vacuum analyze \"pg_catalog\".pg_listener;\" - 2009-01-06\n22:10:43 PST ERROR cleanupThread: \"vacuum analyze\n\"pg_catalog\".pg_statistic;\" - 2009-01-06 22:10:43 PST INFO\ncleanupThread: 0.280 seconds for vacuuming\nNOTICE: Slony-I: log switch to sl_log_1 still in progress - sl_log_2 not\ntruncated\nCONTEXT: PL/pgSQL function \"cleanupevent\" line 99 at assignment\n2009-01-06 22:21:31 PST INFO cleanupThread: 0.127 seconds for\ncleanupEvent()\n\nRegards,\nNimesh.\n\nHi,We have recently installed slony and tsrted replication on one of our test machines. When we start inserting data in to the replicated database, the replication is taking properly. Over a period of time the lag increases between the two database.\nLooking further we found that, sl_log_1 and sl_log2 are not getting truncated. Following are the versions which we are using. 
Slony has been installed for the first timePostgres version: 8.3.5slony version: 2.0.0\nCan someone let me know what we should be looking at?Following is the error which we get:NOTICE:  Slony-I: cleanup stale sl_nodelock entry for pid=17961CONTEXT:  SQL statement \"SELECT  \"_inventorycluster\".cleanupNodelock()\"\nPL/pgSQL function \"cleanupevent\" line 83 at PERFORMNOTICE:  Slony-I: Logswitch to sl_log_1 initiatedCONTEXT:  SQL statement \"SELECT  \"_inventorycluster\".logswitch_start()\"PL/pgSQL function \"cleanupevent\" line 101 at PERFORM\n2009-01-06 22:10:43 PST INFO   cleanupThread:    0.149 seconds for cleanupEvent()2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_event;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_confirm;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_setsync;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_log_1;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_log_2;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_seqlog;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"_inventorycluster\".sl_archive_counter;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"pg_catalog\".pg_listener;\" - 2009-01-06 22:10:43 PST ERROR  cleanupThread: \"vacuum  analyze \"pg_catalog\".pg_statistic;\" - 2009-01-06 22:10:43 PST INFO   cleanupThread:    0.280 seconds for vacuuming\nNOTICE:  Slony-I: log switch to sl_log_1 still in progress - sl_log_2 not truncatedCONTEXT:  PL/pgSQL function \"cleanupevent\" line 99 at assignment2009-01-06 22:21:31 PST INFO   cleanupThread:    0.127 seconds for cleanupEvent()\nRegards,Nimesh.", "msg_date": "Wed, 7 Jan 2009 18:18:26 +0530", "msg_from": "\"Nimesh Satam\" <[email protected]>", "msg_from_op": true, "msg_subject": "Sl_log_1 and sl_log_2 not getting truncated." }, { "msg_contents": "On Wed, 2009-01-07 at 18:18 +0530, Nimesh Satam wrote:\n> Hi,\n> \n> We have recently installed slony and tsrted replication on one of our\n> test machines. When we start inserting data in to the replicated\n> database, the replication is taking properly. Over a period of time\n> the lag increases between the two database.\n\nYou should sign up to the Slony list and ask your question there.\n\n-- \nBrad Nicholson 416-673-4106\nDatabase Administrator, Afilias Canada Corp.\n\n", "msg_date": "Wed, 07 Jan 2009 10:41:25 -0500", "msg_from": "Brad Nicholson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Sl_log_1 and sl_log_2 not getting truncated." } ]
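Independent of the Slony list discussion, a quick way to confirm that the symptom really is the log tables growing without bound is to count the rows queued in them over time. This sketch assumes the cluster schema name _inventorycluster seen in the log output above.

    -- run this periodically; if both counts only ever grow, the log switch is
    -- never completing and the truncate never happens
    SELECT 'sl_log_1' AS log_table, count(*) FROM "_inventorycluster".sl_log_1
    UNION ALL
    SELECT 'sl_log_2', count(*) FROM "_inventorycluster".sl_log_2;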
[ { "msg_contents": "---------- Forwarded message ----------\nFrom: jose fuenmayor <[email protected]>\nDate: Wed, Jan 7, 2009 at 2:56 PM\nSubject: Casting issue!!\nTo: [email protected]\n\n\nHi all I am trying to migrate from postgresql 8.2.x to 8.3.x, i have an\nissue with casting values when i try to perform the auto cast , it does not\nwork and I get an error, how can i perform auto casting on 8.3 without\nrewrite my source code, I am using pl/pgsql. I mean i dont want to write\nvalue::dataType. I dont want to use explicit type cast. Maybe change\nsomething in the config files? to make it work like 8.2 on tha regard(cast\nvalues).\nthanks a lot!!!\nKind Regards;\nJose Fuenmayor\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nç\n\n---------- Forwarded message ----------From: jose fuenmayor <[email protected]>Date: Wed, Jan 7, 2009 at 2:56 PM\nSubject: Casting issue!!To: [email protected] all I am trying to migrate from postgresql 8.2.x to  8.3.x, i have an issue with casting values when i try to perform the auto cast , it does not work and I get an error, how can i perform  auto casting on 8.3 without rewrite my source code, I am using pl/pgsql. I mean i dont want to write value::dataType. I dont want to use explicit type cast. Maybe change something in the config files? to make it work like 8.2 on tha regard(cast values).\n\nthanks a lot!!!Kind Regards;Jose Fuenmayor\nç", "msg_date": "Wed, 7 Jan 2009 14:58:17 +0100", "msg_from": "\"jose fuenmayor\" <[email protected]>", "msg_from_op": true, "msg_subject": "Fwd: Casting issue!!" }, { "msg_contents": "On Wed, 7 Jan 2009, jose fuenmayor wrote:\n> Hi all I am trying to migrate from postgresql 8.2.x to  8.3.x, i have an issue with casting values when i try to\n> perform the auto cast , it does not work and I get an error, how can i perform  auto casting on 8.3 without\n> rewrite my source code, I am using pl/pgsql. I mean i dont want to write value::dataType. I dont want to use\n> explicit type cast. Maybe change something in the config files? to make it work like 8.2 on tha regard(cast\n> values).\n\nWhat does that have to do with performance?\n\nMatthew\n\n-- \nIlliteracy - I don't know the meaning of the word!", "msg_date": "Wed, 7 Jan 2009 14:01:52 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Fwd: Casting issue!!" }, { "msg_contents": "Hi Jose,\n\nwhy haven't you post an example of the failing query, data and the exact\nerror message? The casting should work on 8.3 (and it works for me) so I\nguess there are some invalid data, invalid SQL or something like that.\n\nAnyway I doubt this is a performance issue - this falls into generic SQL\nmailing list.\n\nregards\nTomas\n\n\n> ---------- Forwarded message ----------\n> From: jose fuenmayor <[email protected]>\n> Date: Wed, Jan 7, 2009 at 2:56 PM\n> Subject: Casting issue!!\n> To: [email protected]\n>\n>\n> Hi all I am trying to migrate from postgresql 8.2.x to 8.3.x, i have an\n> issue with casting values when i try to perform the auto cast , it does\n> not\n> work and I get an error, how can i perform auto casting on 8.3 without\n> rewrite my source code, I am using pl/pgsql. I mean i dont want to write\n> value::dataType. I dont want to use explicit type cast. Maybe change\n> something in the config files? 
to make it work like 8.2 on tha regard(cast\n> values).\n> thanks a lot!!!\n> Kind Regards;\n> Jose Fuenmayor\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n>\n> ďż˝\n>\n\n\n", "msg_date": "Wed, 7 Jan 2009 15:07:10 +0100 (CET)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Fwd: Casting issue!!" }, { "msg_contents": "\n\n\n--- On Wed, 7/1/09, jose fuenmayor <[email protected]> wrote:\n> \n> Hi all I am trying to migrate from postgresql 8.2.x to \n> 8.3.x, i have an\n> issue with casting values when i try to perform the auto\n> cast , it does not\n> work and I get an error, how can i perform auto casting on\n> 8.3 without\n> rewrite my source code, I am using pl/pgsql. I mean i dont\n> want to write\n> value::dataType. I dont want to use explicit type cast.\n> Maybe change\n> something in the config files? to make it work like 8.2 on\n> tha regard(cast\n> values).\n> thanks a lot!!!\n> Kind Regards;\n> Jose Fuenmayor\n\nAs the others have said; you'd be better off posting this in the pgsql-general list.\n\nHowever I think you could proabably work your way around this with a function and a cast, as described here:\n\nhttp://www.depesz.com/index.php/2008/05/05/error-operator-does-not-exist-integer-text-how-to-fix-it/\n\n\n\n \n", "msg_date": "Wed, 7 Jan 2009 15:16:44 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Fwd: Casting issue!!" } ]
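The article linked above amounts to re-creating the implicit casts that 8.3 dropped. A minimal sketch for one of them (integer to text) is below; it is roughly the recipe from that article, typically needs superuser because the helper lives in pg_catalog, would have to be repeated for each type you rely on, and should be tested carefully, since re-adding implicit casts can mask genuine type mistakes.

    -- helper that renders an integer as text using functions present in 8.3
    CREATE FUNCTION pg_catalog.text(integer) RETURNS text
        STRICT IMMUTABLE LANGUAGE sql
        AS 'SELECT textin(int4out($1));';

    -- register it as an implicit cast so old 8.2-style expressions keep working
    CREATE CAST (integer AS text)
        WITH FUNCTION pg_catalog.text(integer) AS IMPLICIT;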
[ { "msg_contents": "Hello.\n\nSuppose I perform 1000 RANDOM writes into a file. These writes are saved\ninto Linux writeback buffer and are flushed to the disc asynchronously,\nthat's OK.\n\nThe question is: will physical writes be performed later in the sequence of\nphysical SECTOR position on the disc (minimizing head seeking)? Or Linux\nbackground writer knows nothing about physical on-disc placement and flushes\ndata in order it is saved in the RAM?\n\nE.g., if I write in the application:\n\na) block 835\nb) block 136\nc) block 956\nd) block 549\ne) block 942\n\ndows the Linux background writer save flush them e.g. in physical order \"136\n- 549 - 835 - 942 - 956\" or not?\n\nHello.Suppose I perform 1000 RANDOM writes into a file. These writes are saved into Linux writeback buffer and are flushed to the disc asynchronously, that's OK.The question is: will physical writes be performed later in the sequence of physical SECTOR position on the disc (minimizing head seeking)? Or Linux background writer knows nothing about physical on-disc placement and flushes data in order it is saved in the RAM?\nE.g., if I write in the application:a) block 835b) block 136c) block 956d) block 549e) block 942dows the Linux background writer save flush them e.g. in physical order \"136 - 549 - 835 - 942 - 956\" or not?", "msg_date": "Wed, 7 Jan 2009 21:39:12 +0300", "msg_from": "\"Dmitry Koterov\" <[email protected]>", "msg_from_op": true, "msg_subject": "Are random writes optimized sequentially by Linux kernel?" }, { "msg_contents": "On Wed, 7 Jan 2009, Dmitry Koterov wrote:\n\n> Hello.\n>\n> Suppose I perform 1000 RANDOM writes into a file. These writes are saved\n> into Linux writeback buffer and are flushed to the disc asynchronously,\n> that's OK.\n>\n> The question is: will physical writes be performed later in the sequence of\n> physical SECTOR position on the disc (minimizing head seeking)? Or Linux\n> background writer knows nothing about physical on-disc placement and flushes\n> data in order it is saved in the RAM?\n>\n> E.g., if I write in the application:\n>\n> a) block 835\n> b) block 136\n> c) block 956\n> d) block 549\n> e) block 942\n>\n> dows the Linux background writer save flush them e.g. in physical order \"136\n> - 549 - 835 - 942 - 956\" or not?\n\nyes, the linux IO scheduler will combine and re-order write requests.\n\nthey may end up being done 835-942-956-549-136 if the system thinks the \nhead happens to be past 549 and moving up when the requests hit the IO \nsystem.\n\nDavid Lang\n", "msg_date": "Wed, 7 Jan 2009 11:54:45 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Are random writes optimized sequentially by Linux\n kernel?" }, { "msg_contents": "OK, thank you.\n\nNow - PostgreSQL-related question. 
If the system reorders writes to minimize\nseeking, I suppose that in heavy write-loaded PostgreSQL instalation dstat\n(or iostat) realtime write statistics should be close to the maximum\npossible value reported by bonnie++ (or simple dd) utility.\n\nSo, if, for example, I have in a heavy-loaded PostgreSQL installation:\n- with a 50MB/s write speed limit reported by bonnie++ or dd (on a clean\nsystem),\n- under a heavy PostgreSQL load the write throughput is only 10MB/s\n(synchronous_commit is off, checkpoint is executed every 10 minutes or even\nmore),\n- writeback buffer (accordingly to /proc/meminfo) is not fully filled,\n- sometimes INSERTs or UPDATEs slow down in 10 second and more with no\nexplicit correlation with checkpoints\n\nthen - something goes wrong?\n\nWhat I am trying to understand - why does the system fall to a writing\nbottleneck (e.g. 10MB/s) much before it achieves the maximum disk throughput\n(e.g. 50MB/s). How could it happen if the Linux IO scheduler reorders write\noperations, so time for seeking is minimal?\n\n\n\nOr, better, I can reformulate the question. In which cases PostgreSQL may\nstall on INSERT/UPDATE operation if synchronous_commit is off and there are\nno locking between transactions? In which cases these operations lost their\ndeterministic time (in theory) and may slowdown in 100-1000 times?\n\n\n\nOn Wed, Jan 7, 2009 at 10:54 PM, <[email protected]> wrote:\n\n> On Wed, 7 Jan 2009, Dmitry Koterov wrote:\n>\n> Hello.\n>>\n>> Suppose I perform 1000 RANDOM writes into a file. These writes are saved\n>> into Linux writeback buffer and are flushed to the disc asynchronously,\n>> that's OK.\n>>\n>> The question is: will physical writes be performed later in the sequence\n>> of\n>> physical SECTOR position on the disc (minimizing head seeking)? Or Linux\n>> background writer knows nothing about physical on-disc placement and\n>> flushes\n>> data in order it is saved in the RAM?\n>>\n>> E.g., if I write in the application:\n>>\n>> a) block 835\n>> b) block 136\n>> c) block 956\n>> d) block 549\n>> e) block 942\n>>\n>> dows the Linux background writer save flush them e.g. in physical order\n>> \"136\n>> - 549 - 835 - 942 - 956\" or not?\n>>\n>\n> yes, the linux IO scheduler will combine and re-order write requests.\n>\n> they may end up being done 835-942-956-549-136 if the system thinks the\n> head happens to be past 549 and moving up when the requests hit the IO\n> system.\n>\n> David Lang\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n\nOK, thank you.Now - PostgreSQL-related question. If the system reorders writes to minimize seeking, I suppose that in heavy write-loaded PostgreSQL instalation dstat (or iostat) realtime write statistics should be close to the maximum possible value reported by bonnie++ (or simple dd) utility. \nSo, if, for example, I have in a heavy-loaded PostgreSQL installation:- with a 50MB/s write speed limit reported by bonnie++ or dd (on a clean system),- under a heavy PostgreSQL load the write throughput is only 10MB/s (synchronous_commit is off, checkpoint is executed every 10 minutes or even more), \n- writeback buffer (accordingly to /proc/meminfo) is not fully filled,- sometimes INSERTs or UPDATEs slow down in 10 second and more with no explicit correlation with checkpointsthen - something goes wrong?\nWhat I am trying to understand - why does the system fall to a writing bottleneck (e.g. 
10MB/s) much before it achieves the maximum disk throughput (e.g. 50MB/s). How could it happen if the Linux IO scheduler reorders write operations, so time for seeking is minimal?\nOr, better, I can reformulate the question. In which cases PostgreSQL may stall on INSERT/UPDATE operation if synchronous_commit is off and there are no locking between transactions? In which cases these operations lost their deterministic time (in theory) and may slowdown in 100-1000 times?\nOn Wed, Jan 7, 2009 at 10:54 PM, <[email protected]> wrote:\nOn Wed, 7 Jan 2009, Dmitry Koterov wrote:\n\n\nHello.\n\nSuppose I perform 1000 RANDOM writes into a file. These writes are saved\ninto Linux writeback buffer and are flushed to the disc asynchronously,\nthat's OK.\n\nThe question is: will physical writes be performed later in the sequence of\nphysical SECTOR position on the disc (minimizing head seeking)? Or Linux\nbackground writer knows nothing about physical on-disc placement and flushes\ndata in order it is saved in the RAM?\n\nE.g., if I write in the application:\n\na) block 835\nb) block 136\nc) block 956\nd) block 549\ne) block 942\n\ndows the Linux background writer save flush them e.g. in physical order \"136\n- 549 - 835 - 942 - 956\" or not?\n\n\nyes, the linux IO scheduler will combine and re-order write requests.\n\nthey may end up being done 835-942-956-549-136 if the system thinks the head happens to be past 549 and moving up when the requests hit the IO system.\n\nDavid Lang\n\n-- \nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Thu, 8 Jan 2009 01:32:11 +0300", "msg_from": "\"Dmitry Koterov\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Are random writes optimized sequentially by Linux kernel?" }, { "msg_contents": "On Wed, 7 Jan 2009, Dmitry Koterov wrote:\n\n> The question is: will physical writes be performed later in the sequence of\n> physical SECTOR position on the disc (minimizing head seeking)? Or Linux\n> background writer knows nothing about physical on-disc placement and flushes\n> data in order it is saved in the RAM?\n\nThe part of Linux that does this is called the elevator algorithm, and \neven the simplest I/O scheduler (the no-op one) does a merge+sort to \nschedule physical writes. The classic intro paper on this subject is \nhttp://www.linuxinsight.com/files/ols2004/pratt-reprint.pdf\n\n> What I am trying to understand - why does the system fall to a writing \n> bottleneck (e.g. 10MB/s) much before it achieves the maximum disk \n> throughput (e.g. 50MB/s). How could it happen if the Linux IO scheduler \n> reorders write operations, so time for seeking is minimal?\n\nI think you're underestimating how much impact even a minimal amount of \nseeking has. If the disk head has to move at all beyond a single track \nseek, you won't get anywhere close to the rated sequential speed on the \ndrive even if elevator sorting is helping out. 
And the minute a \ncheckpoint is involved, with its requisite fsync at the end, all the \nblocks related to that are going to be forced out of the write cache \nwithout any chance for merge+sort to lower the average disk I/O--unless \nyou spread that checkpoint write over a long period so pdflush can trickle \nto blocks out to disk.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Wed, 7 Jan 2009 18:33:50 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Are random writes optimized sequentially by Linux\n kernel?" }, { "msg_contents": "On Thu, 8 Jan 2009, Dmitry Koterov wrote:\n\n> OK, thank you.\n>\n> Now - PostgreSQL-related question. If the system reorders writes to minimize\n> seeking, I suppose that in heavy write-loaded PostgreSQL instalation dstat\n> (or iostat) realtime write statistics should be close to the maximum\n> possible value reported by bonnie++ (or simple dd) utility.\n\nthis is not the case for a couple of reasons\n\n1. bonnie++ and dd tend to write in one area, so seeks are not as big a \nfactor as writing across multiple areas\n\n2. postgres doesn't do the simple writes like you described earlier\n\nit does something like\n\nwrite 123-124-fsync-586-354-257-fsync-123-124-125-fsync\n\n(writes to the WAL journal, syncs it to make sure it's safe, then writes \nto the destinations, the n syncs, then updates the WAL to record that it's \nwritten....)\n\nthe fsync basicly tells the system, don't write anything more until these \nare done. and interrupts the nice write pattern.\n\nyou can address this by having large battery-backed caches that you write \nto and they batch things out to disk more efficiantly.\n\nor you can put your WAL on a seperate drive so that the syncs on that \ndon't affect the data drives (but you will still have syncs on the data \ndisks, just not as many of them)\n\nDavid Lang\n", "msg_date": "Wed, 7 Jan 2009 16:10:00 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: Are random writes optimized sequentially by Linux\n kernel?" }, { "msg_contents": "[email protected] wrote:\n> On Thu, 8 Jan 2009, Dmitry Koterov wrote:\n> \n>> OK, thank you.\n>>\n>> Now - PostgreSQL-related question. If the system reorders writes to\n>> minimize\n>> seeking, I suppose that in heavy write-loaded PostgreSQL instalation\n>> dstat\n>> (or iostat) realtime write statistics should be close to the maximum\n>> possible value reported by bonnie++ (or simple dd) utility.\n> \n> this is not the case for a couple of reasons\n> \n> 1. bonnie++ and dd tend to write in one area, so seeks are not as big a\n> factor as writing across multiple areas\n> \n> 2. postgres doesn't do the simple writes like you described earlier\n> \n> it does something like\n> \n> write 123-124-fsync-586-354-257-fsync-123-124-125-fsync\n> \n> (writes to the WAL journal, syncs it to make sure it's safe, then writes\n> to the destinations, the n syncs, then updates the WAL to record that\n> it's written....)\n> \n> the fsync basicly tells the system, don't write anything more until\n> these are done. 
and interrupts the nice write pattern.\n> \n> you can address this by having large battery-backed caches that you\n> write to and they batch things out to disk more efficiantly.\n> \n> or you can put your WAL on a seperate drive so that the syncs on that\n> don't affect the data drives (but you will still have syncs on the data\n> disks, just not as many of them)\n> \n> David Lang\n> \n\n1. There are four Linux I/O schedulers to choose from in the 2.6 kernel.\nIf you *aren't* on the 2.6 kernel, give me a shout when you are. :)\n\n2. You can choose the scheduler in use \"on the fly\". This means you can\nset up a benchmark of your *real-world* application, and run it four\ntimes, once with each scheduler, *without* having to reboot or any of\nthat nonsense. That said, you will probably want to introduce some kind\nof \"page cache poisoning\" technique between these runs to force your\nbenchmark to deal with every block of data at least once off the hard drive.\n\n3. As I learned a few weeks ago, even simple 160 GB single SATA drives\nnow have some kind of scheduling algorithm built in, so your tests may\nnot show significant differences between the four schedulers. This is\neven more the case for high-end SANs. You simply must test with your\nreal workload, rather than using bonnie++, iozone, or fio, to make an\nintelligent scheduler choice.\n\n4. For those that absolutely need fine-grained optimization, there is an\nopen-source tool called \"blktrace\" that is essentially a \"sniffer for\nI/O\". It is maintained by Jens Axboe of Oracle, who also maintains the\nLinux block I/O layer! There is a \"driver\" called \"seekwatcher\", also\nopen source and maintained by Chris Mason of Oracle, that will give you\nvisualizations of the \"blktrace\" results. In any event, if you need to\nknow, you can find out exactly what the scheduler is doing block by\nblock with \"blktrace\".\n\nYou can track all of this magic down via Google. If there's enough\ninterest and I have some free cycles, I'll post an extended \"howto\" on\ndoing this. But it only took me a week or so to figure it out from\nscratch, and the documentation on \"seekwatcher\" and \"blktrace\" is\nexcellent.\n", "msg_date": "Wed, 07 Jan 2009 19:07:31 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Are random writes optimized sequentially by Linux kernel?" } ]
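Point 2 in the message above really is a one-line operation on a 2.6 kernel; a sketch follows, with sdc standing in for whichever device holds the data in your setup.

    # the scheduler in use is the one shown in brackets,
    # e.g. "noop anticipatory deadline [cfq]"
    cat /sys/block/sdc/queue/scheduler

    # switch it on the fly; no reboot or remount needed
    echo deadline > /sys/block/sdc/queue/scheduler

The change applies only to that device and reverts at boot unless you also set it via the elevator= boot parameter or an init script.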
[ { "msg_contents": "Ok, here some information:\n\nOS: Centos 5.x (Linux 2.6.18-53.1.21.el5 #1 SMP Tue May 20 09:34:18 EDT \n2008 i686 i686 i386 GNU/Linux)\nRAID: it's a hardware RAID controller\nThe disks are 9600rpm SATA drives\n\n(6 disk 1+0 RAID array and 2 separate disks for the OS).\n\n\nAbout iostat (on sdb I have pg_xlog, on sdc I have data)\n\n > iostat -k\nLinux 2.6.18-53.1.21.el5 (*******) 01/07/2009\n\navg-cpu: %user %nice %system %iowait %steal %idle\n 17.27 0.00 5.13 45.08 0.00 32.52\n\nDevice: tps kB_read/s kB_wrtn/s kB_read kB_wrtn\nsda 30.42 38.50 170.48 182600516 808546589\nsdb 46.16 0.23 52.10 1096693 247075617\nsdc 269.26 351.51 451.00 1667112043 2138954833\n\n\n\n > iostat -x -k -d 2 5\nLinux 2.6.18-53.1.21.el5 (*******) 01/07/2009\n\nDevice: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz \navgqu-sz await svctm %util\nsda 0.17 12.68 0.47 29.95 38.51 170.51 \n13.74 0.03 0.86 0.19 0.57\nsdb 0.01 80.11 0.05 46.11 0.23 52.01 \n2.26 0.01 0.22 0.22 1.01\nsdc 7.50 64.57 222.55 46.69 350.91 450.98 \n5.96 0.57 2.05 3.13 84.41\n\nDevice: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz \navgqu-sz await svctm %util\nsda 0.00 0.00 0.00 0.00 0.00 0.00 \n0.00 0.00 0.00 0.00 0.00\nsdb 0.00 196.00 1.00 117.00 4.00 1252.00 \n21.29 0.02 0.19 0.19 2.30\nsdc 1.50 66.00 277.00 66.50 3100.00 832.00 \n22.89 50.84 242.30 2.91 100.10\n\nDevice: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz \navgqu-sz await svctm %util\nsda 0.00 0.00 0.00 0.00 0.00 0.00 \n0.00 0.00 0.00 0.00 0.00\nsdb 0.00 264.50 0.00 176.50 0.00 1764.00 \n19.99 0.04 0.21 0.21 3.70\nsdc 3.50 108.50 291.50 76.00 3228.00 752.00 \n21.66 89.42 239.39 2.72 100.05\n\nDevice: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz \navgqu-sz await svctm %util\nsda 0.00 4.98 0.00 1.00 0.00 23.88 \n48.00 0.00 0.00 0.00 0.00\nsdb 0.00 23.88 0.00 9.45 0.00 133.33 \n28.21 0.00 0.21 0.21 0.20\nsdc 1.00 105.97 274.13 53.73 3297.51 612.94 \n23.85 67.99 184.58 3.04 99.55\n\nDevice: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz \navgqu-sz await svctm %util\nsda 0.00 0.00 0.00 0.00 0.00 0.00 \n0.00 0.00 0.00 0.00 0.00\nsdb 0.00 79.00 0.00 46.00 0.00 500.00 \n21.74 0.01 0.25 0.25 1.15\nsdc 2.50 141.00 294.00 43.50 3482.00 528.00 \n23.76 51.33 170.46 2.96 100.05\n\n\nvmstat in the same time:\n > vmstat 2\nprocs -----------memory---------- ---swap-- -----io---- --system-- \n-----cpu------\n r b swpd free buff cache si so bi bo in cs us sy \nid wa st\n 0 27 80 126380 27304 3253016 0 0 98 55 0 1 17 5 \n33 45 0\n 0 26 80 124516 27300 3255456 0 0 3438 1724 2745 4011 11 \n2 8 78 0\n 1 25 80 124148 27276 3252548 0 0 3262 2806 3572 7007 33 \n11 3 53 0\n 1 28 80 128272 27244 3248516 0 0 2816 1006 2926 5624 12 3 \n12 73 0\n\n\nI will run pgbench in the next days.\n\n\n> \n> Aside from all the advice here about system tuning, as a system admin I'd also \n> ask is the box doing the job you need? And are you looking at the Postgres \n> log (with logging of slow queries) to see that queries perform in a sensible \n> time? I'd assume with the current performance figure there is an issue \n> somewhere, but I've been to places where it was as simple as adding one \n> index, or even modifying an index so it does what the application developer \n> intended instead of what they ask for ;)\n> \n\n\nI already checked postgres log and resolved index/slow queries issues. \nActually I have queries that sometime are really fast, and sometime go \nin timeout.\nBut all the required indexes are there. 
For sure, there are space to \nimprove performances also in that way, but I would like also to \ninvestigate issue from other point of views (in order to understand also \nhow to monitor the server).\n\n\nCheers and thanks a lot.\nste\n\n", "msg_date": "Wed, 07 Jan 2009 20:05:43 +0100", "msg_from": "Stefano Nichele <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Hi All,\nI ran pgbench. Here some result:\n\n-bash-3.1$ pgbench -c 50 -t 1000\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 50\nnumber of transactions per client: 1000\nnumber of transactions actually processed: 50000/50000\ntps = 377.351354 (including connections establishing)\ntps = 377.788377 (excluding connections establishing)\n\nSome vmstat samplings in the meantime:\n\nprocs -----------memory---------- ---swap-- -----io---- --system--\n-----cpu------\n r b swpd free buff cache si so bi bo in cs us sy id\nwa st\n0 4 92 127880 8252 3294512 0 0 458 12399 2441 14903 22 9 34\n35 0\n11 49 92 125336 8288 3297016 0 0 392 11071 2390 11568 17 7 51\n24 0\n 0 2 92 124548 8304 3297764 0 0 126 8249 2291 3829 5 3 64\n28 0\n 0 1 92 127268 7796 3295672 0 0 493 11387 2323 14221 23 9 47\n21 0\n 0 2 92 127256 7848 3295492 0 0 501 10654 2215 14599 24 9 42\n24 0\n 0 2 92 125772 7892 3295656 0 0 34 7541 2311 327 0 1 59\n40 0\n 0 1 92 127188 7952 3294084 0 0 537 11039 2274 15460 23 10 43\n24 0\n 7 4 92 123816 7996 3298620 0 0 253 8946 2284 7310 11 5 52\n32 0\n 0 2 92 126652 8536 3294220 0 0 440 9563 2307 9036 13 6 56\n25 0\n 0 10 92 125268 8584 3296116 0 0 426 10696 2285 11034 20 9 39\n32 0\n 0 2 92 124168 8604 3297252 0 0 104 8385 2319 4162 3 3 40\n54 0\n 0 8 92 123780 8648 3296456 0 0 542 11498 2298 16613 25 10 16\n48 0\n\n\n-bash-3.1$ pgbench -t 10000 -c 50\nstarting vacuum...end.\ntransaction type: SELECT only\nscaling factor: 100\nnumber of clients: 50\nnumber of transactions per client: 10000\nnumber of transactions actually processed: 500000/500000\ntps = 8571.573651 (including connections establishing)\ntps = 8594.357138 (excluding connections establishing)\n\n\n-bash-3.1$ pgbench -t 10000 -c 50 -S\nstarting vacuum...end.\ntransaction type: SELECT only\nscaling factor: 100\nnumber of clients: 50\nnumber of transactions per client: 10000\nnumber of transactions actually processed: 500000/500000\ntps = 8571.573651 (including connections establishing)\ntps = 8594.357138 (excluding connections establishing)\n\n\n(next test is with scaling factor 1)\n\n-bash-3.1$ pgbench -t 20000 -c 8 -S pgbench\nstarting vacuum...end.\ntransaction type: SELECT only\nscaling factor: 1\nnumber of clients: 8\nnumber of transactions per client: 20000\nnumber of transactions actually processed: 160000/160000\ntps = 11695.895318 (including connections establishing)\ntps = 11715.603720 (excluding connections establishing)\n\nAny comment ?\n\nI can give you also some details about database usage of my application:\n- number of active connections: about 60\n- number of idle connections: about 60\n\nHere some number from a mine old pgfouine report:\n- query peak: 378 queries/s\n- select: 53,1%, insert 3,8%, update 2,2 %, delete 2,8 %\n\nThe application is basically a web application and the db size is 37 GB.\n\nIs there a way to have the number of queries per second and the percentages\nof select/update/insert/delete without pgfouine ?\nWhat is the performance impact of stats_start_collector = on and\nstats_row_level = on (they are on 
since I use autovacuum)\n\nThanks a lot for your help.\n\nste\n\n\nOn Wed, Jan 7, 2009 at 8:05 PM, Stefano Nichele\n<[email protected]>wrote:\n\n> Ok, here some information:\n>\n> OS: Centos 5.x (Linux 2.6.18-53.1.21.el5 #1 SMP Tue May 20 09:34:18 EDT\n> 2008 i686 i686 i386 GNU/Linux)\n> RAID: it's a hardware RAID controller\n> The disks are 9600rpm SATA drives\n>\n> (6 disk 1+0 RAID array and 2 separate disks for the OS).\n>\n>\n> About iostat (on sdb I have pg_xlog, on sdc I have data)\n>\n> > iostat -k\n> Linux 2.6.18-53.1.21.el5 (*******) 01/07/2009\n>\n> avg-cpu: %user %nice %system %iowait %steal %idle\n> 17.27 0.00 5.13 45.08 0.00 32.52\n>\n> Device: tps kB_read/s kB_wrtn/s kB_read kB_wrtn\n> sda 30.42 38.50 170.48 182600516 808546589\n> sdb 46.16 0.23 52.10 1096693 247075617\n> sdc 269.26 351.51 451.00 1667112043 2138954833\n>\n>\n>\n> > iostat -x -k -d 2 5\n> Linux 2.6.18-53.1.21.el5 (*******) 01/07/2009\n>\n> Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz\n> avgqu-sz await svctm %util\n> sda 0.17 12.68 0.47 29.95 38.51 170.51 13.74\n> 0.03 0.86 0.19 0.57\n> sdb 0.01 80.11 0.05 46.11 0.23 52.01 2.26\n> 0.01 0.22 0.22 1.01\n> sdc 7.50 64.57 222.55 46.69 350.91 450.98 5.96\n> 0.57 2.05 3.13 84.41\n>\n> Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz\n> avgqu-sz await svctm %util\n> sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00\n> 0.00 0.00 0.00 0.00\n> sdb 0.00 196.00 1.00 117.00 4.00 1252.00 21.29\n> 0.02 0.19 0.19 2.30\n> sdc 1.50 66.00 277.00 66.50 3100.00 832.00 22.89\n> 50.84 242.30 2.91 100.10\n>\n> Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz\n> avgqu-sz await svctm %util\n> sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00\n> 0.00 0.00 0.00 0.00\n> sdb 0.00 264.50 0.00 176.50 0.00 1764.00 19.99\n> 0.04 0.21 0.21 3.70\n> sdc 3.50 108.50 291.50 76.00 3228.00 752.00 21.66\n> 89.42 239.39 2.72 100.05\n>\n> Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz\n> avgqu-sz await svctm %util\n> sda 0.00 4.98 0.00 1.00 0.00 23.88 48.00\n> 0.00 0.00 0.00 0.00\n> sdb 0.00 23.88 0.00 9.45 0.00 133.33 28.21\n> 0.00 0.21 0.21 0.20\n> sdc 1.00 105.97 274.13 53.73 3297.51 612.94 23.85\n> 67.99 184.58 3.04 99.55\n>\n> Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz\n> avgqu-sz await svctm %util\n> sda 0.00 0.00 0.00 0.00 0.00 0.00 0.00\n> 0.00 0.00 0.00 0.00\n> sdb 0.00 79.00 0.00 46.00 0.00 500.00 21.74\n> 0.01 0.25 0.25 1.15\n> sdc 2.50 141.00 294.00 43.50 3482.00 528.00 23.76\n> 51.33 170.46 2.96 100.05\n>\n>\n> vmstat in the same time:\n> > vmstat 2\n> procs -----------memory---------- ---swap-- -----io---- --system--\n> -----cpu------\n> r b swpd free buff cache si so bi bo in cs us sy id\n> wa st\n> 0 27 80 126380 27304 3253016 0 0 98 55 0 1 17 5 33\n> 45 0\n> 0 26 80 124516 27300 3255456 0 0 3438 1724 2745 4011 11 2 8\n> 78 0\n> 1 25 80 124148 27276 3252548 0 0 3262 2806 3572 7007 33 11 3\n> 53 0\n> 1 28 80 128272 27244 3248516 0 0 2816 1006 2926 5624 12 3 12\n> 73 0\n>\n>\n> I will run pgbench in the next days.\n>\n>\n>\n>> Aside from all the advice here about system tuning, as a system admin I'd\n>> also ask is the box doing the job you need? And are you looking at the\n>> Postgres log (with logging of slow queries) to see that queries perform in a\n>> sensible time? 
I'd assume with the current performance figure there is an\n>> issue somewhere, but I've been to places where it was as simple as adding\n>> one index, or even modifying an index so it does what the application\n>> developer intended instead of what they ask for ;)\n>>\n>>\n>\n> I already checked postgres log and resolved index/slow queries issues.\n> Actually I have queries that sometime are really fast, and sometime go in\n> timeout.\n> But all the required indexes are there. For sure, there are space to\n> improve performances also in that way, but I would like also to investigate\n> issue from other point of views (in order to understand also how to monitor\n> the server).\n>\n>\n> Cheers and thanks a lot.\n> ste\n>\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n\nHi All,I ran pgbench. Here some result:-bash-3.1$ pgbench -c 50 -t 1000starting vacuum...end.transaction type: TPC-B (sort of)scaling factor: 100number of clients: 50number of transactions per client: 1000\nnumber of transactions actually processed: 50000/50000tps = 377.351354 (including connections establishing)tps = 377.788377 (excluding connections establishing)Some vmstat samplings in the meantime:\nprocs -----------memory---------- ---swap-- -----io---- --system-- -----cpu------ r  b   swpd   free   buff  cache   si   so    bi    bo   in   cs us sy id wa st0  4     92 127880   8252 3294512    0    0   458 12399 2441 14903 22  9 34 35  0\n11 49     92 125336   8288 3297016    0    0   392 11071 2390 11568 17  7 51 24  0 0  2     92 124548   8304 3297764    0    0   126  8249 2291 3829  5  3 64 28  0 0  1     92 127268   7796 3295672    0    0   493 11387 2323 14221 23  9 47 21  0\n 0  2     92 127256   7848 3295492    0    0   501 10654 2215 14599 24  9 42 24  0 0  2     92 125772   7892 3295656    0    0    34  7541 2311  327  0  1 59 40  0 0  1     92 127188   7952 3294084    0    0   537 11039 2274 15460 23 10 43 24  0\n 7  4     92 123816   7996 3298620    0    0   253  8946 2284 7310 11  5 52 32  0 0  2     92 126652   8536 3294220    0    0   440  9563 2307 9036 13  6 56 25  0 0 10     92 125268   8584 3296116    0    0   426 10696 2285 11034 20  9 39 32  0\n 0  2     92 124168   8604 3297252    0    0   104  8385 2319 4162  3  3 40 54  0 0  8     92 123780   8648 3296456    0    0   542 11498 2298 16613 25 10 16 48  0 -bash-3.1$ pgbench -t 10000 -c 50starting vacuum...end.\ntransaction type: SELECT onlyscaling factor: 100number of clients: 50number of transactions per client: 10000number of transactions actually processed: 500000/500000tps = 8571.573651 (including connections establishing)\ntps = 8594.357138 (excluding connections establishing)-bash-3.1$ pgbench -t 10000 -c 50 -Sstarting vacuum...end.transaction type: SELECT onlyscaling factor: 100number of clients: 50number of transactions per client: 10000\nnumber of transactions actually processed: 500000/500000tps = 8571.573651 (including connections establishing)tps = 8594.357138 (excluding connections establishing)(next test is with scaling factor 1)\n-bash-3.1$ pgbench -t 20000 -c 8 -S pgbenchstarting vacuum...end.transaction type: SELECT onlyscaling factor: 1number of clients: 8number of transactions per client: 20000number of transactions actually processed: 160000/160000\ntps = 11695.895318 (including connections establishing)tps = 11715.603720 (excluding connections establishing)Any comment ?I can give you also some details about 
database usage of my application:- number of active connections: about 60\n- number of idle connections: about 60Here some number from a mine old pgfouine report:- query peak: 378 queries/s- select: 53,1%, insert 3,8%, update 2,2 %, delete 2,8 %The application is basically a web application and the db size is 37 GB.\nIs there a way to have the number of queries per second and the percentages of  select/update/insert/delete without pgfouine ?What is the performance impact of stats_start_collector = on and stats_row_level = on (they are on since I use autovacuum)\nThanks a lot for your help.steOn Wed, Jan 7, 2009 at 8:05 PM, Stefano Nichele <[email protected]> wrote:\nOk, here some information:\n\nOS: Centos 5.x (Linux 2.6.18-53.1.21.el5 #1 SMP Tue May 20 09:34:18 EDT 2008 i686 i686 i386 GNU/Linux)\nRAID: it's a hardware RAID controller\nThe disks are 9600rpm SATA drives\n\n(6 disk 1+0 RAID array and 2 separate disks for the OS).\n\n\nAbout iostat (on sdb I have pg_xlog, on sdc I have data)\n\n> iostat -k\nLinux 2.6.18-53.1.21.el5 (*******)       01/07/2009\n\navg-cpu:  %user   %nice %system %iowait  %steal   %idle\n         17.27    0.00    5.13   45.08    0.00   32.52\n\nDevice:            tps    kB_read/s    kB_wrtn/s    kB_read    kB_wrtn\nsda              30.42        38.50       170.48  182600516  808546589\nsdb              46.16         0.23        52.10    1096693  247075617\nsdc             269.26       351.51       451.00 1667112043 2138954833\n\n\n\n> iostat -x -k -d 2 5\nLinux 2.6.18-53.1.21.el5 (*******)       01/07/2009\n\nDevice:         rrqm/s   wrqm/s   r/s   w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util\nsda               0.17    12.68  0.47 29.95    38.51   170.51    13.74     0.03    0.86   0.19   0.57\nsdb               0.01    80.11  0.05 46.11     0.23    52.01     2.26     0.01    0.22   0.22   1.01\nsdc               7.50    64.57 222.55 46.69   350.91   450.98     5.96     0.57    2.05   3.13  84.41\n\nDevice:         rrqm/s   wrqm/s   r/s   w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util\nsda               0.00     0.00  0.00  0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00\nsdb               0.00   196.00  1.00 117.00     4.00  1252.00    21.29     0.02    0.19   0.19   2.30\nsdc               1.50    66.00 277.00 66.50  3100.00   832.00    22.89    50.84  242.30   2.91 100.10\n\nDevice:         rrqm/s   wrqm/s   r/s   w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util\nsda               0.00     0.00  0.00  0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00\nsdb               0.00   264.50  0.00 176.50     0.00  1764.00    19.99     0.04    0.21   0.21   3.70\nsdc               3.50   108.50 291.50 76.00  3228.00   752.00    21.66    89.42  239.39   2.72 100.05\n\nDevice:         rrqm/s   wrqm/s   r/s   w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util\nsda               0.00     4.98  0.00  1.00     0.00    23.88    48.00     0.00    0.00   0.00   0.00\nsdb               0.00    23.88  0.00  9.45     0.00   133.33    28.21     0.00    0.21   0.21   0.20\nsdc               1.00   105.97 274.13 53.73  3297.51   612.94    23.85    67.99  184.58   3.04  99.55\n\nDevice:         rrqm/s   wrqm/s   r/s   w/s    rkB/s    wkB/s avgrq-sz avgqu-sz   await  svctm  %util\nsda               0.00     0.00  0.00  0.00     0.00     0.00     0.00     0.00    0.00   0.00   0.00\nsdb               0.00    79.00  0.00 46.00     0.00   500.00    21.74     0.01    0.25   0.25   1.15\nsdc               
2.50   141.00 294.00 43.50  3482.00   528.00    23.76    51.33  170.46   2.96 100.05\n\n\nvmstat in the same time:\n> vmstat 2\nprocs -----------memory---------- ---swap-- -----io---- --system-- -----cpu------\nr  b   swpd   free   buff  cache   si   so    bi    bo   in   cs us sy id wa st\n0 27     80 126380  27304 3253016    0    0    98    55    0    1 17  5 33 45  0\n0 26     80 124516  27300 3255456    0    0  3438  1724 2745 4011 11  2  8 78  0\n1 25     80 124148  27276 3252548    0    0  3262  2806 3572 7007 33 11  3 53  0\n1 28     80 128272  27244 3248516    0    0  2816  1006 2926 5624 12  3 12 73  0\n\n\nI will run pgbench in the next days.\n\n\n\n\nAside from all the advice here about system tuning, as a system admin I'd also ask is the box doing the job you need? And are you looking at the Postgres log (with logging of slow queries) to see that queries perform in a sensible time? I'd assume with the current performance figure there is an issue somewhere, but I've been to places where it was as simple as adding one index, or even modifying an index so it does what the application developer intended instead of what they ask for ;)\n\n\n\n\nI already checked postgres log and resolved index/slow queries issues. Actually I have queries that sometime are really fast, and sometime go in timeout.\nBut all the required indexes are there. For sure, there are space to improve performances also in that way, but I would like also to investigate issue from other point of views (in order to understand also how to monitor the server).\n\n\nCheers and thanks a lot.\nste\n\n\n-- \nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Sun, 11 Jan 2009 16:00:10 +0100", "msg_from": "\"Stefano Nichele\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "> Here some number from a mine old pgfouine report:\n> - query peak: 378 queries/s\n> - select: 53,1%, insert 3,8%, update 2,2 %, delete 2,8 %\n>\n>\nActually the percentages are wrong (I think pgfouine counts also other types\nof query like ET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE;):\nThese are the right ones:\n\nSELECT num: 658,5 perc: 85,7 %\nINSERT num: 47,66 perc: 6,2 %\nUPDATE num: 27,49 perc: 3,6 %\nDELETE num: 34,56 perc: 4,5 %\n\n\nste\n\nHere some number from a mine old pgfouine report:- query peak: 378 queries/s\n- select: 53,1%, insert 3,8%, update 2,2 %, delete 2,8 %Actually the percentages are wrong (I think pgfouine counts also other types of query like ET SESSION CHARACTERISTICS AS TRANSACTION READ WRITE;):\nThese are the right ones:SELECT   num: 658,5  perc:  85,7 %INSERT    num: 47,66  perc:  6,2 %UPDATE  num: 27,49  perc:   3,6 %DELETE  num: 34,56  perc:  4,5 %ste", "msg_date": "Sun, 11 Jan 2009 16:09:58 +0100", "msg_from": "\"Stefano Nichele\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" } ]
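The question above about getting rough query rates and the select/insert/update/delete mix without pgfouine can be approximated from the statistics views, provided the stats collector is enabled (stats_start_collector, plus stats_row_level for the per-table tuple counts). This is only a sketch: it counts transactions and touched rows rather than individual statements, so the numbers are not directly comparable to a log-based report. Sample the counters twice and divide the difference by the interval to get per-second rates.

SELECT xact_commit, xact_rollback, blks_read, blks_hit
  FROM pg_stat_database
 WHERE datname = current_database();

SELECT sum(seq_tup_read + idx_tup_fetch) AS rows_read,
       sum(n_tup_ins) AS rows_inserted,
       sum(n_tup_upd) AS rows_updated,
       sum(n_tup_del) AS rows_deleted
  FROM pg_stat_user_tables;

The overhead of leaving the collector on is usually small next to the I/O load visible in the iostat output above, and the row-level stats are needed for autovacuum anyway.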
[ { "msg_contents": "Hi,\n\nI'm fiddling with a hand-made block device based benchmarking thingie,\nwhich I want to run random reads and writes of relatively small blocks\n(somewhat similar to databases). I'm much less interested in measuring\nthroughput, but rather in latency. Besides varying block sizes, I'm also\ntesting with a varying number of concurrent threads and varying\nread/write ratios. As a result, I'm interested in roughly the following\ngraphs:\n\n * (single thread) i/o latency vs. seek distance\n * (single thread) throughput vs. (accurator) position\n * (single thread) i/o latency vs. no of concurrent threads\n * total requests per second + throughput vs. no of concurrent threads\n * total requests per second + throughput vs. read/write ratio\n * total requests per second + throughput vs. block size\n * distribution of access times (histogram)\n\n(Of course, not all of these are relevant for all types of storages.)\n\nDoes there already exist a tool giving (most of) these measures? Am I\nmissing something interesting? What would you expect from a block device\nbenchmarking tool?\n\nRegards\n\nMarkus Wanner\n", "msg_date": "Sat, 10 Jan 2009 19:45:23 +0100", "msg_from": "Markus Wanner <[email protected]>", "msg_from_op": true, "msg_subject": "block device benchmarking" }, { "msg_contents": "Markus Wanner wrote:\n> Hi,\n> \n> I'm fiddling with a hand-made block device based benchmarking thingie,\n> which I want to run random reads and writes of relatively small blocks\n> (somewhat similar to databases). I'm much less interested in measuring\n> throughput, but rather in latency. Besides varying block sizes, I'm also\n> testing with a varying number of concurrent threads and varying\n> read/write ratios. As a result, I'm interested in roughly the following\n> graphs:\n> \n> * (single thread) i/o latency vs. seek distance\n> * (single thread) throughput vs. (accurator) position\n> * (single thread) i/o latency vs. no of concurrent threads\n> * total requests per second + throughput vs. no of concurrent threads\n> * total requests per second + throughput vs. read/write ratio\n> * total requests per second + throughput vs. block size\n> * distribution of access times (histogram)\n> \n> (Of course, not all of these are relevant for all types of storages.)\n> \n> Does there already exist a tool giving (most of) these measures? Am I\n> missing something interesting? What would you expect from a block device\n> benchmarking tool?\n> \n> Regards\n> \n> Markus Wanner\n> \n\nCheck out the work of Jens Axboe and Alan Brunelle, specifically the\npackages \"blktrace\" and \"fio\". \"blktrace\" acts as a \"sniffer\" for I/O,\nrecording the path of every I/O operation through the block I/O layer.\nUsing another tool in the package, \"btreplay/btrecord\", you can\ntranslate the captured trace into a benchmark that re-issues the I/Os.\nAnd the third tool in the package, \"btt\", does statistical analysis. I\ndon't think you really need \"benchmarks\" if you can extract this kind of\ndetail from a real application. :)\n\nHowever, if you do want to build a benchmark, \"fio\" is a customizable\nbenchmark utility. In the absence of real-world traces, you can emulate\nany I/O activity pattern with \"fio\". \"fio\" is what Mark Wong's group has\nbeen using to characterize filesystem behavior. 
I'm not sure where the\npresentations are at the moment, but there is some of it at\n\nhttp://wiki.postgresql.org/wiki/HP_ProLiant_DL380_G5_Tuning_Guide\n\nThere are also some more generic filesystem benchmarks like \"iozone\" and\n\"bonnie++\". They're a good general tool for comparing filesystems and\nI/O subsystems, but the other tools are more useful if you have a\nspecific workload, for example, a PostgreSQL application.\n\nBTW ... I am working on my blktrace howto even as I type this. I don't\nhave an ETA -- that's going to depend on how long it takes me to get the\nPostgreSQL benchmarks I'm using to work on my machine. But everything\nwill be on Github at\n\nhttp://github.com/znmeb/linux_perf_viz/tree/master/blktrace-howto\n\nas it evolves.\n", "msg_date": "Sat, 10 Jan 2009 15:10:19 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: block device benchmarking" }, { "msg_contents": "Hi,\n\nM. Edward (Ed) Borasky wrote:\n> Check out the work of Jens Axboe and Alan Brunelle, specifically the\n> packages \"blktrace\" and \"fio\".\n\nThank you for these pointers, that looks pretty interesting.\n\n> There are also some more generic filesystem benchmarks like \"iozone\" and\n> \"bonnie++\".\n\nThose are targeted at filesystems, which hide some of the disk's\ncharacteristics I'm interested in. But thanks.\n\n> BTW ... I am working on my blktrace howto even as I type this. I don't\n> have an ETA -- that's going to depend on how long it takes me to get the\n> PostgreSQL benchmarks I'm using to work on my machine. But everything\n> will be on Github at\n> \n> http://github.com/znmeb/linux_perf_viz/tree/master/blktrace-howto\n> \n> as it evolves.\n\nCool, looking forward to reading that.\n\nRegards\n\nMarkus Wanner\n", "msg_date": "Sun, 11 Jan 2009 11:40:02 +0100", "msg_from": "Markus Wanner <[email protected]>", "msg_from_op": true, "msg_subject": "Re: block device benchmarking" }, { "msg_contents": "Markus Wanner wrote:\n> M. Edward (Ed) Borasky wrote:\n>> Check out the work of Jens Axboe and Alan Brunelle, specifically the\n>> packages \"blktrace\" and \"fio\".\n>> BTW ... I am working on my blktrace howto even as I type this. I don't\n>> have an ETA -- that's going to depend on how long it takes me to get the\n>> PostgreSQL benchmarks I'm using to work on my machine. But everything\n>> will be on Github at\n>>\n>> http://github.com/znmeb/linux_perf_viz/tree/master/blktrace-howto\n>>\n>> as it evolves.\n> \n> Cool, looking forward to reading that.\n\nI've got it about half done at this point. What's still to be done is\nbuild the PostgreSQL example and the specialized post-processing code.\nBut the basics -- how to build \"blktrace\" from source and run it -- are\nthere. It will probably be next weekend before I get a chance to build\nthe PostgreSQL example.\n\nhttp://github.com/znmeb/linux_perf_viz/raw/master/blktrace-howto/blktrace-howto.pdf\n\nIf anyone wants to comment on it, the best way might be to join Github\nand send me in-project messages there. I've got some documentation\nquestions I need to ask on the blktrace mailing list.\n-- \nM. Edward (Ed) Borasky, FBG, AB, PTA, PGS, MS, MNLP, NST, ACMC(P), WOM\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 11 Jan 2009 15:28:09 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: block device benchmarking" } ]
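For the kind of random small-block read latency Markus describes, a very crude approximation can also be run from inside PostgreSQL itself, which is sometimes a useful sanity check next to fio or blktrace numbers, though it goes through the filesystem and OS cache that a raw block-device benchmark deliberately bypasses. The table name and sizes below are made up; the idea is simply a table comfortably larger than RAM so that most single-row probes by primary key have to hit the disk, with psql's \timing showing per-probe latency.

CREATE TABLE iotest AS
  SELECT g AS id, repeat('x', 200) AS filler
    FROM generate_series(1, 20000000) g;
ALTER TABLE iotest ADD PRIMARY KEY (id);
ANALYZE iotest;

\timing
SELECT filler FROM iotest WHERE id = 1234567;   -- substitute a different random id each run

Repeating the probe with fresh random ids and recording the reported times gives a rough latency histogram to set beside the single-thread numbers a block-device tool would report.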
[ { "msg_contents": "The new MLC based SSDs have better wear leveling tech and don't suffer the pauses. Intel X25-M 80 and 160 GB SSDs are both pause-free. See Anandtech's test results for details.\r\n\r\nIntel's SLC SSDs should also be good enough but they're smaller.\r\n\r\n- Luke\r\n\r\n----- Original Message -----\r\nFrom: [email protected] <[email protected]>\r\nTo: Gregory Stark <[email protected]>\r\nCc: Markus Wanner <[email protected]>; Scott Marlowe <[email protected]>; Ron <[email protected]>; [email protected] <[email protected]>\r\nSent: Sat Jan 10 14:40:51 2009\r\nSubject: Re: [PERFORM] understanding postgres issues/bottlenecks\r\n\r\nOn Sat, 10 Jan 2009, Gregory Stark wrote:\r\n\r\n> [email protected] writes:\r\n>\r\n>> On Sat, 10 Jan 2009, Markus Wanner wrote:\r\n>>\r\n>>> My understanding of SSDs so far is, that they are not that bad at\r\n>>> writing *on average*, but to perform wear-leveling, they sometimes have\r\n>>> to shuffle around multiple blocks at once. So there are pretty awful\r\n>>> spikes for writing latency (IIRC more than 100ms has been measured on\r\n>>> cheaper disks).\r\n>\r\n> That would be fascinating. And frightening. A lot of people have been\r\n> recommending these for WAL disks and this would be make them actually *worse*\r\n> than regular drives.\r\n>\r\n>> well, I have one of those cheap disks.\r\n>>\r\n>> brand new out of the box, format the 32G drive, then copy large files to it\r\n>> (~1G per file). this should do almost no wear-leveling, but it's write\r\n>> performance is still poor and it has occasional 1 second pauses.\r\n>\r\n> This isn't similar to the way WAL behaves though. What you're testing is the\r\n> behaviour when the bandwidth to the SSD is saturated. At that point some point\r\n> in the stack, whether in the SSD, the USB hardware or driver, or OS buffer\r\n> cache can start to queue up writes. The stalls you see could be the behaviour\r\n> when that queue fills up and it needs to push back to higher layers.\r\n>\r\n> To simulate WAL you want to transfer smaller volumes of data, well below the\r\n> bandwidth limit of the drive, fsync the data, then pause a bit repeat. Time\r\n> each fsync and see whether the time they take is proportional to the amount of\r\n> data written in the meantime or whether they randomly spike upwards.\r\n\r\nif you have a specific benchmark for me to test I would be happy to do\r\nthis.\r\n\r\nthe test that I did is basicly the best-case for the SSD (more-or-less\r\nsequential writes where the vendors claim that the drives match or\r\nslightly outperform the traditional disks). 
for random writes the vendors\r\nput SSDs at fewer IOPS than 5400 rpm drives, let along 15K rpm drives.\r\n\r\ntake a look at this paper\r\nhttp://www.imation.com/PageFiles/83/Imation-SSD-Performance-White-Paper.pdf\r\n\r\nthis is not one of the low-performance drives, they include a sandisk\r\ndrive in the paper that shows significantly less performance (but the same\r\nbasic pattern) than the imation drives.\r\n\r\nDavid Lang\r\n\r\n--\r\nSent via pgsql-performance mailing list ([email protected])\r\nTo make changes to your subscription:\r\nhttp://www.postgresql.org/mailpref/pgsql-performance\r\n", "msg_date": "Sat, 10 Jan 2009 14:54:35 -0800", "msg_from": "Luke Lonergan <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "[email protected] wrote:\n> On Sat, 10 Jan 2009, Luke Lonergan wrote:\n>\n>> The new MLC based SSDs have better wear leveling tech and don't \n>> suffer the pauses. Intel X25-M 80 and 160 GB SSDs are both \n>> pause-free. See Anandtech's test results for details.\n>\n> they don't suffer the pauses, but they still don't have fantasic write \n> speeds.\n>\n> David Lang\n>\n>> Intel's SLC SSDs should also be good enough but they're smaller.\n>>\n\n From what I can see, SLC SSDs are still quite superior for reliability \nand (write) performance. However they are too small and too expensive \nright now. Hopefully the various manufacturers are working on improving \nthe size/price issue for SLC, as well as improving the \nperformance/reliability area for the MLC products.\n\nregards\n\nMark\n", "msg_date": "Sun, 11 Jan 2009 12:44:16 +1300", "msg_from": "Mark Kirkwood <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sat, 10 Jan 2009, Luke Lonergan wrote:\n\n> The new MLC based SSDs have better wear leveling tech and don't suffer \n> the pauses. Intel X25-M 80 and 160 GB SSDs are both pause-free. See \n> Anandtech's test results for details.\n\nthey don't suffer the pauses, but they still don't have fantasic write \nspeeds.\n\nDavid Lang\n\n> Intel's SLC SSDs should also be good enough but they're smaller.\n>\n> - Luke\n>\n> ----- Original Message -----\n> From: [email protected] <[email protected]>\n> To: Gregory Stark <[email protected]>\n> Cc: Markus Wanner <[email protected]>; Scott Marlowe <[email protected]>; Ron <[email protected]>; [email protected] <[email protected]>\n> Sent: Sat Jan 10 14:40:51 2009\n> Subject: Re: [PERFORM] understanding postgres issues/bottlenecks\n>\n> On Sat, 10 Jan 2009, Gregory Stark wrote:\n>\n>> [email protected] writes:\n>>\n>>> On Sat, 10 Jan 2009, Markus Wanner wrote:\n>>>\n>>>> My understanding of SSDs so far is, that they are not that bad at\n>>>> writing *on average*, but to perform wear-leveling, they sometimes have\n>>>> to shuffle around multiple blocks at once. So there are pretty awful\n>>>> spikes for writing latency (IIRC more than 100ms has been measured on\n>>>> cheaper disks).\n>>\n>> That would be fascinating. And frightening. A lot of people have been\n>> recommending these for WAL disks and this would be make them actually *worse*\n>> than regular drives.\n>>\n>>> well, I have one of those cheap disks.\n>>>\n>>> brand new out of the box, format the 32G drive, then copy large files to it\n>>> (~1G per file). 
this should do almost no wear-leveling, but it's write\n>>> performance is still poor and it has occasional 1 second pauses.\n>>\n>> This isn't similar to the way WAL behaves though. What you're testing is the\n>> behaviour when the bandwidth to the SSD is saturated. At that point some point\n>> in the stack, whether in the SSD, the USB hardware or driver, or OS buffer\n>> cache can start to queue up writes. The stalls you see could be the behaviour\n>> when that queue fills up and it needs to push back to higher layers.\n>>\n>> To simulate WAL you want to transfer smaller volumes of data, well below the\n>> bandwidth limit of the drive, fsync the data, then pause a bit repeat. Time\n>> each fsync and see whether the time they take is proportional to the amount of\n>> data written in the meantime or whether they randomly spike upwards.\n>\n> if you have a specific benchmark for me to test I would be happy to do\n> this.\n>\n> the test that I did is basicly the best-case for the SSD (more-or-less\n> sequential writes where the vendors claim that the drives match or\n> slightly outperform the traditional disks). for random writes the vendors\n> put SSDs at fewer IOPS than 5400 rpm drives, let along 15K rpm drives.\n>\n> take a look at this paper\n> http://www.imation.com/PageFiles/83/Imation-SSD-Performance-White-Paper.pdf\n>\n> this is not one of the low-performance drives, they include a sandisk\n> drive in the paper that shows significantly less performance (but the same\n> basic pattern) than the imation drives.\n>\n> David Lang\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Sat, 10 Jan 2009 16:03:32 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, 11 Jan 2009, Mark Kirkwood wrote:\n\n> [email protected] wrote:\n>> On Sat, 10 Jan 2009, Luke Lonergan wrote:\n>> \n>>> The new MLC based SSDs have better wear leveling tech and don't suffer the \n>>> pauses. Intel X25-M 80 and 160 GB SSDs are both pause-free. See \n>>> Anandtech's test results for details.\n>> \n>> they don't suffer the pauses, but they still don't have fantasic write \n>> speeds.\n>> \n>> David Lang\n>> \n>>> Intel's SLC SSDs should also be good enough but they're smaller.\n>>> \n>\n> From what I can see, SLC SSDs are still quite superior for reliability and \n> (write) performance. However they are too small and too expensive right now. \n> Hopefully the various manufacturers are working on improving the size/price \n> issue for SLC, as well as improving the performance/reliability area for the \n> MLC products.\n\nthe very nature of the technology means that SLC will never be as cheap as \nMLC and MLC will never be as reliable as SLC\n\ntake a look at \nhttp://www.imation.com/PageFiles/83/SSD-Reliability-Lifetime-White-Paper.pdf \nfor a good writeup of the technology.\n\nfor both technologies, the price will continue to drop, and the \nreliability and performance will continue to climb, but I don't see \nanything that would improve one without the other (well, I could see MLC \ngaining a 50% capacity boost if they can get to 3 bits per cell vs the \ncurrent 2, but that would come at the cost of reliability again)\n\nfor write performance I don't think there is as much of a difference \nbetween the two technologies. 
today there is a huge difference in most of \nthe shipping products, but Intel has now demonstrated that it's mostly due \nto the controller chip, so I expect much of that difference to vanish in \nthe next year or so (as new generations of controller chips ship)\n\nDavid Lang\n", "msg_date": "Sat, 10 Jan 2009 17:54:11 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" } ]
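Gregory's suggested test (small writes, fsync, pause, repeat) can be approximated from a psql session without extra tooling, as long as fsync and synchronous_commit are left on and pg_xlog sits on the drive being evaluated. The table name here is just a scratch name; each single-statement INSERT commits on its own, and every commit forces an fsync of the WAL, so the reported time is dominated by the drive's flush latency.

CREATE TABLE fsync_probe (id serial, payload text);
\timing
INSERT INTO fsync_probe (payload) VALUES (repeat('x', 100));
INSERT INTO fsync_probe (payload) VALUES (repeat('x', 100));
INSERT INTO fsync_probe (payload) VALUES (repeat('x', 100));

Run a few hundred of these (a script can generate them) and look at the distribution of times rather than the average; an occasional multi-hundred-millisecond spike would be the kind of wear-leveling stall being discussed.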
[ { "msg_contents": "I believe they write at 200MB/s which is outstanding for sequential BW. Not sure about the write latency, though the Anandtech benchmark results showed high detail and IIRC the write latencies were very good.\r\n\r\n- Luke\r\n\r\n----- Original Message -----\r\nFrom: [email protected] <[email protected]>\r\nTo: Luke Lonergan\r\nCc: [email protected] <[email protected]>; [email protected] <[email protected]>; [email protected] <[email protected]>; [email protected] <[email protected]>; [email protected] <[email protected]>\r\nSent: Sat Jan 10 16:03:32 2009\r\nSubject: Re: [PERFORM] understanding postgres issues/bottlenecks\r\n\r\nOn Sat, 10 Jan 2009, Luke Lonergan wrote:\r\n\r\n> The new MLC based SSDs have better wear leveling tech and don't suffer\r\n> the pauses. Intel X25-M 80 and 160 GB SSDs are both pause-free. See\r\n> Anandtech's test results for details.\r\n\r\nthey don't suffer the pauses, but they still don't have fantasic write\r\nspeeds.\r\n\r\nDavid Lang\r\n\r\n> Intel's SLC SSDs should also be good enough but they're smaller.\r\n>\r\n> - Luke\r\n>\r\n> ----- Original Message -----\r\n> From: [email protected] <[email protected]>\r\n> To: Gregory Stark <[email protected]>\r\n> Cc: Markus Wanner <[email protected]>; Scott Marlowe <[email protected]>; Ron <[email protected]>; [email protected] <[email protected]>\r\n> Sent: Sat Jan 10 14:40:51 2009\r\n> Subject: Re: [PERFORM] understanding postgres issues/bottlenecks\r\n>\r\n> On Sat, 10 Jan 2009, Gregory Stark wrote:\r\n>\r\n>> [email protected] writes:\r\n>>\r\n>>> On Sat, 10 Jan 2009, Markus Wanner wrote:\r\n>>>\r\n>>>> My understanding of SSDs so far is, that they are not that bad at\r\n>>>> writing *on average*, but to perform wear-leveling, they sometimes have\r\n>>>> to shuffle around multiple blocks at once. So there are pretty awful\r\n>>>> spikes for writing latency (IIRC more than 100ms has been measured on\r\n>>>> cheaper disks).\r\n>>\r\n>> That would be fascinating. And frightening. A lot of people have been\r\n>> recommending these for WAL disks and this would be make them actually *worse*\r\n>> than regular drives.\r\n>>\r\n>>> well, I have one of those cheap disks.\r\n>>>\r\n>>> brand new out of the box, format the 32G drive, then copy large files to it\r\n>>> (~1G per file). this should do almost no wear-leveling, but it's write\r\n>>> performance is still poor and it has occasional 1 second pauses.\r\n>>\r\n>> This isn't similar to the way WAL behaves though. What you're testing is the\r\n>> behaviour when the bandwidth to the SSD is saturated. At that point some point\r\n>> in the stack, whether in the SSD, the USB hardware or driver, or OS buffer\r\n>> cache can start to queue up writes. The stalls you see could be the behaviour\r\n>> when that queue fills up and it needs to push back to higher layers.\r\n>>\r\n>> To simulate WAL you want to transfer smaller volumes of data, well below the\r\n>> bandwidth limit of the drive, fsync the data, then pause a bit repeat. Time\r\n>> each fsync and see whether the time they take is proportional to the amount of\r\n>> data written in the meantime or whether they randomly spike upwards.\r\n>\r\n> if you have a specific benchmark for me to test I would be happy to do\r\n> this.\r\n>\r\n> the test that I did is basicly the best-case for the SSD (more-or-less\r\n> sequential writes where the vendors claim that the drives match or\r\n> slightly outperform the traditional disks). 
for random writes the vendors\r\n> put SSDs at fewer IOPS than 5400 rpm drives, let along 15K rpm drives.\r\n>\r\n> take a look at this paper\r\n> http://www.imation.com/PageFiles/83/Imation-SSD-Performance-White-Paper.pdf\r\n>\r\n> this is not one of the low-performance drives, they include a sandisk\r\n> drive in the paper that shows significantly less performance (but the same\r\n> basic pattern) than the imation drives.\r\n>\r\n> David Lang\r\n>\r\n> --\r\n> Sent via pgsql-performance mailing list ([email protected])\r\n> To make changes to your subscription:\r\n> http://www.postgresql.org/mailpref/pgsql-performance\r\n>\r\n", "msg_date": "Sat, 10 Jan 2009 15:08:46 -0800", "msg_from": "Luke Lonergan <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" } ]
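Whether 200MB/s of sequential write bandwidth is even relevant depends on how much WAL the workload actually generates, and that is easy to measure on 8.2 or later (the function below does not exist in 8.1). Sample the current WAL position twice; the values are hexadecimal WAL positions, so the difference over the interval (a bit of hex arithmetic) gives roughly how many bytes of WAL are written per second, which for most OLTP loads is far below what either a decent SSD or a pair of ordinary drives can stream.

SELECT now(), pg_current_xlog_location();
-- wait, say, 60 seconds under normal load, then:
SELECT now(), pg_current_xlog_location();

Latency of the commit-time fsync, not raw bandwidth, is usually the number that matters for pg_xlog placement.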
[ { "msg_contents": "Not to mention the #1 cause of server faults in my experience: OS kernel bug causes a crash. Battery backup doesn't help you much there.\r\n\r\nFsync of log is necessary IMO.\r\n\r\nThat said, you could use a replication/backup strategy to get a consistent snapshot in the past if you don't mind losing some data or can recreate it from backup elsewhere.\r\n\r\nI think a strategy that puts the WAL on an SLC SSD is a very good one as of Jan/09 and will get much better in short order.\r\n\r\n- Luke\r\n\r\n----- Original Message -----\r\nFrom: [email protected] <[email protected]>\r\nTo: Glyn Astill <[email protected]>\r\nCc: Ron <[email protected]>; Scott Marlowe <[email protected]>; [email protected] <[email protected]>\r\nSent: Sun Jan 11 15:35:22 2009\r\nSubject: Re: [PERFORM] understanding postgres issues/bottlenecks\r\n\r\nOn Sun, 11 Jan 2009, Glyn Astill wrote:\r\n\r\n> --- On Sun, 11/1/09, Scott Marlowe <[email protected]> wrote:\r\n>\r\n>> They also told me we could never lose power in the hosting\r\n>> center\r\n>> because it was so wonder and redundant and that I was\r\n>> wasting my time.\r\n>\r\n> We'll that's just plain silly, at the very least there's always going to\r\n> be some breakers / fuzes in between the power and the machines.\r\n>\r\n> In fact in our building there's quite a few breaker points between our\r\n> comms room on the 3rd floor and the ups / generator in the basement.\r\n> It's a crappy implementation actually.\r\n\r\nthe response I get from people is that they give their servers redundant\r\npoewr supplies and put them on seperate circuits so they must be safe from\r\nthat.\r\n\r\nbut as commented, that's not enough in the real world.\r\n\r\nDavid Lang\r\n\r\n--\r\nSent via pgsql-performance mailing list ([email protected])\r\nTo make changes to your subscription:\r\nhttp://www.postgresql.org/mailpref/pgsql-performance\r\n", "msg_date": "Sun, 11 Jan 2009 15:16:21 -0800", "msg_from": "Luke Lonergan <[email protected]>", "msg_from_op": true, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, Jan 11, 2009 at 4:16 PM, Luke Lonergan <[email protected]> wrote:\n> Not to mention the #1 cause of server faults in my experience: OS kernel bug causes a crash. Battery backup doesn't help you much there.\n\nI've been using pgsql since way back, in a lot of projects, and almost\nalmost of them on some flavor of linux, usually redhat. I have had\nzero kernel crashes on production dbs in that time. I'd be interested\nto know which historic kernels caused you the crashes.\n", "msg_date": "Sun, 11 Jan 2009 16:44:53 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Luke Lonergan wrote:\n> Not to mention the #1 cause of server faults in my experience: OS kernel bug causes a crash. Battery backup doesn't help you much there.\n\nWell now ... that very much depends on where you *got* the server OS and\nhow you administer it. If you're talking a correctly-maintained Windows\n2003 Server installation, or a correctly-maintained Red Hat Enterprise\nLinux installation, or any other \"branded\" OS from Novell, Sun, HP,\netc., I'm guessing such crashes are much rarer than what you've\nexperienced.\n\nAnd you're probably in pretty good shape with Debian stable and the RHEL\nrespins like CentOS. I can't comment on Ubuntu server or any of the BSD\nfamily -- I've never worked with them. 
But you should be able to keep a\n\"branded\" server up for months, with the exception of applying security\npatches that require a reboot. And *those* can be *planned* outages!\n\nWhere you *will* have some major OS risk is with testing-level software\nor \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't\nknow why people run Fedora servers -- if it's Red Hat compatibility you\nwant, there's CentOS.\n\n\n-- \nM. Edward (Ed) Borasky, FBG, AB, PTA, PGS, MS, MNLP, NST, ACMC(P), WOM\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 11 Jan 2009 18:43:42 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "> Where you *will* have some major OS risk is with testing-level software\n> or \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't\n> know why people run Fedora servers -- if it's Red Hat compatibility you\n> want, there's CentOS.\n\nI've had no stability problems with Fedora. The worst experience I've\nhad with that distribution is that half the time the CD-burning\nutilities seem to be flaky. As for why that and not CentOS... I like\nhaving modern versions of all of my packages. 5 years is a long time\nto get nothing but bugfixes.\n\n...Robert\n", "msg_date": "Sun, 11 Jan 2009 22:08:54 -0500", "msg_from": "\"Robert Haas\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Robert Haas wrote:\n>> Where you *will* have some major OS risk is with testing-level software\n>> or \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't\n>> know why people run Fedora servers -- if it's Red Hat compatibility you\n>> want, there's CentOS.\n> \n> I've had no stability problems with Fedora. The worst experience I've\n> had with that distribution is that half the time the CD-burning\n> utilities seem to be flaky. As for why that and not CentOS... I like\n> having modern versions of all of my packages. 5 years is a long time\n> to get nothing but bugfixes.\nFedora is not the only distro with \"flaky CD-burning utilities\". I'm\nstill hunting down issues with k3b and brasero on my openSUSE 11.1\nsystem. \"RW\" media are your friends. :)\n\nFive years may be a long time to get nothing but bugfixes, but having to\ndo extensive upgrade testing and an OS upgrade every six months, which\nis where most \"community\" distros' release schedules are these days,\nalso seems extreme. I personally think for the money you pay for Windows\n2003 Server or RHEL, you ought to be able to go a year or more between\nunplanned reboots, and they really should minimize the number of reboots\nyou have to eat for security fixes too. Otherwise, what's the point of\nspending money for an OS? :)\n-- \nM. Edward (Ed) Borasky, FBG, AB, PTA, PGS, MS, MNLP, NST, ACMC(P), WOM\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 11 Jan 2009 20:28:07 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, 11 Jan 2009, M. 
Edward (Ed) Borasky wrote:\n\n> And you're probably in pretty good shape with Debian stable and the RHEL \n> respins like CentOS.\n\nNo one is in good shape until they've done production-level load testing \non the system and have run the sort of \"unplug it under load\" tests that \nScott was praising. Since so many of the kernel bugs you might run into \nare in device drivers, the subset of Debian/RHEL you're using is only as \nstable as the worst supported driver running on your system.\n\n> But you should be able to keep a \"branded\" server up for months, with \n> the exception of applying security patches that require a reboot.\n\nRight, this is why I only rely on Linux deployments using a name I trust: \nDell.\n\nReturning to reality, the idea that there are brands you can buy that make \nall your problems go away is rather optimistic. The number of \"branded\" \nservers I've seen that are just nearly or completely worthless for \ndatabase use is rather depressing.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Sun, 11 Jan 2009 23:38:44 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Greg Smith wrote:\n> Right, this is why I only rely on Linux deployments using a name I\n> trust: Dell.\n> \n> Returning to reality, the idea that there are brands you can buy that\n> make all your problems go away is rather optimistic. The number of\n> \"branded\" servers I've seen that are just nearly or completely worthless\n> for database use is rather depressing.\n\nWell, of course, they won't make *all* your problems go away. But still,\nI'd much rather have an IBM or HP or Dell server running Windows 2003 or\nRHEL or SLES than some no-name hardware running Fedora or Ubuntu.\n-- \nM. Edward (Ed) Borasky, FBG, AB, PTA, PGS, MS, MNLP, NST, ACMC(P), WOM\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 11 Jan 2009 21:30:55 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "M. Edward (Ed) Borasky wrote:\n> Greg Smith wrote:\n> \n>> Right, this is why I only rely on Linux deployments using a name I\n>> trust: Dell.\n>>\n>> Returning to reality, the idea that there are brands you can buy that\n>> make all your problems go away is rather optimistic. The number of\n>> \"branded\" servers I've seen that are just nearly or completely worthless\n>> for database use is rather depressing.\n>> \n>\n> Well, of course, they won't make *all* your problems go away. But still,\n> I'd much rather have an IBM or HP or Dell server running Windows 2003 or\n> RHEL or SLES than some no-name hardware running Fedora or Ubuntu.\n> \nIf you use no-name hardware it all depends on how reliable your supplier \nis. The no-name white box providers I've had experience with have always \nsupplied hardware that was reliable and fast. And as they were small \ncompanies they would work with you to give you hardware that you \npreferred (e.g raid cards etc).\n\nConversely I've found big name brand suppliers would often change \ncritical parts (network or raid cards) midway through shipment - leaving \nyou with the odd-man-out server to debug silly issues with (e.g won't \nget on the network, disk array not recognized by the installation media \netc). 
So I'm not entirely convinced by the 'name brand is good' argument.\n\nI'd agree that Fedora is probably not the best choice for a \ndeployment(!). My experience of Ubuntu has been better, however using \nthe LTS release might be a wise move if one wants to user this distro.\n\nregards\n\nMark\n", "msg_date": "Mon, 12 Jan 2009 18:54:45 +1300", "msg_from": "Mark Kirkwood <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, Jan 11, 2009 at 8:08 PM, Robert Haas <[email protected]> wrote:\n>> Where you *will* have some major OS risk is with testing-level software\n>> or \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't\n>> know why people run Fedora servers -- if it's Red Hat compatibility you\n>> want, there's CentOS.\n>\n> I've had no stability problems with Fedora. The worst experience I've\n> had with that distribution is that half the time the CD-burning\n> utilities seem to be flaky. As for why that and not CentOS... I like\n> having modern versions of all of my packages. 5 years is a long time\n> to get nothing but bugfixes.\n\nWe basically do the same thing with our app tier servers, chasing the\nnext to latest ubuntus so we aren't running really old bits there.\n\nFor the db level it's RHEL with pgsql. Ubuntu has been pretty stable\non the app tier, but we don't push it in the same ways we push our\ndatabases either.\n", "msg_date": "Sun, 11 Jan 2009 23:37:29 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Sun, 11 Jan 2009, M. Edward (Ed) Borasky wrote:\n> Where you *will* have some major OS risk is with testing-level software\n> or \"bleeding edge\" Linux distros like Fedora.\n\nI just ran \"uptime\" on my home machine, and it said 144 days. Debian \nunstable, on no-name hardware. I guess the last time I rebooted was \nwhen I changed the graphics drivers. I can't remember the last time it \nactually crashed.\n\nI guess the moral is, you find a combination of hardware and software that \nworks well, and you may as well stick with it. The most likely things to \ndestabilise the system are drivers for \"interesting\" pieces of hardware, \nlike graphics cards and (unfortunately) some high-performance RAID cards.\n\nMatthew\n\n-- \n A good programmer is one who looks both ways before crossing a one-way street.\n Considering the quality and quantity of one-way streets in Cambridge, it\n should be no surprise that there are so many good programmers there.\n", "msg_date": "Mon, 12 Jan 2009 15:36:56 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "\nOn Jan 11, 2009, at 9:43 PM, M. Edward (Ed) Borasky wrote:\n\n> Luke Lonergan wrote:\n>> Not to mention the #1 cause of server faults in my experience: OS \n>> kernel bug causes a crash. Battery backup doesn't help you much \n>> there.\n>\n\nNot that long ago (a month or so) we ran into a problem where hpacucli \n(Utility for configuring/inspecting/etc HP smartarray controllers) \nwould tickle the cciss driver in such a way that it would cause a \nkernel panic. KABLAMMO (No data loss! weeeee!). The box had run for \na long time without crashes, but it seems that when we added more \ndisks and started the array building the new logical drive some \nmagical things happened.\n\nBugs happen. 
The [bad word] of it is catching the culprit with its \nfingers in the cookie jar.\n\n--\nJeff Trout <[email protected]>\nhttp://www.stuarthamm.net/\nhttp://www.dellsmartexitin.com/\n\n\n\n", "msg_date": "Tue, 13 Jan 2009 08:40:32 -0500", "msg_from": "Jeff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "-----BEGIN PGP SIGNED MESSAGE-----\nHash: SHA1\n\nM. Edward (Ed) Borasky wrote:\n| Luke Lonergan wrote:\n|> Not to mention the #1 cause of server faults in my experience: OS\n|> kernel bug causes a crash. Battery backup doesn't help you much there.\n|>\n|\n| Well now ... that very much depends on where you *got* the server OS and\n| how you administer it. If you're talking a correctly-maintained Windows\n| 2003 Server installation, or a correctly-maintained Red Hat Enterprise\n| Linux installation, or any other \"branded\" OS from Novell, Sun, HP, etc.,\n| I'm guessing such crashes are much rarer than what you've experienced.\n|\n| And you're probably in pretty good shape with Debian stable and the RHEL\n| respins like CentOS. I can't comment on Ubuntu server or any of the BSD\n| family -- I've never worked with them. But you should be able to keep a\n| \"branded\" server up for months, with the exception of applying security\n| patches that require a reboot. And *those* can be *planned* outages!\n|\n| Where you *will* have some major OS risk is with testing-level software\n| or \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't know\n| why people run Fedora servers -- if it's Red Hat compatibility you want,\n| there's CentOS.\n|\nLinux kernels seem to be pretty good these days. I ran Red Hat Linux 7.3\n24/7 for over 6 months, and it was discontinued years ago. I recognize that\nthis is by no means a record. It did not crash after 6 months, but I\nupgraded that box to CentOS 4 and it has been running that a long time. That\nbox has minor hardware problems that do not happen often enough to find the\nreal cause. But it stays up months at a time. All that box does is run BOINC\nand a printer server (CUPS).\n\nThis machine does not crash, but it gets rebooted whenever a new kernel\ncomes out, and has been up almost a month. It run RHEL5.\n\nI would think Fedora's kernel would probably be OK, but the other bleeding\nedge stuff I would not risk a serious server on.\n\n- --\n~ .~. Jean-David Beyer Registered Linux User 85642.\n~ /V\\ PGP-Key: 9A2FC99A Registered Machine 241939.\n~ /( )\\ Shrewsbury, New Jersey http://counter.li.org\n~ ^^-^^ 14:10:01 up 30 days, 1:55, 3 users, load average: 4.18, 4.26, 4.24\n-----BEGIN PGP SIGNATURE-----\nVersion: GnuPG v1.4.5 (GNU/Linux)\nComment: Using GnuPG with CentOS - http://enigmail.mozdev.org\n\niD8DBQFJb4zmPtu2XpovyZoRAn9TAKDFoEZ0JtoTi7T0qs9ZlI7rLxs9lACeJjDZ\nXL9rGZqzw0LjrszD1DaAhp4=\n=LdVq\n-----END PGP SIGNATURE-----\n", "msg_date": "Thu, 15 Jan 2009 14:22:14 -0500", "msg_from": "Jean-David Beyer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "Jean-David Beyer wrote:\n> M. Edward (Ed) Borasky wrote:\n> | Luke Lonergan wrote:\n> |> Not to mention the #1 cause of server faults in my experience: OS\n> |> kernel bug causes a crash. Battery backup doesn't help you much there.\n> |>\n> |\n> | Well now ... that very much depends on where you *got* the server OS and\n> | how you administer it. 
If you're talking a correctly-maintained Windows\n> | 2003 Server installation, or a correctly-maintained Red Hat Enterprise\n> | Linux installation, or any other \"branded\" OS from Novell, Sun, HP, etc.,\n> | I'm guessing such crashes are much rarer than what you've experienced.\n> |\n> | And you're probably in pretty good shape with Debian stable and the RHEL\n> | respins like CentOS. I can't comment on Ubuntu server or any of the BSD\n> | family -- I've never worked with them. But you should be able to keep a\n> | \"branded\" server up for months, with the exception of applying security\n> | patches that require a reboot. And *those* can be *planned* outages!\n> |\n> | Where you *will* have some major OS risk is with testing-level software\n> | or \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't know\n> | why people run Fedora servers -- if it's Red Hat compatibility you want,\n> | there's CentOS.\n> |\n> Linux kernels seem to be pretty good these days. I ran Red Hat Linux 7.3\n> 24/7 for over 6 months, and it was discontinued years ago. I recognize that\n> this is by no means a record. It did not crash after 6 months, but I\n> upgraded that box to CentOS 4 and it has been running that a long time.\n> That\n> box has minor hardware problems that do not happen often enough to find the\n> real cause. But it stays up months at a time. All that box does is run\n> BOINC\n> and a printer server (CUPS).\n> \n> This machine does not crash, but it gets rebooted whenever a new kernel\n> comes out, and has been up almost a month. It run RHEL5.\n> \n> I would think Fedora's kernel would probably be OK, but the other bleeding\n> edge stuff I would not risk a serious server on.\n\nI haven't heard much one way or the other about Fedora's kernels.\nBecause of the way their release cycle is staggered with the other major\ncommunity distros, they tend to be a number behind, say, openSUSE.\nopenSUSE 11.1, for example, just came out with 2.6.27, while Fedora came\nout with 2.6.26 only a couple of weeks before that.\n\nThe things I care the most about -- kernel-level performance metrics --\npretty much guarantee that I'm going to run a bleeding edge kernel as\nsoon as it's good enough to live through a couple of days without losing\ndata on the hard drive. And on my laptop, anything that recognizes my\nwireless and sound and leaves some of my 512 MB for applications is OK. :)\n> \n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Thu, 15 Jan 2009 22:03:01 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" }, { "msg_contents": "On Thu, 15 Jan 2009, Jean-David Beyer wrote:\n\n> -----BEGIN PGP SIGNED MESSAGE-----\n> Hash: SHA1\n>\n> M. Edward (Ed) Borasky wrote:\n> | Luke Lonergan wrote:\n> |> Not to mention the #1 cause of server faults in my experience: OS\n> |> kernel bug causes a crash. Battery backup doesn't help you much there.\n> |>\n> |\n> | Well now ... that very much depends on where you *got* the server OS and\n> | how you administer it. 
If you're talking a correctly-maintained Windows\n> | 2003 Server installation, or a correctly-maintained Red Hat Enterprise\n> | Linux installation, or any other \"branded\" OS from Novell, Sun, HP, etc.,\n> | I'm guessing such crashes are much rarer than what you've experienced.\n> |\n> | And you're probably in pretty good shape with Debian stable and the RHEL\n> | respins like CentOS. I can't comment on Ubuntu server or any of the BSD\n> | family -- I've never worked with them. But you should be able to keep a\n> | \"branded\" server up for months, with the exception of applying security\n> | patches that require a reboot. And *those* can be *planned* outages!\n> |\n> | Where you *will* have some major OS risk is with testing-level software\n> | or \"bleeding edge\" Linux distros like Fedora. Quite frankly, I don't know\n> | why people run Fedora servers -- if it's Red Hat compatibility you want,\n> | there's CentOS.\n> |\n> Linux kernels seem to be pretty good these days. I ran Red Hat Linux 7.3\n> 24/7 for over 6 months, and it was discontinued years ago. I recognize that\n> this is by no means a record. It did not crash after 6 months, but I\n> upgraded that box to CentOS 4 and it has been running that a long time. That\n> box has minor hardware problems that do not happen often enough to find the\n> real cause. But it stays up months at a time. All that box does is run BOINC\n> and a printer server (CUPS).\n>\n> This machine does not crash, but it gets rebooted whenever a new kernel\n> comes out, and has been up almost a month. It run RHEL5.\n>\n> I would think Fedora's kernel would probably be OK, but the other bleeding\n> edge stuff I would not risk a serious server on.\n\nI have been running kernel.org kernels in production for about 12 years \nnow (on what has now grown to a couple hundred servers), and I routinely \nrun from upgrade to upgrade with no crashes. I tend to upgrade every year \nor so).\n\nthat being said, things happen. I have a set of firewalls running the \nCheckpoint Secure Platform linux distribution that locked up solidly a \ncouple weeks after putting them in place (the iptables firewalls that they \nreplaced had been humming along just fine under much heavier loads for \nmonths).\n\nthe more mainstream your hardware is the safer you are (unfortunantly very \nfew RAID cards are mainstream), but I've also found that by compiling a \nminimal kernel that only supports the stuff that I need also contributes \nto reliability.\n\nbut even with my experiance, I would never architect anything with the \nexpectation that system crashes don't happen. I actually see more crashes \ndue to overheating (fans fail, AC units fail, etc) than I do from kernel \ncrashes.\n\nnot everything needs reliability. I am getting ready to build a pair of \npostgres servers that will have all safety disabled. I will get the \nredundancy I need by replicating between the pair, and if they both go \ndown (datacenter outage) it is very appropriate to loose the entire \ncontents of the system and reinitialize from scratch (in fact, every boot \nof the system will do this)\n\nbut you need to think carefully about what you are doing when you disable \nthe protection.\n\nDavid Lang\n", "msg_date": "Fri, 16 Jan 2009 00:59:53 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: understanding postgres issues/bottlenecks" } ]
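Short of turning fsync off entirely, 8.3 offers a narrower knob for the "can afford to lose a little data, but not the cluster" case that keeps coming up in this thread: synchronous_commit can be relaxed per session or per transaction while fsync stays on, so a crash can lose the last few commits but cannot corrupt the database the way fsync = off can. A minimal sketch (the statements inside the transaction stand in for whatever low-value writes apply):

BEGIN;
SET LOCAL synchronous_commit = off;
-- low-value writes go here
COMMIT;

That keeps the WAL discipline Luke is arguing for on the data that matters, while giving the bulk-load or throwaway-replica case much of the speed win David describes.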
[ { "msg_contents": "Hello,\n\nI created a multicolumn index on the columns c_1,..,c_n .\nIf I do use only a true subset of these columns in a SQL query, is the \nindex still efficient?\nOr is it better to create another multicolumn index defined on this subset?\n\nThanks for any comments!\n", "msg_date": "Mon, 12 Jan 2009 18:49:48 +0100", "msg_from": "=?ISO-8859-1?Q?J=F6rg_Kiegeland?= <[email protected]>", "msg_from_op": true, "msg_subject": "multicolumn indexes still efficient if not fully stressed?" }, { "msg_contents": "On Mon, 2009-01-12 at 18:49 +0100, Jörg Kiegeland wrote:\r\n> Hello,\r\n> \r\n> I created a multicolumn index on the columns c_1,..,c_n .\r\n> If I do use only a true subset of these columns in a SQL query, is the \r\n> index still efficient?\r\n> Or is it better to create another multicolumn index defined on this subset?\r\n> \r\n> Thanks for any comments!\r\n\r\nWhy would you create a multicolumn index for all columns if that's not\r\nwhat you actually query on?\r\n\r\nThe order of columns matter for multicolumn indexes. Multicolumn\r\nindexes work best for queries that use all of the columns in the index,\r\nbut can also be helpful if at least the leftmost columns in the index\r\nare specified in the query. So it depends on the order.\r\n\r\nIf the index is defined on (c_1, c_2, c_3, c_4) and your query includes:\r\n\"WHERE c_2=val AND c_3=val AND c_4=val\", then the index is almost\r\ncertainly useless.\r\n\r\nOn the other hand, if you were to query \"WHERE c_1=val\" then if c_1 is\r\nhighly selective the index would still help.\r\n\r\nSee here:\r\nhttp://www.postgresql.org/docs/8.3/interactive/indexes-multicolumn.html\r\n\r\n-- Mark Lewis\r\n\r\n\r\n", "msg_date": "Mon, 12 Jan 2009 10:48:25 -0800", "msg_from": "\"Mark Lewis\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: multicolumn indexes still efficient if not fullystressed?" }, { "msg_contents": "J�rg Kiegeland <[email protected]> schrieb:\n\n> Hello,\n>\n> I created a multicolumn index on the columns c_1,..,c_n .\n> If I do use only a true subset of these columns in a SQL query, is the \n> index still efficient?\n> Or is it better to create another multicolumn index defined on this subset?\n\nCreate several indexes for each column, since 8.1 PG can use a so called\nBitmap Index Scan. Read more about that:\n\n- http://www.postgresql-support.de/pgbook/node492.html\n (in german, i think, you can read that)\n\n- http://en.wikipedia.org/wiki/Bitmap_index\n\n\nAndreas\n-- \nReally, I'm not out to destroy Microsoft. That will just be a completely\nunintentional side effect. (Linus Torvalds)\n\"If I was god, I would recompile penguin with --enable-fly.\" (unknown)\nKaufbach, Saxony, Germany, Europe. N 51.05082�, E 13.56889�\n", "msg_date": "Mon, 12 Jan 2009 20:23:25 +0100", "msg_from": "Andreas Kretschmer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: multicolumn indexes still efficient if not fully\n\tstressed?" }, { "msg_contents": "On Mon, Jan 12, 2009 at 12:23 PM, Andreas Kretschmer\n<[email protected]> wrote:\n> Jörg Kiegeland <[email protected]> schrieb:\n>\n>> Hello,\n>>\n>> I created a multicolumn index on the columns c_1,..,c_n .\n>> If I do use only a true subset of these columns in a SQL query, is the\n>> index still efficient?\n>> Or is it better to create another multicolumn index defined on this subset?\n>\n> Create several indexes for each column, since 8.1 PG can use a so called\n> Bitmap Index Scan. 
Read more about that:\n\nI've found that when you do frequently query on two or more columns, a\nmulti-column index is faster than bitmap scans, especially for larger\ndata sets.\n", "msg_date": "Mon, 12 Jan 2009 13:01:46 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: multicolumn indexes still efficient if not fully stressed?" }, { "msg_contents": "On Monday 12 January 2009, \"Scott Marlowe\" <[email protected]> wrote:\n> I've found that when you do frequently query on two or more columns, a\n> multi-column index is faster than bitmap scans, especially for larger\n> data sets.\n\nVery much faster, especially if you're only looking for a few dozen or \nhundred rows out of multi-million row tables.\n\n-- \nCurrent Peeve: The mindset that the Internet is some sort of school for\nnovice sysadmins and that everyone -not- doing stupid dangerous things\nshould act like patient teachers with the ones who are. -- Bill Cole, NANAE \n", "msg_date": "Mon, 12 Jan 2009 12:17:47 -0800", "msg_from": "Alan Hodgson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: multicolumn indexes still efficient if not fully stressed?" } ]
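To make the leftmost-prefix rule in this thread concrete, here is a small sketch with made-up names; which plan actually gets chosen will of course depend on the data, and an empty toy table will simply be seq-scanned, so load some rows and run ANALYZE before comparing plans.

CREATE TABLE t (c_1 int, c_2 int, c_3 int, payload text);
CREATE INDEX t_c1_c2_c3_idx ON t (c_1, c_2, c_3);

EXPLAIN SELECT * FROM t WHERE c_1 = 1 AND c_2 = 2 AND c_3 = 3;  -- full use of the index
EXPLAIN SELECT * FROM t WHERE c_1 = 1;                          -- leftmost prefix, still usable
EXPLAIN SELECT * FROM t WHERE c_2 = 2 AND c_3 = 3;              -- c_1 missing: the index is of little help;
                                                                -- separate indexes on c_2 and c_3 combined
                                                                -- with a bitmap AND fit this case better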
[ { "msg_contents": "OK, I've got a query that's running slow the first time, then fast.\nBut I can't see where the time is really being spend on the first run.\n Query and plan attached to preserve formatting.\n\nThe index scan and nested loop that feed the next layer up nested loop\nboth show very short run times. Yet the nested loop they feed to\ntakes 30 seconds to run. If I run the query a second time, everything\nlooks the same but the second nested loop now runs in well under a\nsecond.\n\nI can't figure out where my time's going to.", "msg_date": "Mon, 12 Jan 2009 15:59:24 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": true, "msg_subject": "slow query" }, { "msg_contents": "Scott Marlowe wrote:\n\n> \n> OK, I've got a query that's running slow the first time, then fast.\n> But I can't see where the time is really being spend on the first run.\n> Query and plan attached to preserve formatting.\n\nOften this is from caching -- the first time the system has to go to disk to get the values; the subsequent times the data (and indexes, presumably) are all in RAM and so much faster.\n\nIs this plausible ?\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)\n\n\n\n\n\n\nRE: [PERFORM] slow query\n\n\n\nScott Marlowe wrote:\n\n>\n> OK, I've got a query that's running slow the first time, then fast.\n> But I can't see where the time is really being spend on the first run.\n> Query and plan attached to preserve formatting.\n\nOften this is from caching -- the first time the system has to go to disk to get the values; the subsequent times the data (and indexes, presumably) are all in RAM and so much faster.\n\nIs this plausible ?\n\nGreg Williamson\nSenior DBA\nDigitalGlobe\n\nConfidentiality Notice: This e-mail message, including any attachments, is for the sole use of the intended recipient(s) and may contain confidential and privileged information and must be protected in accordance with those provisions. Any unauthorized review, use, disclosure or distribution is prohibited. If you are not the intended recipient, please contact the sender by reply e-mail and destroy all copies of the original message.\n\n(My corporate masters made me say this.)", "msg_date": "Mon, 12 Jan 2009 16:38:10 -0700", "msg_from": "\"Gregory Williamson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: slow query" }, { "msg_contents": "On Mon, Jan 12, 2009 at 2:59 PM, Scott Marlowe <[email protected]> wrote:\n> OK, I've got a query that's running slow the first time, then fast.\n> But I can't see where the time is really being spend on the first run.\n> Query and plan attached to preserve formatting.\n>\n> The index scan and nested loop that feed the next layer up nested loop\n> both show very short run times. Yet the nested loop they feed to\n> takes 30 seconds to run. 
If I run the query a second time, everything\n> looks the same but the second nested loop now runs in well under a\n> second.\n>\n> I can't figure out where my time's going to.\n\nIf it is any help, there is a nice tool to format your explain plan at\nhttp://explain.depesz.com\n", "msg_date": "Mon, 12 Jan 2009 15:40:27 -0800", "msg_from": "bricklen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: slow query" }, { "msg_contents": "\n\"Scott Marlowe\" <[email protected]> writes:\n\n> -> Index Scan using users_orgid_idx on users u (cost=0.00..129.52 rows=5 width=271) (actual time=843.825..860.638 rows=0 loops=35)\n> Index Cond: (u.orgid = j2.orgid)\n> Filter: ((u.type_id < 10) AND (u.deleted = 0) AND ((lower((u.lname)::text) ~~ 'boat%'::text) OR (lower((u.fname)::text) ~~ 'boat%'::text) OR (lower((u.username)::text) ~~ 'boat%'::text) OR (lower(u.option1) ~~ 'boat%'::text) OR (lower((u.email)::text) ~~ '%boat%'::text) OR (lower(u.external_id) = 'boat'::text)))\n\nNot sure if this is what's going on but I find the high startup time for this\nindex scan suspicious. Either there are a lot of dead tuples (which would\nexplain the second run being fast if it marks them all as lp_dead) or there\nare a lot of matching index pointers which fail those other constraints.\nAssuming it's the latter perhaps some other index definition would let it zero\nin on the right tuples more quickly instead of having to grovel through a lot\nof irrelevant rows?\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's Slony Replication support!\n", "msg_date": "Mon, 12 Jan 2009 18:51:17 -0500", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: slow query" }, { "msg_contents": "On Mon, Jan 12, 2009 at 5:59 PM, Scott Marlowe <[email protected]> wrote:\n> I can't figure out where my time's going to.\n\nLooks like it's going to:\n\n-> Index Scan using users_orgid_idx on users u (cost=0.00..129.52\nrows=5 width=271) (actual time=843.825..860.638 rows=0 loops=35)\n\nI'd guess the index/pages for users isn't in memory the first time around.\n\nNext time is:\n\n-> Index Scan using users_orgid_idx on users u (cost=0.00..129.52\nrows=5 width=271) (actual time=3.126..3.305 rows=0 loops=35)\n\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Mon, 12 Jan 2009 18:55:13 -0500", "msg_from": "\"David Wilson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: slow query" }, { "msg_contents": "On Mon, Jan 12, 2009 at 4:55 PM, David Wilson <[email protected]> wrote:\n> On Mon, Jan 12, 2009 at 5:59 PM, Scott Marlowe <[email protected]> wrote:\n>> I can't figure out where my time's going to.\n>\n> Looks like it's going to:\n>\n> -> Index Scan using users_orgid_idx on users u (cost=0.00..129.52\n> rows=5 width=271) (actual time=843.825..860.638 rows=0 loops=35)\n>\n> I'd guess the index/pages for users isn't in memory the first time around.\n\nExactly. I keep forgetting to look at loops... sigh. Thanks!\n", "msg_date": "Mon, 12 Jan 2009 17:54:37 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: slow query" } ]
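A note on the arithmetic behind the conclusion above: EXPLAIN ANALYZE reports actual time per loop, so the cold-cache index scan on users accounts for roughly 860 ms per loop times 35 loops, about 30 seconds, while the warm run is about 3.3 ms times 35, roughly 0.12 seconds. That is where the "missing" time went. If the caching explanation needs confirming, the cumulative block I/O statistics view can show it (table name taken from the plan in this thread):

-- Rising *_blks_hit with flat *_blks_read across repeated runs means the
-- pages are being served from shared buffers rather than read from disk.
SELECT relname, heap_blks_read, heap_blks_hit, idx_blks_read, idx_blks_hit
FROM pg_statio_user_tables
WHERE relname = 'users';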
[ { "msg_contents": "Hi Group.\nRecently upgraded from 8.1 to 8.3 on RHEL 5 64-bit.\n\nI've noticed some performance problems that I am guessing are WAL \nrelated based on my browsing around and wondered if someone had some \nsuggestions for tuning the WAL settings. It could also help if someone \njust laughed at me and told me it wasn't WAL.\n\nI have narrowed the problem down to two pretty simple descriptions.\n\nI had a data load that I was doing with 8.1. It involved about 250k sql \nstatements that were inserts into a table with just one index. The index \nhas two fields.\nWith the upgrade to 8.3 that process started taking all night and 1/2 a \nday. It inserted at the rate of 349 records a minute.\nWhen I started working on the problem I decided to test by putting all \nstatements withing a single transaction. Just a simple BEGIN at the \nstart and COMMIT at the end. Magically it only took 7 minutes to do the \nwhole set, or 40k per minute. That seemed very odd to me, but at least I \nsolved the problem.\n\nThe most recently noticed simple problem.\nI had a table with about 20k records. We issued the statement DELETE \nFROM table where this=that.\nThis was part of a combined index and about 8k records should have been \ndeleted.\nThis statement caused all other queries to grind to a halt. It was only \nwhen I killed it that normal operation resumed. It was acting like a \nlock, but that table was not being used by any other process.\n\nSo that describes what I am seeing, let me relay what we are doing with \nwhat I think to be the relevant settings.\n\nFor the log shipping, I am using scp to send the logs to a separate \nserver. And yes they are getting sent.\nI have it set now to send the log about every two minutes since I am \ncomfortable with that amount of data loss. Here are the settings from \nthe log file that are not commented out relating to WAL. (everything \nbelow WRITE AHEAD LOG section in the default config file)\n\nsynchronous_commit = off\ncheckpoint_segments = 3 # in logfile segments, min 1, 16MB each\ncheckpoint_timeout = 5min # range 30s-1h\ncheckpoint_completion_target = 0.5 # checkpoint target duration, \n0.0 - 1.0\ncheckpoint_warning = 30s # 0 is off\narchive_mode = on # allows archiving to be done\narchive_command = '/var/lib/pgsql/data/logship.sh %f %p' \narchive_timeout = 120 # force a logfile segment switch after this\n\nThanks for any help (or laughter)\n\nRusty\n\n\n", "msg_date": "Mon, 12 Jan 2009 15:03:09 -0800", "msg_from": "Bill Preston <[email protected]>", "msg_from_op": true, "msg_subject": "Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "Bill Preston <[email protected]> writes:\n> I've noticed some performance problems that I am guessing are WAL \n> related based on my browsing around and wondered if someone had some \n> suggestions for tuning the WAL settings. It could also help if someone \n> just laughed at me and told me it wasn't WAL.\n\nConsider it done ;-). I'm not sure what your problem is but it's\nunlikely to be WAL, especially not if you're using the same WAL-related\nsettings in 8.1 and 8.3.\n\nWhich you might not be. The large speedup from wrapping many small\ninserts into one transaction is entirely expected and should have\noccurred on 8.1 as well. I am suspicious that you were running 8.1 with\nfsync off and 8.3 has it on. Do you still have your 8.1\npostgresql.conf? Comparing all the non-defaulted settings would be the\nfirst thing to do.\n\nIf it's not that, I'm not sure. 
One cross-version difference that comes\nto mind is that 8.3 is a bit stickier about implicit casting, and so it\nseems conceivable that something about these queries was considered\nindexable in 8.1 and is not in 8.3. But you've not provided enough\ndetail to do more than speculate.\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 12 Jan 2009 20:38:38 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow insert performace, 8.3 Wal related? " }, { "msg_contents": "On Monday 12 January 2009, Bill Preston <[email protected]> \nwrote:\n> I had a data load that I was doing with 8.1. It involved about 250k sql\n> statements that were inserts into a table with just one index. The index\n> has two fields.\n> With the upgrade to 8.3 that process started taking all night and 1/2 a\n> day. It inserted at the rate of 349 records a minute.\n> When I started working on the problem I decided to test by putting all\n> statements withing a single transaction. Just a simple BEGIN at the\n> start and COMMIT at the end. Magically it only took 7 minutes to do the\n> whole set, or 40k per minute. That seemed very odd to me, but at least I\n> solved the problem.\n>\n\nThat's well-known behaviour. If you don't do them in one big transaction, \nPostgreSQL has to fsync after every insert, which effectively limits your \ninsert rate to the rotational speed of your WAL drive (roughly speaking). \nIf you don't explicitly start and end transactions, PostgreSQL does it for \nyou. For every statement.\n\n> The most recently noticed simple problem.\n> I had a table with about 20k records. We issued the statement DELETE\n> FROM table where this=that.\n> This was part of a combined index and about 8k records should have been\n> deleted.\n> This statement caused all other queries to grind to a halt. It was only\n> when I killed it that normal operation resumed. It was acting like a\n> lock, but that table was not being used by any other process.\n\nAre there foreign keys on any other table(s) that point to this one? Are the \nrelevant columns in those tables indexed?\n\n\n-- \nCurrent Peeve: The mindset that the Internet is some sort of school for\nnovice sysadmins and that everyone -not- doing stupid dangerous things\nshould act like patient teachers with the ones who are. -- Bill Cole, NANAE \n", "msg_date": "Mon, 12 Jan 2009 17:46:51 -0800", "msg_from": "Alan Hodgson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "Wow thanks for all the help Tom and Alan. Sadly I was un-aware of the \nwell-known behavior. Consider it more will known now.\n\nFsync is off in 8.3.\nI am not too worried about what was before in 8.1 since we are not going \nback.\n\nFor the first example (bad behavior when I am not using transactions).\nIs there anyway to tell that it is going on at a given point and time? \nIs their a buffer that fills up, a stat somewhere that I can read?\nA lot of our code isn't using transactions yet so I would like a heads \nup when this problem is happening or if possible increase some parameter \nso it happens less.\n\nAs to the second example with the delete. There are no foreign keys.\nFor the index. 
If the table has fields a,b,c and d.\nWe have a btree index (a,b,c,d)\nand we are saying DELETE FROM table_messed_up WHERE a=x.\n\nSo the WHERE statement is the first field in the the index.\n\nNow that you have given me more knowledge, let me ask a question that \nmight lead to the answer.\n\nExample 1 happens in isolation.\nExample 2 happened on a live system with the parameters that I specified \nand a whole lot of sql statements without transactions being run at the \nsame time. In fact their probably was a whole lot of inserts on this \nvery table before the delete statement was hit.\n\nIs it possible that a problem like Example 1 caused the behavior that I \nwitnessed in Example 2? It was waiting for the WAL's to catch up or \nsomething?\n\nThanks\n\nRusty\n\n\nAlan Hodgson wrote:\n> On Monday 12 January 2009, Bill Preston <[email protected]> \n> wrote:\n> \n>> I had a data load that I was doing with 8.1. It involved about 250k sql\n>> statements that were inserts into a table with just one index. The index\n>> has two fields.\n>> With the upgrade to 8.3 that process started taking all night and 1/2 a\n>> day. It inserted at the rate of 349 records a minute.\n>> When I started working on the problem I decided to test by putting all\n>> statements withing a single transaction. Just a simple BEGIN at the\n>> start and COMMIT at the end. Magically it only took 7 minutes to do the\n>> whole set, or 40k per minute. That seemed very odd to me, but at least I\n>> solved the problem.\n>>\n>> \n>\n> That's well-known behaviour. If you don't do them in one big transaction, \n> PostgreSQL has to fsync after every insert, which effectively limits your \n> insert rate to the rotational speed of your WAL drive (roughly speaking). \n> If you don't explicitly start and end transactions, PostgreSQL does it for \n> you. For every statement.\n>\n> \n>> The most recently noticed simple problem.\n>> I had a table with about 20k records. We issued the statement DELETE\n>> FROM table where this=that.\n>> This was part of a combined index and about 8k records should have been\n>> deleted.\n>> This statement caused all other queries to grind to a halt. It was only\n>> when I killed it that normal operation resumed. It was acting like a\n>> lock, but that table was not being used by any other process.\n>> \n>\n> Are there foreign keys on any other table(s) that point to this one? Are the \n> relevant columns in those tables indexed?\n>\n>\n> \n\n", "msg_date": "Mon, 12 Jan 2009 22:13:44 -0800", "msg_from": "Bill Preston <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "On Monday 12 January 2009, Bill Preston <[email protected]> \nwrote:\n> As to the second example with the delete. There are no foreign keys.\n> For the index. If the table has fields a,b,c and d.\n> We have a btree index (a,b,c,d)\n> and we are saying DELETE FROM table_messed_up WHERE a=x.\n>\n\nIs there anything special about this table? Does it have like a hundred \nindexes on it or something? Because deleting 8k rows from a normal table \nshould never take more than a couple of seconds.\n\n-- \nAlan\n", "msg_date": "Tue, 13 Jan 2009 08:10:21 -0800", "msg_from": "Alan Hodgson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "Bill Preston wrote:\n> Fsync is off in 8.3.\n\nYou should consider turning synchronous_commit off instead. 
That's \nalmost as good as fsync=off performance-wise, but doesn't leave your \ndatabase corrupt in case of power loss or OS crash.\n\n-- \n Heikki Linnakangas\n EnterpriseDB http://www.enterprisedb.com\n", "msg_date": "Wed, 14 Jan 2009 12:10:17 +0200", "msg_from": "Heikki Linnakangas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "Nothing special about that table. One index.\n\nIt really seems that the system would grind to a stand-still when a lot \nof non-transaction inserts were run combined with the creation of some \nlarge temp tables.\n\nSince we added transactions and started using truncate, things have \ncleared up nicely. The suggestions here really helped.\n\nDoes anyone know of some established postgresql consultants that can be \nhired for emergency analysis/tuning when things come up?\n\nRusty\nAlan Hodgson wrote:\n> On Monday 12 January 2009, Bill Preston <[email protected]> \n> wrote:\n> \n>> As to the second example with the delete. There are no foreign keys.\n>> For the index. If the table has fields a,b,c and d.\n>> We have a btree index (a,b,c,d)\n>> and we are saying DELETE FROM table_messed_up WHERE a=x.\n>>\n>> \n>\n> Is there anything special about this table? Does it have like a hundred \n> indexes on it or something? Because deleting 8k rows from a normal table \n> should never take more than a couple of seconds.\n>\n> \n\n", "msg_date": "Thu, 15 Jan 2009 13:55:51 -0800", "msg_from": "Bill Preston <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "On Thu, Jan 15, 2009 at 2:55 PM, Bill Preston\n<[email protected]> wrote:\n> Nothing special about that table. One index.\n>\n> It really seems that the system would grind to a stand-still when a lot of\n> non-transaction inserts were run combined with the creation of some large\n> temp tables.\n>\n> Since we added transactions and started using truncate, things have cleared\n> up nicely. The suggestions here really helped.\n>\n> Does anyone know of some established postgresql consultants that can be\n> hired for emergency analysis/tuning when things come up?\n\nThere are several companies who have employees on this list who\nprovide for fee contract / consulting work. If you're local to me and\nneed help over a weekend I might have some spare time. :) But I'm\ngenerally pretty busy on weekends.\n", "msg_date": "Thu, 15 Jan 2009 15:32:39 -0700", "msg_from": "\"Scott Marlowe\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "Thanks Scott.\n\nWe are in Southern California.\nWhat I need someone for when the SHTF again, and if I can't handle it, \nI have some resource to get on the job right away. And it would help if \nthey were a company that does this kind of thing so that I can get some \nbuy in from those above.\n\nRusty\nScott Marlowe wrote:\n> On Thu, Jan 15, 2009 at 2:55 PM, Bill Preston\n> <[email protected]> wrote:\n> \n>> Nothing special about that table. One index.\n>>\n>> It really seems that the system would grind to a stand-still when a lot of\n>> non-transaction inserts were run combined with the creation of some large\n>> temp tables.\n>>\n>> Since we added transactions and started using truncate, things have cleared\n>> up nicely. 
The suggestions here really helped.\n>>\n>> Does anyone know of some established postgresql consultants that can be\n>> hired for emergency analysis/tuning when things come up?\n>> \n>\n> There are several companies who have employees on this list who\n> provide for fee contract / consulting work. If you're local to me and\n> need help over a weekend I might have some spare time. :) But I'm\n> generally pretty busy on weekends.\n> \n\n", "msg_date": "Thu, 15 Jan 2009 14:36:19 -0800", "msg_from": "Bill Preston <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" }, { "msg_contents": "On Thu, Jan 15, 2009 at 2:36 PM, Bill Preston\n<[email protected]> wrote:\n> We are in Southern California.\n> What I need someone for when the SHTF again, and if I can't handle it, I\n> have some resource to get on the job right away. And it would help if they\n> were a company that does this kind of thing so that I can get some buy in\n> from those above.\n\nDid you look here?\n\nhttp://www.postgresql.org/support/professional_support_northamerica\n\nPersonally, I would first look to a company who currently pays active\nPostgreSQL developers - Command Prompt, EnterpriseDB are two prominent\nvendors on that list. Looking at their websites (I have not used the\nservices of either) Command Prompt has a number you can call for\nround-the-clock support whether you are a customer or not and fairly\nclear pricing available up front.\n\n-Dave\n", "msg_date": "Fri, 16 Jan 2009 11:50:33 -0800", "msg_from": "\"David Rees\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow insert performace, 8.3 Wal related?" } ]
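To make the two fixes discussed in this thread concrete, here is a minimal sketch; the table and columns are hypothetical, not the poster's schema.

-- Batch the load in one transaction: one WAL flush per COMMIT instead of
-- one flush per INSERT.
BEGIN;
INSERT INTO measurements (device_id, reading) VALUES (1, 42.0);
INSERT INTO measurements (device_id, reading) VALUES (1, 42.5);
-- ... thousands more rows ...
COMMIT;

-- 8.3 alternative raised above: keep fsync = on for crash safety, but let
-- commits return before their WAL is flushed (bounded data loss on a
-- crash, no corruption).
SET synchronous_commit = off;

For pure bulk loads, COPY inside a single transaction is generally faster still than row-by-row INSERTs.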
[ { "msg_contents": "So, I had a query that uses a postgis geometry index and the planner was\nunderestimating the number of rows it would return. Because of this,\nthe planner was choosing the geometry index over a compound index on the\nother columns in the WHERE clause. So, I thought, let me increase the\nstats target for that geometry column. I did, and I got a different\n(and better) plan, but when I looked at the estimates for the simplified\nquery against the geometry column alone, I noticed that neither the cost\nnor the estimated rows changed:\n\noitest=# explain ANALYZE SELECT * FROM \"blips\" WHERE\n((ST_Contains(blips.shape,\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440')) );\n \nQUERY\nPLAN \n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Index Scan using index_blips_on_shape_gist on blips (cost=0.00..7.33\nrows=1 width=13804) (actual time=0.113..745.394 rows=2827 loops=1)\n Index Cond: (shape &&\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440'::geometry)\n Filter: ((shape &&\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440'::geometry) AND\n_st_contains(shape,\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440'::geometry))\n Total runtime: 745.977 ms\n(4 rows)\n\nTime: 747.199 ms\noitest=# ALTER TABLE blips ALTER COLUMN shape SET statistics 1000;\nALTER TABLE\nTime: 0.478 ms\noitest=# ANALYZE ;\nANALYZE\nTime: 7727.097 ms\noitest=# explain ANALYZE SELECT * FROM \"blips\" WHERE\n((ST_Contains(blips.shape,\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440')) );\n \nQUERY\nPLAN \n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Index Scan using index_blips_on_shape_gist on blips (cost=0.00..7.33\nrows=1 width=13761) (actual time=0.117..755.781 rows=2827 loops=1)\n Index Cond: (shape &&\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440'::geometry)\n Filter: ((shape &&\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440'::geometry) AND\n_st_contains(shape,\n'0101000020E610000049111956F1EB55C0A8E49CD843F34440'::geometry))\n Total runtime: 756.396 ms\n(4 rows)\n\nThe width changed slightly, but the cost is 7.33 in both.\n\nSo, now I thought how could that have changed the plan? Did the other\nparts of the plan estimate change? 
So I pulled the shape column out of\nthe where clause and left the others:\n\noitest=# ALTER TABLE blips ALTER COLUMN shape SET statistics 100;\nALTER TABLE\nTime: 0.475 ms\noitest=# ANALYZE ;\nANALYZE\nTime: 1225.325 ms\noitest=# explain ANALYZE SELECT * FROM \"blips\" WHERE\n(blips.\"content_id\" = 2410268 AND blips.\"content_type\" = E'Story');\n \nQUERY\nPLAN \n------------------------------------------------------------------------------------------------------------------------------------------------------\n Index Scan using index_blips_on_content_type_and_content_id on blips \n(cost=0.00..9.01 rows=2 width=13924) (actual time=0.026..0.027 rows=2\nloops=1)\n Index Cond: (((content_type)::text = 'Story'::text) AND (content_id =\n2410268))\n Total runtime: 0.046 ms\n(3 rows)\n\nTime: 1.111 ms\noitest=# ALTER TABLE blips ALTER COLUMN shape SET statistics 1000;\nALTER TABLE\nTime: 0.506 ms\noitest=# ANALYZE ;\nANALYZE\nTime: 7785.496 ms\noitest=# explain ANALYZE SELECT * FROM \"blips\" WHERE\n(blips.\"content_id\" = 2410268 AND blips.\"content_type\" = E'Story');\n QUERY\nPLAN \n-------------------------------------------------------------------------------------------------------------------------------------\n Index Scan using index_blips_on_content_id on blips (cost=0.00..7.29\nrows=1 width=13761) (actual time=0.013..0.014 rows=2 loops=1)\n Index Cond: (content_id = 2410268)\n Filter: ((content_type)::text = 'Story'::text)\n Total runtime: 0.034 ms\n(4 rows)\n\nTime: 1.007 ms\n\nSo, my question is, should changing the stats target on the shape column\naffect the stats for the content_id and content_type columns? Also, why\ndoes the index on content_id win out over the compound index on\n(content_type, content_id)?\n\n \"index_blips_on_content_id\" btree (content_id)\n \"index_blips_on_content_type_and_content_id\" btree (content_type,\ncontent_id)\n\n-- \nJeff Frost, Owner \t<[email protected]>\nFrost Consulting, LLC \thttp://www.frostconsultingllc.com/\nPhone: 916-647-6411\tFAX: 916-405-4032\n\n", "msg_date": "Tue, 13 Jan 2009 14:34:42 -0800", "msg_from": "Jeff Frost <[email protected]>", "msg_from_op": true, "msg_subject": "strange index behaviour with different statistics target" }, { "msg_contents": "Jeff Frost <[email protected]> writes:\n> So, my question is, should changing the stats target on the shape column\n> affect the stats for the content_id and content_type columns?\n\nIt would change the size of the sample for the table, which might\nimprove the accuracy of the stats. IIRC you'd still get the same number\nof histogram entries and most-common-values for the other columns, but\nthey might be more accurate.\n\n> Also, why does the index on content_id win out over the compound index\n> on (content_type, content_id)?\n\nIt's deciding (apparently correctly, from the explain results) that the\nlarger index isn't increasing the selectivity enough to be worth its\nextra search cost. 
I suppose content_type = 'Story' isn't very\nselective in this table?\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 13 Jan 2009 18:06:01 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index behaviour with different statistics target " }, { "msg_contents": "On Tue, 13 Jan 2009, Tom Lane wrote:\n\n> Jeff Frost <[email protected]> writes:\n>> So, my question is, should changing the stats target on the shape column\n>> affect the stats for the content_id and content_type columns?\n>\n> It would change the size of the sample for the table, which might\n> improve the accuracy of the stats. IIRC you'd still get the same number\n> of histogram entries and most-common-values for the other columns, but\n> they might be more accurate.\n\nWhy would they be more accurate? Do they somehow correlate with the other \ncolumn's histogram and most-common-values when the stats target is increased \non that column?\n\nThe planner is choosing a plan I like for the query, I'm just trying to \nunderstand why it's doing that since the planner thinks the gist index is \ngoing to give it a single row (vs the 2827 rows it actually gets) and the fact \nthat the cost didn't change for perusing the gist index. I guess I was \nexpecting the estimated rowcount and cost for perusing the gist index to go up \nand when it didn't I was pleasantly surprised to find I got a plan I wanted \nanyway.\n\n>\n>> Also, why does the index on content_id win out over the compound index\n>> on (content_type, content_id)?\n>\n> It's deciding (apparently correctly, from the explain results) that the\n> larger index isn't increasing the selectivity enough to be worth its\n> extra search cost. I suppose content_type = 'Story' isn't very\n> selective in this table?\n\nAh! You're right, especially with this content_id!\n\n-- \nJeff Frost, Owner \t<[email protected]>\nFrost Consulting, LLC \thttp://www.frostconsultingllc.com/\nPhone: 916-647-6411\tFAX: 916-405-4032\n", "msg_date": "Tue, 13 Jan 2009 15:23:08 -0800 (PST)", "msg_from": "Jeff Frost <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index behaviour with different statistics\n target" }, { "msg_contents": "Jeff Frost <[email protected]> writes:\n> On Tue, 13 Jan 2009, Tom Lane wrote:\n>> It would change the size of the sample for the table, which might\n>> improve the accuracy of the stats. IIRC you'd still get the same number\n>> of histogram entries and most-common-values for the other columns, but\n>> they might be more accurate.\n\n> Why would they be more accurate?\n\nThey'd be drawn from a larger sample of the table rows. If we need a\nrandom sample of N rows for the largest stats target among the columns,\nwe use all those rows for deriving the stats for the other columns too.\n\n> The planner is choosing a plan I like for the query, I'm just trying to \n> understand why it's doing that since the planner thinks the gist index is \n> going to give it a single row (vs the 2827 rows it actually gets) and the fact \n> that the cost didn't change for perusing the gist index.\n\nYou'd need to ask the postgis guys whether they have an estimator for\nST_Contains that actually does anything useful. I haven't the foggiest\nwhat the state of their stats support is.\n\n[ looks again at the plan... ] Actually it looks like the estimator\nfor && is what's at issue. 
Estimators are attached to operators not\nfunctions.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 13 Jan 2009 18:40:21 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index behaviour with different statistics target " }, { "msg_contents": "On Tue, 13 Jan 2009, Tom Lane wrote:\n\n> Jeff Frost <[email protected]> writes:\n>> On Tue, 13 Jan 2009, Tom Lane wrote:\n>>> It would change the size of the sample for the table, which might\n>>> improve the accuracy of the stats. IIRC you'd still get the same number\n>>> of histogram entries and most-common-values for the other columns, but\n>>> they might be more accurate.\n>\n>> Why would they be more accurate?\n>\n> They'd be drawn from a larger sample of the table rows. If we need a\n> random sample of N rows for the largest stats target among the columns,\n> we use all those rows for deriving the stats for the other columns too.\n\nOh, ok, thanks Tom. That makes sense now.\n\n>\n>> The planner is choosing a plan I like for the query, I'm just trying to\n>> understand why it's doing that since the planner thinks the gist index is\n>> going to give it a single row (vs the 2827 rows it actually gets) and the fact\n>> that the cost didn't change for perusing the gist index.\n>\n> You'd need to ask the postgis guys whether they have an estimator for\n> ST_Contains that actually does anything useful. I haven't the foggiest\n> what the state of their stats support is.\n>\n> [ looks again at the plan... ] Actually it looks like the estimator\n> for && is what's at issue. Estimators are attached to operators not\n> functions.\n\nThanks, I'll see if I can dig up some info on that and/or post to the \npostgis list if I can't turn anything up.\n\n-- \nJeff Frost, Owner \t<[email protected]>\nFrost Consulting, LLC \thttp://www.frostconsultingllc.com/\nPhone: 916-647-6411\tFAX: 916-405-4032\n", "msg_date": "Tue, 13 Jan 2009 15:44:00 -0800 (PST)", "msg_from": "Jeff Frost <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index behaviour with different statistics\n target" } ]
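One way to watch the effect described above (the larger sample sharpening the statistics of the other columns) is to compare pg_stats before and after the ALTER ... SET STATISTICS / ANALYZE steps, using the column names from this thread:

-- n_distinct, null_frac and the most-common-values list should track the
-- real data more closely once ANALYZE has run with the larger sample.
SELECT attname, n_distinct, null_frac, most_common_vals
FROM pg_stats
WHERE tablename = 'blips'
  AND attname IN ('content_id', 'content_type', 'shape');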
[ { "msg_contents": "We are currently storing a large amount of networking data. The\ndatabase size is over 50 Gigabytes. It also grows by 10 Gigabytes\nevery month.\n\nWe are looking if there is a way to speedup lookups by IP Address.\n\nThe primary key is a set of values. And the key does include IP\nAddress as well. Will it be more efficient to also add index on IP\nAddress to speedup lookups by IP?\n\n-- \nRegards,\n\nMaksim\n", "msg_date": "Wed, 14 Jan 2009 00:53:52 -0500", "msg_from": "\"Maksim Sosnovskiy\" <[email protected]>", "msg_from_op": true, "msg_subject": "index" }, { "msg_contents": "On Wed, Jan 14, 2009 at 12:53 AM, Maksim Sosnovskiy <[email protected]> wrote:\nWill it be more efficient to also add index on IP\n> Address to speedup lookups by IP?\n\nMost likely, especially if the IP address is not the first column in\nyour primary key index.\n\nHave you done an explain analyze of your ip lookup query? If not, do\nso; that can help. Then try creating the index and explain analyze the\nquery again to see what happens.\n\nKnowing your existing schema/indices and such would let us do more\nthan guess- and not knowing the plan your current query is using makes\nit difficult to know if there's a better one using a to-be-created\nindex.\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Wed, 14 Jan 2009 01:16:13 -0500", "msg_from": "\"David Wilson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: index" }, { "msg_contents": "Maksim Sosnovskiy escreveu:\n> The primary key is a set of values. And the key does include IP\n> Address as well. Will it be more efficient to also add index on IP\n> Address to speedup lookups by IP?\n> \nIt depends on what set of queries that you run. If the most frequent queries\nuse ip_field, then you need an index on it. Maybe you could try ip4r [1].\n\n\n[1] http://pgfoundry.org/projects/ip4r/\n\n\n-- \n Euler Taveira de Oliveira\n http://www.timbira.com/\n", "msg_date": "Wed, 14 Jan 2009 11:39:51 -0200", "msg_from": "Euler Taveira de Oliveira <[email protected]>", "msg_from_op": false, "msg_subject": "Re: index" } ]
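A minimal sketch of the advice given above; the table and column names are hypothetical, since the poster's schema was not shown.

-- First see what the current lookup does:
EXPLAIN ANALYZE
SELECT * FROM network_log WHERE ip_address = '192.168.10.25';

-- If the plan shows a sequential scan (or the composite primary-key index
-- cannot help because ip_address is not its leading column), add a
-- dedicated index and compare:
CREATE INDEX network_log_ip_idx ON network_log (ip_address);
ANALYZE network_log;

EXPLAIN ANALYZE
SELECT * FROM network_log WHERE ip_address = '192.168.10.25';

If lookups over CIDR ranges are also needed, that is where the ip4r type mentioned above becomes interesting, since it is designed for indexing IP ranges.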
[ { "msg_contents": "Hi,\n\nI want to store a boolean SQL condition in a column \"condition_column\" \nof my table \"myTable\".\nThis condition refers to other columns of the same table and shall use \none parameter, e.g. \"column1=4 AND colume2+column3=param\".\nEvery row has a different condition in general..\n\nSo my query on the table should look like \"SELECT * FROM myTable WHERE \nanotherCondition AND EXECUTE condition_column(7)\"\nIn this example, the concrete argument is \"7\".\nHowever EXECUTE can only be used for prepared statements, and I dont \nknow how to prepare a statement within one single SQL query.\nDoes anybody know?\n\nOur current solution is to execute \"SELECT * FROM myTable WHERE \nanotherCondition\" where \"anotherCondition\" selects ~30% of the table,\nand we evaluate the row-specific condition on client-side by our own \ncondition-evaluator, so that we finally have ~1% from the whole table as \nresult.\nThis however fetches 1/3 of the table over a remote JDBC connection. Its \nclear that a server-side evaluation would also scan 1/3 of the table,\nhowever only the final result would be transfered to the client.\n\nThanks for any help!\n", "msg_date": "Thu, 15 Jan 2009 11:42:29 +0100", "msg_from": "=?ISO-8859-1?Q?J=F6rg_Kiegeland?= <[email protected]>", "msg_from_op": true, "msg_subject": "row-specific conditions possible?" }, { "msg_contents": "In response to J�rg Kiegeland :\n> Hi,\n\nThis list, [Perform], is obviously the wrong list for such...\n\n> \n> I want to store a boolean SQL condition in a column \"condition_column\" \n> of my table \"myTable\".\n> This condition refers to other columns of the same table and shall use \n> one parameter, e.g. \"column1=4 AND colume2+column3=param\".\n> Every row has a different condition in general..\n\nmaybe this blog-entry can help you:\nhttp://akretschmer.blogspot.com/2007/10/postgresql-formula-in-column-how.html\n\n\nFor further questions ask me privat.\n\n\nAndreas\n-- \nAndreas Kretschmer\nKontakt: Heynitz: 035242/47150, D1: 0160/7141639 (mehr: -> Header)\nGnuPG-ID: 0x3FFF606C, privat 0x7F4584DA http://wwwkeys.de.pgp.net\n", "msg_date": "Thu, 15 Jan 2009 12:06:01 +0100", "msg_from": "\"A. Kretschmer\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: row-specific conditions possible?" } ]
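The linked blog entry is not reproduced here, but one possible server-side approach is a small PL/pgSQL function that evaluates the stored condition with dynamic SQL. Everything below is a sketch built on assumptions: that myTable has an integer primary key named id, that the stored condition spells its parameter literally as param, and that the stored SQL text is fully trusted (this construction is wide open to SQL injection otherwise).

CREATE OR REPLACE FUNCTION row_condition_holds(p_id integer, p_param integer)
RETURNS boolean AS $$
DECLARE
    cond text;
    ok   boolean;
BEGIN
    SELECT condition_column INTO cond FROM myTable WHERE id = p_id;
    IF cond IS NULL THEN
        RETURN false;
    END IF;
    -- crude textual substitution of the single parameter
    cond := replace(cond, 'param', p_param::text);
    -- evaluate the condition against the same row
    EXECUTE 'SELECT (' || cond || ') FROM myTable WHERE id = ' || p_id
        INTO ok;
    RETURN COALESCE(ok, false);
END;
$$ LANGUAGE plpgsql STABLE;

-- Usage, matching the query shape in the question:
-- SELECT * FROM myTable WHERE anotherCondition AND row_condition_holds(id, 7);

This still evaluates the conditions row by row on the server, but only the matching rows travel over the JDBC connection, which was the stated goal.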
[ { "msg_contents": "\nHi folks,\n\n\nI really like the idea of exclusively using views as interfaces \nto the applications (each app has its own view, tuned for the\napp's access patterns). So, in my model, of course inserts and \nupdates also happen through views, also more complex operations\ncould be triggered through view writes. \n\nBut for some strange reason, my apps seem to produce high load\non the server. So either I'm just too stupid for proper schema\ndesign or the view approach is really slow. \n\nWhat exactly does happen behind the scenes if I update some \nrow in a view ? Does it end up in an full view query before \ndoing the update ?\n\n\ncu\n-- \n---------------------------------------------------------------------\n Enrico Weigelt == metux IT service - http://www.metux.de/\n---------------------------------------------------------------------\n Please visit the OpenSource QM Taskforce:\n \thttp://wiki.metux.de/public/OpenSource_QM_Taskforce\n Patches / Fixes for a lot dozens of packages in dozens of versions:\n\thttp://patches.metux.de/\n---------------------------------------------------------------------\n", "msg_date": "Thu, 15 Jan 2009 19:34:16 +0100", "msg_from": "Enrico Weigelt <[email protected]>", "msg_from_op": true, "msg_subject": "Cost of INSERT rules" } ]
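One way to see what a write through a rule-based view actually turns into is simply to EXPLAIN it: the rewriter replaces the statement against the view with the rule's action on the base table, and the resulting plan shows whether anything more expensive than the equivalent direct UPDATE is being done. A minimal sketch with hypothetical names (this is not the poster's schema):

CREATE TABLE accounts (id int PRIMARY KEY, balance numeric, owner text);

-- App-specific view plus an UPDATE rule (8.x views are not updatable on
-- their own); the same idea applies to INSERT rules.
CREATE VIEW app_accounts AS
    SELECT id, balance FROM accounts;

CREATE RULE app_accounts_upd AS
    ON UPDATE TO app_accounts
    DO INSTEAD
    UPDATE accounts SET balance = NEW.balance WHERE id = OLD.id;

-- EXPLAIN ANALYZE really executes the rewritten UPDATE, so wrap it:
BEGIN;
EXPLAIN ANALYZE UPDATE app_accounts SET balance = 10 WHERE id = 1;
ROLLBACK;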
[ { "msg_contents": "Hi all,\n\nI have a view that looks like this:\n\n SELECT\n CASE\n WHEN r.assetid IS NULL THEN p.assetid\n ELSE r.assetid\n END AS assetid,\n CASE\n WHEN r.userid IS NULL THEN p.userid\n ELSE r.userid\n END AS userid, p.permission, p.\"granted\", p.cascades\n FROM sq_ast_perm p\n LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND \nr.assetid::text = p.assetid::text;\n\n\nIt was pointed out to me that the first CASE is useless (since r.assetid \nwill always be the same as p.assetid because of the left join condition) \nso I'm looking at that to see if it'll make much of a difference and it \ndoes.\n\nI won't post the whole lot but the first line is the most interesting.\n\n# explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n \n\n Merge Left Join (cost=9529.34..13823.76 rows=75721 width=102) (actual \ntime=284.371..341.536 rows=1 loops=1)\n\n(The row count is right - it's the total # of rows from sq_ast_perm).\n\n\nWhen I change the view to be:\n\n SELECT p.assetid,\n CASE\n WHEN r.userid IS NULL THEN p.userid\n ELSE r.userid\n END AS userid, p.permission, p.\"granted\", p.cascades\n FROM sq_ast_perm p\n LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND \nr.assetid::text = p.assetid::text;\n\n\nThe Merge left join only returns 3 rows:\n\n# explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n \n Merge Left Join (cost=9507.18..9508.23 rows=3 width=70) \n(actual time=11.544..11.549 rows=1 loops=1)\n\nI thought the where condition would cut down on the rows returned, then \nthe case statement would take effect to do the null check. It seems to \nbe doing it in reverse ??\n\nRecently analyzed, only just imported so free of bloat. Running 8.1.11.\n\nThanks!\n-- \nPostgresql & php tutorials\nhttp://www.designmagick.com/\n\n", "msg_date": "Mon, 19 Jan 2009 14:30:47 +1100", "msg_from": "Chris <[email protected]>", "msg_from_op": true, "msg_subject": "left join + case - how is it processed?" }, { "msg_contents": "On Sun, Jan 18, 2009 at 10:30 PM, Chris <[email protected]> wrote:\n> Hi all,\n>\n> I have a view that looks like this:\n>\n> SELECT\n> CASE\n> WHEN r.assetid IS NULL THEN p.assetid\n> ELSE r.assetid\n> END AS assetid,\n> CASE\n> WHEN r.userid IS NULL THEN p.userid\n> ELSE r.userid\n> END AS userid, p.permission, p.\"granted\", p.cascades\n> FROM sq_ast_perm p\n> LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND\n> r.assetid::text = p.assetid::text;\n>\n>\n> It was pointed out to me that the first CASE is useless (since r.assetid\n> will always be the same as p.assetid because of the left join condition) so\n> I'm looking at that to see if it'll make much of a difference and it does.\n>\n> I won't post the whole lot but the first line is the most interesting.\n>\n> # explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n>\n>\n> Merge Left Join (cost=9529.34..13823.76 rows=75721 width=102) (actual\n> time=284.371..341.536 rows=1 loops=1)\n>\n> (The row count is right - it's the total # of rows from sq_ast_perm).\n\nThe row count is VERY WRONG. 
Apparently the actual number of rows is\n1 and the estimate is 75721: that's bad.\n\n> When I change the view to be:\n>\n> SELECT p.assetid,\n> CASE\n> WHEN r.userid IS NULL THEN p.userid\n> ELSE r.userid\n> END AS userid, p.permission, p.\"granted\", p.cascades\n> FROM sq_ast_perm p\n> LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND\n> r.assetid::text = p.assetid::text;\n>\n>\n> The Merge left join only returns 3 rows:\n>\n> # explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n>\n> Merge Left Join (cost=9507.18..9508.23 rows=3 width=70) (actual\n> time=11.544..11.549 rows=1 loops=1)\n>\n> I thought the where condition would cut down on the rows returned, then the\n> case statement would take effect to do the null check. It seems to be doing\n> it in reverse ??\n\nThe ESTIMATE is 3 rows - the actual rows are 1, just as before.\nNotice this is a much more accurate estimate: that's good.\n\nThe reason why the CASE is affecting your query planning is because\nyou are using a query that compares assetid to a constant:\n\nSELECT * from sq_vw_ast_perm where assetid='30748';\n\nWhen PostgreSQL evaluates this statement, assetid gets expanded either\ninto a case statement (with your first view definition) or into\nsq_ast_perm.assetid (with your second view definition). The latter\ndefinition allows PostgreSQL to make use of the column statistics\n(which are pretty accurate) whereas the former is probably leading to\na SWAG, because PostgreSQL isn't very good at estimating the\nselectivity of CASE. The bad selectivity estimate, in turn, is\nleading to a poor plan choice...\n\n...Robert\n", "msg_date": "Sun, 18 Jan 2009 23:17:00 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: left join + case - how is it processed?" }, { "msg_contents": "On Sun, Jan 18, 2009 at 9:30 PM, Chris <[email protected]> wrote:\n> Hi all,\n>\n> I have a view that looks like this:\n>\n> SELECT\n> CASE\n> WHEN r.assetid IS NULL THEN p.assetid\n> ELSE r.assetid\n> END AS assetid,\n> CASE\n> WHEN r.userid IS NULL THEN p.userid\n> ELSE r.userid\n> END AS userid, p.permission, p.\"granted\", p.cascades\n> FROM sq_ast_perm p\n> LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND\n> r.assetid::text = p.assetid::text;\n>\n>\n> It was pointed out to me that the first CASE is useless (since r.assetid\n> will always be the same as p.assetid because of the left join condition) so\n> I'm looking at that to see if it'll make much of a difference and it does.\n\nLets assume it wasn't useless because of that, it would still be\nobfuscated and probably slower because it is an explicit coalesce()\n\nSELECT coalesce( r.assetid, p.assetid ) , coalesce( r.userid , p.userid )\n\n\n> I thought the where condition would cut down on the rows returned, then the\n> case statement would take effect to do the null check. It seems to be doing\n> it in reverse ??\n# explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n# explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n\nIt aperas to me that both of your statements have where clauses, but I\nbelieve where isn't that explicit. I'm not sure the nature of your\nproblem permits the query optimizer to eliminate rows at all, even\nwith the where statement. \"assetid\" is probably not known when the\nquery optimizer hits, because it is computed based on the nullness of\nthe columns. 
I'd assume that the optimizer *could* more easily\noptimize this if you had used coalesce rather than an ad-hoc method\nwith CASE. My guess is you can exclude rows with WHERE if the the\ncolumn used is an run-time computation involving an ad-hoc CASE.\n\n\n-- \nEvan Carroll\nSystem Lord of the Internets\n", "msg_date": "Sun, 18 Jan 2009 22:18:00 -0600", "msg_from": "Evan Carroll <[email protected]>", "msg_from_op": false, "msg_subject": "Re: left join + case - how is it processed?" }, { "msg_contents": "> My guess is you can exclude rows with WHERE if the the\n> column used is an run-time computation involving an ad-hoc CASE.\n\n\n* that you can't\n\n\n-- \nEvan Carroll\nSystem Lord of the Internets\n", "msg_date": "Sun, 18 Jan 2009 22:20:04 -0600", "msg_from": "Evan Carroll <[email protected]>", "msg_from_op": false, "msg_subject": "Re: left join + case - how is it processed?" }, { "msg_contents": "\n> The reason why the CASE is affecting your query planning is because\n> you are using a query that compares assetid to a constant:\n> \n> SELECT * from sq_vw_ast_perm where assetid='30748';\n> \n> When PostgreSQL evaluates this statement, assetid gets expanded either\n> into a case statement (with your first view definition) or into\n> sq_ast_perm.assetid (with your second view definition). The latter\n> definition allows PostgreSQL to make use of the column statistics\n> (which are pretty accurate) whereas the former is probably leading to\n> a SWAG, because PostgreSQL isn't very good at estimating the\n> selectivity of CASE. The bad selectivity estimate, in turn, is\n> leading to a poor plan choice...\n\nIf I take it out of the view, it's fine:\n\n# SELECT\n# CASE\n# WHEN r.assetid IS NULL THEN p.assetid\n# ELSE r.assetid\n# END AS assetid,\n# CASE\n# WHEN r.userid IS NULL THEN p.userid\n# ELSE r.userid\n# END AS userid, p.permission, p.\"granted\", p.cascades\n# FROM sq_ast_perm p\n# LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND \nr.assetid::text = p.assetid::text\n# where p.assetid='30748';\n \n QUERY PLAN \n\n---------------------------------------------\n Merge Left Join (cost=9459.89..9463.13 rows=3 width=102) (actual \ntime=0.096..0.098 rows=1 loops=1)\n\nIn this case I assume the planner is doing the 'WHERE' first to cut down \nthe rows, then applying the CASE at the end.\n\nThe view it seems to be the opposite - I still don't understand why \nthat's the case.\n\n\nThough I do get the same behaviour as the view when I do it as a subselect.\n\n-- \nPostgresql & php tutorials\nhttp://www.designmagick.com/\n\n", "msg_date": "Mon, 19 Jan 2009 15:36:13 +1100", "msg_from": "Chris <[email protected]>", "msg_from_op": true, "msg_subject": "Re: left join + case - how is it processed?" }, { "msg_contents": "\n> \n>> I thought the where condition would cut down on the rows returned, then the\n>> case statement would take effect to do the null check. It seems to be doing\n>> it in reverse ??\n> # explain analyze SELECT * from sq_vw_ast_perm where assetid='30748';\n> \n> It aperas to me that both of your statements have where clauses, but I\n> believe where isn't that explicit. I'm not sure the nature of your\n> problem permits the query optimizer to eliminate rows at all, even\n> with the where statement. \"assetid\" is probably not known when the\n> query optimizer hits, because it is computed based on the nullness of\n> the columns. I'd assume that the optimizer *could* more easily\n> optimize this if you had used coalesce rather than an ad-hoc method\n> with CASE. 
My guess is you can exclude rows with WHERE if the the\n> column used is an run-time computation involving an ad-hoc CASE.\n\nNo difference.\n\nFull explain plan here:\n\nhttp://explain-analyze.info/query_plans/2725-query-plan-1447\n\nI can see it's doing the extra filter step at the start (4th line) which \nis not present without the coalesce/case statement. I just don't \nunderstand why it's being done at that stage.\n\n-- \nPostgresql & php tutorials\nhttp://www.designmagick.com/\n\n", "msg_date": "Mon, 19 Jan 2009 16:05:00 +1100", "msg_from": "Chris <[email protected]>", "msg_from_op": true, "msg_subject": "Re: left join + case - how is it processed?" }, { "msg_contents": "Chris <[email protected]> writes:\n> I can see it's doing the extra filter step at the start (4th line) which \n> is not present without the coalesce/case statement. I just don't \n> understand why it's being done at that stage.\n\nIt's not that hard to understand. With the original view formulation\n(or the COALESCE version), the fully expanded form of the query looks\nlike\n\n\tselect ... from p left join r ...\n\t where expression_involving_both_p_and_r = constant\n\nIf you make the view output be just p.assetid then you have\n\n\tselect ... from p left join r ...\n\t where p.assetid = constant\n\nIn the first case the planner cannot apply the WHERE restriction until\nit's formed the p+r join; so you see the condition applied as a filter\non the join node's output. In the second case, the planner can push the\nWHERE restriction down into the scan of p, since the left join doesn't\naffect it. (If a p row doesn't pass the restriction, then no join row\nformed from it can either; ergo there is no need to form those join rows\nat all.)\n\nIn general a WHERE or JOIN/ON clause cannot be applied below the point\nat which all the relations mentioned in it have been joined. There are\na few special cases where the planner can transform clauses into some\nother form that's more optimizable, but you can pretty much bet that a\nCASE will never be one of them --- CASE is more or less *defined* to\ndefeat optimization.\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 19 Jan 2009 11:33:34 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: left join + case - how is it processed? " }, { "msg_contents": "On Sun, Jan 18, 2009 at 7:30 PM, Chris <[email protected]> wrote:\n\n> I have a view that looks like this:\n>\n> SELECT\n> CASE\n> WHEN r.assetid IS NULL THEN p.assetid\n> ELSE r.assetid\n> END AS assetid,\n> CASE\n> WHEN r.userid IS NULL THEN p.userid\n> ELSE r.userid\n> END AS userid, p.permission, p.\"granted\", p.cascades\n> FROM sq_ast_perm p\n> LEFT JOIN sq_vw_ast_role r ON r.roleid::text = p.userid::text AND\n> r.assetid::text = p.assetid::text;\n\nThe effect that you are trying to achieve with CASE statements is\nbetter suited to the COALESCE(...) function.\nhttp://www.postgresql.org/docs/8.3/interactive/functions-conditional.html#AEN14484\n\n-- \nRegards,\nRichard Broersma Jr.\n\nVisit the Los Angeles PostgreSQL Users Group (LAPUG)\nhttp://pugs.postgresql.org/lapug\n", "msg_date": "Mon, 19 Jan 2009 08:51:05 -0800", "msg_from": "\"Richard Broersma\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: left join + case - how is it processed?" }, { "msg_contents": "Tom Lane wrote:\n> Chris <[email protected]> writes:\n>> I can see it's doing the extra filter step at the start (4th line) which \n>> is not present without the coalesce/case statement. 
I just don't \n>> understand why it's being done at that stage.\n> \n> It's not that hard to understand. With the original view formulation\n> (or the COALESCE version), the fully expanded form of the query looks\n> like\n> \n> \tselect ... from p left join r ...\n> \t where expression_involving_both_p_and_r = constant\n> \n> If you make the view output be just p.assetid then you have\n> \n> \tselect ... from p left join r ...\n> \t where p.assetid = constant\n> \n> In the first case the planner cannot apply the WHERE restriction until\n> it's formed the p+r join; so you see the condition applied as a filter\n> on the join node's output. In the second case, the planner can push the\n> WHERE restriction down into the scan of p, since the left join doesn't\n> affect it. (If a p row doesn't pass the restriction, then no join row\n> formed from it can either; ergo there is no need to form those join rows\n> at all.)\n\nSo because the CASE is on (some of) the fields I'm joining on, in effect \nit's made part of the join condition. If the fields are outside that \n(r.userid/p.userid), then it's evaluated after.\n\nThanks!\n\n-- \nPostgresql & php tutorials\nhttp://www.designmagick.com/\n\n", "msg_date": "Tue, 20 Jan 2009 09:12:04 +1100", "msg_from": "Chris <[email protected]>", "msg_from_op": true, "msg_subject": "Re: left join + case - how is it processed?" } ]
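Putting the two suggestions from this thread together, the view can expose the join column directly and keep COALESCE only for the column that genuinely needs it; that is what allows the planner to push an assetid filter down into the scan of sq_ast_perm. Something along these lines:

CREATE OR REPLACE VIEW sq_vw_ast_perm AS
SELECT p.assetid,
       COALESCE(r.userid, p.userid) AS userid,
       p.permission, p."granted", p.cascades
FROM sq_ast_perm p
LEFT JOIN sq_vw_ast_role r
       ON r.roleid::text  = p.userid::text
      AND r.assetid::text = p.assetid::text;

-- A query such as
--   SELECT * FROM sq_vw_ast_perm WHERE assetid = '30748';
-- now restricts sq_ast_perm before the left join instead of filtering
-- the joined result afterwards.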
[ { "msg_contents": "sorry, I just wonder why I can't get my message delivered...\n\n-- \nBest regards,\nGábriel Ákos\n-=E-Mail :[email protected]|Web: http://www.i-logic.hu=-\n-=Tel/fax:+3612391618 |Mobil:+36209278894 =-\n", "msg_date": "Mon, 19 Jan 2009 18:31:26 +0100", "msg_from": "Akos Gabriel <[email protected]>", "msg_from_op": true, "msg_subject": "test" } ]
[ { "msg_contents": "(now that my test went through, here is the question :) )\n\nDear Community,\n\nWe are using PostgreSQL proudly for quite a long time, but now we are\nfacing an interesting problem. Query plan seems to depend on how long\nthe IN() clause is.\n\nexplain analyze\nselect p.product_id \nfrom product p left join infx.infx_product i on\np.external_id = i.external_id where p.product_id in (7905, 7915, 7919,\n7817, 8200, 7907, 7909, 9379, 9375, 9368, 9384, 9930, 9928, 9927, 9929,\n9925, 9931, 9922, 7885, 9705, 8201, 7921);\n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Nested Loop Left Join (cost=40.52..108603.02 rows=11287 width=4)\n(actual time=1.187..33.484 rows=4882 loops=1) -> Seq Scan on product\np (cost=0.00..577.41 rows=22 width=18) (actual time=1.012..5.144\nrows=22 loops=1) Filter: (product_id = ANY\n('{7905,7915,7919,7817,8200,7907,7909,9379,9375,9368,9384,9930,9928,9927,9929,9925,9931,9922,7885,9705,8201,7921}'::integer[]))\n-> Bitmap Heap Scan on infx_product i (cost=40.52..4890.70\nrows=1564 width=15) (actual time=0.107..0.563 rows=222 loops=22)\nRecheck Cond: ((p.external_id)::text = (i.external_id)::text) ->\nBitmap Index Scan on infx_product_full_external_id_key\n(cost=0.00..40.13 rows=1564 width=0) (actual time=0.091..0.091 rows=222\nloops=22) Index Cond: ((p.external_id)::text = (i.external_id)::text)\nTotal runtime: 41.470 ms (8 rows)\n\nNice. But:\n\nexplain analyze\nselect p.product_id\nfrom product p left join infx.infx_product i on p.external_id =\ni.external_id where p.product_id in (7905, 7915, 7919, 7817, 8200,\n7907, 7909, 9379, 9375, 9368, 9384, 9930, 9928, 9927, 9929, 9925, 9931,\n9922, 7885, 9705, 8201, 7921,1,1,1,1,1,1);\n\n-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Hash Left Join (cost=127157.21..128199.91 rows=14365 width=4) (actual\ntime=11508.623..11518.198 rows=4882 loops=1) Hash Cond:\n((p.external_id)::text = (i.external_id)::text) -> Bitmap Heap Scan on\nproduct p (cost=103.25..187.88 rows=28 width=18) (actual\ntime=0.099..0.161 rows=22 loops=1) Recheck Cond: (product_id = ANY\n('{7905,7915,7919,7817,8200,7907,7909,9379,9375,9368,9384,9930,9928,9927,9929,9925,9931,9922,7885,9705,8201,7921,1,1,1,1,1,1}'::integer[]))\n-> Bitmap Index Scan on pk_product (cost=0.00..103.24 rows=28 \nwidth=0) (actual time=0.086..0.086 rows=22 loops=1) Index Cond:\n(product_id = ANY\n('{7905,7915,7919,7817,8200,7907,7909,9379,9375,9368,9384,9930,9928,9927,9929,9925,9931,9922,7885,9705,8201,7921,1,1,1,1,1,1}'::integer[]))\n-> Hash (cost=100040.65..100040.65 rows=2161065 width=15) (actual \ntime=11505.578..11505.578 rows=2161065 loops=1) -> Seq Scan on\ninfx_product i (cost=0.00..100040.65 rows=2161065 width=15) (actual\ntime=0.074..6921.127 rows=2161065 loops=1) Total runtime: 11543.758 ms\n(9 rows)\n\nAnother try:\n\nnecosgi=# set enable_hashjoin=off;\nSET\nnecosgi=# explain analyze select p.product_id from product p left join\ninfx.infx_product i on p.external_id = i.external_id where p.product_id\nin (7905, 7915, 7919, 7817, 8200, 7907, 7909, 9379, 9375, 9368, 9384,\n9930, 9928, 9927, 9929, 9925, 9931, 9922, 7885, 9705, 
8201,\n7921,1,1,1,1,1,1);\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\nNested Loop Left Join (cost=40.42..136685.07 rows=14365 width=4)\n(actual time=1.308..33.897 rows=4882 loops=1) -> Seq Scan on product\np (cost=0.00..634.50 rows=28 width=18) (actual time=1.135..5.747\nrows=22 loops=1) Filter: (product_id = ANY\n('{7905,7915,7919,7817,8200,7907,7909,9379,9375,9368,9384,9930,9928,9927,9929,9925,9931,9922,7885,9705,8201,7921,1,1,1,1,1,1}'::integer[]))\n-> Bitmap Heap Scan on infx_product i (cost=40.42..4839.40\nrows=1564 width=15) (actual time=0.096..0.553 rows=222 loops=22)\nRecheck Cond: ((p.external_id)::text = (i.external_id)::text) ->\nBitmap Index Scan on infx_product_full_external_id_key\n(cost=0.00..40.03 rows=1564 width=0) (actual time=0.080..0.080 rows=222\nloops=22) Index Cond: ((p.external_id)::text = (i.external_id)::text)\nTotal runtime: 41.961 ms (8 rows)\n\nIt looks like the maximum cost of the nested loop is higher than the\nmaximum cost of hashjoin, that's why the planner chooses hashjoin.\nTable was analyzed, index was reindexed. This behaviour is always\nreproducible.\n\nQuestions: \n\n- how could I avoid full table scan - how can I advise to use the index?\n- how could I advise planner to better estimate the (much lower) cost\n of the nested loop?\n\nAnother info: if I put more 1's in the query, the planner will use\nmergejoins... bahh :)\n\nAny help/question/suggestion is welcome!\n\nBest regards,\nAkos\n\n", "msg_date": "Mon, 19 Jan 2009 19:07:10 +0100", "msg_from": "Akos Gabriel <[email protected]>", "msg_from_op": true, "msg_subject": "Query running long - cost estimation question..." } ]
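Two small things might be worth trying here; neither comes from the thread itself, so treat them as suggestions rather than a fix. First, the planner's per-value estimate on the join column is well off (1564 estimated rows per external_id versus roughly 222 actual), and a larger statistics target on that column gives the nested-loop versus hash-join cost comparison a better basis. Second, passing a de-duplicated IN list keeps the product-side estimate (28 above) closer to the 22 rows that actually qualify.

ALTER TABLE infx.infx_product ALTER COLUMN external_id SET STATISTICS 500;
ANALYZE infx.infx_product;

-- Re-check the original query with the duplicate 1's removed:
EXPLAIN ANALYZE
SELECT p.product_id
FROM product p
LEFT JOIN infx.infx_product i ON p.external_id = i.external_id
WHERE p.product_id IN (7905, 7915, 7919, 7817, 8200, 7907, 7909, 9379,
                       9375, 9368, 9384, 9930, 9928, 9927, 9929, 9925,
                       9931, 9922, 7885, 9705, 8201, 7921, 1);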
[ { "msg_contents": "Greetings,\n\nI'm experiencing a strange query plan change on a \"simple\" request, based on\nthe LIMIT parameter. I present here two tables, named _article and _comment.\nthey are part of a much larger database.\n\nIt's a tree-like database (making use of the ltree to keep the coherence),\nand in our case, an article can have any number of comments (linked by _\narticle.id = _comment.parent_id), and a _comment can itself have ONE comment\n(at most).\n\nThe _article table contains 12 millions t-uples, and the _comment table\naround 26 millions. The server runs a postgresql 8.3.5 in its 64bits\nversion.\n\nHere are the tables definition :\n\n-- _article table\n>\n> CREATE TABLE \"ob2\".\"_article\" (\n> \"id\" bigint NOT NULL DEFAULT nextval('element_id_sequence'::regclass),\n> \"parent_id\" bigint,\n> \"path\" ltree,\n> \"data\" text,\n> \"date_creation\" timestamp without time zone NOT NULL DEFAULT now(),\n> \"date_publishing\" timestamp without time zone NOT NULL DEFAULT now(),\n> \"date_modification\" timestamp without time zone NOT NULL DEFAULT now(),\n> \"counters\" hstore,\n> \"reference\" integer NOT NULL DEFAULT\n> nextval('_article_reference_seq'::regclass),\n> \"title\" character varying NOT NULL,\n> \"text\" text,\n> \"blog_id\" bigint,\n> \"user_id\" bigint,\n> \"site_id\" bigint,\n> \"topic_id\" bigint,\n> \"community_id\" bigint,\n> CONSTRAINT \"_article_pkey\" PRIMARY KEY (id)\n> ) WITHOUT OIDS;\n> ALTER TABLE ONLY \"ob2\".\"_article\" ALTER COLUMN \"path\" SET STORAGE PLAIN;\n> ALTER TABLE ONLY \"ob2\".\"_article\" ALTER COLUMN \"title\" SET STORAGE PLAIN;\n>\n> -- Indexes\n>\n> CREATE UNIQUE INDEX _article_pkey ON _article USING btree (id);\n> CREATE INDEX gist_idx_article_path ON _article USING gist (path);\n> CREATE INDEX idx_article_blog_id ON _article USING btree (blog_id);\n> CREATE INDEX idx_article_community_id ON _article USING btree\n> (community_id);\n> CREATE INDEX idx_article_date_creation ON _article USING btree\n> (date_creation);\n> CREATE INDEX idx_article_date_modification ON _article USING btree\n> (date_modification);\n> CREATE INDEX idx_article_date_publishing ON _article USING btree\n> (date_publishing);\n> CREATE INDEX idx_article_parent_id ON _article USING btree (parent_id);\n> CREATE UNIQUE INDEX idx_article_reference_unique ON _article USING btree\n> (reference);\n> CREATE INDEX idx_article_site_id ON _article USING btree (site_id);\n> CREATE INDEX idx_article_topic_id ON _article USING btree (topic_id);\n> CREATE INDEX idx_article_user_id ON _article USING btree (user_id);\n>\n> -- _comment table\n>\n> CREATE TABLE \"ob2\".\"_comment\" (\n> \"id\" bigint NOT NULL DEFAULT nextval('element_id_sequence'::regclass),\n> \"parent_id\" bigint,\n> \"path\" ltree,\n> \"data\" text,\n> \"date_creation\" timestamp without time zone NOT NULL DEFAULT now(),\n> \"date_publishing\" timestamp without time zone NOT NULL DEFAULT now(),\n> \"date_modification\" timestamp without time zone NOT NULL DEFAULT now(),\n> \"counters\" hstore,\n> \"reference\" integer NOT NULL DEFAULT\n> nextval('_comment_reference_seq'::regclass),\n> \"text\" text,\n> \"article_id\" bigint,\n> \"blog_id\" bigint,\n> \"user_id\" bigint,\n> \"site_id\" bigint,\n> CONSTRAINT \"_comment_pkey\" PRIMARY KEY (id)\n> ) WITHOUT OIDS;\n> ALTER TABLE ONLY \"ob2\".\"_comment\" ALTER COLUMN \"path\" SET STORAGE PLAIN;\n>\n> -- Indexes\n>\n> CREATE UNIQUE INDEX _comment_pkey ON _comment USING btree (id);\n> CREATE INDEX gist_idx_comment_path ON _comment USING gist (path);\n> CREATE 
INDEX idx_comment_date_creation ON _comment USING btree\n> (date_creation);\n> CREATE INDEX idx_comment_date_publishing ON _comment USING btree\n> (date_publishing);\n> CREATE INDEX idx_comment_parent_id ON _comment USING btree (parent_id);\n> CREATE INDEX idx_comment_reference ON _comment USING btree (reference);\n>\n\nNow I created a function to get simply the comment reply to a given comment\n:\n\nCREATE OR REPLACE FUNCTION get_comment_response (BIGINT) RETURNS _comment AS\n> $$\n> SELECT * FROM _comment WHERE parent_id = $1;\n> $$\n> STABLE\n> COST 1\n> LANGUAGE SQL;\n>\n\nOk, now, all is set. I'd like to get with a simple query the comments of a\ngiven article, ordered by publishing date, as well as their replies if they\nexists. So I write this request :\n\nSELECT\n _comment.id,\n (get_comment_response(_comment.id)).id AS r_id\nFROM _comment\nINNER JOIN _article\n ON _article.id = _comment.parent_id\nWHERE _comment.parent_id = '17355952'\nORDER BY _comment.date_publishing ASC\nOFFSET 0\nLIMIT 10;\n\nResults are good, quite fast, BUT, when executing tests I discovered\nsomething very strange. The query was fast for 3+ comments, but very slow\nwith a limit of 1 or 2 ! Just because the query plan change :\n\nEXPLAIN\n> SELECT _comment.id,\n> (get_comment_response(_comment.id)).id AS r_id\n> FROM _comment\n> INNER JOIN _article\n> ON _article.id = _comment.parent_id\n> WHERE _comment.parent_id = '17355952'\n> ORDER BY _comment.id ASC\n> OFFSET 0\n> LIMIT 1000;\n>\n> QUERY\n> PLAN\n>\n> --------------------------------------------------------------------------------------------------------------\n> Limit (cost=10261.19..10263.69 rows=1000 width=8)\n> -> Sort (cost=10261.19..10281.06 rows=7949 width=8)\n> Sort Key: _comment.id\n> -> Nested Loop (cost=0.00..9825.35 rows=7949 width=8)\n> -> Index Scan using _article_pkey on _article\n> (cost=0.00..9.55 rows=1 width=8)\n> Index Cond: (id = 17355952::bigint)\n> -> Index Scan using idx_comment_parent_id on _comment\n> (cost=0.00..9716.44 rows=7949 width=16)\n> Index Cond: (_comment.parent_id = 17355952::bigint)\n> (8 rows)\n>\n\nEXPLAIN\n> SELECT _comment.id,\n> (get_comment_response(_comment.id)).id AS r_id\n> FROM _comment\n> INNER JOIN _article\n> ON _article.id = _comment.parent_id\n> WHERE _comment.parent_id = '17355952'\n> ORDER BY _comment.id ASC\n> OFFSET 0\n> LIMIT 1;\n> QUERY\n> PLAN\n>\n> ----------------------------------------------------------------------------------------------------\n> Limit (cost=0.00..3588.42 rows=1 width=8)\n> -> Nested Loop (cost=0.00..28524312.40 rows=7949 width=8)\n> -> Index Scan using _comment_pkey on _comment\n> (cost=0.00..28448324.73 rows=7949 width=16)\n> Filter: (parent_id = 17355952::bigint)\n> -> Index Scan using _article_pkey on _article (cost=0.00..9.55\n> rows=1 width=8)\n> Index Cond: (_article.id = 17355952::bigint)\n> (6 rows)\n>\n\nThe second query scans the whole comment table which is very dangerous for\nproduction servers.\n\nSo did I do something wrong ? Is there a way to handle this issue smoothly ?\n\n\nThanks in advance\n\n\nYannick", "msg_date": "Tue, 20 Jan 2009 16:45:45 +0100", "msg_from": "\"=?ISO-8859-1?Q?Yannick_Le_Gu=E9dart?=\" <[email protected]>", "msg_from_op": true, "msg_subject": "Interesting query plan change linked to the LIMIT parameter" }, { "msg_contents": "On Tue, Jan 20, 2009 at 10:45 AM, Yannick Le Guédart\n<[email protected]> wrote:\n\n>\n> The second query scans the whole comment table which is very dangerous for\n> production servers.\n\nThat's not quite true. The second does an index scan- the planner\nseems to be guessing that it'll fulfill the required limit early in\nthe index scan; only with a pathologically bad case would it actually\nhave to scan the entire thing. 
Basically, the second query is\noptimized to spit out the first few rows quickly, since that's all you\nasked for with the limit.\n\nNote that your first query has a final cost estimate of \"Limit\n(cost=10261.19..10263.69 rows=1000 width=8)\", indicating an estimated\n10261.19 to emit the first row; the second has \"Limit\n(cost=0.00..3588.42 rows=1 width=8)\" estimating 0.00 (basically,\ninstant) to emit the first - and only desired - row.\n\nThat all said, an explain analyze would give us a better idea of\nwhat's going on- we can't tell if the planner is making bad estimates\nwithout the knowledge of what the real timing and row count results of\nplan stages were.\n\n\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Tue, 20 Jan 2009 11:28:33 -0500", "msg_from": "\"David Wilson\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Interesting query plan change linked to the LIMIT parameter" }, { "msg_contents": "Thanks for the rapid response.\n\nI can understand the way the planner makes its guess, but as a matter of\nfact, he'll be nearly always wrong, just becausethe most commented articles\nhave only around 5000 or so comments. I ran the explain analyze tonight\nand got this results :\n\nEXPLAIN ANALYZE SELECT _comment.id,\n> (get_comment_response(_comment.id)).id AS r_id\n> FROM _comment\n> INNER JOIN _article\n> ON _article.id = _comment.parent_id\n> WHERE _comment.parent_id = '17355952'\n> ORDER BY _comment.id ASC\n> OFFSET 0\n> LIMIT 1;\n>\n> QUERY PLAN\n>\n> ---------------------------------------------------------------------------------------------------------------------------------------------------------\n> Limit (cost=0.00..3588.42 rows=1 width=8) (actual\n> time=498597.115..498597.116 rows=1 loops=1)\n> -> Nested Loop (cost=0.00..28524312.40 rows=7949 width=8) (actual\n> time=498597.114..498597.114 rows=1 loops=1)\n> -> Index Scan using _comment_pkey on _comment\n> (cost=0.00..28448324.73 rows=7949 width=16) (actual\n> time=498473.360..498473.360 rows=1 loops=1)\n> Filter: (parent_id = 17355952::bigint)\n> -> Index Scan using _article_pkey on _article (cost=0.00..9.55\n> rows=1 width=8) (actual time=63.465..63.465 rows=1 loops=1)\n> Index Cond: (_article.id = 17355952::bigint)\n> Total runtime: 498615.230 ms\n> (7 rows)\n>\n> EXPLAIN ANALYZE SELECT _comment.id,\n> (get_comment_response(_comment.id)).id AS r_id\n> FROM _comment\n> INNER JOIN _article\n> ON _article.id = _comment.parent_id\n> WHERE _comment.parent_id = '17355952'\n> ORDER BY _comment.id ASC\n> OFFSET 0\n> LIMIT 1000;\n>\n> QUERY PLAN\n>\n> -------------------------------------------------------------------------------------------------------------------------------------------------------------\n> Limit (cost=10261.19..10263.69 rows=1000 width=8) (actual\n> time=127.037..127.267 rows=1000 loops=1)\n> -> Sort (cost=10261.19..10281.06 rows=7949 width=8) (actual\n> time=127.036..127.128 rows=1000 loops=1)\n> Sort Key: _comment.id\n> Sort Method: top-N heapsort Memory: 95kB\n> -> Nested Loop (cost=0.00..9825.35 rows=7949 width=8) (actual\n> time=0.472..122.986 rows=4674 loops=1)\n> -> Index Scan using _article_pkey on _article\n> (cost=0.00..9.55 rows=1 width=8) (actual time=0.011..0.013 rows=1 loops=1)\n> Index Cond: (id = 17355952::bigint)\n> -> Index Scan using idx_comment_parent_id on _comment\n> (cost=0.00..9716.44 rows=7949 width=16) (actual time=0.235..32.869 rows=4674\n> loops=1)\n> Index Cond: (_comment.parent_id = 17355952::bigint)\n> Total runtime: 127.410 ms\n> (10 
rows)\n>\n\nAs you can see, the time is dramaticaly longuer with the LIMIT 1 (or in our\ncase, LIMIT 2).\n\nYannick.\n\n2009/1/20 David Wilson <[email protected]>\n\n> On Tue, Jan 20, 2009 at 10:45 AM, Yannick Le Guédart\n> <[email protected]> wrote:\n>\n> >\n> > The second query scans the whole comment table which is very dangerous\n> for\n> > production servers.\n>\n> That's not quite true. The second does an index scan- the planner\n> seems to be guessing that it'll fulfill the required limit early in\n> the index scan; only with a pathologically bad case would it actually\n> have to scan the entire thing. Basically, the second query is\n> optimized to spit out the first few rows quickly, since that's all you\n> asked for with the limit.\n>\n> Note that your first query has a final cost estimate of \"Limit\n> (cost=10261.19..10263.69 rows=1000 width=8)\", indicating an estimated\n> 10261.19 to emit the first row; the second has \"Limit\n> (cost=0.00..3588.42 rows=1 width=8)\" estimating 0.00 (basically,\n> instant) to emit the first - and only desired - row.\n>\n> That all said, an explain analyze would give us a better idea of\n> what's going on- we can't tell if the planner is making bad estimates\n> without the knowledge of what the real timing and row count results of\n> plan stages were.\n>\n>\n> --\n> - David T. Wilson\n> [email protected]\n>\n", "msg_date": "Wed, 21 Jan 2009 08:37:50 +0100", "msg_from": "=?ISO-8859-1?Q?Yannick_Le_Gu=E9dart?= <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Interesting query plan change linked to the LIMIT\n\tparameter" } ]
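None of the usual workarounds for this kind of LIMIT-driven plan flip come up in the thread itself, so the statements below are only a sketch against the posted schema; the composite index name is invented here and neither statement is from the original posters.

-- A composite index lets the planner satisfy both the parent_id filter and the
-- ORDER BY ... LIMIT from a single index scan, so it no longer gambles on
-- walking _comment_pkey in id order and filtering as it goes:
CREATE INDEX idx_comment_parent_id_id ON _comment USING btree (parent_id, id);

-- Alternatively, a no-op expression in the sort key hides the match between the
-- ORDER BY and _comment_pkey, steering the planner back to idx_comment_parent_id
-- even for LIMIT 1 or 2:
SELECT _comment.id,
       (get_comment_response(_comment.id)).id AS r_id
FROM   _comment
INNER JOIN _article ON _article.id = _comment.parent_id
WHERE  _comment.parent_id = 17355952
ORDER BY _comment.id + 0 ASC
OFFSET 0
LIMIT  1;

Of the two, the composite index is the more durable fix, since it serves the filter and the ordering at once; the + 0 rewrite merely keeps the primary-key index from looking attractive for the sort.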
[ { "msg_contents": "Apologies if this is a FAQ, but...\n\nGiven linux's (mis)accounting/reporting of per-process memory, including\nshared memory (see for example this page: \nhttp://lwn.net/Articles/230975/) how does postgresql interpret and use\nthe information that's provided? Does it use the information as-is? \nDoes it just accept the configuration parameters provided (e.g. --\nshared_buffers, effective_cache_size, etc.)? Or does it combine its\ninternal knowledge of what it's sharing and adjust what linux reports\naccordingly?\n\nI'm aware that there are lots of userspace tools for accessing what the\nkernel reports, but I think its reporting is still problematic,\nespecially for apps that use shared memory. (User space tools like sar,\nps, top, pmap, free, vmstat, iostat, slabinfo, et al., as well as just\nlooking at /proc fds -- /proc/meminfo, etc.)\n\n\n", "msg_date": "Wed, 21 Jan 2009 14:01:41 -0800", "msg_from": "Dave Youatt <[email protected]>", "msg_from_op": true, "msg_subject": "linux, memory (mis)accounting/reporting, and the planner/optimizer" }, { "msg_contents": "On Wed, 21 Jan 2009, Dave Youatt wrote:\n\n> Does it just accept the configuration parameters provided (e.g. --\n> shared_buffers, effective_cache_size, etc.)?\n\nThat's it. The only time PostgreSQL gets a report from the OS related to \nmemory is if it makes an allocation attempt that fails. Couldn't care \nless what Linux thinks the rest of the time--unless the OOM killer goes on \na rampage, counts shared memory badly, and decides to kill a database \nprocess that is.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Wed, 21 Jan 2009 19:02:43 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: linux, memory (mis)accounting/reporting,\n and the planner/optimizer" }, { "msg_contents": "Greg Smith wrote:\n> On Wed, 21 Jan 2009, Dave Youatt wrote:\n> \n>> Does it just accept the configuration parameters provided (e.g. --\n>> shared_buffers, effective_cache_size, etc.)?\n> \n> That's it. The only time PostgreSQL gets a report from the OS related\n> to memory is if it makes an allocation attempt that fails. Couldn't\n> care less what Linux thinks the rest of the time--unless the OOM killer\n> goes on a rampage, counts shared memory badly, and decides to kill a\n> database process that is.\n> \n> -- \n> * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n> \n\nThe shared memory accounting in Linux got better in the 2.6.25 kernel,\nalthough I'm not sure the user space tools are fully deployed even today\nto track it. And of course, lots of servers still use kernels older than\n2.6.25.\n\nRe the OOM killer -- maybe a patch to the kernel could make things\n\"better\"??\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Wed, 21 Jan 2009 22:23:45 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: linux, memory (mis)accounting/reporting,\n and the planner/optimizer" }, { "msg_contents": "On Wed, 21 Jan 2009, M. Edward (Ed) Borasky wrote:\n\n> Re the OOM killer -- maybe a patch to the kernel could make things\n> \"better\"??\n\nPeople have tried to raise awareness of it; sample:\n\nhttp://lkml.org/lkml/2007/2/9/275\n\nwithout much success. 
The Linux kernel hackers dislike the whole approach \nPostgreSQL uses to allocate shared memory anyway--witness the backlash \nagainst any attempt to raise SHMMAX.\n\nI found the long thread that beats this issue to death in the archives \nagain:\n\nhttp://archives.postgresql.org/pgsql-hackers/2008-02/msg00026.php\n\nThat discussion should get raised to a higher profile eventually, maybe a \nsummary on the wiki.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Thu, 22 Jan 2009 11:59:48 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: linux, memory (mis)accounting/reporting,\n and the planner/optimizer" }, { "msg_contents": "Greg Smith wrote:\n> On Wed, 21 Jan 2009, M. Edward (Ed) Borasky wrote:\n> \n>> Re the OOM killer -- maybe a patch to the kernel could make things\n>> \"better\"??\n> \n> People have tried to raise awareness of it; sample:\n> \n> http://lkml.org/lkml/2007/2/9/275\n> \n> without much success. The Linux kernel hackers dislike the whole\n> approach PostgreSQL uses to allocate shared memory anyway--witness the\n> backlash against any attempt to raise SHMMAX.\n> \n> I found the long thread that beats this issue to death in the archives\n> again:\n> \n> http://archives.postgresql.org/pgsql-hackers/2008-02/msg00026.php\n> \n> That discussion should get raised to a higher profile eventually, maybe\n> a summary on the wiki.\n> \n> -- \n> * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n> \nYes, please collect as much detail as you can in some centralized place.\nFor recent kernels (2.6.25+) the memory accounting is much better, and\nif nothing else, there might be some things PostgreSQL could do to\nminimize the probability of getting hit, at the cost of some\nplatform-dependent (/proc reading) code. The problem is that\n\"enterprise\" Linux distros aren't running 2.6.25+ yet. :(\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Thu, 22 Jan 2009 10:39:08 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: linux, memory (mis)accounting/reporting,\n and the planner/optimizer" } ]
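To make Greg Smith's point concrete, the planner's entire picture of memory is whatever the configuration says, not anything the kernel reports. The query below is a generic sketch (not something posted in the thread) that reads those values back from the standard pg_settings view:

-- The only memory-related figures the planner and executor ever consult:
SELECT name, setting, unit, source
FROM   pg_settings
WHERE  name IN ('shared_buffers', 'effective_cache_size',
                'work_mem', 'maintenance_work_mem');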
[ { "msg_contents": "\nHi I am developing a database and have a couple of questions I havent \nfound an answer to yet.\n\n1) how do I find the size of an index, i.e. the size on disk?\n\n2) I have a query that is taking a long time to complete because the \ntable is about 120GB large. Its only returning 2000 rows, so in \nprinciple it should be fast. But because the data is spread across the \ntable, I am assuming it needs to do a lot of disk access to fetch the \nappropriate pages. Since the amount of data is so large I am also \nassuming that whenever I do a query all memory caches have to be \nreplaced to make room for the new pages. What I am wondering is which \nconfig settings can I use to increase the amount of memory postgres \nkeeps to cache pages and indexes?\n\nI tried work_mem and maintenance_work_mem but it does not seem to make \nmuch difference yet. Admittedly I had set it to 100M and 80M, so after \nreading a little bit more I have found that I could easily set it to \nseveral GBs. But I am not sure those are the correct config parameters \nto use for this. I havent found any others that are relevant so far.\n\nregards\n\nthomas\n", "msg_date": "Thu, 22 Jan 2009 07:23:38 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "caching indexes and pages?" }, { "msg_contents": "In response to Thomas Finneid :\n> \n> Hi I am developing a database and have a couple of questions I havent \n> found an answer to yet.\n> \n> 1) how do I find the size of an index, i.e. the size on disk?\n\nhttp://www.postgresql.org/docs/8.3/interactive/functions-admin.html\nhttp://andreas.scherbaum.la/blog/archives/282-table-size,-database-size.html\n\n\n> \n> 2) I have a query that is taking a long time to complete because the \n\nHow long is a long time? *g*\n\n\n> table is about 120GB large. Its only returning 2000 rows, so in \n> principle it should be fast. But because the data is spread across the \n> table, I am assuming it needs to do a lot of disk access to fetch the \n> appropriate pages. Since the amount of data is so large I am also \n\nPlease execute your query with an additionally 'explain analyse select\n...' and look at the output. Maybe there are no propper index for your\nselect.\n\n\nRegards, Andreas\n-- \nAndreas Kretschmer\nKontakt: Heynitz: 035242/47150, D1: 0160/7141639 (mehr: -> Header)\nGnuPG-ID: 0x3FFF606C, privat 0x7F4584DA http://wwwkeys.de.pgp.net\n", "msg_date": "Thu, 22 Jan 2009 08:01:36 +0100", "msg_from": "\"A. Kretschmer\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching indexes and pages?" }, { "msg_contents": "Hi,\n\nThomas Finneid schrieb:\n>\n> Hi I am developing a database and have a couple of questions I havent \n> found an answer to yet.\n>\n> 1) how do I find the size of an index, i.e. 
the size on disk?\ni use this query:\nselect\n t.spcname as \"tablespace\"\n , pg_get_userbyid(c.relowner) as \"owner\"\n , n.nspname as \"schema\"\n , relname::text as \"name\"\n , pg_size_pretty(pg_total_relation_size(c.oid))::text as \"total size\"\n , case\n when c.relkind='i' then 'index'\n when c.relkind='t' then 'toast'\n when c.relkind='r' then 'table'\n when c.relkind='v' then 'view'\n when c.relkind='c' then 'composite type'\n when c.relkind='S' then 'sequence'\n else c.relkind::text\n end as \"type\"\nfrom\n pg_class c\n left join pg_namespace n on n.oid = c.relnamespace\n left join pg_tablespace t on t.oid = c.reltablespace\nwhere\n (pg_total_relation_size(c.oid)>>20)>0 and c.relkind!='t'\norder by\n c.relkind desc, pg_total_relation_size(c.oid) desc\n>\n> 2) I have a query that is taking a long time to complete because the \n> table is about 120GB large. Its only returning 2000 rows, so in \n> principle it should be fast. But because the data is spread across the \n> table, I am assuming it needs to do a lot of disk access to fetch the \n> appropriate pages. Since the amount of data is so large I am also \n> assuming that whenever I do a query all memory caches have to be \n> replaced to make room for the new pages. What I am wondering is which \n> config settings can I use to increase the amount of memory postgres \n> keeps to cache pages and indexes?\ntry to reorganize your data with CLUSTER and create appropriate indixes \n(dont forget to check statistics). there are several threads about \nmemory configuration. look for shared_buffers\n>\n> I tried work_mem and maintenance_work_mem but it does not seem to make \n> much difference yet. Admittedly I had set it to 100M and 80M, so after \n> reading a little bit more I have found that I could easily set it to \n> several GBs. But I am not sure those are the correct config parameters \n> to use for this. I havent found any others that are relevant so far.\n>\n> regards\n>\n> thomas\nregards\nthomas", "msg_date": "Thu, 22 Jan 2009 08:06:12 +0100", "msg_from": "Thomas Markus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching indexes and pages?" }, { "msg_contents": "> I tried work_mem and maintenance_work_mem but it does not seem to make much\n> difference yet. Admittedly I had set it to 100M and 80M, so after reading a\n> little bit more I have found that I could easily set it to several GBs. But\n> I am not sure those are the correct config parameters to use for this. I\n> havent found any others that are relevant so far.\n\nYou probably want to increase shared_buffers by quite a large amount\nand maybe make work_mem not quite so big.\n\nIf you have 2GB of memory you might set shared_buffers to 1GB,\nwork_mem 16MB, maintenance_work_mem 64MB?\n\n...Robert\n", "msg_date": "Thu, 22 Jan 2009 10:34:29 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching indexes and pages?" }, { "msg_contents": "Thomas Markus wrote:\n\n> try to reorganize your data with CLUSTER and create appropriate indixes \n> (dont forget to check statistics).\n\nOne question. Assume I have clustered and new data has been added after \nthat, according to the docs that data is added \"outside\" of the \nclustered data. What happens when I run cluster again? I would assume \nits smart and to only clusteres the new data, i.e. adding it to the \nalready created clusters, as apporpriate, so the execution time would be \na lot lower, right? 
or would it run through and recluster everything \nfrom scratch again?\n\nthomas\n\n", "msg_date": "Thu, 22 Jan 2009 22:58:25 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: caching indexes and pages?" }, { "msg_contents": "On Thu, Jan 22, 2009 at 10:58:25PM +0100, Thomas Finneid wrote:\n> Thomas Markus wrote:\n>\n>> try to reorganize your data with CLUSTER and create appropriate indixes \n>> (dont forget to check statistics).\n>\n> One question. Assume I have clustered and new data has been added after \n> that, according to the docs that data is added \"outside\" of the clustered \n> data. What happens when I run cluster again? I would assume its smart and \n> to only clusteres the new data, i.e. adding it to the already created \n> clusters, as apporpriate, so the execution time would be a lot lower, \n> right? or would it run through and recluster everything from scratch again?\n>\n> thomas\n>\nIt reclusters again from scratch. You do get better performance on the\nreads from the data that is already clustered.\n\nCheers,\nKen\n", "msg_date": "Thu, 22 Jan 2009 16:03:33 -0600", "msg_from": "Kenneth Marshall <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching indexes and pages?" }, { "msg_contents": "Thomas Finneid wrote:\n> Thomas Markus wrote:\n> \n>> try to reorganize your data with CLUSTER and create appropriate\n>> indixes (dont forget to check statistics).\n> \n> One question. Assume I have clustered and new data has been added after\n> that, according to the docs that data is added \"outside\" of the\n> clustered data.\n\nCheck into FILLFACTOR (on both tables and indexes).\n\n--\nCraig Ringer\n", "msg_date": "Fri, 23 Jan 2009 14:52:05 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching indexes and pages?" } ]
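Pulling the answers above together into one place: the statements below are only a sketch, the object names are placeholders rather than anything from the thread, and the fillfactor figure is illustrative, not a recommendation.

-- On-disk size of a single index, and of a table together with its indexes and
-- TOAST data, using the admin functions from the documentation linked earlier:
SELECT pg_size_pretty(pg_relation_size('some_schema.some_index'))       AS index_size,
       pg_size_pretty(pg_total_relation_size('some_schema.some_table')) AS table_total_size;

-- Following the FILLFACTOR hint: leave some slack in each page the next time the
-- table and index are rewritten (for example by CLUSTER), so the clustered layout
-- degrades more slowly as rows are added and updated:
ALTER TABLE some_schema.some_table SET (fillfactor = 90);
ALTER INDEX some_schema.some_index SET (fillfactor = 90);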
[ { "msg_contents": "Hi\n\nA quick question, when pg receives data to be written to a table, does \nit cache that data in memory in case a subsequent request/query would \nneed it?\n\nAs I understand it, data is stored in pages and those pages have to be \nretrieved in order to write or read data from them. So my assumption is \nthat a page used to write data would not be replaced until memory is low \nand different pages needs to be retrieved. Is this approximately correct?\n\nThomas\n", "msg_date": "Thu, 22 Jan 2009 12:12:15 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "caching written values?" }, { "msg_contents": "On Thu, Jan 22, 2009 at 4:42 PM, Thomas Finneid <[email protected]> wrote:\n\n>\n> As I understand it, data is stored in pages and those pages have to be\n> retrieved in order to write or read data from them. So my assumption is that\n> a page used to write data would not be replaced until memory is low and\n> different pages needs to be retrieved. Is this approximately correct?\n>\n\nYes. That's how it works.\n\nThanks,\nPavan\n\n-- \nPavan Deolasee\nEnterpriseDB http://www.enterprisedb.com\n", "msg_date": "Thu, 22 Jan 2009 16:50:21 +0530", "msg_from": "Pavan Deolasee <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching written values?" }, { "msg_contents": "> \n> A quick question, when pg receives data to be written to a\n> table, does it cache that data in memory in case a\n> subsequent request/query would need it?\n> \n\nAfaik all pages are modified in memory, so the modified data would still be cached.\n\n\n \n", "msg_date": "Thu, 22 Jan 2009 12:06:58 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching written values?" }, { "msg_contents": "Pavan Deolasee wrote:\n> On Thu, Jan 22, 2009 at 4:42 PM, Thomas Finneid <[email protected]> wrote:\n> \n>> As I understand it, data is stored in pages and those pages have to be\n>> retrieved in order to write or read data from them. So my assumption is that\n>> a page used to write data would not be replaced until memory is low and\n>> different pages needs to be retrieved. Is this approximately correct?\n>>\n> \n> Yes. That's how it works.\n\nIs there any possibilites of telling pg to save to disk that memory \ncached data and state when the server is shutdown, so that when the \nserver starts up again, itreads it back into the memory?\n\nregards\n\nthomas\n", "msg_date": "Thu, 22 Jan 2009 13:11:23 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: caching written values?" }, { "msg_contents": "(Sorry, did not include the list in the reply)\n\nPavan Deolasee wrote:\n\n> Yes. That's how it works.\n\nIs that how it works for an index as well? I just found out that I have \n an index that is 35GB, and the table is 85GB. ( I will look into the \nindex, it works fine, but an index that is almost one third of the size \nof the table, seems a little bit strange. )\nSo if it works the same way and the index uses a B-tree, I assume it \nonly loads the pages that contains the subpart of the index that are \nrelevant, is this correct?\n\nthomas\n", "msg_date": "Thu, 22 Jan 2009 13:35:31 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: caching written values?" }, { "msg_contents": "> Is that how it works for an index as well? I just found out that I have an\n> index that is 35GB, and the table is 85GB. 
( I will look into the index, it\n> works fine, but an index that is almost one third of the size of the table,\n> seems a little bit strange. )\n> So if it works the same way and the index uses a B-tree, I assume it only\n> loads the pages that contains the subpart of the index that are relevant, is\n> this correct?\n\nYes.\n\nSee shared_buffers:\n\nhttp://www.postgresql.org/docs/8.3/static/runtime-config-resource.html\n\n...Robert\n", "msg_date": "Thu, 22 Jan 2009 10:36:37 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching written values?" }, { "msg_contents": "\nOn Thu, 2009-01-22 at 13:11 +0100, Thomas Finneid wrote:\n\n> Is there any possibilites of telling pg to save to disk that memory \n> cached data and state when the server is shutdown, so that when the \n> server starts up again, itreads it back into the memory?\n\nIt's possible, but not by any directly supported mechanism.\n\nYou have to consider whether the data you saved would still be required\nwhen the server restarts.\n\n-- \n Simon Riggs www.2ndQuadrant.com\n PostgreSQL Training, Services and Support\n\n", "msg_date": "Thu, 22 Jan 2009 18:05:45 +0000", "msg_from": "Simon Riggs <[email protected]>", "msg_from_op": false, "msg_subject": "Re: caching written values?" } ]
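Neither reply shows it, but the contrib/pg_buffercache module that ships with 8.3 answers the underlying question here directly, namely which relations are currently occupying shared_buffers. The query below follows the module's documented example and assumes the contrib SQL script has been installed in the database being inspected.

-- Relations holding the most shared buffers in the current database:
SELECT c.relname, count(*) AS buffers
FROM   pg_class c
JOIN   pg_buffercache b ON b.relfilenode = c.relfilenode
JOIN   pg_database d    ON b.reldatabase = d.oid
                       AND d.datname = current_database()
GROUP  BY c.relname
ORDER  BY 2 DESC
LIMIT  10;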
[ { "msg_contents": "Hello !\n\nI'm having a problem with a query that takes more or less 3.2 seconds to \nbe executed.\n\nThis query uses a view which encapsulates some calculations (in order to \navoid duplicating theses calculations at several places in the project).\n\nIn order to keep that post readable, I've put the view details on that \npage : <http://pastebin.com/m1523940>\n\nHere's the EXPLAIN ANALYZE of a query using that view : \n<http://pastebin.com/m1b94cd9b>\n\nAs I read the query plan, the HashAggregate takes near than 3 seconds, \nwhich represent 90% of the query duration time.\n\nHow can I see which part of the query causes the HashAggregate to be so \nslow ?\n\nHow can I optimize that view to reduce the execution duration time ?\n\nTo be accurate, I'm working on PostgreSQL 8.3.5.\n\nMany thanks in advance for any tips about that ! :-)\n\nBest Regards,\n\n-- \nBruno Baguette - [email protected]\n", "msg_date": "Thu, 22 Jan 2009 16:36:31 +0100", "msg_from": "Bruno Baguette <[email protected]>", "msg_from_op": true, "msg_subject": "Slow HashAggregate : How to optimize ?" }, { "msg_contents": "> I'm having a problem with a query that takes more or less 3.2 seconds to be\n> executed.\n>\n> This query uses a view which encapsulates some calculations (in order to\n> avoid duplicating theses calculations at several places in the project).\n>\n> In order to keep that post readable, I've put the view details on that page\n> : <http://pastebin.com/m1523940>\n>\n> Here's the EXPLAIN ANALYZE of a query using that view :\n> <http://pastebin.com/m1b94cd9b>\n>\n> As I read the query plan, the HashAggregate takes near than 3 seconds, which\n> represent 90% of the query duration time.\n\nWell, it IS aggregating almost 80,000 rows. That doesn't sound that\nbad to me. What kind of hardware are you running this on?\n\n...Robert\n", "msg_date": "Thu, 22 Jan 2009 11:27:31 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Slow HashAggregate : How to optimize ?" } ]
[ { "msg_contents": "Hi,\n\nI am running postgresql 8.3.5 on FreeBSD with Dual core Intel(R)\nXeon(R) CPU 3065 @ 2.33GHz, 2GB RAM and Seagate Technology -\nBarracuda 7200.10 SATA 3.0Gb/ (RAID 1).\n\nI made several benchmark test with pgbench, TPS rate is almost 40 +/- 5.\n$ pgbench -i pgbench -s 50 -U pgsql\n\n[pgsql@$ pgbench -c 200 -t 2 -U pgsql -d pgbench\n\ntransaction type: TPC-B (sort of)\nscaling factor: 10\nnumber of clients: 200\nnumber of transactions per client: 2\nnumber of transactions actually processed: 400/400\ntps = 39.044088 (including connections establishing)\ntps = 41.528795 (excluding connections establishing)\n\n[pgsql@$ pgbench -c 100 -t 5 -U pgsql -d pgbench\n\ntransaction type: TPC-B (sort of)\nscaling factor: 10\nnumber of clients: 100\nnumber of transactions per client: 5\nnumber of transactions actually processed: 500/500\ntps = 30.162271 (including connections establishing)\ntps = 30.643256 (excluding connections establishing)\n\nIs this rate is normal or not? What can I do to improve tps and insert\nperformance?\n\nHere is some changes made in postgresql.conf and sysctl.conf\n\n/etc/sysctl.conf\n#1024 MB shmmax\nkern.ipc.shmmax=1073741824\n#shmall = shmmax / 4096 (page size)\nkern.ipc.shmall=262144\nkern.ipc.semmsl=512\nkern.ipc.semmap=256\n\npostgresql.conf\n\nshared_buffers = 800MB # min 128kB or max_connections*16kB\nwork_mem = 2MB # min 64kB\nmaintenance_work_mem = 32MB # min 1MB\nmax_connections = 600 # (change requires restart)\nmax_fsm_relations = 2000 # min 100, ~70 bytes each\nsynchronous_commit = off\nwal_buffers = 1024kB # min 32kB\ncheckpoint_segments = 32 # in logfile segments, min 1, 16MB each\n#checkpoint_timeout = 5min # range 30s-1h\ncheckpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0\n#checkpoint_warning = 30s # 0 is off\n", "msg_date": "Thu, 22 Jan 2009 17:47:39 +0200", "msg_from": "Ibrahim Harrani <[email protected]>", "msg_from_op": true, "msg_subject": "postgresql 8.3 tps rate" }, { "msg_contents": "> I am running postgresql 8.3.5 on FreeBSD with Dual core Intel(R)\n> Xeon(R) CPU 3065 @ 2.33GHz, 2GB RAM and Seagate Technology -\n> Barracuda 7200.10 SATA 3.0Gb/ (RAID 1).\n>\n> I made several benchmark test with pgbench, TPS rate is almost 40 +/- 5.\n> $ pgbench -i pgbench -s 50 -U pgsql\n>\n> [pgsql@$ pgbench -c 200 -t 2 -U pgsql -d pgbench\n>\n> transaction type: TPC-B (sort of)\n> scaling factor: 10\n> number of clients: 200\n> number of transactions per client: 2\n> number of transactions actually processed: 400/400\n> tps = 39.044088 (including connections establishing)\n> tps = 41.528795 (excluding connections establishing)\n>\n> [pgsql@$ pgbench -c 100 -t 5 -U pgsql -d pgbench\n>\n> transaction type: TPC-B (sort of)\n> scaling factor: 10\n> number of clients: 100\n> number of transactions per client: 5\n> number of transactions actually processed: 500/500\n> tps = 30.162271 (including connections establishing)\n> tps = 30.643256 (excluding connections establishing)\n>\n> Is this rate is normal or not? 
What can I do to improve tps and insert\n> performance?\n\nYou add more and faster disks.\n\n-- \nregards\nClaus\n\nWhen lenity and cruelty play for a kingdom,\nthe gentler gamester is the soonest winner.\n\nShakespeare\n", "msg_date": "Thu, 22 Jan 2009 17:36:36 +0100", "msg_from": "Claus Guttesen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Thu, 2009-01-22 at 17:47 +0200, Ibrahim Harrani wrote:\n> Hi,\n> \n> I am running postgresql 8.3.5 on FreeBSD with Dual core Intel(R)\n> Xeon(R) CPU 3065 @ 2.33GHz, 2GB RAM and Seagate Technology -\n> Barracuda 7200.10 SATA 3.0Gb/ (RAID 1).\n> \n> I made several benchmark test with pgbench, TPS rate is almost 40 +/- 5.\n> $ pgbench -i pgbench -s 50 -U pgsql\n> \n> [pgsql@$ pgbench -c 200 -t 2 -U pgsql -d pgbench\n> \n> transaction type: TPC-B (sort of)\n> scaling factor: 10\n> number of clients: 200\n> number of transactions per client: 2\n> number of transactions actually processed: 400/400\n> tps = 39.044088 (including connections establishing)\n> tps = 41.528795 (excluding connections establishing)\n> \n> [pgsql@$ pgbench -c 100 -t 5 -U pgsql -d pgbench\n> \n> transaction type: TPC-B (sort of)\n> scaling factor: 10\n> number of clients: 100\n> number of transactions per client: 5\n> number of transactions actually processed: 500/500\n> tps = 30.162271 (including connections establishing)\n> tps = 30.643256 (excluding connections establishing)\n> \n> Is this rate is normal or not? What can I do to improve tps and insert\n> performance?\n\nRun a real benchmark. Running 400/500 transactions doesn't give you any\nreal indication of what is going on. Run 50000 or so and see how it\nlooks.\n\nJoshua D. Drake\n\n-- \nPostgreSQL - XMPP: [email protected]\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Thu, 22 Jan 2009 08:37:47 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On 1/22/09, Ibrahim Harrani <[email protected]> wrote:\n>\n> Is this rate is normal or not? What can I do to improve tps and insert\n> performance?\n>\n> postgresql.conf\n>\n> shared_buffers = 800MB # min 128kB or max_connections*16kB\n> work_mem = 2MB # min 64kB\n> maintenance_work_mem = 32MB # min 1MB\n\nI would raise maintenance_work_mem, although it's unrelated to your issue.\n\n> max_connections = 600 # (change requires restart)\n> max_fsm_relations = 2000 # min 100, ~70 bytes each\n> synchronous_commit = off\n\nSomething is very wrong. 40tps is low, even for sata raid 1, but\nabsolutely terrible with synchronous_commit = off. This suggests you\nare problems are read related and not sync related. Can you rerun\npgbench using the custom sql feature, passing insert statements?\n\nAre you sure nothing else is going on/wrong with the box? 
Can we see a\nbonnie++ run?\n\nmerlin\n", "msg_date": "Thu, 22 Jan 2009 12:14:17 -0500", "msg_from": "Merlin Moncure <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Ibrahim Harrani escribi�:\n\n> I made several benchmark test with pgbench, TPS rate is almost 40 +/- 5.\n> $ pgbench -i pgbench -s 50 -U pgsql\n> \n> [pgsql@$ pgbench -c 200 -t 2 -U pgsql -d pgbench\n\nTry with 1000 transactions per client or more, instead of 2.\n\nAlso, I think you should set the \"scale\" in the prepare step (-i) at\nleast as high as the number of clients you're going to use. (I dimly\nrecall some recent development in this area that might mean I'm wrong.)\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nThe PostgreSQL Company - Command Prompt, Inc.\n", "msg_date": "Thu, 22 Jan 2009 16:00:12 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Hi Merlin,\n\nHere is the bonnie++ and new pgbench result with high transaction numbers.\n\n\n$ pgbench -i -s 30 -U pgsql pgbench\n$ pbench -c 100 -t 1000 -U pgsql -d pgbench\n\ntransaction type: TPC-B (sort of)\nscaling factor: 30\nnumber of clients: 100\nnumber of transactions per client: 1000\nnumber of transactions actually processed: 100000/100000\ntps = 45.145051 (including connections establishing)\ntps = 45.162367 (excluding connections establishing)\n\n$ bonnie++\nWriting a byte at a time...done\nWriting intelligently...done\nRewriting...done\nReading a byte at a time...done\nReading intelligently...done\nstart 'em...done...done...done...done...done...\nCreate files in sequential order...done.\nStat files in sequential order...done.\nDelete files in sequential order...done.\nCreate files in random order...done.\nStat files in random order...done.\nDelete files in random order...done.\n\nVersion 1.93d ------Sequential Output------ --Sequential Input- --Random-\nConcurrency 1 -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\nMachine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\nmyserver 300M 391 97 9619 1 8537 2 673 99 +++++ +++ 1196 16\nLatency 211ms 388ms 325ms 27652us 722us 6720ms\nVersion 1.93d ------Sequential Create------ --------Random Create--------\nmyserver -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n files /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n 16 9004 25 +++++ +++ +++++ +++ 8246 20 +++++ +++ +++++ +++\nLatency 592ms 208us 102us 673ms 179us 100us\n1.93c,1.93d,myserver,1,1232710758,300M,,391,97,9619,1,8537,2,673,99,+++++,+++,1196,16,16,,,,,9004,25,+++++,+++,+++++,+++,8246,20,+++++,+++,+++++,+++,211ms,388ms,325ms,27652us,722us,6720ms,592ms,208us,102us,673ms,179us,100us\n\nWhen I compare my bonnie++ result with the one at\nhttp://www.westnet.com/~gsmith/content/postgresql/pg-disktesting.htm,\nIt seems that there is something wrong with the disks!?\n\nOn Thu, Jan 22, 2009 at 7:14 PM, Merlin Moncure <[email protected]> wrote:\n> On 1/22/09, Ibrahim Harrani <[email protected]> wrote:\n>>\n>> Is this rate is normal or not? 
What can I do to improve tps and insert\n>> performance?\n>>\n>> postgresql.conf\n>>\n>> shared_buffers = 800MB # min 128kB or max_connections*16kB\n>> work_mem = 2MB # min 64kB\n>> maintenance_work_mem = 32MB # min 1MB\n>\n> I would raise maintenance_work_mem, although it's unrelated to your issue.\n>\n>> max_connections = 600 # (change requires restart)\n>> max_fsm_relations = 2000 # min 100, ~70 bytes each\n>> synchronous_commit = off\n>\n> Something is very wrong. 40tps is low, even for sata raid 1, but\n> absolutely terrible with synchronous_commit = off. This suggests you\n> are problems are read related and not sync related. Can you rerun\n> pgbench using the custom sql feature, passing insert statements?\n>\n> Are you sure nothing else is going on/wrong with the box? Can we see a\n> bonnie++ run?\n>\n> merlin\n>\n", "msg_date": "Thu, 22 Jan 2009 23:27:36 +0200", "msg_from": "Ibrahim Harrani <[email protected]>", "msg_from_op": true, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "This is the another bonnie++ test result with version 1.03\n\nDelete files in random order...done.\nVersion 1.03e ------Sequential Output------ --Sequential Input- --Random-\n -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\nMachine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\nmyserver 300M 13150 7 12713 1 13067 4 72426 53 +++++ +++ +++++ +++\n ------Sequential Create------ --------Random Create--------\n -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n files /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n 16 1048 2 +++++ +++ 2322 3 985 2 +++++ +++ 1797 3\nmyserver,300M,13150,7,12713,1,13067,4,72426,53,+++++,+++,+++++,+++,16,1048,2,+++++,+++,2322,3,985,2,+++++,+++,1797,3\n\nAlso I attached bon_csv2html output for both version of bonnie++\n\nThanks in advance.\n\n\nOn Thu, Jan 22, 2009 at 7:14 PM, Merlin Moncure <[email protected]> wrote:\n> On 1/22/09, Ibrahim Harrani <[email protected]> wrote:\n>>\n>> Is this rate is normal or not? What can I do to improve tps and insert\n>> performance?\n>>\n>> postgresql.conf\n>>\n>> shared_buffers = 800MB # min 128kB or max_connections*16kB\n>> work_mem = 2MB # min 64kB\n>> maintenance_work_mem = 32MB # min 1MB\n>\n> I would raise maintenance_work_mem, although it's unrelated to your issue.\n>\n>> max_connections = 600 # (change requires restart)\n>> max_fsm_relations = 2000 # min 100, ~70 bytes each\n>> synchronous_commit = off\n>\n> Something is very wrong. 40tps is low, even for sata raid 1, but\n> absolutely terrible with synchronous_commit = off. This suggests you\n> are problems are read related and not sync related. Can you rerun\n> pgbench using the custom sql feature, passing insert statements?\n>\n> Are you sure nothing else is going on/wrong with the box? 
Can we see a\n> bonnie++ run?\n>\n> merlin\n>", "msg_date": "Thu, 22 Jan 2009 23:36:11 +0200", "msg_from": "Ibrahim Harrani <[email protected]>", "msg_from_op": true, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Thu, Jan 22, 2009 at 1:27 PM, Ibrahim Harrani\n<[email protected]> wrote:\n> Version 1.93d ------Sequential Output------ --Sequential Input- --Random-\n> Concurrency 1 -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\n> Machine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\n> myserver 300M 391 97 9619 1 8537 2 673 99 +++++ +++ 1196 16\n> Latency 211ms 388ms 325ms 27652us 722us 6720ms\n> Version 1.93d ------Sequential Create------ --------Random Create--------\n> myserver -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n> files /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n> 16 9004 25 +++++ +++ +++++ +++ 8246 20 +++++ +++ +++++ +++\n> Latency 592ms 208us 102us 673ms 179us 100us\n\nYou should be testing bonnie with a file size that is at least double\nthe amount of memory in your machine - in this case, 4GB files, not\n300MB files.\n\n> When I compare my bonnie++ result with the one at\n> http://www.westnet.com/~gsmith/content/postgresql/pg-disktesting.htm,\n> It seems that there is something wrong with the disks!?\n\nYes, your machine appears to be very slow. You should be able to\nwrite in the order of 30-50MB/s+ and read in the order of 40-80MB/s+.\nRandom IO should be in the 200 tps range for a 7200rpm SATA RAID1.\n\n-Dave\n", "msg_date": "Thu, 22 Jan 2009 13:41:25 -0800", "msg_from": "David Rees <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Hi David,\n\n$ I run the test again with the following options. Also I added the\nhtml output of the result.\n\n$ bonnie++ -u pgsql -n 128 -r 2048 -s 4096 -x 1\nUsing uid:70, gid:70.\nWriting with putc()...done\nWriting intelligently...done\nRewriting...done\nReading with getc()...done\nReading intelligently...done\nstart 'em...done...done...done...\nCreate files in sequential order...done.\nStat files in sequential order...done.\nDelete files in sequential order...done.\nCreate files in random order...done.\nStat files in random order...done.\nDelete files in random order...done.\nmyserver,4G,8028,5,8118,1,5079,1,36055,28,32950,3,128.4,0,128,5620,11,142084,99,88739,99,12880,26,109150,99,90362,99\n\nWhat about this result?\nThis is a intel server with onboard raid. I will check raid\nconfiguration again tomorrow. 
Especially Write Cache and Read Ahead\nvalues mentioned at\nhttp://www.intel.com/support/motherboards/server/sb/CS-021019.htm\n\nOn Thu, Jan 22, 2009 at 11:41 PM, David Rees <[email protected]> wrote:\n> On Thu, Jan 22, 2009 at 1:27 PM, Ibrahim Harrani\n> <[email protected]> wrote:\n>> Version 1.93d ------Sequential Output------ --Sequential Input- --Random-\n>> Concurrency 1 -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\n>> Machine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\n>> myserver 300M 391 97 9619 1 8537 2 673 99 +++++ +++ 1196 16\n>> Latency 211ms 388ms 325ms 27652us 722us 6720ms\n>> Version 1.93d ------Sequential Create------ --------Random Create--------\n>> myserver -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n>> files /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n>> 16 9004 25 +++++ +++ +++++ +++ 8246 20 +++++ +++ +++++ +++\n>> Latency 592ms 208us 102us 673ms 179us 100us\n>\n> You should be testing bonnie with a file size that is at least double\n> the amount of memory in your machine - in this case, 4GB files, not\n> 300MB files.\n>\n>> When I compare my bonnie++ result with the one at\n>> http://www.westnet.com/~gsmith/content/postgresql/pg-disktesting.htm,\n>> It seems that there is something wrong with the disks!?\n>\n> Yes, your machine appears to be very slow. You should be able to\n> write in the order of 30-50MB/s+ and read in the order of 40-80MB/s+.\n> Random IO should be in the 200 tps range for a 7200rpm SATA RAID1.\n>\n> -Dave\n>", "msg_date": "Fri, 23 Jan 2009 00:29:57 +0200", "msg_from": "Ibrahim Harrani <[email protected]>", "msg_from_op": true, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "David Rees wrote:\n> On Thu, Jan 22, 2009 at 1:27 PM, Ibrahim Harrani\n> <[email protected]> wrote:\n>> Version 1.93d ------Sequential Output------ --Sequential Input- --Random-\n>> Concurrency 1 -Per Chr- --Block-- -Rewrite- -Per Chr- --Block-- --Seeks--\n>> Machine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP /sec %CP\n>> myserver 300M 391 97 9619 1 8537 2 673 99 +++++ +++ 1196 16\n>> Latency 211ms 388ms 325ms 27652us 722us 6720ms\n>> Version 1.93d ------Sequential Create------ --------Random Create--------\n>> myserver -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n>> files /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP\n>> 16 9004 25 +++++ +++ +++++ +++ 8246 20 +++++ +++ +++++ +++\n>> Latency 592ms 208us 102us 673ms 179us 100us\n> \n> You should be testing bonnie with a file size that is at least double\n> the amount of memory in your machine - in this case, 4GB files, not\n> 300MB files.\n> \n>> When I compare my bonnie++ result with the one at\n>> http://www.westnet.com/~gsmith/content/postgresql/pg-disktesting.htm,\n>> It seems that there is something wrong with the disks!?\n> \n> Yes, your machine appears to be very slow. You should be able to\n> write in the order of 30-50MB/s+ and read in the order of 40-80MB/s+.\n> Random IO should be in the 200 tps range for a 7200rpm SATA RAID1.\n\nHave you tried the really basic speed test?\n\n time (dd if=/dev/zero of=bigfile bs=8192 count=1000000; sync)\n\n time dd if=bigfile of=/dev/null bs=8192\n\nDivide 8.2GB by the times reported. 
On a single 10K SATA drive, I get about 55MB/sec write and 61 MB/sec read.\n\nIf you can't get similar numbers, then something is wrong.\n\nCraig\n", "msg_date": "Thu, 22 Jan 2009 14:44:08 -0800", "msg_from": "Craig James <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Thu, Jan 22, 2009 at 3:29 PM, Ibrahim Harrani\n<[email protected]> wrote:\n\n> This is a intel server with onboard raid. I will check raid\n> configuration again tomorrow. Especially Write Cache and Read Ahead\n> values mentioned at\n> http://www.intel.com/support/motherboards/server/sb/CS-021019.htm\n\nIt would be good to use software RAID to see what kind of numbers you\nget without the built in RAID. Most built in RAID solutions are only\nsuitable for holding the OS and such.\n", "msg_date": "Thu, 22 Jan 2009 17:28:13 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Thu, 22 Jan 2009, Alvaro Herrera wrote:\n\n> Also, I think you should set the \"scale\" in the prepare step (-i) at\n> least as high as the number of clients you're going to use. (I dimly\n> recall some recent development in this area that might mean I'm wrong.)\n\nThe idea behind that maxim (clients>=scale) is that locking on the smaller \ntables will bottleneck resuls if you don't follow that advice. It's a bit \nmessier than that though. Increasing the scale will also make the \ndatabase larger, and once it gets bigger than available RAM your results \nare going to dive hard because of that, more so than the locking would \nhave held you back.\n\nAll kind of irrelevant for Ibrahim's case, because if you're not getting \nmore than 50MB/s out of your disks the pgbench results are kind of moot \nanyway--there's a larger problem to sort out first.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Thu, 22 Jan 2009 22:52:47 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Hi Craig,\n\nHere is the result. 
It seems that disk write is terrible!.\n\nroot@myserver /usr]# time (dd if=/dev/zero of=bigfile bs=8192\ncount=1000000; sync)\n\n\n1000000+0 records in\n1000000+0 records out\n8192000000 bytes transferred in 945.343806 secs (8665630 bytes/sec)\n\nreal 15m46.206s\nuser 0m0.368s\nsys 0m15.560s\n[root@myserver /usr]#\n\n[root@myserver /usr]# time dd if=bigfile of=/dev/null bs=8192\n1000000+0 records in\n1000000+0 records out\n8192000000 bytes transferred in 174.646798 secs (46906099 bytes/sec)\n\nreal 2m54.663s\nuser 0m0.246s\nsys 0m9.307s\n\n\nOn Fri, Jan 23, 2009 at 12:44 AM, Craig James\n<[email protected]> wrote:\n> David Rees wrote:\n>>\n>> On Thu, Jan 22, 2009 at 1:27 PM, Ibrahim Harrani\n>> <[email protected]> wrote:\n>>>\n>>> Version 1.93d ------Sequential Output------ --Sequential Input-\n>>> --Random-\n>>> Concurrency 1 -Per Chr- --Block-- -Rewrite- -Per Chr- --Block--\n>>> --Seeks--\n>>> Machine Size K/sec %CP K/sec %CP K/sec %CP K/sec %CP K/sec %CP\n>>> /sec %CP\n>>> myserver 300M 391 97 9619 1 8537 2 673 99 +++++ +++ 1196 16\n>>> Latency 211ms 388ms 325ms 27652us 722us\n>>> 6720ms\n>>> Version 1.93d ------Sequential Create------ --------Random\n>>> Create--------\n>>> myserver -Create-- --Read--- -Delete-- -Create-- --Read--- -Delete--\n>>> files /sec %CP /sec %CP /sec %CP /sec %CP /sec %CP /sec\n>>> %CP\n>>> 16 9004 25 +++++ +++ +++++ +++ 8246 20 +++++ +++ +++++\n>>> +++\n>>> Latency 592ms 208us 102us 673ms 179us\n>>> 100us\n>>\n>> You should be testing bonnie with a file size that is at least double\n>> the amount of memory in your machine - in this case, 4GB files, not\n>> 300MB files.\n>>\n>>> When I compare my bonnie++ result with the one at\n>>> http://www.westnet.com/~gsmith/content/postgresql/pg-disktesting.htm,\n>>> It seems that there is something wrong with the disks!?\n>>\n>> Yes, your machine appears to be very slow. You should be able to\n>> write in the order of 30-50MB/s+ and read in the order of 40-80MB/s+.\n>> Random IO should be in the 200 tps range for a 7200rpm SATA RAID1.\n>\n> Have you tried the really basic speed test?\n>\n> time (dd if=/dev/zero of=bigfile bs=8192 count=1000000; sync)\n>\n> time dd if=bigfile of=/dev/null bs=8192\n>\n> Divide 8.2GB by the times reported. On a single 10K SATA drive, I get about\n> 55MB/sec write and 61 MB/sec read.\n>\n> If you can't get similar numbers, then something is wrong.\n>\n> Craig\n>\n", "msg_date": "Fri, 23 Jan 2009 07:52:36 +0200", "msg_from": "Ibrahim Harrani <[email protected]>", "msg_from_op": true, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Ibrahim Harrani wrote:\n> Hi Craig,\n> \n> Here is the result. It seems that disk write is terrible!.\n> \n> root@myserver /usr]# time (dd if=/dev/zero of=bigfile bs=8192\n> count=1000000; sync)\n> \n> \n> 1000000+0 records in\n> 1000000+0 records out\n> 8192000000 bytes transferred in 945.343806 secs (8665630 bytes/sec)\n> \n> real 15m46.206s\n> user 0m0.368s\n> sys 0m15.560s\n\nSo it's nothing to do with Postgres. I'm no expert solving this sort of problem, but I'd start by looking for:\n\n - a rogue process that's using disk bandwidth (use vmstat when the system is idle)\n - system logs, maybe there are a zillion error messages\n - if you have a second disk, try its performance\n - if you don't have a second disk, buy one, install it, and try it\n - get another SATA controller and try that\n\nOr do the reverse: Put the disk in a different computer (one that you've tested beforehand and verified is fast) and see if the problem follows the disk. 
Same for the SATA card.\n\nIt could be your SATA controller, the disk, some strange hdparm setting ... who knows?\n\nI ran into this once a LONG time ago with a kernal that didn't recognize the disk or driver or something, and disabled the DMA (direct-memory access) feature, which meant the CPU had to handle every single byte coming from the disk, which of course meant SLOW, plus you couldn't even type while the disk was busy. A simple manual call to hdparm(1) to force DMA on fixed it. Weird stuff like that can be very hard to find.\n\nI also saw very low write speed once on a RAID device with a battery-backed cache, when the battery went dead. The RAID controller went into its conservative mode, which for some reason was much slower than the disk's raw performance.\n\nCraig\n", "msg_date": "Thu, 22 Jan 2009 22:35:24 -0800", "msg_from": "Craig James <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Thu, Jan 22, 2009 at 10:52 PM, Ibrahim Harrani\n<[email protected]> wrote:\n> Hi Craig,\n>\n> Here is the result. It seems that disk write is terrible!.\n>\n> root@myserver /usr]# time (dd if=/dev/zero of=bigfile bs=8192\n> count=1000000; sync)\n>\n>\n> 1000000+0 records in\n> 1000000+0 records out\n> 8192000000 bytes transferred in 945.343806 secs (8665630 bytes/sec)\n\nThat's pretty horrific. Can you try your disks without the\nmotherboard RAID controller in the way either singly or using software\nRAID?\n", "msg_date": "Fri, 23 Jan 2009 01:42:56 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On 1/23/09, Ibrahim Harrani <[email protected]> wrote:\n> Hi Craig,\n>\n> Here is the result. It seems that disk write is terrible!.\n>\n> root@myserver /usr]# time (dd if=/dev/zero of=bigfile bs=8192\n> count=1000000; sync)\n\nNote, while sequential write speeds are a good indication of general\nraid crappyness, they are not the main driver of your low pgbench\nresults (buy they may be involved with poor insert performance)\n\nThat is coming from your seek performance, which is also lousy at ~130\naccording to bonnie.\n\nmerlin\n", "msg_date": "Fri, 23 Jan 2009 09:51:20 -0500", "msg_from": "Merlin Moncure <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Fri, 23 Jan 2009, Merlin Moncure wrote:\n\n> Note, while sequential write speeds are a good indication of general \n> raid crappyness, they are not the main driver of your low pgbench \n> results (buy they may be involved with poor insert performance) That is \n> coming from your seek performance, which is also lousy at ~130 according \n> to bonnie.\n\nThe bonnie seek test includes a write component to it: \"In 10% of cases, \nit is dirtied and written back with write(2).\" Since ~130 isn't that much \nworse than normal for a cheap 7200 RPM drive (near 200), it could easily \nbe the case that the awful performance on those writes is pulling down the \nseek score. 
Sequential writes are low on the scale of things that matter \nin a database context, but if they're screwed up bad enough it can ripple \ninto more important areas.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Fri, 23 Jan 2009 17:58:58 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Greg Smith wrote:\n> On Thu, 22 Jan 2009, Alvaro Herrera wrote:\n> \n>> Also, I think you should set the \"scale\" in the prepare step (-i) at\n>> least as high as the number of clients you're going to use. (I dimly\n>> recall some recent development in this area that might mean I'm wrong.)\n> \n> The idea behind that maxim (clients>=scale) is that locking on the\n> smaller tables will bottleneck resuls if you don't follow that advice. \n> It's a bit messier than that though. Increasing the scale will also\n> make the database larger, and once it gets bigger than available RAM\n> your results are going to dive hard because of that, more so than the\n> locking would have held you back.\n> \n> All kind of irrelevant for Ibrahim's case, because if you're not getting\n> more than 50MB/s out of your disks the pgbench results are kind of moot\n> anyway--there's a larger problem to sort out first.\n> \n> -- \n> * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n> \nIIRC this is a FreeBSD system, not Linux. Could there be some filesystem\nperformance issue here? I know zero about FreeBSD filesystems.\n\nAlso, is there a separate driver machine you can use to run pgbench? The\npgbench client uses resources, which could lower your throughput.\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 25 Jan 2009 08:23:42 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "[snip]\n\nI'm actually doing some very similar testing and getting very similar\nresults. My disk is a single Seagate Barracuda 7200 RPM SATA (160 GB).\nThe OS is openSUSE 11.1 (2.6.27 kernel) with the \"stock\" PostgreSQL\n8.3.5 RPM. I started out running pgbench on the same machine but just\nmoved the driver to another one trying to get better results. The other\ndriver is quite small -- 512 MB 1.6 GHz -- so I might need to shut down\nthe desktop and X on it.\n\nBut the real mystery is this: I have two XFS partitions. Let's call them\nsda5 and sda6. The PostgreSQL install put the database in\n/var/lib/pgsql, which is on sda5. But I created a tablespace on sda6\nspecifically for the database that pgbench is using, and put that\ndatabase there. At least that's what pgadmin3 is telling me I did. But\nwhen I run pgbench, I see intense I/O on *both* partitions sometimes,\nand other times I see it *only* on sda5.\n\nI can understand the \"both\" times -- I didn't move any \"system-level\"\nthings like the write-ahead logs. 
But what I can't understand is the\nperiods when it isn't using sda6, where the tablespace is.\n\nAnyhow, here's a short segment of the results I'm getting.\n\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 2\nnumber of transactions per client: 100\nnumber of transactions actually processed: 200/200\ntps = 37.360964 (including connections establishing)\ntps = 37.430501 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 4\nnumber of transactions per client: 100\nnumber of transactions actually processed: 400/400\ntps = 51.768918 (including connections establishing)\ntps = 51.985556 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 6\nnumber of transactions per client: 100\nnumber of transactions actually processed: 600/600\ntps = 51.462103 (including connections establishing)\ntps = 51.734119 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 8\nnumber of transactions per client: 100\nnumber of transactions actually processed: 800/800\ntps = 44.316328 (including connections establishing)\ntps = 44.473483 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 10\nnumber of transactions per client: 100\nnumber of transactions actually processed: 1000/1000\ntps = 44.750672 (including connections establishing)\ntps = 44.910703 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 12\nnumber of transactions per client: 100\nnumber of transactions actually processed: 1200/1200\ntps = 45.048743 (including connections establishing)\ntps = 45.205084 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 14\nnumber of transactions per client: 100\nnumber of transactions actually processed: 1400/1400\ntps = 26.849217 (including connections establishing)\ntps = 26.916643 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 16\nnumber of transactions per client: 100\nnumber of transactions actually processed: 1600/1600\ntps = 11.187072 (including connections establishing)\ntps = 11.198109 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 18\nnumber of transactions per client: 100\nnumber of transactions actually processed: 1800/1800\ntps = 38.183978 (including connections establishing)\ntps = 38.301026 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 20\nnumber of transactions per client: 100\nnumber of transactions actually processed: 2000/2000\ntps = 35.012091 (including connections establishing)\ntps = 35.109165 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 22\nnumber of transactions per client: 100\nnumber of transactions actually processed: 2200/2200\ntps = 28.286106 (including connections establishing)\ntps = 28.350341 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B 
(sort of)\nscaling factor: 100\nnumber of clients: 24\nnumber of transactions per client: 100\nnumber of transactions actually processed: 2400/2400\ntps = 29.285593 (including connections establishing)\ntps = 29.358284 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 26\nnumber of transactions per client: 100\nnumber of transactions actually processed: 2600/2600\ntps = 29.237558 (including connections establishing)\ntps = 29.308422 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 28\nnumber of transactions per client: 100\nnumber of transactions actually processed: 2800/2800\ntps = 35.251509 (including connections establishing)\ntps = 35.351999 (excluding connections establishing)\nstarting vacuum...end.\ntransaction type: TPC-B (sort of)\nscaling factor: 100\nnumber of clients: 30\nnumber of transactions per client: 100\nnumber of transactions actually processed: 3000/3000\ntps = 29.790523 (including connections establishing)\ntps = 29.863336 (excluding connections establishing)\n\n\nI'm going to move the pgbench database back to the main sda5 partition\n-- given the \"both\" periods, the seeks must be killing me. This is\nactually not a benchmark, but a way of generating sample \"blktrace\"\ndata, so when it's finally working, I'll have some block-layer traces to\ntell me exactly what's going on. :)\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 25 Jan 2009 09:05:00 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Sun, 25 Jan 2009, M. Edward (Ed) Borasky wrote:\n\n> I started out running pgbench on the same machine but just\n> moved the driver to another one trying to get better results.\n\nThat normally isn't necessary until you get to the point where you're \nrunning thousands of transactions per second. The CPU load of the pgbench \nisn't that big, and moving it to a network client does add its own \noverhead.\n\n> I can understand the \"both\" times -- I didn't move any \"system-level\"\n> things like the write-ahead logs. But what I can't understand is the\n> periods when it isn't using sda6, where the tablespace is.\n\nWrites to the database are buffered by the shared_buffers mechanism. If \nyou dirty a block, it has to be unused for a bit after that before it will \nbe written out. The OS also buffers writes to the database disk. The \ncombination of the two means that you can write things sometimes that \nwon't turn into physical disk I/O to the database for a while. That is \nnot true of the WAL, which will always be generating activity when running \npgbench.\n\n> number of transactions actually processed: 3000/3000\n\nGenerally, pgbench results start to be useful when you've pushed through \naround 100,000 transactions or run for a few minutes. It looks like your \nlargest client test might be approaching that threshold only because the \nTPS rate is so slow.\n\nI'm not sure what is going on with your system, but the advice showing up \nearlier in this thread is well worth heeding here: if you haven't \nthoroughly proven that your disk setup works as expected on simple I/O \ntests such as dd and bonnie++, you shouldn't be running pgbench yet. 
\nIt's not a tranparent benchmark unless you really understand what it's \ndoing, and you can waste endless time chasing phantom database setup \nproblems that way when you should be staring at hardware, driver, or OS \nlevel ones instead. Do you know the disks are working as they should \nhere? Does the select-only pgbench give you reasonable results?\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Sun, 25 Jan 2009 15:45:01 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Greg Smith wrote:\n\n> I'm not sure what is going on with your system, but the advice showing\n> up earlier in this thread is well worth heeding here: if you haven't\n> thoroughly proven that your disk setup works as expected on simple I/O\n> tests such as dd and bonnie++, you shouldn't be running pgbench yet.\n> It's not a tranparent benchmark unless you really understand what it's\n> doing, and you can waste endless time chasing phantom database setup\n> problems that way when you should be staring at hardware, driver, or OS\n> level ones instead. Do you know the disks are working as they should\n> here? Does the select-only pgbench give you reasonable results?\n\nActually, this isn't so much a 'pgbench' exercise as it is a source of\n'real-world application' data for my Linux I/O performance visualization\ntools. I've done 'iozone' tests, though not recently. But what I'm\nbuilding is an I/O analysis toolset, not a database application. So I am\n\"staring at hardware, driver or OS level\" issues. :) To be more precise,\nI'm using block I/O layer tools, which are \"beneath\" the filesystem\nlayer but \"above\" the driver and hardware levels.\n\nWhat you might find interesting is that, when I presented the earlier\n(iozone) test results at the Computer Measurement Group meeting in Las\nVegas in December, there were two disk drive engineers in the audience,\nfrom, IIRC, Fujitsu. When they saw my results showing all four Linux\nschedulers yielding essentially the same performance metrics using some\nfairly tight statistical significance tests, they told me that it was\nbecause the drive was re-ordering operations according to its own\ninternal scheduler!\n\nI haven't had a chance to investigate that in any detail yet, but I\nassume that they knew what they were talking about. The drive in\nquestion is an off-the-shelf unit that I got at CompUSA as part of a\nsystem that I had them build. In any event, it's *not* a \"server-grade\nI/O subsystem\", it's a single disk drive designed for \"home desktop PC\"\nuse cases. In short, I don't expect server-grade TPS values.\n\nI did capture some 'iostat' data after I moved the 'pgbench' database\nback into the main partition where the rest PostgreSQL database lives.\nAs I expected, the device and partition utilizations were in the high 90\npercent range.\n\nI don't have the bandwidth figures from 'iostat' handy, but if the\nutilization is 98.4 percent, they may be the best I can get out of the\ndrive with the xfs filesystem and the cfq scheduler. And the choice of\nscheduler might not matter. And the choice of filesystem might not\nmatter. I may be getting all the drive can do.\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Sun, 25 Jan 2009 13:59:58 -0800", "msg_from": "\"M. 
Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Sun, 25 Jan 2009, M. Edward (Ed) Borasky wrote:\n> Actually, this isn't so much a 'pgbench' exercise as it is a source of\n> 'real-world application' data for my Linux I/O performance visualization\n> tools. I've done 'iozone' tests, though not recently. But what I'm\n> building is an I/O analysis toolset, not a database application.\n\nAre these performance results when the analysis tools are active or \ninactive? Have you investigated whether the analysis tools might be \nslowing the I/O down at all?\n\n> ...they told me that it was because the drive was re-ordering operations \n> according to its own internal scheduler!\n\nA modern SATA drive with native queuing will do that. SCSI drives have \nbeen doing that for twenty years or so.\n\nMatthew\n\n-- \n I pause for breath to allow you to get over your shock that I really did cover\n all that in only five minutes... -- Computer Science Lecturer\n", "msg_date": "Mon, 26 Jan 2009 12:18:23 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Matthew Wakeling wrote:\n> On Sun, 25 Jan 2009, M. Edward (Ed) Borasky wrote:\n>> Actually, this isn't so much a 'pgbench' exercise as it is a source of\n>> 'real-world application' data for my Linux I/O performance visualization\n>> tools. I've done 'iozone' tests, though not recently. But what I'm\n>> building is an I/O analysis toolset, not a database application.\n> \n> Are these performance results when the analysis tools are active or\n> inactive? Have you investigated whether the analysis tools might be\n> slowing the I/O down at all?\n\nThere is some overhead with the finest-granularity data capture tool,\nblktrace. I haven't measured it yet but it is supposedly \"about two\npercent\" according to its creators. And the trace files it creates are\nshipped to a collection server over the network -- they are not stored\non the server under test.\n\nThe other tool set, sysstat, simply reads out counters from /proc. The\nlast time I looked at them they were using minuscule amounts of\nprocessor and RAM and doing no disk I/O. But there are (slightly) more\nefficient ways of getting the required counters from /proc, and blktrace\ncan actually reconstruct the I/O counters. People run *production*\nservers with much more intrusive performance logging tool sets than what\nI am using. :)\n\n> \n>> ...they told me that it was because the drive was re-ordering\n>> operations according to its own internal scheduler!\n> \n> A modern SATA drive with native queuing will do that. SCSI drives have\n> been doing that for twenty years or so.\n\nAt the CMG meeting I asked the disk drive engineers, \"well, if the\ndrives are doing the scheduling, why does Linux go to all the trouble?\"\nTheir answer was something like, \"smart disk drives are a relatively\nrecent invention.\" But\n\na. SANs have been smart for years, and people with I/O intensive\nworkloads use SANs designed for them if they can afford them. Linux even\nhas a \"no-op\" scheduler you can use if your SAN is doing a better job.\n\nb. The four I/O schedulers in the 2.6 kernel are relatively recent. I\ncan go dig up the exact release dates on the web, but their first\nappearance in Red Hat was RHEL 4 / 2.6.9, and I think they've been\nre-written once since then.\n\nc. 
If SCSI drives have been doing their own scheduling for 20 years,\nthat's even less incentive for Linux to do more than just provide\nefficient SCSI drivers. I've never gotten down into the nitty-gritty of\nSCSI, and I'm not sure there's any reason to do so now, given that other\nprotocols seem to be taking the lead.\n> \n> Matthew\n> \n\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Mon, 26 Jan 2009 08:10:41 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "M. Edward (Ed) Borasky wrote:\n\n> At the CMG meeting I asked the disk drive engineers, \"well, if the\n> drives are doing the scheduling, why does Linux go to all the trouble?\"\n\nOne big reason is that Linux knows more about the relative importance of\nI/O operations than the individual drives do. Linux's scheduler supports\npriorities in its queuing, and is able to do things like try to perform\nsmall blocking reads before long non-blocking writes in order to improve\noverall system throughput.\n\nThe individual drives just don't have the knowledge to make these\ndecisions. All they can really do is try to fetch the set of blocks\ncurrently requested in the smallest possible time. This can even be\nharmful if, in order to improve overall throughput, the drive reorders\nwhat to the OS is a very high priority operation and performs it later\nthan it would've if following the request queue directly.\n\nI've had to modify the 3ware driver to set very shallow request queues\nfor similar reasons. It was using very deep queuing and the controller\nseems to do very stupid request queuing where it maintains a single deep\nper-controller queue rather than per-array queues, so I was getting\nAWFUL request latencies on high priority small reads when bulk I/O was\nin progress elsewhere - even on another array on the same controller.\n\nI've also had to turn NCQ off on SATA disks before because it was\nfoiling Linux's I/O priority management, so tasks with low ionice levels\nstill ended up rather severely impacting system responsiveness.\n\nHopefully these devices will end up with the smarts to manage I/O in a\npriority queue where read latency is considered as well as throughput.\n\n> Their answer was something like, \"smart disk drives are a relatively\n> recent invention.\" But\n\n--\nCraig Ringer\n", "msg_date": "Tue, 27 Jan 2009 03:09:11 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Craig Ringer wrote:\n> M. Edward (Ed) Borasky wrote:\n> \n>> At the CMG meeting I asked the disk drive engineers, \"well, if the\n>> drives are doing the scheduling, why does Linux go to all the trouble?\"\n> \n> One big reason is that Linux knows more about the relative importance of\n> I/O operations than the individual drives do. Linux's scheduler supports\n> priorities in its queuing, and is able to do things like try to perform\n> small blocking reads before long non-blocking writes in order to improve\n> overall system throughput.\n> \n> The individual drives just don't have the knowledge to make these\n> decisions. All they can really do is try to fetch the set of blocks\n> currently requested in the smallest possible time. 
This can even be\n> harmful if, in order to improve overall throughput, the drive reorders\n> what to the OS is a very high priority operation and performs it later\n> than it would've if following the request queue directly.\n> \n> I've had to modify the 3ware driver to set very shallow request queues\n> for similar reasons. It was using very deep queuing and the controller\n> seems to do very stupid request queuing where it maintains a single deep\n> per-controller queue rather than per-array queues, so I was getting\n> AWFUL request latencies on high priority small reads when bulk I/O was\n> in progress elsewhere - even on another array on the same controller.\n> \n> I've also had to turn NCQ off on SATA disks before because it was\n> foiling Linux's I/O priority management, so tasks with low ionice levels\n> still ended up rather severely impacting system responsiveness.\n> \n> Hopefully these devices will end up with the smarts to manage I/O in a\n> priority queue where read latency is considered as well as throughput.\n> \n>> Their answer was something like, \"smart disk drives are a relatively\n>> recent invention.\" But\n> \n> --\n> Craig Ringer\n> \n\nThanks!! Is there a howto somewhere on disabling this on a Seagate\nBarracuda? I'm in the middle of rebuilding my tools and analysis\nalgorithms as a \"real open source project\", and I'd love to be able to\nhave some working examples that people without access to industrial\nstrength I/O subsystems can use. :)\n\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Mon, 26 Jan 2009 11:12:31 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "On Mon, 26 Jan 2009, M. Edward (Ed) Borasky wrote:\n\n> Is there a howto somewhere on disabling this on a Seagate Barracuda?\n\nhttp://inferno.slug.org/cgi-bin/wiki?Western_Digital_NCQ is a good \ndiscussion of disabling NCQ support under Linux (both in user-space and \ndirectly in the kernel itself). A good summary of the problems with the \nWD NCQ support alluded to there are at http://lkml.org/lkml/2007/4/3/159 \nif you're curious.\n\nIt's a tough time to be picking up inexpensive consumer SATA disks right \nnow. Seagate's drive reliability has been falling hard the last couple of \nyears, but all the WD drives I've started trying out instead have just \nawful firmware. At last they're all cheap I guess.\n\nP.S. I have several of the same basic Seagate drive you have (160GB, even \nbought at CompUSA!) and would expect at least 2-3X better pgbench results \nthan you're seeing. I realized that I've never actually run that test \nwithout first tweaking the postgresql.conf \n(shared_buffers,checkpoint_segments) so that may be part of it. One of my \nsystems here has just one of those disk in it, next time I boot that up \nI'll see what results I get with an untuned config.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Mon, 26 Jan 2009 14:26:55 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Greg Smith wrote:\n> It's a tough time to be picking up inexpensive consumer SATA disks right\n> now. Seagate's drive reliability has been falling hard the last couple\n> of years, but all the WD drives I've started trying out instead have\n> just awful firmware. 
At last they're all cheap I guess.\n\nI also have a 250 GB drive in another machine that I need to move to the\nmain desktop. I don't remember the model of it -- it's been shut down\nfor a year. :( Given that drive, I really don't have a need to buy a\ndisk drive right now, and I'm saving my pennies for an\nindustrial-strength laptop that I can dual-boot Windows 64-bit and\nLinux. I've gotten amazing use out of my ancient Compaq Presario 2110US,\nconsidering it's a \"home unit\", but it's heavy, slow and has only 512 MB\nof RAM.\n\n> P.S. I have several of the same basic Seagate drive you have (160GB,\n> even bought at CompUSA!) and would expect at least 2-3X better pgbench\n> results than you're seeing. I realized that I've never actually run\n> that test without first tweaking the postgresql.conf\n> (shared_buffers,checkpoint_segments) so that may be part of it. One of\n> my systems here has just one of those disk in it, next time I boot that\n> up I'll see what results I get with an untuned config.\n> \n> -- \n> * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n> \nThanks!! I'm just getting into the PostgreSQL tuning part of things.\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Mon, 26 Jan 2009 12:44:46 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "M. Edward (Ed) Borasky wrote:\n> At the CMG meeting I asked the disk drive engineers, \"well, if the\n> drives are doing the scheduling, why does Linux go to all the trouble?\"\n> Their answer was something like, \"smart disk drives are a relatively\n> recent invention.\" But\n\nOne more reason?\nI imagine the disk drives wouldn't be aware of IO scheduling classes\nand priorities ( http://linux.die.net/man/1/ionice ), while I imagine\nthe linux I/O schedulers should.\n", "msg_date": "Mon, 26 Jan 2009 13:48:41 -0800", "msg_from": "Ron Mayer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" }, { "msg_contents": "Ron Mayer wrote:\n> M. Edward (Ed) Borasky wrote:\n>> At the CMG meeting I asked the disk drive engineers, \"well, if the\n>> drives are doing the scheduling, why does Linux go to all the trouble?\"\n>> Their answer was something like, \"smart disk drives are a relatively\n>> recent invention.\" But\n> \n> One more reason?\n> I imagine the disk drives wouldn't be aware of IO scheduling classes\n> and priorities ( http://linux.die.net/man/1/ionice ), while I imagine\n> the linux I/O schedulers should.\n> \nThanks!! I see I have much to learn. Jens Axboe is one of my heroes, BTW\n-- he's half of the blktrace team. :)\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Mon, 26 Jan 2009 13:56:48 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: postgresql 8.3 tps rate" } ]
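[Editorial note: a few of the knobs discussed in the thread above can be poked from a
Linux shell without rebooting. This is only a minimal sketch; sda and the pg_dump
command are stand-ins for whatever device and bulk job apply, and ionice classes are
only honoured by the cfq scheduler.]

# the active I/O scheduler is the one shown in [brackets]
cat /sys/block/sda/queue/scheduler
# switch it on the fly, e.g. to deadline or noop
echo deadline > /sys/block/sda/queue/scheduler
# effectively disable NCQ by allowing only one outstanding command per drive
echo 1 > /sys/block/sda/device/queue_depth
# run a bulk job in the idle I/O class so it yields to normal database I/O
ionice -c3 pg_dump mydb > /tmp/mydb.dump
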
[ { "msg_contents": "Hi list,\n\nClustering my indexes dramatically improves the query performance of many of\nmy queries. Also, the actual clustering takes a very long time for big\ndatabases, roughly 20 hours. I have two questions about how to improve this:\n\n1. I've tweaked maintenance_mem_max and effective_cache_size to a point\nwhere the cluster operation uses a good chunk of my physical RAM, and the OS\ndoes not start swapping. Is there any other parameter I should look at?\n\n2. Reading the documentation for cluster at\nhttp://www.postgresql.org/docs/8.3/interactive/sql-cluster.html, I see that\nall clustering does is reorder the data on disk to 'match' the order of the\nclustered index. My question is, if I dump a clustered database using\npg_dump in custom format, is it necessary to cluster after restoring it? Or\ndoes a dump/restore not guarantee that the order of the data restored is the\nsame as the original dumped database?\n\n3. Somewhat related to #2, what is the best way to move data from a staging\ndatabase on one server, to the production environment on a different server?\nI've been using pg_dump/pg_restore, but there must be a better way...\n\n\nThanks for any pointers,\n\n-Harold\n\nHi list,Clustering my indexes dramatically improves the query performance of many of my queries. Also, the actual clustering takes a very long time for big databases, roughly 20 hours. I have two questions about how to improve this:\n1. I've tweaked maintenance_mem_max and effective_cache_size to a point where the cluster operation uses a good chunk of my physical RAM, and the OS does not start swapping. Is there any other parameter I should look at?\n2. Reading the documentation for cluster at\nhttp://www.postgresql.org/docs/8.3/interactive/sql-cluster.html, I see\nthat all clustering does is reorder the data on disk to 'match' the\norder of the clustered index. My question is, if I dump a clustered\ndatabase using pg_dump in custom format, is it necessary to cluster\nafter restoring it? Or does a dump/restore not guarantee that the order\nof the data restored is the same as the original dumped database?3. Somewhat related to #2, what is the best way to move data from a staging database on one\nserver, to the production environment on a different server? I've been\nusing pg_dump/pg_restore, but there must be a better way...\nThanks for any pointers,-Harold", "msg_date": "Thu, 22 Jan 2009 14:52:12 -0500", "msg_from": "=?ISO-8859-1?Q?Harold_A=2E_Gim=E9nez_Ch=2E?= <[email protected]>", "msg_from_op": true, "msg_subject": "Question about clustering indexes and restores" }, { "msg_contents": "On Thu, Jan 22, 2009 at 02:52:12PM -0500, Harold A. Gim?nez Ch. wrote:\n> Hi list,\n> \n> Clustering my indexes dramatically improves the query performance of many of\n> my queries. Also, the actual clustering takes a very long time for big\n> databases, roughly 20 hours. I have two questions about how to improve this:\n> \n> 1. I've tweaked maintenance_mem_max and effective_cache_size to a point\n> where the cluster operation uses a good chunk of my physical RAM, and the OS\n> does not start swapping. Is there any other parameter I should look at?\n> \n> 2. Reading the documentation for cluster at\n> http://www.postgresql.org/docs/8.3/interactive/sql-cluster.html, I see that\n> all clustering does is reorder the data on disk to 'match' the order of the\n> clustered index. My question is, if I dump a clustered database using\n> pg_dump in custom format, is it necessary to cluster after restoring it? 
Or\n> does a dump/restore not guarantee that the order of the data restored is the\n> same as the original dumped database?\n> \n> 3. Somewhat related to #2, what is the best way to move data from a staging\n> database on one server, to the production environment on a different server?\n> I've been using pg_dump/pg_restore, but there must be a better way...\n> \n> \n> Thanks for any pointers,\n> \n> -Harold\n\nHarold,\n\nThere have been discussions on the hackers list about the pessimal\ncluster performance. Here is a pointer to the discussion, it seems\nthat a faster way is to build a new table with the desired orderwith\n\"CREATE TABLE AS ... ORDER BY ...\":\n\nhttp://www.mail-archive.com/[email protected]/msg121205.html\n\nCheers,\nKen\n", "msg_date": "Thu, 22 Jan 2009 13:58:06 -0600", "msg_from": "Kenneth Marshall <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Question about clustering indexes and restores" }, { "msg_contents": "Many thanks for your answer. I did see a comment about this in the\ndocumentation on the link I posted below.\n\nMy main question remains though: Is it necessary to cluster after a restore?\n\nThanks again!\n\nOn Thu, Jan 22, 2009 at 2:58 PM, Kenneth Marshall <[email protected]> wrote:\n\n> On Thu, Jan 22, 2009 at 02:52:12PM -0500, Harold A. Gim?nez Ch. wrote:\n> > Hi list,\n> >\n> > Clustering my indexes dramatically improves the query performance of many\n> of\n> > my queries. Also, the actual clustering takes a very long time for big\n> > databases, roughly 20 hours. I have two questions about how to improve\n> this:\n> >\n> > 1. I've tweaked maintenance_mem_max and effective_cache_size to a point\n> > where the cluster operation uses a good chunk of my physical RAM, and the\n> OS\n> > does not start swapping. Is there any other parameter I should look at?\n> >\n> > 2. Reading the documentation for cluster at\n> > http://www.postgresql.org/docs/8.3/interactive/sql-cluster.html, I see\n> that\n> > all clustering does is reorder the data on disk to 'match' the order of\n> the\n> > clustered index. My question is, if I dump a clustered database using\n> > pg_dump in custom format, is it necessary to cluster after restoring it?\n> Or\n> > does a dump/restore not guarantee that the order of the data restored is\n> the\n> > same as the original dumped database?\n> >\n> > 3. Somewhat related to #2, what is the best way to move data from a\n> staging\n> > database on one server, to the production environment on a different\n> server?\n> > I've been using pg_dump/pg_restore, but there must be a better way...\n> >\n> >\n> > Thanks for any pointers,\n> >\n> > -Harold\n>\n> Harold,\n>\n> There have been discussions on the hackers list about the pessimal\n> cluster performance. Here is a pointer to the discussion, it seems\n> that a faster way is to build a new table with the desired orderwith\n> \"CREATE TABLE AS ... ORDER BY ...\":\n>\n> http://www.mail-archive.com/[email protected]/msg121205.html\n>\n> Cheers,\n> Ken\n>\n\nMany thanks for your answer. I did see a comment about this in the documentation on the link I posted below.My main question remains though: Is it necessary to cluster after a restore?Thanks again!\nOn Thu, Jan 22, 2009 at 2:58 PM, Kenneth Marshall <[email protected]> wrote:\nOn Thu, Jan 22, 2009 at 02:52:12PM -0500, Harold A. Gim?nez Ch. wrote:\n> Hi list,\n>\n> Clustering my indexes dramatically improves the query performance of many of\n> my queries. 
Also, the actual clustering takes a very long time for big\n> databases, roughly 20 hours. I have two questions about how to improve this:\n>\n> 1. I've tweaked maintenance_mem_max and effective_cache_size to a point\n> where the cluster operation uses a good chunk of my physical RAM, and the OS\n> does not start swapping. Is there any other parameter I should look at?\n>\n> 2. Reading the documentation for cluster at\n> http://www.postgresql.org/docs/8.3/interactive/sql-cluster.html, I see that\n> all clustering does is reorder the data on disk to 'match' the order of the\n> clustered index. My question is, if I dump a clustered database using\n> pg_dump in custom format, is it necessary to cluster after restoring it? Or\n> does a dump/restore not guarantee that the order of the data restored is the\n> same as the original dumped database?\n>\n> 3. Somewhat related to #2, what is the best way to move data from a staging\n> database on one server, to the production environment on a different server?\n> I've been using pg_dump/pg_restore, but there must be a better way...\n>\n>\n> Thanks for any pointers,\n>\n> -Harold\n\nHarold,\n\nThere have been discussions on the hackers list about the pessimal\ncluster performance. Here is a pointer to the discussion, it seems\nthat a faster way is to build a new table with the desired orderwith\n\"CREATE TABLE AS ... ORDER BY ...\":\n\nhttp://www.mail-archive.com/[email protected]/msg121205.html\n\nCheers,\nKen", "msg_date": "Thu, 22 Jan 2009 15:02:19 -0500", "msg_from": "=?ISO-8859-1?Q?Harold_A=2E_Gim=E9nez_Ch=2E?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Question about clustering indexes and restores" } ]
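[Editorial note: a minimal sketch of the CREATE TABLE AS ... ORDER BY approach pointed
to in the thread above. Table, column and index names are invented, and constraints,
defaults, triggers and permissions all have to be recreated by hand, so treat it as an
outline rather than a drop-in replacement for CLUSTER.]

psql mydb <<'SQL'
BEGIN;
-- one big sort instead of CLUSTER's index-order rewrite
CREATE TABLE big_table_sorted AS
    SELECT * FROM big_table ORDER BY some_key;
ALTER TABLE big_table RENAME TO big_table_old;
ALTER TABLE big_table_sorted RENAME TO big_table;
COMMIT;
-- indexes stay behind on the old table, so rebuild them on the new one
CREATE INDEX big_table_some_key_idx ON big_table (some_key);
ANALYZE big_table;
SQL
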
[ { "msg_contents": "> I spotted a new interesting SSD review. it's a $379\n> 5.25\" drive bay device that holds up to 8 DDR2 DIMMS\n> (up to 8G per DIMM) and appears to the system as a SATA\n> drive (or a pair of SATA drives that you can RAID-0 to get\n> past the 300MB/s SATA bottleneck)\n> \n\nSounds very similar to the Gigabyte iRam drives of a few years ago\n\nhttp://en.wikipedia.org/wiki/I-RAM\n\n\n\n\n \n", "msg_date": "Fri, 23 Jan 2009 11:12:18 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": true, "msg_subject": "Re: SSD performance" }, { "msg_contents": "I spotted a new interesting SSD review. it's a $379 5.25\" drive bay device \nthat holds up to 8 DDR2 DIMMS (up to 8G per DIMM) and appears to the \nsystem as a SATA drive (or a pair of SATA drives that you can RAID-0 to \nget past the 300MB/s SATA bottleneck)\n\nthe best review I've seen only ran it on windows (and a relativly old \nhardware platform at that), I suspect it's performance would be even \nbetter under linux and with a top-notch controller card (especially with \nthe RAID option)\n\nit has a battery backup (good for 4 hours or so) and a CF cardslot that it \ncan back the ram up to (~20 min to save 32G and 15 min to restore, so not \nsomething you really want to make use of, but a good safety net)\n\nthe review also includes the Intel X-25E and X-25M drives (along with a \nvariety of SCSI and SATA drives)\n\nhttp://techreport.com/articles.x/16255/1\n\nequipped with 16G the street price should be ~$550, with 32G it should be \n~$1200 with 64G even more expensive, but the performance is very good. \nthere are times when the X-25E matches it or edges it out in these tests, \nso there is room for additional improvement, but as I noted above it may \ndo better with a better controller and non-windows OS.\n\npower consumption is slightly higher than normal hard drives at about 12w \n(_much_ higher than the X-25)\n\nthey also have a review of the X-25E vs the X-25M\n\nhttp://techreport.com/articles.x/15931/1\n\none thing that both of these reviews show is that if you are doing a \nsignificant amount of writing the X-25M is no better than a normal hard \ndrive (and much of the time in the middle to bottom of the pack compared \nto normal hard drives)\n\nDavid Lang\n", "msg_date": "Fri, 23 Jan 2009 03:35:30 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "SSD performance" }, { "msg_contents": "On Fri, 23 Jan 2009, Glyn Astill wrote:\n\n>> I spotted a new interesting SSD review. 
it's a $379\n>> 5.25\" drive bay device that holds up to 8 DDR2 DIMMS\n>> (up to 8G per DIMM) and appears to the system as a SATA\n>> drive (or a pair of SATA drives that you can RAID-0 to get\n>> past the 300MB/s SATA bottleneck)\n>>\n>\n> Sounds very similar to the Gigabyte iRam drives of a few years ago\n>\n> http://en.wikipedia.org/wiki/I-RAM\n\nsimilar concept, but there are some significant differences\n\nthe iRam was limited to 4G, used DDR ram, and used a PCI slot for power \n(which can be in \nshort supply nowdays)\n\nthis new drive can go to 64G, uses DDR2 ram (cheaper than DDR nowdays), \ngets powered like a normal SATA drive, can use two SATA channels (to be \nable to get past the throughput limits of a single SATA interface), and \nhas a CF card slot to backup the data to if the system powers down.\n\nplus the performance appears to be significantly better (even without \nusing the second SATA interface)\n\nDavid Lang\n\n", "msg_date": "Fri, 23 Jan 2009 04:39:07 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On 1/23/09, [email protected] <[email protected]> wrote:\n> the review also includes the Intel X-25E and X-25M drives (along with a\n> variety of SCSI and SATA drives)\n>\n\nThe x25-e is a game changer for database storage. It's still a little\npricey for what it does but who can argue with these numbers?\nhttp://techreport.com/articles.x/15931/9\n\nmerlin\n", "msg_date": "Fri, 23 Jan 2009 09:59:46 -0500", "msg_from": "Merlin Moncure <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 23 Jan 2009, Merlin Moncure wrote:\n\n> On 1/23/09, [email protected] <[email protected]> wrote:\n>> the review also includes the Intel X-25E and X-25M drives (along with a\n>> variety of SCSI and SATA drives)\n>>\n>\n> The x25-e is a game changer for database storage. It's still a little\n> pricey for what it does but who can argue with these numbers?\n> http://techreport.com/articles.x/15931/9\n\ntake a look at this ram based drive, specificly look at the numbers here\nhttp://techreport.com/articles.x/16255/9\n\nthey are about as much above the X25-e as the X25-e is above normal \ndrives.\n\nDavid Lang\n\n", "msg_date": "Fri, 23 Jan 2009 10:19:01 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 23 Jan 2009, [email protected] wrote:\n\n> take a look at this ram based drive, specificly look at the numbers here\n> http://techreport.com/articles.x/16255/9\n> they are about as much above the X25-e as the X25-e is above normal drives.\n\nThey're so close to having a killer product with that one. All they need \nto do is make the backup to the CF card automatic once the battery backup \npower drops low (but not so low there's not enough power to do said \nbackup) and it would actually be a reasonable solution. 
The whole \nbattery-backed cache approach is risky enough when the battery is expected \nto last a day or two; with this product only giving 4 hours, it not hard \nto imagine situations where you'd lose everything on there.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Sun, 25 Jan 2009 02:26:44 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Sun, 25 Jan 2009, Greg Smith wrote:\n\n> On Fri, 23 Jan 2009, [email protected] wrote:\n>\n>> take a look at this ram based drive, specificly look at the numbers here\n>> http://techreport.com/articles.x/16255/9\n>> they are about as much above the X25-e as the X25-e is above normal drives.\n>\n> They're so close to having a killer product with that one. All they need to \n> do is make the backup to the CF card automatic once the battery backup power \n> drops low (but not so low there's not enough power to do said backup) and it \n> would actually be a reasonable solution. The whole battery-backed cache \n> approach is risky enough when the battery is expected to last a day or two; \n> with this product only giving 4 hours, it not hard to imagine situations \n> where you'd lose everything on there.\n\nthey currently have it do a backup immediatly on power loss (which is a \nsafe choice as the contents won't be changing without power), but it then \npowers off (which is not good for startup time afterwords)\n\nDavid Lang\n", "msg_date": "Sun, 25 Jan 2009 00:36:28 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "[email protected] writes:\n\n> they currently have it do a backup immediatly on power loss (which is a safe\n> choice as the contents won't be changing without power), but it then powers off\n> (which is not good for startup time afterwords)\n\nSo if you have a situation where it's power cycling rapidly each iteration\ndrains the battery of the time it takes to save the state but only charges it\nfor the time the power is on. I wonder how many iterations that gives you.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's Slony Replication support!\n", "msg_date": "Sun, 25 Jan 2009 11:55:30 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Sun, 25 Jan 2009, Gregory Stark wrote:\n\n> [email protected] writes:\n>\n>> they currently have it do a backup immediatly on power loss (which is a safe\n>> choice as the contents won't be changing without power), but it then powers off\n>> (which is not good for startup time afterwords)\n>\n> So if you have a situation where it's power cycling rapidly each iteration\n> drains the battery of the time it takes to save the state but only charges it\n> for the time the power is on. 
I wonder how many iterations that gives you.\n\ngood question.\n\nassuming that it's smart enough to not start a save if it didn't finish \ndoing a restore, and going from the timings in the article (~20 min save, \n~15 min load and 4 hour battery life)\nyou would get ~12 cycles from the initial battery\nplus whatever you could get from the battery charging (~3 hours during the \ninitial battery time)\n\nif the battery could be fully charged in 3 hours it could keep doing this \nindefinantly.\n\nif it takes 6 hours it would get a half charge, so 12+6+3+1=22 cycles\n\nbut even the initial 12 cycles is long enough that you should probably be \ntaking action by then.\n\nin most situations you are going to have a UPS on your system anyway, and \nit will have the same type of problem (but usually with _much_ less than 4 \nhours worth of operation to start with)\n\n\nso while you could loose data from intermittent power, I think you would \nbe far more likely to loose data due to a defective battery or the CF card \nnot being fully seated or something like that.\n\nDavid Lang\n", "msg_date": "Sun, 25 Jan 2009 05:16:40 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On 1/23/09 3:35 AM, \"[email protected]\" <[email protected]> wrote:\nhttp://techreport.com/articles.x/15931/1\n\none thing that both of these reviews show is that if you are doing a\nsignificant amount of writing the X-25M is no better than a normal hard\ndrive (and much of the time in the middle to bottom of the pack compared\nto normal hard drives)\n\nDavid Lang\n\n\nThe X-25-M may not have write STR rates that high compared to normal disks, but for write latency, it is FAR superior to a normal disk, and for random writes will demolish most small and medium sized raid arrays by itself. It will push 30MB to 60MB /sec of random 8k writes, or ~2000 to 12000 8k fsyncs/sec. The -E is definitely a lot better, but the -M can get you pretty far.\n\nFor any postgres installation where you don't expect to write to a WAL log at more than 30MB/sec (the vast majority), it is good enough to use (mirrored) as a WAL device, without a battery back up, with very good performance. A normal disk cannot do that.\n\nAlso, it can be used very well for the OS swap, and some other temp space to prevent swap storms from severely impacting the system.\n\nFor anyone worried about the X 25-M's ability to withstand lots of write cycles ... Calculate how long it would take you to write 800TB to the drive at a typical rate. For most use cases that's going to be > 5 years. For the 160GB version, it will take 2x as much data and time to wear it down.\n\nSamsung, SanDisk, Toshiba, Micron, and several others are expected to have low random write latency, next gen SSD's this year. A few of these are claiming > 150MB/sec for the writes, even for MLC based drives.\n\nA RAM based device is intriguing, but an ordinary SSD will be enough to make most Postgres databases CPU bound, and with those there is no concern about data loss on power failure. The Intel X 25 series does not even use the RAM on it for write cache! 
(it uses some SRAM on the controller chip for that, and its fsync safe) The RAM is working memory for the controller chip to cache the LBA to Physical flash block mappings and other data needed for the wear leveling, contrary to what many reviews may claim.\n\n\n\nRe: [PERFORM] SSD performance\n\n\n\n\n\nOn 1/23/09 3:35 AM, \"[email protected]\" <[email protected]> wrote:\nhttp://techreport.com/articles.x/15931/1\n\none thing that both of these reviews show is that if you are doing a\nsignificant amount of writing the X-25M is no better than a normal hard\ndrive (and much of the time in the middle to bottom of the pack compared\nto normal hard drives)\n\nDavid Lang\n\n\nThe X-25-M may not have write STR rates that high compared to normal disks, but for write latency, it is FAR superior to a normal disk, and for random writes will demolish most small and medium sized raid arrays by itself.  It will push 30MB to 60MB /sec of random 8k writes, or ~2000 to 12000 8k fsyncs/sec.  The –E is definitely a lot better, but the –M can get you pretty far.\n\nFor any postgres installation where you don’t expect to write to a WAL log at more than 30MB/sec (the vast majority), it is good enough to use (mirrored) as a WAL device, without a battery back up, with very good performance.  A normal disk cannot do that.\n\nAlso, it can be used very well for the OS swap, and some other temp space to prevent swap storms from severely impacting the system.\n\nFor anyone worried about the X 25–M’s ability to withstand lots of write cycles ... Calculate how long it would take you to write 800TB to the drive at a typical rate.  For most use cases that’s going to be > 5 years.  For the 160GB version, it will take 2x as much data and time to wear it down.   \n\nSamsung, SanDisk, Toshiba, Micron, and several others are expected to have low random write latency, next gen SSD’s this year.  A few of these are claiming > 150MB/sec for the writes, even for MLC based drives.\n\nA RAM based device is intriguing, but an ordinary SSD will be enough to make most Postgres databases CPU bound, and with those there is no concern about data loss on power failure.  The Intel X 25 series does not even use the RAM on it for write cache! (it uses some SRAM on the controller chip for that, and its fsync safe) The RAM is working memory for the controller chip to cache the LBA to Physical flash block mappings and other data needed for the wear leveling, contrary to what many reviews may claim.", "msg_date": "Fri, 30 Jan 2009 19:28:42 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "I somehow managed to convince the powers that be to let me get a \ncouple X25-E's.\nI tossed them in my macpro (8 cores), fired up Ubuntu 8.10 and did \nsome testing.\n\nRaw numbers are very impressive. I was able to get 3700 random seek \n+read's a second. In a R1 config it stayed at 3700, but if I added \nanother process it went up to 7000, and eventually settled into the \n4000s. If I added in some random writing with fsyncs to it, it \nsettled at 2200 (to be specific, I had 3 instances going - 2 read-only \nand 1 read-20% write to get that). These numbers were obtained \nrunning a slightly modified version of pgiosim (which is on \npgfoundtry) - it randomly seeks to a \"block\" in a file and reads 8kB \nof data, optionally writing the block back out.\n\nNow, moving into reality I compiled 8.3.latest and gave it a whirl. 
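A run of that shape would look roughly like the following -- the transaction count is a
guess, only the scale factor and client count are taken from the results below, and the
database name is arbitrary:

createdb bench
pgbench -i -s 50 bench          # initialize at scale factor 50, roughly 800MB of data
pgbench -c 10 -t 10000 bench    # 10 concurrent clients, 10000 transactions each
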
\nRunning against a software R1 of the 2 x25-e's I got the following \npgbench results:\n(note config tweaks: work_mem=>4mb, shared_buffers=>1gb, should \nprobably have tweaked checkpoint_segs, as it was emitting lots of \nnotices about that, but I didn't).\n\n(multiple runs, avg tps)\n\nScalefactor 50, 10 clients: 1700tps\n\nAt that point I realized write caching on the drives was ON. So I \nturned it off at this point:\n\nScalefactor 50, 10 clients: 900tps\n\nAt scalefactor 50 the dataset fits well within memory, so I scaled it \nup.\n\nScalefactor 1500: 10 clients: 420tps\n\n\nWhile some of us have arrays that can smash those numbers, that is \ncrazy impressive for a plain old mirror pair. I also did not do much \ntweaking of PG itself.\n\nWhile I'm in the testing mood, are there some other tests folks would \nlike me to try out?\n\n--\nJeff Trout <[email protected]>\nhttp://www.stuarthamm.net/\nhttp://www.dellsmartexitin.com/\n\n\n\n", "msg_date": "Tue, 03 Feb 2009 12:54:53 -0500", "msg_from": "Jeff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Tue, Feb 3, 2009 at 9:54 AM, Jeff <[email protected]> wrote:\n> Scalefactor 50, 10 clients: 900tps\n>\n> At scalefactor 50 the dataset fits well within memory, so I scaled it up.\n>\n> Scalefactor 1500: 10 clients: 420tps\n>\n> While some of us have arrays that can smash those numbers, that is crazy\n> impressive for a plain old mirror pair. I also did not do much tweaking of\n> PG itself.\n>\n> While I'm in the testing mood, are there some other tests folks would like\n> me to try out?\n\nHow do the same benchmarks fare on regular rotating discs on the same\nsystem? Ideally we'd have numbers for 7.2k and 10k disks to give us\nsome sort of idea of exactly how much faster we're talking here. Hey,\nsince you asked, right? ;-)\n\n-Dave\n", "msg_date": "Tue, 3 Feb 2009 10:26:09 -0800", "msg_from": "David Rees <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "I don't think write caching on the disks is a risk to data integrity if you are configured correctly.\nFurthermore, these drives don't use the RAM for write cache, they only use a bit of SRAM on the controller chip for that (and respect fsync), so write caching should be fine.\n\nConfirm that NCQ is on (a quick check in dmesg), I have seen degraded performance when the wrong SATA driver is in use on some linux configs, but your results indicate it's probably fine.\n\nHow much RAM is in that machine?\n\nSome suggested tests if you are looking for more things to try :D\n-- What effect does the following tuning have:\n\nTurn the I/O scheduler to 'noop' ( echo noop > /sys/block/<devices>/queue/scheduler) I'm assuming the current was cfq, deadline may also be interesting, anticipatory would have comically horrible results.\nTune upward the readahead value ( blockdev --setra <value> /dev/<device>) -- try 16384 (8MB) This probably won't help that much for a pgbench tune, it's more for large sequential scans in other workload types, and more important for rotating media.\nGenerally speaking with SSD's, tuning the above values does less than with hard drives.\n\nFile system effects would also be interesting. 
If you're in need of more tests to try, compare XFS to EXT3 (I am assuming the below is ext3).\n\nOn 2/3/09 9:54 AM, \"Jeff\" <[email protected]> wrote:\n\nI somehow managed to convince the powers that be to let me get a\ncouple X25-E's.\nI tossed them in my macpro (8 cores), fired up Ubuntu 8.10 and did\nsome testing.\n\nRaw numbers are very impressive. I was able to get 3700 random seek\n+read's a second. In a R1 config it stayed at 3700, but if I added\nanother process it went up to 7000, and eventually settled into the\n4000s. If I added in some random writing with fsyncs to it, it\nsettled at 2200 (to be specific, I had 3 instances going - 2 read-only\nand 1 read-20% write to get that). These numbers were obtained\nrunning a slightly modified version of pgiosim (which is on\npgfoundtry) - it randomly seeks to a \"block\" in a file and reads 8kB\nof data, optionally writing the block back out.\n\nNow, moving into reality I compiled 8.3.latest and gave it a whirl.\nRunning against a software R1 of the 2 x25-e's I got the following\npgbench results:\n(note config tweaks: work_mem=>4mb, shared_buffers=>1gb, should\nprobably have tweaked checkpoint_segs, as it was emitting lots of\nnotices about that, but I didn't).\n\n(multiple runs, avg tps)\n\nScalefactor 50, 10 clients: 1700tps\n\nAt that point I realized write caching on the drives was ON. So I\nturned it off at this point:\n\nScalefactor 50, 10 clients: 900tps\n\nAt scalefactor 50 the dataset fits well within memory, so I scaled it\nup.\n\nScalefactor 1500: 10 clients: 420tps\n\n\nWhile some of us have arrays that can smash those numbers, that is\ncrazy impressive for a plain old mirror pair. I also did not do much\ntweaking of PG itself.\n\nWhile I'm in the testing mood, are there some other tests folks would\nlike me to try out?\n\n--\nJeff Trout <[email protected]>\nhttp://www.stuarthamm.net/\nhttp://www.dellsmartexitin.com/", "msg_date": "Tue, 3 Feb 2009 10:43:36 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Tue, Feb 3, 2009 at 10:54 AM, Jeff <[email protected]> wrote:\n\n> Now, moving into reality I compiled 8.3.latest and gave it a whirl. Running\n> against a software R1 of the 2 x25-e's I got the following pgbench results:\n> (note config tweaks: work_mem=>4mb, shared_buffers=>1gb, should probably\n> have tweaked checkpoint_segs, as it was emitting lots of notices about that,\n> but I didn't).\n\nYou may find you get better numbers with a lower shared_buffers value,\nand definitely try cranking up number of checkpoint segments to\nsomething in the 50 to 100 range.\n\n> (multiple runs, avg tps)\n>\n> Scalefactor 50, 10 clients: 1700tps\n>\n> At that point I realized write caching on the drives was ON. So I turned it\n> off at this point:\n>\n> Scalefactor 50, 10 clients: 900tps\n>\n> At scalefactor 50 the dataset fits well within memory, so I scaled it up.\n>\n> Scalefactor 1500: 10 clients: 420tps\n>\n>\n> While some of us have arrays that can smash those numbers, that is crazy\n> impressive for a plain old mirror pair. I also did not do much tweaking of\n> PG itself.\n\nOn a scale factor or 100 my 12 disk 15k.5 seagate sas drives on an\nareca get somewhere in the 2800 to 3200 tps range on sustained tests\nfor anywhere from 8 to 32 or so concurrent clients. I get similar\nperformance falloffs as I increase the testdb scaling factor.\n\nBut for a pair of disks in a mirror with no caching controller, that's\nimpressive. 
I've already told my boss our next servers will likely\nhave intel's SSDs in them.\n\n> While I'm in the testing mood, are there some other tests folks would like\n> me to try out?\n\nhow about varying the number of clients with a static scalefactor?\n\n-- \nWhen fascism comes to America, it will be the intolerant selling\nfascism as diversity.\n", "msg_date": "Tue, 3 Feb 2009 12:50:40 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "\nOn Feb 3, 2009, at 1:43 PM, Scott Carey wrote:\n\n> I don't think write caching on the disks is a risk to data integrity \n> if you are configured correctly.\n> Furthermore, these drives don't use the RAM for write cache, they \n> only use a bit of SRAM on the controller chip for that (and respect \n> fsync), so write caching should be fine.\n>\n> Confirm that NCQ is on (a quick check in dmesg), I have seen \n> degraded performance when the wrong SATA driver is in use on some \n> linux configs, but your results indicate it's probably fine.\n>\n\nAs it turns out, there's a bug/problem/something with the controller \nin the macpro vs the ubuntu drivers where the controller goes into \n\"works, but not as super as it could\" mode, so NCQ is effectively \ndisabled, haven't seen a workaround yet. Not sure if this problem \nexists on other distros (used ubuntu because I just wanted to try a \nlive). I read some stuff from Intel on the NCQ and in a lot of cases \nit won't make that much difference because the thing can respond so \nfast.\n\n\n> How much RAM is in that machine?\n>\n\n8GB\n\n> Some suggested tests if you are looking for more things to try :D\n> -- What effect does the following tuning have:\n>\n> Turn the I/O scheduler to 'noop' ( echo noop > /sys/block/<devices>/ \n> queue/scheduler) I'm assuming the current was cfq, deadline may \n> also be interesting, anticipatory would have comically horrible \n> results.\n\nI only tested noop, if you think about it, it is the most logical one \nas an SSD really does not need an elevator at all. 
There is no \nrotational latency or moving of the arm that the elevator was designed \nto cope with.\n\nbut, here are the results:\nscale 50, 100 clients, 10x txns: 1600tps (a noticeable improvement!)\nscale 1500, 100 clients, 10xtxns: 434tps\n\nI'm going to try to get some results for raptors, but there was \nanother post earlier today that got higher, but not ridiculously \nhigher tps but it required 14 15k disks instead of 2\n\n>\n> Tune upward the readahead value ( blockdev --setra <value> /dev/ \n> <device>) -- try 16384 (8MB) This probably won't help that much \n> for a pgbench tune, it's more for large sequential scans in other \n> workload types, and more important for rotating media.\n> Generally speaking with SSD's, tuning the above values does less \n> than with hard drives.\n>\n\nYeah, I don't think RA will help pgbench, and for my workloads it is \nrather useless as they tend to be tons of random IO.\n\nI've got some Raptors here too I'll post numbers wed or thu.\n\n--\nJeff Trout <[email protected]>\nhttp://www.stuarthamm.net/\nhttp://www.dellsmartexitin.com/\n\n\n\n", "msg_date": "Wed, 04 Feb 2009 08:06:12 -0500", "msg_from": "Jeff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Wed, 4 Feb 2009, Jeff wrote:\n\n> On Feb 3, 2009, at 1:43 PM, Scott Carey wrote:\n>\n>> I don't think write caching on the disks is a risk to data integrity if you \n>> are configured correctly.\n>> Furthermore, these drives don't use the RAM for write cache, they only use \n>> a bit of SRAM on the controller chip for that (and respect fsync), so write \n>> caching should be fine.\n>> \n>> Confirm that NCQ is on (a quick check in dmesg), I have seen degraded \n>> performance when the wrong SATA driver is in use on some linux configs, but \n>> your results indicate it's probably fine.\n>> \n>\n> As it turns out, there's a bug/problem/something with the controller in the \n> macpro vs the ubuntu drivers where the controller goes into \"works, but not as \n> super as it could\" mode, so NCQ is effectively disabled, haven't seen a \n> workaround yet. Not sure if this problem exists on other distros (used ubuntu \n> because I just wanted to try a live). I read some stuff from Intel on the \n> NCQ and in a lot of cases it won't make that much difference because the \n> thing can respond so fast.\n\nactually, what I've heard is that NCQ is a win on the intel drives because \nit avoids having the drive wait while the OS prepares and sends the next \nwrite.\n\n>> Some suggested tests if you are looking for more things to try :D\n>> -- What effect does the following tuning have:\n>> \n>> Turn the I/O scheduler to 'noop' ( echo noop > \n>> /sys/block/<devices>/queue/scheduler) I'm assuming the current was cfq, \n>> deadline may also be interesting, anticipatory would have comically \n>> horrible results.\n>\n> I only tested noop, if you think about it, it is the most logical one as an \n> SSD really does not need an elevator at all. There is no rotational latency \n> or moving of the arm that the elevator was designed to cope with.\n\nyou would think so, but that isn't necessarily the case. 
here's a post \nwhere NOOP lost to CFQ by ~24% when there were multiple processes competing \nfor the drive (not on intel drives)\n\nhttp://www.alphatek.info/2009/02/02/io-scheduler-and-ssd-part-2/\n\nDavid Lang\n", "msg_date": "Wed, 4 Feb 2009 06:45:01 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 30 Jan 2009, Scott Carey wrote:\n> For anyone worried about the X25-M's ability to withstand lots of write \n> cycles ... Calculate how long it would take you to write 800TB to the \n> drive at a typical rate. For most use cases that's going to be > 5 \n> years. For the 160GB version, it will take 2x as much data and time to \n> wear it down.\n\nThis article just came out: \nhttp://www.theregister.co.uk/2009/02/20/intel_x25emmental/\n\nand\n\nhttp://www.pcper.com/article.php?aid=669\n\nIt seems that the performance of the X25-M degrades over time, as the \nwrite levelling algorithm fragments the device into little bits. \nEspecially under database-like access patterns.\n\nMatthew\n\n-- \n I quite understand I'm doing algebra on the blackboard and the usual response\n is to throw objects... If you're going to freak out... wait until party time\n and invite me along -- Computer Science Lecturer", "msg_date": "Fri, 20 Feb 2009 15:26:43 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" } ]
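A rough sketch of the tuning and client-scaling experiments discussed in this thread, assuming a Linux host, an SSD that appears as /dev/sdb and a pgbench database named bench (both names are hypothetical placeholders, not taken from the posts above); the scheduler switch and readahead bump are the settings Scott Carey suggested, and the client sweep is the test Scott Marlowe asked for:

  #!/bin/sh
  # Hypothetical device and database names -- adjust before running.
  DEV=sdb
  DB=bench

  # Quick check that NCQ was negotiated for the drive.
  dmesg | grep -i ncq

  # Switch the I/O scheduler and raise readahead, as suggested in the thread.
  echo noop > /sys/block/$DEV/queue/scheduler
  blockdev --setra 16384 /dev/$DEV

  # Sweep the client count at a fixed scale factor and record tps.
  # (The database should have been initialized with pgbench -i -s N,
  # with N at least as large as the biggest client count used here.)
  for c in 1 2 4 8 16 32 64; do
      pgbench -c $c -t 10000 $DB | grep tps
  done

Each run prints a tps line, so the falloff (or lack of it) as clients increase can be read straight off the output.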
[ { "msg_contents": "Why not simply plug your server into a UPS and get 10-20x the performance using the same approach (with OS IO cache)?\r\n\r\nIn fact, with the server it's more robust, as you don't have to transit several intervening physical devices to get to the RAM.\r\n\r\nIf you want a file interface, declare a RAMDISK.\r\n\r\nCheaper/faster/improved reliability.\r\n\r\n- Luke\r\n\r\n----- Original Message -----\r\nFrom: [email protected] <[email protected]>\r\nTo: Glyn Astill <[email protected]>\r\nCc: [email protected] <[email protected]>\r\nSent: Fri Jan 23 04:39:07 2009\r\nSubject: Re: [PERFORM] SSD performance\r\n\r\nOn Fri, 23 Jan 2009, Glyn Astill wrote:\r\n\r\n>> I spotted a new interesting SSD review. it's a $379\r\n>> 5.25\" drive bay device that holds up to 8 DDR2 DIMMS\r\n>> (up to 8G per DIMM) and appears to the system as a SATA\r\n>> drive (or a pair of SATA drives that you can RAID-0 to get\r\n>> past the 300MB/s SATA bottleneck)\r\n>>\r\n>\r\n> Sounds very similar to the Gigabyte iRam drives of a few years ago\r\n>\r\n> http://en.wikipedia.org/wiki/I-RAM\r\n\r\nsimilar concept, but there are some significant differences\r\n\r\nthe iRam was limited to 4G, used DDR ram, and used a PCI slot for power\r\n(which can be in\r\nshort supply nowdays)\r\n\r\nthis new drive can go to 64G, uses DDR2 ram (cheaper than DDR nowdays),\r\ngets powered like a normal SATA drive, can use two SATA channels (to be\r\nable to get past the throughput limits of a single SATA interface), and\r\nhas a CF card slot to backup the data to if the system powers down.\r\n\r\nplus the performance appears to be significantly better (even without\r\nusing the second SATA interface)\r\n\r\nDavid Lang\r\n\r\n\r\n--\r\nSent via pgsql-performance mailing list ([email protected])\r\nTo make changes to your subscription:\r\nhttp://www.postgresql.org/mailpref/pgsql-performance\r\n", "msg_date": "Fri, 23 Jan 2009 03:41:51 -0800", "msg_from": "Luke Lonergan <[email protected]>", "msg_from_op": true, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 23 Jan 2009, Luke Lonergan wrote:\n> Why not simply plug your server into a UPS and get 10-20x the \n> performance using the same approach (with OS IO cache)?\n>\n> In fact, with the server it's more robust, as you don't have to transit \n> several intervening physical devices to get to the RAM.\n>\n> If you want a file interface, declare a RAMDISK.\n>\n> Cheaper/faster/improved reliability.\n\nI'm sure we have gone over that one before. With that method, your data is \nat the mercy of the *entire system*. Any fault in any part of the computer \n(hardware or software) will result in the loss of all your data. In \ncontrast, a RAM-based SSD is isolated from such failures, especially if it \nbacks up to another device on power fail. You can completely trash the \ncomputer, remove the SSD and put it into another machine, and boot it up \nas normal.\n\nComputers break. Nothing is going to stop that from happening. 
Except VMS \nmaybe.\n\nNot arguing that your method is faster though.\n\nMatthew\n\n-- \n \"Finger to spiritual emptiness underlying everything.\"\n -- How a foreign C manual referred to a \"pointer to void.\"\n", "msg_date": "Fri, 23 Jan 2009 11:53:05 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 23 Jan 2009, Luke Lonergan wrote:\n\n> Why not simply plug your server into a UPS and get 10-20x the \n> performance using the same approach (with OS IO cache)?\n>\n> In fact, with the server it's more robust, as you don't have to transit \n> several intervening physical devices to get to the RAM.\n>\n> If you want a file interface, declare a RAMDISK.\n>\n> Cheaper/faster/improved reliability.\n\nyou can also disable fsync to not wait for your disks if you trust your \nsystem to never go down. personally I don't trust any system to not go \ndown.\n\nif you have a system crash or reboot your RAMDISK will loose it's content, \nthis device won't.\n\nalso you are limited to how many DIMMS you can put on your motherboard \n(for the dual-socket systems I am buying nowdays, I'm limited to 32G of \nram) going to a different motherboard that can support additional ram can \nbe quite expensive.\n\nthis isn't for everyone, but for people who need the performance, data \nreliability, this looks like a very interesting option.\n\nDavid Lang\n\n> - Luke\n>\n> ----- Original Message -----\n> From: [email protected] <[email protected]>\n> To: Glyn Astill <[email protected]>\n> Cc: [email protected] <[email protected]>\n> Sent: Fri Jan 23 04:39:07 2009\n> Subject: Re: [PERFORM] SSD performance\n>\n> On Fri, 23 Jan 2009, Glyn Astill wrote:\n>\n>>> I spotted a new interesting SSD review. it's a $379\n>>> 5.25\" drive bay device that holds up to 8 DDR2 DIMMS\n>>> (up to 8G per DIMM) and appears to the system as a SATA\n>>> drive (or a pair of SATA drives that you can RAID-0 to get\n>>> past the 300MB/s SATA bottleneck)\n>>>\n>>\n>> Sounds very similar to the Gigabyte iRam drives of a few years ago\n>>\n>> http://en.wikipedia.org/wiki/I-RAM\n>\n> similar concept, but there are some significant differences\n>\n> the iRam was limited to 4G, used DDR ram, and used a PCI slot for power\n> (which can be in\n> short supply nowdays)\n>\n> this new drive can go to 64G, uses DDR2 ram (cheaper than DDR nowdays),\n> gets powered like a normal SATA drive, can use two SATA channels (to be\n> able to get past the throughput limits of a single SATA interface), and\n> has a CF card slot to backup the data to if the system powers down.\n>\n> plus the performance appears to be significantly better (even without\n> using the second SATA interface)\n>\n> David Lang\n>\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Fri, 23 Jan 2009 04:52:27 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "Luke Lonergan wrote:\n> Why not simply plug your server into a UPS and get 10-20x the performance using the same approach (with OS IO cache)?\n\nA big reason is that your machine may already have as much RAM as is\ncurrently economical to install. Hardware with LOTS of RAM slots can\ncost quite a bit.\n\nAnother reason is that these devices won't lose data because of an\nunexpected OS reboot. 
If they're fitted with a battery backup and CF\nmedia for emergency write-out, they won't lose data if your UPS runs out\nof juice either.\n\nI'd be much more confident with something like those devices than I\nwould with an OS ramdisk plus startup/shutdown scripts to initialize it\nfrom a file and write it out to a file. Wouldn't it be a pain if the UPS\ndidn't give the OS enough warning to write the RAM disk out before\nlosing power...\n\nIn any case, you're very rarely better off dedicating host memory to a\nramdisk rather than using the normal file system and letting the host\ncache it. A ramdisk really only seems to help when you're really using\nit to bypass safeties like the effects of fsync() and ordered\njournaling. There are other ways to avoid those if you really don't care\nabout your data.\n\nThese devices would be interesting for a few uses, IMO. One is temp\ntable space and sort space in Pg. Another is scratch space for apps\n(like Photoshop) that do their own VM management. There's also potential\nfor use as 1st priority OS swap space, though at least on Linux I think\nthe CPU overhead involved in swapping is so awful you wouldn't benefit\nfrom it much.\n\nI've been hoping this sort of thing would turn up again in a new\nincarnation with battery backup and CF/SD for BBU-flat safety.\n\n--\nCraig Ringer\n", "msg_date": "Fri, 23 Jan 2009 22:50:09 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "* Craig Ringer:\n\n> I'd be much more confident with something like those devices than I\n> would with an OS ramdisk plus startup/shutdown scripts to initialize it\n> from a file and write it out to a file. Wouldn't it be a pain if the UPS\n> didn't give the OS enough warning to write the RAM disk out before\n> losing power...\n\nThe cache warm-up time can also be quite annoying. Of course, with\nflash-backed DRAM, this is a concern as long as you use the cheaper,\nslower variants for the backing storage.\n\n-- \nFlorian Weimer <[email protected]>\nBFK edv-consulting GmbH http://www.bfk.de/\nKriegsstraße 100 tel: +49-721-96201-1\nD-76133 Karlsruhe fax: +49-721-96201-99\n", "msg_date": "Fri, 23 Jan 2009 15:30:26 +0100", "msg_from": "Florian Weimer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "[email protected] wrote:\n> On Fri, 23 Jan 2009, Luke Lonergan wrote:\n> \n>> Why not simply plug your server into a UPS and get 10-20x the\n>> performance using the same approach (with OS IO cache)?\n>>\n>> In fact, with the server it's more robust, as you don't have to\n>> transit several intervening physical devices to get to the RAM.\n>>\n>> If you want a file interface, declare a RAMDISK.\n>>\n>> Cheaper/faster/improved reliability.\n> \n> you can also disable fsync to not wait for your disks if you trust your\n> system to never go down. 
personally I don't trust any system to not go\n> down.\n> \n> if you have a system crash or reboot your RAMDISK will loose it's\n> content, this device won't.\n> \n> also you are limited to how many DIMMS you can put on your motherboard\n> (for the dual-socket systems I am buying nowdays, I'm limited to 32G of\n> ram) going to a different motherboard that can support additional ram\n> can be quite expensive.\n> \n> this isn't for everyone, but for people who need the performance, data\n> reliability, this looks like a very interesting option.\n> \n> David Lang\n> \n>> - Luke\n>>\n>> ----- Original Message -----\n>> From: [email protected]\n>> <[email protected]>\n>> To: Glyn Astill <[email protected]>\n>> Cc: [email protected] <[email protected]>\n>> Sent: Fri Jan 23 04:39:07 2009\n>> Subject: Re: [PERFORM] SSD performance\n>>\n>> On Fri, 23 Jan 2009, Glyn Astill wrote:\n>>\n>>>> I spotted a new interesting SSD review. it's a $379\n>>>> 5.25\" drive bay device that holds up to 8 DDR2 DIMMS\n>>>> (up to 8G per DIMM) and appears to the system as a SATA\n>>>> drive (or a pair of SATA drives that you can RAID-0 to get\n>>>> past the 300MB/s SATA bottleneck)\n>>>>\n>>>\n>>> Sounds very similar to the Gigabyte iRam drives of a few years ago\n>>>\n>>> http://en.wikipedia.org/wiki/I-RAM\n>>\n>> similar concept, but there are some significant differences\n>>\n>> the iRam was limited to 4G, used DDR ram, and used a PCI slot for power\n>> (which can be in\n>> short supply nowdays)\n>>\n>> this new drive can go to 64G, uses DDR2 ram (cheaper than DDR nowdays),\n>> gets powered like a normal SATA drive, can use two SATA channels (to be\n>> able to get past the throughput limits of a single SATA interface), and\n>> has a CF card slot to backup the data to if the system powers down.\n>>\n>> plus the performance appears to be significantly better (even without\n>> using the second SATA interface)\n>>\n>> David Lang\n>>\n>>\n>> -- \n>> Sent via pgsql-performance mailing list\n>> ([email protected])\n>> To make changes to your subscription:\n>> http://www.postgresql.org/mailpref/pgsql-performance\n>>\n> \n\nCan I call a time out here? :) There are \"always\" going to be memory\nhierarchies -- registers on the processors, multiple levels of caches,\nRAM used for programs / data / I-O caches, and non-volatile rotating\nmagnetic storage. And there are \"always\" going to be new hardware\ntechnologies cropping up at various levels in the hierarchy.\n\nThere are always going to be cost / reliability / performance\ntrade-offs, leading to \"interesting\" though perhaps not really\nbusiness-relevant \"optimizations\". The equations are there for anyone to\nuse should they want to optimize for a given workload at a given point\nin time with given business / service level constraints. See\n\nhttp://www.amazon.com/Storage-Network-Performance-Analysis-Huseyin/dp/076451685X\n\nfor all the details.\n\nI question, however, whether there's much point in seeking an optimum.\nAs was noted long ago by Nobel laureate Herbert Simon, in actual fact\nmanagers / businesses rarely optimize. Instead, they satisfice. They do\nwhat is \"good enough\", not what is best. 
And my own personal opinion in\nthe current context -- PostgreSQL running on an open-source operating\nsystem -- is that\n\n* large-capacity inexpensive rotating disks,\n* a hardware RAID controller containing a battery-backed cache,\n* as much RAM as one can afford and the chassis will hold, and\n* enough cores to keep the workload from becoming processor-bound\n\nare good enough. And given that, a moderate amount of software tweaking\nand balancing will get you close to a local optimum.\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Fri, 23 Jan 2009 09:22:11 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 2009-01-23 at 09:22 -0800, M. Edward (Ed) Borasky wrote:\n\n> I question, however, whether there's much point in seeking an optimum.\n> As was noted long ago by Nobel laureate Herbert Simon, in actual fact\n> managers / businesses rarely optimize. Instead, they satisfice. They do\n> what is \"good enough\", not what is best. And my own personal opinion in\n> the current context -- PostgreSQL running on an open-source operating\n> system -- is that\n\nThis community is notorious for \"optimum\". MySQL is notorious for \"satisfy\".\n\nWhich one would you rather store your financial information in?\n\nI actually agree with you to a degree. A loud faction of this community\nspends a little too much time mentally masturbating but without that we\nwouldn't have a lot of the very interesting features we have now.\n\n\nThere is no correct in left.\nThere is no correct in right.\nCorrectness is the result of friction caused by the mingling of the two.\n\nSincerely,\n\nJoshua D. Drake\n\n\n-- \nPostgreSQL - XMPP: [email protected]\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Fri, 23 Jan 2009 09:30:07 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Fri, 23 Jan 2009, M. Edward (Ed) Borasky wrote:\n> * large-capacity inexpensive rotating disks,\n> * a hardware RAID controller containing a battery-backed cache,\n> * as much RAM as one can afford and the chassis will hold, and\n> * enough cores to keep the workload from becoming processor-bound\n>\n> are good enough. And given that, a moderate amount of software tweaking\n> and balancing will get you close to a local optimum.\n\nThat's certainly the case for very large-scale (in terms of data quantity) \ndatabases. However, these solid state devices do have quite an advantage \nwhen what you want to scale is the performance, rather than the data \nquantity.\n\nThe thing is, it isn't just a matter of storage heirarchy. There's the \nvolatility matter there as well. What you have in these SSDs is a device \nwhich is non-volatile, like a disc, but fast, like RAM.\n\nMatthew\n\n-- \n Anyone who goes to a psychiatrist ought to have his head examined.\n", "msg_date": "Fri, 23 Jan 2009 17:39:28 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "Joshua D. Drake wrote:\n> This community is notorious for \"optimum\". MySQL is notorious for \"satisfy\".\n\nWithin *this* community, MySQL is just plain notorious. 
Let's face it --\nwe are *not* dolphin-safe.\n\n<ducking>\n\n> \n> Which one would you rather store your financial information in?\n\nThe one that had the best data integrity, taking into account the RDBMS\n*and* the hardware and other software.\n\n> I actually agree with you to a degree. A loud faction of this community\n> spends a little too much time mentally masturbating but without that we\n> wouldn't have a lot of the very interesting features we have now.\n\nYes -- you will never hear *me* say \"Premature optimization is the root\nof all evil.\" I don't know why Hoare or Dijkstra or Knuth or Wirth or\nwhoever coined that phrase, but it's been used too many times as an\nexcuse for not doing any performance engineering, forcing the deployed\n\"solution\" to throw hardware at performance issues.\n\n\n> \n> \n> There is no correct in left.\n> There is no correct in right.\n> Correctness is the result of friction caused by the mingling of the two.\n\n\"The only good I/O is a dead I/O\" -- Mark Friedman\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Fri, 23 Jan 2009 19:24:36 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "Craig Ringer wrote:\n> These devices would be interesting for a few uses, IMO. One is temp\n> table space and sort space in Pg. Another is scratch space for apps\n> (like Photoshop) that do their own VM management. There's also potential\n> \nSurely temp tables and sort space isn't subject to fsync and won't gain \nthat much since they\nshould stay in the OS cache? The device will surely help seek- or \nsync-bound tasks.\n\nDoesn't that make it a good candidate for WAL and hot tables?\n\nJames\n\n", "msg_date": "Tue, 27 Jan 2009 06:37:39 +0000", "msg_from": "James Mansion <[email protected]>", "msg_from_op": false, "msg_subject": "Re: SSD performance" }, { "msg_contents": "On Tue, 27 Jan 2009, James Mansion wrote:\n\n> Craig Ringer wrote:\n>> These devices would be interesting for a few uses, IMO. One is temp\n>> table space and sort space in Pg. Another is scratch space for apps\n>> (like Photoshop) that do their own VM management. There's also potential\n>> \n> Surely temp tables and sort space isn't subject to fsync and won't gain that \n> much since they\n> should stay in the OS cache? The device will surely help seek- or sync-bound \n> tasks.\n>\n> Doesn't that make it a good candidate for WAL and hot tables?\n\nit doesn't just gain on fsync speed, but also raw transfer speed.\n\nif everything stays in the OS buffers than you are right, but when you \nstart to exceed those buffers is when fast storage like this is very \nuseful.\n\nDavid Lang\n", "msg_date": "Mon, 26 Jan 2009 23:51:33 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: SSD performance" } ]
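To make the temp-space idea from this thread concrete, here is a minimal sketch, assuming PostgreSQL 8.3 or later and a fast device already mounted at /mnt/fastdisk (a hypothetical mount point), of steering temporary tables and sort spill files onto a dedicated tablespace while normal tables and WAL stay on ordinary storage:

  # Prepare a directory on the fast device, owned by the postgres user.
  mkdir -p /mnt/fastdisk/pgtemp
  chown postgres:postgres /mnt/fastdisk/pgtemp

  psql -U postgres -d mydb <<'SQL'
  -- Tablespace on the fast device, used only for temporary files.
  CREATE TABLESPACE fasttemp LOCATION '/mnt/fastdisk/pgtemp';
  -- Per-session here; set temp_tablespaces in postgresql.conf to make it the default.
  SET temp_tablespaces = fasttemp;
  SQL

Losing such a device only costs in-flight temporary files, which is why scratch space is a reasonable use for it even without battery backup, while tables and WAL are not unless the device is genuinely non-volatile.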
[ { "msg_contents": "Hmm - I wonder what OS it runs ;-)\r\n\r\n- Luke\r\n\r\n----- Original Message -----\r\nFrom: [email protected] <[email protected]>\r\nTo: Luke Lonergan\r\nCc: [email protected] <[email protected]>; [email protected] <[email protected]>\r\nSent: Fri Jan 23 04:52:27 2009\r\nSubject: Re: [PERFORM] SSD performance\r\n\r\nOn Fri, 23 Jan 2009, Luke Lonergan wrote:\r\n\r\n> Why not simply plug your server into a UPS and get 10-20x the\r\n> performance using the same approach (with OS IO cache)?\r\n>\r\n> In fact, with the server it's more robust, as you don't have to transit\r\n> several intervening physical devices to get to the RAM.\r\n>\r\n> If you want a file interface, declare a RAMDISK.\r\n>\r\n> Cheaper/faster/improved reliability.\r\n\r\nyou can also disable fsync to not wait for your disks if you trust your\r\nsystem to never go down. personally I don't trust any system to not go\r\ndown.\r\n\r\nif you have a system crash or reboot your RAMDISK will loose it's content,\r\nthis device won't.\r\n\r\nalso you are limited to how many DIMMS you can put on your motherboard\r\n(for the dual-socket systems I am buying nowdays, I'm limited to 32G of\r\nram) going to a different motherboard that can support additional ram can\r\nbe quite expensive.\r\n\r\nthis isn't for everyone, but for people who need the performance, data\r\nreliability, this looks like a very interesting option.\r\n\r\nDavid Lang\r\n\r\n> - Luke\r\n>\r\n> ----- Original Message -----\r\n> From: [email protected] <[email protected]>\r\n> To: Glyn Astill <[email protected]>\r\n> Cc: [email protected] <[email protected]>\r\n> Sent: Fri Jan 23 04:39:07 2009\r\n> Subject: Re: [PERFORM] SSD performance\r\n>\r\n> On Fri, 23 Jan 2009, Glyn Astill wrote:\r\n>\r\n>>> I spotted a new interesting SSD review. it's a $379\r\n>>> 5.25\" drive bay device that holds up to 8 DDR2 DIMMS\r\n>>> (up to 8G per DIMM) and appears to the system as a SATA\r\n>>> drive (or a pair of SATA drives that you can RAID-0 to get\r\n>>> past the 300MB/s SATA bottleneck)\r\n>>>\r\n>>\r\n>> Sounds very similar to the Gigabyte iRam drives of a few years ago\r\n>>\r\n>> http://en.wikipedia.org/wiki/I-RAM\r\n>\r\n> similar concept, but there are some significant differences\r\n>\r\n> the iRam was limited to 4G, used DDR ram, and used a PCI slot for power\r\n> (which can be in\r\n> short supply nowdays)\r\n>\r\n> this new drive can go to 64G, uses DDR2 ram (cheaper than DDR nowdays),\r\n> gets powered like a normal SATA drive, can use two SATA channels (to be\r\n> able to get past the throughput limits of a single SATA interface), and\r\n> has a CF card slot to backup the data to if the system powers down.\r\n>\r\n> plus the performance appears to be significantly better (even without\r\n> using the second SATA interface)\r\n>\r\n> David Lang\r\n>\r\n>\r\n> --\r\n> Sent via pgsql-performance mailing list ([email protected])\r\n> To make changes to your subscription:\r\n> http://www.postgresql.org/mailpref/pgsql-performance\r\n>\r\n", "msg_date": "Fri, 23 Jan 2009 03:53:24 -0800", "msg_from": "Luke Lonergan <[email protected]>", "msg_from_op": true, "msg_subject": "Re: SSD performance" } ]
[ { "msg_contents": "Hey All,\n\nI previously posted about the troubles I was having dumping a >1Tb (size \nwith indexes) table. The rows in the table could be very large. Using \nperl's DBD::Pg we were somehow able to add these very large rows \nwithout running into the >1Gb row bug. With everyone's help I \ndetermined I needed to move the largest rows elsewhere. This \nseemed to solve that problem but a new problem has cropped up.\n\nWhen I ran pg_dump again, it ran successfully without error. Although \nthere were no errors, pg_dump dumped less than half of the rows that \nactually exist in the table. When examining the dump file (I did not \ndump in -F c format) the copy statement created by the dump is \nterminated correctly (with a \\.) and there are indeed 300+ million rows \nin the file as opposed to the 700+ million I was expecting. I don't \nbelieve I specified anything that would have caused pg_dump to dump a \ntruncated version of the table. The last row successfully dumped \ncontains only normal ascii characters and is not particularly big in \nsize. The row immediately after the last row successfully dumped \ncontains an installer file (.bin) stored in a bytea cell. It is about \n138 Mb in size. \n\nI've also been having trouble recreating this situation on a smaller DB.\n\nWe are dumping the table using this command.\n\n/var/lib/pgsql-8.3.5/bin/pg_dump -O -x -t large_table mydb | gzip -c \n > large_table.pgsql.gz\n\n\nThe stats of the db server are as follows,\n\nProcessors: 4x Opteron 2.4 Ghz cores\nMemory: 16 GB \nDisks: 42x 15K SCSI 146 GB disks.\n\n\n\nThanks again for your help,\nTed\n", "msg_date": "Fri, 23 Jan 2009 10:56:44 -0500", "msg_from": "Ted Allen <[email protected]>", "msg_from_op": true, "msg_subject": "More Troubles Dumping a Very Large Table" } ]
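One way to pin down how far a dump like the one above actually got is to compare the server's row count with the number of data lines inside the dump's COPY block; in COPY text format every row is exactly one line, with embedded newlines written as escapes, so a line count is a fair proxy. A rough sketch, reusing the table and file names from the message (adjust to match the real dump):

  # Authoritative count from the server (slow on a table this size).
  psql -d mydb -t -c "SELECT count(*) FROM large_table;"

  # Count from the dump: lines from the COPY statement to the \. terminator,
  # then subtract 2 for the COPY line and the terminator itself.
  zcat large_table.pgsql.gz | sed -n '/^COPY large_table /,/^\\\./p' | wc -l

If the two numbers diverge, inspecting the last row present in the dump, as the poster did, shows exactly where pg_dump stopped writing.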
[ { "msg_contents": "Hi\n\nI have noticed that my transaction log has quite a large activity volume \n(up to 15MB per transaction), so with the amount of data I am using I \nhave manually moved the pg_xlog directory to a different disk. This \nallows me to have both the table space and transaction log on two \ndifferent high performance disks.\nBut my question is, since I can do this with tablespaces for tables, \nindexes and so on, is there any possibility to do a similar thing for \nthe transaction log from inside postgres? as in\n\n\tcreate indexspace = ....\n\nregards\n\nthomas\n", "msg_date": "Sat, 24 Jan 2009 08:02:06 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "\"tablespace\" for tranaction log?" }, { "msg_contents": "Thomas Finneid wrote:\n> But my question is, since I can do this with tablespaces for tables,\n> indexes and so on, is there any possibility to do a similar thing for\n> the transaction log from inside postgres? as in\n> \nNo. Because the transaction log is for the entire cluster and it is too risky to\ndo such a change when the server is running. You need to stop the database and\nmake the change. Also, you can set up the xlog to a different location at\ninitdb time.\n\n\n-- \n Euler Taveira de Oliveira\n http://www.timbira.com/\n", "msg_date": "Sat, 24 Jan 2009 20:11:11 -0200", "msg_from": "Euler Taveira de Oliveira <[email protected]>", "msg_from_op": false, "msg_subject": "Re: \"tablespace\" for tranaction log?" } ]
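For reference, the manual procedure Euler alludes to is a symlink put in place while the server is stopped; a rough sketch, assuming a data directory at /var/lib/pgsql/data and a second disk mounted at /disk2 (both hypothetical paths), run as the postgres user:

  pg_ctl -D /var/lib/pgsql/data stop
  mv /var/lib/pgsql/data/pg_xlog /disk2/pg_xlog
  ln -s /disk2/pg_xlog /var/lib/pgsql/data/pg_xlog
  pg_ctl -D /var/lib/pgsql/data start

Recent initdb versions can also place the transaction log elsewhere from the start (initdb --xlogdir), which avoids the symlink entirely.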
[ { "msg_contents": "Hi\n\nI just experienced a performance loss on inserts when redefining my \nindex on a table.\n\nI have a database with the following table\n\ntable:\n id1\tint\n id2\tint\n id3\tint\n id4\tint\n val1\tfloat\n ....\n tablespace dbspace\n\nthe data id distribution is hierarchical and even, well fitting to a \nbtree, there is about 20000 rows per insert (actually a jdbc COPY)\n\noriginally I had an index on all id felds\n\nindex:\n btree on (id1, id2, id3, id4) tablespace indexspace\n\nthat gave me an insert performance of 230 ms\n\nbecause my query does not need id2 I changed the index and removed id2 \nfrom the criteria:\n\nindex:\n btree on (id1, id3, id4) tablespace indexspace\n\nnow an insert takes approx 330-430 ms\n\nAnybody have any ideas why that is? I was expecting it to take \napproximately the same amount of time or less, since there is an element \n less in the criteria.\n\nregards\n\nthomas\n\n\n", "msg_date": "Sat, 24 Jan 2009 11:57:21 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "strange index performance?" }, { "msg_contents": "On Sat, Jan 24, 2009 at 3:57 AM, Thomas Finneid <[email protected]> wrote:\n> Hi\n>\n> I just experienced a performance loss on inserts when redefining my index on\n> a table.\n>\n> I have a database with the following table\n>\n> table:\n> id1 int\n> id2 int\n> id3 int\n> id4 int\n> val1 float\n> ....\n> tablespace dbspace\n>\n> the data id distribution is hierarchical and even, well fitting to a btree,\n> there is about 20000 rows per insert (actually a jdbc COPY)\n\nIs this table constantly growing, or is it at a stable plateu? I'd\nassume a constantly growing table, or one with bloat problems would\nget slower over time. About how many rows does this table have?\n", "msg_date": "Sat, 24 Jan 2009 07:57:10 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "Also, what other kind of usage patterns are going on. I wrote a\nsimple test case for this and on a table with 100,000 entries already\nin it, then inserting 10,000 in a transaction and 10,000 outside of a\ntransaction, I get insert rates of 0.1 ms and 0.5 ms respectively.\nWith a table with 1,000,000 rows already in place, the insert times\nwith all the fields in an index was 1.5ms and 4.3ms respectively.\n\nWith only i1, i3, i4, val1 in the index, the numbers for a table with\n100,000 entries to start with was 0.1ms and 0.5 ms, just like the\nabove with the larger index. With a 1,000,000 initial table, inserts\ntake 2.1 and 3.0 ms respectively.\n\nSo I don't think you've found the cause of your problem with the smaller index.\n", "msg_date": "Sat, 24 Jan 2009 12:32:58 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "Scott Marlowe wrote:\n> On Sat, Jan 24, 2009 at 3:57 AM, Thomas Finneid <[email protected]> wrote:\n >\n> Is this table constantly growing, or is it at a stable plateu? I'd\n> assume a constantly growing table, or one with bloat problems would\n> get slower over time. About how many rows does this table have?\n\nThe table is constantly growing, by 20000 rows per second.\nI did a test with an index for all id fields, and the insert time is \nconstant for every insert up to 1.2 billion rows. 
Then I stopped the \ntest because I didn't have more disk space allocated.\n\nregards\n\nthomas\n", "msg_date": "Sun, 25 Jan 2009 08:45:05 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "Scott Marlowe wrote:\n> Also, what other kind of usage patterns are going on. \n\nFor this test there was nothing else going on, it was just that one \nwriter. The complete usage pattern is that there is one writer that \nwrites this data, about 20000 rows per second, and then a small number \nof readers that query for some data based on id1,3,4.\n(To help you visualize it, think of a query where you want to know the \nnames all residents on the 4th floor in that particular street \nindependent of house number. So id1 would be the street, id2 would be \nthe house number, id3 would be the floor number and id4 would be the \napartment number. Such a query would only use id1,3,4)\n\n> I wrote a\n> simple test case for this and on a table with 100,000 entries already\n> in it, then inserting 10,000 in a transaction and 10,000 outside of a\n> transaction, I get insert rates of 0.1 ms and 0.5 ms respectively.\n> With a table with 1,000,000 rows already in place, the insert times\n> with all the fields in an index was 1.5ms and 4.3ms respectively.\n> \n> With only i1, i3, i4, val1 in the index, the numbers for a table with\n> 100,000 entries to start with was 0.1ms and 0.5 ms, just like the\n> above with the larger index. With a 1,000,000 initial table, inserts\n> take 2.1 and 3.0 ms respectively.\n\nHow do you run that setup, because those times are amazing, my inserts \ntake about 220ms, constantly from the first row in the table to the 1.2 \nbillionth row. The client I was using for the inserts is a bare-bone use \ncase simulation tool I have written in java, to test different insert \nand query strategies for this application. Its using JDBC copy to do the \ninserts.\n\nThere is one thing you should know though, and that is that the real \ntable I am using has 20 value fields where the 6 first fields contains a \nvalue, but that does not affect the difference int eh the execution time \nof the two indexes.\n\n> So I don't think you've found the cause of your problem with the smaller index.\n\nI dont quite understand what you are saying here, but I assume you are \nsaying that the smaller index is not the cause of the increased insert time?\n\nIf so, I did the test with both indexes on exactly the same db and \nsetup. And when the index uses all four ids the insert time is larger \nthan if I only use id1,3,4.\n\nWhat concerns me about your test, is that you dont seem to get constant \ninsert times, so there is a difference between the two tests, which \nmiuch be why you dont see the problem I am seeing with my index.\n\nregards\n\nthomas\n", "msg_date": "Sun, 25 Jan 2009 09:14:45 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index performance?" 
}, { "msg_contents": "On Sun, Jan 25, 2009 at 1:14 AM, Thomas Finneid <[email protected]> wrote:\n> Scott Marlowe wrote:\n>>\n>> I wrote a\n>> simple test case for this and on a table with 100,000 entries already\n>> in it, then inserting 10,000 in a transaction and 10,000 outside of a\n>> transaction, I get insert rates of 0.1 ms and 0.5 ms respectively.\n>> With a table with 1,000,000 rows already in place, the insert times\n>> with all the fields in an index was 1.5ms and 4.3ms respectively.\n>>\n>> With only i1, i3, i4, val1 in the index, the numbers for a table with\n>> 100,000 entries to start with was 0.1ms and 0.5 ms, just like the\n>> above with the larger index. With a 1,000,000 initial table, inserts\n>> take 2.1 and 3.0 ms respectively.\n>\n> How do you run that setup, because those times are amazing, my inserts take\n> about 220ms, constantly from the first row in the table to the 1.2 billionth\n> row. The client I was using for the inserts is a bare-bone use case\n> simulation tool I have written in java, to test different insert and query\n> strategies for this application. Its using JDBC copy to do the inserts.\n\nThe setup was a simple PHP script. I've attached it to this email.\n\n> There is one thing you should know though, and that is that the real table I\n> am using has 20 value fields where the 6 first fields contains a value, but\n> that does not affect the difference int eh the execution time of the two\n> indexes.\n\nYes, but it will increase the insert time to the table depending very\nmuch on the size of those other fields.\n\n>> So I don't think you've found the cause of your problem with the smaller\n>> index.\n>\n> I dont quite understand what you are saying here, but I assume you are\n> saying that the smaller index is not the cause of the increased insert time?\n\nYes, that's what I was saying.\n\n> If so, I did the test with both indexes on exactly the same db and setup.\n> And when the index uses all four ids the insert time is larger than if I\n> only use id1,3,4.\n\nI thought it was the other way around for you, that the smaller index\nwas slower.\n\n> What concerns me about your test, is that you dont seem to get constant\n> insert times, so there is a difference between the two tests, which miuch be\n> why you dont see the problem I am seeing with my index.\n\nYes, you need to look at things like increasing the number of wal\nsegments and checkpointing. If a checkpoint is kicking in it's going\nto slow everything down.\n\nWhat version pgsql are you running? My tests were on 8.3.x on a\ncore2duo laptop with a stock slow 80Gig hard drive, but most likely\nit's lying about fsync, so that could explain some of the numbers.\n\nI just ran it on my real server, since it's late at night, there's not\nmuch else going on. With 1M rows created ahead of time I got similar\nnumbers:\n\n0.12 ms per insert with all 10,000 inserted in a transaction\n0.24 ms per insert with each insert being individual transactions\n(i.e. no begin; commt; wrapped around them all) This is on a machine\nwith a 12 disk RAID-10 array under an Areca 1680ix controller with\n512M battery backed cache. Note that the table had no other columns\nin it like yours does.", "msg_date": "Sun, 25 Jan 2009 02:58:09 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index performance?" 
}, { "msg_contents": "Scott Marlowe wrote:\n> On Sun, Jan 25, 2009 at 1:14 AM, Thomas Finneid <[email protected]> wrote:\n>> Scott Marlowe wrote:\n>>> So I don't think you've found the cause of your problem with the smaller\n>>> index.\n\nOk I understand, but why dont you think the index is the problem?\n\n>> If so, I did the test with both indexes on exactly the same db and setup.\n>> And when the index uses all four ids the insert time is larger than if I\n>> only use id1,3,4.\n> \n> I thought it was the other way around for you, that the smaller index\n> was slower.\n\nSorry for the mistake, I meant to say the smaller index causes the \nslowest insert time.\n\n> What version pgsql are you running? My tests were on 8.3.x on a\n\nI am running on pg 8.2.x (kubuntu x64) with 8GB ram, 8 opteron cores and \n8 disks on a Areca Raid controller\n\n> 0.12 ms per insert with all 10,000 inserted in a transaction\n> 0.24 ms per insert with each insert being individual transactions\n> (i.e. no begin; commt; wrapped around them all) This is on a machine\n> with a 12 disk RAID-10 array under an Areca 1680ix controller with\n> 512M battery backed cache. Note that the table had no other columns\n> in it like yours does.\n\nAre you sure you mean to say 0.12 ms and not 0.12 seconds? My server \nalso uses an Areca RAID controller (8 disk controller model), but it \ndoes not matter how many disks are in the stripe, its still the same \nperformance. So, if you get that performance then I have must have set \nup postgres, the OS or the RAID controller wrong. What are the most \nimportant configurations you did to get that performance?\n\n\n", "msg_date": "Sun, 25 Jan 2009 22:54:55 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "On Sun, Jan 25, 2009 at 2:54 PM, Thomas Finneid <[email protected]> wrote:\n> Scott Marlowe wrote:\n>>\n>> On Sun, Jan 25, 2009 at 1:14 AM, Thomas Finneid <[email protected]> wrote:\n>>>\n>>> Scott Marlowe wrote:\n>>>>\n>>>> So I don't think you've found the cause of your problem with the smaller\n>>>> index.\n>\n> Ok I understand, but why dont you think the index is the problem?\n\nBecause on any decent hardware you should be able to insert a single\nrow in way under 200ms. Whether it's got an index on it or not.\n\n>> I thought it was the other way around for you, that the smaller index\n>> was slower.\n>\n> Sorry for the mistake, I meant to say the smaller index causes the slowest\n> insert time.\n\nI'm guessing that you just had more data in the table or something by\nthe time you tested that, or some cron job was running in the\nbackground, or some other issue, not the index.\n\n>> What version pgsql are you running? My tests were on 8.3.x on a\n>\n> I am running on pg 8.2.x (kubuntu x64) with 8GB ram, 8 opteron cores and 8\n> disks on a Areca Raid controller\n\nQuite a similar machine. write back cache with battery backed\ncontroller on the controller? A really old Areca like an 11xx series\nor a newer one 12xx, 16xx?\n\n>> 0.12 ms per insert with all 10,000 inserted in a transaction\n>> 0.24 ms per insert with each insert being individual transactions\n>> (i.e. no begin; commt; wrapped around them all) This is on a machine\n>> with a 12 disk RAID-10 array under an Areca 1680ix controller with\n>> 512M battery backed cache. Note that the table had no other columns\n>> in it like yours does.\n>\n> Are you sure you mean to say 0.12 ms and not 0.12 seconds? 
My server also\n\n0.12 seconds per insert is pretty slow. 10 inserts would take a\nsecond. I'm inserting 10,000 rows in about 2 seconds. Each insert is\ndefinitely in the 0.12 millisecond range.\n\n> uses an Areca RAID controller (8 disk controller model), but it does not\n> matter how many disks are in the stripe, its still the same performance. So,\n> if you get that performance then I have must have set up postgres, the OS or\n> the RAID controller wrong. What are the most important configurations you\n> did to get that performance?\n\nHard to say. What does bonnie++ have to say about the performance of\nyour RAID array?\n", "msg_date": "Sun, 25 Jan 2009 15:12:04 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "Scott Marlowe wrote:\n\n> I'm guessing that you just had more data in the table or something by\n> the time you tested that, or some cron job was running in the\n> background, or some other issue, not the index.\n\nIt starts from scratch and builds up. Every insert has constant time \nfrom the first to the last row, ie. row 1 to row 1.2 billion.\nThere is no background jobs or other disturbances.\n\n> Quite a similar machine. write back cache with battery backed\n> controller on the controller? A really old Areca like an 11xx series\n> or a newer one 12xx, 16xx?\n\nIts an Areca 1220. write back is enabled but it does not have a BBU, \nbecause its an development machine and not a production machine.\n\n> 0.12 seconds per insert is pretty slow. 10 inserts would take a\n> second. I'm inserting 10,000 rows in about 2 seconds. Each insert is\n> definitely in the 0.12 millisecond range.\n\nI see the confusion. I use COPY(JDBC) not INSERT, so one transaction \ncontains 20000 rows, which is copy inserted in 300 ms, so that gives a \nper row insert time of 0.015ms. So I actually have pretty decent write \nperformance. If I remove the index, the copy insert only takes about \n125ms. So the index update time amounts to half the total update time.\n\nThis still leaves me with the question of why the smaller index \n(id1,3,4) take longer to update than the larger index (id1,2,3,4)?\nUpdating an index like id1,2,3 should take shorter time, I have to test \nit first to verify, so a similar index, id1,3,4 should take \napproximately the same time.\n\nCould it have something to do with the smaller index is more complicated \nto fill in? Could the placing of the id2 filed in the table have \nanything to say about it?\n\n> Hard to say. What does bonnie++ have to say about the performance of\n> your RAID array?\n\nDont know, havent heard about it before now. But I will have a look at \nit and see if the controller and the os is set up correctly.\n\nregards\n\nthomas\n\n", "msg_date": "Mon, 26 Jan 2009 10:10:13 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "On Mon, Jan 26, 2009 at 10:10:13AM +0100, Thomas Finneid wrote:\n> Scott Marlowe wrote:\n>\n>> I'm guessing that you just had more data in the table or something by\n>> the time you tested that, or some cron job was running in the\n>> background, or some other issue, not the index.\n>\n> It starts from scratch and builds up. Every insert has constant time from \n> the first to the last row, ie. row 1 to row 1.2 billion.\n> There is no background jobs or other disturbances.\n>\n>> Quite a similar machine. 
write back cache with battery backed\n>> controller on the controller? A really old Areca like an 11xx series\n>> or a newer one 12xx, 16xx?\n>\n> Its an Areca 1220. write back is enabled but it does not have a BBU, \n> because its an development machine and not a production machine.\n>\n>> 0.12 seconds per insert is pretty slow. 10 inserts would take a\n>> second. I'm inserting 10,000 rows in about 2 seconds. Each insert is\n>> definitely in the 0.12 millisecond range.\n>\n> I see the confusion. I use COPY(JDBC) not INSERT, so one transaction \n> contains 20000 rows, which is copy inserted in 300 ms, so that gives a per \n> row insert time of 0.015ms. So I actually have pretty decent write \n> performance. If I remove the index, the copy insert only takes about 125ms. \n> So the index update time amounts to half the total update time.\n>\n> This still leaves me with the question of why the smaller index (id1,3,4) \n> take longer to update than the larger index (id1,2,3,4)?\n> Updating an index like id1,2,3 should take shorter time, I have to test it \n> first to verify, so a similar index, id1,3,4 should take approximately the \n> same time.\n>\n> Could it have something to do with the smaller index is more complicated to \n> fill in? Could the placing of the id2 filed in the table have anything to \n> say about it?\n>\n\nIt may be that the smaller index has update contention for the same\nblocks that the larger index does not.\n\nCheers,\nKen\n\n", "msg_date": "Mon, 26 Jan 2009 07:50:59 -0600", "msg_from": "Kenneth Marshall <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "Kenneth Marshall wrote:\n> It may be that the smaller index has update contention for the same\n> blocks that the larger index does not.\n\nIs that an assumption based on both indexes existing? if so I might \nagree, but if you are talking about only one index existing at a time \nthen could you explain what the basis for you conclusion is?\n\nregards\n\nthomas\n\n", "msg_date": "Mon, 26 Jan 2009 15:49:00 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: strange index performance?" }, { "msg_contents": "On Mon, Jan 26, 2009 at 03:49:00PM +0100, Thomas Finneid wrote:\n> Kenneth Marshall wrote:\n>> It may be that the smaller index has update contention for the same\n>> blocks that the larger index does not.\n>\n> Is that an assumption based on both indexes existing? if so I might agree, \n> but if you are talking about only one index existing at a time then could \n> you explain what the basis for you conclusion is?\n>\n> regards\n>\n> thomas\n>\n\nThe small index blocks would look like:\n\n|abcd|efgh|ijkl|...\n\nand the large index:\n\n|axxx|...|bxxx|...|cxxx|... and so on.\n\nNow, if you try to update a-k, the small index will be trying to\nupdate and possibly rearrange/split/... items on the same disk\nblocks while the larger index would be updating without contention.\nIt may not even be block level contention, the same argument applies\nto cachelines with in a block.\n\nCheers,\nKen\n", "msg_date": "Mon, 26 Jan 2009 09:37:09 -0600", "msg_from": "Kenneth Marshall <[email protected]>", "msg_from_op": false, "msg_subject": "Re: strange index performance?" } ]
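A minimal sketch of the comparison discussed in the thread above: time the same COPY batch once per candidate index, in isolation. The table, index, and file names below are hypothetical placeholders for the poster's schema, and psql's \timing only gives rough per-batch numbers.

-- hypothetical table standing in for the poster's four-id-column schema
CREATE TABLE insert_test (id1 int, id2 int, id3 int, id4 int, payload text);

-- candidate A: the smaller, three-column index
CREATE INDEX insert_test_idx_a ON insert_test (id1, id3, id4);

\timing
COPY insert_test FROM '/tmp/batch_20000_rows.tsv';  -- one 20,000-row batch

-- candidate B: the four-column index, tested against an empty table again
TRUNCATE insert_test;
DROP INDEX insert_test_idx_a;
CREATE INDEX insert_test_idx_b ON insert_test (id1, id2, id3, id4);
COPY insert_test FROM '/tmp/batch_20000_rows.tsv';

Repeating each COPY many times and watching how the per-batch time grows as the table fills is closer to what the thread is actually measuring than a single run.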
[ { "msg_contents": "So, the eternal problem with what hardware to buy. I really miss a\nhardware buying guide for database servers now that I'm about to buy\none..\nSome general guidelines mixed with ranked lists of what hardware that\nis best, shouldn't that be on the wiki?.\n\nTHis is of course very difficult to advice about, but shouldn't\ngeneeral advice be like:\n1) A fast CPU but not on the bleeding edge\n2) As much RAM as you can fit into the machine without paying way to\nmuch for it. Use the fastest ram you can find (what is it called\ntoday? PC 1333 MHz or something like that?)\n3) Fast harddiscs. Best is raid X (what raid should one use?)\n4) Use software raid unless you have the money to buy a raid\ncontroller, in which case here is the ranking of them\n <list of brand/modells>\n ordered by quality and a general comment on exactly how much\nbetter they are than the one below on the list ;-)\n", "msg_date": "Sun, 25 Jan 2009 22:21:40 +0100", "msg_from": "A B <[email protected]>", "msg_from_op": true, "msg_subject": "[PERFORMANCE] Buying hardware" }, { "msg_contents": "On Sun, Jan 25, 2009 at 2:21 PM, A B <[email protected]> wrote:\n> So, the eternal problem with what hardware to buy. I really miss a\n> hardware buying guide for database servers now that I'm about to buy\n> one..\n> Some general guidelines mixed with ranked lists of what hardware that\n> is best, shouldn't that be on the wiki?.\n>\n> THis is of course very difficult to advice about, but shouldn't\n> geneeral advice be like:\n> 1) A fast CPU but not on the bleeding edge\n\nMore cores is more important than faster but fewer\n\n> 2) As much RAM as you can fit into the machine without paying way to\n> much for it. Use the fastest ram you can find (what is it called\n> today? PC 1333 MHz or something like that?)\n\nThe speed of the RAM isn't as important as the amount and the speed of\nthe chipset on the motherboard.\n\n> 3) Fast harddiscs. Best is raid X (what raid should one use?)\n\nAgain, more slower disks > fewer slower ones. RAID-10 is almost\nalways the right choice.\n\n> 4) Use software raid unless you have the money to buy a raid\n> controller, in which case here is the ranking of them\n> <list of brand/modells>\n\nAreca and 3ware/Escalade are the two best controllers for the money\nout right now. They tend to take turns being the absolute best as\nthey release new cards. Newer Arecas (the 1680 series) use an\nethernet port for traps and such, so no need for special software that\nmight be kernel version dependent.\n\nBoth cost about the same for their top of the line cards.\n\nMake sure you have battery backed cache.\n", "msg_date": "Sun, 25 Jan 2009 14:42:43 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Sun, 25 Jan 2009, Scott Marlowe wrote:\n> More cores is more important than faster but fewer\n>\n> Again, more slower disks > fewer slower ones.\n\nNot necessarily. It depends what you are doing. If you're going to be \nrunning only one database connection at a time, doing really big complex \nqueries, then having really fast CPUs and discs is better than having \nlots. However, that situation is rare.\n\n> RAID-10 is almost always the right choice.\n\nAgreed. Unless you don't care about the data and need the space, where \nRAID 0 might be useful, or if you really don't need the space, where RAID \n1 might be okay. 
If your controller supports it.\n\nMatthew\n\n-- \n The third years are wandering about all worried at the moment because they\n have to hand in their final projects. Please be sympathetic to them, say\n things like \"ha-ha-ha\", but in a sympathetic tone of voice \n -- Computer Science Lecturer\n", "msg_date": "Mon, 26 Jan 2009 12:09:55 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Mon, Jan 26, 2009 at 4:09 AM, Matthew Wakeling <[email protected]> wrote:\n> On Sun, 25 Jan 2009, Scott Marlowe wrote:\n>>\n>> More cores is more important than faster but fewer\n>>\n>> Again, more slower disks > fewer slower ones.\n>\n> Not necessarily. It depends what you are doing. If you're going to be\n> running only one database connection at a time, doing really big complex\n> queries, then having really fast CPUs and discs is better than having lots.\n> However, that situation is rare.\n\nIf backup/restore times are important, having a fast CPU is important\nbecause backup/restore is single threaded and unable to use more than\none CPU. OK, two CPUs, one for the pg_dump process and one for the\npostgres daemon - but who buys anything with less than two cores these\ndays?\n\nWe do daily backups of our databases, and although our biggest isn't\nvery large at approximately 15GB, backups take a bit more than an hour\nwith one CPU maxed out. This system has two Xeon 5130 @ 2GHz, so even\nwith the fastest processors, we can only reduce backup times by at\nmost 50%.\n\nDuring normal workloads, processing hundreds of queries a second,\nsystem utilization stays below 10% on average - so for us, fewer cores\nthat are faster would be a better purchase than more cores that are\nslower.\n\nLots of people have databases much, much, bigger - I'd hate to imagine\nhave to restore from backup from one of those monsters.\n\n-Dave\n", "msg_date": "Mon, 26 Jan 2009 11:42:18 -0800", "msg_from": "David Rees <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "\nOn Jan 26, 2009, at 2:42 PM, David Rees wrote:\n>\n> Lots of people have databases much, much, bigger - I'd hate to imagine\n> have to restore from backup from one of those monsters.\n>\n\nIf you use PITR + rsync you can create a binary snapshot of the db, so \nrestore time is simply how long it takes to untar / whatever it into \nplace. Our backup script basically does:\n\narchive backup directory\npg_start_backup\nrsync\npg_stop_backup\n\nvoila. I have 2 full copies of the db. You could even expand it a bit \nand after the rsync & friends have it fire up the instance and run \npg_dump against it for a pg_restore compatible dump \"just in case\".\n\nIt takes a long time to restore a 300GB db, even if you cheat and \nparallelify some of it. 8.4 may get a pg_restore that can load in \nparallel - which will help somewhat.\n\n--\nJeff Trout <[email protected]>\nhttp://www.stuarthamm.net/\nhttp://www.dellsmartexitin.com/\n\n\n\n", "msg_date": "Mon, 26 Jan 2009 14:58:05 -0500", "msg_from": "Jeff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Mon, 2009-01-26 at 14:58 -0500, Jeff wrote:\n\n> voila. I have 2 full copies of the db. 
You could even expand it a bit \n> and after the rsync & friends have it fire up the instance and run \n> pg_dump against it for a pg_restore compatible dump \"just in case\".\n> \n> It takes a long time to restore a 300GB db, even if you cheat and \n> parallelify some of it. 8.4 may get a pg_restore that can load in \n> parallel - which will help somewhat.\n\nSomewhat? Just to be clear, if you have the hardware for it, parallel\nrestore can take a 500GB restore in 2.5 hours (versus 15). IMO, that is\na *little* more than somewhat. Maybe, a bit? ;)\n\nJoshua D. Drake\n\n-- \nPostgreSQL - XMPP: [email protected]\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Mon, 26 Jan 2009 12:00:35 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "The technique Jeff is speaking of below is exactly how we do it, \nexcept we use file-system snapshots vs rsync.\n\nThe problem is how slow log application is when recovering since it's \na single process, and very slow at that.\n\n-kg\n\nOn Jan 26, 2009, at 11:58 AM, Jeff wrote:\n\n>\n> On Jan 26, 2009, at 2:42 PM, David Rees wrote:\n>>\n>> Lots of people have databases much, much, bigger - I'd hate to \n>> imagine\n>> have to restore from backup from one of those monsters.\n>>\n>\n> If you use PITR + rsync you can create a binary snapshot of the db, \n> so restore time is simply how long it takes to untar / whatever it \n> into place. Our backup script basically does:\n>\n> archive backup directory\n> pg_start_backup\n> rsync\n> pg_stop_backup\n>\n> voila. I have 2 full copies of the db. You could even expand it a \n> bit and after the rsync & friends have it fire up the instance and \n> run pg_dump against it for a pg_restore compatible dump \"just in \n> case\".\n>\n> It takes a long time to restore a 300GB db, even if you cheat and \n> parallelify some of it. 
8.4 may get a pg_restore that can load in \n> parallel - which will help somewhat.\n>\n> --\n> Jeff Trout <[email protected]>\n> http://www.stuarthamm.net/\n> http://www.dellsmartexitin.com/\n>\n>\n>\n>\n> -- \n> Sent via pgsql-performance mailing list ([email protected] \n> )\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n\n", "msg_date": "Mon, 26 Jan 2009 12:03:51 -0800", "msg_from": "Kenny Gorman <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Mon, Jan 26, 2009 at 11:58 AM, Jeff <[email protected]> wrote:\n> On Jan 26, 2009, at 2:42 PM, David Rees wrote:\n>> Lots of people have databases much, much, bigger - I'd hate to imagine\n>> have to restore from backup from one of those monsters.\n>\n> If you use PITR + rsync you can create a binary snapshot of the db, so\n> restore time is simply how long it takes to untar / whatever it into place.\n\nGood point - we do that as well and that helps with backup times\n(though we still grab daily backups in addition to log-shipping), but\nthat still doesn't do much to avoid long restore times (at least until\npg-8.4 as Joshua mentions which can do parallel backup/restores).\n\nGoing back to the original question -\n\nTrying to come up with a general guide to buying db hardware isn't\neasy because of the number of variables like - what's your budget,\nwhat type of workload do you need to support, how big is your db, how\nlarge will you db get, etc...\n\nAs the others mentioned having a good RAID controller with a BBU cache\nis essential for good performance.\n\nRAID10 is generally the best performing raid configuration, though for\ndata warehousing where you need maximum storage you might consider a\nRAID6 with a RAID1 for the WAL.\n\nThe workload will determine whether is more beneficial to go with\nquantity rather than speed of processors - As a rough calculation you\ncan simply look at the raw GHz you're getting (multiply core speed by\nnumber of cores) - more GHz should be faster as long as your workload\nis parallelizable.\n\nAnd yes, the more memory you can squeeze into the machine, the better,\nthough you'll find that after a certain point, price starts going up\nsteeply. Of course, if you only have a 15GB database, once you reach\n16GB of memory you've pretty much hit the point of diminishing\nreturns.\n\n-Dave\n", "msg_date": "Mon, 26 Jan 2009 12:12:05 -0800", "msg_from": "David Rees <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "\nOn Jan 26, 2009, at 3:00 PM, Joshua D. Drake wrote:\n\n> On Mon, 2009-01-26 at 14:58 -0500, Jeff wrote:\n>\n>> voila. I have 2 full copies of the db. You could even expand it a \n>> bit\n>> and after the rsync & friends have it fire up the instance and run\n>> pg_dump against it for a pg_restore compatible dump \"just in case\".\n>>\n>> It takes a long time to restore a 300GB db, even if you cheat and\n>> parallelify some of it. 8.4 may get a pg_restore that can load in\n>> parallel - which will help somewhat.\n>\n> Somewhat? Just to be clear, if you have the hardware for it, parallel\n> restore can take a 500GB restore in 2.5 hours (versus 15). IMO, that \n> is\n> a *little* more than somewhat. Maybe, a bit? ;)\n>\n\nI'd say that qualifies more towards just a \"smidge\" faster ;)\n\nI'm quite excited about the feature. I'm still on 8.2 mostly because \nof the downtime of the dump & restore. 
I wrote up some plans a while \nback on doing the poor-mans parallel restore, but I haven't had the \ntime to actually do it.\n\nTheoretically, wouldn't the parallel pg_restore be able to run against \nan 8.3 instance with a dump from 8.2? I don't see why it wouldn't be \nable to (unless it uses some handy dandy new 8.4-only catalog). Maybe \nif I get time (HAHAHA) I'll test that out..\n\n--\nJeff Trout <[email protected]>\nhttp://www.stuarthamm.net/\nhttp://www.dellsmartexitin.com/\n\n\n\n", "msg_date": "Mon, 26 Jan 2009 15:27:00 -0500", "msg_from": "Jeff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Mon, Jan 26, 2009 at 12:27 PM, Jeff <[email protected]> wrote:\n> I'm quite excited about the feature. I'm still on 8.2 mostly because of the\n> downtime of the dump & restore. I wrote up some plans a while back on doing\n> the poor-mans parallel restore, but I haven't had the time to actually do\n> it.\n\nWe use slony to provide read-only copies of the database for reporting\npurposes and any other queries that it's OK to serve data that may be\nslightly stale. We also have used it to upgrade our 15GB database\nwith only a few minutes of downtime instead of the minimum 2 hours it\ntakes to do a dump/restore.\n\nSlony works with any version of Postgres going back to versions before\n8.1 (anyone still use Postgres that old?). If you get all your ducks\nin a row, you can get upgraded with only seconds of downtime, though\nrealistically it takes a bit longer.\nhttp://slony.info/documentation/versionupgrade.html\n\n-Dave\n", "msg_date": "Mon, 26 Jan 2009 12:58:06 -0800", "msg_from": "David Rees <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "Jeff wrote:\n\n> If you use PITR + rsync you can create a binary snapshot of the db, so \n> restore time is simply how long it takes to untar / whatever it into \n> place. Our backup script basically does:\n> \n> archive backup directory\n> pg_start_backup\n> rsync\n> pg_stop_backup\n> \n> voila. I have 2 full copies of the db.\n\nNote that this does NOT protect you against a Pg bug, a filesystem \nissue, or a RAID / disk media issue that results in corruption or damage \nto the database that isn't immediately detected.\n\nI personally wouldn't want to rely on PITR alone. I take periodic SQL \ndumps as well, using PITR more for disaster recovery in case of \nsoftware/user error, so I can roll back to when the error occurred \nrather than the potentially much earlier time of the last backup.\n\nA dump is much more likely to trip over a problem, so it's good to run \nperiodically just to make sure the entire DB is readable. It's also more \nlikely to be restorable if something goes horribly wrong.\n\nPersonally I might be OK with WAL-archiving based backups if I was using \na warm-standby server in continuous recovery mode that'd notify me if \nanything went wrong with the restore. I'm much more comfortable having \nan SQL dump around, though.\n\n> You could even expand it a bit \n> and after the rsync & friends have it fire up the instance and run \n> pg_dump against it for a pg_restore compatible dump \"just in case\".\n\nYep, that's what I do.\n\n> It takes a long time to restore a 300GB db, even if you cheat and \n> parallelify some of it. 8.4 may get a pg_restore that can load in \n> parallel - which will help somewhat.\n\nIt's a great pity that pg_dump can't dump in parallel, though. 
 It makes \nsense given that Pg has no mechanism for one backend to \"join\" another's \nexisting transaction, and without that you'd risk inconsistent \nsnapshots, but it's still a bit of a pity.\n\nIs it actually that hard to let one backend join another's (strictly \nread-only) transaction? The first backend will be ensuring that tuples \nin the xmin/xmax range required by the transaction aren't VACUUMed away etc.\n\n--\nCraig Ringer\n", "msg_date": "Tue, 27 Jan 2009 10:07:33 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Mon, 26 Jan 2009, Matthew Wakeling wrote:\n\n> On Sun, 25 Jan 2009, Scott Marlowe wrote:\n>> RAID-10 is almost always the right choice.\n>\n> Agreed. Unless you don't care about the data and need the space, where RAID 0 \n> might be useful, or if you really don't need the space, where RAID 1 might be \n> okay. If your controller supports it.\n\nif you can reproduce your data at will (would not mind losing it) you can \ndo a single disk or raid 0 (and you can disable fsync for even more \nperformance)\n\n for this you need N disks (where N is the number needed to hold your data)\n\n\nif you cannot reproduce your data at will (or it takes too long)\n\nif you need the capacity of a single disk do raid 1\n\nif you need the capacity of a small number of disks do raid 10 (raid 1 \ncombined with raid 0 to stripe redundant copies of data across multiple \ndisks)\n\nif you need the capacity of a large number of disks you need to seriously \nthink about your performance needs. the fact that raid 10 can lose data \nif the wrong 2 disks fail and requires buying 2 disks for every disk worth \nof capacity that you need are both factors. (note that some raid 10 \nimplementations let you have more than 2 copies of your data, but your \ndisk requirements go up # copies * capacity)\n\n for these you need N*M disks (where N is the number needed to hold your \ndata and M-1 is the number of disks you can lose without losing any \ndata)\n\nat some point it may make sense to use raid 6 for some data. It is \ndefinitely slower, but you can lose two drives and not lose any data \nwhile only needing N+2 drives\n\nto further complicate matters, some parts of your database are more \nsensitive to performance than others.\n\nthe fsync throughput of the device you have the WAL on will determine the \nmax transactions/sec of your system (and any seeking that this disk needs \nto do for other purposes will hurt this)\n\ntwo other performance sensitive areas are temporary table space and \nindexes\n question, are these more sensitive to random or sequential performance?\n\nsee the recent discussions and links to performance ratings of different \ndrive types in the thread 'SSD performance' unfortunately the SSD drive \ntypes so overwhelm the normal drives that it's hard to see the differences \nin the graphs between the 15K rpm SCSI/SAS drives and the 7200 rpm SATA \ndrives, but they are there.\n\nDavid Lang\n\n\n", "msg_date": "Mon, 26 Jan 2009 19:59:44 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Mon, 26 Jan 2009, David Rees wrote:\n\n> And yes, the more memory you can squeeze into the machine, the better,\n> though you'll find that after a certain point, price starts going up\n> steeply. 
Of course, if you only have a 15GB database, once you reach\n> 16GB of memory you've pretty much hit the point of diminishing\n> returns.\n\nactually, you need more memory than that. besides the data itself you \nwould want memory for several other things, among them:\n\n1. your OS\n2. your indexes\n3. you per-request memory allocations (for sorting, etc)\n this is highly dependant on your workload (type and number of parallel \nrequests)\n4. 'dead' tuples in your table (that will be cleared by a vaccum, but \nhaven't been yet)\n\nand probably other things as well.\n\nI don't know how large a database will fit in 16G of ram, but I suspect \nit's closer to 8G than 15G.\n\nany experts want to throw out a rule-of-thumb here?\n\nDavid Lang\n", "msg_date": "Mon, 26 Jan 2009 20:06:09 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": ">> 4) Use software raid unless you have the money to buy a raid\n>> controller, in which case here is the ranking of them\n>> <list of brand/modells>\n>\n> Areca and 3ware/Escalade are the two best controllers for the money\n> out right now. They tend to take turns being the absolute best as\n> they release new cards. Newer Arecas (the 1680 series) use an\n> ethernet port for traps and such, so no need for special software that\n> might be kernel version dependent.\n>\n> Both cost about the same for their top of the line cards.\n>\n> Make sure you have battery backed cache.\n\nWhile browsing the net I found a server with a raid controller\n\tHP Smart Array P400/512MB BBWC Controller\nHow does one know what this is, if it is any good or so? I guess they\njust stuck their \"HP\" label onto some other raid controller?\nI could write HP but I guess that wouldn't help much. And I could also\nlook through the archives for the mailinglist. When I find the\ntime,I'll do so and try to create a wiki page.\n\nThe problem with this kind of built-in hardware is that it might suck,\nand then you can't plug in any other hardware in the box.\n", "msg_date": "Tue, 27 Jan 2009 09:20:10 +0100", "msg_from": "A B <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "[email protected] wrote:\n> On Mon, 26 Jan 2009, David Rees wrote:\n> \n>> And yes, the more memory you can squeeze into the machine, the better,\n>> though you'll find that after a certain point, price starts going up\n>> steeply. Of course, if you only have a 15GB database, once you reach\n>> 16GB of memory you've pretty much hit the point of diminishing\n>> returns.\n> \n> actually, you need more memory than that. besides the data itself you\n> would want memory for several other things, among them:\n> \n> 1. your OS\n> 2. your indexes\n> 3. you per-request memory allocations (for sorting, etc)\n> this is highly dependant on your workload (type and number of parallel\n> requests)\n> 4. 'dead' tuples in your table (that will be cleared by a vaccum, but\n> haven't been yet)\n> \n> and probably other things as well.\n> \n> I don't know how large a database will fit in 16G of ram, but I suspect\n> it's closer to 8G than 15G.\n> \n> any experts want to throw out a rule-of-thumb here?\n> \n> David Lang\n> \n\nIt depends on what else the server is doing. If you're running the whole\nLAPP stack on a single box, for example, the PHP interpreter will need\nspace for intermediate data. 
Apache and the Linux kernel will use less\nspace.\n\nIf PostgreSQL is the only thing running on the server, though, assuming\n64-bit Linux, most of the RAM in a box that large should be in either\nmemory you've deliberately set aside for internal PostgreSQL data\nstructures or the Linux page cache.\n\nThere are starting to be some tools built that will show you how RAM is\nallocated, now that recent kernels (2.6.25+) do a better job of\naccounting for RAM pages. So I would expect the total memory dedicated\nto the database functionality to be much closer to 15 GB than 8 GB.\n\nGiven large amounts of RAM and only PostgreSQL running in the server,\nthe interesting trade-offs become\n\na. How little memory can you buy without putting your service level\nagreements at risk?\n\nb. How do you allocate the PostgreSQL-specific memory buffers at the\nexpense of the Linux page cache for optimum performance?\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Tue, 27 Jan 2009 05:29:03 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "M. Edward (Ed) Borasky wrote:\n> Given large amounts of RAM and only PostgreSQL running in the server,\n> the interesting trade-offs become\n> \n> a. How little memory can you buy without putting your service level\n> agreements at risk?\n> \n> b. How do you allocate the PostgreSQL-specific memory buffers at the\n> expense of the Linux page cache for optimum performance?\n\nc. What do I do with the idle cores? :)\n\n(or, how can I exploit them by changing my database design or the\nPostgreSQL architecture?)\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Tue, 27 Jan 2009 06:03:30 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "[email protected] wrote:\n> that's not quite the opposite of the statement that I was trying to make.\n> \n> assuming that you are not running anything else on the system, how much\n> data can you put on the system and run entirely out of ram.\n> \n> the database has it's overhead (sort buffers, indexes, per-request\n> buffers, 'dead tuples', etc) that mean that if you have a database that\n> an uncompressed dump takes 8G, you need substantially more than 8G of\n> ram to avoid using the disks (other than to store changes)\n> \n> how much more is the question. I know it is going to vary from\n> installation to installation, but is there any guidelines that people\n> can start with?\n\nI'm not sure there are any rules of thumb / guidelines for that. My\nexperience has been that doing no disk I/O except writing logs to disk,\ncreating and updating rows is an unrealistic expectation, even for\n\"small\" databases. The cost is prohibitive, for one thing. And for\ncapacity planning, what's probably more important is whether the service\nlevel agreements are being met, not whether you're meeting them purely\nin RAM or by re-reading data from disk sometimes.\n\nI think it's \"easy\", however, to solve the inverse problem. 
Borrow a\nhuge-memory server from your vendor, put your small database up on it,\nrun benchmarks and gradually reduce the amount of memory available until\nthe performance becomes unacceptable.\n\nThe tools exist to measure memory allocations while the benchmarks are\nrunning. If you get enough data points (about five for the simplest\nmodels) you can build a model that you could then \"invert\" to go the\nother way. -- take a database size and figure out how much more RAM was\nneeded to meet the SLAs.\n\nYou don't necessarily have to reboot to reduce available memory -- there\nare ways you can tie up memory without consuming processor or disk time\nto do so. But you would need to \"poison\" the caches between runs, and\nrestart PostgreSQL if you're modifying its memory allocations.\n\n-- \nM. Edward (Ed) Borasky\n\nI've never met a happy clam. In fact, most of them were pretty steamed.\n", "msg_date": "Tue, 27 Jan 2009 07:20:33 -0800", "msg_from": "\"M. Edward (Ed) Borasky\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Tue, 27 Jan 2009, M. Edward (Ed) Borasky wrote:\n\n> [email protected] wrote:\n>> On Mon, 26 Jan 2009, David Rees wrote:\n>>\n>>> And yes, the more memory you can squeeze into the machine, the better,\n>>> though you'll find that after a certain point, price starts going up\n>>> steeply. Of course, if you only have a 15GB database, once you reach\n>>> 16GB of memory you've pretty much hit the point of diminishing\n>>> returns.\n>>\n>> actually, you need more memory than that. besides the data itself you\n>> would want memory for several other things, among them:\n>>\n>> 1. your OS\n>> 2. your indexes\n>> 3. you per-request memory allocations (for sorting, etc)\n>> this is highly dependant on your workload (type and number of parallel\n>> requests)\n>> 4. 'dead' tuples in your table (that will be cleared by a vaccum, but\n>> haven't been yet)\n>>\n>> and probably other things as well.\n>>\n>> I don't know how large a database will fit in 16G of ram, but I suspect\n>> it's closer to 8G than 15G.\n>>\n>> any experts want to throw out a rule-of-thumb here?\n>>\n>\n> There are starting to be some tools built that will show you how RAM is\n> allocated, now that recent kernels (2.6.25+) do a better job of\n> accounting for RAM pages. So I would expect the total memory dedicated\n> to the database functionality to be much closer to 15 GB than 8 GB.\n\nthat's not quite the opposite of the statement that I was trying to make.\n\nassuming that you are not running anything else on the system, how much \ndata can you put on the system and run entirely out of ram.\n\nthe database has it's overhead (sort buffers, indexes, per-request \nbuffers, 'dead tuples', etc) that mean that if you have a database that \nan uncompressed dump takes 8G, you need substantially more than 8G of ram \nto avoid using the disks (other than to store changes)\n\nhow much more is the question. I know it is going to vary from \ninstallation to installation, but is there any guidelines that people can \nstart with?\n\nDavid Lang\n", "msg_date": "Tue, 27 Jan 2009 07:41:55 -0800 (PST)", "msg_from": "[email protected]", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Tue, Jan 27, 2009 at 1:20 AM, A B <[email protected]> wrote:\n> While browsing the net I found a server with a raid controller\n> HP Smart Array P400/512MB BBWC Controller\n> How does one know what this is, if it is any good or so? 
I guess they\n> just stuck their \"HP\" label onto some other raid controller?\n> I could write HP but I guess that wouldn't help much. And I could also\n> look through the archives for the mailinglist. When I find the\n> time,I'll do so and try to create a wiki page.\n\nThe only HP controller I've seen get decent reviews here has been the\nP800. The P400 has gotten pretty poor reviews. I imagine it's a fine\ncontroller for a workgroup level file server.\n\n> The problem with this kind of built-in hardware is that it might suck,\n> and then you can't plug in any other hardware in the box.\n\nExactly. Which is why I prefer high quality white box servers from\nsmaller shops.\n", "msg_date": "Wed, 28 Jan 2009 13:01:50 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" }, { "msg_contents": "On Tue, Jan 27, 2009 at 7:03 AM, M. Edward (Ed) Borasky\n<[email protected]> wrote:\n> M. Edward (Ed) Borasky wrote:\n>> Given large amounts of RAM and only PostgreSQL running in the server,\n>> the interesting trade-offs become\n>>\n>> a. How little memory can you buy without putting your service level\n>> agreements at risk?\n>>\n>> b. How do you allocate the PostgreSQL-specific memory buffers at the\n>> expense of the Linux page cache for optimum performance?\n>\n> c. What do I do with the idle cores? :)\n>\n> (or, how can I exploit them by changing my database design or the\n> PostgreSQL architecture?)\n\nYou run as many queries as you have cores at the same time? If you've\nonly ever got 1 or 2 queries running at the same time, don't buy so\nmany extra cores.\n", "msg_date": "Wed, 28 Jan 2009 13:03:25 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORMANCE] Buying hardware" } ]
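The SQL half of the PITR base-backup sequence Jeff describes earlier in this thread looks roughly like the sketch below. The backup label is made up and the data-directory copy is only indicated by a comment, so treat this as an outline of the sequence rather than anyone's actual backup script.

-- begin an online base backup (available since the 8.0 PITR support)
SELECT pg_start_backup('nightly_base_backup');

-- ... copy the data directory with rsync/tar from the shell at this point ...

-- end the backup; the WAL archived between the two calls is what makes
-- the copied data directory restorable
SELECT pg_stop_backup();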
[ { "msg_contents": "Folks,\n\nI turned on temp file logging for PostgreSQL to see if I needed to \nadjust work_mem. Oddly, it's logging file usage up to 33 times per \nexecuted query (and no, the query isn't large enough to need 33 separate \nsorts).\n\nAny idea what's going on here?\n\n--Josh\n", "msg_date": "Mon, 26 Jan 2009 22:53:09 -0800", "msg_from": "Josh Berkus <[email protected]>", "msg_from_op": true, "msg_subject": "Odd behavior with temp usage logging" }, { "msg_contents": "Josh Berkus wrote:\n> Folks,\n> \n> I turned on temp file logging for PostgreSQL to see if I needed to\n> adjust work_mem. Oddly, it's logging file usage up to 33 times per\n> executed query (and no, the query isn't large enough to need 33 separate\n> sorts).\n\nAre you sure there's not a sort happening inside a loop? It might help\nif you posted the actual log messages along with the output of `EXPLAIN\nANALYZE' on your query.\n\n--\nCraig Ringer\n", "msg_date": "Tue, 27 Jan 2009 17:14:06 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Odd behavior with temp usage logging" }, { "msg_contents": "Craig Ringer wrote:\n> Josh Berkus wrote:\n>> Folks,\n>>\n>> I turned on temp file logging for PostgreSQL to see if I needed to\n>> adjust work_mem. Oddly, it's logging file usage up to 33 times per\n>> executed query (and no, the query isn't large enough to need 33 separate\n>> sorts).\n> \n> Are you sure there's not a sort happening inside a loop? It might help\n> if you posted the actual log messages along with the output of `EXPLAIN\n> ANALYZE' on your query.\n\nHmmm, it's possible. The sort sizes seem very large for the query in \nquestion, though. Will have to check when I get back on the system.\n\nThat would make a TODO in tempfile logging though ... it would be *far* \nmore useful to collect summary stats per query than log each individual sort.\n\n--Josh\n\n", "msg_date": "Tue, 27 Jan 2009 09:33:47 -0800", "msg_from": "Josh Berkus <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Odd behavior with temp usage logging" } ]
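For anyone reproducing the behaviour above, the settings usually involved are log_temp_files (available from 8.3) and work_mem. The sketch below just forces a spill on purpose, with illustrative values and a made-up table name; note that a sort node sitting inside a nested loop can spill, and therefore be logged, once per outer row, which is one way a single query produces dozens of temp-file log entries.

SET log_temp_files = 0;   -- log every temp file created, regardless of size (superuser setting)
SET work_mem = '1MB';     -- deliberately small so the sort below spills to disk

CREATE TEMP TABLE spill_demo AS SELECT generate_series(1, 500000) AS n;
EXPLAIN ANALYZE SELECT n FROM spill_demo ORDER BY n DESC;  -- one temp file entry appears in the log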
[ { "msg_contents": "Hi,\n\nI am relatively new to PostgreSQL(8.1) and facing the following problem.\n\nWe have indexes defined on timestamp and description (create index description_idx on event using btree (description varchar_pattern_ops))\n\nEXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like '%mismatch%' ORDER BY timestamp desc;\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------\n Sort (cost=36267.09..36272.73 rows=2256 width=314) (actual time=19255.075..20345.774 rows=647537 loops=1)\n   Sort Key: \"timestamp\"\n   Sort Method: external merge Disk: 194080kB\n   -> Seq Scan on event (cost=0.00..36141.44 rows=2256 width=314) (actual time=0.080..1475.041 rows=647537 loops=1)\n         Filter: ((description)::text ~~ '%mismatch%'::text)\n Total runtime: 22547.292 ms\n(6 rows)\n\nBut startsWith query use indexes.\n\nEXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like 'mismatch%' ORDER BY timestamp desc;\n QUERY PLAN\n-------------------------------------------------------------------------------------------------------------------------------\n Sort (cost=9.26..9.27 rows=1 width=314) (actual time=0.766..0.766 rows=0 loops=1)\n   Sort Key: \"timestamp\"\n   Sort Method: quicksort Memory: 17kB\n   -> Index Scan using description_idx on event (cost=0.00..9.25 rows=1 width=314) (actual time=0.741..0.741 rows=0 loops=1)\n         Index Cond: (((description)::text ~>=~ 'mismatch'::text) AND ((description)::text ~<~ 'mismatci'::text))\n         Filter: ((description)::text ~~ 'mismatch%'::text)\n Total runtime: 0.919 ms\n(7 rows)\n\nIs there any tweaks to force pgsql to use index on description?\n\nBalaji\n\nP.S The event database has 700k records.", "msg_date": "Tue, 27 Jan 2009 17:41:50 -0600", "msg_from": "\"Hari, Balaji\" <[email protected]>", "msg_from_op": true, "msg_subject": "LIKE Query performance" }, { "msg_contents": "Only wildspeed http://www.sai.msu.su/~megera/wiki/wildspeed\nhas index support for %text% \nBut, it has limitations.\n\nOleg\nOn Tue, 27 Jan 2009, Hari, Balaji wrote:\n\n> Hi,\n>\n> I am relatively new to PostgreSQL(8.1) and facing the following problem.\n>\n> We have indexes defined on timestamp and description (create index description_idx on event using btree (description varchar_pattern_ops))\n>\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like '%mismatch%' ORDER BY timestamp desc;\n> QUERY PLAN\n> ----------------------------------------------------------------------------------------------------------------------\n> Sort (cost=36267.09..36272.73 rows=2256 width=314) (actual time=19255.075..20345.774 rows=647537 loops=1)\n> Sort Key: \"timestamp\"\n> Sort Method: external merge Disk: 194080kB\n> -> Seq Scan on event (cost=0.00..36141.44 rows=2256 width=314) (actual time=0.080..1475.041 rows=647537 loops=1)\n> Filter: ((description)::text ~~ '%mismatch%'::text)\n> Total runtime: 22547.292 ms\n> (6 rows)\n>\n> But startsWith query use indexes.\n>\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like 'mismatch%' ORDER BY timestamp desc;\n> QUERY PLAN\n> -------------------------------------------------------------------------------------------------------------------------------\n> Sort (cost=9.26..9.27 rows=1 width=314) (actual time=0.766..0.766 rows=0 loops=1)\n> Sort Key: \"timestamp\"\n> Sort Method: quicksort Memory: 17kB\n> -> Index Scan using description_idx on event (cost=0.00..9.25 rows=1 width=314) (actual time=0.741..0.741 rows=0 loops=1)\n> Index Cond: (((description)::text ~>=~ 'mismatch'::text) AND ((description)::text ~<~ 'mismatci'::text))\n> Filter: ((description)::text ~~ 'mismatch%'::text)\n> Total runtime: 0.919 ms\n> (7 rows)\n>\n> Is there any tweaks to force pgsql to use index on description?\n>\n> Balaji\n>\n> P.S The event database has 700k records.\n>\n\n \tRegards,\n \t\tOleg\n_____________________________________________________________\nOleg Bartunov, Research Scientist, Head of AstroNet (www.astronet.ru),\nSternberg 
Astronomical Institute, Moscow University, Russia\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(495)939-16-83, +007(495)939-23-83\n", "msg_date": "Wed, 28 Jan 2009 09:26:54 +0300 (MSK)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: LIKE Query performance" }, { "msg_contents": "On Wed, Jan 28, 2009 at 12:41 AM, Hari, Balaji <[email protected]> wrote:\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number,\n> description, event_type_id, realm_name, root_session_number, severity,\n> source_name, target_key, target_name, timestamp, jdo_version FROM event\n> WHERE description like '%mismatch%' ORDER BY timestamp desc;\n(...)\n> Is there any tweaks to force pgsql to use index on description?\n\nHow long is usually the description? For me it sounds like the job for\ntsearch2 module, which should be in \"contrib\" section in 8.1.\n", "msg_date": "Wed, 28 Jan 2009 10:27:18 +0100", "msg_from": "=?UTF-8?Q?Marcin_St=C4=99pnicki?= <[email protected]>", "msg_from_op": false, "msg_subject": "Re: LIKE Query performance" }, { "msg_contents": "In response to Hari, Balaji :\n> Hi,\n> \n> \n> \n> I am relatively new to PostgreSQL(8.1) and facing the following problem.\n\nSure? 8.1? Your explain looks like 8.2 or 8.3. 8.3 contains a\nfull-text-search.\n\nIf really not 8.3 you can use tsearch2, it is a contrib-module.\n\n\nAndreas\n-- \nAndreas Kretschmer\nKontakt: Heynitz: 035242/47150, D1: 0160/7141639 (mehr: -> Header)\nGnuPG-ID: 0x3FFF606C, privat 0x7F4584DA http://wwwkeys.de.pgp.net\n", "msg_date": "Wed, 28 Jan 2009 10:43:19 +0100", "msg_from": "\"A. Kretschmer\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: LIKE Query performance" }, { "msg_contents": "> Is there any tweaks to force pgsql to use index on description?\n\nEven if you could force it to use the index, it wouldn't make the\nquery run faster.\n\nAs others have pointed out, what you really need is a different kind of index...\n\n...Robert\n", "msg_date": "Wed, 28 Jan 2009 09:08:50 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: LIKE Query performance" }, { "msg_contents": "Is there a patch to make Wildspeed work with postgresql version 8.3.1?\n\nP.S\nMy bad, the version number was incorrect in my previous mail.\n-----Original Message-----\nFrom: Oleg Bartunov [mailto:[email protected]] \nSent: Wednesday, January 28, 2009 1:27 AM\nTo: Hari, Balaji\nCc: [email protected]\nSubject: Re: [PERFORM] LIKE Query performance\n\nOnly wildspeed http://www.sai.msu.su/~megera/wiki/wildspeed\nhas index support for %text% \nBut, it has limitations.\n\nOleg\nOn Tue, 27 Jan 2009, Hari, Balaji wrote:\n\n> Hi,\n>\n> I am relatively new to PostgreSQL(8.1) and facing the following problem.\n>\n> We have indexes defined on timestamp and description (create index description_idx on event using btree (description varchar_pattern_ops))\n>\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like '%mismatch%' ORDER BY timestamp desc;\n> QUERY PLAN\n> ----------------------------------------------------------------------------------------------------------------------\n> Sort (cost=36267.09..36272.73 rows=2256 width=314) (actual time=19255.075..20345.774 rows=647537 loops=1)\n> Sort Key: \"timestamp\"\n> Sort Method: external merge Disk: 
194080kB\n> -> Seq Scan on event (cost=0.00..36141.44 rows=2256 width=314) (actual time=0.080..1475.041 rows=647537 loops=1)\n> Filter: ((description)::text ~~ '%mismatch%'::text)\n> Total runtime: 22547.292 ms\n> (6 rows)\n>\n> But startsWith query use indexes.\n>\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like 'mismatch%' ORDER BY timestamp desc;\n> QUERY PLAN\n> -------------------------------------------------------------------------------------------------------------------------------\n> Sort (cost=9.26..9.27 rows=1 width=314) (actual time=0.766..0.766 rows=0 loops=1)\n> Sort Key: \"timestamp\"\n> Sort Method: quicksort Memory: 17kB\n> -> Index Scan using description_idx on event (cost=0.00..9.25 rows=1 width=314) (actual time=0.741..0.741 rows=0 loops=1)\n> Index Cond: (((description)::text ~>=~ 'mismatch'::text) AND ((description)::text ~<~ 'mismatci'::text))\n> Filter: ((description)::text ~~ 'mismatch%'::text)\n> Total runtime: 0.919 ms\n> (7 rows)\n>\n> Is there any tweaks to force pgsql to use index on description?\n>\n> Balaji\n>\n> P.S The event database has 700k records.\n>\n\n \tRegards,\n \t\tOleg\n_____________________________________________________________\nOleg Bartunov, Research Scientist, Head of AstroNet (www.astronet.ru),\nSternberg Astronomical Institute, Moscow University, Russia\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(495)939-16-83, +007(495)939-23-83\n\n", "msg_date": "Thu, 29 Jan 2009 11:11:51 -0600", "msg_from": "\"Hari, Balaji\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: LIKE Query performance" }, { "msg_contents": "\nWe won't need full text searching capabilities as in documents as the data type is varchar.\n\nWildspeed will exactly fit our needs.\n\n-----Original Message-----\nFrom: Oleg Bartunov [mailto:[email protected]] \nSent: Wednesday, January 28, 2009 1:27 AM\nTo: Hari, Balaji\nCc: [email protected]\nSubject: Re: [PERFORM] LIKE Query performance\n\nOnly wildspeed http://www.sai.msu.su/~megera/wiki/wildspeed\nhas index support for %text% \nBut, it has limitations.\n\nOleg\nOn Tue, 27 Jan 2009, Hari, Balaji wrote:\n\n> Hi,\n>\n> I am relatively new to PostgreSQL(8.1) and facing the following problem.\n>\n> We have indexes defined on timestamp and description (create index description_idx on event using btree (description varchar_pattern_ops))\n>\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like '%mismatch%' ORDER BY timestamp desc;\n> QUERY PLAN\n> ----------------------------------------------------------------------------------------------------------------------\n> Sort (cost=36267.09..36272.73 rows=2256 width=314) (actual time=19255.075..20345.774 rows=647537 loops=1)\n> Sort Key: \"timestamp\"\n> Sort Method: external merge Disk: 194080kB\n> -> Seq Scan on event (cost=0.00..36141.44 rows=2256 width=314) (actual time=0.080..1475.041 rows=647537 loops=1)\n> Filter: ((description)::text ~~ '%mismatch%'::text)\n> Total runtime: 22547.292 ms\n> (6 rows)\n>\n> But startsWith query use indexes.\n>\n> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, 
severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like 'mismatch%' ORDER BY timestamp desc;\n> QUERY PLAN\n> -------------------------------------------------------------------------------------------------------------------------------\n> Sort (cost=9.26..9.27 rows=1 width=314) (actual time=0.766..0.766 rows=0 loops=1)\n> Sort Key: \"timestamp\"\n> Sort Method: quicksort Memory: 17kB\n> -> Index Scan using description_idx on event (cost=0.00..9.25 rows=1 width=314) (actual time=0.741..0.741 rows=0 loops=1)\n> Index Cond: (((description)::text ~>=~ 'mismatch'::text) AND ((description)::text ~<~ 'mismatci'::text))\n> Filter: ((description)::text ~~ 'mismatch%'::text)\n> Total runtime: 0.919 ms\n> (7 rows)\n>\n> Is there any tweaks to force pgsql to use index on description?\n>\n> Balaji\n>\n> P.S The event database has 700k records.\n>\n\n \tRegards,\n \t\tOleg\n_____________________________________________________________\nOleg Bartunov, Research Scientist, Head of AstroNet (www.astronet.ru),\nSternberg Astronomical Institute, Moscow University, Russia\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(495)939-16-83, +007(495)939-23-83\n\n", "msg_date": "Thu, 29 Jan 2009 11:37:48 -0600", "msg_from": "\"Hari, Balaji\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: LIKE Query performance" }, { "msg_contents": "On Thu, 29 Jan 2009, Hari, Balaji wrote:\n\n> Is there a patch to make Wildspeed work with postgresql version 8.3.1?\n\nunfortunately, no.\n\n>\n> P.S\n> My bad, the version number was incorrect in my previous mail.\n> -----Original Message-----\n> From: Oleg Bartunov [mailto:[email protected]]\n> Sent: Wednesday, January 28, 2009 1:27 AM\n> To: Hari, Balaji\n> Cc: [email protected]\n> Subject: Re: [PERFORM] LIKE Query performance\n>\n> Only wildspeed http://www.sai.msu.su/~megera/wiki/wildspeed\n> has index support for %text%\n> But, it has limitations.\n>\n> Oleg\n> On Tue, 27 Jan 2009, Hari, Balaji wrote:\n>\n>> Hi,\n>>\n>> I am relatively new to PostgreSQL(8.1) and facing the following problem.\n>>\n>> We have indexes defined on timestamp and description (create index description_idx on event using btree (description varchar_pattern_ops))\n>>\n>> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like '%mismatch%' ORDER BY timestamp desc;\n>> QUERY PLAN\n>> ----------------------------------------------------------------------------------------------------------------------\n>> Sort (cost=36267.09..36272.73 rows=2256 width=314) (actual time=19255.075..20345.774 rows=647537 loops=1)\n>> Sort Key: \"timestamp\"\n>> Sort Method: external merge Disk: 194080kB\n>> -> Seq Scan on event (cost=0.00..36141.44 rows=2256 width=314) (actual time=0.080..1475.041 rows=647537 loops=1)\n>> Filter: ((description)::text ~~ '%mismatch%'::text)\n>> Total runtime: 22547.292 ms\n>> (6 rows)\n>>\n>> But startsWith query use indexes.\n>>\n>> EXPLAIN ANALYZE SELECT event_id, category, current_session_number, description, event_type_id, realm_name, root_session_number, severity, source_name, target_key, target_name, timestamp, jdo_version FROM event WHERE description like 'mismatch%' ORDER BY timestamp desc;\n>> QUERY PLAN\n>> 
-------------------------------------------------------------------------------------------------------------------------------\n>> Sort (cost=9.26..9.27 rows=1 width=314) (actual time=0.766..0.766 rows=0 loops=1)\n>> Sort Key: \"timestamp\"\n>> Sort Method: quicksort Memory: 17kB\n>> -> Index Scan using description_idx on event (cost=0.00..9.25 rows=1 width=314) (actual time=0.741..0.741 rows=0 loops=1)\n>> Index Cond: (((description)::text ~>=~ 'mismatch'::text) AND ((description)::text ~<~ 'mismatci'::text))\n>> Filter: ((description)::text ~~ 'mismatch%'::text)\n>> Total runtime: 0.919 ms\n>> (7 rows)\n>>\n>> Is there any tweaks to force pgsql to use index on description?\n>>\n>> Balaji\n>>\n>> P.S The event database has 700k records.\n>>\n>\n> \tRegards,\n> \t\tOleg\n> _____________________________________________________________\n> Oleg Bartunov, Research Scientist, Head of AstroNet (www.astronet.ru),\n> Sternberg Astronomical Institute, Moscow University, Russia\n> Internet: [email protected], http://www.sai.msu.su/~megera/\n> phone: +007(495)939-16-83, +007(495)939-23-83\n>\n>\n>\n\n \tRegards,\n \t\tOleg\n_____________________________________________________________\nOleg Bartunov, Research Scientist, Head of AstroNet (www.astronet.ru),\nSternberg Astronomical Institute, Moscow University, Russia\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(495)939-16-83, +007(495)939-23-83\n", "msg_date": "Thu, 29 Jan 2009 22:06:13 +0300 (MSK)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: LIKE Query performance" } ]
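If matching whole words is enough (wildspeed exists precisely because LIKE '%text%' needs arbitrary-substring matching), the full text search integrated into 8.3 can index this kind of query. A rough sketch against the event table from this thread; the 'english' configuration is an assumption, not something the posters specified.

-- expression index over the description column (8.3 built-in tsearch)
CREATE INDEX event_description_fts_idx
    ON event USING gin (to_tsvector('english', description));

-- word search that can use the index, unlike LIKE '%mismatch%'
SELECT event_id, description, timestamp
  FROM event
 WHERE to_tsvector('english', description) @@ to_tsquery('english', 'mismatch')
 ORDER BY timestamp DESC;

The ORDER BY still has to sort whatever matches, so the large external sort seen in the original plan only disappears if the match set is small or work_mem is generous.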
[ { "msg_contents": "Hi Mark,\n\nI, Rohan Pethkar, want to run some of the DBT2 tests on PostgreSQL. I have\ndownloaded the latest DBT2 tarball from http://git.postgresql.org .\nI tried the steps that are in the INSTALL file. The cmake CMakeLists.txt\ncommand runs fine. I tried to install DBT2 but am getting the following error\nwhile running the make command:\n\nOutput of make command is as follows:\n\n[rohan@gem1 dbt2]$ make\n[ 3%] Building C object CMakeFiles/bin/dbt2-client.dir/src/db_threadpool.o\nIn file included from\n/home/rohan/NEW_DBT2/Installer/DBT2/dbt2/src/db_threadpool.c:28:\nsrc/include/db.h:35: warning: 'struct db_context_t' declared inside\nparameter list\nsrc/include/db.h:35: warning: its scope is only this definition or\ndeclaration, which is probably not what you want\nsrc/include/db.h:48: warning: 'struct db_context_t' declared inside\nparameter list\nsrc/include/db.h:50: warning: 'struct db_context_t' declared inside\nparameter list\n/home/rohan/NEW_DBT2/Installer/DBT2/dbt2/src/db_threadpool.c: In function\n'db_worker':\n/home/rohan/NEW_DBT2/Installer/DBT2/dbt2/src/db_threadpool.c:70: error:\nstorage size of 'dbc' isn't known\n/home/rohan/NEW_DBT2/Installer/DBT2/dbt2/src/db_threadpool.c:70: warning:\nunused variable 'dbc'\nmake[2]: *** [CMakeFiles/bin/dbt2-client.dir/src/db_threadpool.o] Error 1\nmake[1]: *** [CMakeFiles/bin/dbt2-client.dir/all] Error 2\nmake: *** [all] Error 2\n\nAny help on this will be appreciated. Please provide me your valuable inputs\nor a document (How to set up DBT2? How to conduct the tests?) so\nthat I can proceed further.\n\nThanks,\nRohan", "msg_date": "Wed, 28 Jan 2009 14:44:55 +0530", "msg_from": "Rohan Pethkar <[email protected]>", "msg_from_op": true, "msg_subject": "Need help in setting up DBT2 for PostgreSQL" } ]
[ { "msg_contents": "[Ppsted similar note to PG General but I suppose it's more appropriate\nin this list. Apologies for cross-posting.]\n\nHi. Further to my bafflement with the \"count(*)\" queries as described\nin this thread:\n\nhttp://archives.postgresql.org/pgsql-general/2009-01/msg00804.php\n\nIt seems that whenever this question has come up, Postgresql comes up\nvery short in terms of \"count(*)\" functions.\n\nThe performance is always slow, because of the planner's need to guess\nand such. I don't fully understand how the statistics work (and the\nexplanation on the PG website is way too geeky) but he columns I work\nwith already have a stat level of 100. Not helping at all.\n\nWe are now considering a web based logging functionality for users of\nour website. This means the table could be heavily INSERTed into. We\nget about 10 million hits a day, and I'm guessing that we will have to\nkeep this data around for a while.\n\nMy question: with that kind of volume and the underlying aggregation\nfunctions (by product id, dates, possibly IP addresses or at least\ncountries of origin..) will PG ever be a good choice? Or should I be\nlooking at some other kind of tools? I wonder if OLAP tools would be\noverkill for something that needs to look like a barebones version of\ngoogle analytics limited to our site..\n\nAppreciate any thoughts. If possible I would prefer to tone down any\nrequests for MySQL and such!\n\nThanks!\n", "msg_date": "Wed, 28 Jan 2009 21:33:50 +0800", "msg_from": "Phoenix Kiula <[email protected]>", "msg_from_op": true, "msg_subject": "PG performance in high volume environment (many INSERTs and lots of\n\taggregation reporting)" }, { "msg_contents": "> My question: with that kind of volume and the underlying aggregation\n> functions (by product id, dates, possibly IP addresses or at least\n> countries of origin..) will PG ever be a good choice? Or should I be\n> looking at some other kind of tools? I wonder if OLAP tools would be\n> overkill for something that needs to look like a barebones version of\n> google analytics limited to our site..\n\nSome other databases might have an optimization that makes this much\nfaster that it would ordinarily be.\n\nselect count(*) from table;\n\nBut I don't think anyone has an optimization that makes this fast:\n\nselect column, count(*) from table group by 1;\n\nHow do you expect the database to get this information other than be\nreading the whole table and counting up the number of occurrences of\neach value? I guess an OLAP cube might precompute all the answers for\nyou, but I don't think MySQL is going to do that.\n\nOne option is to write a script that runs in the background and\nupdates all your statistics every 10 minutes or so, dumping the\nresults into separate (and smaller) tables that you can query quickly.\n\nAnother option (which is probably what I would do for really high\nvolume logging of web traffic) is to write your log records to a flat\nfile and then postprocess them with perl or something and load the\nsummary statistics into your database later. PostgreSQL is really\nfast, but nothing is as fast as writing to a flatfile.\n\n...Robert\n", "msg_date": "Wed, 28 Jan 2009 09:18:52 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PG performance in high volume environment (many INSERTs\n\tand lots of aggregation reporting)" }, { "msg_contents": "Phoenix Kiula wrote:\n> [Ppsted similar note to PG General but I suppose it's more appropriate\n> in this list. 
Apologies for cross-posting.]\n> \n> Hi. Further to my bafflement with the \"count(*)\" queries as described\n> in this thread:\n> \n> http://archives.postgresql.org/pgsql-general/2009-01/msg00804.php\n> \n> It seems that whenever this question has come up, Postgresql comes up\n> very short in terms of \"count(*)\" functions.\n\nSorry - I'm confused. That thread doesn't seem to contain a slow\ncount(*) query. You seem to be saying you're having problems with the\nquery taking 10-15 seconds, but the example takes less then half a\nsecond. How have you identified the count() as being the problem here?\n\n> The performance is always slow, because of the planner's need to guess\n> and such. I don't fully understand how the statistics work (and the\n> explanation on the PG website is way too geeky) but he columns I work\n> with already have a stat level of 100. Not helping at all.\n\nBut your own email says it's slow sometimes:\n \"My queries are fast in general *except* the first time\"\nI'm not sure how the planner comes into this.\n\n> We are now considering a web based logging functionality for users of\n> our website. This means the table could be heavily INSERTed into. We\n> get about 10 million hits a day, and I'm guessing that we will have to\n> keep this data around for a while.\n> \n> My question: with that kind of volume and the underlying aggregation\n> functions (by product id, dates, possibly IP addresses or at least\n> countries of origin..) will PG ever be a good choice?\n\nA good choice compared to what?\n\n> Or should I be\n> looking at some other kind of tools? I wonder if OLAP tools would be\n> overkill for something that needs to look like a barebones version of\n> google analytics limited to our site..\n\nTypically you'd summarise the data by hour/day via triggers / a\nscheduled script if you weren't going towards a pre-packaged OLAP\ntoolkit. Otherwise you're going to have to scan the hundreds of millions\nof rows you've accumulated.\n\n> Appreciate any thoughts. If possible I would prefer to tone down any\n> requests for MySQL and such!\n\nI'm not sure MySQL is going to help you here - if you were running lots\nof small, simple queries it might make sense. If you want to aggregate\ndata by varying criteria I don't think there is any sensible\noptimisation (other than pre-calculating summaries).\n\n-- \n Richard Huxton\n Archonet Ltd\n", "msg_date": "Wed, 28 Jan 2009 14:25:41 +0000", "msg_from": "Richard Huxton <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PG performance in high volume environment (many INSERTs\n\tand lots of \taggregation reporting)" }, { "msg_contents": "On 1/28/09, Phoenix Kiula <[email protected]> wrote:\n> [Ppsted similar note to PG General but I suppose it's more appropriate\n> in this list. Apologies for cross-posting.]\n>\n> Hi. Further to my bafflement with the \"count(*)\" queries as described\n> in this thread:\n>\n> http://archives.postgresql.org/pgsql-general/2009-01/msg00804.php\n>\n> It seems that whenever this question has come up, Postgresql comes up\n> very short in terms of \"count(*)\" functions.\n>\n> The performance is always slow, because of the planner's need to guess\n> and such. I don't fully understand how the statistics work (and the\n> explanation on the PG website is way too geeky) but he columns I work\n> with already have a stat level of 100. Not helping at all.\n\nYour issue is not statistics/planner. 
postgres just can't apply the\nspecial case optimization that some other database do because of the\nlocking model.\n\nall planner's 'guess'. the main goal of statistics is to make the\nguess better educated.\n\n> We are now considering a web based logging functionality for users of\n> our website. This means the table could be heavily INSERTed into. We\n> get about 10 million hits a day, and I'm guessing that we will have to\n> keep this data around for a while.\n\n10m hits/day = 115 hits/sec. This is no problem for even workstation\nbox assuming your disks can handle the syncs. however, with extreme\ninsert heavy loads it helps alot to look at partitioning/rotation to\nease the pain of big deletes.\n\nmerlin\n", "msg_date": "Wed, 28 Jan 2009 12:55:16 -0500", "msg_from": "Merlin Moncure <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PG performance in high volume environment (many INSERTs\n\tand lots of aggregation reporting)" }, { "msg_contents": "[email protected] (Phoenix Kiula) writes:\n> [Ppsted similar note to PG General but I suppose it's more appropriate\n> in this list. Apologies for cross-posting.]\n>\n> Hi. Further to my bafflement with the \"count(*)\" queries as described\n> in this thread:\n>\n> http://archives.postgresql.org/pgsql-general/2009-01/msg00804.php\n>\n> It seems that whenever this question has come up, Postgresql comes up\n> very short in terms of \"count(*)\" functions.\n>\n> The performance is always slow, because of the planner's need to guess\n> and such. I don't fully understand how the statistics work (and the\n> explanation on the PG website is way too geeky) but he columns I work\n> with already have a stat level of 100. Not helping at all.\n\nThat's definitely *NOT* due to \"planner's need to guess\"; it's due to\nthere being some *specific* work that PostgreSQL needs to do that some\nother databases can avoid due to different storage strategies.\n\nThe matter is quite succinctly described here:\nhttp://wiki.postgresql.org/wiki/Why_PostgreSQL_Instead_of_MySQL:_Comparing_Reliability_and_Speed_in_2007#Counting_rows_in_a_table\n\nI'll just take one excerpt:\n---------------------------\nIt is worth observing that it is only this precise form of aggregate\nthat must be so pessimistic; if augmented with a \"WHERE\" clause like\n\nSELECT COUNT(*) FROM table WHERE status = 'something'\n\nPostgreSQL, MySQL, and most other database implementations will take\nadvantage of available indexes against the restricted field(s) to\nlimit how many records must be counted, which can greatly accelerate\nsuch queries.\n---------------------------\n\nIt is common for systems where it is necessary for aggregation\nreporting to be fast to do pre-computation of the aggregates, and that\nis in no way specific to PostgreSQL.\n\nIf you need *really* fast aggregates, then it will be worthwhile to\nput together triggers or procedures or something of the sort to help\npre-compute the aggregates.\n-- \n(reverse (concatenate 'string \"ofni.sesabatadxunil\" \"@\" \"enworbbc\"))\nhttp://linuxfinances.info/info/wp.html\n\"When you have eliminated the impossible, whatever remains, however\nimprobable, must be the truth.\" -- Sir Arthur Conan Doyle (1859-1930),\nEnglish author. Sherlock Holmes, in The Sign of Four, ch. 
6 (1889).\n[...but see the Holmesian Fallacy, due to Bob Frankston...\n<http://www.frankston.com/public/Essays/Holmesian%20Fallacy.asp>]\n", "msg_date": "Thu, 29 Jan 2009 15:56:47 -0500", "msg_from": "Chris Browne <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PG performance in high volume environment (many INSERTs and lots\n\tof aggregation reporting)" }, { "msg_contents": "On Thu, Jan 29, 2009 at 1:56 PM, Chris Browne <[email protected]> wrote:\n>\n> It is common for systems where it is necessary for aggregation\n> reporting to be fast to do pre-computation of the aggregates, and that\n> is in no way specific to PostgreSQL.\n>\n> If you need *really* fast aggregates, then it will be worthwhile to\n> put together triggers or procedures or something of the sort to help\n> pre-compute the aggregates.\n\nJust to add to this, at me last employer in Chicago, we had a database\nfrom a very large database company who's CEO makes more than all the\npeople on this mailing list combined that shall not be named for\nreasons like I don't want to be sued. This database had a large\nstatistical dataset we replicated over to pgsql on a by the minute\nbasis so we could run big ugly queries anytime we felt like it without\nblowing out the production database.\n\nAt night, or by hand, I would run such queries as select count(*) from\nreallyreallyreallybigstatstable on it and compare it to postgresql.\nPostgreSQL would take about 4 or 5 minutes to run this on a local\nserver running a software RAID-10 4 disc set on a single core P-4 Dell\nworkstation, and the really really big server in production took about\n15 to 20 seconds.\n\nOur local test server that ran the same really big database that\ncannot be named and had a 16 disk RAID-6 array with gigs of memory and\n4 cpu cores, took about 45 seconds to a minute to run the same select\ncount(*) query.\n\nAll of the machines showed high CPU and moderate I/O usage while\nrunning said query.\n\nSo, there's probably some room for improvement in pgsql's way of doing\nthings, but it's not like the other database software was providing\ninstantaneous answers. Basically, the second that a database server\nbecomes fast at running lots of update / select queries in a mixed\nenvironment, things like fast select count(*) get slower.\n\nTo the OP: Try running 100 transactional clients against mysql\n(updates/inserts/deletes/selects) while running a select count(*) and\nsee how it behaves. Single thread use cases are kind of uninteresting\ncompared to lotsa users. But if single thread use cases are your\nbread and butter, then pgsql is possibly a poor choice of db.\n", "msg_date": "Fri, 30 Jan 2009 00:53:37 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: PG performance in high volume environment (many INSERTs\n\tand lots of aggregation reporting)" } ]
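A minimal sketch of the trigger-maintained summary table suggested in the replies above. Every object name here (hits, hits_daily, hit_time, product_id) is an illustrative assumption rather than something from the thread, and the plain update-then-insert can collide if two sessions insert the first hit for the same (day, product) at once; a production version would trap the unique_violation, or batch the rollup from a log every few minutes as suggested above instead of paying a trigger per row.

CREATE TABLE hits (
    hit_time   timestamptz NOT NULL DEFAULT now(),
    product_id integer     NOT NULL
    -- plus ip, country of origin, and whatever else gets logged
);

CREATE TABLE hits_daily (
    day        date    NOT NULL,
    product_id integer NOT NULL,
    hit_count  bigint  NOT NULL DEFAULT 0,
    PRIMARY KEY (day, product_id)
);

CREATE OR REPLACE FUNCTION hits_rollup() RETURNS trigger AS $$
BEGIN
    -- bump the counter for this day/product; create the row if it is missing
    UPDATE hits_daily
       SET hit_count = hit_count + 1
     WHERE day = NEW.hit_time::date
       AND product_id = NEW.product_id;
    IF NOT FOUND THEN
        INSERT INTO hits_daily (day, product_id, hit_count)
        VALUES (NEW.hit_time::date, NEW.product_id, 1);
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

CREATE TRIGGER hits_rollup_trg
    AFTER INSERT ON hits
    FOR EACH ROW EXECUTE PROCEDURE hits_rollup();

-- reporting then reads the small summary table instead of the raw hits:
-- SELECT day, sum(hit_count) FROM hits_daily GROUP BY day ORDER BY day;

With ten million inserts a day the per-row trigger adds contention on hot counter rows, so the scheduled rollup or flat-file post-processing described above may scale better; the trigger version is shown only because it is the simplest to reason about.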
[ { "msg_contents": "2 questions:\n\n1) Different costs for same actions. Doing an explain on 2 nearly identical\nqueries both involving the same Index scan on same table has 2 widely\ndifferent costs for same Index scan 303375872.86 vs. 12576.70\n\n2) Simple query using NOT IN (subquery)was killed after 2 hrs, using the\nsame query (query) except (query) ran in < 2 sec.\n\nSummary:\n\nOn devel box (Unix PG version 8.3.5) with no other database activity or\nsystem activity after immediately completing a vacuum analyze.\n\nThe original query (below) was running for over 2 hrs and was killed.:\n\nselect distinct ciknum into tmpnocikinowner from cik where ciknum not in\n(select cik from owner_cik_master);\n\nest total cost: 303375872.86, for Index Scan: 303375616.75\n\nSimple query broken down: explain select distinct ciknum into\ntmpnocikinowner from cik ;\n\nest total cost: 12576.70, for Index Scan: 12064.49\n\nand\n\nselect cik from owner_cik_master\n\nest total cost: 2587.36, for Index Scan: N/A\n\nActual time, the query was killed after 2hrs,\n\nHowever, we ran:\n\nselect distinct ciknum into tmpnocikinowner from cik ; - actual time 861.487\nms\n (select ciknum from tmpnocikinowner) except (select cik from\nowner_cik_master); - actual time 1328.094 ms\n\n\n\n##### Console log below with details ######\n\ndevel=# explain select distinct ciknum into tmpnocikinowner from cik where\nciknum not in (select cik from owner_cik_master);\n QUERY\nPLAN\n------------------------------------------------------------------------------------------------\n Unique (cost=3506.21..303375872.86 rows=71946 width=8)\n -> Index Scan using cik_ciknum_idx on cik (cost=3506.21..303375616.75\nrows=102444 width=8)\n Filter: (NOT (subplan))\n SubPlan\n -> Materialize (cost=3506.21..6002.40 rows=186019 width=4)\n -> Seq Scan on owner_cik_master (cost=0.00..2684.19\nrows=186019 width=4)\n(6 rows)\n\nTime: 0.723 ms\ndevel=# explain select ciknum into tmpnocikinowner from cik where ciknum not\nin (select cik from owner_cik_master);\n QUERY\nPLAN\n--------------------------------------------------------------------------------------\n Seq Scan on cik (cost=3506.21..303367660.13 rows=102444 width=8)\n Filter: (NOT (subplan))\n SubPlan\n -> Materialize (cost=3506.21..6002.40 rows=186019 width=4)\n -> Seq Scan on owner_cik_master (cost=0.00..2684.19 rows=186019\nwidth=4)\n(5 rows)\n\nTime: 0.588 ms\ndevel=# explain select ciknum::int into tmpnocikinowner from cik where\nciknum::int not in (select cik::int from owner_cik_master);\n QUERY\nPLAN\n--------------------------------------------------------------------------------------\n Seq Scan on cik (cost=3506.21..303368428.46 rows=102444 width=8)\n Filter: (NOT (subplan))\n SubPlan\n -> Materialize (cost=3506.21..6002.40 rows=186019 width=4)\n -> Seq Scan on owner_cik_master (cost=0.00..2684.19 rows=186019\nwidth=4)\n(5 rows)\n\nTime: 0.918 ms\ndevel=# explain select ciknum into tmpnocikinowner from cik\n;\nQUERY PLAN\n-----------------------------------------------------------\n Seq Scan on cik (cost=0.00..4107.87 rows=204887 width=8)\n(1 row)\n\nTime: 0.438 ms\ndevel=# explain select distinct ciknum into tmpnocikinowner from cik ;\n QUERY\nPLAN\n-----------------------------------------------------------------------------------------\n Unique (cost=0.00..12576.70 rows=143891 width=8)\n -> Index Scan using cik_ciknum_idx on cik (cost=0.00..12064.49\nrows=204887 width=8)\n(2 rows)\n\nTime: 0.468 ms\ndevel=# select distinct ciknum into tmpnocikinowner from cik ;\nSELECT\nTime: 
861.487 ms\n\ndevel=# explain select ciknum from tmpnocikinowner where ciknum not in\n(select cik from owner_cik_master);\n QUERY\nPLAN\n--------------------------------------------------------------------------------------\n Seq Scan on tmpnocikinowner (cost=3506.21..261092922.31 rows=88168\nwidth=8)\n Filter: (NOT (subplan))\n SubPlan\n -> Materialize (cost=3506.21..6002.40 rows=186019 width=4)\n -> Seq Scan on owner_cik_master (cost=0.00..2684.19 rows=186019\nwidth=4)\n(5 rows)\n\nTime: 0.629 ms\n\ndevel=# explain select cik from owner_cik_master;\n QUERY PLAN\n------------------------------------------------------------------------\n Seq Scan on owner_cik_master (cost=0.00..2684.19 rows=186019 width=4)\n(1 row)\n\nTime: 0.415 ms\ndevel=# explain select ciknum from tmpnocikinowner;;\n QUERY PLAN\n-----------------------------------------------------------------------\n Seq Scan on tmpnocikinowner (cost=0.00..2587.36 rows=176336 width=8)\n(1 row)\n\nTime: 0.413 ms\ndevel=# explain (select ciknum from tmpnocikinowner) except (select cik\nfrom owner_cik_master);\n QUERY\nPLAN\n------------------------------------------------------------------------------------------------\n SetOp Except (cost=47309.23..49121.00 rows=36236 width=8)\n -> Sort (cost=47309.23..48215.12 rows=362355 width=8)\n Sort Key: \"*SELECT* 1\".ciknum\n -> Append (cost=0.00..8895.10 rows=362355 width=8)\n -> Subquery Scan \"*SELECT* 1\" (cost=0.00..4350.72\nrows=176336 width=8)\n -> Seq Scan on tmpnocikinowner (cost=0.00..2587.36\nrows=176336 width=8)\n -> Subquery Scan \"*SELECT* 2\" (cost=0.00..4544.38\nrows=186019 width=4)\n -> Seq Scan on owner_cik_master (cost=0.00..2684.19\nrows=186019 width=4)\n(8 rows)\n\nTime: 0.625 ms\ndevel=# (select ciknum from tmpnocikinowner) except (select cik from\nowner_cik_master);\n ciknum\n--------\n(0 rows)\n\nTime: 1328.094 ms\n\n2 questions:\n1) Different costs for same actions. Doing an explain on 2 nearly identical queries both involving the same Index scan on same table has 2 widely different costs for same Index scan  303375872.86 vs. 
12576.70\n2) Simple query using NOT IN (subquery)was killed after 2 hrs, using the same query (query) except (query) ran in < 2 sec.\nSummary:\nOn devel box (Unix PG version 8.3.5) with no other database activity or system activity after immediately completing a vacuum analyze.\nThe original query (below) was running for over 2 hrs and was killed.:  \nselect distinct ciknum into tmpnocikinowner from cik where ciknum not in (select cik from owner_cik_master);\nest total cost: 303375872.86, for Index Scan: 303375616.75\nSimple query broken down: explain select distinct ciknum into tmpnocikinowner from cik ;\nest total cost: 12576.70, for Index Scan: 12064.49\nand \nselect cik from owner_cik_master\nest total cost: 2587.36, for Index Scan: N/A\nActual time, the query was killed after 2hrs, \nHowever, we ran:\nselect distinct ciknum into tmpnocikinowner from cik ; - actual time 861.487 ms (select ciknum from tmpnocikinowner)  except (select cik from owner_cik_master);  - actual time 1328.094 ms   ##### Console log below with details ######\ndevel=# explain select distinct ciknum into tmpnocikinowner from cik where ciknum not in (select cik from owner_cik_master);                                           QUERY PLAN                                           \n------------------------------------------------------------------------------------------------ Unique  (cost=3506.21..303375872.86 rows=71946 width=8)   ->  Index Scan using cik_ciknum_idx on cik  (cost=3506.21..303375616.75 rows=102444 width=8)\n         Filter: (NOT (subplan))         SubPlan           ->  Materialize  (cost=3506.21..6002.40 rows=186019 width=4)                 ->  Seq Scan on owner_cik_master  (cost=0.00..2684.19 rows=186019 width=4)\n(6 rows)\nTime: 0.723 msdevel=# explain select ciknum into tmpnocikinowner from cik where ciknum not in (select cik from owner_cik_master);                                               QUERY PLAN                                      \n-------------------------------------------------------------------------------------- Seq Scan on cik  (cost=3506.21..303367660.13 rows=102444 width=8)   Filter: (NOT (subplan))   SubPlan     ->  Materialize  (cost=3506.21..6002.40 rows=186019 width=4)\n           ->  Seq Scan on owner_cik_master  (cost=0.00..2684.19 rows=186019 width=4)(5 rows)\nTime: 0.588 msdevel=# explain select ciknum::int into tmpnocikinowner from cik where ciknum::int not in (select cik::int from owner_cik_master);                                      QUERY PLAN                                      \n-------------------------------------------------------------------------------------- Seq Scan on cik  (cost=3506.21..303368428.46 rows=102444 width=8)   Filter: (NOT (subplan))   SubPlan     ->  Materialize  (cost=3506.21..6002.40 rows=186019 width=4)\n           ->  Seq Scan on owner_cik_master  (cost=0.00..2684.19 rows=186019 width=4)(5 rows)\nTime: 0.918 msdevel=# explain select ciknum into tmpnocikinowner from cik ;                                                                                         QUERY PLAN                         -----------------------------------------------------------\n Seq Scan on cik  (cost=0.00..4107.87 rows=204887 width=8)(1 row)\nTime: 0.438 msdevel=# explain select distinct ciknum into tmpnocikinowner from cik ;                                       QUERY PLAN                                        -----------------------------------------------------------------------------------------\n Unique  (cost=0.00..12576.70 rows=143891 width=8)   ->  
Index Scan using cik_ciknum_idx on cik  (cost=0.00..12064.49 rows=204887 width=8)(2 rows)\nTime: 0.468 msdevel=#  select distinct ciknum into tmpnocikinowner from cik ;       SELECTTime: 861.487 ms\ndevel=# explain select ciknum from tmpnocikinowner  where ciknum not in (select cik from owner_cik_master);                                      QUERY PLAN                                      --------------------------------------------------------------------------------------\n Seq Scan on tmpnocikinowner  (cost=3506.21..261092922.31 rows=88168 width=8)   Filter: (NOT (subplan))   SubPlan     ->  Materialize  (cost=3506.21..6002.40 rows=186019 width=4)           ->  Seq Scan on owner_cik_master  (cost=0.00..2684.19 rows=186019 width=4)\n(5 rows)\nTime: 0.629 ms\ndevel=# explain select cik from owner_cik_master;                               QUERY PLAN                               ------------------------------------------------------------------------ Seq Scan on owner_cik_master  (cost=0.00..2684.19 rows=186019 width=4)\n(1 row)\nTime: 0.415 msdevel=# explain select ciknum from tmpnocikinowner;;                              QUERY PLAN                               -----------------------------------------------------------------------\n Seq Scan on tmpnocikinowner  (cost=0.00..2587.36 rows=176336 width=8)(1 row)\nTime: 0.413 msdevel=# explain (select ciknum from tmpnocikinowner)  except (select cik from owner_cik_master);                                                               QUERY PLAN                                           \n------------------------------------------------------------------------------------------------ SetOp Except  (cost=47309.23..49121.00 rows=36236 width=8)   ->  Sort  (cost=47309.23..48215.12 rows=362355 width=8)\n         Sort Key: \"*SELECT* 1\".ciknum         ->  Append  (cost=0.00..8895.10 rows=362355 width=8)               ->  Subquery Scan \"*SELECT* 1\"  (cost=0.00..4350.72 rows=176336 width=8)\n                     ->  Seq Scan on tmpnocikinowner  (cost=0.00..2587.36 rows=176336 width=8)               ->  Subquery Scan \"*SELECT* 2\"  (cost=0.00..4544.38 rows=186019 width=4)                     ->  Seq Scan on owner_cik_master  (cost=0.00..2684.19 rows=186019 width=4)\n(8 rows)\nTime: 0.625 msdevel=#  (select ciknum from tmpnocikinowner)  except (select cik from owner_cik_master);        ciknum --------(0 rows)\nTime: 1328.094 ms", "msg_date": "Wed, 28 Jan 2009 23:01:34 -0800", "msg_from": "Kevin Traster <[email protected]>", "msg_from_op": true, "msg_subject": "NOT IN >2hrs vs EXCEPT < 2 sec." }, { "msg_contents": "On Thu, Jan 29, 2009 at 12:01 AM, Kevin Traster <[email protected]> wrote:\n> 2 questions:\n>\n> 1) Different costs for same actions. Doing an explain on 2 nearly identical\n> queries both involving the same Index scan on same table has 2 widely\n> different costs for same Index scan 303375872.86 vs. 12576.70\n\nPretty sure this is a FAQ by now.\n\nnot in and except treat nulls differently. If you table has nullable\nfields and nulls would break your query, then not in () is a bad\nchoice. 
Therefore, effort to optimize had been placed into except,\nwhich is distinctly, symantically different from not in ().\n\nIt seems like some shift in the pg community has happened where we're\nsuddenly getting a lot of folks who came from a database where not in\nand except are treated the same, even though they most definitely do\nnot mean the same thing.\n", "msg_date": "Thu, 29 Jan 2009 00:37:20 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: NOT IN >2hrs vs EXCEPT < 2 sec." }, { "msg_contents": "On Wed, Jan 28, 2009 at 11:37 PM, Scott Marlowe <[email protected]>wrote:\n\n> On Thu, Jan 29, 2009 at 12:01 AM, Kevin Traster <[email protected]> wrote:\n> > 2 questions:\n> >\n> > 1) Different costs for same actions. Doing an explain on 2 nearly\n> identical\n> > queries both involving the same Index scan on same table has 2 widely\n> > different costs for same Index scan 303375872.86 vs. 12576.70\n>\n> Pretty sure this is a FAQ by now.\n>\n> not in and except treat nulls differently. If you table has nullable\n> fields and nulls would break your query, then not in () is a bad\n> choice. Therefore, effort to optimize had been placed into except,\n> which is distinctly, symantically different from not in ().\n>\n> It seems like some shift in the pg community has happened where we're\n> suddenly getting a lot of folks who came from a database where not in\n> and except are treated the same, even though they most definitely do\n> not mean the same thing.\n>\n\n\nOn Wed, Jan 28, 2009 at 11:37 PM, Scott Marlowe <[email protected]> wrote:\n\nOn Thu, Jan 29, 2009 at 12:01 AM, Kevin Traster <[email protected]> wrote:> 2 questions:>> 1) Different costs for same actions. Doing an explain on 2 nearly identical\n> queries both involving the same Index scan on same table has 2 widely> different costs for same Index scan  303375872.86 vs. 12576.70Pretty sure this is a FAQ by now.not in and except treat nulls differently.  If you table has nullable\nfields and nulls would break your query, then not in () is a badchoice.  Therefore, effort to optimize had been placed into except,which is distinctly, symantically different from not in ().It seems like some shift in the pg community has happened where we're\nsuddenly getting a lot of folks who came from a database where not inand except are treated the same, even though they most definitely donot mean the same thing.", "msg_date": "Wed, 28 Jan 2009 23:54:45 -0800", "msg_from": "Kevin Traster <[email protected]>", "msg_from_op": true, "msg_subject": "Re: NOT IN >2hrs vs EXCEPT < 2 sec." }, { "msg_contents": "On Wed, Jan 28, 2009 at 11:37 PM, Scott Marlowe <[email protected]>wrote:\n\n> On Thu, Jan 29, 2009 at 12:01 AM, Kevin Traster <[email protected]> wrote:\n> > 2 questions:\n> >\n> > 1) Different costs for same actions. Doing an explain on 2 nearly\n> identical\n> > queries both involving the same Index scan on same table has 2 widely\n> > different costs for same Index scan 303375872.86 vs. 12576.70\n>\n> Pretty sure this is a FAQ by now.\n>\n> not in and except treat nulls differently. If you table has nullable\n> fields and nulls would break your query, then not in () is a bad\n> choice. 
Therefore, effort to optimize had been placed into except,\n> which is distinctly, symantically different from not in ().\n>\n> It seems like some shift in the pg community has happened where we're\n> suddenly getting a lot of folks who came from a database where not in\n> and except are treated the same, even though they most definitely do\n> not mean the same thing.\n>\n\n\nUmm... No. The top of the post you quoted regards the difference between the\nquery \"get ciknum from cik\" versus get ciknum from cik where NOT IN.... The\nonly differene between the two queries is the qualification of \"where ciknum\nnot in ....\". It does not involve the difference between NOT IN versus\nExcept\n\nBoth queries do an Index Scan using cik_ciknum_idx and those numbers show\nthe different costs doing the same task.\n\nIn this case, neither table allowes nulls in the columns, both tables have\nsingle indexes on the columns used.\n\nRegarding the previous posts about the same issues of PERFORMENCE between\nNOT IN versus EXCEPT. There has not been any answer to explain it - just\ntalk about the differenences between the two results.\n\nYes, I can still get the results using EXCEPT but it would be nice to no why\nI can't get NOT IN to complete the simple query.\n\n\nOn Wed, Jan 28, 2009 at 11:37 PM, Scott Marlowe <[email protected]> wrote:\n\nOn Thu, Jan 29, 2009 at 12:01 AM, Kevin Traster <[email protected]> wrote:> 2 questions:>> 1) Different costs for same actions. Doing an explain on 2 nearly identical\n> queries both involving the same Index scan on same table has 2 widely> different costs for same Index scan  303375872.86 vs. 12576.70Pretty sure this is a FAQ by now.not in and except treat nulls differently.  If you table has nullable\nfields and nulls would break your query, then not in () is a badchoice.  Therefore, effort to optimize had been placed into except,which is distinctly, symantically different from not in ().It seems like some shift in the pg community has happened where we're\nsuddenly getting a lot of folks who came from a database where not inand except are treated the same, even though they most definitely donot mean the same thing.\n \n \nUmm... No. The top of the post you quoted regards the difference between the query \"get ciknum from cik\" versus get ciknum from cik where NOT IN.... The only differene between the two queries is the qualification of \"where ciknum not in ....\".  It does not involve the difference between NOT IN versus Except\n Both queries do an Index Scan using cik_ciknum_idx and those numbers show the different costs doing the same task.  In this case, neither table  allowes nulls in the columns, both tables have single indexes on the columns used.\n Regarding the previous posts about the same issues of PERFORMENCE between NOT IN versus EXCEPT. There has not been any answer to explain it - just talk about the differenences between the two results.  Yes, I can still get the results using EXCEPT but it would be nice to no why I can't get NOT IN to complete the simple query.", "msg_date": "Wed, 28 Jan 2009 23:56:15 -0800", "msg_from": "Kevin Traster <[email protected]>", "msg_from_op": true, "msg_subject": "Re: NOT IN >2hrs vs EXCEPT < 2 sec." }, { "msg_contents": "Kevin Traster <[email protected]> writes:\n\n> Regarding the previous posts about the same issues of PERFORMENCE between\n> NOT IN versus EXCEPT. 
There has not been any answer to explain it - just\n> talk about the differenences between the two results.\n>\n> Yes, I can still get the results using EXCEPT but it would be nice to no why\n> I can't get NOT IN to complete the simple query.\n\n\nThere are two answers here. One you've already been given, that NOT IN has to\nhandle NULLs specially and that makes these plans not equivalent. The NOT IN\nis decidedly harder to solve.\n\nThe other answer is that EXCEPT is a set operation which in Postgres uses a\ncompletely different set of logic. Even if you used NOT EXISTS which really is\nequivalent to EXCEPT the resulting plans would be different. Which one would\nbe better would depend on the circumstances. In an ideal world every\nequivalent query would generate identical plans. We don't live in an ideal\nworld and Postgres isn't perfect.\n\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's Slony Replication support!\n", "msg_date": "Thu, 29 Jan 2009 13:00:16 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: NOT IN >2hrs vs EXCEPT < 2 sec." }, { "msg_contents": "Kevin Traster <[email protected]> writes:\n> Unique (cost=3506.21..303375872.86 rows=71946 width=8)\n> -> Index Scan using cik_ciknum_idx on cik (cost=3506.21..303375616.75\n> rows=102444 width=8)\n> Filter: (NOT (subplan))\n> SubPlan\n> -> Materialize (cost=3506.21..6002.40 rows=186019 width=4)\n> -> Seq Scan on owner_cik_master (cost=0.00..2684.19\n> rows=186019 width=4)\n\nIt will help some if you raise work_mem enough so you get a \"hashed\nsubplan\" there, assuming the NOT IN is on a hashable datatype.\n\nBut as was already noted, more work has been put into optimizing\nEXCEPT and NOT EXISTS than NOT IN, because the latter is substantially\nless useful due to its unintuitive but spec-mandated handling of NULLs.\n(And this disparity will be even larger in 8.4.) We're not going to\napologize for that, and we're not going to regard it as a bug.\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 29 Jan 2009 09:56:41 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: NOT IN >2hrs vs EXCEPT < 2 sec. " } ]
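Two concrete variants of the advice in this thread, written against the tables from the original post (cik.ciknum and owner_cik_master.cik, which the poster says are indexed and not nullable). The work_mem figure is only an illustration of the hashed-subplan point above, and whether the planner actually hashes the subquery also depends on the datatype being hashable; run one variant or the other, since both create tmpnocikinowner.

-- Variant 1: NOT EXISTS, equivalent here because the compared columns
-- cannot be NULL; on 8.3 each outer row probes the index on
-- owner_cik_master(cik) instead of rescanning a materialized subplan.
SELECT DISTINCT c.ciknum
INTO tmpnocikinowner
FROM cik c
WHERE NOT EXISTS (SELECT 1
                  FROM owner_cik_master o
                  WHERE o.cik = c.ciknum);

-- Variant 2: keep NOT IN, but give the session enough work_mem that the
-- subquery result can be hashed rather than materialized and rescanned.
SET work_mem = '64MB';
SELECT DISTINCT ciknum
INTO tmpnocikinowner
FROM cik
WHERE ciknum NOT IN (SELECT cik FROM owner_cik_master);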
[ { "msg_contents": "Hi,\n \nIf I have a view like:\n \ncreate view X as (\nselect x from A\nunion all\nselect x from B)\n \nand do\n \nselect max(x) from X\n \nI get a plan like:\n \nAggregate\n Append\n Seq Scan on A\n Seq Scan on B\n \nIf A and B are indexed on x, I can get the result much faster as:\n \nselect max(x) from (\nselect max(x) from A\nunion all\nselect max(x) from B) X\n \nwith the plan:\n \nAggregate\n Append\n Result\n Limit\n Index Scan Backward using .. on A\n Result\n Limit\n Index Scan Backward using .. on B\n \nMy question is basically why the optimizer doesn't do this? Is it hard,\nor is it just something that hasn't been done yet?\nMy guess is that the second plan would always be as fast or faster than\nthe first one - even if A and B wasn't indexed?\n \nAnders\n \n\n\n\n\n\nHi,\n \nIf I have a view \nlike:\n \ncreate view X as \n(\nselect x from \nA\nunion \nall\nselect x from \nB)\n \nand \ndo\n \nselect max(x) from \nX\n \nI get a plan \nlike:\n \nAggregate\n  \nAppend\n    \nSeq Scan on A\n    \nSeq Scan on B\n \nIf A and B are \nindexed on x, I can get the result much faster as:\n \nselect max(x) from \n(\n\nselect max(x) from \nA\nunion \nall\nselect max(x) from \nB) X\n \nwith the \nplan:\n \nAggregate\n  \nAppend\n    \nResult\n      Limit\n        Index Scan Backward using .. \non A\n\n    \nResult\n      Limit\n        Index Scan Backward using .. \non B\n \nMy question is \nbasically why the optimizer doesn't do this? Is it hard, or is it just something \nthat hasn't been done yet?\nMy guess is that the \nsecond plan would always be as fast or faster than the first one - even if \nA and B wasn't indexed?\n \nAnders", "msg_date": "Thu, 29 Jan 2009 16:58:37 +0100", "msg_from": "<[email protected]>", "msg_from_op": true, "msg_subject": "Max on union" }, { "msg_contents": "On Thu, Jan 29, 2009 at 10:58 AM, <[email protected]> wrote:\n> Hi,\n>\n> If I have a view like:\n>\n> create view X as (\n> select x from A\n> union all\n> select x from B)\n>\n> and do\n>\n> select max(x) from X\n>\n> I get a plan like:\n>\n> Aggregate\n> Append\n> Seq Scan on A\n> Seq Scan on B\n>\n> If A and B are indexed on x, I can get the result much faster as:\n>\n> select max(x) from (\n> select max(x) from A\n> union all\n> select max(x) from B) X\n>\n> with the plan:\n>\n> Aggregate\n> Append\n> Result\n> Limit\n> Index Scan Backward using .. on A\n> Result\n> Limit\n> Index Scan Backward using .. on B\n>\n> My question is basically why the optimizer doesn't do this? Is it hard, or\n> is it just something that hasn't been done yet?\n> My guess is that the second plan would always be as fast or faster than the\n> first one - even if A and B wasn't indexed?\n\nWell, it's certainly not going to be faster without the index. You\ncan't very well do an index scan backward without an index.\n\nAs for why it doesn't do that, I don't think a huge amount of effort\nhas been put into optimizing the handling of appendrels. Patches are\nwelcome....\n\n...Robert\n", "msg_date": "Thu, 29 Jan 2009 15:10:01 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Max on union" } ]
[ { "msg_contents": "Hi All,\n \nI'm in the process of tuning a query that does a sort on a huge dataset.\nWith work_mem set to 2M, i see the sort operation spilling to disk\nwriting upto 430MB and then return the first 500 rows. Our query is of\nthe sort\n \nselect co1, col2... from table where col1 like 'aa%' order col1 limit\n500; It took 561Secs to complete. Looking at the execution plan 95% of\nthe time is spent on sort vs seq scan on the table.\n \nNow if set the work_mem to 500MB (i did this in a psql session without\nmaking it global) and ran the same query. One would think the sort\noperations would happen in memory and not spill to disk but i still see\n430MB written to disk however, the query complete time dropped down to\n351Secs. So work_mem did have an impact but wondering why its still\nwriting to disk when it can all do it memory.\n \nI appreciate if anyone can shed some light on this.\n \nThanks,\nStalin\n \nEnv: Sol 10, Pg 827 64bit.\n\n\n\n\n\nHi \nAll,\n \nI'm in the process \nof tuning a query that does a sort on a huge dataset. With work_mem set to 2M, i \nsee the sort operation spilling to disk writing upto 430MB and then return the \nfirst 500 rows. Our query is of the sort\n \nselect co1, col2... \nfrom table where col1 like 'aa%' order col1 limit 500; It took 561Secs to \ncomplete. Looking at the execution plan 95% of the time is spent on sort vs seq \nscan on the table.\n \nNow if set the \nwork_mem to 500MB (i did this in a psql session without making it global) and \nran the same query. One would think the sort operations would happen in memory \nand not spill to disk but i still see 430MB written to disk however, the query \ncomplete time dropped down to 351Secs. So work_mem did have an impact but \nwondering why its still writing to disk when it can all do it \nmemory.\n \nI appreciate if \nanyone can shed some light on this.\n \nThanks,\nStalin\n \nEnv: Sol 10, Pg 827 \n64bit.", "msg_date": "Thu, 29 Jan 2009 15:15:01 -0500", "msg_from": "\"Subbiah Stalin-XCGF84\" <[email protected]>", "msg_from_op": true, "msg_subject": "Sort performance" }, { "msg_contents": "On Thu, Jan 29, 2009 at 3:15 PM, Subbiah Stalin-XCGF84\n<[email protected]> wrote:\n> I'm in the process of tuning a query that does a sort on a huge dataset.\n> With work_mem set to 2M, i see the sort operation spilling to disk writing\n> upto 430MB and then return the first 500 rows. Our query is of the sort\n>\n> select co1, col2... from table where col1 like 'aa%' order col1 limit 500;\n> It took 561Secs to complete. Looking at the execution plan 95% of the time\n> is spent on sort vs seq scan on the table.\n>\n> Now if set the work_mem to 500MB (i did this in a psql session without\n> making it global) and ran the same query. One would think the sort\n> operations would happen in memory and not spill to disk but i still see\n> 430MB written to disk however, the query complete time dropped down to\n> 351Secs. 
So work_mem did have an impact but wondering why its still writing\n> to disk when it can all do it memory.\n>\n> I appreciate if anyone can shed some light on this.\n\nCan you send the EXPLAIN ANALYZE output?\n\nWhat happens if you set work_mem to something REALLY big, like 5GB?\n\n...Robert\n", "msg_date": "Thu, 29 Jan 2009 18:21:17 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Sort performance" }, { "msg_contents": "Here you go.\n\n Limit (cost=502843.44..502844.69 rows=501 width=618) (actual\ntime=561397.940..561429.242 rows=501 loops=1)\n -> Sort (cost=502843.44..503923.48 rows=432014 width=618) (actual\ntime=561397.934..561429.062 rows=501 loops=1)\n Sort Key: name\n -> Seq Scan on objects (cost=0.00..99157.88 rows=432014\nwidth=618) (actual time=0.172..22267.727 rows=649999 loops=1)\n Filter: (((domainid)::text = ANY\n(('{111,SmWCGiRp}'::character varying[])::text[])) AND ((\"type\")::text =\n'cpe'::text) AND (upper((name)::text) ~~ 'CPE1%'::text) AND\n(upper((name)::text) >= 'CPE1'::text) AND (upper((name)::text) <\n'CPE2'::text))\n Total runtime: 561429.915 ms\n(6 rows)\n\nI haven't tried setting that high number. I came up with 500M by\nmonitoring pgsql_tmp when sort operations were performed. It never went\nbeyond 450M. Once it reaches 450M it spends some cycles before I see the\noutput. I guess some sort of merge operation happens to get the first\n500 records out.\n\nThanks,\nStalin \n\n-----Original Message-----\nFrom: Robert Haas [mailto:[email protected]] \nSent: Thursday, January 29, 2009 3:21 PM\nTo: Subbiah Stalin-XCGF84\nCc: [email protected]\nSubject: Re: [PERFORM] Sort performance\n\nOn Thu, Jan 29, 2009 at 3:15 PM, Subbiah Stalin-XCGF84\n<[email protected]> wrote:\n> I'm in the process of tuning a query that does a sort on a huge\ndataset.\n> With work_mem set to 2M, i see the sort operation spilling to disk \n> writing upto 430MB and then return the first 500 rows. Our query is of\n\n> the sort\n>\n> select co1, col2... from table where col1 like 'aa%' order col1 limit \n> 500; It took 561Secs to complete. Looking at the execution plan 95% of\n\n> the time is spent on sort vs seq scan on the table.\n>\n> Now if set the work_mem to 500MB (i did this in a psql session without\n\n> making it global) and ran the same query. One would think the sort \n> operations would happen in memory and not spill to disk but i still \n> see 430MB written to disk however, the query complete time dropped \n> down to 351Secs. So work_mem did have an impact but wondering why its \n> still writing to disk when it can all do it memory.\n>\n> I appreciate if anyone can shed some light on this.\n\nCan you send the EXPLAIN ANALYZE output?\n\nWhat happens if you set work_mem to something REALLY big, like 5GB?\n\n...Robert\n", "msg_date": "Thu, 29 Jan 2009 18:29:19 -0500", "msg_from": "\"Subbiah Stalin-XCGF84\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Sort performance" }, { "msg_contents": "Robert Haas <[email protected]> writes:\n\n> On Thu, Jan 29, 2009 at 3:15 PM, Subbiah Stalin-XCGF84\n> <[email protected]> wrote:\n>>\n>> i see the sort operation spilling to disk writing upto 430MB and then\n>> return the first 500 rows. Our query is of the sort\n>>\n>> Now if set the work_mem to 500MB (i did this in a psql session without\n>> making it global) and ran the same query. 
One would think the sort\n>> operations would happen in memory and not spill to disk but i still see\n>> 430MB written to disk however, the query complete time dropped down to\n>> 351Secs. So work_mem did have an impact but wondering why its still writing\n>> to disk when it can all do it memory.\n\nThe on-disk storage is more compact than the in-memory storage so you actually\nneed a larger value than the space reported for on-disk storage to avoid the\ndisk sort entirely. The accounting also isn't perfect; the on-disk sort still\nuses some ram, for example.\n\n> What happens if you set work_mem to something REALLY big, like 5GB?\n\nDon't set it larger than the available RAM though -- or you'll quite possibly\nget an out-of-error error.\n\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's PostGIS support!\n", "msg_date": "Thu, 29 Jan 2009 23:35:44 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Sort performance" }, { "msg_contents": "Thanks Greg. You were right. If I set my sort_mem to 1G (yes I have\nloads of memory, only for testing purpose), then I don't see any thing\nwritten to disk. So in-memory require more memory than reported on-disk\nstorage.\n\nStalin \n\n-----Original Message-----\nFrom: Greg Stark [mailto:[email protected]] On Behalf Of\nGregory Stark\nSent: Thursday, January 29, 2009 3:36 PM\nTo: Robert Haas\nCc: Subbiah Stalin-XCGF84; [email protected]\nSubject: Re: Sort performance\n\nRobert Haas <[email protected]> writes:\n\n> On Thu, Jan 29, 2009 at 3:15 PM, Subbiah Stalin-XCGF84 \n> <[email protected]> wrote:\n>>\n>> i see the sort operation spilling to disk writing upto 430MB and then\n\n>> return the first 500 rows. Our query is of the sort\n>>\n>> Now if set the work_mem to 500MB (i did this in a psql session \n>> without making it global) and ran the same query. One would think the\n\n>> sort operations would happen in memory and not spill to disk but i \n>> still see 430MB written to disk however, the query complete time \n>> dropped down to 351Secs. So work_mem did have an impact but wondering\n\n>> why its still writing to disk when it can all do it memory.\n\nThe on-disk storage is more compact than the in-memory storage so you\nactually need a larger value than the space reported for on-disk storage\nto avoid the disk sort entirely. The accounting also isn't perfect; the\non-disk sort still uses some ram, for example.\n\n> What happens if you set work_mem to something REALLY big, like 5GB?\n\nDon't set it larger than the available RAM though -- or you'll quite\npossibly get an out-of-error error.\n\n\n--\n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's PostGIS support!\n", "msg_date": "Thu, 29 Jan 2009 18:58:04 -0500", "msg_from": "\"Subbiah Stalin-XCGF84\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Sort performance" } ]
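A small sketch of the experiment described above. The table and column come from the EXPLAIN posted in this thread (objects, name), but the predicate is simplified and the 1GB figure is just the value that made the sort fit in memory here. Since the poster is on 8.2.7, the "Sort Method" line that newer EXPLAIN ANALYZE output prints is not available; trace_sort (present in standard builds) is one way to see whether the sort stayed internal.

-- Session-local settings; they do not change the server-wide defaults.
SET work_mem = '1GB';          -- per sort, per backend: only for a controlled session
SET trace_sort = on;           -- logs whether each sort ran internally or spilled
SET client_min_messages = log; -- so the LOG lines are visible in psql

EXPLAIN ANALYZE
SELECT *
FROM objects
WHERE upper(name) LIKE 'CPE1%'
ORDER BY name
LIMIT 500;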
[ { "msg_contents": "Hi,When I try to restore a database dump on PostgreSQL 8.3 that's approximately 130GB in size and takes about 1 hour, I noticed index creation makes up the bulk of that time. I'm using a very fast I/O subsystem (16 Mtron Pro 7535 SSDs using a dual 1.2Ghz IOP/4GB cache RAID controller), fast CPUs (2 quad core C2Q's at 2.6Ghz) and 32GB RAM. From monitoring the restore process, I learned that only 10 minutes is spend doing IO, while the rest of the time is spend on creating the indexes. Index creation seems to be completely CPU bound.The problem is that only 1 CPU core is used. My other 7 cores are just sitting there doing nothing. It seems to me that creating each index, especially for different tables, is something that can be done independently.Is there some way I can let PostgreSQL use multiple cores for creating the indexes?Thanks in advance\n_________________________________________________________________\nExpress yourself instantly with MSN Messenger! Download today it's FREE!\nhttp://messenger.msn.click-url.com/go/onm00200471ave/direct/01/\n\n\n\n\n\nHi,When I try to restore a database dump on PostgreSQL 8.3 that's approximately 130GB in size and takes about 1 hour, I noticed index creation makes up the bulk of that time. I'm using a very fast I/O subsystem (16 Mtron Pro 7535 SSDs using a dual 1.2Ghz IOP/4GB cache RAID controller), fast CPUs (2 quad core C2Q's at 2.6Ghz) and 32GB RAM. From monitoring the restore process, I learned that only 10 minutes is spend doing IO, while the rest of the time is spend on creating the indexes. Index creation seems to be completely CPU bound.The problem is that only 1 CPU core is used. My other 7 cores are just sitting there doing nothing. It seems to me that creating each index, especially for different tables, is something that can be done independently.Is there some way I can let PostgreSQL use multiple cores for creating the indexes?Thanks in advanceExpress yourself instantly with MSN Messenger! MSN Messenger", "msg_date": "Thu, 29 Jan 2009 21:21:30 +0100", "msg_from": "henk de wit <[email protected]>", "msg_from_op": true, "msg_subject": "Using multiple cores for index creation?" }, { "msg_contents": "On Thu, Jan 29, 2009 at 3:21 PM, henk de wit <[email protected]> wrote:\n> Hi,\n> When I try to restore a database dump on PostgreSQL 8.3\n> that's approximately 130GB in size and takes about 1 hour, I noticed index\n> creation makes up the bulk of that time. I'm using a very fast I/O subsystem\n> (16 Mtron Pro 7535 SSDs using a dual 1.2Ghz IOP/4GB cache RAID controller),\n> fast CPUs (2 quad core C2Q's at 2.6Ghz) and 32GB RAM. From monitoring the\n> restore process, I learned that only 10 minutes is spend doing IO, while the\n> rest of the time is spend on creating the indexes. Index creation seems to\n> be completely CPU bound.\n> The problem is that only 1 CPU core is used. My other 7 cores are just\n> sitting there doing nothing. It seems to me that creating each index,\n> especially for different tables, is something that can be done\n> independently.\n> Is there some way I can let PostgreSQL use multiple cores for creating the\n> indexes?\n\nAndrew Dunstan has been working on this problem. His latest parallel\nrestore patch can be found here:\n\nhttp://archives.postgresql.org/message-id/[email protected]\n\n...Robert\n", "msg_date": "Thu, 29 Jan 2009 18:09:53 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Using multiple cores for index creation?" 
}, { "msg_contents": "On Thu, 2009-01-29 at 18:09 -0500, Robert Haas wrote:\n> On Thu, Jan 29, 2009 at 3:21 PM, henk de wit <[email protected]> wrote:\n> > Hi,\n> > When I try to restore a database dump on PostgreSQL 8.3\n> > that's approximately 130GB in size and takes about 1 hour, I noticed index\n> > creation makes up the bulk of that time. I'm using a very fast I/O subsystem\n> > (16 Mtron Pro 7535 SSDs using a dual 1.2Ghz IOP/4GB cache RAID controller),\n> > fast CPUs (2 quad core C2Q's at 2.6Ghz) and 32GB RAM. From monitoring the\n> > restore process, I learned that only 10 minutes is spend doing IO, while the\n> > rest of the time is spend on creating the indexes. Index creation seems to\n> > be completely CPU bound.\n> > The problem is that only 1 CPU core is used. My other 7 cores are just\n> > sitting there doing nothing. It seems to me that creating each index,\n> > especially for different tables, is something that can be done\n> > independently.\n> > Is there some way I can let PostgreSQL use multiple cores for creating the\n> > indexes?\n> \n> Andrew Dunstan has been working on this problem. His latest parallel\n> restore patch can be found here:\n> \n> http://archives.postgresql.org/message-id/[email protected]\n\n\nYeah but that isn't useful for 8.3. What can be done in this specific\nsituation is to make sure you dump with the -Fc option. You can then\npull a TOC out with pg_restore and break that appart. Reading the TOC is\npretty self evident. Once you get down to index creation you can create\nmultiple files each with a group of indexes to create. Then call\npg_restore multiple times in a script against the individual TOC and you\nwill use all cores.\n\nJoshua D. Drake\n\nP.S. Increase maintenance_work_mem can help too\n\n\n> \n> ...Robert\n> \n-- \nPostgreSQL - XMPP: [email protected]\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Thu, 29 Jan 2009 15:19:42 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Using multiple cores for index creation?" }, { "msg_contents": "Hi,> You can then> pull a TOC out with pg_restore and break that appart. Reading the TOC is> pretty self evident. Once you get down to index creation you can create> multiple files each with a group of indexes to create. Then call> pg_restore multiple times in a script against the individual TOC and you> will use all cores.I figured something like this would be possible. Thanks for the explanation. Ultimately I'm looking for something more automatic though. Not that I personally mind doing the above, but when an emergency restore is needed for some production server manually editing a dump is probably not the safest course of action ;)It sounds like something though that a tool could do automatically. The mentioned patch sounds interesting too, is there anything known about whether this patch will make it into the main stream Postgres source? I guess it's too late for inclusion in PostgreSQL 8.4, but 8.5 perhaps?Kind regards\n_________________________________________________________________\nWhat can you do with the new Windows Live? Find out\nhttp://www.microsoft.com/windows/windowslive/default.aspx\n\n\n\n\n\nHi,> You can then> pull a TOC out with pg_restore and break that appart. Reading the TOC is> pretty self evident. Once you get down to index creation you can create> multiple files each with a group of indexes to create. 
Then call> pg_restore multiple times in a script against the individual TOC and you> will use all cores.I figured something like this would be possible. Thanks for the explanation. Ultimately I'm looking for something more automatic though. Not that I personally mind doing the above, but when an emergency restore is needed for some production server manually editing a dump is probably not the safest course of action ;)It sounds like something though that a tool could do automatically. The mentioned patch sounds interesting too, is there anything known about whether this patch will make it into the main stream Postgres source? I guess it's too late for inclusion in PostgreSQL 8.4, but 8.5 perhaps?Kind regardsWhat can you do with the new Windows Live? Find out", "msg_date": "Fri, 30 Jan 2009 01:02:55 +0100", "msg_from": "henk de wit <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Using multiple cores for index creation?" }, { "msg_contents": "\"Joshua D. Drake\" <[email protected]> writes:\n> On Thu, 2009-01-29 at 18:09 -0500, Robert Haas wrote:\n>> Andrew Dunstan has been working on this problem. His latest parallel\n>> restore patch can be found here:\n>> \n>> http://archives.postgresql.org/message-id/[email protected]\n\n> Yeah but that isn't useful for 8.3.\n\nSure it is. Andrew has made a point of making sure that the improved\nversion of pg_restore can work against older servers (not sure how far\nback, but it's definitely supposed to work with 8.3).\n\n> What can be done in this specific\n> situation is to make sure you dump with the -Fc option...\n\nYou're essentially proposing a manual reimplementation of Andrew's\npatch ...\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 29 Jan 2009 20:25:16 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Using multiple cores for index creation? " }, { "msg_contents": "It may not be that possible with your schema, but when I had to restore a 2.5TB database (with plenty fast I/O, it was never I/O bound) I used 3 or 4 copies of pg_restore operating on different tables.\n\nWith the -Fc option, like a plain dump you can have it restore just certain schemas or tables per command. A bit of manual work but you don't have to modify the dump file(s), just make various differing commands that operate on different sections of the database. How hard that is depend on the schema. In my case, we have most of the data in partitioned tables and can use a regex to peel off chunks of them by date to restore in different processes once the parent tables and schema are in place.\n\nIt still took all day though, and it wasn't I/O bound.\n\nOn 1/29/09 12:21 PM, \"henk de wit\" <[email protected]> wrote:\n\nHi,\n\nWhen I try to restore a database dump on PostgreSQL 8.3 that's approximately 130GB in size and takes about 1 hour, I noticed index creation makes up the bulk of that time. I'm using a very fast I/O subsystem (16 Mtron Pro 7535 SSDs using a dual 1.2Ghz IOP/4GB cache RAID controller), fast CPUs (2 quad core C2Q's at 2.6Ghz) and 32GB RAM. From monitoring the restore process, I learned that only 10 minutes is spend doing IO, while the rest of the time is spend on creating the indexes. Index creation seems to be completely CPU bound.\n\nThe problem is that only 1 CPU core is used. My other 7 cores are just sitting there doing nothing. 
It seems to me that creating each index, especially for different tables, is something that can be done independently.\n\nIs there some way I can let PostgreSQL use multiple cores for creating the indexes?\n\nThanks in advance\n", "msg_date": "Fri, 30 Jan 2009 12:04:05 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Using multiple cores for index creation?" } ]
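
A rough sketch of the TOC-splitting approach Joshua Drake describes above, for a pre-8.4 pg_restore. The dump name (db.dump), database name (mydb), and the 20-entries-per-list split size are placeholders, and the grep patterns assume the default TOC line layout:

    # dump in custom format so a TOC can be extracted
    pg_dump -Fc -f db.dump mydb

    # write out the table of contents
    pg_restore -l db.dump > full.toc

    # restore everything except the index entries first
    grep -v ' INDEX ' full.toc > pre.toc
    pg_restore -L pre.toc -d mydb db.dump

    # split the index entries across several lists and restore them in parallel
    grep ' INDEX ' full.toc | split -l 20 - idx.toc.
    for f in idx.toc.*; do
        pg_restore -L "$f" -d mydb db.dump &
    done
    wait

Raising maintenance_work_mem for those sessions, as suggested above, also speeds up each individual index build.
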
[ { "msg_contents": ">[email protected] writes:\n>> the poster who started this thread had a query where the parsing phase\n>> took significantly longer than the planning stage.\n\n> That was an anecdote utterly unsupported by evidence.\n regards, tom lane\n\nThe issue of prepared statements having atrocious query plans has hit me again. I feel very strongly about this topic and the need for Postgres to have an option that allows for prepared statements to re-plan based on the inputs that works and is user friendly. Pardon my bluntness, but in the current situation the system is brain dead in many important use cases and data sets.\n\nI believe my statement referenced above was about parsing time to the remaining time, not parsing compared to planning. But either way, its a minor detail, and its not important to justify the need for the enhancement here.\nYeah, its anecdotal from your perspective. Go ahead and ignore all that if you wish.\n**** I am making several points in this message that are independent of such evidence, IMHO.\nI have tried to rearrange this message so that the anecdotal narrative is at the end, after the first dashed line.\n\n\nUnnamed prepared statements do solve much of the problem in theory, since the most common issue is typically poor execution plans or a lack of ability to cleanly deal with SQL injection and write less bug prone client code. Parsing being very expensive is more rare. But there IS a performance savings that is not insignificant for many workloads to be had by avoiding the utterly avoidable parsing.\n\nHOWEVER:\nWhat is overlooked WRT unnamed prepared statements, is that they are hard to use and changing client code or behavior is difficult, error prone, and sometimes impossible. Not all client APIs play nice with them at the moment (see Postgres' JDBC). The behavior has some global tweaks, but these are useless in many situations where you need behavior that varies.\n\nEvery time the answer to a problem is to change the client behavior, I ask myself if the DB could have a better default or configuration parameter so that clients don't have to change. Some database instances have dozens of applications and hundreds of client types including ad-hoc usage. Some client code is legacy code that simply can't be changed.\nChanging the clients is a lot harder than changing a db parameter, or configuring a new default for a particular db user. If the client must change, adding something like SET prepare_level = 'parse_only' is the least intrusive and easiest to test - but I stress again that in many real-world cases the client is not flexible.\n\nA session-level parameter that controls prepared statement behavior defaults (never cache by default? parse cache only? parse cache and plan cache?) would be a blessing. A DBA could administer a fix to certain problems without having to force clients to change behavior or wait for new client API versions with fixes.\n\nThat reminds me, isn't there a global parameter that can force no prepared statements to be cached, does that make them all behave as if they are unnamed? Or are they simply re-created each time? I believe I tried this in the past and the prepared statements were unable to use the parameter values for partition table selection, suggesting the latter.\n\nTypically, I run into the issue with queries in a back-end process that operates on large data sets or partitioned tables. 
Prepared statements essentially kill performance by several orders of magnitude (think, scan 1 versus scan 5000 partition tables). However, my recent issue is brutally simple.\nI need to have an auto-complete web form, and thus need a query with a portion like\nWHERE name LIKE 'userenteredtext%'\nThus, I make an index with varchar_pattern_ops and off we go! ... Or not. Works fine with explicit queries, but not a prepared query. Unfortunately, this is highly prone to SQL injection, and the industry standard way to deal with this is by parameterization.\nhttp://www.owasp.org/index.php/Guide_to_SQL_Injection\n(that site is a weath of information, tools, and tests on the topic, for example: http://www.owasp.org/index.php/Testing_for_SQL_Injection).\n\nPrepared statements are a blessing from a client code perspective, preventing all sorts of bugs and catching many others early, on the client side. Not being able to use them because it causes the database to execute very bad query plans is a flaw in the database, not the client.\n\n------------------------------------------------------------------------------------\nUnnamed prepared statements did not work for me when I tried them as a solution (8.3.2, supposedly after the fix). I was in a hurry to fix my issue, and just moved on when they were plainly not working. It is possible I did something wrong back then. They are also poorly supported by many client APIs -- when they did not work for me, I supposed it was a JDBC issue or perhaps user error, but maybe it was server side? The documentation on both sides is not entirely clear on what should really be happening. And I could not figure out how to debug where the problem was. Can you even test an unnamed prepared statement in psql?\nI had no time to waste and changed a lot of the client code instead, and have since been less interested until the topic came up in this thread, and then after letting this message sit as a draft for a month, ran into the \"varchar_pattern_ops + prepared statement = index, what index?\" Issue.\n\n--------\nOn queries that are overly parse-heavy:\n\nIt is very difficult to tease apart parse time from plan time in Postgres (genrally speaking, getting performance data from Postgres is much harder than the commercial DB's I've used). However, my experience with commercial DBs that have running counters of time spent in various operations is that parsing can be upwards of 50% of the total CPU usage with some workloads. Decision Support type stuff where many light-weight queries are executed, is where I have seen that with Oracle. In Oracle you can find out exactly how much of CPU time was spent parsing versus planning versus other stuff in aggregate over a time interval. I don't suspect that Postgres has a parser that is orders of magnitude faster. But its true I don't have direct evidence to share right now teasing apart plan from parse time.\n\nSo yes, unnamed prepared statements are potentially part (but not all) of a solution, provided there was good control over their use in client API's (which there isn't and won't be, since they are non-standard), or there is a useful non-global way to configure them.\n\nPlanning is long on most queries that take long to parse, but this will vary. 
(We have several queries that access tables with either only primary key indexes or no indexes, lots of embedded 'stuff' that is naturally more parser than planner heavy like CASE statements and value constraints/checks/modifiers on columns without indexes and not in the where clause, and some are approximately 2k character behemoths).\n\n-----\nOn Performance Improvements gained by avoiding prepared statements:\n\nI gained about a factor of 5x to 50x in performance by changing a lot of code to avoid prepared statements. Some of the worst cases involved partitioned tables, where planning issues are particularly troublesome. (lets scan 1000 tables for no good reason! Hooray!)\n\nI never posted here about my struggles with prepared statements and execution performance, I knew it wasn't going to change and I had to fix the problem in a few days time. One of the queries changed had about 7 sub-selects in it, but the eventual plan can be very fast for some cases, returning almost no data, and we run this query repeatedly with slightly different parameters. So with some parameters the execution time dominates by orders of magnitude, and for most parameter combinations the execution time is almost none of it. Of course, now we just write a new query for all the variants, else the performance is unacceptable. This is not too difficult of a change because these are not a SQL injection worry, although it has complicated the client code and test cases.\n\n---\nBeing able to avoid these problems and let programmers use prepared statements would be a good thing, and so I think a solution more useable and flexible than the current unnamed prepared statements would be great. And if the avoidable and redundant re-parsing can be avoided too, its win-win.\n\nBeing able to cut parsing out of the loop in many cases to improve performance, should be able to stand up on its own as a legitimate improvement. If it is permanently bound to planning, it is permanently bound to significant caveats. Decoupling the two and providing a means of control over these WRT prepared statements and related features has much merit IMO.\n", "msg_date": "Fri, 30 Jan 2009 11:41:24 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Poor plan choice in prepared statement " } ]
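
The LIKE-prefix case described above is easy to reproduce in psql. The table and index below are hypothetical; the point is that a literal pattern can be turned into an index range scan on a varchar_pattern_ops index, while the generic plan built for a prepared statement cannot, because the planner has no way to prove the parameter lacks a leading wildcard:

    CREATE INDEX customer_name_prefix_idx
        ON customer (name varchar_pattern_ops);
    ANALYZE customer;

    -- literal pattern: LIKE 'abc%' becomes a range scan on the index
    EXPLAIN SELECT * FROM customer WHERE name LIKE 'abc%';

    -- parameterized pattern: the prepared plan falls back to a seq scan
    PREPARE autocomplete(varchar) AS
        SELECT * FROM customer WHERE name LIKE $1 || '%';
    EXPLAIN EXECUTE autocomplete('abc');
    DEALLOCATE autocomplete;
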
[ { "msg_contents": "I'm using 8.3.5. Table ts_defects has 48M rows. Through psql: delete \nfrom ts_defects;\nResult: out of memory/Can't allocate size: 32\nI then did 10 or so deletes to get rid of the rows. Afterwards, inserts \ninto or queries on this\ntable performed significantly slower. I tried a vacuum analyze, but this \ndidn't help. To fix this,\nI dumped and restored the database.\n\n1) why can't postgres delete all rows in a table if it has millions of rows?\n2) is there any other way to restore performance other than restoring \nthe database?\n\nThanks,\nBrian\n\n", "msg_date": "Mon, 02 Feb 2009 10:17:36 -0800", "msg_from": "Brian Cox <[email protected]>", "msg_from_op": true, "msg_subject": "Deleting millions of rows" }, { "msg_contents": "On Mon, Feb 2, 2009 at 1:17 PM, Brian Cox <[email protected]> wrote:\n> I'm using 8.3.5. Table ts_defects has 48M rows. Through psql: delete from\n> ts_defects;\n> Result: out of memory/Can't allocate size: 32\n> I then did 10 or so deletes to get rid of the rows. Afterwards, inserts into\n> or queries on this\n> table performed significantly slower. I tried a vacuum analyze, but this\n> didn't help. To fix this,\n> I dumped and restored the database.\n>\n> 1) why can't postgres delete all rows in a table if it has millions of rows?\n> 2) is there any other way to restore performance other than restoring the\n> database?\n\nDoes the table have triggers on it? Does it have indexes? What is the\nresult of pg_relation_size() on that table?\n\nHow much memory do you have in your machine? What is work_mem set to?\n\nDid you try VACUUM FULL instead of just plain VACUUM to recover\nperformance? You might also need to REINDEX.\n\nOr you could TRUNCATE the table.\n\n...Robert\n", "msg_date": "Mon, 2 Feb 2009 13:38:50 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "Brian:\n\nOne approach we use for large tables is to partition and then drop\npartitions as the data becomes obsolete. This way you never have the\nproblem. Our general rule is to never delete data from a table because it\nis too slow. We have found this to be the preferred approach regardless of\ndatabase platform.\n\n-Jerry\n\nJerry Champlin|Absolute Performance Inc.|Mobile: 303-588-2547\n\n\n-----Original Message-----\nFrom: [email protected]\n[mailto:[email protected]] On Behalf Of Brian Cox\nSent: Monday, February 02, 2009 11:18 AM\nTo: [email protected]\nSubject: [PERFORM] Deleting millions of rows\n\nI'm using 8.3.5. Table ts_defects has 48M rows. Through psql: delete \nfrom ts_defects;\nResult: out of memory/Can't allocate size: 32\nI then did 10 or so deletes to get rid of the rows. Afterwards, inserts \ninto or queries on this\ntable performed significantly slower. I tried a vacuum analyze, but this \ndidn't help. 
To fix this,\nI dumped and restored the database.\n\n1) why can't postgres delete all rows in a table if it has millions of rows?\n2) is there any other way to restore performance other than restoring \nthe database?\n\nThanks,\nBrian\n\n\n-- \nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n", "msg_date": "Mon, 2 Feb 2009 12:15:29 -0700", "msg_from": "\"Jerry Champlin\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "> -----Original Message-----\n> From: Brian Cox\n> Subject: [PERFORM] Deleting millions of rows\n> \n> I'm using 8.3.5. Table ts_defects has 48M rows. Through psql: \n> delete from ts_defects;\n> Result: out of memory/Can't allocate size: 32 I then did 10 \n> or so deletes to get rid of the rows. Afterwards, inserts \n> into or queries on this table performed significantly slower. \n> I tried a vacuum analyze, but this didn't help. To fix this, \n> I dumped and restored the database.\n> \n> 1) why can't postgres delete all rows in a table if it has \n> millions of rows?\n> 2) is there any other way to restore performance other than \n> restoring the database?\n> \n> Thanks,\n> Brian\n\nIf you are deleting an entire table, then the TRUNCATE command is the way to\ngo. TRUNCATE is very fast and leaves no dead rows behind. The problem with\na normal delete is that the rows are not actually removed from the file.\nOnce the table is VACUUMED the dead space is marked as available to be\nreused, but plain VACUUM doesn't remove any space either. A VACUUM FULL or\nCLUSTER will actually remove dead space, but they can take a while to run.\n(I've heard CLUSTER is supposed to be faster than VACUUM FULL) Another way\nis to create a new table with the same definition as the old table, select\nthe rows you want to keep into the new table, drop the old table, and then\nrename the new table to have the old table's name. \n\n\nDave\n\n", "msg_date": "Mon, 2 Feb 2009 13:35:22 -0600", "msg_from": "\"Dave Dutcher\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Mon, Feb 2, 2009 at 1:17 PM, Brian Cox <[email protected]> wrote:\n> I'm using 8.3.5. Table ts_defects has 48M rows. Through psql: delete from\n> ts_defects;\n> Result: out of memory/Can't allocate size: 32\n\nIs this table the target of any foreign keys?\n\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Mon, 2 Feb 2009 15:20:14 -0500", "msg_from": "David Wilson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Mon, Feb 2, 2009 at 11:17 AM, Brian Cox <[email protected]> wrote:\n> I'm using 8.3.5. Table ts_defects has 48M rows. Through psql: delete from\n> ts_defects;\n> Result: out of memory/Can't allocate size: 32\n> I then did 10 or so deletes to get rid of the rows. Afterwards, inserts into\n> or queries on this\n> table performed significantly slower. I tried a vacuum analyze, but this\n> didn't help.\n\nThere are two different problems happening here. One is the failed\ndelete, the other is the normal bloating caused when a lot of rows are\ndeleted.\n\nWhen deleting every row in a table you're much better off just\ntruncating it. But foreign keys can get in the way so you might need\ntruncate cascade. 
If you're not sure you really want to do it you can\nwrap your truncate in a begin;commit; pair and see how the database\nlooks after the truncate.\n\nIf you choose to use a delete, then foreign keys can slow things down\nquite a bit, and if you've got bad stats it's possible for the planner\nto choose a plan that runs out of memory. Was this db recently\nanalyzed?\n\nIf the delete is what you need for some reason, a regular vacuum won't\nfix your problem, because it only makes dead tuples available again,\nit doesn't remove them. A cluster command OR vacuum full followed by\nreindex are the two best ways to get the space recovered.\n\n> To fix this,\n> I dumped and restored the database.\n\nThat works too. Since the table was empty, you could have dropped and\nrecreated it, but if you had foreign keys you'd have to recreate them\ntoo.\n\n> 1) why can't postgres delete all rows in a table if it has millions of rows?\n\nIt works fine for me. Often into the billions. Your test case seems\nout of the ordinary.\n\nCan you post all the non-default values in your postgresql.conf /\nalter database set ... settings?\n", "msg_date": "Mon, 2 Feb 2009 13:58:35 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" } ]
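
The options in this thread, condensed into a sketch (the table name comes from the thread, everything else is illustrative). A full wipe is best done with TRUNCATE; after a large partial DELETE, plain VACUUM only marks the space reusable, so reclaiming it takes VACUUM FULL plus REINDEX:

    -- full-table wipe: fast, leaves no dead rows, queues no per-row FK triggers
    BEGIN;
    TRUNCATE TABLE ts_defects CASCADE;  -- CASCADE also empties referencing tables
    COMMIT;

    -- cleanup after a big partial DELETE
    VACUUM FULL ts_defects;
    REINDEX TABLE ts_defects;

Running the TRUNCATE inside an explicit transaction, as suggested above, leaves room to check the result and roll back before committing.
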
[ { "msg_contents": "Robert Haas [[email protected]] wrote:\nThanks for your response.\n\n> Does the table have triggers on it? Does it have indexes? What is the\n> result of pg_relation_size() on that table?\nNo triggers; 3 indexes\ncemdb=> select pg_relation_size('ts_defects');\n pg_relation_size\n------------------\n 9464971264\n(1 row)\n\ncemdb=>\ncemdb=> select pg_relation_size('ts_defects_DateIndex');\n pg_relation_size\n------------------\n 1299931136\n(1 row)\n\ncemdb=> select pg_relation_size('ts_defects_DefectIndex');\n pg_relation_size\n------------------\n 1217224704\n(1 row)\n\ncemdb=> select pg_relation_size('ts_defects_EventIndex');\n pg_relation_size\n------------------\n 1216528384\n\n> \n> How much memory do you have in your machine? What is work_mem set to?\n32G; work_mem=64M\n\n> Did you try VACUUM FULL instead of just plain VACUUM to recover\n> performance? You might also need to REINDEX.\n> Or you could TRUNCATE the table.\nI didn't try FULL or REINDEX. In this case, TRUNCATE is the best option \nas I was just trying to reset the state of the table for another test. \nBut this brings up another question: will autovacuum do the right thing \nto preserve performance on this table when many rows are deleted?\n\nThanks,\nBrian\n", "msg_date": "Mon, 02 Feb 2009 12:01:53 -0800", "msg_from": "Brian Cox <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Mon, Feb 2, 2009 at 3:01 PM, Brian Cox <[email protected]> wrote:\n>> How much memory do you have in your machine? What is work_mem set to?\n>\n> 32G; work_mem=64M\n\nHmm. Well then I'm not sure why you're running out of memory, that\nseems like a bug. Taking a long time, I understand. Crashing, not so\nmuch.\n\n>> Did you try VACUUM FULL instead of just plain VACUUM to recover\n>> performance? You might also need to REINDEX.\n>> Or you could TRUNCATE the table.\n>\n> I didn't try FULL or REINDEX. In this case, TRUNCATE is the best option as I\n> was just trying to reset the state of the table for another test. But this\n> brings up another question: will autovacuum do the right thing to preserve\n> performance on this table when many rows are deleted?\n\nI don't think so. I think you need to VACUUM FULL and REINDEX when\nyou do a big DELETE. But if you TRUNCATE then you should be OK - no\nfurther cleanup required in that case.\n\n...Robert\n", "msg_date": "Mon, 2 Feb 2009 15:07:59 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "Robert Haas <[email protected]> writes:\n> On Mon, Feb 2, 2009 at 3:01 PM, Brian Cox <[email protected]> wrote:\n>>> How much memory do you have in your machine? What is work_mem set to?\n>> \n>> 32G; work_mem=64M\n\n> Hmm. Well then I'm not sure why you're running out of memory,\n\nIt's the pending trigger list. He's got two trigger events per row,\nwhich at 40 bytes apiece would approach 4GB of memory. Apparently\nit's a 32-bit build of Postgres, so he's running out of process address\nspace.\n\nThere's a TODO item to spill that list to disk when it gets too large,\nbut the reason nobody's done it yet is that actually executing that many\nFK check trigger events would take longer than you want to wait anyway.\n\nTRUNCATE is the best solution if you want to get rid of the whole table\ncontents. If you're deleting very many but not all rows, people tend\nto drop the FK constraints and re-establish them afterwards. 
Retail\nchecking is just too slow.\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 02 Feb 2009 15:58:43 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows " }, { "msg_contents": "> It's the pending trigger list. He's got two trigger events per row,\n> which at 40 bytes apiece would approach 4GB of memory. Apparently\n> it's a 32-bit build of Postgres, so he's running out of process address\n> space.\n>\n> There's a TODO item to spill that list to disk when it gets too large,\n> but the reason nobody's done it yet is that actually executing that many\n> FK check trigger events would take longer than you want to wait anyway.\n\nHave you ever given any thought to whether it would be possible to\nimplement referential integrity constraints with statement-level\ntriggers instead of row-level triggers? IOW, instead of planning this\nand executing it N times:\n\nDELETE FROM ONLY <fktable> WHERE $1 = fkatt1 [AND ...]\n\n...we could join the original query against fktable with join clauses\non the correct pairs of attributes and then execute it once.\n\nIs this insanely difficult to implement?\n\n...Robert\n", "msg_date": "Mon, 2 Feb 2009 18:26:36 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "Robert Haas escribi�:\n\n> Have you ever given any thought to whether it would be possible to\n> implement referential integrity constraints with statement-level\n> triggers instead of row-level triggers?\n\nWell, one reason we haven't discussed this is because our per-statement\ntriggers are too primitive yet -- we don't have access to the list of\nacted-upon tuples. As soon as we have that we can start discussing this\noptimization.\n\n-- \nAlvaro Herrera http://www.CommandPrompt.com/\nThe PostgreSQL Company - Command Prompt, Inc.\n", "msg_date": "Tue, 3 Feb 2009 15:20:01 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "Alvaro Herrera <[email protected]> writes:\n> Robert Haas escribi�:\n>> Have you ever given any thought to whether it would be possible to\n>> implement referential integrity constraints with statement-level\n>> triggers instead of row-level triggers?\n\n> Well, one reason we haven't discussed this is because our per-statement\n> triggers are too primitive yet -- we don't have access to the list of\n> acted-upon tuples. As soon as we have that we can start discussing this\n> optimization.\n\nI think the point is that at some number of tuples it's better to forget\nabout per-row tests at all, and instead perform the same whole-table\njoin that would be used to validate the FK from scratch. 
The mechanism\nwe lack is not one to pass the row list to a statement trigger, but one\nto smoothly segue from growing a list of per-row entries to dropping\nthat list and queueing one instance of a statement trigger instead.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 03 Feb 2009 16:17:29 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows " }, { "msg_contents": "Hello All,\n\n\nTL> If you're deleting very many but not all rows, people tend\nTL> to drop the FK constraints and re-establish them afterwards.\n\nI find\n\nBEGIN;\nCREATE TEMP TABLE remnant AS\n SELECT * FROM bigtable WHERE (very_restrictive_condition);\nTRUNCATE TABLE bigtable;\nINSERT INTO bigtable SELECT * FROM remnant;\nCOMMIT;\nANALYSE bigtable;\n\nworks well because there is no possibility of my forgetting FKs.\n\n\n-- \nSincerely,\n Andrew Lazarus mailto:[email protected]", "msg_date": "Tue, 3 Feb 2009 14:04:43 -0800", "msg_from": "Andrew Lazarus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Tue, Feb 3, 2009 at 4:17 PM, Tom Lane <[email protected]> wrote:\n> Alvaro Herrera <[email protected]> writes:\n>> Robert Haas escribió:\n>>> Have you ever given any thought to whether it would be possible to\n>>> implement referential integrity constraints with statement-level\n>>> triggers instead of row-level triggers?\n>\n>> Well, one reason we haven't discussed this is because our per-statement\n>> triggers are too primitive yet -- we don't have access to the list of\n>> acted-upon tuples. As soon as we have that we can start discussing this\n>> optimization.\n>\n> I think the point is that at some number of tuples it's better to forget\n> about per-row tests at all, and instead perform the same whole-table\n> join that would be used to validate the FK from scratch. 
The mechanism\n> we lack is not one to pass the row list to a statement trigger, but one\n> to smoothly segue from growing a list of per-row entries to dropping\n> that list and queueing one instance of a statement trigger instead.\n\nThat's good if you're deleting most or all of the parent table, but\nwhat if you're deleting 100,000 values from a 10,000,000 row table?\nIn that case maybe I'm better off inserting all of the deleted keys\ninto a side table and doing a merge or hash join between the side\ntable and the child table...\n\n...Robert\n", "msg_date": "Tue, 3 Feb 2009 19:08:59 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "Robert Haas <[email protected]> writes:\n\n> That's good if you're deleting most or all of the parent table, but\n> what if you're deleting 100,000 values from a 10,000,000 row table?\n> In that case maybe I'm better off inserting all of the deleted keys\n> into a side table and doing a merge or hash join between the side\n> table and the child table...\n\nIt would be neat if we could feed the queued trigger tests into a plan node\nlike a Materialize and use the planner to determine which type of plan to\ngenerate.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's Slony Replication support!\n", "msg_date": "Wed, 04 Feb 2009 12:35:57 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Wed, Feb 4, 2009 at 7:35 AM, Gregory Stark <[email protected]> wrote:\n> Robert Haas <[email protected]> writes:\n>\n>> That's good if you're deleting most or all of the parent table, but\n>> what if you're deleting 100,000 values from a 10,000,000 row table?\n>> In that case maybe I'm better off inserting all of the deleted keys\n>> into a side table and doing a merge or hash join between the side\n>> table and the child table...\n>\n> It would be neat if we could feed the queued trigger tests into a plan node\n> like a Materialize and use the planner to determine which type of plan to\n> generate.\n\nYes, definitely. If it could be built as a general facility it would\nbe good for a lot of other things too. Imagine that from within a\nstatement-level trigger you had magical tables called OLD_TUPLES and\nNEW_TUPLES, analagous to OLD and NEW, but the whole set of them. I\ncan't tell you how many problems I could solve with this type of\nfacility...\n\nWhat I think makes it a little extra-difficult is that even if you had\nthis, you still can't express what you want to plan as a single query.\n You can either join the foreign key relation against OLD_TUPLES and\ndelete everything that matches, or you can join the foreign key\nrelation against the remaining table contents and throw away\neverything that doesn't match.\n\n...Robert\n", "msg_date": "Wed, 4 Feb 2009 08:59:17 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" } ]
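
A sketch of the drop-and-recreate pattern mentioned above for deleting a large fraction (but not all) of a parent table. Every table, column, and constraint name here is hypothetical; the second DELETE replaces millions of per-row cascade trigger firings with one set-oriented anti-join, and re-adding the constraint revalidates the child table in a single pass:

    BEGIN;
    ALTER TABLE ts_defect_notes DROP CONSTRAINT ts_defect_notes_defect_fk;

    DELETE FROM ts_defects
     WHERE created < now() - interval '90 days';

    DELETE FROM ts_defect_notes
     WHERE NOT EXISTS (SELECT 1 FROM ts_defects d
                       WHERE d.id = ts_defect_notes.defect_id);

    ALTER TABLE ts_defect_notes
      ADD CONSTRAINT ts_defect_notes_defect_fk
      FOREIGN KEY (defect_id) REFERENCES ts_defects (id) ON DELETE CASCADE;
    COMMIT;
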
[ { "msg_contents": "David Wilson [[email protected]] wrote:\n\n> Is this table the target of any foreign keys?\nThere are 2 \"on delete cascade\" FKs that reference this table.\n\nBrian\n\n", "msg_date": "Mon, 02 Feb 2009 12:37:04 -0800", "msg_from": "Brian Cox <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Mon, Feb 2, 2009 at 3:37 PM, Brian Cox <[email protected]> wrote:\n> David Wilson [[email protected]] wrote:\n>\n>> Is this table the target of any foreign keys?\n>\n> There are 2 \"on delete cascade\" FKs that reference this table.\n\nI believe that's the source of your memory issues. I think TRUNCATE\nmay handle this more effectively; alternately you can handle the\ncascading yourself in these cases. (And, as Dave Dutcher mentioned,\nTRUNCATE is definitely the way to go for full-table wipes).\n\n\n-- \n- David T. Wilson\[email protected]\n", "msg_date": "Mon, 2 Feb 2009 15:57:25 -0500", "msg_from": "David Wilson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" } ]
[ { "msg_contents": "Tom Lane [[email protected]] wrote:\n> It's the pending trigger list. He's got two trigger events per row,\n> which at 40 bytes apiece would approach 4GB of memory. Apparently\n> it's a 32-bit build of Postgres, so he's running out of process address\n> space.\nYes, this is a 32 bit Postgres running on a 32 bit Linux. I assume that \nthe 2 triggers are due to the 2 \"on delete cascade\" FKs. Thanks for \nexplaining this bit of a mystery.\n\n> TRUNCATE is the best solution if you want to get rid of the whole table\n> contents. If you're deleting very many but not all rows, people tend\n> to drop the FK constraints and re-establish them afterwards. Retail\n> checking is just too slow.\nThanks also to you (and several others) for reminding me of TRUNCATE.\nThis will definitely work for what I was trying to do: reset this table \nfor more testing.\n\nIn production, the table on which I ran DELETE FROM grows constantly \nwith old data removed in bunches periodically (say up to a few 100,000s \nof rows [out of several millions] in a bunch). I'm assuming that \nauto-vacuum/analyze will allow Postgres to maintain reasonable \nperformance for INSERTs and SELECTs on it; do you think that this is a \nreasonable assumption?\n\nThanks,\nBrian\n\n", "msg_date": "Mon, 02 Feb 2009 14:01:12 -0800", "msg_from": "Brian Cox <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Deleting millions of rows" }, { "msg_contents": "On Mon, Feb 2, 2009 at 3:01 PM, Brian Cox <[email protected]> wrote:\n\n> In production, the table on which I ran DELETE FROM grows constantly with\n> old data removed in bunches periodically (say up to a few 100,000s of rows\n> [out of several millions] in a bunch). I'm assuming that auto-vacuum/analyze\n> will allow Postgres to maintain reasonable performance for INSERTs and\n> SELECTs on it; do you think that this is a reasonable assumption?\n\nYes, as long as you're deleting a small enough percentage that it\ndoesn't get bloated (100k of millions is a good ratio) AND autovacuum\nis running AND you have enough FSM entries to track the dead tuples\nyou're gold.\n", "msg_date": "Mon, 2 Feb 2009 15:33:13 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Deleting millions of rows" } ]
[ { "msg_contents": "Hi All,\n\nI am trying to conduct DBT2 test for PostgreSQL. I am getting following\nerrors when I run client and driver separately(Instead of running\ndbt2-run-workload script). I am also attaching exact error log file for the\nsame.\n\ntid:1073875280 /home/rohan/NEW_DBT2/Installer/dbt2/src/driver.c:496\nconnect_to_client() failed, thread exiting...\nWed Feb 4 20:00:52 2009\ntid:1074010448 /home/rohan/NEW_DBT2/Installer/dbt2/src/driver.c:496\nconnect_to_client() failed, thread exiting...\n.\n.\n.\n\nCan someone please provide inputs on this? Do I need to make specific\nchanges if any then please let me know.\n\nAny help on this will be appreciated.\n\nThanks in advance for your help.\n\nThanks,\nRohan", "msg_date": "Wed, 4 Feb 2009 16:12:09 +0530", "msg_from": "Rohan Pethkar <[email protected]>", "msg_from_op": true, "msg_subject": "Getting error while running DBT2 test for PostgreSQL" }, { "msg_contents": "On Wed, Feb 4, 2009 at 2:42 AM, Rohan Pethkar <[email protected]> wrote:\n> Hi All,\n>\n> I am trying to conduct DBT2 test for PostgreSQL. I am getting following\n> errors when I run client and driver separately(Instead of running\n> dbt2-run-workload script). I am also attaching exact error log file for the\n> same.\n>\n> tid:1073875280 /home/rohan/NEW_DBT2/Installer/dbt2/src/driver.c:496\n> connect_to_client() failed, thread exiting...\n> Wed Feb 4 20:00:52 2009\n> tid:1074010448 /home/rohan/NEW_DBT2/Installer/dbt2/src/driver.c:496\n> connect_to_client() failed, thread exiting...\n> .\n> .\n> .\n>\n> Can someone please provide inputs on this? Do I need to make specific\n> changes if any then please let me know.\n>\n> Any help on this will be appreciated.\n>\n> Thanks in advance for your help.\n\nHi Rohan,\n\nAs I mentioned on the osdldbt list, for questions like this it would\nbe more appropriate to cc the [email protected]\ninstead of the -performance list.\n\nIt's not clear why the driver can't connect to the client. Can you\nprovide all of the log files somewhere?\n\nRegards,\nMark\n", "msg_date": "Fri, 6 Feb 2009 21:31:46 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Getting error while running DBT2 test for PostgreSQL" } ]
[ { "msg_contents": "Hi,\n\nI am going to get a Dell 2950 with PERC6i with\n8 * 73 15K SAS drives +\n300 GB EMC SATA SAN STORAGE,\n\nI seek suggestions from users sharing their experience with\nsimilar hardware if any. I have following specific concerns.\n\n1. On list i read that RAID10 function in PERC5 is not really\n striping but spanning and does not give performance boost\n is it still true in case of PERC6i ?\n\n\n2. I am planning for RAID10 array of 8 drives for entrire database\n ( including pg_xlog) , the controller has a write back cache (256MB)\n is it a good idea ?\n or is it better to have 6 drives in HW RAID1 and RAID0 of 3 mirrors\n in s/w and leave 2 drives (raid1) for OS ?\n\n3. Is there any preferred Stripe Size for RAID0 for postgresql usage ?\n\n\n4. Although i would benchmark (with bonnie++) how would the EMC\n SATA SAN storage compare with locally attached SAS storage for the\n purpose of hosting the data , i am hiring the storage primarily for\n storing base base backups and log archives for PITR implementation.\n as retal of separate machine was higher than SATA SAN.\n\nRegds\nmallah.\n", "msg_date": "Thu, 5 Feb 2009 00:15:43 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Wed, Feb 4, 2009 at 11:45 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> Hi,\n>\n> I am going to get a Dell 2950 with PERC6i with\n> 8 * 73 15K SAS drives +\n> 300 GB EMC SATA SAN STORAGE,\n>\n> I seek suggestions from users sharing their experience with\n> similar hardware if any. I have following specific concerns.\n>\n> 1. On list i read that RAID10 function in PERC5 is not really\n> striping but spanning and does not give performance boost\n> is it still true in case of PERC6i ?\n\nI have little experience with the 6i. I do have experience with all\nthe Percs from the 3i/3c series to the 5e series. My experience has\ntaught me that a brand new, latest model $700 Dell RAID controller is\nabout as good as a $150 LSI, Areca, or Escalade/3Ware controller.\nI.e. a four or five year old design. And that's being generous.\n\n> 2. I am planning for RAID10 array of 8 drives for entrire database\n> ( including pg_xlog) , the controller has a write back cache (256MB)\n> is it a good idea ?\n> or is it better to have 6 drives in HW RAID1 and RAID0 of 3 mirrors\n> in s/w and leave 2 drives (raid1) for OS ?\n\nHard to say without testing. Some controllers work fine with all the\ndrives in one big RAID 10 array, some don't. What I'd do is install\nthe OS on a separate drive from the RAID controller, and start\nbenchmarking the performance of your RAID controller with various\nconfigurations, like RAID-10, RAID-5 and RAID-6 (assuming it supports\nall three) and how it behaves when the array is degraded.\n\nYou may well find that your machine is faster if you either run the\ncontroller in JBOD mode and do all the RAID in the kernel, or with a\nmix, with the RAID controller running a bunch of RAID-1 mirrors and\nthe OS building a RAI(D)-0 on top of that.\n\nWith larger arrays and busy dbs I usually always put the OS and\npg_xlog on either a single mirror set or two different mirrorsets.\nWhether or not this will be faster for you depends greatly on your\nusage scenario, which I don't think you've mentioned. For\ntransactional databases it's almost always a win to split out the\npg_xlog from the main array. 
Unless you have a LOT of disks, a single\nRAID-1 pair is usually sufficient.\n\n> 3. Is there any preferred Stripe Size for RAID0 for postgresql usage ?\n\nYou'll really have to test that with your controller, as on some it\nmakes a difference to change it and on others, the default setting is\nas good as it ever gets.\n\n> 4. Although i would benchmark (with bonnie++) how would the EMC\n> SATA SAN storage compare with locally attached SAS storage for the\n> purpose of hosting the data , i am hiring the storage primarily for\n> storing base base backups and log archives for PITR implementation.\n> as retal of separate machine was higher than SATA SAN.\n\nThat really depends on how the SAN is implemented I'd think. I only\nhave a bit of experience with storage arrays, and that experience\nhasn't been all that great in terms of performance.\n\n-- \nWhen fascism comes to America, it will be the intolerant selling it as\ndiversity.\n", "msg_date": "Wed, 4 Feb 2009 13:09:57 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Rajesh Kumar Mallah wrote:\n> Hi,\n> \n> I am going to get a Dell 2950 with PERC6i with\n> 8 * 73 15K SAS drives +\n> 300 GB EMC SATA SAN STORAGE,\n> \n> I seek suggestions from users sharing their experience with\n> similar hardware if any. I have following specific concerns.\n> \n> 1. On list i read that RAID10 function in PERC5 is not really\n> striping but spanning and does not give performance boost\n> is it still true in case of PERC6i ?\n\nIt's long been our policy to buy Dell servers and I agree with most \npeople here that the performance of the PERCs (5 and earlier) have been \ngenerally pretty poor\n\nHowever, they seem to have listened and got it right, or at least a lot \nbetter, with the PERC6.\n\nI have recently installed Ubuntu server on 2 Dell 2950s with 8GB RAM and \nsix 2.5 inch 15K rpm SAS disks in a single RAID10.\n\nI only got chance to run bonnie++ on them a few times, but I was \nconsistently getting around 200MB/sec for both sequential read and write \n(16GB file).\n\nSimilar setup with the older Dell 2850 (PERC5, 6 x 15K rpm 3.5 inch \nSCSI) gave only around 120GB/sec whatever I did.\n\nHope this helps.\n\nCheers,\nGary.\n", "msg_date": "Wed, 04 Feb 2009 20:25:00 +0000", "msg_from": "Gary Doades <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On 4-2-2009 21:09 Scott Marlowe wrote:\n> I have little experience with the 6i. I do have experience with all\n> the Percs from the 3i/3c series to the 5e series. My experience has\n> taught me that a brand new, latest model $700 Dell RAID controller is\n> about as good as a $150 LSI, Areca, or Escalade/3Ware controller.\n> I.e. a four or five year old design. 
And that's being generous.\n\nAfaik the Perc 5/i and /e are more or less rebranded LSI-cards (they're \nnot identical in layout etc), so it would be a bit weird if they \nperformed much less than the similar LSI's wouldn't you think?\nAnd as far as I can remember, our Perc 5/e actually performed similar to \na LSI with similar specs (external sas, 256MB ram, etc) we had at the \ntime of testing.\nAreca may be the fastest around right now, but if you'd like to get it \nall from one supplier, its not too bad to be stuck with Dell's perc 5 or \n6 series.\n\nBest regards,\n\nArjen\n", "msg_date": "Wed, 04 Feb 2009 22:11:50 +0100", "msg_from": "Arjen van der Meijden <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Wed, Feb 4, 2009 at 2:11 PM, Arjen van der Meijden\n<[email protected]> wrote:\n> On 4-2-2009 21:09 Scott Marlowe wrote:\n>>\n>> I have little experience with the 6i. I do have experience with all\n>> the Percs from the 3i/3c series to the 5e series. My experience has\n>> taught me that a brand new, latest model $700 Dell RAID controller is\n>> about as good as a $150 LSI, Areca, or Escalade/3Ware controller.\n>> I.e. a four or five year old design. And that's being generous.\n>\n> Afaik the Perc 5/i and /e are more or less rebranded LSI-cards (they're not\n> identical in layout etc), so it would be a bit weird if they performed much\n> less than the similar LSI's wouldn't you think?\n> And as far as I can remember, our Perc 5/e actually performed similar to a\n> LSI with similar specs (external sas, 256MB ram, etc) we had at the time of\n> testing.\n> Areca may be the fastest around right now, but if you'd like to get it all\n> from one supplier, its not too bad to be stuck with Dell's perc 5 or 6\n> series.\n\nWe purhcased the Perc 5E, which dell wanted $728 for last fall with 8\nSATA disks in an MD-1000 and the performance is just terrible. No\nmatter what we do the best throughput on any RAID setup was about 30\nmegs/second write and 60 Megs/second read. I can get that from a\nmirror set of the same drives under linux kernel software RAID. This\nwas with battery backed cache enabled. Could be an interaction issue\nwith the MD-1000, or something, but the numbers are just awful. We\nhave a Perc 6(i or e not sure) on a 6 disk SAS array and it's a little\nbetter, getting into the hundred meg/second range, but nothing\nspectacular. They're stable, which is more than I can say for a lot\nof older PERCs and the servers they came in (x600 series with Perc 3i\nfor instance).\n", "msg_date": "Wed, 4 Feb 2009 14:36:46 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Sorry for the top posts, I don't have a client that is inline post friendly.\n\nMost PERCs are rebranded LSI's lately. The difference between the 5 and 6 is PCIX versus PCIe LSI series, relatively recent ones. Just look at the OpenSolaris drivers for the PERC cards for a clue to what is what.\n\nBonnie ++ is a horrible benchmark IMO (for server disk performance checks beyond very basic sanity). 
I've tried iozone, dd, fio, and manual shell script stuff...\nFio is very good, there's one quirk with how it does random writes (sparsely) that can make XFS freak out, don't test it with sparse random writes - Postgres doesn't do this, it writes random re-writes and only appends to files to grow.\nFIO is also good because you can make useful profiles, such as multiple concurrent readers of different types, or mix in some writes. A real postgres benchmark may be better, but some more sophisticated synthetic ones were able to show how far from the ideal the PERC falls with more sophisticated load than a better card.\n\nMy experience with 12 nearline-SAS 7200 RPM drives and a Perc 6e, then the same system with another card:\n\nExt3, out of the box, 12 drives raid 10: ~225MB/sec.\next3, os readahead tuned up: 350MB/sec.\nXFS, out of the box, 12 drives raid 10: ~300MB/sec.\nTune OS readahead (24576 or so), with xfs, 410MB/sec.\n\nHigher Linux device readahead did not impact the random access performance, and the defaults are woeful for the PERC cards.\n\n10 disk and 8 disk setups performed the same. PERC did not really scale past 8 disks in raid 10, I did not try 6 disks. Each disk can do 115MB/sec or so at the front of the drive with JBOD tests tuned with the right readahead Linux filesystem value.\nAll tests were done on the first 20% or so carved out, to limit the effects of transfer rate decrease on higher LBA's and be fair between file systems (otherwise, ext3 looks worse, as it is more likely than xfs to allocate you some stuff way out on the disk in a somewhat empty partition).\n\n\nAdaptec card (5445), untuned readahead, 500MB/sec +\nTuned readahead, 600MB/sec (and xfs now the same as dd, with 100GB files+), at the maximum expectation for this sort of raid 10 (that can't use all drives for reading, like zfs).\n\nI did not get much higher random IOPS out of smaller block sizes than the default. 15K SAS drives will be more likely to benefit from smaller blocks, but I don't have experience with that on a PERC. General experience says that going below 64K on any setup is a waste of time with today's hardware. Reading 64K takes less than 1ms.\n\nDo not bother with the PERC BIOS' read-ahead setting, it just makes things worse, the Linux block device readahead is far superior.\n\nBest performance achieved on a set of 20 drives in my testing was to use two Adaptec cards, each with moderate sized raid 10 sets (adaptec 10 drives) and software linux 'md' raid 0 on top of that. It takes at least two concurrent sequential readers to max the I/O in this case, and 1000MB/sec to 1150MB/sec is the peak depending on the mix of sequential readers. In the real world, that only happens when writes are low and there are about 4 concurrent sequential scans on large (multi-GB) tables. Most people will be optimizing for much higher random access rates rather than sequential scans mixed with random access.\n\nPlacing the xlogs on a separate volume helped quite a bit in the real world postgres tests with mixed load.\n\n\nOn 2/4/09 12:09 PM, \"Scott Marlowe\" <[email protected]> wrote:\n\nOn Wed, Feb 4, 2009 at 11:45 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> Hi,\n>\n> I am going to get a Dell 2950 with PERC6i with\n> 8 * 73 15K SAS drives +\n> 300 GB EMC SATA SAN STORAGE,\n>\n> I seek suggestions from users sharing their experience with\n> similar hardware if any. I have following specific concerns.\n>\n> 1. 
On list i read that RAID10 function in PERC5 is not really\n> striping but spanning and does not give performance boost\n> is it still true in case of PERC6i ?\n\nI have little experience with the 6i. I do have experience with all\nthe Percs from the 3i/3c series to the 5e series. My experience has\ntaught me that a brand new, latest model $700 Dell RAID controller is\nabout as good as a $150 LSI, Areca, or Escalade/3Ware controller.\nI.e. a four or five year old design. And that's being generous.\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i  controller\n\n\n\nSorry for the  top posts, I don’t have a client that is inline post friendly.\n\nMost PERCs are rebranded LSI’s lately.  The difference between the 5 and  6 is PCIX versus PCIe LSI series, relatively recent ones.  Just look at the OpenSolaris drivers for the PERC cards for a clue to what is what.\n\nBonnie ++ is a horrible benchmark IMO (for server disk performance checks beyond very basic sanity).  I’ve tried iozone, dd, fio, and manual shell script stuff...\nFio is very good, there’s one quirk with how it does random writes (sparsely) that can make XFS freak out, don’t test it with sparse random writes — Postgres doesn’t do this, it writes random re-writes and only appends to files to grow.  \nFIO is also good because you can make useful profiles, such as multiple concurrent readers of different types, or mix in some writes.  A real postgres benchmark may be better, but some more sophisticated synthetic ones were able to show how far from the ideal the PERC falls with more sophisticated load than a better card.  \n\nMy experience with 12 nearline-SAS 7200 RPM drives and a Perc 6e, then the same system with another card:\n\nExt3, out of the box, 12 drives raid 10:  ~225MB/sec.  \next3, os readahead tuned up: 350MB/sec.\nXFS, out of the box, 12 drives raid 10: ~300MB/sec.\nTune OS readahead (24576 or so), with xfs, 410MB/sec.\n\nHigher Linux device readahead did not impact the random access performance, and the defaults are woeful for the PERC cards.\n\n10 disk and 8 disk setups performed the same.  PERC did not really scale past 8 disks in raid 10, I did not try 6 disks.  Each disk can do 115MB/sec or so at the front of the drive with JBOD tests tuned with the right readahead Linux filesystem value.\nAll tests were done on the first 20% or so carved out, to limit the effects of transfer rate decrease on higher LBA’s and be fair between file systems (otherwise, ext3 looks worse, as it is more likely than xfs to allocate you some stuff way out on the disk in a somewhat empty partition).\n\n\nAdaptec card (5445), untuned readahead, 500MB/sec +\nTuned readahead, 600MB/sec (and xfs now the same as dd, with 100GB files+), at the maximum expectation for this sort of raid 10 (that can’t use all drives for reading, like zfs).  \n\nI did not get much higher random IOPS out of smaller block sizes than the default.  15K SAS drives will be more likely to benefit from smaller blocks, but I don’t have experience with that on a PERC.  General experience says that going below 64K on any setup is a waste of time with today’s hardware.  Reading 64K takes less than 1ms.\n\nDo not bother with the PERC BIOS’ read-ahead setting, it just makes things worse, the Linux block device readahead is far superior.\n\nBest performance achieved on a set of 20 drives in my testing was to use two Adaptec cards, each with moderate sized raid 10 sets (adaptec 10 drives) and software linux ‘md’ raid 0 on top of that.  
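(As a rough illustration of that layering, assuming the two hardware RAID 10 volumes are exported to Linux as /dev/sdb and /dev/sdc; the device names, chunk size and filesystem below are made-up examples, not the actual setup being described:)

mdadm --create /dev/md0 --level=0 --raid-devices=2 --chunk=256 /dev/sdb /dev/sdc
mkfs.xfs /dev/md0

The md device then gets mounted and used as the data volume like any single array would be.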
It takes at least two concurrent sequential readers to max the I/O in this case, and 1000MB/sec to 1150MB/sec is the peak depending on the mix of sequential readers.  In the real world, that only happens when writes are low and there are about 4 concurrent sequential scans on large (multi-GB) tables.  Most people will be optimizing for much higher random access rates rather than sequential scans mixed with random access. \n\nPlacing the xlogs on a separate volume helped quite a bit in the real world postgres tests with mixed load.\n\n\nOn 2/4/09 12:09 PM, \"Scott Marlowe\" <[email protected]> wrote:\n\nOn Wed, Feb 4, 2009 at 11:45 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> Hi,\n>\n> I am going to get a Dell 2950 with PERC6i with\n> 8 * 73 15K SAS drives +\n> 300 GB EMC SATA SAN STORAGE,\n>\n> I seek suggestions from users sharing their experience with\n> similar hardware if any. I have following specific concerns.\n>\n> 1. On list i read  that RAID10 function in PERC5 is not really\n>   striping but spanning and does not give performance boost\n>   is it still true in case of PERC6i ?\n\nI have little experience with the 6i.  I do have experience with all\nthe Percs from the 3i/3c series to the 5e series.  My experience has\ntaught me that a brand new, latest model $700 Dell RAID controller is\nabout as good as a $150 LSI, Areca, or Escalade/3Ware controller.\nI.e. a four or five year old design.  And that's being generous.", "msg_date": "Wed, 4 Feb 2009 20:24:31 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "Sorry for the top post --\n\nAssuming Linux --\n\n1: PERC 6 is still a bit inferior to other options, but not that bad. Its random IOPS is fine, sequential speeds are noticeably less than say the latest from Adaptec or Areca.\n\n2: Random iops will probably scale ok from 6 to 8 drives, but depending on your use case, having a single mirror for OS and xlogs can be a significant performance improvement. My base suggestion would be to go with 6 drives in raid 10, perhaps try 3 mirrors with software raid 0 on top to compare, and leave one mirror for the OS and xlogs (separate partitions, use ext2 for the xlogs and that can be a fairly small partition). There isn't any way to know which of these options will be best for you, it is very dependant on the data and applications accessing it.\n\n3. No, its too hardware dependant to have an ideal raid block size. Its slightly usage dependant too. The default is probably going to be best based on my PERC 6 experience. You'll gain a lot more from tuning other things.\n\n4. Can't say much about the SAN. High end ones can do good iops, the one listed looks more like archival storage to me though.\n\nMake sure you tune the Linux block device readahead, it makes a huge difference in sequential access performance ( see blockdev -getra <device>). 1MB to 4MB per raid spindle 'width' is usually ideal. The default is 128k, the default with software raid is 4MB, often the performance difference when using software raid is largely just this setting. If you're comfortable with XFS, it works well for the postgres data files.\n\n\n\nOn 2/4/09 10:45 AM, \"Rajesh Kumar Mallah\" <[email protected]> wrote:\n\nHi,\n\nI am going to get a Dell 2950 with PERC6i with\n8 * 73 15K SAS drives +\n300 GB EMC SATA SAN STORAGE,\n\nI seek suggestions from users sharing their experience with\nsimilar hardware if any. 
I have following specific concerns.\n\n1. On list i read that RAID10 function in PERC5 is not really\n striping but spanning and does not give performance boost\n is it still true in case of PERC6i ?\n\n\n2. I am planning for RAID10 array of 8 drives for entrire database\n ( including pg_xlog) , the controller has a write back cache (256MB)\n is it a good idea ?\n or is it better to have 6 drives in HW RAID1 and RAID0 of 3 mirrors\n in s/w and leave 2 drives (raid1) for OS ?\n\n3. Is there any preferred Stripe Size for RAID0 for postgresql usage ?\n\n\n4. Although i would benchmark (with bonnie++) how would the EMC\n SATA SAN storage compare with locally attached SAS storage for the\n purpose of hosting the data , i am hiring the storage primarily for\n storing base base backups and log archives for PITR implementation.\n as retal of separate machine was higher than SATA SAN.\n\nRegds\nmallah.\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i controller\n\n\nSorry for the top post --\n\nAssuming Linux --\n\n1: PERC 6 is still a bit inferior to other options, but not that bad.  Its random IOPS is fine, sequential speeds are noticeably less than say the latest from Adaptec or Areca.\n\n2: Random iops will probably scale ok from 6 to 8 drives, but depending on your use case, having a single mirror for OS and xlogs can be a significant performance improvement.  My base suggestion would be to go with 6 drives in raid 10, perhaps try 3 mirrors with software raid 0 on top to compare, and leave one mirror for the OS and xlogs (separate partitions, use ext2 for the xlogs and that can be a fairly small partition).  There isn’t any way to know which of these options will be best for you, it is very dependant on the data and applications accessing it.\n\n3. No, its too hardware dependant to have an ideal raid block size.  Its slightly usage dependant too.  The default is probably going to be best based on my PERC 6 experience.  You’ll gain a lot more from tuning other things.\n\n4. Can’t say much about the SAN. High end ones can do good iops, the one listed looks more like archival storage to me though.\n\nMake sure you tune the Linux block device readahead, it makes a huge difference in sequential access performance ( see blockdev —getra <device>). 1MB to 4MB per raid spindle ‘width’ is usually ideal.  The default is 128k, the default with software raid is 4MB, often the performance difference when using software raid is largely just this setting.  If you’re comfortable with XFS, it works well for the postgres data files.\n\n\n\nOn 2/4/09 10:45 AM, \"Rajesh Kumar Mallah\" <[email protected]> wrote:\n\nHi,\n\nI am going to get a Dell 2950 with PERC6i with\n8 * 73 15K SAS drives +\n300 GB EMC SATA SAN STORAGE,\n\nI seek suggestions from users sharing their experience with\nsimilar hardware if any. I have following specific concerns.\n\n1. On list i read  that RAID10 function in PERC5 is not really\n   striping but spanning and does not give performance boost\n   is it still true in case of PERC6i ?\n\n\n2. I am planning for RAID10 array of 8 drives for entrire database\n   ( including pg_xlog)  , the controller has a write back cache (256MB)\n   is it a good idea ?\n   or is it better to have 6 drives in HW RAID1 and RAID0 of 3 mirrors\n   in s/w  and leave 2 drives (raid1) for OS ?\n\n3. 
Is there any preferred Stripe Size for RAID0  for postgresql usage ?\n\n\n4. Although i would benchmark (with bonnie++) how would the EMC\n   SATA SAN storage compare with locally attached SAS storage for the\n   purpose of hosting the data  , i am hiring the storage primarily for\n   storing base base backups and  log archives for PITR implementation.\n   as retal of separate machine was higher than SATA SAN.\n\nRegds\nmallah.\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Wed, 4 Feb 2009 21:04:23 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Scott Carey wrote:\n> Sorry for the top post --\n>\n> Assuming Linux --\n>\n> 1: PERC 6 is still a bit inferior to other options, but not that bad. \n> Its random IOPS is fine, sequential speeds are noticeably less than \n> say the latest from Adaptec or Areca.\n>\n>\nIn the archives there was big thread about this very setup. is very \nsimilar to mine although i'm getting close to a year old\n\nhttp://archives.postgresql.org/pgsql-performance/2008-03/thrd3.php#00264\n\n\n\n\n\n\n\nScott Carey wrote:\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 ,\nPERC6i controller\nSorry for the top post --\n\nAssuming Linux --\n\n1: PERC 6 is still a bit inferior to other options, but not that bad.\n Its random IOPS is fine, sequential speeds are noticeably less than\nsay the latest from Adaptec or Areca.\n\n\n\nIn the archives there was big thread about this very setup.  is very\nsimilar to mine although i'm getting close to a year old\n\nhttp://archives.postgresql.org/pgsql-performance/2008-03/thrd3.php#00264", "msg_date": "Thu, 05 Feb 2009 00:47:54 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Arjen van der Meijden wrote:\n\n> Afaik the Perc 5/i and /e are more or less rebranded LSI-cards (they're\n> not identical in layout etc), so it would be a bit weird if they\n> performed much less than the similar LSI's wouldn't you think?\n\nI've recently had to replace a PERC4/DC with the exact same card made by\nLSI (320-2) because the PERCs firmware was crippled. Its idea of RAID10\nactually appears to be concatenated RAID1 arrays.\n\nSince replacing it and rebuilding the array on the LSI card, performance\nhas been considerably better (14 disk SCSI shelf)\n\n> Areca may be the fastest around right now, but if you'd like to get it\n> all from one supplier, its not too bad to be stuck with Dell's perc 5 or\n> 6 series.\n\nThe PERC6 isn't too bad, however it grinds to a halt when the IO queue\ngets large and it has the serious limitation of not supporting more than\n8 spans, so trying to build a RAID10 array greater than 16 disks is\npointless if you're not just after the extra capacity.\n\nAre there any reasonable choices for bigger (3+ shelf) direct-connected\nRAID10 arrays, or are hideously expensive SANs the only option? 
I've\nchecked out the latest Areca controllers, but the manual available on\ntheir website states there's a limitation of 32 disks in an array...\n", "msg_date": "Thu, 05 Feb 2009 12:40:02 +0000", "msg_from": "Matt Burke <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "\n\n\n--- On Thu, 5/2/09, Matt Burke <[email protected]> wrote:\n\n> From: Matt Burke <[email protected]>\n> Subject: Re: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i controller\n> To: [email protected]\n> Date: Thursday, 5 February, 2009, 12:40 PM\n> Arjen van der Meijden wrote:\n> \n> > Afaik the Perc 5/i and /e are more or less rebranded\n> LSI-cards (they're\n> > not identical in layout etc), so it would be a bit\n> weird if they\n> > performed much less than the similar LSI's\n> wouldn't you think?\n> \n> I've recently had to replace a PERC4/DC with the exact\n> same card made by\n> LSI (320-2) because the PERCs firmware was crippled. Its\n> idea of RAID10\n> actually appears to be concatenated RAID1 arrays.\n> \n\nDid you try flashing the PERC with the LSI firmware?\n\nI tried flashing a PERC3/dc with LSI firmware, it worked fine but I saw no difference in performance so I assumed it must be somethign else on the board that cripples it.\n\n\n\n\n \n", "msg_date": "Thu, 5 Feb 2009 12:55:24 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "Matt Burke wrote:\n> Arjen van der Meijden wrote:\n> \n>> Afaik the Perc 5/i and /e are more or less rebranded LSI-cards (they're\n>> not identical in layout etc), so it would be a bit weird if they\n>> performed much less than the similar LSI's wouldn't you think?\n> \n> I've recently had to replace a PERC4/DC with the exact same card made by\n> LSI (320-2) because the PERCs firmware was crippled. Its idea of RAID10\n> actually appears to be concatenated RAID1 arrays.\n> \n> Since replacing it and rebuilding the array on the LSI card, performance\n> has been considerably better (14 disk SCSI shelf)\n> \n>> Areca may be the fastest around right now, but if you'd like to get it\n>> all from one supplier, its not too bad to be stuck with Dell's perc 5 or\n>> 6 series.\n> \n> The PERC6 isn't too bad, however it grinds to a halt when the IO queue\n> gets large and it has the serious limitation of not supporting more than\n> 8 spans, so trying to build a RAID10 array greater than 16 disks is\n> pointless if you're not just after the extra capacity.\n> \n> Are there any reasonable choices for bigger (3+ shelf) direct-connected\n> RAID10 arrays, or are hideously expensive SANs the only option? I've\n> checked out the latest Areca controllers, but the manual available on\n> their website states there's a limitation of 32 disks in an array...\n\nIn the context of RAID 10, what are the drawbacks of sticking several\nsuch controllers and use them only for hardware RAID 1 arraylets,\nrunning RAID 0 across them in software? 
You'd lose booting from the\narray but data safety should be about the same since the hardware is\nmirroring data, right?", "msg_date": "Thu, 05 Feb 2009 14:01:07 +0100", "msg_from": "Ivan Voras <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n\tPERC6i controller" }, { "msg_contents": "Glyn Astill wrote:\n\n> Did you try flashing the PERC with the LSI firmware?\n> \n> I tried flashing a PERC3/dc with LSI firmware, it worked fine but I\n> saw no difference in performance so I assumed it must be somethign\n> else on the board that cripples it.\n\nNo, for a few reasons:\n\n1. I read somewhere on the interwebs that doing so would brick the card\n2. I don't have access to a DOS/Windows machine\n3. Dodgy hardware isn't what you want when dealing with large databases\n\nIf it's not just a firmware issue it wouldn't surprise me if you could\njust link a couple of pins/contacts/etc on the card and gain the LSIs\ncapabilities, but it's not an idea I'd entertain outside of personal use...\n\n\n-- \n", "msg_date": "Thu, 05 Feb 2009 13:12:15 +0000", "msg_from": "Matt Burke <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Scott Marlowe <[email protected]> writes:\n\n> We purhcased the Perc 5E, which dell wanted $728 for last fall with 8\n> SATA disks in an MD-1000 and the performance is just terrible. No\n> matter what we do the best throughput on any RAID setup was about 30\n> megs/second write and 60 Megs/second read. \n\nIs that sequential or a mix of random and sequential (It's too high to be\npurely random i/o)? A single consumer drive should be able to beat those\nnumbers on sequential i/o. If it's a mix of random and sequential then\nperformance will obviously depend on the mix.\n\n> I can get that from a mirror set of the same drives under linux kernel\n> software RAID.\n\nWhy is that surprising? I would expect software raid to be able to handle 8\ndrives perfectly well assuming you had an controller and bus you aren't\nsaturating.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's On-Demand Production Tuning\n", "msg_date": "Thu, 05 Feb 2009 14:04:25 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "On Thu, 2009-02-05 at 12:40 +0000, Matt Burke wrote:\n> Arjen van der Meijden wrote:\n> \n\n> Are there any reasonable choices for bigger (3+ shelf) direct-connected\n> RAID10 arrays, or are hideously expensive SANs the only option? I've\n> checked out the latest Areca controllers, but the manual available on\n> their website states there's a limitation of 32 disks in an array...\n> \n\nHP P800.\n\n-- \nPostgreSQL - XMPP: [email protected]\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Thu, 05 Feb 2009 08:13:38 -0800", "msg_from": "\"Joshua D. 
Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Thu, Feb 5, 2009 at 6:10 PM, Matt Burke <[email protected]> wrote:\n> Arjen van der Meijden wrote:\n>\n>> Afaik the Perc 5/i and /e are more or less rebranded LSI-cards (they're\n>> not identical in layout etc), so it would be a bit weird if they\n>> performed much less than the similar LSI's wouldn't you think?\n>\n> I've recently had to replace a PERC4/DC with the exact same card made by\n> LSI (320-2) because the PERCs firmware was crippled. Its idea of RAID10\n> actually appears to be concatenated RAID1 arrays.\n>\n> Since replacing it and rebuilding the array on the LSI card, performance\n> has been considerably better (14 disk SCSI shelf)\n>\n>> Areca may be the fastest around right now, but if you'd like to get it\n>> all from one supplier, its not too bad to be stuck with Dell's perc 5 or\n>> 6 series.\n>\n> The PERC6 isn't too bad, however it grinds to a halt when the IO queue\n> gets large and it has the serious limitation of not supporting more than\n> 8 spans, so trying to build a RAID10 array greater than 16 disks is\n> pointless if you're not just after the extra capacity.\n>\n> Are there any reasonable choices for bigger (3+ shelf) direct-connected\n> RAID10 arrays, or are hideously expensive SANs the only option? I've\n> checked out the latest Areca controllers, but the manual available on\n> their website states there's a limitation of 32 disks in an array...\n\nWhere exactly is there limitation of 32 drives.\nthe datasheet of 1680 states support upto 128drives\nusing enclosures.\n\nregds\nrajesh kumar mallah.\n\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Thu, 5 Feb 2009 22:08:26 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On 2/5/09 4:40 AM, \"Matt Burke\" <[email protected]> wrote:\n\nAre there any reasonable choices for bigger (3+ shelf) direct-connected\nRAID10 arrays, or are hideously expensive SANs the only option? I've\nchecked out the latest Areca controllers, but the manual available on\ntheir website states there's a limitation of 32 disks in an array...\n\nWhat I'm using currently:\nAdaptec / Areca cards + Promise V-Trac J610S (for 3.5\" drives, if total storage is your concern). Multiple cards if necessary and you want dual-path to each drive.\nhttp://www.promise.com/product/product_detail_eng.asp?segment=undefined&product_id=190\nhttp://www.promise.com/product/product_detail_eng.asp?segment=undefined&product_id=189\n\nUsing two of the former with two Adaptec cards (Software raid 0 on top of them) with great success.\nThere's 2.5\" drive ones too from other manufacturers ... The Promise one here scared me at first until I got confirmation from several experts actually using them in place of Dell MD1000 and HP SAS expander boxes because of higher device compatibility (Dells only works with PERC, etc) and reasonable cost.\nYou probably don't want a single array with more than 32 drives anyway, its almost always better to start carving out chunks and using software raid 0 or 1 on top of that for various reasons. 
I wouldn't put more than 16 drives in one array on any of these RAID cards, they're just not optimized for really big arrays and tend to fade between 6 to 16 in one array, depending on the quality.\n\nHigh quality SAS expander boxes compatible with good, non-proprietary RAID cards are not those from T1 vendors. The Promise above has a large compatibility list, since it uses 'standard' controller chips, etc. There are several others. See the Adaptec and Areca SAS expander compatibility lists. Dual redundant path to drives is nice.\n\nYou can do direct-attached storage to 100+ drives or more if you want. The price and manageability cost go up a lot if it gets too big however. Having global hot spare drives is critical. Not that the cost of using SAN's and such is low... SAS expanders have made DAS with large arrays very accessible though.\nSun has some nice solutions here too, but like all T1 vendors the compatibility lists are smaller. Their RAID card they sell is an OEM'd Adaptec and performs nicely. The Sun 4150 with a direct-attached SAS storage makes a pretty good DB server. And yes, you can run Linux on it or Solaris or OpenSolaris or Windows or some BSD variants.\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i  controller\n\n\nOn 2/5/09 4:40 AM, \"Matt Burke\" <[email protected]> wrote:\n\nAre there any reasonable choices for bigger (3+ shelf) direct-connected\nRAID10 arrays, or are hideously expensive SANs the only option? I've\nchecked out the latest Areca controllers, but the manual available on\ntheir website states there's a limitation of 32 disks in an array...\n\nWhat I’m using currently:\nAdaptec / Areca cards + Promise V-Trac J610S  (for 3.5” drives, if total storage is your concern).  Multiple cards if necessary and you want dual-path to each drive.\nhttp://www.promise.com/product/product_detail_eng.asp?segment=undefined&product_id=190\nhttp://www.promise.com/product/product_detail_eng.asp?segment=undefined&product_id=189\n\nUsing two of the former with two Adaptec cards (Software raid 0 on top of them) with great success.  \nThere’s 2.5” drive ones too from other manufacturers ... The Promise one here scared me at first until I got confirmation from several experts actually using them in place of Dell MD1000 and HP SAS expander boxes because of higher device compatibility (Dells only works with PERC, etc) and reasonable cost.  \nYou probably don’t want a single array with more than 32 drives anyway, its almost always better to start carving out chunks and using software raid 0 or 1 on top of that for various reasons. I wouldn’t put more than 16 drives in one array on any of these RAID cards, they’re just not optimized for really big arrays and tend to fade between 6 to 16 in one array, depending on the quality.\n\nHigh quality SAS expander boxes compatible with good, non-proprietary RAID cards are not those from T1 vendors.  The Promise above has a large compatibility list, since it uses ‘standard’ controller chips, etc.  There are several others.  See the Adaptec and Areca SAS expander compatibility lists.   Dual redundant path to drives is nice.  \n\nYou can do direct-attached storage to 100+ drives or more if you want.  The price and manageability cost go up a lot if it gets too big however.  Having global hot spare drives is critical.  Not that the cost of using SAN’s and such is low...  
SAS expanders have made DAS with large arrays very accessible though.\nSun has some nice solutions here too, but like all T1 vendors the compatibility lists are smaller.  Their RAID card they sell is an OEM’d Adaptec and performs nicely.  The Sun 4150 with a direct-attached SAS storage makes a pretty good DB server.  And yes, you can run Linux on it or Solaris or OpenSolaris or Windows or some BSD variants.", "msg_date": "Thu, 5 Feb 2009 10:42:37 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "Rajesh Kumar Mallah wrote:\n>> I've checked out the latest Areca controllers, but the manual \n>> available on their website states there's a limitation of 32 disks \n>> in an array...\n> \n> Where exactly is there limitation of 32 drives. the datasheet of \n> 1680 states support upto 128drives using enclosures.\n\nThe 1680 manual:\nhttp://www.areca.us//support/download/RaidCards/Documents/Manual_Spec/SAS_Manual.zip\n\nPage 25:\n\n> Note:\n> \n> 1. The maximum no. is 32 disk drived included in a single RAID set\n\nPage 49:\n\n> 1. Up to 32 disk drives can be included in a single RAID set.\n> 2. Up to 8 RAID sets can be created per controller\n\n(point 2 meaning you can't do s/w RAID over umpteen h/w RAID1 pairs)\n\nPage 50:\n\n> To create RAID 30/50/60 volume, you need create multiple RAID sets\n> first with the same disk members on each RAID set. The max no. disk\n> drives per volume set: 32 for RAID 0/1/10/3/5/6 and 128 for RAID\n> 30/50/60.\n\n...and a few more times saying the same thing\n\n-- \n", "msg_date": "Fri, 06 Feb 2009 08:32:21 +0000", "msg_from": "Matt Burke <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Scott Carey wrote:\n> You probably don�t want a single array with more than 32 drives anyway,\n> its almost always better to start carving out chunks and using software\n> raid 0 or 1 on top of that for various reasons. I wouldn�t put more than\n> 16 drives in one array on any of these RAID cards, they�re just not\n> optimized for really big arrays and tend to fade between 6 to 16 in one\n> array, depending on the quality.\n\nThis is what I'm looking at now. The server I'm working on at the moment\ncurrently has a PERC6/e and 3xMD1000s which needs to be tested in a few\nsetups. I need to code a benchmarker yet (I haven't found one yet that\ncan come close to replicating our DB usage patterns), but I intend to try:\n\n1. 3x h/w RAID10 (one per shelf), sofware RAID0\n2. lots x h/w RAID1, software RAID0 if the PERC will let me create\nenough arrays\n3. Pure s/w RAID10 if I can convince the PERC to let the OS see the disks\n4. 2x h/w RAID30, software RAID0\n\nI'm not holding much hope out for the last one :)\n\n\nI'm just glad work on a rewrite of my inherited backend systems should\nstart soon; get rid of the multi-TB MySQL hell and move to a distributed\nPG setup on dirt cheap Dell R200s/blades\n\n\n> You can do direct-attached storage to 100+ drives or more if you want.\n> The price and manageability cost go up a lot if it gets too big\n> however. Having global hot spare drives is critical. Not that the cost\n> of using SAN�s and such is low... 
SAS expanders have made DAS with\n> large arrays very accessible though.\n\nFor large storage arrays (RAID60 or similar) you can't beat a RAID\ncontroller and disk shelf(s), especially if you keep the raidsets small\nand use cheap ludicrous capacity SATA disks\n\nYou just need to be aware that performance doesn't scale well/easily\nover 1-2 shelves on the things\n\n\n-- \n\n\n", "msg_date": "Fri, 06 Feb 2009 09:04:14 +0000", "msg_from": "Matt Burke <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Fri, Feb 6, 2009 at 2:04 AM, Matt Burke <[email protected]> wrote:\n> Scott Carey wrote:\n>> You probably don't want a single array with more than 32 drives anyway,\n>> its almost always better to start carving out chunks and using software\n>> raid 0 or 1 on top of that for various reasons. I wouldn't put more than\n>> 16 drives in one array on any of these RAID cards, they're just not\n>> optimized for really big arrays and tend to fade between 6 to 16 in one\n>> array, depending on the quality.\n>\n> This is what I'm looking at now. The server I'm working on at the moment\n> currently has a PERC6/e and 3xMD1000s which needs to be tested in a few\n> setups. I need to code a benchmarker yet (I haven't found one yet that\n> can come close to replicating our DB usage patterns), but I intend to try:\n>\n> 1. 3x h/w RAID10 (one per shelf), sofware RAID0\n\nShould work pretty well.\n\n> 2. lots x h/w RAID1, software RAID0 if the PERC will let me create\n> enough arrays\n\nI don't recall the max number arrays. I'm betting it's less than that.\n\n> 3. Pure s/w RAID10 if I can convince the PERC to let the OS see the disks\n\nLook for JBOD mode.\n\n> 4. 2x h/w RAID30, software RAID0\n>\n> I'm not holding much hope out for the last one :)\n\nMe either. :)\n", "msg_date": "Fri, 6 Feb 2009 02:17:29 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Matt Burke wrote:\n> Scott Carey wrote:\n> > You probably don?t want a single array with more than 32 drives anyway,\n> > its almost always better to start carving out chunks and using software\n> > raid 0 or 1 on top of that for various reasons. I wouldn?t put more than\n> > 16 drives in one array on any of these RAID cards, they?re just not\n> > optimized for really big arrays and tend to fade between 6 to 16 in one\n> > array, depending on the quality.\n> \n> This is what I'm looking at now. The server I'm working on at the moment\n> currently has a PERC6/e and 3xMD1000s which needs to be tested in a few\n> setups. I need to code a benchmarker yet (I haven't found one yet that\n> can come close to replicating our DB usage patterns), but I intend to try:\n\nStupid question, but why do people bother with the Perc line of cards if\nthe LSI brand is better? It seems the headache of trying to get the\nPerc cards to perform is not worth any money saved.\n\n-- \n Bruce Momjian <[email protected]> http://momjian.us\n EnterpriseDB http://enterprisedb.com\n\n + If your life is a hard drive, Christ can be your backup. 
+\n", "msg_date": "Fri, 6 Feb 2009 09:23:59 -0500 (EST)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "--- On Fri, 6/2/09, Bruce Momjian <[email protected]> wrote:\n\n> Stupid question, but why do people bother with the Perc\n> line of cards if\n> the LSI brand is better? It seems the headache of trying\n> to get the\n> Perc cards to perform is not worth any money saved.\n\nI think in most cases the dell cards actually cost more, people end up stuck with them because they come bundled with their servers - they find out too late that they've got a lemon.\n\nUp until recently those in charge of buying hardware where I work insisted everything be supplied from dell. Fortunately that policy is no more; I have enough paperweights.\n\n\n \n", "msg_date": "Fri, 6 Feb 2009 14:34:50 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Glyn Astill wrote:\n>> Stupid question, but why do people bother with the Perc line of\n>> cards if the LSI brand is better? It seems the headache of trying \n>> to get the Perc cards to perform is not worth any money saved.\n> \n> I think in most cases the dell cards actually cost more, people end\n> up stuck with them because they come bundled with their servers -\n> they find out too late that they've got a lemon.\n\nThat's what's been happening with me... The fact Dell prices can have a\nfair bit of downward movement when you get the account manager on the\nphone makes them especially attractive to the people controlling the\npurse strings.\n\nThe biggest reason for me however is the lack of comparative reviews. I\nstruggled to get the LSI card to replace the PERC3 because all I had to\ngo on was qualitative mailing list/forum posts from strangers. The only\nway I got it was to make the argument that other than trying the LSI,\nwe'd have no choice other than replacing the server+shelf+disks.\n\nI want to see just how much better a high-end Areca/Adaptec controller\nis, but I just don't think I can get approval for a £1000 card \"because\nsome guy on the internet said the PERC sucks\". Would that same person\nsay it sucked if it came in Areca packaging? Am I listening to the\nadvice of a professional, or a fanboy?\n\n\n", "msg_date": "Fri, 06 Feb 2009 15:19:34 +0000", "msg_from": "Matt Burke <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Matt Burke wrote:\n> Glyn Astill wrote:\n> >> Stupid question, but why do people bother with the Perc line of\n> >> cards if the LSI brand is better? It seems the headache of trying \n> >> to get the Perc cards to perform is not worth any money saved.\n> > \n> > I think in most cases the dell cards actually cost more, people end\n> > up stuck with them because they come bundled with their servers -\n> > they find out too late that they've got a lemon.\n> \n> That's what's been happening with me... The fact Dell prices can have a\n> fair bit of downward movement when you get the account manager on the\n> phone makes them especially attractive to the people controlling the\n> purse strings.\n> \n> The biggest reason for me however is the lack of comparative reviews. 
I\n> struggled to get the LSI card to replace the PERC3 because all I had to\n> go on was qualitative mailing list/forum posts from strangers. The only\n> way I got it was to make the argument that other than trying the LSI,\n> we'd have no choice other than replacing the server+shelf+disks.\n> \n> I want to see just how much better a high-end Areca/Adaptec controller\n> is, but I just don't think I can get approval for a ?1000 card \"because\n> some guy on the internet said the PERC sucks\". Would that same person\n> say it sucked if it came in Areca packaging? Am I listening to the\n> advice of a professional, or a fanboy?\n\nThe experiences I have heard is that Dell looks at server hardware in\nthe same way they look at their consumer gear, \"If I put in a cheaper\npart, how much will it cost Dell to warranty replace it\". Sorry, but I\ndon't look at my performance or downtime in the same way Dell does. ;-)\n\n-- \n Bruce Momjian <[email protected]> http://momjian.us\n EnterpriseDB http://enterprisedb.com\n\n + If your life is a hard drive, Christ can be your backup. +\n", "msg_date": "Fri, 6 Feb 2009 10:27:13 -0500 (EST)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "\n\n\n\n\n\nBruce Momjian wrote:\n\nMatt Burke wrote:\n \n\n\nwe'd have no choice other than replacing the server+shelf+disks.\n\nI want to see just how much better a high-end Areca/Adaptec controller\nis, but I just don't think I can get approval for a ?1000 card \"because\nsome guy on the internet said the PERC sucks\". Would that same person\nsay it sucked if it came in Areca packaging? Am I listening to the\nadvice of a professional, or a fanboy?\n \n\n\nThe experiences I have heard is that Dell looks at server hardware in\nthe same way they look at their consumer gear, \"If I put in a cheaper\npart, how much will it cost Dell to warranty replace it\". Sorry, but I\ndon't look at my performance or downtime in the same way Dell does. ;-)\n \n\nIt always boils down to money.  To communicate to the ones controlling\nthe purse strings talk dollar bills.  To get what one wants from the\npurse string holders give examples like this. \nBuying cheap hardware can result in a complete shut down resulting in\nlost sales and/or non productive labor being spent.  \n\nExample would be a company generates 100 sales orders an hour average\n$100 = $10,000 if the server is down for 8 hours 1 business day thats\n$80,000 lost in business.  now lets throw in labor average hourly rate\nlets say $15.00 an hour for 10 people = $150.00 for 8 hours = $1200 in\nlost labor.  Now throw in overtime to get caught up  $1800  total labor\ncost  = $3000\n\nThe $200 to $300 saved on the card  was a good decision :-(\n\nNow the argument can be made hardware failures are low so that goes out\nthe door\n\nYour next best argument is showing the waste in lost productivity. \nLets say  because of the cheap hardware purchased the users must sit\nidle  3 seconds per transactions  times 100 transactions per day = 300\nseconds lost X 10 people = 3000 Seconds per day X 235 working days  =\n705000/60/60 = 196 hours lost per year  times 3years for average life\nspan of the server = 588 hours X  average pay rate $15 = $8820.00 lost\nlabor\n\nAgain smart thinking.  
\n\nThere are all kind of ways to win these arguments to push for higher\nquality hardware.\n\n\n", "msg_date": "Fri, 06 Feb 2009 11:05:42 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Fri, Feb 6, 2009 at 8:19 AM, Matt Burke <[email protected]> wrote:\n> Glyn Astill wrote:\n>>> Stupid question, but why do people bother with the Perc line of\n>>> cards if the LSI brand is better? It seems the headache of trying\n>>> to get the Perc cards to perform is not worth any money saved.\n>>\n>> I think in most cases the dell cards actually cost more, people end\n>> up stuck with them because they come bundled with their servers -\n>> they find out too late that they've got a lemon.\n>\n> That's what's been happening with me... The fact Dell prices can have a\n> fair bit of downward movement when you get the account manager on the\n> phone makes them especially attractive to the people controlling the\n> purse strings.\n>\n> The biggest reason for me however is the lack of comparative reviews. I\n> struggled to get the LSI card to replace the PERC3 because all I had to\n> go on was qualitative mailing list/forum posts from strangers. The only\n> way I got it was to make the argument that other than trying the LSI,\n> we'd have no choice other than replacing the server+shelf+disks.\n>\n> I want to see just how much better a high-end Areca/Adaptec controller\n> is, but I just don't think I can get approval for a £1000 card \"because\n> some guy on the internet said the PERC sucks\". Would that same person\n> say it sucked if it came in Areca packaging? Am I listening to the\n> advice of a professional, or a fanboy?\n\nThe best reviews I've seen have been on Tweakers first, then\ntomshardware. I am both a professional who recommends the Areca 16xx\nseries and a bit of a fanboy, mainly because they saved out bacon this\nlast year, compared to the crapware we'd have had to buy from Dell at\ntwice to price to come even close to it in performance. A $11.5k white\nbox with the Areac is a match for over $20k worth of Dell hardware,\nand it just works. Can you get an evaluation unit from a supplier?\n", "msg_date": "Fri, 6 Feb 2009 10:36:47 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On 4-2-2009 22:36 Scott Marlowe wrote:\n> We purhcased the Perc 5E, which dell wanted $728 for last fall with 8\n> SATA disks in an MD-1000 and the performance is just terrible. No\n> matter what we do the best throughput on any RAID setup was about 30\n> megs/second write and 60 Megs/second read. I can get that from a\n> mirror set of the same drives under linux kernel software RAID. This\n> was with battery backed cache enabled. Could be an interaction issue\n> with the MD-1000, or something, but the numbers are just awful. We\n> have a Perc 6(i or e not sure) on a 6 disk SAS array and it's a little\n> better, getting into the hundred meg/second range, but nothing\n> spectacular. They're stable, which is more than I can say for a lot\n> of older PERCs and the servers they came in (x600 series with Perc 3i\n> for instance).\n\nWhen we purchased our Perc 5/e with MD1000 filled with 15 15k rpm sas \ndisks, my colleague actually spend some time benchmarking the PERC and a \nICP Vortex (basically a overclocked Adaptec) on those drives. 
\nUnfortunately he doesn't have too many comparable results, but it \nbasically boiled down to quite good scores for the PERC and a bit less \nfor the ICP Vortex.\nIOMeter sequential reads are above 300MB/s for the RAID5 and above \n240MB/s for a RAID10 (and winbench99 versions range from 400+ to \n600+MB/s). The results for a 10, 12 and to 14 disk configuration also \nshowed nice increments in performance.\n\nSo we've based our purchase on my colleague's earlier bad experience \nwith Adaptec (much worse results than LSI) and weren't dissapointed by \nDell's scores. I have no idea whether Adaptec's results have increased \nover time, unfortunately we haven't had a larger scale disk IO-benchmark \nfor quite some time.\n\nIf you're able to understand Dutch, you can click around here:\nhttp://tweakers.net/benchdb/test/90\n\nBest regards,\n\nArjen\n", "msg_date": "Fri, 06 Feb 2009 18:53:22 +0100", "msg_from": "Arjen van der Meijden <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "> 3. Pure s/w RAID10 if I can convince the PERC to let the OS see the disks\n\nLook for JBOD mode.\n\n\n\nPERC 6 does not have JBOD mode exposed. Dell disables the feature from the LSI firmware in their customization.\nHowever, I have been told that you can convince them to tell you the 'secret handshake' or whatever that allows JBOD to be enabled. The more adventurous flash the card with the LSI firmware, though I'm sure that voids all sorts of support from DELL.\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i  controller\n\n\n\n\n> 3. Pure s/w RAID10 if I can convince the PERC to let the OS see the disks\n\nLook for JBOD mode.\n\n\n\nPERC 6 does not have JBOD mode exposed.  Dell disables the feature from the LSI firmware in their customization.\nHowever, I have been told that you can convince them to tell you the ‘secret handshake’ or whatever that allows JBOD to be enabled.  The more adventurous flash the card with the LSI firmware, though I’m sure that voids all sorts of support from DELL.", "msg_date": "Fri, 6 Feb 2009 09:57:41 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "On 6-2-2009 16:27 Bruce Momjian wrote:\n> The experiences I have heard is that Dell looks at server hardware in\n> the same way they look at their consumer gear, \"If I put in a cheaper\n> part, how much will it cost Dell to warranty replace it\". Sorry, but I\n> don't look at my performance or downtime in the same way Dell does. 
;-)\n\nI'm pretty sure all major server-suppliers will have some form of \nrisk-analysis for their servers, especially in the high volume x86 \nmarket where most servers are replaced in three years time anyway.\nAnd although Dell's image for quality hardware isn't too good, the \nservers we have from them all reached high uptimes before we did \nhardware unrelated reboots.\nOur Dell-desktops/workstations have seen a bit more support-technician's \nthough, so we're not becoming fanboys any time soon ;-) They seem to be \nmuch more serious on quality for their servers compared to the other stuff.\n\nBest regards,\n\nArjen\n", "msg_date": "Fri, 06 Feb 2009 19:09:40 +0100", "msg_from": "Arjen van der Meijden <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Fri, 6 Feb 2009, Bruce Momjian wrote:\n\n> Stupid question, but why do people bother with the Perc line of cards if\n> the LSI brand is better?\n\nBecause when you're ordering a Dell server, all you do is click a little \nbox and you get a PERC card with it. There aren't that many places that \ncarry the LSI cards either, so most people are looking at \"get the PERC as \npart of a supported package from Dell\" vs. \"have one rogue component I \nbought from random reseller mixed in, complicating all future support \ncalls\".\n\nThe failure in this logic is assuming that \"support\" from Dell includes \nmaking sure the box actually performs well. If it works at any speed, \nthat's good enough for Dell.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Fri, 6 Feb 2009 13:49:21 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On 2/6/09 9:53 AM, \"Arjen van der Meijden\" <[email protected]> wrote:\n\nWhen we purchased our Perc 5/e with MD1000 filled with 15 15k rpm sas\ndisks, my colleague actually spend some time benchmarking the PERC and a\nICP Vortex (basically a overclocked Adaptec) on those drives.\nUnfortunately he doesn't have too many comparable results, but it\nbasically boiled down to quite good scores for the PERC and a bit less\nfor the ICP Vortex.\n\nAdaptec's previous generation stuff was not very good. Its the '5IE5' (I = internal port count, E = external) series that is much improved, and very much like the Areca 16xxx series. Maybe not as good - I haven't seen a head to head on those two but they are built very similarly.\nI have no idea if the Vortex referred to correlates with this newer generation card or the old generation. But if it was in relation to a Perc 5, which is also an older (PCI-X, not PCIe) generation, then I'm not sure how much this relates to the Perc 6 and new Adaptecs or Areca 16xxx series which are all PCIe.\n\nOne manufacturer may be good in one generation and stink in another. For those long established providers, every generation tends to yield new winners and losers. Some are more often on the top or the bottom, but it wouldn't be a total shocker a recent low performer like LSI's or 3Ware had a next generation product that ended up on or near the top, or if the next PERC is one of the better ones. 
What is more consistent between generations is the management software and recovery process.\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i  controller\n\n\nOn 2/6/09 9:53 AM, \"Arjen van der Meijden\" <[email protected]> wrote:\n\nWhen we purchased our Perc 5/e with MD1000 filled with 15 15k rpm sas\ndisks, my colleague actually spend some time benchmarking the PERC and a\nICP Vortex (basically a overclocked Adaptec) on those drives.\nUnfortunately he doesn't have too many comparable results, but it\nbasically boiled down to quite good scores for the PERC and a bit less\nfor the ICP Vortex.\n\nAdaptec’s previous generation stuff was not very good.  Its the ‘5IE5’ (I = internal port count, E = external) series that is much improved, and very much like the Areca 16xxx series.  Maybe not as good — I haven’t seen a head to head on those two but they are built very similarly.  \nI have no idea if the Vortex referred to correlates with this newer generation card or the old generation.  But if it was in relation to a Perc 5, which is also an older (PCI-X, not PCIe) generation, then I’m not sure how much this relates to the Perc 6 and new Adaptecs or Areca 16xxx series which are all PCIe.\n\nOne manufacturer may be good in one generation and stink in another.  For those long established providers, every generation tends to yield new winners and losers.  Some are more often on the top or the bottom, but it wouldn’t be a total shocker a recent low performer like LSI’s or 3Ware had a next generation product that ended up on or near the top, or if the next PERC is one of the better ones.  What is more consistent between generations is the management software and recovery process.", "msg_date": "Fri, 6 Feb 2009 16:10:39 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "Arjen van der Meijden <[email protected]> writes:\n\n> When we purchased our Perc 5/e with MD1000 filled with 15 15k rpm sas disks, my\n> colleague actually spend some time benchmarking the PERC and a ICP Vortex\n> (basically a overclocked Adaptec) on those drives. Unfortunately he doesn't\n> have too many comparable results, but it basically boiled down to quite good\n> scores for the PERC and a bit less for the ICP Vortex.\n> IOMeter sequential reads are above 300MB/s for the RAID5 and above 240MB/s for\n> a RAID10 (and winbench99 versions range from 400+ to 600+MB/s). \n\nFWIW those are pretty terrible numbers for fifteen 15k rpm drives. They're\nabout what you would expect if for a PCI-X card which was bus bandwidth\nlimited. A PCI-e card should be able to get about 3x that from the drives.\n\n-- \n Gregory Stark\n EnterpriseDB http://www.enterprisedb.com\n Ask me about EnterpriseDB's RemoteDBA services!\n", "msg_date": "Mon, 16 Feb 2009 13:31:37 +0000", "msg_from": "Gregory Stark <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "BTW\n\nour Machine got build with 8 15k drives in raid10 ,\nfrom bonnie++ results its looks like the machine is\nable to do 400 Mbytes/s seq write and 550 Mbytes/s\nread. 
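(For anyone wanting to reproduce figures like these, a typical bonnie++ run looks something like the line below; the mount point, size and user are assumptions, and -s should be well above installed RAM so the page cache doesn't inflate the numbers:)

bonnie++ -d /mnt/data/bonnie -s 32g -n 0 -f -u postgres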
the BB cache is enabled with 256MB\n\nsda6 --> xfs with default formatting options.\nsda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\nsda8 --> ext3 (default)\n\nit looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\nio throughtput as such in bonnie++ tests .\n\nit looks like ext3 with default options performed worst in my case.\n\nregds\n-- mallah\n\n\nNOTE: observations made in this post are interpretations by the poster\nonly which may or may not be indicative of the true suitablity of the\nfilesystem.\n\n\n\nOn Mon, Feb 16, 2009 at 7:01 PM, Gregory Stark <[email protected]> wrote:\n> Arjen van der Meijden <[email protected]> writes:\n>\n>> When we purchased our Perc 5/e with MD1000 filled with 15 15k rpm sas disks, my\n>> colleague actually spend some time benchmarking the PERC and a ICP Vortex\n>> (basically a overclocked Adaptec) on those drives. Unfortunately he doesn't\n>> have too many comparable results, but it basically boiled down to quite good\n>> scores for the PERC and a bit less for the ICP Vortex.\n>> IOMeter sequential reads are above 300MB/s for the RAID5 and above 240MB/s for\n>> a RAID10 (and winbench99 versions range from 400+ to 600+MB/s).\n>\n> FWIW those are pretty terrible numbers for fifteen 15k rpm drives. They're\n> about what you would expect if for a PCI-X card which was bus bandwidth\n> limited. A PCI-e card should be able to get about 3x that from the drives.\n>\n> --\n> Gregory Stark\n> EnterpriseDB http://www.enterprisedb.com\n> Ask me about EnterpriseDB's RemoteDBA services!\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Tue, 17 Feb 2009 02:04:44 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "The URL of the result is\n\nhttp://98.129.214.99/bonnie/report.html\n\n(sorry if this was a repost)\n\n\nOn Tue, Feb 17, 2009 at 2:04 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> BTW\n>\n> our Machine got build with 8 15k drives in raid10 ,\n> from bonnie++ results its looks like the machine is\n> able to do 400 Mbytes/s seq write and 550 Mbytes/s\n> read. the BB cache is enabled with 256MB\n>\n> sda6 --> xfs with default formatting options.\n> sda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\n> sda8 --> ext3 (default)\n>\n> it looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\n> io throughtput as such in bonnie++ tests .\n>\n> it looks like ext3 with default options performed worst in my case.\n>\n> regds\n> -- mallah\n>\n>\n> NOTE: observations made in this post are interpretations by the poster\n> only which may or may not be indicative of the true suitablity of the\n> filesystem.\n>\n>\n>\n> On Mon, Feb 16, 2009 at 7:01 PM, Gregory Stark <[email protected]> wrote:\n>> Arjen van der Meijden <[email protected]> writes:\n>>\n>>> When we purchased our Perc 5/e with MD1000 filled with 15 15k rpm sas disks, my\n>>> colleague actually spend some time benchmarking the PERC and a ICP Vortex\n>>> (basically a overclocked Adaptec) on those drives. 
Unfortunately he doesn't\n>>> have too many comparable results, but it basically boiled down to quite good\n>>> scores for the PERC and a bit less for the ICP Vortex.\n>>> IOMeter sequential reads are above 300MB/s for the RAID5 and above 240MB/s for\n>>> a RAID10 (and winbench99 versions range from 400+ to 600+MB/s).\n>>\n>> FWIW those are pretty terrible numbers for fifteen 15k rpm drives. They're\n>> about what you would expect if for a PCI-X card which was bus bandwidth\n>> limited. A PCI-e card should be able to get about 3x that from the drives.\n>>\n>> --\n>> Gregory Stark\n>> EnterpriseDB http://www.enterprisedb.com\n>> Ask me about EnterpriseDB's RemoteDBA services!\n>>\n>> --\n>> Sent via pgsql-performance mailing list ([email protected])\n>> To make changes to your subscription:\n>> http://www.postgresql.org/mailpref/pgsql-performance\n>>\n>\n", "msg_date": "Tue, 17 Feb 2009 02:06:32 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Tue, 17 Feb 2009, Rajesh Kumar Mallah wrote:\n> sda6 --> xfs with default formatting options.\n> sda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\n> sda8 --> ext3 (default)\n>\n> it looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\n> io throughtput as such in bonnie++ tests .\n>\n> it looks like ext3 with default options performed worst in my case.\n\nOf course, doing comparisons using a setup like that (on separate \npartitions) will skew the results, because discs' performance differs \ndepending on the portion of the disc being accessed. You should perform \nthe different filesystem tests on the same partition one after the other \ninstead.\n\nMatthew\n\n-- \n\"We did a risk management review. We concluded that there was no risk\n of any management.\" -- Hugo Mills <[email protected]>\n", "msg_date": "Tue, 17 Feb 2009 11:45:43 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Tue, Feb 17, 2009 at 5:15 PM, Matthew Wakeling <[email protected]> wrote:\n> On Tue, 17 Feb 2009, Rajesh Kumar Mallah wrote:\n>>\n>> sda6 --> xfs with default formatting options.\n>> sda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\n>> sda8 --> ext3 (default)\n>>\n>> it looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\n>> io throughtput as such in bonnie++ tests .\n>>\n>> it looks like ext3 with default options performed worst in my case.\n>\n> Of course, doing comparisons using a setup like that (on separate\n> partitions) will skew the results, because discs' performance differs\n> depending on the portion of the disc being accessed. You should perform the\n> different filesystem tests on the same partition one after the other\n> instead.\n\npoint noted . will redo the test on ext3.\n\n\n>\n> Matthew\n>\n> --\n> \"We did a risk management review. 
We concluded that there was no risk\n> of any management.\" -- Hugo Mills <[email protected]>\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Tue, 17 Feb 2009 18:55:42 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Generally speaking, you will want to use a partition that is 25% or less the size of the whole disk as well. If it is the whole thing, one file system can place the file you are testing in a very different place on disk and skew results as well.\n\nMy own tests, using the first 20% of an array for all, showed that xfs with default settings beat out or equalled 'tuned' settings with hardware raid 10, and was far faster than ext3 in sequential transfer rate.\n\nIf testing STR, you will also want to tune the block device read ahead value (example: /sbin/blockdev -getra /dev/sda6). This has very large impact on sequential transfer performance (and no impact on random access). How large of an impact depends quite a bit on what kernel you're on since the readahead code has been getting better over time and requires less tuning. But it still defaults out-of-the-box to more optimal settings for a single drive than RAID.\nFor SAS, try 256 or 512 * the number of effective spindles (spindles * 0.5 for raid 10). For SATA, try 1024 or 2048 * the number of effective spindles. The value is in blocks (512 bytes). There is documentation on the blockdev command, and here is a little write-up I found with a couple web searches: http://portal.itauth.com/2007/11/20/howto-linux-double-your-disk-read-performance-single-command\n\n________________________________________\nFrom: [email protected] [[email protected]] On Behalf Of Rajesh Kumar Mallah [[email protected]]\nSent: Tuesday, February 17, 2009 5:25 AM\nTo: Matthew Wakeling\nCc: [email protected]\nSubject: Re: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i controller\n\nOn Tue, Feb 17, 2009 at 5:15 PM, Matthew Wakeling <[email protected]> wrote:\n> On Tue, 17 Feb 2009, Rajesh Kumar Mallah wrote:\n>>\n>> sda6 --> xfs with default formatting options.\n>> sda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\n>> sda8 --> ext3 (default)\n>>\n>> it looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\n>> io throughtput as such in bonnie++ tests .\n>>\n>> it looks like ext3 with default options performed worst in my case.\n>\n> Of course, doing comparisons using a setup like that (on separate\n> partitions) will skew the results, because discs' performance differs\n> depending on the portion of the disc being accessed. You should perform the\n> different filesystem tests on the same partition one after the other\n> instead.\n\npoint noted . will redo the test on ext3.\n\n\n>\n> Matthew\n>\n> --\n> \"We did a risk management review. 
We concluded that there was no risk\n> of any management.\" -- Hugo Mills <[email protected]>\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n", "msg_date": "Tue, 17 Feb 2009 08:19:23 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i \tcontroller" }, { "msg_contents": "the raid10 voulme was benchmarked again\ntaking in consideration above points\n\n# fdisk -l /dev/sda\nDisk /dev/sda: 290.9 GB, 290984034304 bytes\n255 heads, 63 sectors/track, 35376 cylinders\nUnits = cylinders of 16065 * 512 = 8225280 bytes\n\n Device Boot Start End Blocks Id System\n/dev/sda1 * 1 12 96358+ 83 Linux\n/dev/sda2 13 1317 10482412+ 83 Linux\n/dev/sda3 1318 1578 2096482+ 83 Linux\n/dev/sda4 1579 35376 271482435 5 Extended\n/dev/sda5 1579 1839 2096451 82 Linux swap / Solaris\n/dev/sda6 1840 7919 48837568+ 83 Linux\n/dev/sda7 29297 35376 48837600 83 Linux\n\n\nCASE writes reads\n KB/s KB/s\n\next3(whole disk) 244194 , 352093 one part whole disk\nxfs(whole disk) 402352 , 547674\n\n25ext3 260132 , 420905 partition only first 25%\n25xfs 404291 , 547672 (/dev/sda6)\n\next3_25 227307, 348237 partition\nspecifically last 25%\nxfs25 350661, 474481 (/dev/sda7)\n\n\nEffect of ReadAhead Settings\ndisabled,256(default) , 512,1024\n\nxfs_ra0 414741 , 66144\nxfs_ra256 403647, 545026 all tests on sda6\nxfs_ra512 411357, 564769\nxfs_ra1024 404392, 431168\n\nlooks like 512 was the best setting for this controller\n\nConsidering these two figures\nxfs25 350661, 474481 (/dev/sda7)\n25xfs 404291 , 547672 (/dev/sda6)\n\nlooks like the beginning of the drives are 15% faster\nthan the ending sections , considering this is it worth\ncreating a special tablespace at the begining of drives\n\nif at all done what kind of data objects should be placed\ntowards begining , WAL , indexes , frequently updated tables\nor sequences ?\n\nregds\nmallah.\n\n>On Tue, Feb 17, 2009 at 9:49 PM, Scott Carey <[email protected]> wrote:\n> Generally speaking, you will want to use a partition that is 25% or less the size of the whole disk as well. If it is >the whole thing, one file system can place the file you are testing in a very different place on disk and skew results as well.\n>\n> My own tests, using the first 20% of an array for all, showed that xfs with default settings beat out or equalled >'tuned' settings with hardware raid 10, and was far faster than ext3 in sequential transfer rate.\n\nsame here.\n\n>\n> If testing STR, you will also want to tune the block device read ahead value (example: /sbin/blockdev -getra\n> /dev/sda6). This has very large impact on sequential transfer performance (and no impact on random access). >How large of an impact depends quite a bit on what kernel you're on since the readahead code has been getting >better over time and requires less tuning. But it still defaults out-of-the-box to more optimal settings for a single >drive than RAID.\n> For SAS, try 256 or 512 * the number of effective spindles (spindles * 0.5 for raid 10). For SATA, try 1024 or >2048 * the number of effective spindles. The value is in blocks (512 bytes). 
There is documentation on the >blockdev command, and here is a little write-up I found with a couple web searches:\n>http://portal.itauth.com/2007/11/20/howto-linux-double-your-disk-read-performance-single-command\n\n\n>\n> ________________________________________\n> From: [email protected] [[email protected]] On Behalf Of Rajesh Kumar Mallah [[email protected]]\n> Sent: Tuesday, February 17, 2009 5:25 AM\n> To: Matthew Wakeling\n> Cc: [email protected]\n> Subject: Re: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i controller\n>\n> On Tue, Feb 17, 2009 at 5:15 PM, Matthew Wakeling <[email protected]> wrote:\n>> On Tue, 17 Feb 2009, Rajesh Kumar Mallah wrote:\n>>>\n>>> sda6 --> xfs with default formatting options.\n>>> sda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\n>>> sda8 --> ext3 (default)\n>>>\n>>> it looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\n>>> io throughtput as such in bonnie++ tests .\n>>>\n>>> it looks like ext3 with default options performed worst in my case.\n>>\n>> Of course, doing comparisons using a setup like that (on separate\n>> partitions) will skew the results, because discs' performance differs\n>> depending on the portion of the disc being accessed. You should perform the\n>> different filesystem tests on the same partition one after the other\n>> instead.\n>\n> point noted . will redo the test on ext3.\n>\n>\n>>\n>> Matthew\n>>\n>> --\n>> \"We did a risk management review. We concluded that there was no risk\n>> of any management.\" -- Hugo Mills <[email protected]>\n>>\n>> --\n>> Sent via pgsql-performance mailing list ([email protected])\n>> To make changes to your subscription:\n>> http://www.postgresql.org/mailpref/pgsql-performance\n>>\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Wed, 18 Feb 2009 13:22:36 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "Detailed bonnie++ figures.\n\nhttp://98.129.214.99/bonnie/report.html\n\n\n\nOn Wed, Feb 18, 2009 at 1:22 PM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> the raid10 voulme was benchmarked again\n> taking in consideration above points\n>\n> # fdisk -l /dev/sda\n> Disk /dev/sda: 290.9 GB, 290984034304 bytes\n> 255 heads, 63 sectors/track, 35376 cylinders\n> Units = cylinders of 16065 * 512 = 8225280 bytes\n>\n> Device Boot Start End Blocks Id System\n> /dev/sda1 * 1 12 96358+ 83 Linux\n> /dev/sda2 13 1317 10482412+ 83 Linux\n> /dev/sda3 1318 1578 2096482+ 83 Linux\n> /dev/sda4 1579 35376 271482435 5 Extended\n> /dev/sda5 1579 1839 2096451 82 Linux swap / Solaris\n> /dev/sda6 1840 7919 48837568+ 83 Linux\n> /dev/sda7 29297 35376 48837600 83 Linux\n>\n>\n> CASE writes reads\n> KB/s KB/s\n>\n> ext3(whole disk) 244194 , 352093 one part whole disk\n> xfs(whole disk) 402352 , 547674\n>\n> 25ext3 260132 , 420905 partition only first 25%\n> 25xfs 404291 , 547672 (/dev/sda6)\n>\n> ext3_25 227307, 348237 partition\n> specifically last 25%\n> xfs25 350661, 474481 (/dev/sda7)\n>\n>\n> Effect of ReadAhead Settings\n> disabled,256(default) , 512,1024\n>\n> xfs_ra0 414741 , 66144\n> xfs_ra256 403647, 545026 all tests on sda6\n> xfs_ra512 411357, 564769\n> xfs_ra1024 404392, 431168\n>\n> looks like 512 was the best setting for this controller\n>\n> Considering these two figures\n> xfs25 
350661, 474481 (/dev/sda7)\n> 25xfs 404291 , 547672 (/dev/sda6)\n>\n> looks like the beginning of the drives are 15% faster\n> than the ending sections , considering this is it worth\n> creating a special tablespace at the begining of drives\n>\n> if at all done what kind of data objects should be placed\n> towards begining , WAL , indexes , frequently updated tables\n> or sequences ?\n>\n> regds\n> mallah.\n>\n>>On Tue, Feb 17, 2009 at 9:49 PM, Scott Carey <[email protected]> wrote:\n>> Generally speaking, you will want to use a partition that is 25% or less the size of the whole disk as well. If it is >the whole thing, one file system can place the file you are testing in a very different place on disk and skew results as well.\n>>\n>> My own tests, using the first 20% of an array for all, showed that xfs with default settings beat out or equalled >'tuned' settings with hardware raid 10, and was far faster than ext3 in sequential transfer rate.\n>\n> same here.\n>\n>>\n>> If testing STR, you will also want to tune the block device read ahead value (example: /sbin/blockdev -getra\n>> /dev/sda6). This has very large impact on sequential transfer performance (and no impact on random access). >How large of an impact depends quite a bit on what kernel you're on since the readahead code has been getting >better over time and requires less tuning. But it still defaults out-of-the-box to more optimal settings for a single >drive than RAID.\n>> For SAS, try 256 or 512 * the number of effective spindles (spindles * 0.5 for raid 10). For SATA, try 1024 or >2048 * the number of effective spindles. The value is in blocks (512 bytes). There is documentation on the >blockdev command, and here is a little write-up I found with a couple web searches:\n>>http://portal.itauth.com/2007/11/20/howto-linux-double-your-disk-read-performance-single-command\n>\n>\n>>\n>> ________________________________________\n>> From: [email protected] [[email protected]] On Behalf Of Rajesh Kumar Mallah [[email protected]]\n>> Sent: Tuesday, February 17, 2009 5:25 AM\n>> To: Matthew Wakeling\n>> Cc: [email protected]\n>> Subject: Re: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i controller\n>>\n>> On Tue, Feb 17, 2009 at 5:15 PM, Matthew Wakeling <[email protected]> wrote:\n>>> On Tue, 17 Feb 2009, Rajesh Kumar Mallah wrote:\n>>>>\n>>>> sda6 --> xfs with default formatting options.\n>>>> sda7 --> mkfs.xfs -f -d sunit=128,swidth=512 /dev/sda7\n>>>> sda8 --> ext3 (default)\n>>>>\n>>>> it looks like mkfs.xfs options sunit=128 and swidth=512 did not improve\n>>>> io throughtput as such in bonnie++ tests .\n>>>>\n>>>> it looks like ext3 with default options performed worst in my case.\n>>>\n>>> Of course, doing comparisons using a setup like that (on separate\n>>> partitions) will skew the results, because discs' performance differs\n>>> depending on the portion of the disc being accessed. You should perform the\n>>> different filesystem tests on the same partition one after the other\n>>> instead.\n>>\n>> point noted . will redo the test on ext3.\n>>\n>>\n>>>\n>>> Matthew\n>>>\n>>> --\n>>> \"We did a risk management review. 
We concluded that there was no risk\n>>> of any management.\" -- Hugo Mills <[email protected]>\n>>>\n>>> --\n>>> Sent via pgsql-performance mailing list ([email protected])\n>>> To make changes to your subscription:\n>>> http://www.postgresql.org/mailpref/pgsql-performance\n>>>\n>>\n>> --\n>> Sent via pgsql-performance mailing list ([email protected])\n>> To make changes to your subscription:\n>> http://www.postgresql.org/mailpref/pgsql-performance\n>>\n>\n", "msg_date": "Wed, 18 Feb 2009 13:26:15 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Wed, Feb 18, 2009 at 12:52 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> the raid10 voulme was benchmarked again\n> taking in consideration above points\n\n> Effect of ReadAhead Settings\n> disabled,256(default) , 512,1024\n>\n> xfs_ra0 414741 , 66144\n> xfs_ra256 403647, 545026 all tests on sda6\n> xfs_ra512 411357, 564769\n> xfs_ra1024 404392, 431168\n>\n> looks like 512 was the best setting for this controller\n\nThat's only known for sequential access.\nHow did it perform under the random access, or did the numbers not\nchange too much?\n\n> Considering these two figures\n> xfs25 350661, 474481 (/dev/sda7)\n> 25xfs 404291 , 547672 (/dev/sda6)\n>\n> looks like the beginning of the drives are 15% faster\n> than the ending sections , considering this is it worth\n> creating a special tablespace at the begining of drives\n\nIt's also good because you will be short stroking the drives. They\nwill naturally have a smaller space to move back and forth in and this\ncan increase the random speed access at the same time.\n", "msg_date": "Wed, 18 Feb 2009 01:31:26 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": ">> Effect of ReadAhead Settings\n>> disabled,256(default) , 512,1024\n>>\nSEQUENTIAL\n>> xfs_ra0 414741 , 66144\n>> xfs_ra256 403647, 545026 all tests on sda6\n>> xfs_ra512 411357, 564769\n>> xfs_ra1024 404392, 431168\n>>\n>> looks like 512 was the best setting for this controller\n>\n> That's only known for sequential access.\n> How did it perform under the random access, or did the numbers not\n> change too much?\n\nRANDOM SEEKS /sec\n\nxfs_ra0 6341.0\nxfs_ra256 14642.7\nxfs_ra512 14415.6\nxfs_ra1024 14541.6\n\nthe value does not seems to be having much effect\nunless its totally disabled.\n\nregds\nmallah.\n\n\n>\n", "msg_date": "Wed, 18 Feb 2009 14:14:46 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Wed, Feb 18, 2009 at 1:44 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n>>> Effect of ReadAhead Settings\n>>> disabled,256(default) , 512,1024\n>>>\n> SEQUENTIAL\n>>> xfs_ra0 414741 , 66144\n>>> xfs_ra256 403647, 545026 all tests on sda6\n>>> xfs_ra512 411357, 564769\n>>> xfs_ra1024 404392, 431168\n>>>\n>>> looks like 512 was the best setting for this controller\n>>\n>> That's only known for sequential access.\n>> How did it perform under the random access, or did the numbers not\n>> change too much?\n>\n> RANDOM SEEKS /sec\n>\n> xfs_ra0 6341.0\n> xfs_ra256 14642.7\n> xfs_ra512 14415.6\n> xfs_ra1024 14541.6\n>\n> the value does not seems to be having much effect\n> unless its totally 
disabled.\n\nexcellent. and yes, you have to dump and reload from 32 to 64 bit.\n", "msg_date": "Wed, 18 Feb 2009 01:50:33 -0700", "msg_from": "Scott Marlowe <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "have you tried hanging bunch of raid1 to linux's md, and let it do\nraid0 for you ?\nI heard plenty of stories where this actually sped up performance. One\nnoticeable is case of youtube servers.\n", "msg_date": "Wed, 18 Feb 2009 08:57:25 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On Wed, Feb 18, 2009 at 2:27 PM, Grzegorz Jaśkiewicz <[email protected]> wrote:\n> have you tried hanging bunch of raid1 to linux's md, and let it do\n> raid0 for you ?\n\nHmmm , i will have only 3 bunches in that case as system has to boot\nfrom first bunch\nas system has only 8 drives. i think reducing spindles will reduce perf.\n\nI also have a SATA SAN though from which i can boot!\nbut the server needs to be rebuilt in that case too.\nI (may) give it a shot.\n\nregds\n-- mallah.\n\n> I heard plenty of stories where this actually sped up performance. One\n> noticeable is case of youtube servers.\n>\n", "msg_date": "Wed, 18 Feb 2009 14:33:48 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "2009/2/18 Rajesh Kumar Mallah <[email protected]>:\n> On Wed, Feb 18, 2009 at 2:27 PM, Grzegorz Jaśkiewicz <[email protected]> wrote:\n>> have you tried hanging bunch of raid1 to linux's md, and let it do\n>> raid0 for you ?\n>\n> Hmmm , i will have only 3 bunches in that case as system has to boot\n> from first bunch\n> as system has only 8 drives. i think reducing spindles will reduce perf.\n>\n> I also have a SATA SAN though from which i can boot!\n> but the server needs to be rebuilt in that case too.\n> I (may) give it a shot.\n\nSure, if you do play with that - make sure to tweak 'chunk' size too.\nDefault one is way to small (IMO)\n\n\n-- \nGJ\n", "msg_date": "Wed, 18 Feb 2009 14:05:40 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" }, { "msg_contents": "On 2/18/09 12:31 AM, \"Scott Marlowe\" <[email protected]> wrote:\n\n\n> Effect of ReadAhead Settings\n> disabled,256(default) , 512,1024\n>\n> xfs_ra0 414741 , 66144\n> xfs_ra256 403647, 545026 all tests on sda6\n> xfs_ra512 411357, 564769\n> xfs_ra1024 404392, 431168\n>\n> looks like 512 was the best setting for this controller\n\nThat's only known for sequential access.\nHow did it perform under the random access, or did the numbers not\nchange too much?\n\nIn my tests, I have never seen the readahead value affect random access performance (kernel 2.6.18 +). At the extreme, I tried a 128MB readahead, and random I/O rates were the same. This was with CentOS 5.2, other confirmation of this would be useful. The Linux readahead algorithm is smart enough to only seek ahead after detecting sequential access. 
The readahead algorithm has had various improvements to reduce the need to tune it from 2.6.18 to 2.6.24, but from what I gather, this tuning is skewed towards desktop/workstation drives and not large RAID arrays.\nThe readaheaed value DOES affect random access as a side effect in favor of sequential reads when there is mixed random/sequential load, by decreasing the 'read fragmentation' effect of mixing random seeks into a sequential request stream. For most database loads, this is a good thing, since it increases total bytes read per unit of time, effectively 'getting out of the way' a sequential read rather than making it drag on for a long time by splitting it into non-sequential I/O's while other random access is concurrent.\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i  controller\n\n\nOn 2/18/09 12:31 AM, \"Scott Marlowe\" <[email protected]> wrote:\n\n\n> Effect of ReadAhead Settings\n> disabled,256(default) , 512,1024\n>\n> xfs_ra0                 414741 ,   66144\n> xfs_ra256            403647,  545026                 all tests on sda6\n> xfs_ra512            411357,  564769\n> xfs_ra1024          404392,  431168\n>\n> looks like 512 was the best setting for this controller\n\nThat's only known for sequential access.\nHow did it perform under the random access, or did the numbers not\nchange too much?\n\nIn my tests, I have never seen the readahead value affect random access performance (kernel 2.6.18 +).  At the extreme, I tried a 128MB readahead, and random I/O rates were the same.  This was with CentOS 5.2, other confirmation of this would be useful. The Linux readahead algorithm is smart enough to only seek ahead after detecting sequential access.  The readahead algorithm has had various improvements to reduce the need to tune it from 2.6.18 to 2.6.24, but from what I gather, this tuning is skewed towards desktop/workstation drives and not large RAID arrays.\nThe readaheaed value DOES affect random access as a side effect in favor of sequential reads when there is mixed random/sequential load, by decreasing the ‘read fragmentation’ effect of mixing random seeks into a sequential request stream.  For most database loads, this is a good thing, since it increases total bytes read per unit of time, effectively ‘getting out of the way’ a sequential read rather than making it drag on for a long time by splitting it into non-sequential I/O’s while other random access is concurrent.", "msg_date": "Wed, 18 Feb 2009 11:08:13 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "One thing to note, is that linux's md sets the readahead to 8192 by default instead of 128. I've noticed that in many situations, a large chunk of the performance boost reported is due to this alone.\n\n\nOn 2/18/09 12:57 AM, \"Grzegorz Jaśkiewicz\" <[email protected]> wrote:\n\nhave you tried hanging bunch of raid1 to linux's md, and let it do\nraid0 for you ?\nI heard plenty of stories where this actually sped up performance. One\nnoticeable is case of youtube servers.\n\n\n\n\nRe: [PERFORM] suggestions for postgresql setup on Dell 2950 , PERC6i  controller\n\n\nOne thing to note, is that linux’s md sets the readahead to 8192 by default instead of 128.  
I’ve noticed that in many situations, a large chunk of the performance boost reported is due to this alone.\n\n\nOn 2/18/09 12:57 AM, \"Grzegorz Jaśkiewicz\" <[email protected]> wrote:\n\nhave you tried hanging bunch of raid1 to linux's md, and let it do\nraid0 for you ?\nI heard plenty of stories where this actually sped up performance. One\nnoticeable is case of youtube servers.", "msg_date": "Wed, 18 Feb 2009 11:23:06 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "On 2/17/09 11:52 PM, \"Rajesh Kumar Mallah\" <[email protected]> wrote:\n\nthe raid10 voulme was benchmarked again\ntaking in consideration above points\n\nEffect of ReadAhead Settings\ndisabled,256(default) , 512,1024\n\nxfs_ra0 414741 , 66144\nxfs_ra256 403647, 545026 all tests on sda6\nxfs_ra512 411357, 564769\nxfs_ra1024 404392, 431168\n\nlooks like 512 was the best setting for this controller\n\nTry 4096 or 8192 (or just to see, 32768), you should get numbers very close to a raw partition with xfs with a sufficient readahead value. It is controller dependant for sure, but I usually see a \"small peak\" in performance at 512 or 1024, followed by a dip, then a larger peak and plateau at somewhere near # of drives * the small peak. The higher quality the controller, the less you need to fiddle with this.\nI use a script that runs fio benchmarks with the following profiles with readahead values from 128 to 65536. The single reader STR test peaks with a smaller readahead value than the concurrent reader one (2 ot 8 concurrent sequential readers) and the mixed random/sequential read loads become more biased to sequential transfer (and thus, higher overall throughput in bytes/sec) with larger readahead values. The choice between the cfq and deadline scheduler however will affect the priority of random vs sequential reads more than the readahead - cfq favoring random access due to dividing I/O by time slice.\n\nThe FIO profiles I use for benchmarking are at the end of this message.\n\n\nConsidering these two figures\nxfs25 350661, 474481 (/dev/sda7)\n25xfs 404291 , 547672 (/dev/sda6)\n\nlooks like the beginning of the drives are 15% faster\nthan the ending sections , considering this is it worth\ncreating a special tablespace at the begining of drives\n\nFor SAS drives, its typically a ~15% to 25% degradation (the last 5% is definitely slow). For SATA 3.5\" drives the last 5% is 50% the STR as the front.\nGraphs about half way down this page show what it looks like for a typical SATA drive: http://www.tomshardware.com/reviews/Seagate-Barracuda-1-5-TB,2032-5.html\nAnd a couple figures for some SAS drives here http://www.storagereview.com/ST973451SS.sr?page=0%2C1\n\n\n>\n> If testing STR, you will also want to tune the block device read ahead value (example: /sbin/blockdev -getra\n> /dev/sda6). This has very large impact on sequential transfer performance (and no impact on random access). >How large of an impact depends quite a bit on what kernel you're on since the readahead code has been getting >better over time and requires less tuning. But it still defaults out-of-the-box to more optimal settings for a single >drive than RAID.\n> For SAS, try 256 or 512 * the number of effective spindles (spindles * 0.5 for raid 10). For SATA, try 1024 or >2048 * the number of effective spindles. The value is in blocks (512 bytes). 
There is documentation on the >blockdev command, and here is a little write-up I found with a couple web searches:\n>http://portal.itauth.com/2007/11/20/howto-linux-double-your-disk-read-performance-single-command\n\n\nFIO benchmark profile examples (long, posting here for the archives):\n\n\n*Read benchmarks, sequential:\n\n[read-seq]\n; one sequential reader reading one 64g file\nrw=read\nsize=64g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nnumjobs=1\nnrfiles=1\nruntime=1m\ngroup_reporting=1\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n\n[read-seq]\n; two sequential readers, each concurrently reading a 32g file, for a total of 64g max\nrw=read\nsize=32g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nnumjobs=2\nnrfiles=1\nruntime=1m\ngroup_reporting=1\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n\n[read-seq]\n; eight sequential readers, each concurrently reading a 8g file, for a total of 64g max\nrw=read\nsize=8g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nnumjobs=8\nnrfiles=1\nruntime=1m\ngroup_reporting=1\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n\n\n*Read benchmarks, random 8k reads.\n\n[read-rand]\n; random access on 2g file by single reader, best case scenario.\nrw=randread\nsize=2g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nnumjobs=1\nnrfiles=1\ngroup_reporting=1\nruntime=1m\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n\n[read-rand]\n; 8 concurrent random readers each to its own 1g file\nrw=randread\nsize=1g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nnumjobs=8\nnrfiles=1\ngroup_reporting=1\nruntime=1m\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n\n*Mixed Load:\n\n[global]\n; one random reader concurrently with one sequential reader.\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nruntime=1m\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n[seq-read]\nrw=read\nsize=64g\nnumjobs=1\nnrfiles=1\n[read-rand]\nrw=randread\nsize=1g\nnumjobs=1\nnrfiles=1\n\n\n[global]\n; Four sequential readers concurrent with four random readers\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nruntime=1m\ngroup_reporting=1\nexec_prerun=echo 3 > /proc/sys/vm/drop_caches\n[read-seq]\nrw=read\nsize=8g\nnumjobs=4\nnrfiles=1\n[read-rand]\nrw=randread\nsize=1g\nnumjobs=4\nnrfiles=1\n\n\n\n*Write tests\n\n[write-seq]\nrw=write\nsize=32g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\niodepth=1\nnumjobs=1\nnrfiles=1\nruntime=1m\ngroup_reporting=1\nend_fsync=1\n\n[write-rand]\nrw=randwrite\nsize=32g\ndirectory=/data/test\nfadvise_hint=0\nblocksize=8k\ndirect=0\nioengine=sync\n; overwrite= 1 is MANDATORY for xfs, otherwise the writes are sparse random writes and can slow performance to near zero. 
Postgres only does random re-writes, never sparse random writes.\noverwrite=1\niodepth=1\nnumjobs=1\nnrfiles=1\ngroup_reporting=1\nruntime=1m\nend_fsync=1;\n", "msg_date": "Wed, 18 Feb 2009 11:26:58 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 ,\n PERC6i controller" }, { "msg_contents": "There has been an error in the tests the dataset size was not 2*MEM it\nwas 0.5*MEM\ni shall redo the tests and post results.\n", "msg_date": "Thu, 19 Feb 2009 01:44:44 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: suggestions for postgresql setup on Dell 2950 , PERC6i controller" } ]
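
To make the readahead tuning from the thread above easy to replay, here is a minimal shell sketch. The device name /dev/sda, the value 512 (the best result reported for this PERC6i volume), and the rc.local persistence are assumptions; adjust them for the array device and distribution at hand.

    # show the current readahead, in 512-byte sectors
    /sbin/blockdev --getra /dev/sda

    # try a value, then rerun the sequential-read benchmark; 512 was the
    # sweet spot reported here, larger arrays may prefer 4096 or more
    /sbin/blockdev --setra 512 /dev/sda

    # the setting is lost on reboot, so persist it in a boot script
    # (assuming an rc.local-style setup)
    echo '/sbin/blockdev --setra 512 /dev/sda' >> /etc/rc.d/rc.local
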
[ { "msg_contents": "Hi,\n\nI have a need to fairly often select data where the number of\noccurrences of a character in the field is \"x\". Semantically, it's\nliterally \"SELECT something FROM table WHERE numch('/', field)=$x\".\n\nThe problem is how to do it efficiently. I see there isn't a built-in\nfunction that counts character occurrences so I'd have to write it\nmyself. An additional constraint is that it must be implemented with\nbuilt-in capabilities, i.e. SQL and plpsql languages. I can do it the\nbrute force way, looping over the string and processing one by one\ncharacter with substring(), but is there a faster way?\n\nWhatever the function is, I intend to create an index on it.", "msg_date": "Thu, 05 Feb 2009 14:31:24 +0100", "msg_from": "Ivan Voras <[email protected]>", "msg_from_op": true, "msg_subject": "Number of occurrence of characters?" }, { "msg_contents": "On Thu, Feb 05, 2009 at 02:31:24PM +0100, Ivan Voras wrote:\n> The problem is how to do it efficiently. I see there isn't a built-in\n> function that counts character occurrences so I'd have to write it\n> myself. An additional constraint is that it must be implemented with\n> built-in capabilities, i.e. SQL and plpsql languages. I can do it the\n> brute force way, looping over the string and processing one by one\n> character with substring(), but is there a faster way?\n\n# select length(regexp_replace('/some/string/with/slashes', '[^/]+', '', 'g'));\n length\n--------\n 4\n(1 row)\n\ndepesz\n\n-- \nLinkedin: http://www.linkedin.com/in/depesz / blog: http://www.depesz.com/\njid/gtalk: [email protected] / aim:depeszhdl / skype:depesz_hdl / gg:6749007\n", "msg_date": "Thu, 5 Feb 2009 15:13:35 +0100", "msg_from": "hubert depesz lubaczewski <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Number of occurrence of characters?" }, { "msg_contents": "Ivan Voras wrote:\n> I have a need to fairly often select data where the number of\n> occurrences of a character in the field is \"x\". Semantically, it's\n> literally \"SELECT something FROM table WHERE numch('/', field)=$x\".\n> \n> The problem is how to do it efficiently. I see there isn't a built-in\n> function that counts character occurrences so I'd have to write it\n> myself. An additional constraint is that it must be implemented with\n> built-in capabilities, i.e. SQL and plpsql languages. I can do it the\n> brute force way, looping over the string and processing one by one\n> character with substring(), but is there a faster way?\n\nHmm, you could do this:\n\nCREATE OR REPLACE FUNCTION numch(text, text) RETURNS integer AS $$ \nSELECT length($2) - length(replace($2, $1, '')) $$ LANGUAGE SQL;\n\nie. remove the characters we're counting, and see how much shorter the \nstring became. I don't know if this is any faster than looping in a \nplpgsql function, but it might be.\n\n-- \n Heikki Linnakangas\n EnterpriseDB http://www.enterprisedb.com\n", "msg_date": "Thu, 05 Feb 2009 16:35:07 +0200", "msg_from": "Heikki Linnakangas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Number of occurrence of characters?" }, { "msg_contents": "hubert depesz lubaczewski wrote:\n> On Thu, Feb 05, 2009 at 02:31:24PM +0100, Ivan Voras wrote:\n>> The problem is how to do it efficiently. I see there isn't a built-in\n>> function that counts character occurrences so I'd have to write it\n>> myself. An additional constraint is that it must be implemented with\n>> built-in capabilities, i.e. SQL and plpsql languages. 
I can do it the\n>> brute force way, looping over the string and processing one by one\n>> character with substring(), but is there a faster way?\n> \n> # select length(regexp_replace('/some/string/with/slashes', '[^/]+', '', 'g'));\n> length\n> --------\n> 4\n> (1 row)\n\nThank you (and Heikki), I had a feeling I was missing an approach.", "msg_date": "Fri, 06 Feb 2009 10:42:27 +0100", "msg_from": "Ivan Voras <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Number of occurrence of characters?" } ]
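
Putting the answers above together with the original poster's plan to index the result, a sketch along these lines should work; the table name paths and column name field are placeholders, and the function must be declared IMMUTABLE for the expression index to be allowed:

    CREATE OR REPLACE FUNCTION numch(text, text) RETURNS integer AS $$
        -- count occurrences of the character $1 in $2 by comparing lengths
        SELECT length($2) - length(replace($2, $1, ''));
    $$ LANGUAGE sql IMMUTABLE;

    -- expression index; queries must use the same expression to benefit
    CREATE INDEX paths_slash_count_idx ON paths ((numch('/', field)));

    SELECT something FROM paths WHERE numch('/', field) = 4;
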
[ { "msg_contents": "Hi All,\n\nI'm getting poor performance on full text searches that return lots of \nentries from a table with about 7 million rows. I think the cause is \nrechecking the text match on all of the returned rows, even though I'm \nusing a @@ query on a gin index, which the docs say should not require \na recheck.\n\nHere's the table:\n\n****\nmediacloud=> \\d download_texts;\n Table \"public.download_texts\"\n Column | Type | Modifiers\n-------------------+--------- \n+ \n----------------------------------------------------------------------------\ndownload_texts_id | integer | not null default \nnextval('download_texts_download_texts_id_seq'::regclass)\ndownloads_id | integer | not null\ndownload_text | text | not null\nIndexes:\n \"download_texts_pkey\" PRIMARY KEY, btree (download_texts_id)\n \"download_texts_downloads_id_index\" UNIQUE, btree (downloads_id)\n \"download_texts_textsearch_idx\" gin \n(to_tsvector('english'::regconfig, download_text)), tablespace \n\"large_table_space\"\nForeign-key constraints:\n \"download_texts_downloads_id_fkey\" FOREIGN KEY (downloads_id) \nREFERENCES downloads(downloads_id)\nTablespace: \"large_table_space\"\n****\n\nAnd here's the query:\n\n****\nmediacloud=> explain analyze select count(dt.download_texts_id) from \ndownload_texts dt where to_tsvector('english', download_text) @@ \nto_tsquery('english', 'stimulus');\nQUERY PLAN\n------------------------------------------------------------------------------------------------------------------------------------------------------------\nAggregate (cost=26161.16..26161.17 rows=1 width=4) (actual \ntime=153640.083..153640.083 rows=1 loops=1)\n -> Bitmap Heap Scan on download_texts dt (cost=3937.41..26146.11 \nrows=6018 width=4) (actual time=1957.074..153529.351 rows=72225 loops=1)\n Recheck Cond: (to_tsvector('english'::regconfig, \ndownload_text) @@ '''stimulus'''::tsquery)\n -> Bitmap Index Scan on download_texts_textsearch_idx \n(cost=0.00..3935.90 rows=6018 width=0) (actual time=1048.556..1048.556 \nrows=72225 loops=1)\n Index Cond: (to_tsvector('english'::regconfig, \ndownload_text) @@ '''stimulus'''::tsquery)\nTotal runtime: 153642.249 ms\n****\n\nNearly all of the time is being spent in the bitmap heap scan, I \nsuspect because of the work of rereading and rechecking the text of \nall the matched entries. Is this indeed what's going on here? Is \nthere any way to make postgres not do that recheck?\n\n\nThanks!\n\n-hal\n", "msg_date": "Thu, 5 Feb 2009 17:13:36 -0600", "msg_from": "Hal Roberts <[email protected]>", "msg_from_op": true, "msg_subject": "bitmap heap scan recheck on full text search with gin index" }, { "msg_contents": "Hal,\n\njust create separate column with tsvector and create index on it.\n\nOleg\nOn Thu, 5 Feb 2009, Hal Roberts wrote:\n\n> Hi All,\n>\n> I'm getting poor performance on full text searches that return lots of \n> entries from a table with about 7 million rows. 
I think the cause is \n> rechecking the text match on all of the returned rows, even though I'm using \n> a @@ query on a gin index, which the docs say should not require a recheck.\n>\n> Here's the table:\n>\n> ****\n> mediacloud=> \\d download_texts;\n> Table \"public.download_texts\"\n> Column | Type | Modifiers\n> -------------------+---------+----------------------------------------------------------------------------\n> download_texts_id | integer | not null default \n> nextval('download_texts_download_texts_id_seq'::regclass)\n> downloads_id | integer | not null\n> download_text | text | not null\n> Indexes:\n> \"download_texts_pkey\" PRIMARY KEY, btree (download_texts_id)\n> \"download_texts_downloads_id_index\" UNIQUE, btree (downloads_id)\n> \"download_texts_textsearch_idx\" gin (to_tsvector('english'::regconfig, \n> download_text)), tablespace \"large_table_space\"\n> Foreign-key constraints:\n> \"download_texts_downloads_id_fkey\" FOREIGN KEY (downloads_id) REFERENCES \n> downloads(downloads_id)\n> Tablespace: \"large_table_space\"\n> ****\n>\n> And here's the query:\n>\n> ****\n> mediacloud=> explain analyze select count(dt.download_texts_id) from \n> download_texts dt where to_tsvector('english', download_text) @@ \n> to_tsquery('english', 'stimulus');\n> QUERY PLAN\n> ------------------------------------------------------------------------------------------------------------------------------------------------------------\n> Aggregate (cost=26161.16..26161.17 rows=1 width=4) (actual \n> time=153640.083..153640.083 rows=1 loops=1)\n> -> Bitmap Heap Scan on download_texts dt (cost=3937.41..26146.11 rows=6018 \n> width=4) (actual time=1957.074..153529.351 rows=72225 loops=1)\n> Recheck Cond: (to_tsvector('english'::regconfig, download_text) @@ \n> '''stimulus'''::tsquery)\n> -> Bitmap Index Scan on download_texts_textsearch_idx \n> (cost=0.00..3935.90 rows=6018 width=0) (actual time=1048.556..1048.556 \n> rows=72225 loops=1)\n> Index Cond: (to_tsvector('english'::regconfig, download_text) @@ \n> '''stimulus'''::tsquery)\n> Total runtime: 153642.249 ms\n> ****\n>\n> Nearly all of the time is being spent in the bitmap heap scan, I suspect \n> because of the work of rereading and rechecking the text of all the matched \n> entries. Is this indeed what's going on here? Is there any way to make \n> postgres not do that recheck?\n>\n>\n> Thanks!\n>\n> -hal\n>\n>\n\n \tRegards,\n \t\tOleg\n_____________________________________________________________\nOleg Bartunov, Research Scientist, Head of AstroNet (www.astronet.ru),\nSternberg Astronomical Institute, Moscow University, Russia\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(495)939-16-83, +007(495)939-23-83\n", "msg_date": "Fri, 6 Feb 2009 07:22:17 +0300 (MSK)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: bitmap heap scan recheck on full text search with gin\n index" } ]
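
A concrete sketch of the separate-column approach suggested above, against the table from the original post; the new column, index, and trigger names are illustrative, and tsvector_update_trigger assumes PostgreSQL 8.3 or later:

    ALTER TABLE download_texts ADD COLUMN download_text_tsv tsvector;

    UPDATE download_texts
       SET download_text_tsv = to_tsvector('english', download_text);

    CREATE INDEX download_texts_tsv_idx
        ON download_texts USING gin (download_text_tsv);

    -- keep the column current on future INSERTs/UPDATEs
    CREATE TRIGGER download_texts_tsv_trg
        BEFORE INSERT OR UPDATE ON download_texts
        FOR EACH ROW EXECUTE PROCEDURE
        tsvector_update_trigger(download_text_tsv, 'pg_catalog.english', download_text);

    -- the query then matches against the stored column, so matched rows no
    -- longer have to re-run to_tsvector over the full document text
    SELECT count(dt.download_texts_id)
      FROM download_texts dt
     WHERE dt.download_text_tsv @@ to_tsquery('english', 'stimulus');
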
[ { "msg_contents": "Hi All,\n\nI am conductingDBT2 tests on PostgreSQL. After completing the test while\nanalyzing and creating the results I am getting following error:\n\n./dbt2-run-workload: line 514: 731 Terminated dbt2-client\n${CLIENT_COMMAND_ARGS} -p ${PORT} -o ${CDIR} >\n${CLIENT_OUTPUT_DIR}/`hostname`/client-${SEG}.out 2>&1\nwaiting for server to shut down.... done\nserver stopped\nCan't locate Test/Parser/Dbt2.pm in @INC (@INC contains:\n/usr/lib64/perl5/site_perl/5.8.8/x86_64-linux-thread-multi\n/usr/lib64/perl5/site_perl/5.8.7/x86_64-linux-thread-multi\n/usr/lib64/perl5/site_perl/5.8.6/x86_64-linux-thread-multi\n/usr/lib64/perl5/site_perl/5.8.5/x86_64-linux-thread-multi\n/usr/lib/perl5/site_perl/5.8.8 /usr/lib/perl5/site_perl/5.8.7\n/usr/lib/perl5/site_perl/5.8.6 /usr/lib/perl5/site_perl/5.8.5\n/usr/lib/perl5/site_perl\n/usr/lib64/perl5/vendor_perl/5.8.8/x86_64-linux-thread-multi\n/usr/lib64/perl5/vendor_perl/5.8.7/x86_64-linux-thread-multi\n/usr/lib64/perl5/vendor_perl/5.8.6/x86_64-linux-thread-multi\n/usr/lib64/perl5/vendor_perl/5.8.5/x86_64-linux-thread-multi\n/usr/lib/perl5/vendor_perl/5.8.8 /usr/lib/perl5/vendor_perl/5.8.7\n/usr/lib/perl5/vendor_perl/5.8.6 /usr/lib/perl5/vendor_perl/5.8.5\n/usr/lib/perl5/vendor_perl /usr/lib64/perl5/5.8.8/x86_64-linux-thread-multi\n/usr/lib/perl5/5.8.8 .) at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-post-process line 13.\nBEGIN failed--compilation aborted at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-post-process line 13.\nThe authenticity of host 'localhost (127.0.0.1)' can't be established.\nRSA key fingerprint is 87:1e:a0:e0:4f:b7:32:42:3d:5c:85:10:f3:25:63:8d.\nAre you sure you want to continue connecting (yes/no)? yes\nWarning: Permanently added 'localhost' (RSA) to the list of known hosts.\nWrite failed: Broken pipe\nCreating chart for Index Scans...\nCreating chart for Index Blocks Read...\nCreating chart for Index Blocks Hit...\nCreating chart for Table Blocks Read...\nCreating chart for Table Blocks Hit...\nCreating chart for Sequential Scans...\nCreating chart for Rows Inserted...\nCreating chart for Rows Updated...\nCreating chart for Row Deleted...\nCreating chart for HOT Rows Updated...\nCreating chart for Dead Tuples...\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory 
at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nCan't exec \"gnuplot\": No such file or directory at\n/home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n113.\nTest completed.\nResults are in: OUTPUT\n\nI ma not sure why it doesn't find Test/Parser/Dbt2.pm even if I have\ninstalled DBT2 completely. Did I miss any steps? Do I need to install some\nextra packages? If any then please let me know.\n\nThanks in advance for your help.\n\nThanks,\nRohan\n\nHi All,I am conductingDBT2 tests on PostgreSQL. After completing the test while analyzing and creating the results I am getting following error: ./dbt2-run-workload: line 514:   731 Terminated              dbt2-client ${CLIENT_COMMAND_ARGS} -p ${PORT} -o ${CDIR} > ${CLIENT_OUTPUT_DIR}/`hostname`/client-${SEG}.out 2>&1\nwaiting for server to shut down.... doneserver stoppedCan't locate Test/Parser/Dbt2.pm in @INC (@INC contains: /usr/lib64/perl5/site_perl/5.8.8/x86_64-linux-thread-multi /usr/lib64/perl5/site_perl/5.8.7/x86_64-linux-thread-multi /usr/lib64/perl5/site_perl/5.8.6/x86_64-linux-thread-multi /usr/lib64/perl5/site_perl/5.8.5/x86_64-linux-thread-multi /usr/lib/perl5/site_perl/5.8.8 /usr/lib/perl5/site_perl/5.8.7 /usr/lib/perl5/site_perl/5.8.6 /usr/lib/perl5/site_perl/5.8.5 /usr/lib/perl5/site_perl /usr/lib64/perl5/vendor_perl/5.8.8/x86_64-linux-thread-multi /usr/lib64/perl5/vendor_perl/5.8.7/x86_64-linux-thread-multi /usr/lib64/perl5/vendor_perl/5.8.6/x86_64-linux-thread-multi /usr/lib64/perl5/vendor_perl/5.8.5/x86_64-linux-thread-multi /usr/lib/perl5/vendor_perl/5.8.8 /usr/lib/perl5/vendor_perl/5.8.7 /usr/lib/perl5/vendor_perl/5.8.6 /usr/lib/perl5/vendor_perl/5.8.5 /usr/lib/perl5/vendor_perl /usr/lib64/perl5/5.8.8/x86_64-linux-thread-multi /usr/lib/perl5/5.8.8 .) at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-post-process line 13.\nBEGIN failed--compilation aborted at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-post-process line 13.The authenticity of host 'localhost (127.0.0.1)' can't be established.RSA key fingerprint is 87:1e:a0:e0:4f:b7:32:42:3d:5c:85:10:f3:25:63:8d.\nAre you sure you want to continue connecting (yes/no)? 
yesWarning: Permanently added 'localhost' (RSA) to the list of known hosts.Write failed: Broken pipeCreating chart for Index Scans...Creating chart for Index Blocks Read...\nCreating chart for Index Blocks Hit...Creating chart for Table Blocks Read...Creating chart for Table Blocks Hit...Creating chart for Sequential Scans...Creating chart for Rows Inserted...Creating chart for Rows Updated...\nCreating chart for Row Deleted...Creating chart for HOT Rows Updated...Creating chart for Dead Tuples...Can't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.\nCan't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.Can't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.\nCan't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.Can't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.\nCan't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.Can't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.\nCan't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.Can't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.\nCan't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.Can't exec \"gnuplot\": No such file or directory at /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line 113.\nTest completed.Results are in: OUTPUTI ma not sure why it doesn't find Test/Parser/Dbt2.pm even if I have installed DBT2 completely. Did I miss any steps? Do I need to install some extra packages? If any then please let me know.\nThanks in advance for your help.Thanks,Rohan", "msg_date": "Fri, 6 Feb 2009 17:07:31 +0530", "msg_from": "Rohan Pethkar <[email protected]>", "msg_from_op": true, "msg_subject": "Can't locate Test/Parser/Dbt2.pm in DBT2 tests" }, { "msg_contents": "Rohan Pethkar wrote:\n> Hi All,\n> \n> I am conductingDBT2 tests on PostgreSQL. After completing the test while\n> analyzing and creating the results I am getting following error:\n> \n> ./dbt2-run-workload: line 514: 731 Terminated dbt2-client\n> ${CLIENT_COMMAND_ARGS} -p ${PORT} -o ${CDIR} >\n> ${CLIENT_OUTPUT_DIR}/`hostname`/client-${SEG}.out 2>&1\n> waiting for server to shut down.... 
done\n> server stopped\n> Can't locate Test/Parser/Dbt2.pm in @INC (@INC contains:\n> /usr/lib64/perl5/site_perl/5.8.8/x86_64-linux-thread-multi\n> /usr/lib64/perl5/site_perl/5.8.7/x86_64-linux-thread-multi\n> /usr/lib64/perl5/site_perl/5.8.6/x86_64-linux-thread-multi\n> /usr/lib64/perl5/site_perl/5.8.5/x86_64-linux-thread-multi\n> /usr/lib/perl5/site_perl/5.8.8 /usr/lib/perl5/site_perl/5.8.7\n> /usr/lib/perl5/site_perl/5.8.6 /usr/lib/perl5/site_perl/5.8.5\n> /usr/lib/perl5/site_perl\n> /usr/lib64/perl5/vendor_perl/5.8.8/x86_64-linux-thread-multi\n> /usr/lib64/perl5/vendor_perl/5.8.7/x86_64-linux-thread-multi\n> /usr/lib64/perl5/vendor_perl/5.8.6/x86_64-linux-thread-multi\n> /usr/lib64/perl5/vendor_perl/5.8.5/x86_64-linux-thread-multi\n> /usr/lib/perl5/vendor_perl/5.8.8 /usr/lib/perl5/vendor_perl/5.8.7\n> /usr/lib/perl5/vendor_perl/5.8.6 /usr/lib/perl5/vendor_perl/5.8.5\n> /usr/lib/perl5/vendor_perl /usr/lib64/perl5/5.8.8/x86_64-linux-thread-multi\n> /usr/lib/perl5/5.8.8 .) at\n> /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-post-process line 13.\n\nWell, if Test::Parser::Dbt2 isn't in somewhere in that list of\ndirectories, you'll need to tell perl where to look. Simplest is\nprobably just to:\n export PERL5LIB=\"/path/to/extra/libs\"\nbefore running your tests.\n\n> Can't exec \"gnuplot\": No such file or directory at\n> /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n> 113.\n\nIt also looks like you're missing gnuplot for your charts.\n\n> I ma not sure why it doesn't find Test/Parser/Dbt2.pm even if I have\n> installed DBT2 completely. Did I miss any steps? Do I need to install some\n> extra packages? If any then please let me know.\n\nYou can always \"perldoc perlrun\" for more info (google it if you don't\nhave docs installed locally).\n\n-- \n Richard Huxton\n Archonet Ltd\n", "msg_date": "Fri, 06 Feb 2009 11:46:57 +0000", "msg_from": "Richard Huxton <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Can't locate Test/Parser/Dbt2.pm in DBT2 tests" }, { "msg_contents": "On Fri, Feb 6, 2009 at 3:46 AM, Richard Huxton <[email protected]> wrote:\n> Rohan Pethkar wrote:\n>> Hi All,\n>>\n>> I am conductingDBT2 tests on PostgreSQL. After completing the test while\n>> analyzing and creating the results I am getting following error:\n>>\n>> ./dbt2-run-workload: line 514: 731 Terminated dbt2-client\n>> ${CLIENT_COMMAND_ARGS} -p ${PORT} -o ${CDIR} >\n>> ${CLIENT_OUTPUT_DIR}/`hostname`/client-${SEG}.out 2>&1\n>> waiting for server to shut down.... done\n>> server stopped\n>> Can't locate Test/Parser/Dbt2.pm in @INC (@INC contains:\n>> /usr/lib64/perl5/site_perl/5.8.8/x86_64-linux-thread-multi\n>> /usr/lib64/perl5/site_perl/5.8.7/x86_64-linux-thread-multi\n>> /usr/lib64/perl5/site_perl/5.8.6/x86_64-linux-thread-multi\n>> /usr/lib64/perl5/site_perl/5.8.5/x86_64-linux-thread-multi\n>> /usr/lib/perl5/site_perl/5.8.8 /usr/lib/perl5/site_perl/5.8.7\n>> /usr/lib/perl5/site_perl/5.8.6 /usr/lib/perl5/site_perl/5.8.5\n>> /usr/lib/perl5/site_perl\n>> /usr/lib64/perl5/vendor_perl/5.8.8/x86_64-linux-thread-multi\n>> /usr/lib64/perl5/vendor_perl/5.8.7/x86_64-linux-thread-multi\n>> /usr/lib64/perl5/vendor_perl/5.8.6/x86_64-linux-thread-multi\n>> /usr/lib64/perl5/vendor_perl/5.8.5/x86_64-linux-thread-multi\n>> /usr/lib/perl5/vendor_perl/5.8.8 /usr/lib/perl5/vendor_perl/5.8.7\n>> /usr/lib/perl5/vendor_perl/5.8.6 /usr/lib/perl5/vendor_perl/5.8.5\n>> /usr/lib/perl5/vendor_perl /usr/lib64/perl5/5.8.8/x86_64-linux-thread-multi\n>> /usr/lib/perl5/5.8.8 .) 
at\n>> /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-post-process line 13.\n>\n> Well, if Test::Parser::Dbt2 isn't in somewhere in that list of\n> directories, you'll need to tell perl where to look. Simplest is\n> probably just to:\n> export PERL5LIB=\"/path/to/extra/libs\"\n> before running your tests.\n>\n>> Can't exec \"gnuplot\": No such file or directory at\n>> /home/rohan/NEW_DBT2/Installer/DBT2_SETUP/bin/dbt2-pgsql-analyze-stats line\n>> 113.\n>\n> It also looks like you're missing gnuplot for your charts.\n>\n>> I ma not sure why it doesn't find Test/Parser/Dbt2.pm even if I have\n>> installed DBT2 completely. Did I miss any steps? Do I need to install some\n>> extra packages? If any then please let me know.\n>\n> You can always \"perldoc perlrun\" for more info (google it if you don't\n> have docs installed locally).\n\nHi Rohan,\n\nIn addition to what Richard said, I'm guessing you don't have those\nperl modules installed. The main README file lists the Perl\nmodules required for post-processing the collected data, which\nis what failed here:\n\n\"The data analysis scripts requires two additional Perl packages to be\ninstalled,\nwhich are not checked by configure. They are Statistics::Descriptive and\nTest::Parser. To generate HTML reports, Test::Reporter is required.\"\n\nSorry, it's still a work in progress, as slow as that may be.\n\nRegards,\nMark\n", "msg_date": "Fri, 6 Feb 2009 21:46:44 -0800", "msg_from": "Mark Wong <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Can't locate Test/Parser/Dbt2.pm in DBT2 tests" } ]
[ { "msg_contents": "It all depends, at the end of the day, on how crucial that functionality\nis to your app.\nIf it is very important, I would add a trigger on every insert/update that\nupdates per-character statistics in one or more other tables.\n\nThe other option is to write a function in C that parses the word and\nchops it into (character, number of occurrences) pairs.\n\n\n-- \nGJ\n", "msg_date": "Fri, 6 Feb 2009 13:13:31 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Number of occurrence of characters?" } ]
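The original question is not quoted in this excerpt, but as a rough sketch of the
non-C route: on 8.3 and later, per-character counts can be produced in plain SQL with
regexp_split_to_table, which returns one row per character when asked to split on an
empty pattern. The literal string below is only a placeholder for whatever text column
is actually being analysed:

SELECT ch, count(*) AS occurrences
FROM regexp_split_to_table('hello world', '') AS ch
GROUP BY ch
ORDER BY occurrences DESC;

The trigger approach suggested above would run essentially this aggregation against the
new row's text column and fold the counts into a per-character statistics table.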
[ { "msg_contents": "I have a table, like this:\n\nCREATE TABLE transactions\n(\n transaction_id integer NOT NULL DEFAULT \nnextval('transactions_seq'::regclass),\n transaction_type integer NOT NULL,\n transaction_client_id integer NOT NULL,\n transaction_destination_id integer NOT NULL,\n transaction_operator_id integer NOT NULL,\n transaction_application_id integer NOT NULL,\n transaction_application_service character varying NOT NULL,\n transaction_quantity integer NOT NULL,\n transaction_time_commit timestamp with time zone NOT NULL,\n transaction_time_received timestamp with time zone NOT NULL,\n transaction_gateway_id character(36) NOT NULL,\n transaction_payment_amount integer NOT NULL DEFAULT 0,\n CONSTRAINT transactions_pk PRIMARY KEY (transaction_id),\n CONSTRAINT transactions_uq__gateway_id UNIQUE (transaction_gateway_id)\n)\nWITH (OIDS=FALSE);\n\nNow, all the _type, client_id, destination_id, operator_id, and \napplication_id are foreigen-keyed to coresponding tables. There are no \nindices on those columns.\n\nBesides PK and uq-constraint indices I have this index:\n\nCREATE INDEX transactions_idx__client_data ON transactions\nUSING btree (transaction_client_id, transaction_destination_id, \ntransaction_operator_id, transaction_application_id, \ntransaction_time_commit)\n\nThe table_count is like this:\n\njura=# select count(*) from transactions;\n count\n----------\n 13751457\n(1 row)\n\nThere are roughly 500.000 - 600.000 transactions for each month. There \nare also transactions from past two years in the table.\n\nI often SELECT data from the table for specified time period - usualy \nfrom begining to the end of the month, like this:\n\nSELECT <some-columns> FROM transactions WHERE transaction_time_commit \nBETWEEN '2009-01-01' AND '2009-01-31 23:59:59';\n\nThe problem is that postgres is never using an index:\n\njura=# explain analyze select * from transactions where \ntransaction_time_commit between '2009-01-01' and '2009-01-31 23:59:59';\n \n QUERY PLAN \n\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Seq Scan on transactions (cost=0.00..416865.85 rows=593713 width=91) \n(actual time=4.067..3918.629 rows=525051 loops=1)\n Filter: ((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n Total runtime: 4026.404 ms\n(3 rows)\n\nTime: 4068.521 ms\n\n\nIf I force it not to use sequential scans, it is using index, with \nbenefits of shorter execution time:\njura=# set enable_seqscan to false;\nSET\nTime: 0.103 ms\njura=# explain analyze select * from transactions where \ntransaction_time_commit between '2009-01-01' and '2009-01-31 23:59:59';\n \n QUERY PLAN \n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Bitmap Heap Scan on transactions (cost=410369.98..629869.67 \nrows=593713 width=91) (actual time=1060.569..1280.500 rows=525051 loops=1)\n Recheck Cond: ((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n -> Bitmap Index Scan on transactions_idx__client_data \n(cost=0.00..410221.55 rows=593713 width=0) (actual \ntime=1058.992..1058.992 rows=525051 loops=1)\n Index Cond: 
((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n Total runtime: 1388.882 ms\n(5 rows)\n\nTime: 1396.737 ms\n\n\nNow, I found interesting is that if I create index just on \ntransaction_time_commit column (and I leave \ntransactions_idx__client_data index), then postgres is using that new index.\n\nAlso, if I change idx__client_data index like this (first I drop it, and \nthen I create new one):\n\nCREATE INDEX transactions_idx__client_data ON transactions\nUSING btree (transaction_client_id, transaction_destination_id, \ntransaction_time_commit);\n\nthen postgres is using that index:\n\njura=# explain analyze select * from transactions where \ntransaction_time_commit between '2009-01-01' and '2009-01-31 23:59:59';\n \n QUERY PLAN \n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Bitmap Heap Scan on transactions (cost=349473.37..568973.06 \nrows=593713 width=91) (actual time=949.224..1128.848 rows=525051 loops=1)\n Recheck Cond: ((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n -> Bitmap Index Scan on transactions_idx__client_data \n(cost=0.00..349324.94 rows=593713 width=0) (actual time=947.678..947.678 \nrows=525051 loops=1)\n Index Cond: ((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n Total runtime: 1234.989 ms\n(5 rows)\n\nTime: 1235.727 ms\n\n\n\nNow, I have many 'selects' on the transactions table (still, not as many \nas inserts), mostly filtered on transaction_time, client_id, \ndestination_id and application_id, but there is fair amount of 'selects' \nfiltered only on transaction_time.\n\nNow, shall I keep the original index and add another one on just \ntransaction_time (there is, I guess, overhead of maintaining two \nindices), or shall I remove transaction_time from original index, and \ncreate another one?\n\nAnd, is it normal for postgres to 'ignore' the transaction_time column \nin original index?\n\n\nThis is the postgres version I'm using:\njura=# select version();\n version \n\n----------------------------------------------------------------------------------------------------------------\n PostgreSQL 8.3.5 on i686-pc-linux-gnu, compiled by GCC gcc (GCC) 4.1.2 \n20061115 (prerelease) (Debian 4.1.1-21)\n(1 row)\n\n\n\tMike\n", "msg_date": "Fri, 06 Feb 2009 16:43:52 +0100", "msg_from": "Mario Splivalo <[email protected]>", "msg_from_op": true, "msg_subject": "Postgres not willing to use an index?" 
}, { "msg_contents": "On Fri, Feb 6, 2009 at 3:43 PM, Mario Splivalo\n<[email protected]> wrote:\n\n> Besides PK and uq-constraint indices I have this index:\n>\n> CREATE INDEX transactions_idx__client_data ON transactions\n> USING btree (transaction_client_id, transaction_destination_id,\n> transaction_operator_id, transaction_application_id,\n> transaction_time_commit)\n\nI think it is because it isn't just a simple index, and for some\nreason planner decides - that going through every\ntransaction_application_id, etc, etc just to find right\ntransaction_time_commit isn't worth the hassle.\nTry using few more indexes, on less columns.\nAlso, if I may - I don't think it is quite usefull to have column\nnames that include table name in every single one of them, makes\nthings so much less readable.\n\n-- \nGJ\n", "msg_date": "Fri, 6 Feb 2009 15:55:03 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres not willing to use an index?" }, { "msg_contents": "Mario Splivalo <[email protected]> writes:\n> Besides PK and uq-constraint indices I have this index:\n\n> CREATE INDEX transactions_idx__client_data ON transactions\n> USING btree (transaction_client_id, transaction_destination_id, \n> transaction_operator_id, transaction_application_id, \n> transaction_time_commit)\n\n> SELECT <some-columns> FROM transactions WHERE transaction_time_commit \n> BETWEEN '2009-01-01' AND '2009-01-31 23:59:59';\n> The problem is that postgres is never using an index:\n\nHardly surprising --- a search on the index's lowest-order column would\nrequire scanning practically all of the index. (If you think about the\nordering of the index entries you'll see why.) If this is a typical\nquery then you need a separate index on transaction_time_commit.\n\nThe fine manual goes into some detail about how to design indexes;\nhttp://www.postgresql.org/docs/8.3/static/indexes.html\nparticularly 11.3, 11.5.\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 06 Feb 2009 11:14:32 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres not willing to use an index? " }, { "msg_contents": "On Fri, Feb 6, 2009 at 11:14 AM, Tom Lane <[email protected]> wrote:\n> Mario Splivalo <[email protected]> writes:\n>> Besides PK and uq-constraint indices I have this index:\n>\n>> CREATE INDEX transactions_idx__client_data ON transactions\n>> USING btree (transaction_client_id, transaction_destination_id,\n>> transaction_operator_id, transaction_application_id,\n>> transaction_time_commit)\n>\n>> SELECT <some-columns> FROM transactions WHERE transaction_time_commit\n>> BETWEEN '2009-01-01' AND '2009-01-31 23:59:59';\n>> The problem is that postgres is never using an index:\n>\n> Hardly surprising --- a search on the index's lowest-order column would\n> require scanning practically all of the index. (If you think about the\n> ordering of the index entries you'll see why.) If this is a typical\n> query then you need a separate index on transaction_time_commit.\n>\n> The fine manual goes into some detail about how to design indexes;\n> http://www.postgresql.org/docs/8.3/static/indexes.html\n> particularly 11.3, 11.5.\n\nWhat's weird about this example is that when he sets enable_seqscan to\noff, the bitmap index scan plan is actually substantially faster, even\nthough it in fact does scan nearly the entire heap. 
I don't\nunderstand how it can be faster to scan the index and the heap than to\njust scan the heap.\n\n...Robert\n", "msg_date": "Fri, 6 Feb 2009 11:29:58 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres not willing to use an index?" }, { "msg_contents": ">>> Robert Haas <[email protected]> wrote: \n> What's weird about this example is that when he sets enable_seqscan\nto\n> off, the bitmap index scan plan is actually substantially faster,\neven\n> though it in fact does scan nearly the entire heap. I don't\n> understand how it can be faster to scan the index and the heap than\nto\n> just scan the heap.\n \nIt's cached in the second test, maybe?\n \n-Kevin\n", "msg_date": "Fri, 06 Feb 2009 11:41:12 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres not willing to use an index?" }, { "msg_contents": "On Fri, Feb 6, 2009 at 12:41 PM, Kevin Grittner\n<[email protected]> wrote:\n>>>> Robert Haas <[email protected]> wrote:\n>> What's weird about this example is that when he sets enable_seqscan to\n>> off, the bitmap index scan plan is actually substantially faster, even\n>> though it in fact does scan nearly the entire heap. I don't\n>> understand how it can be faster to scan the index and the heap than to\n>> just scan the heap.\n>\n> It's cached in the second test, maybe?\n\nI gather that the results were repeatable, but perhaps Mario could\ndouble-check that?\n\n...Robert\n", "msg_date": "Fri, 6 Feb 2009 13:12:19 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Postgres not willing to use an index?" }, { "msg_contents": "Tom Lane wrote:\n> \n> Hardly surprising --- a search on the index's lowest-order column would\n> require scanning practically all of the index. (If you think about the\n> ordering of the index entries you'll see why.) If this is a typical\n> query then you need a separate index on transaction_time_commit.\n\nYes, actually I just moved transaction_time_commit column to the \nbegining of the index, since, most of the time I run queries based on \ntransaction_time_commit and then transaction_client_id and \ntransaction_destination_id.\n\n> The fine manual goes into some detail about how to design indexes;\n> http://www.postgresql.org/docs/8.3/static/indexes.html\n> particularly 11.3, 11.5.\n\nI see it now. I read the manual concerning CREATE INDEX command, and \nthere is no mention of multicolumn indices, did not notice Note that \npoints to Chapter 11.\n\n\tMike\n\n", "msg_date": "Mon, 09 Feb 2009 10:28:15 +0100", "msg_from": "Mario Splivalo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres not willing to use an index?" }, { "msg_contents": "Robert Haas wrote:\n> On Fri, Feb 6, 2009 at 12:41 PM, Kevin Grittner\n> <[email protected]> wrote:\n>>>>> Robert Haas <[email protected]> wrote:\n>>> What's weird about this example is that when he sets enable_seqscan to\n>>> off, the bitmap index scan plan is actually substantially faster, even\n>>> though it in fact does scan nearly the entire heap. I don't\n>>> understand how it can be faster to scan the index and the heap than to\n>>> just scan the heap.\n>> It's cached in the second test, maybe?\n> \n> I gather that the results were repeatable, but perhaps Mario could\n> double-check that?\n\nI think that it is always cached - the machine has 4GB of RAM, and i'm \njust using it for testing. 
Now, I think that the cache is used because \nthere is no I/O wait when I run the queries (if you have any suggestion \non how to check cache usage, since I have no idea):\n\njura=# set enable_seqscan to true;\nSET\njura=# explain analyze select * from transactions where \ntransaction_time_commit between '2008-01-01' and '2008-01-31 23:59:59';\n \n QUERY PLAN \n\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Seq Scan on transactions (cost=0.00..418365.68 rows=759775 width=91) \n(actual time=928.342..3788.232 rows=722176 loops=1)\n Filter: ((transaction_time_commit >= '2008-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2008-01-31 23:59:59+01'::timestamp with time zone))\n Total runtime: 3936.744 ms\n(3 rows)\n\njura=# set enable_seqscan to false;\nSET\njura=# explain analyze select * from transactions where \ntransaction_time_commit between '2008-01-01' and '2008-01-31 23:59:59';\n \n QUERY PLAN \n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Bitmap Heap Scan on transactions (cost=428882.89..651630.52 \nrows=759775 width=91) (actual time=1358.040..1633.867 rows=722176 loops=1)\n Recheck Cond: ((transaction_time_commit >= '2008-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2008-01-31 23:59:59+01'::timestamp with time zone))\n -> Bitmap Index Scan on transactions_idx__client_data \n(cost=0.00..428692.95 rows=759775 width=0) (actual \ntime=1354.485..1354.485 rows=722176 loops=1)\n Index Cond: ((transaction_time_commit >= '2008-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2008-01-31 23:59:59+01'::timestamp with time zone))\n Total runtime: 1778.938 ms\n(5 rows)\n\n\nNow, transactions_idx__client_data index has transaction_time_commit as \nthe last column in index.\n\nWhen I 'recreate' the database, and run the queries again, first run \nwhich uses sequential scan is around 10 seconds, heavy I/O, any \nsubsequent query run takes cca 3900 msecs, as shown above.\n\nWhen I say 'disable seqscan', planner uses Bitmap Index Scan, as shown \nabove, just that the first query takes around 25 seconds to run, with \nheavy I/O. Any subsequent query runs take somewhat less than 2 seconds, \nas shown above.\n\nI'm not sure on what to do to minimize the impact of the OS-cache, apart \nfrom taking RAM modules out of the machine - if you have any suggestions \nI'll try to apply them.\n\nOn production database I changed the index so that \n'transaction_time_commit' is the first column, and now I don't get any \nsequential scans on transactions table when only WHERE condition is on \ntransaction_time_commit.\n\n\tMike\n", "msg_date": "Mon, 09 Feb 2009 10:40:45 +0100", "msg_from": "Mario Splivalo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres not willing to use an index?" 
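On the question above about how to check cache usage: PostgreSQL cannot see the operating
system's page cache, but the contrib/pg_buffercache module can show which relations are
currently sitting in shared_buffers. A minimal sketch, assuming the module is installed
in the database being tested (the join is simplified and only covers relations of the
current database):

SELECT c.relname, count(*) AS buffers
FROM pg_buffercache b
JOIN pg_class c ON b.relfilenode = c.relfilenode
GROUP BY c.relname
ORDER BY buffers DESC
LIMIT 10;

For the OS cache itself, watching the "cached" figure reported by free or vmstat on Linux
before and after the first run is about as close as one can easily get.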
}, { "msg_contents": "Mario Splivalo wrote:\n> Robert Haas wrote:\n> jura=# set enable_seqscan to false;\n> SET\n> jura=# explain analyze select * from transactions where \n> transaction_time_commit between '2008-01-01' and '2008-01-31 23:59:59';\n> \n> QUERY PLAN\n> ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- \n> \n> Bitmap Heap Scan on transactions (cost=428882.89..651630.52 \n> rows=759775 width=91) (actual time=1358.040..1633.867 rows=722176 loops=1)\n> Recheck Cond: ((transaction_time_commit >= '2008-01-01 \n> 00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n> '2008-01-31 23:59:59+01'::timestamp with time zone))\n> -> Bitmap Index Scan on transactions_idx__client_data \n> (cost=0.00..428692.95 rows=759775 width=0) (actual \n> time=1354.485..1354.485 rows=722176 loops=1)\n> Index Cond: ((transaction_time_commit >= '2008-01-01 \n> 00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n> '2008-01-31 23:59:59+01'::timestamp with time zone))\n> Total runtime: 1778.938 ms\n> (5 rows)\n> \n> \n\nI neglected to paste this 'explain analyze', when I changed the index so \nthat 'transaction_time_commit' is first column in the index:\n\njura=# explain analyze select * from transactions where \ntransaction_time_commit between '2009-01-01' and '2009-01-31 23:59:59';\n \n QUERY PLAN \n\n-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Bitmap Heap Scan on transactions (cost=7550.51..233419.58 rows=250880 \nwidth=91) (actual time=95.139..280.008 rows=525051 loops=1)\n Recheck Cond: ((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n -> Bitmap Index Scan on transactions_idx__client_data \n(cost=0.00..7487.79 rows=250880 width=0) (actual time=93.382..93.382 \nrows=525051 loops=1)\n Index Cond: ((transaction_time_commit >= '2009-01-01 \n00:00:00+01'::timestamp with time zone) AND (transaction_time_commit <= \n'2009-01-31 23:59:59+01'::timestamp with time zone))\n Total runtime: 386.665 ms\n(5 rows)\n\nThank you, Tom!\n\n\tMike\n", "msg_date": "Mon, 09 Feb 2009 11:07:43 +0100", "msg_from": "Mario Splivalo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Postgres not willing to use an index?" } ]
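To recap the fix that resolved this thread: the date-range predicate could not use the
original composite index because transaction_time_commit was its last column, so the
remedy is either a dedicated index on the time column or a composite index with the time
column leading. This is only a sketch: the first index name is made up, and the column
list of the rebuilt composite index follows the description given above.

CREATE INDEX transactions_idx__time_commit
    ON transactions (transaction_time_commit);

-- or drop and recreate the composite index with the time column first:
CREATE INDEX transactions_idx__client_data
    ON transactions (transaction_time_commit,
                     transaction_client_id,
                     transaction_destination_id);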
[ { "msg_contents": "Hey folks,\n\nI have few tables, that inherit from table X.\nThe query I perform, tries to obtain information about changes in all\ntables that inherit from X,\naside from that, I have table Y that keeps another information related\nto changes, but in bit different schema.\nAnyway, there is one unique id field, shared amongst them.\n\nWhen I want to obtain all that information, I do:\n\n\nselect updateid from (\n select updateid from r.skel\nunion all\n select updateid from r.history\n) as foo\n where updateid > 1232634919168805;\n\nAnd what amazes me, is that no matter what value I choose in where X >\n, postgres will always think this is the best plan:\n\n QUERY PLAN\n-------------------------------------------------------------------------------------------------------\n Subquery Scan foo (cost=0.00..167736.75 rows=978726 width=8)\n Filter: (foo.updateid > 1232634919168805::bigint)\n -> Append (cost=0.00..131034.54 rows=2936177 width=8)\n -> Subquery Scan \"*SELECT* 1\" (cost=0.00..130999.94\nrows=2934947 width=8)\n -> Result (cost=0.00..101650.47 rows=2934947 width=8)\n -> Append (cost=0.00..101650.47 rows=2934947 width=8)\n -> Seq Scan on skel (cost=0.00..24.80\nrows=1480 width=8)\n -> Seq Scan on a skel\n(cost=0.00..22028.96 rows=923596 width=8)\n -> Seq Scan on b skel (cost=0.00..8.01\nrows=201 width=8)\n -> Seq Scan on c skel (cost=0.00..1.81\nrows=81 width=8)\n -> Seq Scan on d skel\n(cost=0.00..22117.94 rows=923594 width=8)\n -> Seq Scan on e skel (cost=0.00..6.03\nrows=303 width=8)\n -> Seq Scan on f skel (cost=0.00..6.02\nrows=202 width=8)\n -> Seq Scan on g skel (cost=0.00..1987.40\nrows=85140 width=8)\n -> Seq Scan on h skel (cost=0.00..1.01\nrows=1 width=8)\n -> Seq Scan on i skel\n(cost=0.00..55454.99 rows=999999 width=8)\n -> Seq Scan on j skel (cost=0.00..13.50\nrows=350 width=8)\n -> Seq Scan on history (cost=0.00..22.30 rows=1230 width=8)\n(18 rows)\n\nso my question is, why isn't postgres use index on some tables , and\nsearch for the X > N individually ?\n\nBecause, yet - I tried to recreate problem, but I wasn't able. 
I have\nthis test db:\n\ncreate schema r;\n\ncreate sequence fooseq;\ncreate domain r.fooint AS bigint NOT NULL default nextval('fooseq');\n\ncreate table skel(aid r.fooint, cd timestamp default now() not null);\n\ncreate table one( a bigserial, aid r.fooint, cd timestamp not null);\ncreate table two( a bigserial, aid r.fooint, cd timestamp not null);\ncreate table three( a bigserial, aid r.fooint, cd timestamp not null);\ncreate table four( a bigserial, aid r.fooint, cd timestamp not null);\ncreate table five( a bigserial, aid r.fooint, cd timestamp not null);\n\ncreate unique index one_aid on one(aid);\ncreate unique index two_aid on two(aid);\ncreate unique index three_aid on three(aid);\ncreate unique index four_aid on four(aid);\ncreate unique index five_aid on five(aid);\n\n\ncreate table numbers( something int default random()*666, aid_foo r.fooint);\n\ncreate unique index numbers_aid on numbers(aid_foo);\n\ninsert into one(a, cd) select generate_series(1,2000000), now();\ninsert into two(a, cd) select generate_series(1,200000), now();\ninsert into three(a, cd) select generate_series(1,2200000), now();\ninsert into four(a, cd) select generate_series(1,2200000), now();\ninsert into five(a, cd) select generate_series(1,2200000), now();\n\ninsert into numbers(something) select generate_series(1,870000);\n\nalter table one inherit skel;\nalter table two inherit skel;\nalter table three inherit skel;\nalter table four inherit skel;\nalter table five inherit skel;\n\n\nBut no matter how many tables I throw in ( and I got to 20 ) - it will\nalways do it right:\ngjaskie=# explain select aid from (select aid from skel union all\nselect aid_foo as aid from numbers) AS foo where aid > 999000;\n QUERY PLAN\n---------------------------------------------------------------------------------------------\n Result (cost=0.00..178034.88 rows=8661268 width=8)\n -> Append (cost=0.00..178034.88 rows=8661268 width=8)\n -> Seq Scan on skel (cost=0.00..32.12 rows=590 width=8)\n Filter: ((aid)::bigint > 999000)\n -> Index Scan using one_aid on one skel\n(cost=0.00..34549.76 rows=991445 width=8)\n Index Cond: ((aid)::bigint > 999000)\n -> Seq Scan on two skel (cost=0.00..3774.00 rows=199980 width=8)\n Filter: ((aid)::bigint > 999000)\n -> Seq Scan on three skel (cost=0.00..41513.00 rows=2199780 width=8)\n Filter: ((aid)::bigint > 999000)\n -> Seq Scan on four skel (cost=0.00..41513.00 rows=2199780 width=8)\n Filter: ((aid)::bigint > 999000)\n -> Seq Scan on five skel (cost=0.00..41513.00 rows=2199780 width=8)\n Filter: ((aid)::bigint > 999000)\n -> Seq Scan on numbers (cost=0.00..15140.00 rows=869913 width=8)\n Filter: ((aid_foo)::bigint > 999000)\n(16 rows)\n\nTime: 36.326 ms\n\n\n\nBut, if I add another union, it screws it up:\n\ngjaskie=# explain select aid from (select aid from skel union all\nselect aid_foo as aid from numbers union all select 1 aid) AS foo\nwhere aid > 999000;\n QUERY PLAN\n--------------------------------------------------------------------------------------------------------\n Subquery Scan foo (cost=0.00..374659.56 rows=3223924 width=8)\n Filter: (foo.aid > 999000)\n -> Append (cost=0.00..253762.42 rows=9671771 width=8)\n -> Result (cost=0.00..253762.40 rows=9671770 width=8)\n -> Append (cost=0.00..253762.40 rows=9671770 width=8)\n -> Result (cost=0.00..144079.70 rows=8801770 width=8)\n -> Append (cost=0.00..144079.70\nrows=8801770 width=8)\n -> Seq Scan on skel\n(cost=0.00..27.70 rows=1770 width=8)\n -> Seq Scan on one skel\n(cost=0.00..32739.00 rows=2000000 width=8)\n -> Seq Scan 
on two skel\n(cost=0.00..3274.00 rows=200000 width=8)\n -> Seq Scan on three skel\n(cost=0.00..36013.00 rows=2200000 width=8)\n -> Seq Scan on four skel\n(cost=0.00..36013.00 rows=2200000 width=8)\n -> Seq Scan on five skel\n(cost=0.00..36013.00 rows=2200000 width=8)\n -> Seq Scan on numbers (cost=0.00..12965.00\nrows=870000 width=8)\n -> Subquery Scan \"*SELECT* 3\" (cost=0.00..0.02 rows=1 width=0)\n -> Result (cost=0.00..0.01 rows=1 width=0)\n(16 rows)\n\nTime: 1.502 ms\n\nnow the question is, how my test db's query:\n select aid from (select aid from skel union all select aid_foo as\naid from numbers union all select 1 aid) AS foo where aid > 999000;\n\ndiffer from original:\n select updateid from ( select updateid from r.skel union all select\nupdateid from r.history ) as foo where updateid > 1232634919168805;\n\n\nOh, and the value N doesn't change the plan here either :/\n\ntested on both 8.3 and 8.4, same results..\nideas welcomed\n\n-- \nGJ\n", "msg_date": "Fri, 6 Feb 2009 18:00:30 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "inheritance, and plans" }, { "msg_contents": "Just guessing here, but what values are you using for\njoin_collapse_limit and from_collapse_limit, and what happens if you\nmake them much bigger (like 100)?\n\n...Robert\n", "msg_date": "Fri, 6 Feb 2009 13:15:16 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "On Fri, Feb 6, 2009 at 6:15 PM, Robert Haas <[email protected]> wrote:\n> Just guessing here, but what values are you using for\n> join_collapse_limit and from_collapse_limit, and what happens if you\n> make them much bigger (like 100)?\nboth default values, afair = 8.\n", "msg_date": "Fri, 6 Feb 2009 18:20:10 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "On Fri, Feb 6, 2009 at 6:20 PM, Grzegorz Jaśkiewicz <[email protected]> wrote:\n> On Fri, Feb 6, 2009 at 6:15 PM, Robert Haas <[email protected]> wrote:\n>> Just guessing here, but what values are you using for\n>> join_collapse_limit and from_collapse_limit, and what happens if you\n>> make them much bigger (like 100)?\n\nchanging them to 100 doesn't help a bit.\n\n\n\n-- \nGJ\n", "msg_date": "Fri, 6 Feb 2009 18:21:01 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]> writes:\n> so my question is, why isn't postgres use index on some tables , and\n> search for the X > N individually ?\n\nThe UNION arms have to all be the same data type in order to have\nrestrictions pushed down through the UNION. You did not show us\nthe table declarations for your first example, but I bet that updateid\nisn't the same type in both. 
(And yes, a domain is different from its\nunderlying type for this purpose.)\n\nIn the second example, \"1\" isn't even the right base type let alone\nthe same domain.\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 06 Feb 2009 16:50:08 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: inheritance, and plans " }, { "msg_contents": "On Fri, Feb 6, 2009 at 9:50 PM, Tom Lane <[email protected]> wrote:\n> The UNION arms have to all be the same data type in order to have\n> restrictions pushed down through the UNION. You did not show us\n> the table declarations for your first example, but I bet that updateid\n> isn't the same type in both. (And yes, a domain is different from its\n> underlying type for this purpose.)\nI think you're right. The domain's in both cases (updateid and uri)\nare bigints default nextval('something') not null;\n\nand the r.history table's ones are just bigints not null. Same\nunderlying type, but not a domain. I'll try to alter it to domain\ntype, and see.\n\nthanks.\n\n\n-- \nGJ\n", "msg_date": "Sat, 7 Feb 2009 10:59:47 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "that helped, thanks a lot Tom.\n\nLooks like additional thing on 'pet peeves' list (from -general).\n:P\n", "msg_date": "Sat, 7 Feb 2009 11:03:17 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "so Tom,\nwith a little help on Irc from Andrew (RhodiumToad) I got it 'fixed',\nbut I know this is just a hack:\n\nIndex: src/backend/utils/adt/selfuncs.c\n===================================================================\nRCS file: /projects/cvsroot/pgsql/src/backend/utils/adt/selfuncs.c,v\nretrieving revision 1.258\ndiff -u -r1.258 selfuncs.c\n--- src/backend/utils/adt/selfuncs.c\t1 Jan 2009 17:23:50 -0000\t1.258\n+++ src/backend/utils/adt/selfuncs.c\t7 Feb 2009 17:20:21 -0000\n@@ -3392,7 +3392,7 @@\n static double\n convert_numeric_to_scalar(Datum value, Oid typid)\n {\n-\tswitch (typid)\n+\tswitch (getBaseType(typid))\n \t{\n \t\tcase BOOLOID:\n \t\t\treturn (double) DatumGetBool(value);\nndex: src/backend/optimizer/path/allpaths.c\n===================================================================\nRCS file: /projects/cvsroot/pgsql/src/backend/optimizer/path/allpaths.c,v\nretrieving revision 1.179\ndiff -u -r1.179 allpaths.c\n--- src/backend/optimizer/path/allpaths.c\t1 Jan 2009 17:23:43 -0000\t1.179\n+++ src/backend/optimizer/path/allpaths.c\t7 Feb 2009 17:21:25 -0000\n@@ -33,6 +33,9 @@\n #include \"parser/parse_clause.h\"\n #include \"parser/parsetree.h\"\n #include \"rewrite/rewriteManip.h\"\n+#include \"utils/lsyscache.h\"\n+#include \"utils/syscache.h\"\n+\n\n\n /* These parameters are set by GUC */\n@@ -1042,7 +1045,7 @@\n \t\t\tcontinue;\t\t\t/* ignore resjunk columns */\n \t\tif (colType == NULL)\n \t\t\telog(ERROR, \"wrong number of tlist entries\");\n-\t\tif (exprType((Node *) tle->expr) != lfirst_oid(colType))\n+\t\tif (exprType((Node *) tle->expr) != lfirst_oid(colType) &&\ngetBaseType(exprType((Node *) tle->expr)) != lfirst_oid(colType))\n \t\t\tdifferentTypes[tle->resno] = true;\n \t\tcolType = lnext(colType);\n \t}\n\n\n\n\nThis is just a hack, and I know it won't work. But the question is, is\nit possible to fix it ?\nI think something should convert types to base type somewhere up the\ncall level. 
Perhaps we should pass on oid of type+baseType ?\n\nI also noticed that the convert_numeric_to_scalar() family of\nfunctions is considered a future change too, atm everything there is\nhardcoded pretty much.\n", "msg_date": "Sat, 7 Feb 2009 17:23:40 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]> writes:\n> with a little help on Irc from Andrew (RhodiumToad) I got it 'fixed',\n> but I know this is just a hack:\n\nYou seem to be laboring under the delusion that this is considered a\nbug. It's a necessary semantic restriction, because the pushed-down\nexpression could mean different things when applied to different\ndata types.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 07 Feb 2009 12:39:24 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: inheritance, and plans " }, { "msg_contents": "On Sat, Feb 7, 2009 at 5:39 PM, Tom Lane <[email protected]> wrote:\n>\n> You seem to be laboring under the delusion that this is considered a\n> bug. It's a necessary semantic restriction, because the pushed-down\n> expression could mean different things when applied to different\n> data types.\n\nVery true Tom, still I was hoping this could be seen as an improvement\n- because in fact in my case it would be .\n\n\nThanks.\n\n\n-- \nGJ\n", "msg_date": "Sat, 7 Feb 2009 17:43:44 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "and frankly I still (and few others) think it is a defect, for domain\nwith some base type should be treated as such. It is after all treated\nthat way when you create index.\n", "msg_date": "Sat, 7 Feb 2009 17:52:56 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": ">>>>> \"Tom\" == Tom Lane <[email protected]> writes:\n\n [domain -> base type conversion interfering with optimization]\n\n Tom> You seem to be laboring under the delusion that this is\n Tom> considered a bug.\n\nOf course it's a bug, or at least a missing feature - there is no\njustification for putting performance deathtraps in the way of using\ndomains.\n\n Tom> It's a necessary semantic restriction, because the pushed-down\n Tom> expression could mean different things when applied to different\n Tom> data types.\n\nHow?\n\nType-dependent selection of operators has already been done as part of\nparse analysis, no? And the domain -> base conversion is purely a\nrelabelling, no? So what semantic change is possible as a result?\n\n-- \nAndrew (irc:RhodiumToad)\n\n", "msg_date": "Sat, 07 Feb 2009 17:58:56 +0000", "msg_from": "Andrew Gierth <[email protected]>", "msg_from_op": false, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "Andrew Gierth <[email protected]> writes:\n> Type-dependent selection of operators has already been done as part of\n> parse analysis, no? And the domain -> base conversion is purely a\n> relabelling, no? So what semantic change is possible as a result?\n\nDomain conversions are *not* simply relabellings. 
It's possible now to\nhave domain-specific functions/operators, and the likely future\nevolution of the system is to make that work better rather than remove it.\n\nIt's possible that there are specific cases where the UNION optimization\nchecks could allow domains to be treated as their base types, but\nblindly smashing both sides of the check to base is going to break more\ncases than it fixes.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sun, 08 Feb 2009 13:34:05 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: inheritance, and plans " }, { "msg_contents": ">>>>> \"Tom\" == Tom Lane <[email protected]> writes:\n\n > Andrew Gierth <[email protected]> writes:\n >> Type-dependent selection of operators has already been done as\n >> part of parse analysis, no? And the domain -> base conversion is\n >> purely a relabelling, no? So what semantic change is possible as a\n >> result?\n\n Tom> Domain conversions are *not* simply relabellings. It's possible\n Tom> now to have domain-specific functions/operators,\n\nRight, but that's irrelevent to the planner in this case because the\nresolution of precisely which operator is being called has _already\nhappened_ (in parse analysis), no?\n\n Tom> It's possible that there are specific cases where the UNION\n Tom> optimization checks could allow domains to be treated as their\n Tom> base types,\n\nThe domain -> base conversion is an important one (to anyone who uses\ndomains) because it happens implicitly in a wide range of contexts,\nand so it's unsatisfactory for it to have major performance impacts\nsuch as interfering with important optimizations.\n\n Tom> but blindly smashing both sides of the check to base is going to\n Tom> break more cases than it fixes.\n\nThe posted code was only looking up the base type for one side, not\nboth (though I don't know that code well enough to know whether it was\nthe correct side); the case of interest is when the subquery has the\ndomain type but the outer query is seeing the base type, _not_ the\nreverse.\n\n-- \nAndrew.\n", "msg_date": "Sun, 08 Feb 2009 18:58:51 +0000", "msg_from": "Andrew Gierth <[email protected]>", "msg_from_op": false, "msg_subject": "Re: inheritance, and plans" }, { "msg_contents": "On Sun, Feb 8, 2009 at 6:34 PM, Tom Lane <[email protected]> wrote:\n\n> It's possible that there are specific cases where the UNION optimization\n> checks could allow domains to be treated as their base types, but\n> blindly smashing both sides of the check to base is going to break more\n> cases than it fixes.\n\nWhat my little patch was trying to proof, is whether, that part of\nplanner could construct the plan better, if it had a notion of base\ntype in domains.\nWhich I still believe it should do, based on my simple test.\n\nAgain, I am not saying, that postgresql should treat domains just as\nan alias to base type, but I do believe that it should at least\nconstruct plan better - based on the base type, not the domain's oid.\n\nIf you know what it might possibly break, can you give some examples please ?\n\n-- \nGJ\n", "msg_date": "Sun, 8 Feb 2009 19:37:33 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": true, "msg_subject": "Re: inheritance, and plans" } ]
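For reference, the fix Grzegorz reported as working was simply to give both UNION arms
the same type, i.e. altering r.history's updateid column from plain bigint to the same
domain used by r.skel. The exact DDL was not posted; the sketch below borrows the domain
name from the earlier test schema, so the real name may differ:

ALTER TABLE r.history
    ALTER COLUMN updateid TYPE r.fooint;

Once both arms of the UNION ALL share one type, the updateid > ... restriction is pushed
down into each arm and the per-table indexes can be used.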
[ { "msg_contents": "Hi\n\nI am having some problem understanding the details of a couple of \nconfiguration directives. Here is what I think I understand it so far:\n\neffective_cache_size\n\t- specifies in how many B/KB/MB will be used to cache data\n\t between pg and the OS?\n\t- what is actually cached and how it is actually used by the\n planner and how does it affect the planner and the queries?\n\t -in other words, when and why do I need to set this parameter?\n\ncheckpoint_segments\n\t- specifies the number of segments?\n\t size: number*16MB?\n\t- means when number of WAL segments has filled up and matches\n checkpoint segment, dirty data is written to disk?\n\t- so it specifies how much data is stored in memory/wal before\n written to disk and therefore affects recoverability?\n\nregards\n\nthomas\n", "msg_date": "Sat, 07 Feb 2009 01:52:17 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "explanation of some configs" }, { "msg_contents": "On Sat, 2009-02-07 at 01:52 +0100, Thomas Finneid wrote:\n> Hi\n> \n> I am having some problem understanding the details of a couple of \n> configuration directives. Here is what I think I understand it so far:\n> \n> effective_cache_size\n> \t- specifies in how many B/KB/MB will be used to cache data\n> \t between pg and the OS?\n> \t- what is actually cached and how it is actually used by the\n> planner and how does it affect the planner and the queries?\n> \t -in other words, when and why do I need to set this parameter?\n\nThis is just a hint to tell the planner how much cache will generally be\navailable. The number should be reflective of your shared buffers +\navailable operating system cache. If you database is postgresql only you\ncan generally set this very high 75% of available ram. If not then you\nneed to tone it down.\n\n> \n> checkpoint_segments\n> \t- specifies the number of segments?\n\nThe number of segments that will be used before a checkpoint is forced.\n\n> \t size: number*16MB?\n\nYes but they are not preallocated.\n\n> \t- means when number of WAL segments has filled up and matches\n> checkpoint segment, dirty data is written to disk?\n\nA checkpoint occurs.\n\nJoshua D. Drake\n\n-- \nPostgreSQL - XMPP: [email protected]\n Consulting, Development, Support, Training\n 503-667-4564 - http://www.commandprompt.com/\n The PostgreSQL Company, serving since 1997\n\n", "msg_date": "Fri, 06 Feb 2009 16:59:11 -0800", "msg_from": "\"Joshua D. Drake\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "Joshua D. Drake wrote:\n>> On Sat, 2009-02-07 at 01:52 +0100, Thomas Finneid wrote:\n >>\n>> effective_cache_size\n >>\n> This is just a hint to tell the planner how much cache will generally be\n> available. \n\nok, but available for what?\n\n> The number should be reflective of your shared buffers +\n> available operating system cache. If you database is postgresql only you\n> can generally set this very high 75% of available ram. If not then you\n> need to tone it down.\n\nSo that means, if I have 8GB ram and have set shared buffer to e.g. 
4GB, \nI should set the effective_cache_size to at least 4GB otherwise the \nplanner will assume I dont have as much memory available so it would be \nsort of pointless so set shared_buffer to 4GB?\n\n\n>> checkpoint_segments\n>> \t- specifies the number of segments?\n> \n> The number of segments that will be used before a checkpoint is forced.\n\nSo to sum it up:\n\n- Specifies the number of memory segments the WAL will use before a \ncheckpoint occur. (A checkpoint guarantees the data has been written to \ndisk, including dirty pages.)\n- A segment is 16MB and the number of actually used segments are dynamic.\n- If this number is too low or the transaction is large, PG will spend \nmore time on performing checkpoint operations which decreases performance.\n\nQ1:\n\nSo checkpoint_time is then just another way of expressing the same?\nI.e. to ensure that if the segments have not been filled, which would \nfoce a checkpoint, a checkpoint is at least forced at the specified time \nlapse?\n\nQ2:\n\nSo how does this relate to WAL buffers? It seems to me that wal_buffers \nare not needed. Based on the above explanation.\n\n\nregards\n\nthomas\n", "msg_date": "Sat, 07 Feb 2009 10:27:34 +0100", "msg_from": "Thomas Finneid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "Thomas Finneid wrote:\n> Joshua D. Drake wrote:\n>>> On Sat, 2009-02-07 at 01:52 +0100, Thomas Finneid wrote:\n> >>\n>>> effective_cache_size\n> >>\n>> This is just a hint to tell the planner how much cache will generally be\n>> available. \n>\n> ok, but available for what?\nfor storing the data/tables/rows in memory so it does not have the disk \nsubsystem. \n>\n>> The number should be reflective of your shared buffers +\n>> available operating system cache. If you database is postgresql only you\n>> can generally set this very high 75% of available ram. If not then you\n>> need to tone it down.\n>\n> So that means, if I have 8GB ram and have set shared buffer to e.g. \n> 4GB, I should set the effective_cache_size to at least 4GB otherwise \n> the planner will assume I dont have as much memory available so it \n> would be sort of pointless so set shared_buffer to 4GB?\nNo because other parts of Postgresql use the shared_buffer to store \ntables/data in memory. If shared_buffer is set low then the system will \nbe during more disk IO as it can't fit that much into memory. Another \ncritical setting to look at is work_mem where all the complex sorting, \nand joins are done which is not related to shared buffers. So leave \nroom in memory for these processes \n>\n>\n>>> checkpoint_segments\n>>> - specifies the number of segments?\n>>\n>> The number of segments that will be used before a checkpoint is forced.\n>\n> So to sum it up:\n>\n> - Specifies the number of memory segments the WAL will use before a \n> checkpoint occur. (A checkpoint guarantees the data has been written \n> to disk, including dirty pages.)\n> - A segment is 16MB and the number of actually used segments are dynamic.\n> - If this number is too low or the transaction is large, PG will spend \n> more time on performing checkpoint operations which decreases \n> performance.\n>\n> Q1:\n>\n> So checkpoint_time is then just another way of expressing the same?\n> I.e. 
to ensure that if the segments have not been filled, which would \n> foce a checkpoint, a checkpoint is at least forced at the specified \n> time lapse?\nYes and No Checkpoint_time does forces a check point regardless if the \nsegment is full or not. Checkpoint_segment is used to force a check \npoint based on size. In a big databases a checkpoint could get very \nlarge before time had elapsed and if server cashed all that work would \nbe rolled back. \n\n>\n> Q2:\n>\n> So how does this relate to WAL buffers? It seems to me that \n> wal_buffers are not needed. Based on the above explanation.\nThis is number of pages in shared memory the Postgresql uses before WAL \nis written to disk this is used to improve performance for large writes.\n\n>\n>\n> regards\n>\n> thomas\n>\n", "msg_date": "Sat, 07 Feb 2009 10:40:52 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": ">>> effective_cache_size\n>>\n>> This is just a hint to tell the planner how much cache will generally be\n>> available.\n>\n> ok, but available for what?\n\nThe documentation on these parameters is really very good.\n\nhttp://www.postgresql.org/docs/8.3/interactive/runtime-config-query.html\nhttp://www.postgresql.org/docs/8.3/interactive/runtime-config-resource.html\n\neffective_cache_size doesn't actually reserve any memory, but it\nenables the planner to know something about what will probably happen\nwhen attempting to execute queries (and therefore generate better\nplans).\n\n>> The number should be reflective of your shared buffers +\n>> available operating system cache. If you database is postgresql only you\n>> can generally set this very high 75% of available ram. If not then you\n>> need to tone it down.\n>\n> So that means, if I have 8GB ram and have set shared buffer to e.g. 4GB, I\n> should set the effective_cache_size to at least 4GB otherwise the planner\n> will assume I dont have as much memory available so it would be sort of\n> pointless so set shared_buffer to 4GB?\n\nYou might want to look at pgtune as a starting point for tuning these settings.\n\nhttp://pgfoundry.org/projects/pgtune/\n\nBut, yes, if you read the documentation (links above) it says to make\neffective_cache_size equal to shared_buffers plus however much of the\noperating system disk cache you expect PostgreSQL to get.\n\n>>> checkpoint_segments\n>>> - specifies the number of segments?\n>>\n>> The number of segments that will be used before a checkpoint is forced.\n>\n> So to sum it up:\n>\n> - Specifies the number of memory segments the WAL will use before a\n> checkpoint occur. (A checkpoint guarantees the data has been written to\n> disk, including dirty pages.)\n> - A segment is 16MB and the number of actually used segments are dynamic.\n> - If this number is too low or the transaction is large, PG will spend more\n> time on performing checkpoint operations which decreases performance.\n\nI think this is pretty much right. The WAL logs are always flushed to\ndisk right away (unless you fool with the asynchronous_commit or fsync\nparameters), so you are not at risk of losing data even if the server\ncrashes before the next checkpoint. But the checkpoints keep you from\naccumulating too much WAL (which eats disk space and makes recovery\nslower in the event of a crash).\n\n> Q1:\n>\n> So checkpoint_time is then just another way of expressing the same?\n> I.e. 
to ensure that if the segments have not been filled, which would foce a\n> checkpoint, a checkpoint is at least forced at the specified time lapse?\n\nhttp://www.postgresql.org/docs/8.3/interactive/wal-configuration.html\n\n\"The server's background writer process will automatically perform a\ncheckpoint every so often. A checkpoint is created every\ncheckpoint_segments log segments, or every checkpoint_timeout seconds,\nwhichever comes first.\"\n\n> Q2:\n>\n> So how does this relate to WAL buffers? It seems to me that wal_buffers are\n> not needed. Based on the above explanation.\n\nwal_buffers are in-memory buffers that hold WAL that has not yet been\nflushed to disk. WAL segments are files hold the WAL that has been\nwritten to disk but not yet recycled (perhaps because the\ncorresponding data blocks haven't yet been written out).\n\n...Robert\n", "msg_date": "Sat, 7 Feb 2009 11:36:32 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "On Sat, 7 Feb 2009, justin wrote:\n> In a big databases a checkpoint could get very large before time had \n> elapsed and if server cashed all that work would be rolled back.\n\nNo. Once you commit a transaction, it is safe (unless you play with fsync \nor asynchronous commit). The size of the checkpoint is irrelevant.\n\nYou see, Postgres writes the data twice. First it writes the data to the \nend of the WAL. WAL_buffers are used to buffer this. Then Postgres calls \nfsync on the WAL when you commit the transaction. This makes the \ntransaction safe, and is usually fast because it will be sequential writes \non a disc. Once fsync returns, Postgres starts the (lower priority) task \nof copying the data from the WAL into the data tables. All the un-copied \ndata in the WAL needs to be held in memory, and that is what \ncheckpoint_segments is for. When that gets full, then Postgres needs to \nstop writes until the copying has freed up the checkpoint segments again.\n\nMatthew\n\n-- \n Don't worry! The world can't end today because it's already tomorrow\n in Australia.\n", "msg_date": "Mon, 9 Feb 2009 10:30:59 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "Matthew Wakeling wrote:\n> On Sat, 7 Feb 2009, justin wrote:\n>> In a big databases a checkpoint could get very large before time had \n>> elapsed and if server cashed all that work would be rolled back.\n>\n> No. Once you commit a transaction, it is safe (unless you play with \n> fsync or asynchronous commit). The size of the checkpoint is irrelevant.\n>\n> You see, Postgres writes the data twice. First it writes the data to \n> the end of the WAL. WAL_buffers are used to buffer this. Then Postgres \n> calls fsync on the WAL when you commit the transaction. This makes the \n> transaction safe, and is usually fast because it will be sequential \n> writes on a disc. Once fsync returns, Postgres starts the (lower \n> priority) task of copying the data from the WAL into the data tables. \n> All the un-copied data in the WAL needs to be held in memory, and that \n> is what checkpoint_segments is for. 
When that gets full, then Postgres \n> needs to stop writes until the copying has freed up the checkpoint \n> segments again.\n>\n> Matthew\n>\nWell then we have conflicting instructions in places on \nwiki.postgresql.org which links to this\nhttp://www.varlena.com/GeneralBits/Tidbits/annotated_conf_e.html\n", "msg_date": "Mon, 09 Feb 2009 10:44:31 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "On Mon, Feb 9, 2009 at 10:44 AM, justin <[email protected]> wrote:\n> Matthew Wakeling wrote:\n>>\n>> On Sat, 7 Feb 2009, justin wrote:\n>>>\n>>> In a big databases a checkpoint could get very large before time had\n>>> elapsed and if server cashed all that work would be rolled back.\n>>\n>> No. Once you commit a transaction, it is safe (unless you play with fsync\n>> or asynchronous commit). The size of the checkpoint is irrelevant.\n>>\n>> You see, Postgres writes the data twice. First it writes the data to the\n>> end of the WAL. WAL_buffers are used to buffer this. Then Postgres calls\n>> fsync on the WAL when you commit the transaction. This makes the transaction\n>> safe, and is usually fast because it will be sequential writes on a disc.\n>> Once fsync returns, Postgres starts the (lower priority) task of copying the\n>> data from the WAL into the data tables. All the un-copied data in the WAL\n>> needs to be held in memory, and that is what checkpoint_segments is for.\n>> When that gets full, then Postgres needs to stop writes until the copying\n>> has freed up the checkpoint segments again.\n>>\n>> Matthew\n>>\n> Well then we have conflicting instructions in places on wiki.postgresql.org\n> which links to this\n> http://www.varlena.com/GeneralBits/Tidbits/annotated_conf_e.html\n\nYes, I think the explanation of checkpoint_segments on that page is\nsimply wrong (though it could be true to a limited extent if you have\nsynchronous_commit turned off).\n\n...Robert\n", "msg_date": "Mon, 9 Feb 2009 12:38:24 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "On Mon, 9 Feb 2009, justin wrote:\n> Well then we have conflicting instructions in places on wiki.postgresql.org \n> which links to this\n> http://www.varlena.com/GeneralBits/Tidbits/annotated_conf_e.html\n\nCould you be a little more specific as to which sections conflict?\n\nMatthew\n\n-- \n The only secure computer is one that's unplugged, locked in a safe,\n and buried 20 feet under the ground in a secret location...and i'm not\n even too sure about that one. --Dennis Huges, FBI\n", "msg_date": "Tue, 10 Feb 2009 12:09:32 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "\n\n\n\n\n\ncheckpoint_segments\nMaximum distance between automatic WAL checkpoints, in log file\nsegments (each segment is normally 16 megabytes).\n\nIncrease these settings if your database has lots of large batch writes\nto decrease the frequency of checkpoints (and thus lower disk\nactivity). 
Decrease them if you are short on disk space or your\nenvironment has a significant risk of unexpected power-outs, as\nany un-checkpointed transactions will dropped on restart.\n\nMatthew Wakeling wrote:\nOn Mon, 9 Feb 2009, justin wrote:\n \nWell then we have conflicting instructions in\nplaces on wiki.postgresql.org which links to this\n \nhttp://www.varlena.com/GeneralBits/Tidbits/annotated_conf_e.html\n\n\n\nCould you be a little more specific as to which sections conflict?\n \n\nMatthew\n \n\n\n\n\n", "msg_date": "Tue, 10 Feb 2009 13:07:23 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "On Tue, 10 Feb 2009, justin wrote:\n\n> http://www.varlena.com/GeneralBits/Tidbits/annotated_conf_e.html\n> checkpoint_segments\n> Maximum distance between automatic WAL checkpoints, in log file segments (each\n> segment is normally 16 megabytes).\n> \n> Increase these settings if your database has lots of large batch writes to\n> decrease the frequency of checkpoints (and thus lower disk activity). Decrease\n> them if you are short on disk space or your environment has a significant risk\n> of unexpected power-outs, as any un-checkpointed transactions will dropped on\n> restart.\n\nYou mentioned getting to there via \nhttp://wiki.postgresql.org/wiki/Performance_Optimization\n\nYou'll note that link is disclaimed with \"older version of material \ncovered in the GUC tour\", and it's aimed at PostgreSQL 7.4. If you read \nthe current version of that document at \nhttp://www.pgcon.org/2008/schedule/attachments/44_annotated_gucs_draft1.pdf\nyou'll see that text you've noted is incorrect isn't there anymore.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Tue, 10 Feb 2009 14:45:30 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "Greg Smith wrote:\n> On Tue, 10 Feb 2009, justin wrote:\n>\n>> http://www.varlena.com/GeneralBits/Tidbits/annotated_conf_e.html\n>> checkpoint_segments\n>> Maximum distance between automatic WAL checkpoints, in log file \n>> segments (each\n>> segment is normally 16 megabytes).\n>>\n>> Increase these settings if your database has lots of large batch \n>> writes to\n>> decrease the frequency of checkpoints (and thus lower disk activity). \n>> Decrease\n>> them if you are short on disk space or your environment has a \n>> significant risk\n>> of unexpected power-outs, as any un-checkpointed transactions will \n>> dropped on\n>> restart.\n>\n> You mentioned getting to there via \n> http://wiki.postgresql.org/wiki/Performance_Optimization\n>\n> You'll note that link is disclaimed with \"older version of material \n> covered in the GUC tour\", and it's aimed at PostgreSQL 7.4. If you \n> read the current version of that document at \n> http://www.pgcon.org/2008/schedule/attachments/44_annotated_gucs_draft1.pdf \n>\n> you'll see that text you've noted is incorrect isn't there anymore.\n>\n> -- \n> * Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n\nNot to be overly nick picking where is the version called out that it \napplies to. 
Stating Older version is vague\n\nIf this new version annotated config file is correct then I can add a \nnew page detailing out the the different annotated config for the \ndifferent versions to the wiki.\n", "msg_date": "Tue, 10 Feb 2009 15:23:05 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "On Tue, 10 Feb 2009, justin wrote:\n\n> Not to be overly nick picking where is the version called out that it applies \n> to. Stating Older version is vague\n\nIt's at the bottom of the document. I just updated the \"Performance \nOptimization\" page to reflect that. One of those things I keep meaning to \ndo is tag all of the documents on that page with the associated verison \nnumber that was current when they were written; many of them cover V8.0 or \nbefore. It's not trivial to figure that out in all cases, for some you \nneed to know a bit about how the configuration parameters changed in order \nto guess which version the advice applied to.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Tue, 10 Feb 2009 15:37:43 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" }, { "msg_contents": "Greg Smith wrote:\n> On Tue, 10 Feb 2009, justin wrote:\n>\n>> Not to be overly nick picking where is the version called out that it \n>> applies to. Stating Older version is vague\n>\n> It's at the bottom of the document. I just updated the \"Performance \n> Optimization\" page to reflect that. One of those things I keep \n> meaning to do is tag all of the documents on that page with the \n> associated verison number that was current when they were written; \n> many of them cover V8.0 or before. It's not trivial to figure that \n> out in all cases, for some you need to know a bit about how the \n> configuration parameters changed in order to guess which version the \n> advice applied to.\nI was thinking something like this\nhttp://wiki.postgresql.org/wiki/Annotated_config\n\nSo in the future all we have to do is add new entries to this page once \nannotated config file is updated with the correct advice. \n", "msg_date": "Tue, 10 Feb 2009 16:01:12 -0500", "msg_from": "justin <[email protected]>", "msg_from_op": false, "msg_subject": "Re: explanation of some configs" } ]
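For reference, a quick way to sanity-check checkpoint_segments on a live server instead of relying on the outdated text quoted above. This is only a sketch and assumes 8.3 or later, where the pg_stat_bgwriter view is available:

    -- If checkpoints_req climbs much faster than checkpoints_timed, checkpoints
    -- are being forced by WAL volume and checkpoint_segments is probably too
    -- small for the write load.
    SELECT checkpoints_timed, checkpoints_req FROM pg_stat_bgwriter;

    -- Peak pg_xlog usage is roughly (2 + checkpoint_completion_target) *
    -- checkpoint_segments * 16MB, so raising the setting costs disk space and
    -- a longer crash recovery, not committed data.
    SHOW checkpoint_segments;

Raising it from the default of 3 to somewhere in the 16 to 64 range is common on write-heavy systems; committed but not yet checkpointed WAL is replayed at startup, not dropped.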
[ { "msg_contents": "Hi:\n\nOur queries are extremely slow only after db server reboot, not after\nrestart postgres db only. The difference is about 10 mins vs. 8 secs. Not\nacceptable. I have browsed around , set the postgres db parameters as\nsuggested. Still the same.\n\nAny suggestion on how to tackle the problem?\n\nThanks\nwei\n\nHi:Our queries are extremely slow only after db server reboot, not after restart postgres db only. The difference is about 10 mins vs. 8 secs. Not acceptable. I have browsed around , set the postgres db parameters as suggested. Still the same. \nAny suggestion on how to tackle the problem?Thankswei", "msg_date": "Mon, 9 Feb 2009 17:21:31 -0800", "msg_from": "Wei Yan <[email protected]>", "msg_from_op": true, "msg_subject": "query slow only after reboot" }, { "msg_contents": "Wei Yan wrote:\n> Hi:\n>\n> Our queries are extremely slow only after db server reboot, not after \n> restart postgres db only. The difference is about 10 mins vs. 8 secs. \n> Not acceptable. I have browsed around , set the postgres db parameters \n> as suggested. Still the same.\n>\n> Any suggestion on how to tackle the problem?\n>\nWhat OS, amount of RAM, DB size...?\n\nIf the queries get progressively faster as you use the system then slow \nagain after a reboot, my initial guess would be that you are getting \nmore and more disk-cache hits the longer you use the system.\n\nCheers,\nSteve\n\n", "msg_date": "Mon, 09 Feb 2009 17:35:47 -0800", "msg_from": "Steve Crawford <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query slow only after reboot" }, { "msg_contents": "You can try using the pg_stat_io table and related stat tables to figure out which ones are responsible for all this I/O on startup.\nThen, for the big offenders issue a select count(*) on those right at the start, to force the OS to read the pages into memory. This won't be effective if these are too much larger than RAM. Also, running queries that do large index scans on tables/indexes heavily used can help.\nIts a brute force preload, but my guess is your woes are caused by random I/O on tables and indexes that you can attempt to pre-cache by forcing sequential access on some of that data. There are many other ways to read heavily accessed tables or indexes into OS memory on a freshly restarted system - even just reading the raw files with something like grep.\n\n\n\n\nOn 2/9/09 5:21 PM, \"Wei Yan\" <[email protected]> wrote:\n\nHi:\n\nOur queries are extremely slow only after db server reboot, not after restart postgres db only. The difference is about 10 mins vs. 8 secs. Not acceptable. I have browsed around , set the postgres db parameters as suggested. Still the same.\n\nAny suggestion on how to tackle the problem?\n\nThanks\nwei\n\n\n\n\n\nRe: [PERFORM] query slow only after reboot\n\n\nYou can try using the pg_stat_io table and related stat tables to figure out which ones are responsible for all this I/O on startup.\nThen, for the big offenders issue a select count(*) on those right at the start, to force the OS to read the pages into memory.  This won’t be effective if these are too much larger than RAM.  Also, running queries that do large index scans on tables/indexes heavily used can help. \nIts a brute force preload, but my guess is your woes are caused by random I/O on tables and indexes that you can attempt to pre-cache by forcing sequential access on some of that data.  
There are many other ways to read heavily accessed tables or indexes into OS memory on a freshly restarted system — even just reading the raw files with something like grep.\n\n\n\n\nOn 2/9/09 5:21 PM, \"Wei Yan\" <[email protected]> wrote:\n\nHi:\n\nOur queries are extremely slow only after db server reboot, not after restart postgres db only. The difference is about 10 mins vs. 8 secs. Not acceptable. I have browsed around , set the postgres db parameters as suggested. Still the same. \n\nAny suggestion on how to tackle the problem?\n\nThanks\nwei", "msg_date": "Mon, 9 Feb 2009 18:20:56 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query slow only after reboot" }, { "msg_contents": ">>> Wei Yan <[email protected]> wrote: \n> Our queries are extremely slow only after db server reboot\n \n> Any suggestion on how to tackle the problem?\n \nOthers have addressed how you can try to prime your cache after a\nreboot, so I'll mention the 800 pound gorilla sitting in the corner. \nI suggest you don't reboot as often, and when you have to do so,\nschedule it off-hours, when there is time for the cache to get\npopulated with minimal pain.\n \nWe basically don't reboot our database servers except when we upgrade\nthe kernel. Even when we were running Windows as the OS on some\nservers we were able to tune it so that reboots could be relatively\ninfrequent by aggressively disabling services and daemons down to the\nbare minimum.\n \n-Kevin\n", "msg_date": "Tue, 10 Feb 2009 09:00:54 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query slow only after reboot" } ]
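As a concrete starting point for the pre-warming idea above: the statistics views in question are the pg_statio_* family. A rough sketch, where the table name in the last statement is only a placeholder for one of your own hot tables:

    -- Tables that have needed the most physical heap reads since the stats
    -- were last reset; likely candidates to touch right after a reboot.
    SELECT relname, heap_blks_read, idx_blks_read
      FROM pg_statio_user_tables
     ORDER BY heap_blks_read DESC
     LIMIT 10;

    -- Brute-force warm-up: a sequential scan pulls the table into the OS cache.
    SELECT count(*) FROM some_hot_table;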
[ { "msg_contents": "Hi ,\n\nI have a query in which two huge tables (A,B) are joined using an indexed\ncolumn and a search is made on tsvector on some column on B. Very limited\nrows of B are expected to match the query on tsvector column.\n\nWith default planner settings the query takes too long ( > 100 secs) , but\nwith hashjoin off it returns almost immediately. The question is , is\nit is advisable to\ntweak planner settings for specific queries in application ?\n\n\nThe plans are as follows.\n\n1. With default settings\n\nexplain select lead_id from general.trade_leads join\ngeneral.profile_master as pm using(profile_id) where status ='m' and\nco_name_vec @@ to_tsquery('plastic&tubes') limit 20;\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------\n Limit (cost=4109.11..11127.78 rows=20 width=4)\n -> Hash Join (cost=4109.11..90789.72 rows=247 width=4)\n Hash Cond: (trade_leads.profile_id = pm.profile_id)\n -> Seq Scan on trade_leads (cost=0.00..85752.52 rows=246832 width=8)\n Filter: ((status)::text = 'm'::text)\n -> Hash (cost=4095.68..4095.68 rows=1074 width=4)\n -> Bitmap Heap Scan on profile_master pm\n(cost=40.89..4095.68 rows=1074 width=4)\n Filter: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n -> Bitmap Index Scan on\nprofile_master_co_name_vec (cost=0.00..40.62 rows=1074 width=0)\n Index Cond: (co_name_vec @@ '''plastic'' &\n''tube'''::tsquery)\n(10 rows)\n\n\n\n2. with SET enable_hashjoin TO off;\n\nexplain analyze select lead_id from general.trade_leads join\ngeneral.profile_master as pm using(profile_id) where status ='m' and\nco_name_vec @@ to_tsquery('plastic&tubes') limit 20;\n\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Limit (cost=3.42..13080.44 rows=20 width=4) (actual\ntime=1530.039..1530.039 rows=0 loops=1)\n -> Nested Loop (cost=3.42..161504.56 rows=247 width=4) (actual\ntime=1530.037..1530.037 rows=0 loops=1)\n -> Index Scan using profile_master_co_name_vec on\nprofile_master pm (cost=0.00..4335.36 rows=1074 width=4) (actual\ntime=220.821..1014.501 rows=7 loops=1)\n Index Cond: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n Filter: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n -> Bitmap Heap Scan on trade_leads (cost=3.42..145.75\nrows=47 width=8) (actual time=73.640..73.640 rows=0 loops=7)\n Recheck Cond: (trade_leads.profile_id = pm.profile_id)\n Filter: ((status)::text = 'm'::text)\n -> Bitmap Index Scan on trade_leads_profile_id\n(cost=0.00..3.41 rows=47 width=0) (actual time=73.579..73.579 rows=0\nloops=7)\n Index Cond: (trade_leads.profile_id = pm.profile_id)\n Total runtime: 1530.137 ms\n\n\n\nregds\nmallah.\n", "msg_date": "Tue, 10 Feb 2009 16:01:21 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "query becomes fas on 'SET enable_hashjoin TO off;'" }, { "msg_contents": "On Tue, Feb 10, 2009 at 5:31 AM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> I have a query in which two huge tables (A,B) are joined using an indexed\n> column and a search is made on tsvector on some column on B. Very limited\n> rows of B are expected to match the query on tsvector column.\n>\n> With default planner settings the query takes too long ( > 100 secs) , but\n> with hashjoin off it returns almost immediately. 
The question is , is\n> it is advisable to\n> tweak planner settings for specific queries in application ?\n\nThe ones that start with \"enable_\" usually shouldn't be changed.\nThey're mostly for debugging and finding problems.\n\n> The plans are as follows.\n\nIt's a little hard to figure out what's gone wrong here because you've\nonly included EXPLAIN ANALYZE output for one of the plans - the other\nis just regular EXPLAIN. Can you send that, along with the output of\nthe following query:\n\nSELECT SUM(1) FROM trade_leads WHERE status = 'm'\n\nI'm guessing that the problem is that the selectivity estimate for\nco_name_vec @@ to_tsquery('plastic&tubes') is not very good, but I'm\nnot real familiar with full text search, so I'm not sure whether\nthere's anything sensible you can do about it.\n\n...Robert\n", "msg_date": "Tue, 10 Feb 2009 08:06:30 -0500", "msg_from": "Robert Haas <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query becomes fas on 'SET enable_hashjoin TO off;'" }, { "msg_contents": "Dear Robert,\nthanks for ur interest. Our server was too loaded\nwhat i posted my last observation, now the\nother explain analyze can also be run and i am posting\nboth the result , as you can see latter is 55ms versus\n3000 ms .\n\n explain analyze select lead_id from general.trade_leads join\ngeneral.profile_master as pm using(profile_id) where status ='m' and\nco_name_vec @@ to_tsquery('plastic&tubes') limit 20;\n\n QUERY PLAN\n-----------------------------------------------------------------------------------------------------------------------------------------------------------\n Limit (cost=4109.11..11127.78 rows=20 width=4) (actual\ntime=3076.059..3076.059 rows=0 loops=1)\n -> Hash Join (cost=4109.11..90789.72 rows=247 width=4) (actual\ntime=3076.057..3076.057 rows=0 loops=1)\n Hash Cond: (trade_leads.profile_id = pm.profile_id)\n -> Seq Scan on trade_leads (cost=0.00..85752.52 rows=246832\nwidth=8) (actual time=0.020..2972.446 rows=127371 loops=1)\n Filter: ((status)::text = 'm'::text)\n -> Hash (cost=4095.68..4095.68 rows=1074 width=4) (actual\ntime=42.368..42.368 rows=7 loops=1)\n -> Bitmap Heap Scan on profile_master pm\n(cost=40.89..4095.68 rows=1074 width=4) (actual time=42.287..42.360\nrows=7 loops=1)\n Filter: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n -> Bitmap Index Scan on\nprofile_master_co_name_vec (cost=0.00..40.62 rows=1074 width=0)\n(actual time=42.252..42.252 rows=7 loops=1)\n Index Cond: (co_name_vec @@ '''plastic'' &\n''tube'''::tsquery)\n Total runtime: 3076.121 ms\n(11 rows)\n\ntradein_clients=> SET enable_hashjoin TO off;\nSET\ntradein_clients=> explain analyze select lead_id from\ngeneral.trade_leads join general.profile_master as pm\nusing(profile_id) where status ='m' and co_name_vec @@\nto_tsquery('plastic&tubes') limit 20;\n\n QUERY PLAN\n------------------------------------------------------------------------------------------------------------------------------------------------------------------\n Limit (cost=3.42..13080.44 rows=20 width=4) (actual\ntime=55.233..55.233 rows=0 loops=1)\n -> Nested Loop (cost=3.42..161504.56 rows=247 width=4) (actual\ntime=55.232..55.232 rows=0 loops=1)\n -> Index Scan using profile_master_co_name_vec on\nprofile_master pm (cost=0.00..4335.36 rows=1074 width=4) (actual\ntime=16.578..46.175 rows=7 loops=1)\n Index Cond: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n Filter: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n -> Bitmap Heap Scan on trade_leads (cost=3.42..145.75\nrows=47 
width=8) (actual time=1.287..1.287 rows=0 loops=7)\n Recheck Cond: (trade_leads.profile_id = pm.profile_id)\n Filter: ((status)::text = 'm'::text)\n -> Bitmap Index Scan on trade_leads_profile_id\n(cost=0.00..3.41 rows=47 width=0) (actual time=1.285..1.285 rows=0\nloops=7)\n Index Cond: (trade_leads.profile_id = pm.profile_id)\n Total runtime: 55.333 ms\n(11 rows)\n\n SELECT SUM(1) FROM general.trade_leads WHERE status = 'm';\n sum\n--------\n 127371\n\nthis constitutes 90% of the total rows.\n\nregds\nmallah.\n\n\nOn Tue, Feb 10, 2009 at 6:36 PM, Robert Haas <[email protected]> wrote:\n> On Tue, Feb 10, 2009 at 5:31 AM, Rajesh Kumar Mallah\n> <[email protected]> wrote:\n>> I have a query in which two huge tables (A,B) are joined using an indexed\n>> column and a search is made on tsvector on some column on B. Very limited\n>> rows of B are expected to match the query on tsvector column.\n>>\n>> With default planner settings the query takes too long ( > 100 secs) , but\n>> with hashjoin off it returns almost immediately. The question is , is\n>> it is advisable to\n>> tweak planner settings for specific queries in application ?\n>\n> The ones that start with \"enable_\" usually shouldn't be changed.\n> They're mostly for debugging and finding problems.\n>\n>> The plans are as follows.\n>\n> It's a little hard to figure out what's gone wrong here because you've\n> only included EXPLAIN ANALYZE output for one of the plans - the other\n> is just regular EXPLAIN. Can you send that, along with the output of\n> the following query:\n>\n> SELECT SUM(1) FROM trade_leads WHERE status = 'm'\n>\n> I'm guessing that the problem is that the selectivity estimate for\n> co_name_vec @@ to_tsquery('plastic&tubes') is not very good, but I'm\n> not real familiar with full text search, so I'm not sure whether\n> there's anything sensible you can do about it.\n>\n> ...Robert\n>\n", "msg_date": "Tue, 10 Feb 2009 19:59:42 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: query becomes fas on 'SET enable_hashjoin TO off;'" }, { "msg_contents": "Rajesh Kumar Mallah <[email protected]> writes:\n> On Tue, Feb 10, 2009 at 6:36 PM, Robert Haas <[email protected]> wrote:\n>> I'm guessing that the problem is that the selectivity estimate for\n>> co_name_vec @@ to_tsquery('plastic&tubes') is not very good, but I'm\n>> not real familiar with full text search, so I'm not sure whether\n>> there's anything sensible you can do about it.\n\nYeah, the bad selectivity estimate seems to be the entire problem ---\nif that were even slightly closer to reality the planner would've\npreferred the nestloop.\n\nI don't think there's a good solution to this in 8.3, because its\nestimator for @@ is just a stub. 
There will be a non-toy estimator\nin 8.4, fwiw.\n\nA possibility that seems a bit less crude than turning off hashjoins\nis to reduce random_page_cost, so as to bias things toward nestloop\nindexscans in general.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 10 Feb 2009 10:39:18 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: query becomes fas on 'SET enable_hashjoin TO off;' " }, { "msg_contents": "On Tue, Feb 10, 2009 at 9:09 PM, Tom Lane <[email protected]> wrote:\n> Rajesh Kumar Mallah <[email protected]> writes:\n>> On Tue, Feb 10, 2009 at 6:36 PM, Robert Haas <[email protected]> wrote:\n>>> I'm guessing that the problem is that the selectivity estimate for\n>>> co_name_vec @@ to_tsquery('plastic&tubes') is not very good, but I'm\n>>> not real familiar with full text search, so I'm not sure whether\n>>> there's anything sensible you can do about it.\n>\n> Yeah, the bad selectivity estimate seems to be the entire problem ---\n> if that were even slightly closer to reality the planner would've\n> preferred the nestloop.\n>\n> I don't think there's a good solution to this in 8.3,\n\nthis is 8.2 server at the moment.\n\n>because its\n> estimator for @@ is just a stub. There will be a non-toy estimator\n> in 8.4, fwiw.\n>\n> A possibility that seems a bit less crude than turning off hashjoins\n> is to reduce random_page_cost, so as to bias things toward nestloop\n> indexscans in general.\nreducing random_page_cost from 4 (default) to 3 does switch the plan\nin favour of nested loop thanks for the suggestion.\n\nSET random_page_cost TO 4;\nSET\ntradein_clients=> explain select lead_id from general.trade_leads\njoin general.profile_master as pm using(profile_id) where status ='m'\nand co_name_vec @@ to_tsquery('plastic&tubes') limit 20;\n QUERY PLAN\n---------------------------------------------------------------------------------------------------------------\n Limit (cost=4109.11..11127.78 rows=20 width=4)\n -> Hash Join (cost=4109.11..90789.72 rows=247 width=4)\n Hash Cond: (trade_leads.profile_id = pm.profile_id)\n -> Seq Scan on trade_leads (cost=0.00..85752.52 rows=246832 width=8)\n Filter: ((status)::text = 'm'::text)\n -> Hash (cost=4095.68..4095.68 rows=1074 width=4)\n -> Bitmap Heap Scan on profile_master pm\n(cost=40.89..4095.68 rows=1074 width=4)\n Filter: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n -> Bitmap Index Scan on\nprofile_master_co_name_vec (cost=0.00..40.62 rows=1074 width=0)\n Index Cond: (co_name_vec @@ '''plastic'' &\n''tube'''::tsquery)\n(10 rows)\n\ntradein_clients=> SET random_page_cost TO 3;\nSET\ntradein_clients=> explain select lead_id from general.trade_leads\njoin general.profile_master as pm using(profile_id) where status ='m'\nand co_name_vec @@ to_tsquery('plastic&tubes') limit 20;\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------------\n Limit (cost=0.00..9944.78 rows=20 width=4)\n -> Nested Loop (cost=0.00..122818.07 rows=247 width=4)\n -> Index Scan using profile_master_co_name_vec on\nprofile_master pm (cost=0.00..3256.28 rows=1074 width=4)\n Index Cond: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n Filter: (co_name_vec @@ '''plastic'' & ''tube'''::tsquery)\n -> Index Scan using trade_leads_profile_id on trade_leads\n(cost=0.00..110.76 rows=45 width=8)\n Index Cond: (trade_leads.profile_id = pm.profile_id)\n Filter: ((status)::text = 'm'::text)\n(8 rows)\n\n\n\n>\n> regards, tom lane\n>\n", "msg_date": "Tue, 10 
Feb 2009 21:22:39 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: query becomes fas on 'SET enable_hashjoin TO off;'" } ]
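If the lower random_page_cost is wanted only for this particular report rather than for the whole application, SET LOCAL confines the change to a single transaction. A sketch using the query from the thread:

    BEGIN;
    SET LOCAL random_page_cost = 3;   -- reverts automatically at COMMIT or ROLLBACK
    SELECT lead_id
      FROM general.trade_leads
      JOIN general.profile_master AS pm USING (profile_id)
     WHERE status = 'm'
       AND co_name_vec @@ to_tsquery('plastic&tubes')
     LIMIT 20;
    COMMIT;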
[ { "msg_contents": "Hi All,\n\nI am getting following error while conducting DBT2 tests on PostgreSQL.There\nis no error on driver side as such. After completion of test it is giving\nproblem in generating reports scripts:\nPlease find the error below:\n\nwaiting for server to shut down.... done\nserver stopped\nserver starting\nDBT-2 test for pgsql started...\n\nDATABASE SYSTEM: localhost\nDATABASE NAME: dbt2\nDATABASE CONNECTIONS: 1\nTERMINAL THREADS: 10\nTERMINALS PER WAREHOUSE: 10\nWAREHOUSES PER THREAD/CLIENT PAIR: 500\nSCALE FACTOR (WAREHOUSES): 1\nDURATION OF TEST (in sec): 60\n1 client stared every 1000 millisecond(s)\n\nStage 1. Starting up client...\nSleeping 501 seconds\ncollecting database statistics...\n\nStage 2. Starting up driver...\n1000 threads started per millisecond\nestimated rampup time: Sleeping 5010 seconds\nestimated rampup time has elapsed\nestimated steady state time: Sleeping 60 seconds\n\nStage 3. Processing of results...\nKilling client...\n./dbt2-run-workload: line 514: 23758 Terminated dbt2-client\n${CLIENT_COMMAND_ARGS} -p ${PORT} -o ${CDIR}\n>${CLIENT_OUTPUT_DIR}/`hostname`/client-${SEG}.out 2>&1\nwaiting for server to shut down.... done\nserver stopped\n*Can't use an undefined value as an ARRAY reference at\n/usr/lib/perl5/site_perl/5.8.8/Test/Parser/Dbt2.pm line 521.*\n\nCan someone please give inputs to resolve this issue? Any help on this will\nbe appreciated.\n\nThanks in advance.\n\nThanks,\nRohan\n\nHi All, I am getting following error while conducting DBT2 tests on PostgreSQL.There is no error on driver side as such. After completion of test it is giving problem in generating reports scripts:Please find the error below:\nwaiting for server to shut down.... doneserver stoppedserver startingDBT-2 test for pgsql started...DATABASE SYSTEM: localhostDATABASE NAME: dbt2DATABASE CONNECTIONS: 1TERMINAL THREADS: 10\nTERMINALS PER WAREHOUSE: 10WAREHOUSES PER THREAD/CLIENT PAIR: 500SCALE FACTOR (WAREHOUSES): 1DURATION OF TEST (in sec): 601 client stared every 1000 millisecond(s)Stage 1. Starting up client...\nSleeping 501 secondscollecting database statistics...Stage 2. Starting up driver...1000 threads started per millisecondestimated rampup time: Sleeping 5010 secondsestimated rampup time has elapsed\nestimated steady state time: Sleeping 60 secondsStage 3. Processing of results...Killing client..../dbt2-run-workload: line 514: 23758 Terminated              dbt2-client ${CLIENT_COMMAND_ARGS} -p ${PORT} -o ${CDIR} >${CLIENT_OUTPUT_DIR}/`hostname`/client-${SEG}.out 2>&1\nwaiting for server to shut down.... doneserver stoppedCan't use an undefined value as an ARRAY reference at /usr/lib/perl5/site_perl/5.8.8/Test/Parser/Dbt2.pm line 521.Can someone please give inputs to resolve this issue? Any help on this will be appreciated.\nThanks in advance.Thanks,Rohan", "msg_date": "Tue, 10 Feb 2009 18:16:19 +0530", "msg_from": "Rohan Pethkar <[email protected]>", "msg_from_op": true, "msg_subject": "ERROR: Can't use an undefined value as an ARRAY reference at\n\t/usr/lib/perl5/site_perl/5.8.8/Test/Parser/Dbt2.pm line 521." }, { "msg_contents": "> Can't use an undefined value as an ARRAY reference at\n> /usr/lib/perl5/site_perl/5.8.8/Test/Parser/Dbt2.pm line 521.\n>\n> Can someone please give inputs to resolve this issue? 
Any help on this will\n> be appreciated.\n\n519 sub transactions {\n520 my $self = shift;\n521 return @{$self->{data}->{transactions}->{transaction}};\n522 }\n\nthe stuff in $self->{data}->{transactions}->{transaction} is not defined\nso it cannot be dereferenced. If you want to just escape this error you\nmay modify the code as:\n\nif ($self->{data}->{transactions}->{transaction}) {\nreturn @{$self->{data}->{transactions}->{transaction}};\n} else {\n return ();\n}\n\nhowever understanding the root cause is recommended.\n\n>\n> Thanks in advance.\n>\n> Thanks,\n> Rohan\n>\n", "msg_date": "Tue, 10 Feb 2009 20:06:50 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": false, "msg_subject": "Re: ERROR: Can't use an undefined value as an ARRAY\n\treference at /usr/lib/perl5/site_perl/5.8.8/Test/Parser/Dbt2.pm line\n\t521." } ]
[ { "msg_contents": "\nI've got an application that pulls stock information from the web and \nperforms a bunch of calculations on them. There is a \"stock\" table \nand then for convenience I've got a view called \"stock_calculations\". \nThis view is defined as a stock id column coupled with 30-some \ncalculated columns. Some of these columns are computationally intense \nand are delegated to functions.\n\nIf I ever \"select * from stock_calculations\", the query is \nunderstandable pretty slow, since it has to do every one of the \ncalculations for each stock. However, in 8.1, if I only selected a \ncouple of specific columns from the view, it ran very quickly. I \nassumed that the database did not bother calculating all the other \ncolumns it didn't need.\n\nUnfortunately, I've recently upgraded to 8.3 and found that selecting \na single column from this view has now become as slow as selecting all \nof them.\n\nI'm wondering if there is any thing that I can do about this short of \ndowngrading to 8.1? Are there any configuration parameters that \neffect this behavior?\n\nThanks,\n\nMark\n\n", "msg_date": "Tue, 10 Feb 2009 17:20:36 -0500", "msg_from": "Mark Roghelia <[email protected]>", "msg_from_op": true, "msg_subject": "View performance degraded between 8.1 and 8.3" } ]
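A stripped-down sketch of the situation described above, with hypothetical column names and a stand-in function (expensive_calc), that makes the regression easy to demonstrate with EXPLAIN ANALYZE:

    -- Stand-in for the real view: one cheap expression, one expensive function call.
    CREATE VIEW stock_calculations_demo AS
    SELECT s.id,
           s.price / nullif(s.shares, 0) AS cheap_column,
           expensive_calc(s.id)          AS slow_column
      FROM stock s;

    -- Per the report above, the narrow select was quick on 8.1; if it now runs
    -- as slowly as selecting every column, the unreferenced function column is
    -- still being evaluated.
    EXPLAIN ANALYZE SELECT id, cheap_column FROM stock_calculations_demo;
    EXPLAIN ANALYZE SELECT id, cheap_column, slow_column FROM stock_calculations_demo;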
[ { "msg_contents": "Mark Roghelia <[email protected]> writes:\n> Unfortunately, I've recently upgraded to 8.3 and found that selecting \n> a single column from this view has now become as slow as selecting all \n> of them.\n\nWithout specifics there's really no way to answer this.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 10 Feb 2009 19:37:16 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": true, "msg_subject": "Re: View performance degraded between 8.1 and 8.3 " } ]
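One specific worth including with those details: whether the calculation functions are declared VOLATILE, which is the default for user-defined functions. Volatile functions in a view's select list can keep the planner from flattening the view, and an unflattened view evaluates its whole select list even for columns the outer query never references. If the calculations are in fact deterministic, declaring that, and giving them a realistic cost (an option added in 8.3), is a cheap thing to try; the function name and signature below are hypothetical:

    -- Check how the functions are currently declared ('v' = VOLATILE,
    -- 's' = STABLE, 'i' = IMMUTABLE).
    SELECT proname, provolatile, procost FROM pg_proc WHERE proname = 'expensive_calc';

    -- Only correct if the function truly has no side effects and returns the
    -- same answer for the same arguments within a statement.
    ALTER FUNCTION expensive_calc(integer) STABLE COST 10000;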
[ { "msg_contents": "Dear friends,\n\nI have explain analyze of two queries\n\nexplain analyze SELECT count(*) from general.rfis where 1=1 and\ninquiry_type = 'BUY' and receiver_uid=1320721;\n(7 ms)\nhttp://pastebin.com/m5297e03c\n\nexplain analyze SELECT count(*) from general.rfis where 1=1 and\ninquiry_type = 'BUY' and receiver_uid=1320721 generated_date >=\n2251 and ;\n(80 secs)\nhttp://pastebin.com/d1e4bdea7\n\n\nThe table general.rfis is partitioned on generated_date and the\ncondition generated_date >= 2251\nwas added with the intention to limit the number of (date based)\npartitions that would be searched\nby the query using the constraint exclusion facility. however as\nevident the query has become very\nslow as a result of this condition (even on primed caches).\n\ncan anyone kindly explain why the result was so counter intuitive ?\n\nIn particular where is most of the (80828.438 ms) spent on the plan\nhttp://pastebin.com/d1e4bdea7 (reference to actual line number is appreciated)\n\n\n\nstructure of a typical partition (abridged)\n\n Table \"rfi_partitions.rfis_part_2009_01\"\n Column | Type |\n Modifiers\n-----------------------+------------------------+---------------------------------------------------------------\n rfi_id | integer | not null default\nnextval('general.rfis_rfi_id_seq'::regclass)\n sender_uid | integer | not null\n receiver_uid | integer | not null\n subject | character varying(100) | not null\n message | text | not null\n inquiry_type | character varying(50) | default\n'BUY'::character varying\n inquiry_source | character varying(30) | not null\n generated_date | integer | not null default\ngeneral.current_date_id()\nIndexes:\n \"rfis_part_2009_01_pkey\" PRIMARY KEY, btree (rfi_id)\n \"rfis_part_2009_01_generated_date\" btree (generated_date)\n \"rfis_part_2009_01_receiver_uid\" btree (receiver_uid) CLUSTER\n \"rfis_part_2009_01_sender_uid\" btree (sender_uid)\nCheck constraints:\n \"rfis_part_2009_01_generated_date_check\" CHECK (generated_date >=\n3289 AND generated_date <= 3319)\n \"rfis_part_2009_01_rfi_id_check\" CHECK (rfi_id >= 12344252 AND\nrfi_id <= 12681399)\nInherits: rfis\n\nregds\nrajesh kumar mallah.\n", "msg_date": "Wed, 11 Feb 2009 16:28:00 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "please help with the explain analyze plan" }, { "msg_contents": "Both queries are using your uid index on each of the partitions not generated_date, it's doing the generated_date with a filter on most of the partitions.\n\nThis is except for on partition part_2006_02 in the second query where it uses your generated date index - and that takes the 80 secs.\n\n -> Index Scan using rfis_part_2006_02_generated_date on rfis_part_2006_02 rfis (cost=0.00..6.45 rows=1 width=0) (actual time=80827.207..80827.207 rows=0 loops=1)\n\nAlso the second query appears to go through a few more partitions than the first, i.e. 
part_2001_2004 and part_1005\n\n\n--- On Wed, 11/2/09, Rajesh Kumar Mallah <[email protected]> wrote:\n\n> From: Rajesh Kumar Mallah <[email protected]>\n> Subject: [PERFORM] please help with the explain analyze plan\n> To: [email protected]\n> Date: Wednesday, 11 February, 2009, 10:58 AM\n> Dear friends,\n> \n> I have explain analyze of two queries\n> \n> explain analyze SELECT count(*) from general.rfis where \n> 1=1 and\n> inquiry_type = 'BUY' and receiver_uid=1320721;\n> (7 ms)\n> http://pastebin.com/m5297e03c\n> \n> explain analyze SELECT count(*) from general.rfis where \n> 1=1 and\n> inquiry_type = 'BUY' and receiver_uid=1320721\n> generated_date >=\n> 2251 and ;\n> (80 secs)\n> http://pastebin.com/d1e4bdea7\n> \n> \n> The table general.rfis is partitioned on generated_date and\n> the\n> condition generated_date >= 2251\n> was added with the intention to limit the number of (date\n> based)\n> partitions that would be searched\n> by the query using the constraint exclusion facility.\n> however as\n> evident the query has become very\n> slow as a result of this condition (even on primed caches).\n> \n> can anyone kindly explain why the result was so counter\n> intuitive ?\n> \n> In particular where is most of the (80828.438 ms) spent on\n> the plan\n> http://pastebin.com/d1e4bdea7 (reference to actual line\n> number is appreciated)\n> \n> \n> \n> structure of a typical partition (abridged)\n> \n> Table \"rfi_partitions.rfis_part_2009_01\"\n> Column | Type |\n> Modifiers\n> -----------------------+------------------------+---------------------------------------------------------------\n> rfi_id | integer | not null\n> default\n> nextval('general.rfis_rfi_id_seq'::regclass)\n> sender_uid | integer | not null\n> receiver_uid | integer | not null\n> subject | character varying(100) | not null\n> message | text | not null\n> inquiry_type | character varying(50) | default\n> 'BUY'::character varying\n> inquiry_source | character varying(30) | not null\n> generated_date | integer | not null\n> default\n> general.current_date_id()\n> Indexes:\n> \"rfis_part_2009_01_pkey\" PRIMARY KEY, btree\n> (rfi_id)\n> \"rfis_part_2009_01_generated_date\" btree\n> (generated_date)\n> \"rfis_part_2009_01_receiver_uid\" btree\n> (receiver_uid) CLUSTER\n> \"rfis_part_2009_01_sender_uid\" btree\n> (sender_uid)\n> Check constraints:\n> \"rfis_part_2009_01_generated_date_check\"\n> CHECK (generated_date >=\n> 3289 AND generated_date <= 3319)\n> \"rfis_part_2009_01_rfi_id_check\" CHECK\n> (rfi_id >= 12344252 AND\n> rfi_id <= 12681399)\n> Inherits: rfis\n> \n> regds\n> rajesh kumar mallah.\n> \n> -- \n> Sent via pgsql-performance mailing list\n> ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n\n\n \n", "msg_date": "Wed, 11 Feb 2009 11:55:54 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: please help with the explain analyze plan" }, { "msg_contents": "thanks for the hint,\n\nnow the peak hour is over and the same scan is taking 71 ms in place of 80000 ms\nand the total query time is also acceptable. But it is surprising that\nthe scan was\ntaking so long consistently at that point of time. 
I shall test again\nunder similar\ncircumstance tomorrow.\n\nIs it possible to enable block level statistics from the psql prompt\nfor a particular query\nand see the results on the psql prompt ?\n\nexplain analyze SELECT count(*) from\nrfi_partitions.rfis_part_2006_02 where generated_date >= 2251 and\nreceiver_uid=1320721 ;\n\n QUERY PLAN\n------------------------------------------------------------------------------------------------------------------------------------------------------------\n Aggregate (cost=6.44..6.45 rows=1 width=0) (actual\ntime=71.513..71.513 rows=1 loops=1)\n -> Index Scan using rfis_part_2006_02_generated_date on\nrfis_part_2006_02 (cost=0.00..6.43 rows=1 width=0) (actual\ntime=71.508..71.508 rows=0 loops=1)\n Index Cond: (generated_date >= 2251)\n Filter: (receiver_uid = 1320721)\n Total runtime: 71.553 ms\n(5 rows)\n", "msg_date": "Wed, 11 Feb 2009 18:07:55 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: please help with the explain analyze plan" }, { "msg_contents": "hurray!\nANALYZING changed the plan\n\nI was not expecting the plan to change because\nthe partition of 2006_02 is supposed to be\ndormant. maybe the partition was never analyzed.\n\nBut still question remains, why the time taken was\nin orders of magnitude higher in loaded condition.\n\n\n\ntradein_clients=> explain SELECT count(*) from\nrfi_partitions.rfis_part_2006_02 where generated_date >= 2251 and\nreceiver_uid=1320721 ;\n QUERY PLAN\n----------------------------------------------------------------------------------------------------------------\n Aggregate (cost=6.44..6.45 rows=1 width=0)\n -> Index Scan using rfis_part_2006_02_generated_date on\nrfis_part_2006_02 (cost=0.00..6.43 rows=1 width=0)\n Index Cond: (generated_date >= 2251)\n Filter: (receiver_uid = 1320721)\n(4 rows)\ntradein_clients=> ANALYZE rfi_partitions.rfis_part_2006_02;\nANALYZE\ntradein_clients=> explain SELECT count(*) from\nrfi_partitions.rfis_part_2006_02 where generated_date >= 2251 and\nreceiver_uid=1320721 ;\n QUERY PLAN\n--------------------------------------------------------------------------------------------------------------\n Aggregate (cost=8.78..8.79 rows=1 width=0)\n -> Index Scan using rfis_part_2006_02_receiver_uid on\nrfis_part_2006_02 (cost=0.00..8.77 rows=1 width=0)\n Index Cond: (receiver_uid = 1320721)\n Filter: (generated_date >= 2251)\n(4 rows)\n\ntradein_clients=> explain analyze SELECT count(*) from\nrfi_partitions.rfis_part_2006_02 where generated_date >= 2251 and\nreceiver_uid=1320721 ;\n\nQUERY PLAN\n--------------------------------------------------------------------------------------------------------------------------------------------------------\n Aggregate (cost=8.78..8.79 rows=1 width=0) (actual time=0.045..0.045\nrows=1 loops=1)\n -> Index Scan using rfis_part_2006_02_receiver_uid on\nrfis_part_2006_02 (cost=0.00..8.77 rows=1 width=0) (actual\ntime=0.042..0.042 rows=0 loops=1)\n Index Cond: (receiver_uid = 1320721)\n Filter: (generated_date >= 2251)\n Total runtime: 0.082 ms\n(5 rows)\n\ntradein_clients=>\n\nOn Wed, Feb 11, 2009 at 6:07 PM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n> thanks for the hint,\n>\n> now the peak hour is over and the same scan is taking 71 ms in place of 80000 ms\n> and the total query time is also acceptable. But it is surprising that\n> the scan was\n> taking so long consistently at that point of time. 
I shall test again\n> under similar\n> circumstance tomorrow.\n>\n> Is it possible to enable block level statistics from the psql prompt\n> for a particular query\n> and see the results on the psql prompt ?\n>\n> explain analyze SELECT count(*) from\n> rfi_partitions.rfis_part_2006_02 where generated_date >= 2251 and\n> receiver_uid=1320721 ;\n>\n> QUERY PLAN\n> ------------------------------------------------------------------------------------------------------------------------------------------------------------\n> Aggregate (cost=6.44..6.45 rows=1 width=0) (actual\n> time=71.513..71.513 rows=1 loops=1)\n> -> Index Scan using rfis_part_2006_02_generated_date on\n> rfis_part_2006_02 (cost=0.00..6.43 rows=1 width=0) (actual\n> time=71.508..71.508 rows=0 loops=1)\n> Index Cond: (generated_date >= 2251)\n> Filter: (receiver_uid = 1320721)\n> Total runtime: 71.553 ms\n> (5 rows)\n>\n", "msg_date": "Wed, 11 Feb 2009 18:23:32 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: please help with the explain analyze plan" } ]
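Two low-cost habits that keep plans like the one above stable, sketched against the partition named in the thread: confirm constraint exclusion is actually enabled (it is off by default before 8.4, and without it the CHECK constraints on generated_date cannot prune any partitions), and analyze each partition after it is created or bulk-loaded so the planner has per-child statistics:

    SHOW constraint_exclusion;        -- should not be 'off' on a partitioned setup
    SET constraint_exclusion = on;    -- or set it permanently in postgresql.conf

    -- Autovacuum can miss mostly-dormant children; an explicit ANALYZE after a
    -- load gives the planner the statistics that flipped the plan above.
    ANALYZE rfi_partitions.rfis_part_2006_02;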
[ { "msg_contents": "Hi,\n\nIs it possible to configure autovacuum to run only\nduring certain hours ? We are forced to keep\nit off because it pops up during the peak\nquery hours.\n\nRegds\nrajesh kumar mallah.\n", "msg_date": "Wed, 11 Feb 2009 18:25:51 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "scheduling autovacuum at lean hours only." }, { "msg_contents": "Rajesh Kumar Mallah <mallah.rajesh 'at' gmail.com> writes:\n\n> Hi,\n>\n> Is it possible to configure autovacuum to run only\n> during certain hours ? We are forced to keep\n> it off because it pops up during the peak\n> query hours.\n\nYou'd rather configure the delaying process to not alter too much\nperformance. Autovacuum is really not targeted at running once a\nday - I think it is partly because the old vacuuming was too weak\n(because too seldom in many cases) that autovaccum was added.\n\nA delaying configuration that works nicely for us without\nimpacting performance much (tested at the time of 8.2 to cause a\n+40% response time during autovacuuming, compared to +300% with\nmore default values):\n\nvacuum_cost_delay = 150\nvacuum_cost_page_hit = 1\nvacuum_cost_page_miss = 10\nvacuum_cost_page_dirty = 20\nvacuum_cost_limit = 1000\nautovacuum_vacuum_cost_delay = 300\n\n(Disclaimer: IIRC, Alvaro doesn't like these figures at all)\n\nOf course, these are good for us (bloat is very, very low and\nperformance impact is not experienced in production), not\nnecessarily for you. You should conduct your own tests.\n\nBe sure to also consider http://developer.postgresql.org/~wieck/vacuum_cost/\n\n-- \nGuillaume Cottenceau\n", "msg_date": "Wed, 11 Feb 2009 14:41:52 +0100", "msg_from": "Guillaume Cottenceau <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "\n> From: Rajesh Kumar Mallah <[email protected]>\n\n> Is it possible to configure autovacuum to run only\n> during certain hours ? We are forced to keep\n> it off because it pops up during the peak\n> query hours.\n\nAFAIK not directly within the conf.\n\nHowever you could probably set up a shell script to turn it on and off as and when required via cron; just change the setting to off in the conf file and reload.\n\n\n \n", "msg_date": "Wed, 11 Feb 2009 13:54:34 +0000 (GMT)", "msg_from": "Glyn Astill <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "On Wed, Feb 11, 2009 at 7:11 PM, Guillaume Cottenceau <[email protected]> wrote:\n> Rajesh Kumar Mallah <mallah.rajesh 'at' gmail.com> writes:\n>\n>> Hi,\n>>\n>> Is it possible to configure autovacuum to run only\n>> during certain hours ? We are forced to keep\n>> it off because it pops up during the peak\n>> query hours.\n>\n> You'd rather configure the delaying process to not alter too much\n> performance. 
Autovacuum is really not targeted at running once a\n> day - I think it is partly because the old vacuuming was too weak\n> (because too seldom in many cases) that autovaccum was added.\n>\n> A delaying configuration that works nicely for us without\n> impacting performance much (tested at the time of 8.2 to cause a\n> +40% response time during autovacuuming, compared to +300% with\n> more default values):\n>\n> vacuum_cost_delay = 150\n> vacuum_cost_page_hit = 1\n> vacuum_cost_page_miss = 10\n> vacuum_cost_page_dirty = 20\n> vacuum_cost_limit = 1000\n> autovacuum_vacuum_cost_delay = 300\n\nwhy is it not a good idea to give end users control over when they\nwant to run it ?\n\n\n>\n> (Disclaimer: IIRC, Alvaro doesn't like these figures at all)\n>\n> Of course, these are good for us (bloat is very, very low and\n> performance impact is not experienced in production), not\n> necessarily for you. You should conduct your own tests.\n>\n> Be sure to also consider http://developer.postgresql.org/~wieck/vacuum_cost/\n>\n> --\n> Guillaume Cottenceau\n>\n", "msg_date": "Wed, 11 Feb 2009 20:27:45 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "Rajesh Kumar Mallah escribi�:\n\n> why is it not a good idea to give end users control over when they\n> want to run it ?\n\nIt has never been said that we don't want to give the users control.\nIt's a matter of writing the code. If you want to propose a patch to\nadd the feature, feel free.\n", "msg_date": "Wed, 11 Feb 2009 13:21:57 -0300", "msg_from": "Alvaro Herrera <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "On Wed, Feb 11, 2009 at 2:57 PM, Rajesh Kumar Mallah\n<[email protected]> wrote:\n\n>> vacuum_cost_delay = 150\n>> vacuum_cost_page_hit = 1\n>> vacuum_cost_page_miss = 10\n>> vacuum_cost_page_dirty = 20\n>> vacuum_cost_limit = 1000\n>> autovacuum_vacuum_cost_delay = 300\n>\n> why is it not a good idea to give end users control over when they\n> want to run it ?\n\nEffectively, you have control over autovacuum via these params.\nYou have to remember, that autovacuum doesn't cost much, and it makes\nplanner know more about data.\nIt's not there to clean up databases, as you might imagine - it is\nthere to update stats, and mark pages as free.\n\nSo make sure you tweak that config fist, because I have a funny\nfeeling that you just think that vacuuming bogs down your machine, and\n_can_ be turned off without any bad consequences, which is simply not\ntrue.\n\n\n-- \nGJ\n", "msg_date": "Wed, 11 Feb 2009 16:33:16 +0000", "msg_from": "=?UTF-8?Q?Grzegorz_Ja=C5=9Bkiewicz?= <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "On Feb 11, 2009, at 6:57 AM, Rajesh Kumar Mallah wrote:\n\n> why is it not a good idea to give end users control over when they\n> want to run it ?\n\nThere's nothing stopping you from just turning off autovacuum and \nrunning vacuum manually. The point of autovacuum is to vacuum \"as \nneeded.\"\n\n", "msg_date": "Wed, 11 Feb 2009 08:46:54 -0800", "msg_from": "Ben <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." 
}, { "msg_contents": "On a large partitioned database, ordinary vacuum is a very very difficult option.\n\nMost of the time on such a system, most tables are dormant with respect to writes and never need to be vacuumed. A 'daily vacuum' is likely to take a full day to run on larger systems. Since ordinary vacuum can't be run on subsets of tables without explicitly naming them one at a time (can't just vacuum a schema, tablespace, or use a regex to match table names), good luck using it effectively if you have a few thousand tables in partitions. You'll have to have application code or a script with knowledge of all the partition names and which are in need of an analyze/vacuum.\n\nAutovacuum is good enough in recent versions to be tuned to have very low impact though. If you have to, rather than stop and start it, just turn the delay or cost settings up and down during different parts of the day. More than likely however, it will be able to keep up with a single set of settings.\nIn particular, rather than making the delay longer, make the appropriate cost larger -- page miss or page dirty affect how much I/O it will do, and page hit will mostly affect how much CPU it uses.\n\nPerhaps a feature request is to have a version of the manual vacuum command that doesn't bother running on tables that autovacuum wouldn't touch due to insufficient data churn. This way, at lean hours one can manually vacuum to help an autovacuum that was tuned for very low impact 'catch up'.\nAlso, if there was some way to make vacuum not stink so badly on tables that were just loaded with pg_load, where it causes huge write activity for tables that clearly have no bloat (I believe this is a hint bits thing?).\n________________________________________\nFrom: [email protected] [[email protected]] On Behalf Of Ben [[email protected]]\nSent: Wednesday, February 11, 2009 8:46 AM\nTo: Rajesh Kumar Mallah\nCc: PostgreSQL Performance\nSubject: Re: [PERFORM] scheduling autovacuum at lean hours only.\n\nOn Feb 11, 2009, at 6:57 AM, Rajesh Kumar Mallah wrote:\n\n> why is it not a good idea to give end users control over when they\n> want to run it ?\n\nThere's nothing stopping you from just turning off autovacuum and\nrunning vacuum manually. The point of autovacuum is to vacuum \"as\nneeded.\"\n\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n", "msg_date": "Wed, 11 Feb 2009 09:23:25 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." 
}, { "msg_contents": "On Wed, Feb 11, 2009 at 10:03 PM, Grzegorz Jaśkiewicz <[email protected]> wrote:\n> On Wed, Feb 11, 2009 at 2:57 PM, Rajesh Kumar Mallah\n> <[email protected]> wrote:\n>\n>>> vacuum_cost_delay = 150\n>>> vacuum_cost_page_hit = 1\n>>> vacuum_cost_page_miss = 10\n>>> vacuum_cost_page_dirty = 20\n>>> vacuum_cost_limit = 1000\n>>> autovacuum_vacuum_cost_delay = 300\n>>\n>> why is it not a good idea to give end users control over when they\n>> want to run it ?\n>\n> Effectively, you have control over autovacuum via these params.\n> You have to remember, that autovacuum doesn't cost much, and it makes\n> planner know more about data.\n> It's not there to clean up databases, as you might imagine - it is\n> there to update stats, and mark pages as free.\n>\n> So make sure you tweak that config fist, because I have a funny\n> feeling that you just think that vacuuming bogs down your machine, and\n> _can_ be turned off without any bad consequences, which is simply not\n> true.\n\nour usage pattern is such that peak activity (indicated by load average)\nduring day time is 10 times during night hours. Autovacuum just puts\nmore pressure to the system. If less stressing version is used then\nit shall take longer to complete one cycle, which would mean less\nperformance for longer time . Less performance queues up queries\nand encourages people to re submit their queries which again\nadds to bogging up the system.\n\nIn our case i feel the hardware is bit underscaled as compared to\nload thats why i think running in lean hours is best of both worlds\nno performance sacrifices and intelligent vacuuming.\n\nregds\n-- mallah.\n\n\n>\n>\n> --\n> GJ\n>\n", "msg_date": "Wed, 11 Feb 2009 22:57:27 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "On Wed, 11 Feb 2009, Scott Carey wrote:\n\n> On a large partitioned database, ordinary vacuum is a very very difficult option.\n>\n> Most of the time on such a system, most tables are dormant with respect to writes and never need to be vacuumed. A 'daily vacuum' is likely to take a full day to run on larger systems. Since ordinary vacuum can't be run on subsets of tables without explicitly naming them one at a time (can't just vacuum a schema, tablespace, or use a regex to match table names), good luck using it effectively if you have a few thousand tables in partitions. You'll have to have application code or a script with knowledge of all the partition names and which are in need of an analyze/vacuum.\n>\n> Autovacuum is good enough in recent versions to be tuned to have very low impact though. If you have to, rather than stop and start it, just turn the delay or cost settings up and down during different parts of the day. More than likely however, it will be able to keep up with a single set of settings.\n> In particular, rather than making the delay longer, make the appropriate cost larger -- page miss or page dirty affect how much I/O it will do, and page hit will mostly affect how much CPU it uses.\n>\n> Perhaps a feature request is to have a version of the manual vacuum command that doesn't bother running on tables that autovacuum wouldn't touch due to insufficient data churn. 
This way, at lean hours one can manually vacuum to help an autovacuum that was tuned for very low impact 'catch up'.\n> Also, if there was some way to make vacuum not stink so badly on tables that were just loaded with pg_load, where it causes huge write activity for tables that clearly have no bloat (I believe this is a hint bits thing?).\n\nOh, I agree with everything you say. I'm just pointing out that if you \nreally do want control over when things get vacuumed (e.g. you have a \nmostly-read database 20 hours a day and then 4 hours of heavy churn at \nnight) then you can still do that if you want.\n", "msg_date": "Wed, 11 Feb 2009 09:57:43 -0800 (PST)", "msg_from": "Ben Chobot <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "On Wed, 2009-02-11 at 22:57 +0530, Rajesh Kumar Mallah wrote:\n> On Wed, Feb 11, 2009 at 10:03 PM, Grzegorz Jaśkiewicz <[email protected]> wrote:\n> > On Wed, Feb 11, 2009 at 2:57 PM, Rajesh Kumar Mallah\n> > <[email protected]> wrote:\n> >\n> >>> vacuum_cost_delay = 150\n> >>> vacuum_cost_page_hit = 1\n> >>> vacuum_cost_page_miss = 10\n> >>> vacuum_cost_page_dirty = 20\n> >>> vacuum_cost_limit = 1000\n> >>> autovacuum_vacuum_cost_delay = 300\n> >>\n> >> why is it not a good idea to give end users control over when they\n> >> want to run it ?\n> >\n> > Effectively, you have control over autovacuum via these params.\n> > You have to remember, that autovacuum doesn't cost much, and it makes\n> > planner know more about data.\n> > It's not there to clean up databases, as you might imagine - it is\n> > there to update stats, and mark pages as free.\n> >\n> > So make sure you tweak that config fist, because I have a funny\n> > feeling that you just think that vacuuming bogs down your machine, and\n> > _can_ be turned off without any bad consequences, which is simply not\n> > true.\n> \n> our usage pattern is such that peak activity (indicated by load average)\n> during day time is 10 times during night hours. Autovacuum just puts\n> more pressure to the system. If less stressing version is used then\n> it shall take longer to complete one cycle, which would mean less\n> performance for longer time . Less performance queues up queries\n> and encourages people to re submit their queries which again\n> adds to bogging up the system.\n\nThat's not exactly how it works in practise, if tuned properly. It may\ntake longer, but it is less intensive while running.\n\nWe had one system that had spikes happening due to the exact case you\ndescribed - there were noticeably high IO wait times while certain\ntables were being vacuumed. We set the cost delay and the wait times\ndropped to the point where it was non-issue. Vacuums take twice as\nlong, but there is no measurable impact to the performance.\n\n> In our case i feel the hardware is bit underscaled as compared to\n> load thats why i think running in lean hours is best of both worlds\n> no performance sacrifices and intelligent vacuuming.\n\nThat is a different issue altogether. \n\nNot vacuuming a running system at all during peak hours is not\nconsidered intelligent vacuuming IMHO. There are plenty of use cases\nwhere small, frequent vacuums keep tables under control at a very low\ncost. 
Letting them go for extended periods of time without vacuuming\ncauses bloat and eventual slowdowns to table access which manifest in\nhigher IO usage across the board.\n\nIf you really are dead set on vacuuming only at night, you may want to\ndo a careful analysis of which tables need to be vacuumed and when, and\ntrigger manual vacuums from cron.\n\n-- \nBrad Nicholson 416-673-4106\nDatabase Administrator, Afilias Canada Corp.\n\n", "msg_date": "Wed, 11 Feb 2009 13:00:03 -0500", "msg_from": "Brad Nicholson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "On Wed, Feb 11, 2009 at 11:30 PM, Brad Nicholson\n<[email protected]> wrote:\n> On Wed, 2009-02-11 at 22:57 +0530, Rajesh Kumar Mallah wrote:\n>> On Wed, Feb 11, 2009 at 10:03 PM, Grzegorz Jaśkiewicz <[email protected]> wrote:\n>> > On Wed, Feb 11, 2009 at 2:57 PM, Rajesh Kumar Mallah\n>> > <[email protected]> wrote:\n>> >\n>> >>> vacuum_cost_delay = 150\n>> >>> vacuum_cost_page_hit = 1\n>> >>> vacuum_cost_page_miss = 10\n>> >>> vacuum_cost_page_dirty = 20\n>> >>> vacuum_cost_limit = 1000\n>> >>> autovacuum_vacuum_cost_delay = 300\n>> >>\n>> >> why is it not a good idea to give end users control over when they\n>> >> want to run it ?\n>> >\n>> > Effectively, you have control over autovacuum via these params.\n>> > You have to remember, that autovacuum doesn't cost much, and it makes\n>> > planner know more about data.\n>> > It's not there to clean up databases, as you might imagine - it is\n>> > there to update stats, and mark pages as free.\n>> >\n>> > So make sure you tweak that config fist, because I have a funny\n>> > feeling that you just think that vacuuming bogs down your machine, and\n>> > _can_ be turned off without any bad consequences, which is simply not\n>> > true.\n>>\n>> our usage pattern is such that peak activity (indicated by load average)\n>> during day time is 10 times during night hours. Autovacuum just puts\n>> more pressure to the system. If less stressing version is used then\n>> it shall take longer to complete one cycle, which would mean less\n>> performance for longer time . Less performance queues up queries\n>> and encourages people to re submit their queries which again\n>> adds to bogging up the system.\n>\n> That's not exactly how it works in practise, if tuned properly. It may\n> take longer, but it is less intensive while running.\n>\n> We had one system that had spikes happening due to the exact case you\n> described - there were noticeably high IO wait times while certain\n> tables were being vacuumed. We set the cost delay and the wait times\n> dropped to the point where it was non-issue.\n\n\nI think i can take this route and monitor the io activity\nduring vacuums. thanks everyone for their suggestions.\n\n-- mallah.\n\n\n>Vacuums take twice as\n> long, but there is no measurable impact to the performance.\n>\n>> In our case i feel the hardware is bit underscaled as compared to\n>> load thats why i think running in lean hours is best of both worlds\n>> no performance sacrifices and intelligent vacuuming.\n>\n> That is a different issue altogether.\n>\n> Not vacuuming a running system at all during peak hours is not\n> considered intelligent vacuuming IMHO. There are plenty of use cases\n> where small, frequent vacuums keep tables under control at a very low\n> cost. 
Letting them go for extended periods of time without vacuuming\n> causes bloat and eventual slowdowns to table access which manifest in\n> higher IO usage across the board.\n>\n> If you really are dead set on vacuuming only at night, you may want to\n> do a careful analysis of which tables need to be vacuumed and when, and\n> trigger manual vacuums from cron.\n>\n> --\n> Brad Nicholson 416-673-4106\n> Database Administrator, Afilias Canada Corp.\n>\n>\n", "msg_date": "Wed, 11 Feb 2009 23:46:59 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "Rajesh Kumar Mallah <mallah.rajesh 'at' gmail.com> writes:\n\n> our usage pattern is such that peak activity (indicated by load average)\n> during day time is 10 times during night hours. Autovacuum just puts\n> more pressure to the system. If less stressing version is used then\n\nYet it may allow a more stable performance in the mid/long term.\nNot vacuuming enough, even during peak activity, may lead to\nbloat in your DB and poor peak performance even without any\nvacuuming, only because the DB will have to scan through a lot of\ndead tuples. Unless you're doing full vacuums overnight?\n\n> it shall take longer to complete one cycle, which would mean less\n> performance for longer time . Less performance queues up queries\n> and encourages people to re submit their queries which again\n> adds to bogging up the system.\n\nI think this user \"problem\" should be handled at the application\nlevel. You need to add some \"still working\" pages/icons etc, or\nget sure one running query from a single user prevents another\nquery from the same user to be run concurrently.\n\n> In our case i feel the hardware is bit underscaled as compared to\n> load thats why i think running in lean hours is best of both worlds\n> no performance sacrifices and intelligent vacuuming.\n\nIMHO you should still measure how much bloat you produce with\nsuch a strategy. I can talk first-hand, because with only nightly\nvacuuming and not so much traffic, we had ever growing bloat\nwhich in the end led to minuscule performance (the culprit was\nshared with untuned FSM and friends).\n\n-- \nGuillaume Cottenceau\n", "msg_date": "Thu, 12 Feb 2009 09:23:56 +0100", "msg_from": "Guillaume Cottenceau <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." }, { "msg_contents": "[email protected] (Rajesh Kumar Mallah) writes:\n> why is it not a good idea to give end users control over when they\n> want to run it ?\n\nIt's not a particularly good idea to give end users things that they\nare likely then to *immediately* use to shoot themselves in the foot.\n\nTurning off vacuuming \"all day\" is the sort of thing that is indeed\npretty certain to hurt you when you imagined it was going to help you.\n\nIn particular, if you shut off autovac all day, heavily updated tables\nwith certain sorts of (pretty common!) 
update patterns are certain to\n\"bloat up,\" to the point that you'll need to do CLUSTER/VACUUM FULL on\nthem.\n\nIn effect, the practical effect of \"autovacuum at lean hours only\" is\nmore reasonably described as \"cancel autovacuum and revert to the\nelder policy of requiring users to do manual vacuuming.\"\n\nIt's worth looking at how autovacuum has been evolving over time...\n\n- When it was introduced, 8.0-ish (maybe it was 8.1 when it became\n \"official\"), it was pretty primitive.\n\n Autovac was a single process, where you had three controls over\n behaviour:\n\n - You could run autovac, or not; \n\n - You could exclude specific tables from being processed by autovac\n\n - There is a capability to vacuum less aggressively by using\n delays to reduce autovac I/O usage\n\n- In 8.3, it was enhanced to add the notion of having multiple vacuum\n workers\n\n There was discussion about having one of those workers restrict\n itself to small tables, so that you'd never have the scenario where\n the workers were all busy and a small table that needed vacuuming\n was left unvacuumed for a long time. It appears that didn't happen,\n which seems unfortunate, but that's life...\n\nYou should look at all the \"knobs\" that *are* offered before deciding\na policy that may be harmful to performance. As things stand now,\nthere are a couple of ways I could see tuning this:\n\n - You might check on the GUC variables autovacuum_vacuum_cost_delay\n and autovacuum_vacuum_cost_limit, which would allow you to restrict\n the I/O cost.\n\n This might allow you to run autovacuum all the time without\n adversely affecting performance.\n\n - You might come up with a list of the *LARGE* tables that you don't\n want vacuumed during the day, and set up a cron job that adds/drops\n them from the pg_autovacuum table at the appropriate times.\n\n This is definitely going to be more finicky, and requires a great\n deal more awareness of the tables being updated by your\n applications. It makes \"autovacuum\" a whole lot less \"automatic.\"\n\nThere are known enhancements coming up:\n\n - In 8.4, there is a capability for VACUUM to only process the\n portions of the table known to have been altered.\n\n That ought to be a better answer than *any* of the fiddling\n suggested, to date. Right now, a VACUUM on \"public.my_huge_table\",\n a table 18GB in size, will walk through the entire table, even\n though there were only a handful of pages where tuples were\n invalidated.\n\n This is almost certainly the single best improvement possible to\n resolve your issue; it seems likely to *directly* address the\n problem, and has the considerable merit of not requiring much if\n any configuration/reconfiguration/scheduling.\n-- \n(reverse (concatenate 'string \"gro.mca\" \"@\" \"enworbbc\"))\nhttp://linuxdatabases.info/info/\n\"what would we do without C? we would have PASAL, BASI, OBOL, and\nOmmon Lisp.\" -- #Erik\n", "msg_date": "Fri, 13 Feb 2009 12:37:21 -0500", "msg_from": "Chris Browne <[email protected]>", "msg_from_op": false, "msg_subject": "Re: scheduling autovacuum at lean hours only." 
}, { "msg_contents": "Its nice to know the evolution of autovacuum and i understand that\nthe suggestion/requirement of \"autovacuum at lean hours only\"\nwas defeating the whole idea.\n\nregds\n--rajesh kumar mallah.\n\nOn Fri, Feb 13, 2009 at 11:07 PM, Chris Browne <[email protected]> wrote:\n> [email protected] (Rajesh Kumar Mallah) writes:\n>> why is it not a good idea to give end users control over when they\n>> want to run it ?\n>\n> It's not a particularly good idea to give end users things that they\n> are likely then to *immediately* use to shoot themselves in the foot.\n>\n> Turning off vacuuming \"all day\" is the sort of thing that is indeed\n> pretty certain to hurt you when you imagined it was going to help you.\n>\n> In particular, if you shut off autovac all day, heavily updated tables\n> with certain sorts of (pretty common!) update patterns are certain to\n> \"bloat up,\" to the point that you'll need to do CLUSTER/VACUUM FULL on\n> them.\n>\n> In effect, the practical effect of \"autovacuum at lean hours only\" is\n> more reasonably described as \"cancel autovacuum and revert to the\n> elder policy of requiring users to do manual vacuuming.\"\n>\n> It's worth looking at how autovacuum has been evolving over time...\n>\n> - When it was introduced, 8.0-ish (maybe it was 8.1 when it became\n> \"official\"), it was pretty primitive.\n>\n> Autovac was a single process, where you had three controls over\n> behaviour:\n>\n> - You could run autovac, or not;\n>\n> - You could exclude specific tables from being processed by autovac\n>\n> - There is a capability to vacuum less aggressively by using\n> delays to reduce autovac I/O usage\n>\n> - In 8.3, it was enhanced to add the notion of having multiple vacuum\n> workers\n>\n> There was discussion about having one of those workers restrict\n> itself to small tables, so that you'd never have the scenario where\n> the workers were all busy and a small table that needed vacuuming\n> was left unvacuumed for a long time. It appears that didn't happen,\n> which seems unfortunate, but that's life...\n>\n> You should look at all the \"knobs\" that *are* offered before deciding\n> a policy that may be harmful to performance. As things stand now,\n> there are a couple of ways I could see tuning this:\n>\n> - You might check on the GUC variables autovacuum_vacuum_cost_delay\n> and autovacuum_vacuum_cost_limit, which would allow you to restrict\n> the I/O cost.\n>\n> This might allow you to run autovacuum all the time without\n> adversely affecting performance.\n>\n> - You might come up with a list of the *LARGE* tables that you don't\n> want vacuumed during the day, and set up a cron job that adds/drops\n> them from the pg_autovacuum table at the appropriate times.\n>\n> This is definitely going to be more finicky, and requires a great\n> deal more awareness of the tables being updated by your\n> applications. It makes \"autovacuum\" a whole lot less \"automatic.\"\n>\n> There are known enhancements coming up:\n>\n> - In 8.4, there is a capability for VACUUM to only process the\n> portions of the table known to have been altered.\n>\n> That ought to be a better answer than *any* of the fiddling\n> suggested, to date. 
Right now, a VACUUM on \"public.my_huge_table\",\n> a table 18GB in size, will walk through the entire table, even\n> though there were only a handful of pages where tuples were\n> invalidated.\n>\n> This is almost certainly the single best improvement possible to\n> resolve your issue; it seems likely to *directly* address the\n> problem, and has the considerable merit of not requiring much if\n> any configuration/reconfiguration/scheduling.\n> --\n> (reverse (concatenate 'string \"gro.mca\" \"@\" \"enworbbc\"))\n> http://linuxdatabases.info/info/\n> \"what would we do without C? we would have PASAL, BASI, OBOL, and\n> Ommon Lisp.\" -- #Erik\n>\n> --\n> Sent via pgsql-performance mailing list ([email protected])\n> To make changes to your subscription:\n> http://www.postgresql.org/mailpref/pgsql-performance\n>\n", "msg_date": "Sat, 14 Feb 2009 17:28:32 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "Re: scheduling autovacuum at lean hours only." } ]
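A minimal sketch of the cron-driven, cost-throttled approach suggested in this thread. The table name is illustrative and the cost settings are starting points rather than tuned values; the point is that session-level cost settings give a nightly manual vacuum the same delay-based throttling that tames autovacuum during the day.

-- nightly_vacuum.sql, run from cron, e.g.: psql -f nightly_vacuum.sql yourdb
SET vacuum_cost_delay = 20;    -- sleep 20 ms each time vacuum_cost_limit worth of work is done
SET vacuum_cost_limit = 200;
VACUUM ANALYZE public.my_huge_table;   -- repeat for each large, heavily updated table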
[ { "msg_contents": "Hello,\n\nI have a table 'foo_bar' with a column 'col1' defined as\n'col1 varchar(512)'. This column is indexed using an expression index\ndefined as\n\nCREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)\n\nThe\nproblem is when I try matching using ILIKE, (col1 ILIKE 'foo%') \nPostgreSQL does not use an index scan but a Seq scan of the whole\ntable, but when I try (lower(col1) LIKE 'foo%')\nPostgreSQL uses an index scan.\n\nCould this be a bug with ILIKE or am I missing something?\n\nThe table has ~ 4 million rows.\nPostgreSQL 8.3.5 on Windows Vista, non C locale, DB encoding is LATIN1.\n\nThank you in advance,\nMilos.\n_________________________________________________________________\nWant to marry your mail? Combine your email accounts here!\nhttp://livelife.ninemsn.com.au/article.aspx?id=633386\n\n\n\n\n\nHello,I have a table 'foo_bar' with a column 'col1' defined as\n'col1 varchar(512)'. This column is indexed using an expression index\ndefined asCREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)The\nproblem is when I try matching using ILIKE, (col1 ILIKE 'foo%') \nPostgreSQL does not use an index scan but a Seq scan of the whole\ntable, but when I try (lower(col1) LIKE 'foo%')\nPostgreSQL uses an index scan.Could this be a bug with ILIKE or am I missing something?The table has ~ 4 million rows.PostgreSQL 8.3.5 on Windows Vista, non C locale, DB encoding is LATIN1.Thank you in advance,Milos.Combine your email accounts here! Want to marry your mail?", "msg_date": "Thu, 12 Feb 2009 22:50:23 +1100", "msg_from": "milos d <[email protected]>", "msg_from_op": true, "msg_subject": "col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE 'foo%'" }, { "msg_contents": "milos d wrote:\n> Hello,\n> \n> I have a table 'foo_bar' with a column 'col1' defined as\n> 'col1 varchar(512)'. This column is indexed using an expression index\n> defined as\n> \n> CREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)\n> \n> The\n> problem is when I try matching using ILIKE, (col1 ILIKE 'foo%') \n> PostgreSQL does not use an index scan but a Seq scan of the whole\n> table, but when I try (lower(col1) LIKE 'foo%')\n> PostgreSQL uses an index scan.\n\nWhy should it use the index? They're not even equivalent queries:\n\nSELECT ... WHERE lower(col1) LIKE 'FOO%'\n\nSELECT ... WHERE col1 ILIKE 'FOO%'\n\nOne is guaranteed to return no rows, the other not.\n\n-- \n Richard Huxton\n Archonet Ltd\n", "msg_date": "Thu, 12 Feb 2009 12:04:16 +0000", "msg_from": "Richard Huxton <[email protected]>", "msg_from_op": false, "msg_subject": "Re: col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE\n 'foo%'" }, { "msg_contents": "I'm pretty sure the intent was:\n\nWHERE lower(col1) LIKE lower('foo%');\n\nMost likely, his client code ensures the lower on the string passed in the query. Whether it should use an index or not has nothing to do with his example.\n\nAll I can do when answering this question, is confirm that the query planner doesn't know when it can and can't pair ILIKE with an index, even if lower() or upper() are used on parameters and indexes.\nI use\n WHERE lower(col1) LIKE\n(and create a functional index on lower(col1))\nAnd just pretend that there isn't an ILIKE.\n\nOn 2/12/09 4:04 AM, \"Richard Huxton\" <[email protected]> wrote:\n\nmilos d wrote:\n> Hello,\n>\n> I have a table 'foo_bar' with a column 'col1' defined as\n> 'col1 varchar(512)'. 
This column is indexed using an expression index\n> defined as\n>\n> CREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)\n>\n> The\n> problem is when I try matching using ILIKE, (col1 ILIKE 'foo%')\n> PostgreSQL does not use an index scan but a Seq scan of the whole\n> table, but when I try (lower(col1) LIKE 'foo%')\n> PostgreSQL uses an index scan.\n\nWhy should it use the index? They're not even equivalent queries:\n\nSELECT ... WHERE lower(col1) LIKE 'FOO%'\n\nSELECT ... WHERE col1 ILIKE 'FOO%'\n\nOne is guaranteed to return no rows, the other not.\n\n--\n Richard Huxton\n Archonet Ltd\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\nRe: [PERFORM] col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE 'foo%'\n\n\nI’m pretty sure the intent was:\n\nWHERE lower(col1) LIKE lower(‘foo%’);\n\nMost likely, his client code ensures the lower on the string passed in the query.   Whether it should use an index or not has nothing to do with his example.\n\nAll I can do when answering this question, is confirm that the query planner doesn’t know when it can and can’t pair ILIKE with an index, even if lower() or upper() are used on parameters and indexes.\nI use \n  WHERE lower(col1) LIKE \n(and create a functional index on lower(col1))\nAnd just pretend that there isn’t an ILIKE.\n\nOn 2/12/09 4:04 AM, \"Richard Huxton\" <[email protected]> wrote:\n\nmilos d wrote:\n> Hello,\n>\n> I have a table 'foo_bar' with a column 'col1' defined as\n> 'col1 varchar(512)'. This column is indexed using an expression index\n> defined as\n>\n> CREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)\n>\n> The\n> problem is when I try matching using ILIKE, (col1 ILIKE 'foo%')\n> PostgreSQL does not use an index scan but a Seq scan of the whole\n> table, but when I try (lower(col1) LIKE 'foo%')\n> PostgreSQL uses an index scan.\n\nWhy should it use the index? They're not even equivalent queries:\n\nSELECT ... WHERE lower(col1) LIKE 'FOO%'\n\nSELECT ... WHERE col1 ILIKE 'FOO%'\n\nOne is guaranteed to return no rows, the other not.\n\n--\n  Richard Huxton\n  Archonet Ltd\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance", "msg_date": "Thu, 12 Feb 2009 11:50:42 -0800", "msg_from": "Scott Carey <[email protected]>", "msg_from_op": false, "msg_subject": "Re: col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE\n 'foo%'" }, { "msg_contents": "Thanks Scott, \n\nYes you are right, my code does lower case 'foo%'. I would expect SELECT ... WHERE col1 ILIKE 'foo%' to use an index. \nThe way I understand it is that the planner would translate this to SELECT ... WHERE lower(col1) LIKE lower('foo%') ?\n\nYou may be right, with more tests I see that ILIKE never uses an index. e.g SELECT ... 
WHERE lower(col1) ILIKE lower('foo%' ) doesn't use an index.\n\nI think we need clarification from the developers.\n\nTools like Hibernate and NHibernate use ILIKE to do insensitive matching when using PostgresDialect.\n\nRegards,\nMilos.\n\nFrom: [email protected]\nTo: [email protected]; [email protected]\nCC: [email protected]\nDate: Thu, 12 Feb 2009 11:50:42 -0800\nSubject: Re: [PERFORM] col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE 'foo%'\n\n\n\n\n\nRe: [PERFORM] col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE 'foo%'\n\n\nI’m pretty sure the intent was:\n\n\n\nWHERE lower(col1) LIKE lower(‘foo%’);\n\n\n\nMost likely, his client code ensures the lower on the string passed in the query. Whether it should use an index or not has nothing to do with his example.\n\n\n\nAll I can do when answering this question, is confirm that the query planner doesn’t know when it can and can’t pair ILIKE with an index, even if lower() or upper() are used on parameters and indexes.\n\nI use \n\n WHERE lower(col1) LIKE \n\n(and create a functional index on lower(col1))\n\nAnd just pretend that there isn’t an ILIKE.\n\n\n\nOn 2/12/09 4:04 AM, \"Richard Huxton\" <[email protected]> wrote:\n\n\n\nmilos d wrote:\n\n> Hello,\n\n>\n\n> I have a table 'foo_bar' with a column 'col1' defined as\n\n> 'col1 varchar(512)'. This column is indexed using an expression index\n\n> defined as\n\n>\n\n> CREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)\n\n>\n\n> The\n\n> problem is when I try matching using ILIKE, (col1 ILIKE 'foo%')\n\n> PostgreSQL does not use an index scan but a Seq scan of the whole\n\n> table, but when I try (lower(col1) LIKE 'foo%')\n\n> PostgreSQL uses an index scan.\n\n\n\nWhy should it use the index? They're not even equivalent queries:\n\n\n\nSELECT ... WHERE lower(col1) LIKE 'FOO%'\n\n\n\nSELECT ... WHERE col1 ILIKE 'FOO%'\n\n\n\nOne is guaranteed to return no rows, the other not.\n\n\n\n--\n\n Richard Huxton\n\n Archonet Ltd\n\n\n\n--\n\nSent via pgsql-performance mailing list ([email protected])\n\nTo make changes to your subscription:\n\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\n\n\n\n_________________________________________________________________\nNeed a new place to rent, share or buy? Let ninemsn property help\nhttp://a.ninemsn.com.au/b.aspx?URL=http%3A%2F%2Fninemsn%2Edomain%2Ecom%2Eau%2F%3Fs%5Fcid%3DFDMedia%3ANineMSN%5FHotmail%5FTagline&_t=774152450&_r=Domain_tagline&_m=EXT\n\n\n\n\n\nThanks Scott, Yes you are right, my code does lower case 'foo%'. I would expect SELECT ... WHERE col1 ILIKE 'foo%' to use an index. The way I understand it is that the planner would translate this to SELECT ... WHERE lower(col1) LIKE lower('foo%') ?You may be right, with more tests I see that ILIKE never uses an index. e.g SELECT ... WHERE lower(col1) ILIKE lower('foo%' ) doesn't use an index.I think we need clarification from the developers.Tools like Hibernate and NHibernate use ILIKE to do insensitive matching when using PostgresDialect.Regards,Milos.From: [email protected]: [email protected]; [email protected]: [email protected]: Thu, 12 Feb 2009 11:50:42 -0800Subject: Re: [PERFORM] col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE 'foo%'\nRe: [PERFORM] col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE 'foo%'\nI’m pretty sure the intent was:\n\nWHERE lower(col1) LIKE lower(‘foo%’);\n\nMost likely, his client code ensures the lower on the string passed in the query.   
Whether it should use an index or not has nothing to do with his example.\n\nAll I can do when answering this question, is confirm that the query planner doesn’t know when it can and can’t pair ILIKE with an index, even if lower() or upper() are used on parameters and indexes.\nI use \n  WHERE lower(col1) LIKE \n(and create a functional index on lower(col1))\nAnd just pretend that there isn’t an ILIKE.\n\nOn 2/12/09 4:04 AM, \"Richard Huxton\" <[email protected]> wrote:\n\nmilos d wrote:\n> Hello,\n>\n> I have a table 'foo_bar' with a column 'col1' defined as\n> 'col1 varchar(512)'. This column is indexed using an expression index\n> defined as\n>\n> CREATE INDEX ix_foo_bar_by_col1 ON foo_bar(lower(col1) col1 varchar_pattern_ops)\n>\n> The\n> problem is when I try matching using ILIKE, (col1 ILIKE 'foo%')\n> PostgreSQL does not use an index scan but a Seq scan of the whole\n> table, but when I try (lower(col1) LIKE 'foo%')\n> PostgreSQL uses an index scan.\n\nWhy should it use the index? They're not even equivalent queries:\n\nSELECT ... WHERE lower(col1) LIKE 'FOO%'\n\nSELECT ... WHERE col1 ILIKE 'FOO%'\n\nOne is guaranteed to return no rows, the other not.\n\n--\n  Richard Huxton\n  Archonet Ltd\n\n--\nSent via pgsql-performance mailing list ([email protected])\nTo make changes to your subscription:\nhttp://www.postgresql.org/mailpref/pgsql-performance\n\nLet ninemsn property help! Need a new place to rent, share or buy?", "msg_date": "Fri, 13 Feb 2009 10:49:46 +1100", "msg_from": "milos d <[email protected]>", "msg_from_op": true, "msg_subject": "Re: col1 ILIKE 'foo%' not behaving the same as lower(col1) LIKE\n 'foo%'" } ]
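A runnable sketch of the workaround that emerged in this thread, using the table and column names from the first message. The index definition below assumes the intended expression index, since the definition quoted above is not valid syntax as written, and it uses text_pattern_ops because lower() yields text.

CREATE INDEX ix_foo_bar_by_col1 ON foo_bar (lower(col1) text_pattern_ops);

-- Matches the indexed expression with a left-anchored pattern, so the index can be used:
SELECT count(*) FROM foo_bar WHERE lower(col1) LIKE lower('foo%');

-- ILIKE is not rewritten into the lower()/LIKE form, so this remains a sequential scan:
SELECT count(*) FROM foo_bar WHERE col1 ILIKE 'foo%';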
[ { "msg_contents": "Hi Guys,\n\nI'm a bit confused when the proper way to use GIST versus GIN indexes \nwith integer arrays.\n\nThe documentation states:\n\nhttp://www.postgresql.org/docs/current/static/intarray.html\n\nThe choice between GiST and GIN indexing depends on the relative \nperformance characteristics of GiST and GIN, which are discussed \nelsewhere. As a rule of thumb, a GIN index is faster to search than a \nGiST index, but slower to build or update; so GIN is better suited for \nstatic data and GiST for often-updated data.\n\nSince 100% of my queries are for retrieval, I should use GIN but it \nnever appears to be used unlike how GIST indexes are:\n\ngearbuyer_ig=# select version();\n version\n----------------------------------------------------------------------------------------------------\n PostgreSQL 8.3.6 on i686-pc-linux-gnu, compiled by GCC gcc (GCC) \n4.1.2 20070925 (Red Hat 4.1.2-33)\n(1 row)\n\nWith just a GIN index I get this plan (no use of GIN):\n\ngearbuyer_ig=# explain select count(*) from items where \nitems.fast_colors @> ARRAY[0];\n QUERY PLAN\n-----------------------------------------------------------------\n Aggregate (cost=21194.27..21194.28 rows=1 width=0)\n -> Seq Scan on items (cost=0.00..21193.64 rows=251 width=0)\n Filter: (fast_colors @> '{0}'::integer[])\n(3 rows)\n\nWith a GIST index created like:\n\ngearbuyer_ig=# CREATE INDEX items_fast_colors_rdtree2_idx ON items \nUSING gist (fast_colors gist__int_ops);\n\ngearbuyer_ig=# explain select count(*) from items where \nitems.fast_colors @> ARRAY[0];\n QUERY PLAN\n-----------------------------------------------------------------------------------------------------\n Aggregate (cost=929.81..929.82 rows=1 width=0)\n -> Bitmap Heap Scan on items (cost=14.30..929.18 rows=251 width=0)\n Recheck Cond: (fast_colors @> '{0}'::integer[])\n -> Bitmap Index Scan on items_fast_colors_rdtree2_idx \n(cost=0.00..14.24 rows=251 width=0)\n Index Cond: (fast_colors @> '{0}'::integer[])\n(5 rows)\n\nAny insight is greatly appreciated. Could this be a regression from \n8.3.5 and 8.3.6?\n\nThanks,\n\nRusty\n--\nRusty Conover\[email protected]\nInfoGears Inc / GearBuyer.com / FootwearBuyer.com\nhttp://www.infogears.com\nhttp://www.gearbuyer.com\nhttp://www.footwearbuyer.com\nHi Guys,I'm a bit confused when the proper way to use GIST versus GIN indexes with integer arrays.The documentation states:http://www.postgresql.org/docs/current/static/intarray.htmlThe choice between GiST and GIN indexing depends on the relative performance characteristics of GiST and GIN, which are discussed elsewhere. 
As a rule of thumb, a GIN index is faster to search than a GiST index, but slower to build or update; so GIN is better suited for static data and GiST for often-updated data.Since 100% of my queries are for retrieval, I should use GIN but it never appears to be used unlike how GIST indexes are:gearbuyer_ig=# select version();                                              version                                               ---------------------------------------------------------------------------------------------------- PostgreSQL 8.3.6 on i686-pc-linux-gnu, compiled by GCC gcc (GCC) 4.1.2 20070925 (Red Hat 4.1.2-33)(1 row)With just a GIN index I get this plan (no use of GIN):gearbuyer_ig=# explain select count(*) from items where items.fast_colors @> ARRAY[0];                           QUERY PLAN                            ----------------------------------------------------------------- Aggregate  (cost=21194.27..21194.28 rows=1 width=0)   ->  Seq Scan on items  (cost=0.00..21193.64 rows=251 width=0)         Filter: (fast_colors @> '{0}'::integer[])(3 rows)With a GIST index created like:gearbuyer_ig=# CREATE INDEX items_fast_colors_rdtree2_idx ON items USING gist (fast_colors gist__int_ops);gearbuyer_ig=# explain select count(*) from items where items.fast_colors @> ARRAY[0];                                             QUERY PLAN                                              ----------------------------------------------------------------------------------------------------- Aggregate  (cost=929.81..929.82 rows=1 width=0)   ->  Bitmap Heap Scan on items  (cost=14.30..929.18 rows=251 width=0)         Recheck Cond: (fast_colors @> '{0}'::integer[])         ->  Bitmap Index Scan on items_fast_colors_rdtree2_idx  (cost=0.00..14.24 rows=251 width=0)               Index Cond: (fast_colors @> '{0}'::integer[])(5 rows)Any insight is greatly appreciated.  Could this be a regression from 8.3.5 and 8.3.6?Thanks,Rusty --Rusty [email protected] Inc / GearBuyer.com / FootwearBuyer.comhttp://www.infogears.comhttp://www.gearbuyer.comhttp://www.footwearbuyer.com", "msg_date": "Thu, 12 Feb 2009 13:09:14 -0700", "msg_from": "Rusty Conover <[email protected]>", "msg_from_op": true, "msg_subject": "GIST versus GIN indexes for intarrays" }, { "msg_contents": "Rusty Conover <[email protected]> writes:\n> Since 100% of my queries are for retrieval, I should use GIN but it \n> never appears to be used unlike how GIST indexes are:\n\nYou haven't shown us either the table or the index declaration,\nso it's a bit tough to comment on that. It's worth noting though\nthat your GIST example appears to rely on a nonstandard operator class.\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 12 Feb 2009 15:54:16 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: GIST versus GIN indexes for intarrays " }, { "msg_contents": "\nOn Feb 12, 2009, at 1:54 PM, Tom Lane wrote:\n\n> Rusty Conover <[email protected]> writes:\n>> Since 100% of my queries are for retrieval, I should use GIN but it\n>> never appears to be used unlike how GIST indexes are:\n>\n> You haven't shown us either the table or the index declaration,\n> so it's a bit tough to comment on that. 
It's worth noting though\n> that your GIST example appears to rely on a nonstandard operator \n> class.\n>\n> \t\t\tregards, tom lane\n>\n\nHi Tom,\n\nMy apologies, below is the table definition, and the GIN index creation.\n\nThe gist__int_ops is the default operator class for integer[] arrays, \nas shown at:\n\nhttp://www.postgresql.org/docs/current/static/intarray.html\n\ngearbuyer_ig=# \\d items\n Table \"public.items\"\n Column | Type | Modifiers\n-------------------------+----------- \n+---------------------------------------------------\n item_id | integer | not null default \nnextval('generic_seq'::regclass)\n gb_product_url | text | not null\n group_id | integer |\n category_id | integer |\n product_name | text | not null\n gender | text | not null\n description_extract | text | not null\n sort_price | real | not null\n price_range | text | not null\n brand_id | integer | not null\n xapian_doc_id | integer |\n average_rating | uint1 |\n reviews_count | smallint |\n store_count | uint1 |\n default_image_id | integer |\n available_sizes | integer[] |\n fast_colors | integer[] |\n has_coupons | boolean | not null default false\n age_low | uint1 |\n sale_percentage_low | uint1 |\n store_count_low | uint1 |\n price_range_low | smallint |\n offering_stores | integer[] |\n subclassification_ids | integer[] |\n popularity_rank | integer |\n default_similarity_type | uint1 |\n default_similarity_id | integer |\n gc_lookup_id | integer |\n\nThe GIN index was created via:\n\nCREATE INDEX items_fast_colors_rdtree_idx ON items USING gin \n(fast_colors);\n\nCheers,\n\nRusty\n--\nRusty Conover\[email protected]\nInfoGears Inc / GearBuyer.com / FootwearBuyer.com\nhttp://www.infogears.com\nhttp://www.gearbuyer.com\nhttp://www.footwearbuyer.com\n", "msg_date": "Thu, 12 Feb 2009 14:05:02 -0700", "msg_from": "Rusty Conover <[email protected]>", "msg_from_op": true, "msg_subject": "Re: GIST versus GIN indexes for intarrays " }, { "msg_contents": "Rusty Conover <[email protected]> writes:\n> The gist__int_ops is the default operator class for integer[] arrays, \n> as shown at:\n> http://www.postgresql.org/docs/current/static/intarray.html\n\nAh, so you have contrib/intarray installed.\n\n[ pokes at it... ] Seems like what we have here is another iteration\nof this ancient bug:\nhttp://archives.postgresql.org/pgsql-committers/2004-01/msg00073.php\nto wit, contrib/intarray is defining its own @> and <@ operators that\nconflict with those since added to the core. In the case Rusty is\nshowing, the @> gets resolved as intarray's @> (because that's an\nexact match, where the core provides anyarray @> anyarray) and then\nthis operator is NOT a member of the core-provided GIN opclass for\ninteger arrays.\n\nThe short-term workaround for Rusty is probably to create his GIN\nindex using the intarray-provided gin__int_ops opclass. But it\nseems to me that we ought to get rid of intarray's @> and <@ operators\nand have the module depend on the core anyarray operators, just as we\nhave already done for = and <>. 
Comments?\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 12 Feb 2009 16:29:38 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORM] GIST versus GIN indexes for intarrays " }, { "msg_contents": "On Feb 12, 2009, at 2:29 PM, Tom Lane wrote:\n\n> Rusty Conover <[email protected]> writes:\n>> The gist__int_ops is the default operator class for integer[] arrays,\n>> as shown at:\n>> http://www.postgresql.org/docs/current/static/intarray.html\n>\n> Ah, so you have contrib/intarray installed.\n>\n> [ pokes at it... ] Seems like what we have here is another iteration\n> of this ancient bug:\n> http://archives.postgresql.org/pgsql-committers/2004-01/msg00073.php\n> to wit, contrib/intarray is defining its own @> and <@ operators that\n> conflict with those since added to the core. In the case Rusty is\n> showing, the @> gets resolved as intarray's @> (because that's an\n> exact match, where the core provides anyarray @> anyarray) and then\n> this operator is NOT a member of the core-provided GIN opclass for\n> integer arrays.\n>\n> The short-term workaround for Rusty is probably to create his GIN\n> index using the intarray-provided gin__int_ops opclass. But it\n> seems to me that we ought to get rid of intarray's @> and <@ operators\n> and have the module depend on the core anyarray operators, just as we\n> have already done for = and <>. Comments?\n\nHi Tom,\n\nFor the record using the GIN opclass does resolve the problem for me. \nThe indexes are now seeing usage.\n\nThanks for the help,\n\nRusty\n--\nRusty Conover\[email protected]\nInfoGears Inc / GearBuyer.com / FootwearBuyer.com\nhttp://www.infogears.com\nhttp://www.gearbuyer.com\nhttp://www.footwearbuyer.com\n", "msg_date": "Thu, 12 Feb 2009 18:32:43 -0700", "msg_from": "Rusty Conover <[email protected]>", "msg_from_op": true, "msg_subject": "Re: GIST versus GIN indexes for intarrays " }, { "msg_contents": "> The short-term workaround for Rusty is probably to create his GIN\n> index using the intarray-provided gin__int_ops opclass. 
But it\nRight\n> seems to me that we ought to get rid of intarray's @> and <@ operators\n> and have the module depend on the core anyarray operators, just as we\n> have already done for = and <>. Comments?\nAgree, will do. Although built-in anyarray operators have ~N^2 behaviour while \nintarray's version - only N*log(N)\n-- \nTeodor Sigaev E-mail: [email protected]\n WWW: http://www.sigaev.ru/\n", "msg_date": "Fri, 13 Feb 2009 16:12:53 +0300", "msg_from": "Teodor Sigaev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORM] GIST versus GIN indexes for intarrays" }, { "msg_contents": "On Fri, Feb 13, 2009 at 04:12:53PM +0300, Teodor Sigaev wrote:\n>> The short-term workaround for Rusty is probably to create his GIN\n>> index using the intarray-provided gin__int_ops opclass. But it\n> Right\n>> seems to me that we ought to get rid of intarray's @> and <@ operators\n>> and have the module depend on the core anyarray operators, just as we\n>> have already done for = and <>. Comments?\n> Agree, will do. Although built-in anyarray operators have ~N^2 behaviour \n> while intarray's version - only N*log(N)\nIs there a way to have the built-in anyarray operators be N*log(N)?\n\nKen\n", "msg_date": "Fri, 13 Feb 2009 08:04:58 -0600", "msg_from": "Kenneth Marshall <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORM] GIST versus GIN indexes for intarrays" }, { "msg_contents": "Teodor Sigaev <[email protected]> writes:\n>> seems to me that we ought to get rid of intarray's @> and <@ operators\n>> and have the module depend on the core anyarray operators, just as we\n>> have already done for = and <>. Comments?\n\n> Agree, will do. Although built-in anyarray operators have ~N^2 behaviour while \n> intarray's version - only N*log(N)\n\nReally? isort() looks like a bubble sort to me.\n\nBut in any case, a pre-sort is probably actually *slower* for small\nnumbers of array elements. I wonder where the crossover is. In\nprinciple we could make the core implementation do a sort when working\nwith a sortable datatype, but I'm unsure it's worth the trouble.\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 13 Feb 2009 12:20:58 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PERFORM] GIST versus GIN indexes for intarrays " } ]
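For reference, the short-term fix Rusty reported success with looks roughly like this (index and table names taken from his earlier posts):

DROP INDEX items_fast_colors_rdtree_idx;
CREATE INDEX items_fast_colors_rdtree_idx ON items USING gin (fast_colors gin__int_ops);

-- The plan should now show a bitmap index scan rather than a sequential scan:
EXPLAIN SELECT count(*) FROM items WHERE items.fast_colors @> ARRAY[0];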
[ { "msg_contents": "After upgrading from 8.2 to 8.3.5, the write load on our database\nserver has increased dramatically and inexplicably -- as has the CPU\nusage.\n\nHere's a Munin graph of iostat showing the sudden increase in blocks\nwritten/sec:\n\n http://purefiction.net/paste/Totals-iostatwrite1-week.png\n\nWe expected the upgrade to give us better, not worse, performance. How\ndo I diagnose this problem?\n\nThe application code has not changed, and neither have the usage\npatterns. The number of tuple inserts/updates/deletes per second has\nnot deviated from the normal. However, CPU usage has increased by\naround 30%, and the load average has similarly increased.\n\nThe other performance metrics I monitor, such as transaction load,\nHTTP traffic, etc., show everything else as either normal or slightly\ndecreased, as you would expect when the database server slows down.\n\nThe upgrade was done with dump/restore using \"pg_dump -Fc\". The old\ndatabase lived on a SAN volume, whereas the new database lives on a\nlocal disk volume.\n\nOS is Linux 2.6.24 on Intel (x86_64).\n\nAlexander.\n", "msg_date": "Fri, 13 Feb 2009 12:53:12 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "I/O increase after upgrading to 8.3.5" }, { "msg_contents": ">>> Alexander Staubo <[email protected]> wrote: \n> After upgrading from 8.2 to 8.3.5, the write load on our database\n> server has increased dramatically and inexplicably -- as has the CPU\n> usage.\n \nDid you do a VACUUM ANALYZE of the database after loading it? Without\nthe database VACUUM, the first read of any page causes it to be\nrewritten to set hint bits. Without an ANALYZE, it might be picking\nvery inefficient plans. I actually run a VACUUM FREEZE ANALYZE after\nloading a database (as with the upgrade), to prevent massive rewrites\nof everything in the database at some later date due to freeze\nactivity.\n \nIt could be something else, but I've seen similar behavior for the\nabove reasons.\n \n-Kevin\n", "msg_date": "Fri, 13 Feb 2009 08:46:42 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Fri, Feb 13, 2009 at 3:46 PM, Kevin Grittner\n<[email protected]> wrote:\n>>>> Alexander Staubo <[email protected]> wrote:\n>> After upgrading from 8.2 to 8.3.5, the write load on our database\n>> server has increased dramatically and inexplicably -- as has the CPU\n>> usage.\n>\n> Did you do a VACUUM ANALYZE of the database after loading it? Without\n> the database VACUUM, the first read of any page causes it to be\n> rewritten to set hint bits. Without an ANALYZE, it might be picking\n> very inefficient plans. I actually run a VACUUM FREEZE ANALYZE after\n> loading a database (as with the upgrade), to prevent massive rewrites\n> of everything in the database at some later date due to freeze\n> activity.\n\nThanks, the lack of statistics should explain why things are a bit\nslow. I ran a \"vacuum freeze analyze\" now, but the I/O level is still\nquite high.\n\nI have verified using pidstat that the I/O is all caused by\nPostgreSQL. 
Here's some sample output from iostat, interval 1 second\n(the 4th column is KB written):\n\nsda 1173.00 68.00 149672.00 68 149672\nsda 14.00 0.00 1712.00 0 1712\nsda 2.00 0.00 336.00 0 336\nsda 679.00 344.00 115200.00 344 115200\nsda 238.00 0.00 61764.00 0 61764\nsda 436.00 0.00 95004.00 0 95004\nsda 14.00 0.00 1032.00 0 1032\nsda 1882.00 72.00 82380.00 72 82380\nsda 173.00 8.00 7936.00 8 7936\n\nWhat I find odd is that PostgreSQL is only clocking at about 8 tuple\nmodifications per second average (on our user tables). There are\naround 800 transactions per second, but most of these are only reads.\nHow can 8 tuples/sec result in 115MB writes per second? The database\nis not large enough and the tuples not large enough to explain those\nhuge amounts of data. At 115MB/s you could rewrite the entire database\nin 1.5 minutes.\n\nIs there any way to determine the *sizes* of the tuple mods, not just\nthe frequency, that PostgreSQL performs? What kinds of tools are\navailable to track down the causes of these writes?\n\nAlexander.\n", "msg_date": "Fri, 13 Feb 2009 16:58:21 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Fri, Feb 13, 2009 at 12:53 PM, Alexander Staubo <[email protected]> wrote:\n> The upgrade was done with dump/restore using \"pg_dump -Fc\". The old\n> database lived on a SAN volume, whereas the new database lives on a\n> local disk volume.\n\nI need to correct myself: The Munin graphs were never set to track the\nSAN volume where the old database lived. So when the graph goes from\n\"near-zero\" to \"lots\", it's actually correct.\n\nWhen I compare the correct graph, however, it's apparently that I/O\nwrites have, on average, doubled.\n\nThe new volume uses the same file system and block size as the old one.\n\nAlexander.\n", "msg_date": "Fri, 13 Feb 2009 17:04:58 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": ">>> Alexander Staubo <[email protected]> wrote: \n> When I compare the correct graph, however, it's apparently that I/O\n> writes have, on average, doubled.\n \nCould you show the non-commented lines from old and new\npostgresql.conf files, please?\n \n-Kevin\n", "msg_date": "Fri, 13 Feb 2009 10:17:03 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Fri, Feb 13, 2009 at 5:17 PM, Kevin Grittner\n<[email protected]> wrote:\n> Could you show the non-commented lines from old and new\n> postgresql.conf files, please?\n\nAttached. The differences are not performance-related, as far as I can\nsee, aside from the additional of \"synchronous_commit = off\".\n\nAlexander.", "msg_date": "Fri, 13 Feb 2009 17:31:25 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": ">>> Alexander Staubo <[email protected]> wrote: \n> Kevin Grittner <[email protected]> wrote:\n>> Could you show the non-commented lines from old and new\n>> postgresql.conf files, please?\n> \n> Attached. 
The differences are not performance-related, as far as I\n> can see, aside from the additional of \"synchronous_commit = off\".\n \nYou should definitely set effective_cache_size.\n \nIf you still see the problem after that, I suggest testing different\nsettings for:\n \nbgwriter_lru_maxpages\nbgwriter_lru_multiplier\ncheckpoint_segments\ncheckpoint_timeout\ncheckpoint_completion_target\n \nBoth the checkpoint process and the background writer changed quite a\nbit, and these are the main knobs for tuning the new version.\n \nIt's possible that under 8.2 your hint bit writes were being combined\nwith other writes due to caching, and differences in the timings now\nhave the hint bit writes happening too long after the initial write to\nget that benefit. If that's the case, the counterintuitive step of\nmaking PostgreSQL more aggressive about writing to the OS cache might\nreduce your disk write I/O. Making it less aggressive might allow\nthem to combine in the PostgreSQL buffers before making to the OS\ncache.\n \n-Kevin\n", "msg_date": "Fri, 13 Feb 2009 11:35:29 -0600", "msg_from": "\"Kevin Grittner\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "Alexander Staubo wrote:\n> On Fri, Feb 13, 2009 at 12:53 PM, Alexander Staubo <[email protected]> wrote:\n>> The upgrade was done with dump/restore using \"pg_dump -Fc\". The old\n>> database lived on a SAN volume, whereas the new database lives on a\n>> local disk volume.\n> \n> I need to correct myself: The Munin graphs were never set to track the\n> SAN volume where the old database lived. So when the graph goes from\n> \"near-zero\" to \"lots\", it's actually correct.\n> \n> When I compare the correct graph, however, it's apparently that I/O\n> writes have, on average, doubled.\n\nIs there any chance you had pg_xlog stored separately on your old \ndatabase, and I/O for it wasn't being recorded?\n\n--\nCraig Ringer\n", "msg_date": "Sat, 14 Feb 2009 17:49:05 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Fri, Feb 13, 2009 at 6:35 PM, Kevin Grittner\n<[email protected]> wrote:\n> You should definitely set effective_cache_size.\n\nWow -- I set this to 10GB (5GB for shared buffers + another 5GB for\ncache), and today's average write frequency went from 20MB/sec to just\n1MB/sec. The documentation suggests that effective_cache_size is only\nused for query planning in conjunction with indexes. So how come it\naffects write access?\n\n> If you still see the problem after that, I suggest testing different\n> settings for:\n>\n> bgwriter_lru_maxpages\n> bgwriter_lru_multiplier\n> checkpoint_segments\n> checkpoint_timeout\n> checkpoint_completion_target\n>\n> Both the checkpoint process and the background writer changed quite a\n> bit, and these are the main knobs for tuning the new version.\n\nWe are hoping to set up a duplicate instance and play back the SQL log\nagainst it so we can experiment with different settings. Until we have\nsuch a setup, I'm not sure what to do with the knobs other than frob\nthem wildly. :-) Are there any statistics, either in PostgreSQL proper\nor in the OS, that I can use as metrics to guide the tuning? For\nexample, is there anything in pg_stat_bgwriter that can help me tune\nthe bgwriter_lru_* settings?\n\nDo transactions that only contain query statements end up writing\nentries to the WAL when they commit? 
If yes, can we avoid the writes\nby wrapping our queries in \"read only\" transactions, and would it be\nworth the effort?\n\nOur web application is handling 30 requests per second at peak time;\neach request is performing dozens queries in autocommit mode, ie. one\ntransaction per query. Only a minority of those requests actually end\nup modifying the database. PostgreSQL is committing and fsyncing\n600-800 transactions per second, so that's probably a good chunk of\ndisk/CPU usage wasted, right?\n\nAlexander.\n", "msg_date": "Sat, 14 Feb 2009 19:26:37 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Sat, Feb 14, 2009 at 9:49 AM, Craig Ringer\n<[email protected]> wrote:\n> Is there any chance you had pg_xlog stored separately on your old database,\n> and I/O for it wasn't being recorded?\n\nNo, the database files have always been on a single volume.\n\nAlexander.\n", "msg_date": "Sat, 14 Feb 2009 19:27:14 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "Alexander Staubo <[email protected]> writes:\n> <[email protected]> wrote:\n>> You should definitely set effective_cache_size.\n\n> Wow -- I set this to 10GB (5GB for shared buffers + another 5GB for\n> cache), and today's average write frequency went from 20MB/sec to just\n> 1MB/sec. The documentation suggests that effective_cache_size is only\n> used for query planning in conjunction with indexes. So how come it\n> affects write access?\n\nIt *is* only used for query planning. A plausible theory is that you\ncaused some join queries to change from hash or merge joining involving\na temporary hash or sort file to an index nestloop that doesn't use any\ntemporary storage. If the required hash or sort storage exceeded\nwork_mem, which you have set to just 10MB, that would have created some\nwrite traffic.\n\nDid you happen to notice whether your queries got faster or slower when\nyou did this? Watching only aggregate write traffic is a pretty limited\nview of what is happening in your database.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 14 Feb 2009 14:23:08 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5 " }, { "msg_contents": "On Sat, Feb 14, 2009 at 8:23 PM, Tom Lane <[email protected]> wrote:\n> Alexander Staubo <[email protected]> writes:\n>> Wow -- I set this to 10GB (5GB for shared buffers + another 5GB for\n>> cache), and today's average write frequency went from 20MB/sec to just\n>> 1MB/sec. The documentation suggests that effective_cache_size is only\n>> used for query planning in conjunction with indexes. So how come it\n>> affects write access?\n>\n> It *is* only used for query planning. A plausible theory is that you\n> caused some join queries to change from hash or merge joining involving\n> a temporary hash or sort file to an index nestloop that doesn't use any\n> temporary storage. If the required hash or sort storage exceeded\n> work_mem, which you have set to just 10MB, that would have created some\n> write traffic.\n\nInteresting. Is there any statistic available that can tell me whether\nwork_mem is being exceeded? 
The tools to monitor exactly what\nPostgreSQL is doing -- especially on a production box -- are fairly\nlimited, especially since Linux does not yet have anything close to\nDTrace in functionality.\n\n> Did you happen to notice whether your queries got faster or slower when\n> you did this? Watching only aggregate write traffic is a pretty limited\n> view of what is happening in your database.\n\nUnfortunately we don't log SQL queries at the moment. We do log\napplication response times, but they look roughly the same as before\nthe change. I could revert the effective_cache_size setting, turn on\nSQL logging for a while, then reapply the change and compare.\n\nAlexander.\n", "msg_date": "Sat, 14 Feb 2009 21:04:43 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Sat, 14 Feb 2009, Alexander Staubo wrote:\n\n> Are there any statistics, either in PostgreSQL proper or in the OS, that \n> I can use as metrics to guide the tuning? For example, is there anything \n> in pg_stat_bgwriter that can help me tune the bgwriter_lru_* settings?\n\nhttp://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htm goes \nover this topic, with \"Appendix B: pg_stat_bgwriter sample analysis\" \ncovering a look at what to do based on a pg_stat_bgwriter snapshot.\n\n> Do transactions that only contain query statements end up writing\n> entries to the WAL when they commit?\n\nYou need a transactions XID before you can write to the WAL, and quoting \nfrom the transaction management docs:\n\n\"Transactions and subtransactions are assigned permanent XIDs only when/if \nthey first do something that requires one --- typically, \ninsert/update/delete a tuple\"\n\nThe main thing that will cause writes even when reading are hint bit \nupdates, which have been mentioned here already. \nhttp://wiki.postgresql.org/wiki/Hint_Bits has an intro to that topic.\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Sun, 15 Feb 2009 12:35:04 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Sat, 14 Feb 2009, Alexander Staubo wrote:\n\n> Is there any statistic available that can tell me whether work_mem is \n> being exceeded?\n\nAs of 8.3, log_temp_files puts information about them into your logs; see \nhttp://www.postgresql.org/docs/current/static/runtime-config-logging.html\n\n--\n* Greg Smith [email protected] http://www.gregsmith.com Baltimore, MD\n", "msg_date": "Sun, 15 Feb 2009 12:38:00 -0500 (EST)", "msg_from": "Greg Smith <[email protected]>", "msg_from_op": false, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" }, { "msg_contents": "On Sun, Feb 15, 2009 at 6:35 PM, Greg Smith <[email protected]> wrote:\n> http://www.westnet.com/~gsmith/content/postgresql/chkp-bgw-83.htm goes over\n> this topic, with \"Appendix B: pg_stat_bgwriter sample analysis\" covering a\n> look at what to do based on a pg_stat_bgwriter snapshot.\n\nWonderful, thank you.\n\nAlexander.\n", "msg_date": "Mon, 16 Feb 2009 15:59:05 +0100", "msg_from": "Alexander Staubo <[email protected]>", "msg_from_op": true, "msg_subject": "Re: I/O increase after upgrading to 8.3.5" } ]
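A concrete starting point for the monitoring suggested in this thread; log_temp_files is a superuser setting here and could equally be set in postgresql.conf.

-- Compare snapshots of this view over time to see whether buffers are written
-- at checkpoints, by the background writer, or directly by backends:
SELECT * FROM pg_stat_bgwriter;

-- Log every temporary (sort/hash spill) file, which shows when work_mem is exceeded:
SET log_temp_files = 0;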
[ { "msg_contents": "I have received a Dell Poweredge 2950 MIII with two kinds of\ndrives. I can't make out the reason behind it; does it\nmake any difference in the long run or in performance?\nThe drives are similar in overall characteristics, but will\nthe minor differences cause any problem?\n\nscsi0 : LSI Logic SAS based MegaRAID driver\n Vendor: SEAGATE Model: ST973451SS Rev: SM04\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: FUJITSU Model: MBC2073RC Rev: D506\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: FUJITSU Model: MBC2073RC Rev: D506\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: SEAGATE Model: ST973451SS Rev: SM04\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: SEAGATE Model: ST973451SS Rev: SM04\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: FUJITSU Model: MBC2073RC Rev: D506\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: SEAGATE Model: ST973451SS Rev: SM04\n Type: Direct-Access ANSI SCSI revision: 05\n Vendor: FUJITSU Model: MBC2073RC Rev: D506\n Type: Direct-Access ANSI SCSI revision: 05\n\nthanks\nregds\n-- mallah\n", "msg_date": "Fri, 13 Feb 2009 19:00:50 +0530", "msg_from": "Rajesh Kumar Mallah <[email protected]>", "msg_from_op": true, "msg_subject": "dissimilar drives in Raid10 , does it make difference ?" }, { "msg_contents": "On Fri, 13 Feb 2009, Rajesh Kumar Mallah wrote:\n> I have received a Dell Poweredge 2950 MIII with two kinds of\n> drives. I can't make out the reason behind it; does it\n> make any difference in the long run or in performance?\n> The drives are similar in overall characteristics, but will\n> the minor differences cause any problem?\n\nAs long as the drives are approximately the same capacity and speed, then \nno, it will not cause problems.\n\nIn fact, it is recommended that two different types of drives are used. \nThat way, if there's a mass failure of a whole batch of drives from one \nparticular vendor, you don't lose all your data.\n\nMatthew\n\n-- \n\"Programming today is a race between software engineers striving to build\n bigger and better idiot-proof programs, and the Universe trying to produce\n bigger and better idiots. So far, the Universe is winning.\" -- Rich Cook\n", "msg_date": "Fri, 13 Feb 2009 14:45:54 +0000 (GMT)", "msg_from": "Matthew Wakeling <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dissimilar drives in Raid10 , does it make difference\n ?" }, { "msg_contents": "Matthew Wakeling wrote:\n\n> In fact, it is recommended that two different types of drives are used. \n> That way, if there's a mass failure of a whole batch of drives from one \n> particular vendor, you don't lose all your data.\n\nDon't think this is just paranoia, either. I've had it happen to me \nSEVERAL times - either a second drive fails before I can replace the \nfirst, or the second drive in a pair fails during rebuild onto the \nreplacement. I use regular RAID scrubbing, so this isn't just a case of \nundetected media errors.\n\nNothing beats good backups.\n\n--\nCraig Ringer\n", "msg_date": "Sat, 14 Feb 2009 17:46:07 +0900", "msg_from": "Craig Ringer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: dissimilar drives in Raid10 , does it make difference\n ?" } ]
[ { "msg_contents": "I did some performance tests (retrieving data from the DB to a .NET\napplication using the Npgsql driver) and found that for command type ==\nStoredProcedure it is better to call Prepare() first. But for\ncommand type == Text (direct query), performance is better if I do not\ncall Prepare(). Why is that? I thought Prepare() should\nboost the performance of the executed query. Why is executing a stored procedure\nwith Prepare() faster, but a direct query slower?\n\nI also wonder why executing a direct query is the fastest method for\nretrieving data. Shouldn't stored procedures be faster?\n\nSome details:\n- 10 000 iterations\n- single connection for all iterations\n- PostgreSQL on Windows\n- query (the same as in the stored procedure)\n\nSELECT count(*)\nFROM \"tableA\" a\nLEFT JOIN \"tableB\" b ON a.id = b.\"tableA_id\"\nLEFT JOIN \"tableC\" c ON b.id = c.\"tableB_id\";\n\n\nResults:\nSP with Prepare() (Prepare for each iteration): 05.75s\nSP with Prepare() (Prepare called only for first iteration): 12.81s\nSP without Prepare(): 12.87s\n\nDirect query with Prepare() (Prepare for each iteration): 06.73s\nDirect query with Prepare() (Prepare called only for first iteration): 03.60s\nDirect query without Prepare(): 03.43s\n", "msg_date": "Sat, 14 Feb 2009 17:11:43 +0100", "msg_from": "\"Peter G.\" <[email protected]>", "msg_from_op": true, "msg_subject": "Retrieving data from PostgreSQL to .NET application - performance test - surprising results" } ]
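As a rough server-side illustration of what a driver-level Prepare() buys: the statement is parsed and planned once, and later executions reuse the plan. Whether Npgsql issues an SQL-level PREPARE or the wire-protocol equivalent is a driver implementation detail, and the statement name below is arbitrary; the query is the one from the test above.

PREPARE count_join AS
  SELECT count(*)
  FROM "tableA" a
  LEFT JOIN "tableB" b ON a.id = b."tableA_id"
  LEFT JOIN "tableC" c ON b.id = c."tableB_id";

EXECUTE count_join;    -- repeated executions skip parse and plan
DEALLOCATE count_join;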