[ { "msg_contents": "Hello.\n\nI'm busy writing an IP accounting DB using nacctd and postgres.\nThe data gets inserted correctly into the database and I can run the\nselect queries that I desire. \n\ne.g. (select sum(size) from iptraff where srcip = \"209.100.30.2\") gives\nme the total bytes that that particular host sent. Now it would be\n*REALLY* groovy if I could do the following: (select sum(size) from\niptraff where scrip = \"209.100.30.0/24\")\nThat would tell me the total traffic for that subnet.\n\n>From what I understand the relevant code resides in network.c\nunfortunately I am not a C person :-( Perhaps this feature would be\nincluded in the next snapshot/release or someone could help me with my\nparticular installation.\n\nThanks!\nChrisG\n\n", "msg_date": "Fri, 4 Jun 1999 15:41:36 +0200 (SAST)", "msg_from": "[email protected]", "msg_from_op": true, "msg_subject": "inet data type and select " } ]
[ { "msg_contents": "> Modified Files:\n> lock.sgml set.sgml\n> Log Message:\n> Add input parameters for LOCK TABLE. Still needs explanation from Vadim.\n> Fix markup.\n\nI've fixed the markup on these files so they will build cleanly. Also,\nI've augmented the \"Inputs\" area for the new locking parameters, so\nperhaps Vadim can just insert his explanations.\n\nThere is only one hardcopy doc left to do, but the User's Guide is a\n310 page sucker so will take me at least two or three hours to polish\nfor hardcopy (probably a couple of days elapsed time). I won't be able\nto start until after the weekend, so if Vadim can write something in\nthe next two days I'll get a start on it Monday.\n\nThe html docs are built automatically each night, so after the build\ntonight you should be seeing something close to the final version on\nthe web site and in the /pub/doc tar files. Let me know if anyone sees\nany funny business with them...\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Fri, 04 Jun 1999 16:01:30 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [COMMITTERS] 'pgsql/doc/src/sgml/ref lock.sgml set.sgml'" } ]
[ { "msg_contents": "Well, after a delay of a lot of months (sorry, huge personal crises!) I am\nhappy to report that this works now, at least on a cursory test. I'll hit\non it a bit harder soon and report back, but hopefully this patch'll be in\ntime for 6.5? Certainly it works better now than before so maybe include\nthe patch anyway, even if there isn't time to do better testing.\n\nCheers, and many many thanks Tatsuo.\n\nIan\n\n---------- Forwarded message ----------\nDate: Fri, 12 Feb 1999 23:12:07 +0900\nFrom: Tatsuo Ishii <[email protected]>\nTo: [email protected]\nCc: Ian Grant <[email protected]>,\n Bruce Momjian <[email protected]>, [email protected]\nSubject: Re: [HACKERS] Backend problem with large objects \n\n> >Many thanks indeed for this. Unfortunately it doesn't completely work: it\n> >fixes the problem as reported, but when, instead of writing five\n> >characters, one at a time, I write five at once, the backend dies in\n> >the same place it did before. Here's the C code slightly modified to\n> >reproduce the problem:\n> \n> Give me some time. I'm not sure if I could solve new problem, though.\n> --\n> Tatsuo Ishii\n\nI think I have fixed the problem you mentioned. Ian, could you apply\nincluded patches and test again? Note that those are for 6.4.2 and\nadditions to the previous patches.\n\nBTW, lobj strangely added a 0 filled disk block at the head of the\nheap. As a result, even 1-byte-user-data lobj consumes at least 16384\nbytes (2 disk blocks)! Included patches also fix this problem.\n\nTo Bruce:\nThanks for taking care of my previous patches for current. If\nincluded patch is ok, I will make one for current.\n\n---------------------------- cut here ---------------------------------\n*** postgresql-6.4.2/src/backend/storage/large_object/inv_api.c.orig\tSun Dec 13 14:08:19 1998\n--- postgresql-6.4.2/src/backend/storage/large_object/inv_api.c\tFri Feb 12 20:21:05 1999\n***************\n*** 624,648 ****\n \t\t|| obj_desc->offset < obj_desc->lowbyte\n \t\t|| !ItemPointerIsValid(&(obj_desc->htid)))\n \t{\n \n \t\t/* initialize scan key if not done */\n \t\tif (obj_desc->iscan == (IndexScanDesc) NULL)\n \t\t{\n- \t\t\tScanKeyData skey;\n- \n \t\t\t/*\n \t\t\t * As scan index may be prematurely closed (on commit), we\n \t\t\t * must use object current offset (was 0) to reinitialize the\n \t\t\t * entry [ PA ].\n \t\t\t */\n- \t\t\tScanKeyEntryInitialize(&skey, 0x0, 1, F_INT4GE,\n- \t\t\t\t\t\t\t\t Int32GetDatum(obj_desc->offset));\n \t\t\tobj_desc->iscan =\n \t\t\t\tindex_beginscan(obj_desc->index_r,\n \t\t\t\t\t\t\t\t(bool) 0, (uint16) 1,\n \t\t\t\t\t\t\t\t&skey);\n \t\t}\n- \n \t\tdo\n \t\t{\n \t\t\tres = index_getnext(obj_desc->iscan, ForwardScanDirection);\n--- 630,655 ----\n \t\t|| obj_desc->offset < obj_desc->lowbyte\n \t\t|| !ItemPointerIsValid(&(obj_desc->htid)))\n \t{\n+ \t\tScanKeyData skey;\n+ \n+ \t\tScanKeyEntryInitialize(&skey, 0x0, 1, F_INT4GE,\n+ \t\t\t\t Int32GetDatum(obj_desc->offset));\n \n \t\t/* initialize scan key if not done */\n \t\tif (obj_desc->iscan == (IndexScanDesc) NULL)\n \t\t{\n \t\t\t/*\n \t\t\t * As scan index may be prematurely closed (on commit), we\n \t\t\t * must use object current offset (was 0) to reinitialize the\n \t\t\t * entry [ PA ].\n \t\t\t */\n \t\t\tobj_desc->iscan =\n \t\t\t\tindex_beginscan(obj_desc->index_r,\n \t\t\t\t\t\t\t\t(bool) 0, (uint16) 1,\n \t\t\t\t\t\t\t\t&skey);\n+ \t\t} else {\n+ \t\t\tindex_rescan(obj_desc->iscan, false, &skey);\n \t\t}\n \t\tdo\n \t\t{\n \t\t\tres = index_getnext(obj_desc->iscan, 
ForwardScanDirection);\n***************\n*** 666,672 ****\n \t\t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n \t\t\t\t\t\t\t &res->heap_iptr, buffer);\n \t\t\tpfree(res);\n! \t\t} while (tuple == (HeapTuple) NULL);\n \n \t\t/* remember this tid -- we may need it for later reads/writes */\n \t\tItemPointerCopy(&tuple->t_ctid, &obj_desc->htid);\n--- 673,679 ----\n \t\t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n \t\t\t\t\t\t\t &res->heap_iptr, buffer);\n \t\t\tpfree(res);\n! \t\t} while (!HeapTupleIsValid(tuple));\n \n \t\t/* remember this tid -- we may need it for later reads/writes */\n \t\tItemPointerCopy(&tuple->t_ctid, &obj_desc->htid);\n***************\n*** 675,680 ****\n--- 682,691 ----\n \t{\n \t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n \t\t\t\t\t\t &(obj_desc->htid), buffer);\n+ \t\tif (!HeapTupleIsValid(tuple)) {\n+ \t\t elog(ERROR,\n+ \t\t \"inv_fetchtup: heap_fetch failed\");\n+ \t\t}\n \t}\n \n \t/*\n***************\n*** 746,757 ****\n \n \tnblocks = RelationGetNumberOfBlocks(hr);\n \n! \tif (nblocks > 0)\n \t\tbuffer = ReadBuffer(hr, nblocks - 1);\n! \telse\n \t\tbuffer = ReadBuffer(hr, P_NEW);\n! \n! \tpage = BufferGetPage(buffer);\n \n \t/*\n \t * If the last page is too small to hold all the data, and it's too\n--- 757,771 ----\n \n \tnblocks = RelationGetNumberOfBlocks(hr);\n \n! \tif (nblocks > 0) {\n \t\tbuffer = ReadBuffer(hr, nblocks - 1);\n! \t\tpage = BufferGetPage(buffer);\n! \t}\n! \telse {\n \t\tbuffer = ReadBuffer(hr, P_NEW);\n! \t\tpage = BufferGetPage(buffer);\n! \t\tPageInit(page, BufferGetPageSize(buffer), 0);\n! \t}\n \n \t/*\n \t * If the last page is too small to hold all the data, and it's too\n***************\n*** 865,876 ****\n \n \t\tnblocks = RelationGetNumberOfBlocks(hr);\n \n! \t\tif (nblocks > 0)\n \t\t\tnewbuf = ReadBuffer(hr, nblocks - 1);\n! \t\telse\n \t\t\tnewbuf = ReadBuffer(hr, P_NEW);\n \n- \t\tnewpage = BufferGetPage(newbuf);\n \t\tfreespc = IFREESPC(newpage);\n \n \t\t/*\n--- 879,894 ----\n \n \t\tnblocks = RelationGetNumberOfBlocks(hr);\n \n! \t\tif (nblocks > 0) {\n \t\t\tnewbuf = ReadBuffer(hr, nblocks - 1);\n! \t\t\tnewpage = BufferGetPage(newbuf);\n! \t\t}\n! \t\telse {\n \t\t\tnewbuf = ReadBuffer(hr, P_NEW);\n+ \t\t\tnewpage = BufferGetPage(newbuf);\n+ \t\t\tPageInit(newpage, BufferGetPageSize(newbuf), 0);\n+ \t\t}\n \n \t\tfreespc = IFREESPC(newpage);\n \n \t\t/*\n***************\n*** 973,978 ****\n--- 991,999 ----\n \tWriteBuffer(buffer);\n \tif (newbuf != buffer)\n \t\tWriteBuffer(newbuf);\n+ \n+ \t/* Tuple id is no longer valid */\n+ \tItemPointerSetInvalid(&(obj_desc->htid));\n \n \t/* done */\n \treturn nwritten;\n\n", "msg_date": "Fri, 4 Jun 1999 17:28:09 +0100 (BST)", "msg_from": "Ian Grant <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Backend problem with large objects" }, { "msg_contents": "What do we do with this. Is it already done? Looks quite old.\n\n\n> Well, after a delay of a lot of months (sorry, huge personal crises!) I am\n> happy to report that this works now, at least on a cursory test. I'll hit\n> on it a bit harder soon and report back, but hopefully this patch'll be in\n> time for 6.5? 
Certainly it works better now than before so maybe include\n> the patch anyway, even if there isn't time to do better testing.\n> \n> Cheers, and many many thanks Tatsuo.\n> \n> Ian\n> \n> ---------- Forwarded message ----------\n> Date: Fri, 12 Feb 1999 23:12:07 +0900\n> From: Tatsuo Ishii <[email protected]>\n> To: [email protected]\n> Cc: Ian Grant <[email protected]>,\n> Bruce Momjian <[email protected]>, [email protected]\n> Subject: Re: [HACKERS] Backend problem with large objects \n> \n> > >Many thanks indeed for this. Unfortunately it doesn't completely work: it\n> > >fixes the problem as reported, but when, instead of writing five\n> > >characters, one at a time, I write five at once, the backend dies in\n> > >the same place it did before. Here's the C code slightly modified to\n> > >reproduce the problem:\n> > \n> > Give me some time. I'm not sure if I could solve new problem, though.\n> > --\n> > Tatsuo Ishii\n> \n> I think I have fixed the problem you mentioned. Ian, could you apply\n> included patches and test again? Note that those are for 6.4.2 and\n> additions to the previous patches.\n> \n> BTW, lobj strangely added a 0 filled disk block at the head of the\n> heap. As a result, even 1-byte-user-data lobj consumes at least 16384\n> bytes (2 disk blocks)! Included patches also fix this problem.\n> \n> To Bruce:\n> Thanks for taking care of my previous patches for current. If\n> included patch is ok, I will make one for current.\n> \n> ---------------------------- cut here ---------------------------------\n> *** postgresql-6.4.2/src/backend/storage/large_object/inv_api.c.orig\tSun Dec 13 14:08:19 1998\n> --- postgresql-6.4.2/src/backend/storage/large_object/inv_api.c\tFri Feb 12 20:21:05 1999\n> ***************\n> *** 624,648 ****\n> \t\t|| obj_desc->offset < obj_desc->lowbyte\n> \t\t|| !ItemPointerIsValid(&(obj_desc->htid)))\n> \t{\n> \n> \t\t/* initialize scan key if not done */\n> \t\tif (obj_desc->iscan == (IndexScanDesc) NULL)\n> \t\t{\n> - \t\t\tScanKeyData skey;\n> - \n> \t\t\t/*\n> \t\t\t * As scan index may be prematurely closed (on commit), we\n> \t\t\t * must use object current offset (was 0) to reinitialize the\n> \t\t\t * entry [ PA ].\n> \t\t\t */\n> - \t\t\tScanKeyEntryInitialize(&skey, 0x0, 1, F_INT4GE,\n> - \t\t\t\t\t\t\t\t Int32GetDatum(obj_desc->offset));\n> \t\t\tobj_desc->iscan =\n> \t\t\t\tindex_beginscan(obj_desc->index_r,\n> \t\t\t\t\t\t\t\t(bool) 0, (uint16) 1,\n> \t\t\t\t\t\t\t\t&skey);\n> \t\t}\n> - \n> \t\tdo\n> \t\t{\n> \t\t\tres = index_getnext(obj_desc->iscan, ForwardScanDirection);\n> --- 630,655 ----\n> \t\t|| obj_desc->offset < obj_desc->lowbyte\n> \t\t|| !ItemPointerIsValid(&(obj_desc->htid)))\n> \t{\n> + \t\tScanKeyData skey;\n> + \n> + \t\tScanKeyEntryInitialize(&skey, 0x0, 1, F_INT4GE,\n> + \t\t\t\t Int32GetDatum(obj_desc->offset));\n> \n> \t\t/* initialize scan key if not done */\n> \t\tif (obj_desc->iscan == (IndexScanDesc) NULL)\n> \t\t{\n> \t\t\t/*\n> \t\t\t * As scan index may be prematurely closed (on commit), we\n> \t\t\t * must use object current offset (was 0) to reinitialize the\n> \t\t\t * entry [ PA ].\n> \t\t\t */\n> \t\t\tobj_desc->iscan =\n> \t\t\t\tindex_beginscan(obj_desc->index_r,\n> \t\t\t\t\t\t\t\t(bool) 0, (uint16) 1,\n> \t\t\t\t\t\t\t\t&skey);\n> + \t\t} else {\n> + \t\t\tindex_rescan(obj_desc->iscan, false, &skey);\n> \t\t}\n> \t\tdo\n> \t\t{\n> \t\t\tres = index_getnext(obj_desc->iscan, ForwardScanDirection);\n> ***************\n> *** 666,672 ****\n> \t\t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n> 
\t\t\t\t\t\t\t &res->heap_iptr, buffer);\n> \t\t\tpfree(res);\n> ! \t\t} while (tuple == (HeapTuple) NULL);\n> \n> \t\t/* remember this tid -- we may need it for later reads/writes */\n> \t\tItemPointerCopy(&tuple->t_ctid, &obj_desc->htid);\n> --- 673,679 ----\n> \t\t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n> \t\t\t\t\t\t\t &res->heap_iptr, buffer);\n> \t\t\tpfree(res);\n> ! \t\t} while (!HeapTupleIsValid(tuple));\n> \n> \t\t/* remember this tid -- we may need it for later reads/writes */\n> \t\tItemPointerCopy(&tuple->t_ctid, &obj_desc->htid);\n> ***************\n> *** 675,680 ****\n> --- 682,691 ----\n> \t{\n> \t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n> \t\t\t\t\t\t &(obj_desc->htid), buffer);\n> + \t\tif (!HeapTupleIsValid(tuple)) {\n> + \t\t elog(ERROR,\n> + \t\t \"inv_fetchtup: heap_fetch failed\");\n> + \t\t}\n> \t}\n> \n> \t/*\n> ***************\n> *** 746,757 ****\n> \n> \tnblocks = RelationGetNumberOfBlocks(hr);\n> \n> ! \tif (nblocks > 0)\n> \t\tbuffer = ReadBuffer(hr, nblocks - 1);\n> ! \telse\n> \t\tbuffer = ReadBuffer(hr, P_NEW);\n> ! \n> ! \tpage = BufferGetPage(buffer);\n> \n> \t/*\n> \t * If the last page is too small to hold all the data, and it's too\n> --- 757,771 ----\n> \n> \tnblocks = RelationGetNumberOfBlocks(hr);\n> \n> ! \tif (nblocks > 0) {\n> \t\tbuffer = ReadBuffer(hr, nblocks - 1);\n> ! \t\tpage = BufferGetPage(buffer);\n> ! \t}\n> ! \telse {\n> \t\tbuffer = ReadBuffer(hr, P_NEW);\n> ! \t\tpage = BufferGetPage(buffer);\n> ! \t\tPageInit(page, BufferGetPageSize(buffer), 0);\n> ! \t}\n> \n> \t/*\n> \t * If the last page is too small to hold all the data, and it's too\n> ***************\n> *** 865,876 ****\n> \n> \t\tnblocks = RelationGetNumberOfBlocks(hr);\n> \n> ! \t\tif (nblocks > 0)\n> \t\t\tnewbuf = ReadBuffer(hr, nblocks - 1);\n> ! \t\telse\n> \t\t\tnewbuf = ReadBuffer(hr, P_NEW);\n> \n> - \t\tnewpage = BufferGetPage(newbuf);\n> \t\tfreespc = IFREESPC(newpage);\n> \n> \t\t/*\n> --- 879,894 ----\n> \n> \t\tnblocks = RelationGetNumberOfBlocks(hr);\n> \n> ! \t\tif (nblocks > 0) {\n> \t\t\tnewbuf = ReadBuffer(hr, nblocks - 1);\n> ! \t\t\tnewpage = BufferGetPage(newbuf);\n> ! \t\t}\n> ! \t\telse {\n> \t\t\tnewbuf = ReadBuffer(hr, P_NEW);\n> + \t\t\tnewpage = BufferGetPage(newbuf);\n> + \t\t\tPageInit(newpage, BufferGetPageSize(newbuf), 0);\n> + \t\t}\n> \n> \t\tfreespc = IFREESPC(newpage);\n> \n> \t\t/*\n> ***************\n> *** 973,978 ****\n> --- 991,999 ----\n> \tWriteBuffer(buffer);\n> \tif (newbuf != buffer)\n> \t\tWriteBuffer(newbuf);\n> + \n> + \t/* Tuple id is no longer valid */\n> + \tItemPointerSetInvalid(&(obj_desc->htid));\n> \n> \t/* done */\n> \treturn nwritten;\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Fri, 4 Jun 1999 12:39:27 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Backend problem with large objects" }, { "msg_contents": ">What do we do with this. Is it already done? Looks quite old.\n\nI believe these have been already in the current source.\n\n---\nTatsuo Ishii\n\n>> Well, after a delay of a lot of months (sorry, huge personal crises!) I am\n>> happy to report that this works now, at least on a cursory test. I'll hit\n>> on it a bit harder soon and report back, but hopefully this patch'll be in\n>> time for 6.5? 
Certainly it works better now than before so maybe include\n>> the patch anyway, even if there isn't time to do better testing.\n>> \n>> Cheers, and many many thanks Tatsuo.\n>> \n>> Ian\n>> \n>> ---------- Forwarded message ----------\n>> Date: Fri, 12 Feb 1999 23:12:07 +0900\n>> From: Tatsuo Ishii <[email protected]>\n>> To: [email protected]\n>> Cc: Ian Grant <[email protected]>,\n>> Bruce Momjian <[email protected]>, [email protected]\n>> Subject: Re: [HACKERS] Backend problem with large objects \n>> \n>> > >Many thanks indeed for this. Unfortunately it doesn't completely work: it\n>> > >fixes the problem as reported, but when, instead of writing five\n>> > >characters, one at a time, I write five at once, the backend dies in\n>> > >the same place it did before. Here's the C code slightly modified to\n>> > >reproduce the problem:\n>> > \n>> > Give me some time. I'm not sure if I could solve new problem, though.\n>> > --\n>> > Tatsuo Ishii\n>> \n>> I think I have fixed the problem you mentioned. Ian, could you apply\n>> included patches and test again? Note that those are for 6.4.2 and\n>> additions to the previous patches.\n>> \n>> BTW, lobj strangely added a 0 filled disk block at the head of the\n>> heap. As a result, even 1-byte-user-data lobj consumes at least 16384\n>> bytes (2 disk blocks)! Included patches also fix this problem.\n>> \n>> To Bruce:\n>> Thanks for taking care of my previous patches for current. If\n>> included patch is ok, I will make one for current.\n>> \n>> ---------------------------- cut here ---------------------------------\n>> *** postgresql-6.4.2/src/backend/storage/large_object/inv_api.c.orig\tSun Dec 13 14:08:19 1998\n>> --- postgresql-6.4.2/src/backend/storage/large_object/inv_api.c\tFri Feb 12 20:21:05 1999\n>> ***************\n>> *** 624,648 ****\n>> \t\t|| obj_desc->offset < obj_desc->lowbyte\n>> \t\t|| !ItemPointerIsValid(&(obj_desc->htid)))\n>> \t{\n>> \n>> \t\t/* initialize scan key if not done */\n>> \t\tif (obj_desc->iscan == (IndexScanDesc) NULL)\n>> \t\t{\n>> - \t\t\tScanKeyData skey;\n>> - \n>> \t\t\t/*\n>> \t\t\t * As scan index may be prematurely closed (on commit), we\n>> \t\t\t * must use object current offset (was 0) to reinitialize the\n>> \t\t\t * entry [ PA ].\n>> \t\t\t */\n>> - \t\t\tScanKeyEntryInitialize(&skey, 0x0, 1, F_INT4GE,\n>> - \t\t\t\t\t\t\t\t Int32GetDatum(obj_desc->offset));\n>> \t\t\tobj_desc->iscan =\n>> \t\t\t\tindex_beginscan(obj_desc->index_r,\n>> \t\t\t\t\t\t\t\t(bool) 0, (uint16) 1,\n>> \t\t\t\t\t\t\t\t&skey);\n>> \t\t}\n>> - \n>> \t\tdo\n>> \t\t{\n>> \t\t\tres = index_getnext(obj_desc->iscan, ForwardScanDirection);\n>> --- 630,655 ----\n>> \t\t|| obj_desc->offset < obj_desc->lowbyte\n>> \t\t|| !ItemPointerIsValid(&(obj_desc->htid)))\n>> \t{\n>> + \t\tScanKeyData skey;\n>> + \n>> + \t\tScanKeyEntryInitialize(&skey, 0x0, 1, F_INT4GE,\n>> + \t\t\t\t Int32GetDatum(obj_desc->offset));\n>> \n>> \t\t/* initialize scan key if not done */\n>> \t\tif (obj_desc->iscan == (IndexScanDesc) NULL)\n>> \t\t{\n>> \t\t\t/*\n>> \t\t\t * As scan index may be prematurely closed (on commit), we\n>> \t\t\t * must use object current offset (was 0) to reinitialize the\n>> \t\t\t * entry [ PA ].\n>> \t\t\t */\n>> \t\t\tobj_desc->iscan =\n>> \t\t\t\tindex_beginscan(obj_desc->index_r,\n>> \t\t\t\t\t\t\t\t(bool) 0, (uint16) 1,\n>> \t\t\t\t\t\t\t\t&skey);\n>> + \t\t} else {\n>> + \t\t\tindex_rescan(obj_desc->iscan, false, &skey);\n>> \t\t}\n>> \t\tdo\n>> \t\t{\n>> \t\t\tres = index_getnext(obj_desc->iscan, ForwardScanDirection);\n>> 
***************\n>> *** 666,672 ****\n>> \t\t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n>> \t\t\t\t\t\t\t &res->heap_iptr, buffer);\n>> \t\t\tpfree(res);\n>> ! \t\t} while (tuple == (HeapTuple) NULL);\n>> \n>> \t\t/* remember this tid -- we may need it for later reads/writes */\n>> \t\tItemPointerCopy(&tuple->t_ctid, &obj_desc->htid);\n>> --- 673,679 ----\n>> \t\t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n>> \t\t\t\t\t\t\t &res->heap_iptr, buffer);\n>> \t\t\tpfree(res);\n>> ! \t\t} while (!HeapTupleIsValid(tuple));\n>> \n>> \t\t/* remember this tid -- we may need it for later reads/writes */\n>> \t\tItemPointerCopy(&tuple->t_ctid, &obj_desc->htid);\n>> ***************\n>> *** 675,680 ****\n>> --- 682,691 ----\n>> \t{\n>> \t\ttuple = heap_fetch(obj_desc->heap_r, SnapshotNow,\n>> \t\t\t\t\t\t &(obj_desc->htid), buffer);\n>> + \t\tif (!HeapTupleIsValid(tuple)) {\n>> + \t\t elog(ERROR,\n>> + \t\t \"inv_fetchtup: heap_fetch failed\");\n>> + \t\t}\n>> \t}\n>> \n>> \t/*\n>> ***************\n>> *** 746,757 ****\n>> \n>> \tnblocks = RelationGetNumberOfBlocks(hr);\n>> \n>> ! \tif (nblocks > 0)\n>> \t\tbuffer = ReadBuffer(hr, nblocks - 1);\n>> ! \telse\n>> \t\tbuffer = ReadBuffer(hr, P_NEW);\n>> ! \n>> ! \tpage = BufferGetPage(buffer);\n>> \n>> \t/*\n>> \t * If the last page is too small to hold all the data, and it's too\n>> --- 757,771 ----\n>> \n>> \tnblocks = RelationGetNumberOfBlocks(hr);\n>> \n>> ! \tif (nblocks > 0) {\n>> \t\tbuffer = ReadBuffer(hr, nblocks - 1);\n>> ! \t\tpage = BufferGetPage(buffer);\n>> ! \t}\n>> ! \telse {\n>> \t\tbuffer = ReadBuffer(hr, P_NEW);\n>> ! \t\tpage = BufferGetPage(buffer);\n>> ! \t\tPageInit(page, BufferGetPageSize(buffer), 0);\n>> ! \t}\n>> \n>> \t/*\n>> \t * If the last page is too small to hold all the data, and it's too\n>> ***************\n>> *** 865,876 ****\n>> \n>> \t\tnblocks = RelationGetNumberOfBlocks(hr);\n>> \n>> ! \t\tif (nblocks > 0)\n>> \t\t\tnewbuf = ReadBuffer(hr, nblocks - 1);\n>> ! \t\telse\n>> \t\t\tnewbuf = ReadBuffer(hr, P_NEW);\n>> \n>> - \t\tnewpage = BufferGetPage(newbuf);\n>> \t\tfreespc = IFREESPC(newpage);\n>> \n>> \t\t/*\n>> --- 879,894 ----\n>> \n>> \t\tnblocks = RelationGetNumberOfBlocks(hr);\n>> \n>> ! \t\tif (nblocks > 0) {\n>> \t\t\tnewbuf = ReadBuffer(hr, nblocks - 1);\n>> ! \t\t\tnewpage = BufferGetPage(newbuf);\n>> ! \t\t}\n>> ! \t\telse {\n>> \t\t\tnewbuf = ReadBuffer(hr, P_NEW);\n>> + \t\t\tnewpage = BufferGetPage(newbuf);\n>> + \t\t\tPageInit(newpage, BufferGetPageSize(newbuf), 0);\n>> + \t\t}\n>> \n>> \t\tfreespc = IFREESPC(newpage);\n>> \n>> \t\t/*\n>> ***************\n>> *** 973,978 ****\n>> --- 991,999 ----\n>> \tWriteBuffer(buffer);\n>> \tif (newbuf != buffer)\n>> \t\tWriteBuffer(newbuf);\n>> + \n>> + \t/* Tuple id is no longer valid */\n>> + \tItemPointerSetInvalid(&(obj_desc->htid));\n>> \n>> \t/* done */\n>> \treturn nwritten;\n>> \n>> \n>\n>\n>-- \n> Bruce Momjian | http://www.op.net/~candle\n> [email protected] | (610) 853-3000\n> + If your life is a hard drive, | 830 Blythe Avenue\n> + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n\n", "msg_date": "Sat, 05 Jun 1999 07:42:03 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Backend problem with large objects " } ]
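For anyone wanting to exercise the large-object storage path this patch touches without writing a libpq test program like the one referenced above, the server-side large-object functions are callable from plain SQL. A minimal sketch, assuming a file readable by the server and current-era function names (the original report drove the bug through the libpq lo_open/lo_write C calls, which is not shown here):

    -- create an empty large object; the call returns its OID
    SELECT lo_creat(-1);

    -- or create and fill one from a server-side file in a single call
    SELECT lo_import('/tmp/sample.dat');

    -- drop it again; 16409 stands in for whichever OID the calls above returned
    SELECT lo_unlink(16409);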
[ { "msg_contents": "Hi Hackers,\n\nI found some bugs in the latest snapshot (dated 19990606). I had some\ncompilation errors which I have fixed with the following patches:\n\necpg.patch (wrong makefile expansion in some cases)\n\n\tthe ecpg Makefiles use a variable DESTDIR which is never defined\n\texcept by debian/rules makefile, in which case the ecpg makefiles\n\texpand wrong pathnames. If we want to support a DESTDIR root it\n\tmust be done consistently in all the makefiles, not just in ecpg.\n\nman.patch (make inconsistency)\n\n\tthe default target is 'install' instead of 'all'. So if you do a\n\tmake without target you actually do a make install, which is not\n\twhat one normally expects from a standard makefile.\n\npostgres.patch (compilation error)\n\n\tin postgres.c someone changed TRACE_PLAN to TRACE_PRETTY_PLAN\n\twhich is not defined anywhere. Seems unfinished work in progress.\n\ntrace.patch (compilation error)\n\n\tthe gettimeofday doesn't compile under Linux with glibc2 because\n\tthe DST_NONE constant is no more defined. It seems that this code\n\t(written by me) has always be wrong but for some reason working.\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+", "msg_date": "Fri, 4 Jun 1999 23:37:53 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "bugs in version 6.5" }, { "msg_contents": "\nI applied the ecpg, man and trace patches, but not the postgres one...is\nthis something something that one *is* working on? \n\n>From what I can tell, in the logs, this was added to rev 1.112 of\npostgres.c (the TRACE_PRETTY_PLAN stuff)...by Jan:\n\n============================================================\nrevision 1.112\ndate: 1999/05/11 09:06:31; author: wieck; state: Exp; lines: +53 -19\nChanged debug options:\n\n-d4 now prints compressed trees from nodeToString()\n-d5 prints pretty trees via nodeDisplay()\n\nnew pg_options: pretty_plan, pretty_parse, pretty_rewritten\n\nJan\n============================================================\n\nThat was 3(?) weeks ago, and nobody has reported any problems with it so\nfar?\n\nJan? Comments?\n\n\nOn Fri, 4 Jun 1999, Massimo Dal Zotto wrote:\n\n> Hi Hackers,\n> \n> I found some bugs in the latest snapshot (dated 19990606). I had some\n> compilation errors which I have fixed with the following patches:\n> \n> ecpg.patch (wrong makefile expansion in some cases)\n> \n> \tthe ecpg Makefiles use a variable DESTDIR which is never defined\n> \texcept by debian/rules makefile, in which case the ecpg makefiles\n> \texpand wrong pathnames. If we want to support a DESTDIR root it\n> \tmust be done consistently in all the makefiles, not just in ecpg.\n> \n> man.patch (make inconsistency)\n> \n> \tthe default target is 'install' instead of 'all'. So if you do a\n> \tmake without target you actually do a make install, which is not\n> \twhat one normally expects from a standard makefile.\n> \n> postgres.patch (compilation error)\n> \n> \tin postgres.c someone changed TRACE_PLAN to TRACE_PRETTY_PLAN\n> \twhich is not defined anywhere. 
Seems unfinished work in progress.\n> \n> trace.patch (compilation error)\n> \n> \tthe gettimeofday doesn't compile under Linux with glibc2 because\n> \tthe DST_NONE constant is no more defined. It seems that this code\n> \t(written by me) has always be wrong but for some reason working.\n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Sat, 5 Jun 1999 01:24:13 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PATCHES] bugs in version 6.5" } ]
[ { "msg_contents": "Hi,\n\nI have updated my contrib code for version 6.5. In the attachment you will\nfind the directories array, datetime, miscutil, string, tools and userlocks\nwhich replace the corresponding directories under contrib.\n\nIn contrib/tools you will find some developement scripts which I use while\nhacking the sources. I hope they will be useful for some other people.\n\nI have also added a contrib/Makefile which tries to compile and install all\nthe contribs. Unfortunately many of them don't have a Makefile or don't\ncompile cleanly.\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+", "msg_date": "Fri, 4 Jun 1999 23:46:38 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "contrib code for 6.5" }, { "msg_contents": "What should we do with this? I can do it, but I am unsure about it.\n\n> Hi,\n> \n> I have updated my contrib code for version 6.5. In the attachment you will\n> find the directories array, datetime, miscutil, string, tools and userlocks\n> which replace the corresponding directories under contrib.\n> \n> In contrib/tools you will find some developement scripts which I use while\n> hacking the sources. I hope they will be useful for some other people.\n> \n> I have also added a contrib/Makefile which tries to compile and install all\n> the contribs. Unfortunately many of them don't have a Makefile or don't\n> compile cleanly.\n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n[application/x-gtar is not supported, skipping...]\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 5 Jun 1999 12:54:44 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: contrib code for 6.5" }, { "msg_contents": "\nI wasn't sure either, so close to a release :(\n\n\nOn Sat, 5 Jun 1999, Bruce Momjian wrote:\n\n> What should we do with this? I can do it, but I am unsure about it.\n> \n> > Hi,\n> > \n> > I have updated my contrib code for version 6.5. In the attachment you will\n> > find the directories array, datetime, miscutil, string, tools and userlocks\n> > which replace the corresponding directories under contrib.\n> > \n> > In contrib/tools you will find some developement scripts which I use while\n> > hacking the sources. I hope they will be useful for some other people.\n> > \n> > I have also added a contrib/Makefile which tries to compile and install all\n> > the contribs. 
Unfortunately many of them don't have a Makefile or don't\n> > compile cleanly.\n> > \n> > -- \n> > Massimo Dal Zotto\n> > \n> > +----------------------------------------------------------------------+\n> > | Massimo Dal Zotto email: [email protected] |\n> > | Via Marconi, 141 phone: ++39-0461534251 |\n> > | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> > | Italy pgp: finger [email protected] |\n> > +----------------------------------------------------------------------+\n> > \n> \n> [application/x-gtar is not supported, skipping...]\n> \n> \n> -- \n> Bruce Momjian | http://www.op.net/~candle\n> [email protected] | (610) 853-3000\n> + If your life is a hard drive, | 830 Blythe Avenue\n> + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Sat, 5 Jun 1999 15:36:16 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PATCHES] Re: contrib code for 6.5" }, { "msg_contents": "> \n> I wasn't sure either, so close to a release :(\n> \n\nYes. I will try and generate an actual diff so we can review it.\n\n\n> \n> On Sat, 5 Jun 1999, Bruce Momjian wrote:\n> \n> > What should we do with this? I can do it, but I am unsure about it.\n> > \n> > > Hi,\n> > > \n> > > I have updated my contrib code for version 6.5. In the attachment you will\n> > > find the directories array, datetime, miscutil, string, tools and userlocks\n> > > which replace the corresponding directories under contrib.\n> > > \n> > > In contrib/tools you will find some developement scripts which I use while\n> > > hacking the sources. I hope they will be useful for some other people.\n> > > \n> > > I have also added a contrib/Makefile which tries to compile and install all\n> > > the contribs. Unfortunately many of them don't have a Makefile or don't\n> > > compile cleanly.\n> > > \n> > > -- \n> > > Massimo Dal Zotto\n> > > \n> > > +----------------------------------------------------------------------+\n> > > | Massimo Dal Zotto email: [email protected] |\n> > > | Via Marconi, 141 phone: ++39-0461534251 |\n> > > | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> > > | Italy pgp: finger [email protected] |\n> > > +----------------------------------------------------------------------+\n> > > \n> > \n> > [application/x-gtar is not supported, skipping...]\n> > \n> > \n> > -- \n> > Bruce Momjian | http://www.op.net/~candle\n> > [email protected] | (610) 853-3000\n> > + If your life is a hard drive, | 830 Blythe Avenue\n> > + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n> > \n> \n> Marc G. Fournier ICQ#7615664 IRC Nick: Scrappy\n> Systems Administrator @ hub.org \n> primary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 5 Jun 1999 14:53:32 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PATCHES] Re: contrib code for 6.5" }, { "msg_contents": "I have make a diff, and applied the diff, while adding the new files\nfrom the distribution\n\nOne item stuck out as a problem:\n\n\t+ # Strip debian/rules install prefix if defined\n\t+ REAL_MODDIR = $(shell echo $(MODDIR) | sed 's|.*/debian/tmp||')\n\nI really could not install these lines, and replaced mentions of\nREAL_MODDIR with MODDIR. This seems too site-specific.\n\n> Hi,\n> \n> I have updated my contrib code for version 6.5. In the attachment you will\n> find the directories array, datetime, miscutil, string, tools and userlocks\n> which replace the corresponding directories under contrib.\n> \n> In contrib/tools you will find some developement scripts which I use while\n> hacking the sources. I hope they will be useful for some other people.\n> \n> I have also added a contrib/Makefile which tries to compile and install all\n> the contribs. Unfortunately many of them don't have a Makefile or don't\n> compile cleanly.\n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n[application/x-gtar is not supported, skipping...]\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 5 Jun 1999 15:10:00 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: contrib code for 6.5" }, { "msg_contents": "> \n> I wasn't sure either, so close to a release :(\n> \n\nVadim has asked for two more days, and the patch looked reasonable, so I\napplied it, with the mentioned modification.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 5 Jun 1999 15:10:42 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PATCHES] Re: contrib code for 6.5" } ]
[ { "msg_contents": "Tried to get changes:\n\npg@nature:~/cvs$ uppgsql \nFatal error, aborting.\n: no such user\n\n\tRegards,\n\n\t\tOleg\n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Sat, 5 Jun 1999 03:20:59 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": true, "msg_subject": "problem with cvs ?" }, { "msg_contents": "\nThere are no problems that I'm immediately aware of...what does your\nuppgsql script look like?\n\nOh, wait, damn...fixed...\n\nOn Sat, 5 Jun 1999, Oleg Bartunov wrote:\n\n> Tried to get changes:\n> \n> pg@nature:~/cvs$ uppgsql \n> Fatal error, aborting.\n> : no such user\n> \n> \tRegards,\n> \n> \t\tOleg\n> \n> _____________________________________________________________\n> Oleg Bartunov, sci.researcher, hostmaster of AstroNet,\n> Sternberg Astronomical Institute, Moscow University (Russia)\n> Internet: [email protected], http://www.sai.msu.su/~megera/\n> phone: +007(095)939-16-83, +007(095)939-23-83\n> \n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Sat, 5 Jun 1999 01:08:02 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] problem with cvs ?" } ]
[ { "msg_contents": "I obtained Oracle for Linux and did some comparisons with PostgreSQL\n6.5 current using the test suite I mentioned before that is good to\ncreate heavy loads.\n\nWith default postmaster settings (postmaster -S -i), PostgreSQL was\nseveral times slower than Oracle. However with -F (postmaster -S -i -o\n'-F'), PostgreSQL was much faster than the default settings. Yes, this\nis well known behavior of PostgreSQL. Without -F PostgreSQL does\nfsync() every time a transaction is committed, and it is the bottle\nneck of the performance. I observed the disk activity LED almost\nalways on while running PostgreSQL without -F. However with -F, there\nmay be a chance that we loose committed data if the computer gets\ncrashed.\n\nOn the other hand the LED was on only every few secs while running\nOracle. I heard that Oracle has a \"REDO log file\" and a log is written\ninto there when a transaction is committed. If so, apparently Oracle\ndoes not issue sync() or fsync() every time a transaction gets\ncommitted. I don't know how Oracle guarantees the log be written into\nthe disk without sync() or fsync() at the commit time, but seems\nsomething like it is one of the most important technique to enhance\nthe performance of PostgreSQL.\n\nDoes anybody have an idea on this?\n---\nTatsuo Ishii\n", "msg_date": "Sat, 05 Jun 1999 08:50:06 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "At 08:50 AM 6/5/99 +0900, Tatsuo Ishii wrote:\n>\n>On the other hand the LED was on only every few secs while running\n>Oracle. I heard that Oracle has a \"REDO log file\" and a log is written\n>into there when a transaction is committed. If so, apparently Oracle\n>does not issue sync() or fsync() every time a transaction gets\n>committed. I don't know how Oracle guarantees the log be written into\n>the disk without sync() or fsync() at the commit time, but seems\n>something like it is one of the most important technique to enhance\n>the performance of PostgreSQL.\n\n>Does anybody have an idea on this?\n\nIt's a well-known bug in the current Oracle release for Linux,\nthe redo log is supposed to be fsynch'd on commitment. Oracle\ndoes fsynch on other Unices. It will be interesting to see if\nthe upcoming 8.1.5 (or \"8i\", \"i\" for internet, as it's called)\nwill have the bug fixed. This still won't cause a lot of\ndisk thrashing in a recommended Oracle installation as the\nredo log should be on a separate spindle from any db spindles,\nand Oracle grabs the entire file when the db's created in\norder to increase the odds that the file will be one sequential\nseries of blocks (of course, real Oracle studs use raw disks\nin which case the db can guarantee serial block writing). \n\nThere's a separate demeon hanging around that writes dirty\ndatabase pages to the disk at its leisure.\n\nOf course, if I've understood past postings to this list,\nPostgres also fsynch's after read-only selects, too, and\nmy own experience would seem to confirm it (putting a\nstring of selects in a transaction makes the disk get\nquiet, just as it does with inserts). 
\n\nI can guarantee that Oracle NEVER does that :)\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Fri, 04 Jun 1999 17:18:07 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "Don Baccus <[email protected]> writes:\n> Of course, if I've understood past postings to this list,\n> Postgres also fsynch's after read-only selects, too,\n\nI recently learned something about this that I hadn't understood before.\nWhen a tuple is written out during an insert/update transaction, it is\nmarked as not definitely committed (since of course Postgres can't know\nwhether you'll abort the transaction later). The ID of the transaction\nthat wrote it is stored with it. Subsequently, whenever the tuple is\nscanned, the backend has to go to the \"transaction log\" to see if that\ntransaction has been committed yet --- if not, it ignores the tuple.\n\nAs soon as the transaction is known to be committed, the next operation\nthat visits that tuple will mark it as \"known committed\", so as to avoid\nfuture consultations of the transaction log. This happens *even if the\ncurrent operation is a select*. That is why selects can cause disk\nwrites in Postgres.\n\nSimilar things happen when a tuple is replaced or deleted, of course.\n\nIn short, if you load a bunch of tuples into a table, the first select\nafter the load can run a lot slower than you might expect, because it'll\nbe writing back most or all of the pages it touches. But that penalty\ndoesn't affect every select, only the first one to scan a newly-written\ntuple.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 05 Jun 1999 11:31:10 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "At 11:31 AM 6/5/99 -0400, Tom Lane wrote:\n\n>In short, if you load a bunch of tuples into a table, the first select\n>after the load can run a lot slower than you might expect, because it'll\n>be writing back most or all of the pages it touches. But that penalty\n>doesn't affect every select, only the first one to scan a newly-written\n>tuple.\n\nWhile I don't doubt your analysis is correct for the case you've\nuncovered, it doesn't explain why surrounding a bunch of selects\nwith a begin/end block greatly descreases disk activity for tables\nthat don't change. I'm pulling out \"select\" lists (html <select>)\nfrom small tables of counties, states, countries for the project\nI'm working on. The two countries, for instance, are \"USA\" and\n\"CA\" and the table's not been updated in two months :). I'm\nbuilding a form and doing a very simple \"select * from county_locales\"\ntype selects, then building a <select> list containing all of the\npossible values (not as many as you might think, this project\ninvolves only the Pacific Northwest). There are several of\nthese selects executed for each form. Without the transaction\nblock, there's a lot of disk activity. With it, much less.\n\nI can go pull out the begin/end blocks, they're conditionalized\nin my Tcl scripts based on a \"postgres\" predicate so they'll\ndisappear if I migrate the database to another engine. 
Maybe\nI'll have time this afternoon, if you'd like me to confirm, I'm\ngoing to a brunch right now...\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Sat, 05 Jun 1999 10:10:07 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "> In short, if you load a bunch of tuples into a table, the first select\n> after the load can run a lot slower than you might expect, because it'll\n> be writing back most or all of the pages it touches. But that penalty\n> doesn't affect every select, only the first one to scan a newly-written\n> tuple.\n\nI have removed this from the TODO list:\n\n\t* Prevent fsync in SELECT-only queries\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 5 Jun 1999 14:14:58 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Don Baccus <[email protected]> writes:\n> While I don't doubt your analysis is correct for the case you've\n> uncovered, it doesn't explain why surrounding a bunch of selects\n> with a begin/end block greatly descreases disk activity for tables\n> that don't change.\n\nHmm, I'm not sure why that should be, either. Anyone?\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 05 Jun 1999 14:25:20 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "Tom Lane wrote:\n> \n> Don Baccus <[email protected]> writes:\n> > While I don't doubt your analysis is correct for the case you've\n> > uncovered, it doesn't explain why surrounding a bunch of selects\n> > with a begin/end block greatly descreases disk activity for tables\n> > that don't change.\n> \n> Hmm, I'm not sure why that should be, either. Anyone?\n\n>From a recent discussion I remember that every block that is read \nin is marked as dirty, regardless of weather it is modified or not.\n\nIt is not a genuine bug (as it only slows thong down instead of \ngetting wrong results), but still a misfeature.\n\nIt is most likely an ancient quickfix for some execution path that \nfailed to set the dirty mark when it should have.\n\n---------------------\nHannu\n", "msg_date": "Sat, 05 Jun 1999 23:51:32 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Hannu Krosing <[email protected]> writes:\n>> Hmm, I'm not sure why that should be, either. Anyone?\n\n> From a recent discussion I remember that every block that is read \n> in is marked as dirty, regardless of weather it is modified or not.\n\nNo, that was me claiming that, on the basis of a profile I had taken\nthat showed an unreasonably large number of writes --- but the case\nI was profiling was a selective UPDATE on a table that had just\nbeen loaded. 
When I repeated the test, the number of writes decreased\nto the right ballpark.\n\nI am not sure what effect Don is seeing, but I don't think it's quite\nas dumb a mistake as that...\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 05 Jun 1999 16:58:23 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "At 11:51 PM 6/5/99 +0300, Hannu Krosing wrote:\n\n>It is not a genuine bug (as it only slows thong down instead of \n>getting wrong results), but still a misfeature.\n\nWell, it depends on how one defines \"bug\", I suppose :) In the\nstrictest sense you're correct, yet for real world use, particularly\nin environments with high traffic, it's a killer. \n\n>It is most likely an ancient quickfix for some execution path that \n>failed to set the dirty mark when it should have.\n\nYep, I remember this from the earlier conversation, too.\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Sat, 05 Jun 1999 19:19:29 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "At 04:58 PM 6/5/99 -0400, Tom Lane wrote:\n\n>I am not sure what effect Don is seeing, but I don't think it's quite\n>as dumb a mistake as that...\n\nIf you want, I can wait until the 6.5 release is out, then \nplay some more to make sure I can make the disk thrash with\nold tables. This certainly isn't the kind of thing that\ndeserves rush treatment. \n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Sat, 05 Jun 1999 19:23:29 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "> While I don't doubt your analysis is correct for the case you've\n> uncovered, it doesn't explain why surrounding a bunch of selects\n> with a begin/end block greatly descreases disk activity for tables\n> that don't change. I'm pulling out \"select\" lists (html <select>)\n> from small tables of counties, states, countries for the project\n> I'm working on. The two countries, for instance, are \"USA\" and\n> \"CA\" and the table's not been updated in two months :). I'm\n> building a form and doing a very simple \"select * from county_locales\"\n> type selects, then building a <select> list containing all of the\n> possible values (not as many as you might think, this project\n> involves only the Pacific Northwest). There are several of\n> these selects executed for each form. Without the transaction\n> block, there's a lot of disk activity. With it, much less.\n> \n> I can go pull out the begin/end blocks, they're conditionalized\n> in my Tcl scripts based on a \"postgres\" predicate so they'll\n> disappear if I migrate the database to another engine. Maybe\n> I'll have time this afternoon, if you'd like me to confirm, I'm\n> going to a brunch right now...\n\nPostgreSQL writes into pg_log each time a transaction gets committed\neven if it is a read only one. Once whatever file writings happen in\nthe transaction, fsync() would be forced at the commit time. Probably\nthat's why you observe less disk activity when you surround some\nselects in begin/end blocks.\n\nBy the way, may I ask more question regarding Oracle? You mentioned\nthe magic of no-fsync in Oracle is actually a bug. 
Ok, I understand. I\nalso heard that Oracle does some kind of redo-log bufferings. Does\nthis mean certain committed data might be lost if the system crashed\nbefore the buffered data is written into the disk?\n---\nTatsuo Ishii\n", "msg_date": "Sun, 06 Jun 1999 12:22:03 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "> By the way, may I ask more question regarding Oracle? You mentioned\n> the magic of no-fsync in Oracle is actually a bug. Ok, I understand. I\n> also heard that Oracle does some kind of redo-log bufferings. Does\n> this mean certain committed data might be lost if the system crashed\n> before the buffered data is written into the disk?\n\nThat is my guess. Informix does that. No run runs with non-buffered\nlogging. They run with buffered logging, which may loose transactions\nfor a few seconds or minutes before a crash.\n\nI think we need that, and it should be the default, but few people agree\nwith me. I have some schemes to do this.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 00:41:02 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> I think we need that, and it should be the default, but few people agree\n> with me. I have some schemes to do this.\n\nI believe you're absolutely right. To most people, performance matters\nmore than security in a system break down. After all we're talking\nLinux, FreeBSD and other systems here. And if people worry, they can\nbuy UPS'es, duplicate hardware and stuff. It's extremely rare that the\nhardware needs to fail.\n\nTo counter this, I think Postgresql needs some roll forward mechanism.\nMaybe that's what Vadim means with savepoints? Now we're at the\nEnterprise end, i could add that companies need hot backup. And if you\ninclude the parallelizing server I believe the commercial community\nwill be served very well.\n\nI was at a seminar last week where Oracle bragged about 8i. Maybe\nPostgresql some time in the future could have hooks for other\nlanguages? I know there's a PL-thing and a C-thing, but I would\npersonally like a Perl interface.\n\n", "msg_date": "Sun, 6 Jun 1999 07:25:06 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "On a personal note, I hope that outer joins and views on unions will\nget attention in 6.6.\n\nRyan Bradetich is working on views. Maybe I can get my wish in 6.6?\n\nIf Tom's idea about removing the 8K tuble limit and Bruce's idea about\nrelaxed sync'ing will make it into the next release, it should be\nversion 7.0 in my opinion.\n\n", "msg_date": "Sun, 6 Jun 1999 09:04:37 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Kaare Rasmussen wrote:\n> \n> I was at a seminar last week where Oracle bragged about 8i. Maybe\n> Postgresql some time in the future could have hooks for other\n> languages? I know there's a PL-thing and a C-thing, but I would\n> personally like a Perl interface.\n\nThe hooks are already in place, thanks to Jan. 
\nHe started by Tcl first and PL only after that.\nIt should be quite possible to add others with not too much work.\n\nHannu\n", "msg_date": "Sun, 06 Jun 1999 12:11:14 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Tom Lane wrote:\n> \n> Don Baccus <[email protected]> writes:\n> > Of course, if I've understood past postings to this list,\n> > Postgres also fsynch's after read-only selects, too,\n> \n> I recently learned something about this that I hadn't understood before.\n> When a tuple is written out during an insert/update transaction, it is\n> marked as not definitely committed (since of course Postgres can't know\n> whether you'll abort the transaction later). The ID of the transaction\n> that wrote it is stored with it. Subsequently, whenever the tuple is\n> scanned, the backend has to go to the \"transaction log\" to see if that\n> transaction has been committed yet --- if not, it ignores the tuple.\n> \n> As soon as the transaction is known to be committed, the next operation\n> that visits that tuple will mark it as \"known committed\", so as to avoid\n> future consultations of the transaction log. This happens *even if the\n> current operation is a select*. That is why selects can cause disk\n> writes in Postgres.\n\nRight. But we could avoid fsync for such write operation, i.e.\ndo write call but not fsync. This will not avoid real disk writes\nbut select will not wait for them.\n\nVadim\n", "msg_date": "Sun, 06 Jun 1999 20:20:23 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> > In short, if you load a bunch of tuples into a table, the first select\n> > after the load can run a lot slower than you might expect, because it'll\n> > be writing back most or all of the pages it touches. But that penalty\n> > doesn't affect every select, only the first one to scan a newly-written\n> > tuple.\n> \n> I have removed this from the TODO list:\n> \n> * Prevent fsync in SELECT-only queries\n\nWhen selecting (i.e. - read-only) transaction commits,\nit change pg_log - we obviously can avoid this!\nNo sense to store commit/abort status of read-only xactions!\n\nVadim\n", "msg_date": "Sun, 06 Jun 1999 20:22:44 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Tom Lane wrote:\n> \n> Don Baccus <[email protected]> writes:\n> > While I don't doubt your analysis is correct for the case you've\n> > uncovered, it doesn't explain why surrounding a bunch of selects\n> > with a begin/end block greatly descreases disk activity for tables\n> > that don't change.\n> \n> Hmm, I'm not sure why that should be, either. 
Anyone?\n\npg_log fsync for read-only xactions...\nAnd more of that, commit fsyncs ALL dirty buffers\nin pool, even dirtied not by xaction being committed!\n\nVadim\n", "msg_date": "Sun, 06 Jun 1999 20:25:52 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Hannu Krosing wrote:\n> \n> Tom Lane wrote:\n> >\n> > Don Baccus <[email protected]> writes:\n> > > While I don't doubt your analysis is correct for the case you've\n> > > uncovered, it doesn't explain why surrounding a bunch of selects\n> > > with a begin/end block greatly descreases disk activity for tables\n> > > that don't change.\n> >\n> > Hmm, I'm not sure why that should be, either. Anyone?\n> \n> >From a recent discussion I remember that every block that is read\n> in is marked as dirty, regardless of weather it is modified or not.\n\nNo! \n\nVadim\n", "msg_date": "Sun, 06 Jun 1999 20:26:46 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Kaare Rasmussen wrote:\n> \n> > I think we need that, and it should be the default, but few people agree\n> > with me. I have some schemes to do this.\n\nI remember this, Bruce. But I would like to see it implemented\nin right way. I'm not happy with \"two sync() in postmaster\" idea.\nWe have to implement Shared Catalog Cache (SCC), mark all dirtied \nrelation files there and than just fsync() these files, before \nfsync() of pg_log.\n\n> To counter this, I think Postgresql needs some roll forward mechanism.\n> Maybe that's what Vadim means with savepoints? Now we're at the\n\nNo. Savepoints are short-term things, living during xaction.\n\nVadim\n", "msg_date": "Sun, 06 Jun 1999 20:41:52 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> The hooks are already in place, thanks to Jan. \n> He started by Tcl first and PL only after that.\n> It should be quite possible to add others with not too much work.\n\nExplain a bit more - I'd like to have a Perl interface. It has to be\nadded by some of the clever postgresql hackers? A non-C-speaking\nindividual like me can't do it, right?\n\n", "msg_date": "Sun, 6 Jun 1999 16:35:14 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "At 12:22 PM 6/6/99 +0900, Tatsuo Ishii wrote:\n\n>By the way, may I ask more question regarding Oracle? You mentioned\n>the magic of no-fsync in Oracle is actually a bug. Ok, I understand. I\n>also heard that Oracle does some kind of redo-log bufferings. Does\n>this mean certain committed data might be lost if the system crashed\n>before the buffered data is written into the disk?\n\nNot sure, actually, I'm by no means an Oracle expert, I was just\npassing alone information gleaned from the Oracle/linux newsgroup.\nYou can access this via the main Oracle website, go to the Oracle\nTechnology Network and register, much as you did to download your\ndeveloper's copy of the db engine. 
Some very experienced Oracle\ntypes hang out there.\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Sun, 06 Jun 1999 11:43:48 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": ">\n> > In short, if you load a bunch of tuples into a table, the first select\n> > after the load can run a lot slower than you might expect, because it'll\n> > be writing back most or all of the pages it touches. But that penalty\n> > doesn't affect every select, only the first one to scan a newly-written\n> > tuple.\n>\n> I have removed this from the TODO list:\n>\n> * Prevent fsync in SELECT-only queries\n\n I think this entry should stay.\n\n In fact, there is a write on every transaction that\n commits/aborts even if it's one that doesn't modify any data.\n pg_log is written for SELECT only transactions too. I'm\n nearly 99.5% sure that not fsync()'ing those transaction\n would not hit reliability and we might have to work it out.\n\n This might be one cause that surrounding a bunch of SELECT\n statements by BEGIN/END speeds up PostgreSQL in non -F mode.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Sun, 6 Jun 1999 20:44:49 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": ">\n> Tom Lane wrote:\n> >\n> > Don Baccus <[email protected]> writes:\n> > > While I don't doubt your analysis is correct for the case you've\n> > > uncovered, it doesn't explain why surrounding a bunch of selects\n> > > with a begin/end block greatly descreases disk activity for tables\n> > > that don't change.\n> >\n> > Hmm, I'm not sure why that should be, either. Anyone?\n>\n> >From a recent discussion I remember that every block that is read\n> in is marked as dirty, regardless of weather it is modified or not.\n>\n> It is not a genuine bug (as it only slows thong down instead of\n> getting wrong results), but still a misfeature.\n>\n> It is most likely an ancient quickfix for some execution path that\n> failed to set the dirty mark when it should have.\n\n Can't believe that this is true - uhhhhhh!\n\n If it is, then it's surely a severe BUG!\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Sun, 6 Jun 1999 20:47:03 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "It would be cool to have Perl interface to postgres internals!\nUnfortunately I'm not C programmer. I think Edmund could do this.\n\n\tRegards,\n\t\n\t\tOleg\n\n\nOn Sun, 6 Jun 1999, Kaare Rasmussen wrote:\n\n> Date: Sun, 6 Jun 1999 16:35:14 +0200 (CEST)\n> From: Kaare Rasmussen <[email protected]>\n> To: [email protected]\n> Subject: Re: [HACKERS] Priorities for 6.6\n> \n> > The hooks are already in place, thanks to Jan. 
\n> > He started by Tcl first and PL only after that.\n> > It should be quite possible to add others with not too much work.\n> \n> Explain a bit more - I'd like to have a Perl interface. It has to be\n> added by some of the clever postgresql hackers? A non-C-speaking\n> individual like me can't do it, right?\n> \n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Sun, 6 Jun 1999 22:58:22 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": ">\n> > By the way, may I ask more question regarding Oracle? You mentioned\n> > the magic of no-fsync in Oracle is actually a bug. Ok, I understand. I\n> > also heard that Oracle does some kind of redo-log bufferings. Does\n> > this mean certain committed data might be lost if the system crashed\n> > before the buffered data is written into the disk?\n>\n> That is my guess. Informix does that. No run runs with non-buffered\n> logging. They run with buffered logging, which may loose transactions\n> for a few seconds or minutes before a crash.\n>\n> I think we need that, and it should be the default, but few people agree\n> with me. I have some schemes to do this.\n\n The major problem in this area is, that with the given model\n of telling which tuples are committed, noone can guarantee a\n consistent PostgreSQL database in the case of a non-fsynced\n crash. You might loose some tuples and might get some\n outdated ones back. But it depends on subsequently executed\n SELECT's which ones and it all doesn't have anything to do\n with transaction boundaries or with the order in which\n transactions committed.\n\n As I understand Oracle the entire reliability depends on the\n redo logs. If a crash is too badly, you can allways restore\n the last backup and recover from that. The database crash\n recovery will roll forward until the last COMMIT that occurs\n in the redolog (except for point in time recovery).\n\n Someone can live with the case, that the last COMMIT's\n (sorted by time) cannot get recovered. But noone can live\n with a database that's left corrupt.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Sun, 6 Jun 1999 21:14:25 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": ">\n> It would be cool to have Perl interface to postgres internals!\n> Unfortunately I'm not C programmer. I think Edmund could do this.\n\n Now I've got a sequence overflow counting the requests for\n something like PL/Perl!\n\n Even if I can't believe, could it be that there are too many\n Perl users vs. Perl developers?\n\n Several times I clearly said that I don't know much more\n about Perl than that it's YASL (Yet Another Scripting\n Language).\n\n But ...\n\n 1. I've designed and implemented the PL interface into the\n PostgreSQL function manager including the CREATE/DROP\n PROCEDURAL LANGUAGE statements.\n\n 2. 
I've added the procedural languages PL/Tcl and PL/pgSQL,\n which are described in the programmers manual and which\n are both used since v6.4 outside in the world.\n\n 3. I've offered help for building PL/Perl several times now.\n But only \"it would be nice if someone else ...\" requests\n are coming in.\n\n Some years ago I searched for a general purpose scripting\n language. I found Tcl, which has a graphical user interface\n (Tk) and is supported on the platforms I need (all UN*X and\n (for sake) Windows-NT/95). Since then I've created a couple\n of Tcl interfaces to things, it cannot do by default (like\n SAP remote function calls - not available as open source so\n don't call for :-( ). The simpleness it uses for interfacing\n foreign things is why it's called the \"Tool Command\n Language\".\n\n This simpleness gave me the power to create PL/Tcl. PL/pgSQL\n was my answer to requests about a native language that\n doesn't depend on any other thing installed on a PostgreSQL\n target system.\n\n To explain point 3 in detail: I still feel responsible for\n the function managers procedural language interface -- since\n I created it. BUT I WOULDN'T LEARN PERL PLUS IT'S API ONLY TO\n PROVIDE PL/Perl TO THE {U|LOO}SER-COMMUNITY! That would mean\n to get responsible for one more thing I don't need for\n myself.\n\n If there's (only one) Perl PROGRAMMER out in the world\n reading this, who does see a (however small) possibility to\n explain how to integrate a Perl interpreter into PostgreSQL,\n RESPOND!!!!!!!!!!!\n\n I'll let y'all know about the responses I got. Even if I\n don't expect a single one where a PL/Perl could result from.\n\n Maybe Perl isn't the scripting language someone should choose\n because it is too limited in it's capabilities - remember\n that real programmers don't use pascal... - maybe real\n programmer wouldn't ever use Perl...\n\n\nMaybe - (please don't) - Jan\n\n>\n> Regards,\n>\n> Oleg\n>\n>\n> On Sun, 6 Jun 1999, Kaare Rasmussen wrote:\n>\n> > Date: Sun, 6 Jun 1999 16:35:14 +0200 (CEST)\n> > From: Kaare Rasmussen <[email protected]>\n> > To: [email protected]\n> > Subject: Re: [HACKERS] Priorities for 6.6\n> >\n> > > The hooks are already in place, thanks to Jan.\n> > > He started by Tcl first and PL only after that.\n> > > It should be quite possible to add others with not too much work.\n> >\n> > Explain a bit more - I'd like to have a Perl interface. It has to be\n> > added by some of the clever postgresql hackers? A non-C-speaking\n> > individual like me can't do it, right?\n\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Mon, 7 Jun 1999 02:04:31 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Oh, I see. SELECT is a transaction, so it flushes pglog. Re-added to\nTODO.\n\n> Bruce Momjian wrote:\n> > \n> > > In short, if you load a bunch of tuples into a table, the first select\n> > > after the load can run a lot slower than you might expect, because it'll\n> > > be writing back most or all of the pages it touches. 
But that penalty\n> > > doesn't affect every select, only the first one to scan a newly-written\n> > > tuple.\n> > \n> > I have removed this from the TODO list:\n> > \n> > * Prevent fsync in SELECT-only queries\n> \n> When selecting (i.e. - read-only) transaction commits,\n> it change pg_log - we obviously can avoid this!\n> No sense to store commit/abort status of read-only xactions!\n> \n> Vadim\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 20:44:37 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Jan Wieck wrote:\n> \n> >\n> > It would be cool to have Perl interface to postgres internals!\n> > Unfortunately I'm not C programmer. I think Edmund could do this.\n> \n> Now I've got a sequence overflow counting the requests for\n> something like PL/Perl!\n> \n[personal view on perl and tcl deleted]\n\nJan:\n\nI've been looking for a project to get me active in the postgresql\ncommunity after lurking since before it was (officially) PostgreSQL. \n\nI will do the PL/Perl interface.\n\nPerl is a great integration tool. This can be seen from the enormous\ngrowth it its use in such areas as CGI programming. And imbedding\nfunctionality into perl from other sources is rarely a hard problem. \n\nBut embedding perl in other applications is not as easy as it could be.\n\n-- \n\nMark Hollomon\[email protected]\n", "msg_date": "Sun, 06 Jun 1999 20:58:49 -0400", "msg_from": "\"Mark Hollomon\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> Kaare Rasmussen wrote:\n> > \n> > > I think we need that, and it should be the default, but few people agree\n> > > with me. I have some schemes to do this.\n> \n> I remember this, Bruce. But I would like to see it implemented\n> in right way. I'm not happy with \"two sync() in postmaster\" idea.\n> We have to implement Shared Catalog Cache (SCC), mark all dirtied \n> relation files there and than just fsync() these files, before \n> fsync() of pg_log.\n\nI see. You want to use the shared catalog cache to flag relations that\nhave been modified, and fsync those before fsync of pglog. Another idea\nis to send a signal to each backend that has marked a bit in shared\nmemory saying it has written to a relation, and have the signal handler\nfsync all its dirty relations, set a finished bit, and have the\npostmaster then fsync pglog. The shared catalog cache still requires\nthe postmaster to open every relation that is marked as dirty to fsync\nit, which could be a performance problem. Now, if we could pass file\ndescriptors between processes, that would make things easy. I think BSD\ncan do it, but I don't believe it is portable.\n\nMy idea would be:\n\nbackend 1 2 3 4 5 6 7 \ndirtied:\n\n 1 2 3 4 5 6 7\nfsync'ed:\n\nEach backend sets it's 'dirtied' bit when it modifies and relation.\n\nEvery 5 seconds, postmaster scans dirtied list, sends signal to each\nbackend that has dirtied. Each backend fsyncs its relations, then sets\nits fsync'ed bit. When all have signaled fsynced, the postmaster can\nupdate pg_log on disk. 
Another issue is that now that we update the\ntransaction status as part of SELECT, pg_log is not the only\nrepresentation of committed status.\n\nOf course, we have to prevent flush of pglog by OS, perhaps by making a\ncopy of the last two pages of pg_log before this and remove it after. \nIf a backend starts up and sees that pg_log copy file, it puts that in\nplace of the current last two pages of pg_log.\n\nAlso, for 6.6, I am going to add system table indexes so all cache\nlookups use indexes. I am unsure that shared catalog cache is going to\ndo that buffer cache doesn't already do. Perhaps if we just flushed the\nsystem table cache buffers less frequently, there would be no need for a\nshared system cache.\n\nBasically, this fsync() thing is killing performance, and I think we can\ncome up with an smart solution to this if we discuss the options.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 21:16:45 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> The major problem in this area is, that with the given model\n> of telling which tuples are committed, noone can guarantee a\n> consistent PostgreSQL database in the case of a non-fsynced\n> crash. You might loose some tuples and might get some\n> outdated ones back. But it depends on subsequently executed\n> SELECT's which ones and it all doesn't have anything to do\n> with transaction boundaries or with the order in which\n> transactions committed.\n> \n> As I understand Oracle the entire reliability depends on the\n> redo logs. If a crash is too badly, you can allways restore\n> the last backup and recover from that. The database crash\n> recovery will roll forward until the last COMMIT that occurs\n> in the redolog (except for point in time recovery).\n> \n> Someone can live with the case, that the last COMMIT's\n> (sorted by time) cannot get recovered. But noone can live\n> with a database that's left corrupt.\n\nYes, I 100% agree. We have to bring the database back to a consistent\ncase where only the last few transactions are not done at all, and all\nprevious ones are completely done. See previous post on methods and\nissues.\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 21:24:02 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Added to TODO list.\n\n> Jan Wieck wrote:\n> > \n> > >\n> > > It would be cool to have Perl interface to postgres internals!\n> > > Unfortunately I'm not C programmer. I think Edmund could do this.\n> > \n> > Now I've got a sequence overflow counting the requests for\n> > something like PL/Perl!\n> > \n> [personal view on perl and tcl deleted]\n> \n> Jan:\n> \n> I've been looking for a project to get me active in the postgresql\n> community after lurking since before it was (officially) PostgreSQL. \n> \n> I will do the PL/Perl interface.\n> \n> Perl is a great integration tool. This can be seen from the enormous\n> growth it its use in such areas as CGI programming. 
And imbedding\n> functionality into perl from other sources is rarely a hard problem. \n> \n> But embedding perl in other applications is not as easy as it could be.\n> \n> -- \n> \n> Mark Hollomon\n> [email protected]\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 21:34:58 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> update pg_log on disk. Another issue is that now that we update the\n> transaction status as part of SELECT, pg_log is not the only\n\nWe should don't update pg_log for read-only xactions.\n\n> representation of committed status.\n> \n> Of course, we have to prevent flush of pglog by OS, perhaps by making a\n> copy of the last two pages of pg_log before this and remove it after.\n> If a backend starts up and sees that pg_log copy file, it puts that in\n> place of the current last two pages of pg_log.\n\nKeep two last pg_log pages in shmem, lock them, copy, unlock,\nwrite copy to pg_log.\n\n> Also, for 6.6, I am going to add system table indexes so all cache\n> lookups use indexes. I am unsure that shared catalog cache is going to\n> do that buffer cache doesn't already do. Perhaps if we just flushed the\n> system table cache buffers less frequently, there would be no need for a\n> shared system cache.\n\nI would like to see ntuples and npages in pg_class up-to-date.\nNow we do fseek for each heap_insert and for each heap_beginscan.\nAnd note that we have to open() system relation files, even\nif pages are in buffer pool.\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 10:55:02 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> Bruce Momjian wrote:\n> > \n> > update pg_log on disk. Another issue is that now that we update the\n> > transaction status as part of SELECT, pg_log is not the only\n> \n> We should don't update pg_log for read-only xactions.\n\nNo, I was saying we mark those SELECT'ed rows as being part of committed\ntransactions. When we SELECT a row, we look at pg_log to see if it is\ncommitted, and mark that row as part of a committed transaction so we\ndon't have to check pg_log again. We can't do that with the system we\ninvisioning until we put pg_log on disk as a committed transaction. \nCould be tricky, though having two copies of pg_log in memory, one\ndisk-copy and one active copy, and use disk-copy for row xact status\nupdates would do the trick.\n\n> \n> > representation of committed status.\n> > \n> > Of course, we have to prevent flush of pglog by OS, perhaps by making a\n> > copy of the last two pages of pg_log before this and remove it after.\n> > If a backend starts up and sees that pg_log copy file, it puts that in\n> > place of the current last two pages of pg_log.\n> \n> Keep two last pg_log pages in shmem, lock them, copy, unlock,\n> write copy to pg_log.\n\nYes, much better. Control what gets to disk by not updating the file at\nall.\n\n> \n> > Also, for 6.6, I am going to add system table indexes so all cache\n> > lookups use indexes. I am unsure that shared catalog cache is going to\n> > do that buffer cache doesn't already do. 
Perhaps if we just flushed the\n> > system table cache buffers less frequently, there would be no need for a\n> > shared system cache.\n> \n> I would like to see ntuples and npages in pg_class up-to-date.\n> Now we do fseek for each heap_insert and for each heap_beginscan.\n> And note that we have to open() system relation files, even\n> if pages are in buffer pool.\n\nWhy do we have to open system tables if already in buffer cache? I\nguess so in case we need to write it out, or fault on another page.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 23:01:59 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> >\n> > > Also, for 6.6, I am going to add system table indexes so all cache\n> > > lookups use indexes. I am unsure that shared catalog cache is going to\n> > > do that buffer cache doesn't already do. Perhaps if we just flushed the\n> > > system table cache buffers less frequently, there would be no need for a\n> > > shared system cache.\n> >\n> > I would like to see ntuples and npages in pg_class up-to-date.\n> > Now we do fseek for each heap_insert and for each heap_beginscan.\n> > And note that we have to open() system relation files, even\n> > if pages are in buffer pool.\n> \n> Why do we have to open system tables if already in buffer cache? I\n> guess so in case we need to write it out, or fault on another page.\n\nJust because of ... heap_open()->RelationBuildDesc() does it.\nMaybe we could delay smgropen?\n\nBut in any case note that big guys have shared catalog cache,\nand this is not because of they haven't good buffer pool -:)\nKeeping page in pool for just single row is not good.\n\n\"Oracle itself accesses the data dictionary frequently during\nthe parsing of SQL statements. This access is essential to the \ncontinuing operation of Oracle. See Chapter 8, \"The Data Dictionary,\" \nfor more information on the data dictionary.\n\n...\n\nCaching of the Data Dictionary for Fast Access\n\nBecause Oracle constantly accesses the data dictionary during database \noperation to validate user access and to verify the state of objects, \nmuch of the data dictionary information is cached in the SGA. All \ninformation is stored in memory using the LRU (least recently\nused) algorithm. Information typically kept in the caches is that \nrequired for parsing.\"\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 11:47:14 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> Just because of ... heap_open()->RelationBuildDesc() does it.\n> Maybe we could delay smgropen?\n> \n> But in any case note that big guys have shared catalog cache,\n> and this is not because of they haven't good buffer pool -:)\n> Keeping page in pool for just single row is not good.\n> \n> \"Oracle itself accesses the data dictionary frequently during\n> the parsing of SQL statements. This access is essential to the \n> continuing operation of Oracle. 
See Chapter 8, \"The Data Dictionary,\" \n> for more information on the data dictionary.\n> \n> ...\n> \n> Caching of the Data Dictionary for Fast Access\n> \n> Because Oracle constantly accesses the data dictionary during database \n> operation to validate user access and to verify the state of objects, \n> much of the data dictionary information is cached in the SGA. All \n> information is stored in memory using the LRU (least recently\n> used) algorithm. Information typically kept in the caches is that \n> required for parsing.\"\n\nI agree we need it. I just think we could use better fsync more, and\nseeing how hard shared catalog cache may be, it may be good to get fsync\nfaster first.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 23:48:43 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "My apologies for the apparent triple posting of my earlier message\nregarding stored procedures. The mailing list send me mail notifying me\nthat the message had bounced due to my not being subscribed so I had tried\nto subscribe and then send again. Somehow, all three eventually go\nthrough.\n\n- K\n\nKristofer Munn * http://www.munn.com/~kmunn/ * ICQ# 352499 * AIM: KrMunn \n\n", "msg_date": "Mon, 7 Jun 1999 01:31:18 -0400 (EDT)", "msg_from": "Kristofer Munn <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "\"Mark Hollomon\" <[email protected]> writes:\n> I've been looking for a project to get me active in the postgresql\n> community after lurking since before it was (officially) PostgreSQL. \n>\n> I will do the PL/Perl interface.\n\nGreat! Glad to hear it.\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 10:50:49 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> ... Another idea\n> is to send a signal to each backend that has marked a bit in shared\n> memory saying it has written to a relation, and have the signal handler\n> fsync all its dirty relations, set a finished bit, and have the\n> postmaster then fsync pglog.\n\nI do not think it's practical to expect any useful work to happen inside\na signal handler. The signal could come at any moment, such as when\ndata structures are being updated and are in a transient invalid state.\nUnless you are willing to do a lot of fooling around with blocking &\nunblocking the signal, about all the handler can safely do is set a flag\nvariable that will be examined somewhere in the backend main loop.\n\nHowever, if enough information is available in shared memory, perhaps\nthe postmaster could do this scan/update/flush all by itself?\n\n> Of course, we have to prevent flush of pglog by OS, perhaps by making a\n> copy of the last two pages of pg_log before this and remove it after. \n> If a backend starts up and sees that pg_log copy file, it puts that in\n> place of the current last two pages of pg_log.\n\nIt seems to me that one or so disk writes per transaction is not all\nthat big a cost. 
Does it take much more than one write to update\npg_log, and if so why?\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 10:57:26 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "Tom Lane wrote:\n> \n> \"Mark Hollomon\" <[email protected]> writes:\n> > I've been looking for a project to get me active in the postgresql\n> > community after lurking since before it was (officially) PostgreSQL.\n> >\n> > I will do the PL/Perl interface.\n> \n> Great! Glad to hear it.\n\nAnd me!\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 23:18:44 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Mark Hollomon wrote:\n>\n> Jan Wieck wrote:\n> >\n> > >\n> > > It would be cool to have Perl interface to postgres internals!\n> > > Unfortunately I'm not C programmer. I think Edmund could do this.\n> >\n> > Now I've got a sequence overflow counting the requests for\n> > something like PL/Perl!\n> >\n> [personal view on perl and tcl deleted]\n\n Really sorry for that. It's not my favorite behaviour to talk\n dirty about something I don't know. But after asking kindly\n several times I thought making someone angry could work - and\n it did :-)\n\n>\n> Jan:\n>\n> I've been looking for a project to get me active in the postgresql\n> community after lurking since before it was (officially) PostgreSQL.\n>\n> I will do the PL/Perl interface.\n\n That's a word! I'll contact you with private mail.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Mon, 7 Jun 1999 17:28:45 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "According to Vadim Mikheev:\n\n> > \"Mark Hollomon\" <[email protected]> writes:\n> > > I've been looking for a project to get me active in the postgresql\n> > > community after lurking since before it was (officially) PostgreSQL.\n> > >\n> > > I will do the PL/Perl interface.\n> > \n> > Great! Glad to hear it.\n> \n> And me!\n\nIt would be really nice if the client/server interface could be\nfixed up to remove the tuple size limits by the time the embedded\nperl interface is added. I think preloading some perl functions\nto get arbitrarily processed output back from a select would be\nhandy for a lot of uses, and even better if we didn't have to worry\nabout the size of the returned \"record\".\n\n Les Mikesell\n [email protected]\n", "msg_date": "Mon, 7 Jun 1999 11:35:12 -0500 (CDT)", "msg_from": "Leslie Mikesell <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> ... 
Another idea\n> is to send a signal to each backend that has marked a bit in shared\n> memory saying it has written to a relation, and have the signal handler\n> fsync all its dirty relations, set a finished bit, and have the\n> postmaster then fsync pglog.\n\nOne other problem with signals is that things get complicated if\nPostgreSQL ever moves to a multi-threading model.\n\n-- \n=====================================================================\n| JAVA must have been developed in the wilds of West Virginia. |\n| After all, why else would it support only single inheritance?? |\n=====================================================================\n| Finger [email protected] for my public key. |\n=====================================================================", "msg_date": "7 Jun 1999 14:39:32 -0400", "msg_from": "Brian E Gallew <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " }, { "msg_contents": "> If there's (only one) Perl PROGRAMMER out in the world\n> reading this, who does see a (however small) possibility to\n> explain how to integrate a Perl interpreter into PostgreSQL,\n> RESPOND!!!!!!!!!!!\n\nWell I'm a Perl programmer. I don't have a clue about how to integrate \nPerl into PostgreSQL, but it should be possible.\n\nI'd like to help. I know Perl, but nothing about the internals of\nPostgreSQL and I don't code C.\n\n> Maybe Perl isn't the scripting language someone should choose\n> because it is too limited in it's capabilities - remember\n\nToo limited? Perl? You're joking.\n\n> that real programmers don't use pascal... - maybe real\n> programmer wouldn't ever use Perl...\n\nThen I'm no real programmer. But then again I program in any language\nthat is needed.\n\n", "msg_date": "Mon, 7 Jun 1999 23:03:03 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> Really sorry for that. It's not my favorite behaviour to talk\n> dirty about something I don't know. But after asking kindly\n> several times I thought making someone angry could work - and\n> it did :-)\n\nI've never seen yout post before. But then again I've not been on\nhackers for too long.\n\n", "msg_date": "Mon, 7 Jun 1999 23:19:03 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "On 07-Jun-99 Kaare Rasmussen wrote:\n>> If there's (only one) Perl PROGRAMMER out in the world\n>> reading this, who does see a (however small) possibility to\n>> explain how to integrate a Perl interpreter into PostgreSQL,\n>> RESPOND!!!!!!!!!!!\n\nEasy. 
\nSee attachment.\n\n>\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...", "msg_date": "Tue, 08 Jun 1999 11:42:24 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Dmitry Samersoff wrote:\n\n>\n> This message is in MIME format\n> --_=XFMail.1.3.p0.FreeBSD:990608114224:212=_\n> Content-Type: text/plain; charset=KOI8-R\n>\n>\n> On 07-Jun-99 Kaare Rasmussen wrote:\n> >> If there's (only one) Perl PROGRAMMER out in the world\n> >> reading this, who does see a (however small) possibility to\n> >> explain how to integrate a Perl interpreter into PostgreSQL,\n> >> RESPOND!!!!!!!!!!!\n>\n> Easy.\n> See attachment.\n>\n> Content-Disposition: attachment; filename=\"loadmail.pl\"\n\nDmitry,\n\n it's well known that a Perl script can access a PostgreSQL\n database. But that's not the thing we're looking for.\n\n For building PL/Perl, the Perl INTERPRETER must be INSIDE the\n backend. Only if it is part of the PostgreSQL backend, it can\n have access to the SPI. It will not work to spawn off an\n external interpreter which then contacts the database back\n via Pg. Thus, there must be some way to link a shared object\n against the Perl libraries and at the time the PostgreSQL\n database backend loads our shared object to call functions in\n the Perl library.\n\n The attachment you've sent is simply a Perl script that does\n some db access. Nice, but not the point. Please show us how\n easy it is to do what we want.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Tue, 8 Jun 1999 11:12:41 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Kaare Rasmussen wrote:\n\n>\n> > If there's (only one) Perl PROGRAMMER out in the world\n> > reading this, who does see a (however small) possibility to\n> > explain how to integrate a Perl interpreter into PostgreSQL,\n> > RESPOND!!!!!!!!!!!\n>\n> Well I'm a Perl programmer. I don't have a clue about how to integrate\n> Perl into PostgreSQL, but it should be possible.\n>\n> I'd like to help. I know Perl, but nothing about the internals of\n> PostgreSQL and I don't code C.\n\n That's the point and why I wrote programmer in capitals (REAL\n PROGRAMMERS STILL THINK AND TALK IN CAPITALS). It's not Perl\n script-writing what's needed at this stage. We need\n programmers that are familiar with the C API of Perl and\n could easily write things like the Pg package. And that's\n still not enough knowledge.\n\n>\n> > Maybe Perl isn't the scripting language someone should choose\n> > because it is too limited in it's capabilities - remember\n>\n> Too limited? Perl? You're joking.\n\n No, I wasn't joking. Up to now there's only one person who\n said that what we need is possible, but it wouldn't be as\n easy as it should be. I didn't talked about how powerful the\n Perl language is. And I know that it's easy to integrate\n anything into Perl. But after all, it's a Perl script that\n has the entire control.\n\n This time, the Perl interpreter has to become a silly little\n working slave. 
Beeing quiet until it's called and quiet\n again after having served one function call until the big\n master PostgreSQL calls him again.\n\n This flexibility requires a real good design of the\n interpreters internals. And that's what I'm addressing here.\n\n>\n> > that real programmers don't use pascal... - maybe real\n> > programmer wouldn't ever use Perl...\n>\n> Then I'm no real programmer. But then again I program in any language\n> that is needed.\n\n You aren't - you're a script writer and that's a today quiche\n eater :-)\n\n The term \"Real Programmer\" is something any hacker should\n know!\n\n Top of the article \"Real Programmers Don't Use Pascal\":\n\n <<Back in the good old days -- the \"Golden Era\" of computers,\n it was easy to separate the men from the boys (sometimes\n called \"Real Men\" and \"Quiche Eaters\" in the literature).\n During this period, the Real Men were the ones that\n understood computer programming, and the Quiche Eaters were\n the ones that didn't. ...>>\n\n Take a look at\n\n http://burks.bton.ac.uk/burks/foldoc/33/86.htm\n\n and follow the links - enjoy.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Tue, 8 Jun 1999 11:46:47 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Hello!\n\nOn Tue, 8 Jun 1999, Jan Wieck wrote:\n> This time, the Perl interpreter has to become a silly little\n> working slave. Beeing quiet until it's called and quiet\n> again after having served one function call until the big\n> master PostgreSQL calls him again.\n> \n> This flexibility requires a real good design of the\n> interpreters internals. And that's what I'm addressing here.\n\n I know exactly 1 (one) program that incorporate (embed) Perl interpreter\n- it is editor VIM (well-known vi-clone from www.vim.org). I think anyone\nwho want to learn how to embed perl may start looking int vim sources.\n Once I tried to compile vim+perl myself, but perl didn't work. I am\nperl-hater, so this probably was the reason.\n Anothe example - mod_perl for Apache - is rather bad example, as\nmod_perl is too big, overcomplicated and too perlish :)\n\n VIM can also be compiled with builtin Python interpreter, and I had no\nproblem compilng and using vim+python. Python is well known for its\nextending and embedding capabilities. mod_python (it is called PyApache) is\nsmall and elegant example of how to embed python, but of course it is not\nas powerful as mod_perl (one cannot touch Apache internals from mod_python,\nthough author lead PyApache development this way).\n Yes, I am biased toward Python, but I cannot say \"I recommend embed\nPython to construct PL/Python\" - I have no time to lead the development,\nand I doubt there are many pythoners here (D'Arcy?).\n\n> Jan\n> \n> --\n> \n> #======================================================================#\n> # It's easier to get forgiveness for being wrong than for being right. #\n> # Let's break this rule - forgive me. 
#\n> #========================================= [email protected] (Jan Wieck) #\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Tue, 8 Jun 1999 14:40:39 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "\nOn 08-Jun-99 Jan Wieck wrote:\n> Kaare Rasmussen wrote:\n> \n>>\n>> > If there's (only one) Perl PROGRAMMER out in the world\n>> > reading this, who does see a (however small) possibility to\n>> > explain how to integrate a Perl interpreter into PostgreSQL,\n>> > RESPOND!!!!!!!!!!!\n>>\n>> Well I'm a Perl programmer. I don't have a clue about how to integrate\n>> Perl into PostgreSQL, but it should be possible.\n>>\n>> I'd like to help. I know Perl, but nothing about the internals of\n>> PostgreSQL and I don't code C.\n> \n> That's the point and why I wrote programmer in capitals (REAL\n> PROGRAMMERS STILL THINK AND TALK IN CAPITALS). It's not Perl\n> script-writing what's needed at this stage. We need\n> programmers that are familiar with the C API of Perl and\n> could easily write things like the Pg package. And that's\n> still not enough knowledge.\n\nOk!\nI have no problems writing C package like Pg.pm to use with Perl 5.x.\n\nHowever, IMHO all tasks requiring such packages \ncan better be done using C++ and STL. \n \n\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Tue, 08 Jun 1999 14:51:10 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Oleg Broytmann wrote:\n> \n> Hello!\n> \n> VIM can also be compiled with builtin Python interpreter, and I had no\n> problem compilng and using vim+python. Python is well known for its\n> extending and embedding capabilities. mod_python (it is called PyApache) is\n> small and elegant example of how to embed python, but of course it is not\n> as powerful as mod_perl (one cannot touch Apache internals from mod_python,\n> though author lead PyApache development this way).\n\nActually abou 1.5 years ago it used to allow access to internals, but \nseemingly nobody used it and so it was thrown out in later versions.\n\n> Yes, I am biased toward Python, but I cannot say \"I recommend embed\n> Python to construct PL/Python\" - I have no time to lead the development,\n> and I doubt there are many pythoners here (D'Arcy?).\n\nI have contemplated it several times, but to be really useful we would \nfirst need a nice interface for returning \"tables\" from PL functions.\n\nI suspect this is not something trivial to add ?\n\nWith it I could use PL/Python to make all kinds of external objects like\nmailboxes\n(both local and POP/IMAP/NNTP), conf files (/etc/passwd, pg_hba.conf),\nDNS/LDAP/...\nqueries or any other nice things available through existing python\nmodules available \nto postgres queries.\n\n-----------------\nHannu\n", "msg_date": "Tue, 08 Jun 1999 17:47:12 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "I re-read the Real Programmers don't use Pascal ---- it beats writing\nthis damned proposal I'm working on. 
If the line about Real Programmers\nuse goto's is anything to go by, then you should nominate Vadim as the\nv6.5 Real Programmer. You should _see_ all those gotos in nbtree.c! \nHe's done a fine job with them too.\n\nBernie\n", "msg_date": "Wed, 09 Jun 1999 00:27:57 +0000", "msg_from": "Bernard Frankpitt <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": ">\n> I re-read the Real Programmers don't use Pascal ---- it beats writing\n> this damned proposal I'm working on. If the line about Real Programmers\n> use goto's is anything to go by, then you should nominate Vadim as the\n> v6.5 Real Programmer. You should _see_ all those gotos in nbtree.c!\n> He's done a fine job with them too.\n\n Vadim is surely one of the real programmers in our project.\n It's not only that he isn't afraid using GOTO's. He also\n know's very well how to speed things up by complicating the\n code :-)\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Wed, 9 Jun 1999 07:57:50 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Real Programmers (was: [HACKERS] Priorities for 6.6)" }, { "msg_contents": "> Perl language is. And I know that it's easy to integrate\n> anything into Perl. But after all, it's a Perl script that\n> has the entire control.\n\nI don't think so, but then again I'm only speculating. If the Apache\npeople can embed an entire Perl interpreter in their Web server,\nshouldn't it be possible for PostgreSQL? Or maybe the Apache people are\nREALLY REAL PROGRAMMERS? :-)\n\n> This flexibility requires a real good design of the\n> interpreters internals. And that's what I'm addressing here.\n\nAs I said, I don't code C. I haven't got the time to learn it right\nnow, and not the time to learn PostgreSQL's internals. If my offer to\nhelp with any Perl question I can help with is below your standards,\nI'm sorry.\n\n> You aren't - you're a script writer and that's a today quiche\n> eater :-)\n\nThat remark shows that you know nothing about Perl. But it's okay; be\nignorant in your own little way ;-]\n\nBtw. How do you define script writing as oposed to programming?\n\n", "msg_date": "Wed, 9 Jun 1999 19:26:32 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Kaare Rasmussen wrote:\n\n> shouldn't it be possible for PostgreSQL? Or maybe the Apache people are\n> REALLY REAL PROGRAMMERS? :-)\n\n There are some.\n\n> > You aren't - you're a script writer and that's a today quiche\n> > eater :-)\n>\n> That remark shows that you know nothing about Perl. But it's okay; be\n> ignorant in your own little way ;-]\n\n Kaare,\n\n I would never really flame on a list like this. And I\n personally prefer scripts wherever possible. Sometimes I\n can't resist to write some humor - just that my kind of humor\n is a little hard to understand. But real programmers don't\n care if other human's understand them as long as their\n computers do. But even then, they have a programming problem,\n and totally don't care any more about human's and their\n communication problems.\n\n>\n> Btw. 
How do you define script writing as oposed to programming?\n\n #define SCRIPT_WRITING \"modern form of quiche eating\"\n #define PROGRAMMING (((READABILITY_OF_WRITTEN_CODING <= 0 && \\\n ABLE_TO_WRITE_FORTRAN_STYLE_IN_C > 4) || \\\n USES_GOTO_EXCESSIVELY) ? TRUE : FALSE)\n\n\nDisclaimer: This entire message isn't serious!\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Wed, 9 Jun 1999 22:34:48 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "Jan Wieck wrote:\n> \n> >\n> > I re-read the Real Programmers don't use Pascal ---- it beats writing\n> > this damned proposal I'm working on. If the line about Real Programmers\n> > use goto's is anything to go by, then you should nominate Vadim as the\n> > v6.5 Real Programmer. You should _see_ all those gotos in nbtree.c!\n> > He's done a fine job with them too.\n\nUnfortunately, I use loops in 9 cases of 10, seems\nlike I have no chance to win nomination -:(\nThough, you should _see_ gotos in heapam.c - maybe there\nis still some chance for me. \n-:)\n\nActually, I just don't think that breaks in loops are always better\nthan gotos.\n\n> Vadim is surely one of the real programmers in our project.\n> It's not only that he isn't afraid using GOTO's. He also\n\nLike someone didn't afraid to use siglongjmp in elog.c.\n\n> know's very well how to speed things up by complicating the\n> code :-)\n\n-:)\n\nVAdim\n", "msg_date": "Thu, 10 Jun 1999 10:34:28 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" }, { "msg_contents": "> I would never really flame on a list like this. And I\n\nWho's flaming. I'm just tickling your side bone. Maybe you'll end up\nbelieving yourself if nobody tells you otherwise ;-}\n\n", "msg_date": "Thu, 10 Jun 1999 06:50:30 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Priorities for 6.6" }, { "msg_contents": "> Actually, I just don't think that breaks in loops are always better\n> than gotos.\n>\n> > Vadim is surely one of the real programmers in our project.\n> > It's not only that he isn't afraid using GOTO's. He also\n>\n> Like someone didn't afraid to use siglongjmp in elog.c.\n\n There are much better ones in the PL handlers! memcpy()'s\n mangling sigjmp_buf's between sigsetjmp() siglongjmp() stuff.\n\n>\n> > know's very well how to speed things up by complicating the\n> > code :-)\n>\n> -:)\n>\n> VAdim\n>\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. 
#\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Thu, 10 Jun 1999 10:44:38 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" }, { "msg_contents": "Jan Wieck wrote:\n> \n> > Actually, I just don't think that breaks in loops are always better\n> > than gotos.\n> >\n> > > Vadim is surely one of the real programmers in our project.\n> > > It's not only that he isn't afraid using GOTO's. He also\n> >\n> > Like someone didn't afraid to use siglongjmp in elog.c.\n> \n> There are much better ones in the PL handlers! memcpy()'s\n> mangling sigjmp_buf's between sigsetjmp() siglongjmp() stuff.\n\nWow! Voodoo!\nI very like such things -:)\nThis is really the way what Real Programmers follow -:)\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 21:54:13 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" }, { "msg_contents": "\nHey, why don't you just overwrite the jmp instruction with a nop....\n\nOn Thu, 10 Jun 1999, Vadim Mikheev wrote:\n\n> Jan Wieck wrote:\n> > \n> > > Actually, I just don't think that breaks in loops are always better\n> > > than gotos.\n> > >\n> > > > Vadim is surely one of the real programmers in our project.\n> > > > It's not only that he isn't afraid using GOTO's. He also\n> > >\n> > > Like someone didn't afraid to use siglongjmp in elog.c.\n> > \n> > There are much better ones in the PL handlers! memcpy()'s\n> > mangling sigjmp_buf's between sigsetjmp() siglongjmp() stuff.\n> \n> Wow! Voodoo!\n> I very like such things -:)\n> This is really the way what Real Programmers follow -:)\n> \n> Vadim\n> \n\nA.J. ([email protected])\nIgnorance is not knowing.\nStupidity is the active pursuit of ignorance.\n\n", "msg_date": "Thu, 10 Jun 1999 16:16:20 +0100 (BST)", "msg_from": "A James Lewis <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" }, { "msg_contents": ">\n>\n> Hey, why don't you just overwrite the jmp instruction with a nop....\n>\n\n Hmmmm - this would require that the code segment is writable\n what it isn't on most modern systems.\n\n But the shared objects are usually compiled with -fPIC\n (position independent code), so it should be possible to copy\n the code segment part of the PL handlers into an malloc()'ed\n area to get it into writable memory and execute it there over\n function pointers...\n\n Nice idea, we'll try it with the upcoming PL/Perl handler.\n\n On second thought, there maybe is another tricky way to\n prevent it all. Copy the entire Perl interpreter into\n malloc()'ed memory and modify it's calls to malloc(), free()\n redirecting them to private ones. Then we have total control\n over it's allocations, can create an image copy of it after\n each some successful calls into another area and in the case\n of a transaction abort reset it to the last valid state by\n restoring the copy.\n\n On third thought, we could also do it the Microsoft way. Hook\n into the kernel's virtual memory control and trace every\n first write operation into a page. At this time we copy the\n old pages state to somewhere else. This will save some\n allocated memory because we only need restorable copies of\n the pages modified since the last save cycle. 
Requires to\n hack down ways to get around access restrictions so the\n postmaster is able to patch the OS kernel at startup (only\n requires root permissions so /dev/kmem can get opened for\n writing), but since this is definitely the best way to do it,\n it's worth the efford.\n\n The result from this work then will become the base for more\n changes. If the postmaster is already patching the kernel,\n it can also take over the process scheduling to optimize the\n system for PostgreSQL performance and we could get rid of\n these damned SYSV IPC semaphores. Finally the postmaster will\n control a new type of block cache, by mapping part's of the\n relations into virtual memory pages of the backends on demand\n avoiding SYSV shared memories too.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Thu, 10 Jun 1999 17:52:24 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" }, { "msg_contents": "\nThen again, I never coded assembler on a modern system...... it was fun\nthough....\n\nCheers for a great database! If you have to delay 6.5 longer, do it...\nit's better to have somthing stable.\n\nJames\n\nOn Thu, 10 Jun 1999, Jan Wieck wrote:\n\n> >\n> >\n> > Hey, why don't you just overwrite the jmp instruction with a nop....\n> >\n> \n> Hmmmm - this would require that the code segment is writable\n> what it isn't on most modern systems.\n> \n> But the shared objects are usually compiled with -fPIC\n> (position independent code), so it should be possible to copy\n> the code segment part of the PL handlers into an malloc()'ed\n> area to get it into writable memory and execute it there over\n> function pointers...\n> \n> Nice idea, we'll try it with the upcoming PL/Perl handler.\n> \n> On second thought, there maybe is another tricky way to\n> prevent it all. Copy the entire Perl interpreter into\n> malloc()'ed memory and modify it's calls to malloc(), free()\n> redirecting them to private ones. Then we have total control\n> over it's allocations, can create an image copy of it after\n> each some successful calls into another area and in the case\n> of a transaction abort reset it to the last valid state by\n> restoring the copy.\n> \n> On third thought, we could also do it the Microsoft way. Hook\n> into the kernel's virtual memory control and trace every\n> first write operation into a page. At this time we copy the\n> old pages state to somewhere else. This will save some\n> allocated memory because we only need restorable copies of\n> the pages modified since the last save cycle. Requires to\n> hack down ways to get around access restrictions so the\n> postmaster is able to patch the OS kernel at startup (only\n> requires root permissions so /dev/kmem can get opened for\n> writing), but since this is definitely the best way to do it,\n> it's worth the efford.\n> \n> The result from this work then will become the base for more\n> changes. If the postmaster is already patching the kernel,\n> it can also take over the process scheduling to optimize the\n> system for PostgreSQL performance and we could get rid of\n> these damned SYSV IPC semaphores. 
Finally the postmaster will\n> control a new type of block cache, by mapping part's of the\n> relations into virtual memory pages of the backends on demand\n> avoiding SYSV shared memories too.\n> \n> \n> Jan\n> \n> --\n> \n> #======================================================================#\n> # It's easier to get forgiveness for being wrong than for being right. #\n> # Let's break this rule - forgive me. #\n> #========================================= [email protected] (Jan Wieck) #\n> \n> \n\nA.J. ([email protected])\nIgnorance is not knowing.\nStupidity is the active pursuit of ignorance.\n\n", "msg_date": "Thu, 10 Jun 1999 17:20:50 +0100 (BST)", "msg_from": "A James Lewis <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" } ]
[ { "msg_contents": "Hello.\n\nI'm busy writing an IP accounting DB using nacctd and postgres.\nThe data gets inserted correctly into the database and I can run the\nselect queries that I desire. \n\nE.g. (select sum(size) from iptraff where srcip = \"209.100.30.2\") gives\nme the total bytes that that particular host sent. Now it would be\n*REALLY* cool if I could do the following: (select sum(size) from\niptraff where scrip = \"209.100.30.0/24\")\nThat would tell me the total outgoing traffic for that subnet.\n\n>From what I understand the relevant code resides in network.c\nunfortunately I am not a C person :-( Perhaps this feature would be\nincluded in the next snapshot/release or someone could help me with my\nparticular installation.\n\nThanks!\nChrisG\n -----!!!-------\n---FREE THE SOURCE---\n -----!!!-------\n\n", "msg_date": "Sat, 5 Jun 1999 02:20:50 +0200 (SAST)", "msg_from": "[email protected]", "msg_from_op": true, "msg_subject": "inet type & select" } ]
[ { "msg_contents": "> How can we create the entire parse tree first,\n> then traverse it?\n> > > We've downloaded postgresql (latest version). We would like\n> > > to print the parse tree. Any help or guidance will be appreciated.\n> > Use EXPLAIN VERBOSE on a query, or run the postmaster from a terminal\n> > window using a large debugging level (\"-d 99\").\n> > If you need more than that, you have to dive into the backend and\n> > insert some print statements (though there are some utilities to help\n> > print these trees).\n\nHi Dan. All of these questions are best asked on the hackers mailing\nlist. Unfortunately I'm leaving town for a few days and don't have\ntime to think about an additional answer for you.\n\nGood luck.\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Sat, 05 Jun 1999 01:37:44 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Printing parse tree" }, { "msg_contents": "See developers FAQ in web site or in doc directory.\n\n> > How can we create the entire parse tree first,\n> > then traverse it?\n> > > > We've downloaded postgresql (latest version). We would like\n> > > > to print the parse tree. Any help or guidance will be appreciated.\n> > > Use EXPLAIN VERBOSE on a query, or run the postmaster from a terminal\n> > > window using a large debugging level (\"-d 99\").\n> > > If you need more than that, you have to dive into the backend and\n> > > insert some print statements (though there are some utilities to help\n> > > print these trees).\n> \n> Hi Dan. All of these questions are best asked on the hackers mailing\n> list. Unfortunately I'm leaving town for a few days and don't have\n> time to think about an additional answer for you.\n> \n> Good luck.\n> \n> - Thomas\n> \n> -- \n> Thomas Lockhart\t\t\t\[email protected]\n> South Pasadena, California\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Fri, 4 Jun 1999 22:03:05 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: Printing parse tree" } ]
[ { "msg_contents": "Here is a parody of the post-6.5 PostgreSQL history. I am interested in\nadditional ideas and comments. Not sure if I am going to publish this\nor put it on the web page yet.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n\n\n\n\n\n\n\n\n\n\nThe History of PostgreSQL\n\n\nPart II(Parody)\nAfter PostgreSQL 6.5, we decided to change some things.  First, we\ncut out the beta testing period.  We decided to make every prime-numbered\nrelease a stable release.  This worked fine for a while, but as the\nversion numbers got larger, the number of prime numbers became rare. \nWe put out more unstable releases to reach the prime stable release version\nnumbers.  We removed the regression tests.  (Saved disk space). \nThe documentation was too hard to maintain, so we removed that too. \nWe no longer ran our source code intending tools so developers would get\nmore pride that the code they contributed was indented in their own unique\nstyle.\nWe became much less restrictive about patches.  Patches that fixed\none bug while introducing five more were now accepted to encourage developers. \nWe decided to accept bug reports only as MS-Word documents, which cut down\non the number of bug reports.  We had to do this because we were getting\ntoo many bug reports because of our new liberal patch application policy.\nWe stripped down the web site so lynx users could read it easier, and\nPalm Pilot users said their browsing was much improved.  We moved\nour web site to WindowsNT to slow down the bug reports coming from the\nmailing list.\nAt this point, we really moved into high gear.  We switched to\nC++ so we could use those nifty // single line comments.  We\nadded Java to some low-level routines.  This slowed things down, so\nwe randomly replaced some functions with assembly language.  As the\nsupported platform list dwindled, the requirement to have Java was not\nas much of a problem, and we got PostgreSQL running on a Java-enabled printer,\nso I think the change was worth it.  Having PostgreSQL in the printer\nhelped because we could format our source code using the database before\nprinting it.  It was really hard to read without that.\nLooking back, we certainly are making progress.  I am just\nnot sure if the progress is forwards or backwards.", "msg_date": "Fri, 4 Jun 1999 22:04:57 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "PostgreSQL History(Parody)" }, { "msg_contents": "On Fri, 4 Jun 1999, Bruce Momjian wrote:\n\n> Here is a parody of the post-6.5 PostgreSQL history. I am interested in\n> additional ideas and comments. Not sure if I am going to publish this\n> or put it on the web page yet.\n\n*rofl* Publish, publish!! :)\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Sat, 5 Jun 1999 11:02:44 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "> Bruce Momjian wrote:\n> \n> Here is a parody of the post-6.5 PostgreSQL history. I am interested in\n> additional ideas and comments. 
Not sure if I am going to publish this\n> or put it on the web page yet.\n\nDamn, just when I told the kids they could use the NT CD for a frisbee.\n\nBtw. the South African Linux Professional Association (http://www.lpa.org.za)\nhad a stand at our Computer Faire. It was across the road from the Microsoft\nStand.\n\nWe were voted two things - the second most ugly stand at the show and the most\npopular stand at the show. We moved over 5000 copies of RH6.0 with the\nguys from the huge expensive MS stand looking on!\n\n--------\nRegards\nTheo\n", "msg_date": "Sun, 06 Jun 1999 13:00:34 +0200", "msg_from": "Theo Kramer <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": ":-)\n\n> <center>\n> <h1>\n> The History of PostgreSQL</h1></center>\n>\n> <center>\n> <h2>\n> Part II(Parody)</h2></center>\n> After PostgreSQL 6.5, we decided to change some things.&nbsp; First, we\n> cut out the beta testing period.&nbsp; We decided to make every prime-numbered\n> release a stable release.&nbsp; This worked fine for a while, but as the\n> version numbers got larger, the number of prime numbers became rare.&nbsp;\n> We put out more unstable releases to reach the prime stable release version\n> numbers.&nbsp; We removed the regression tests.&nbsp; (Saved disk space).&nbsp;\n> The documentation was too hard to maintain, so we removed that too.&nbsp;\n\n Also, as Stonebreaker once said, productional rule systems\n are conceptually simple. After changing our concepts some\n more it was so easy that really a first time BASIC programmer\n could have done it all (we simply simplyfied your concept by\n removing productional rules at all - no concept no problems).\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. 
#\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Sun, 6 Jun 1999 14:27:59 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "On 06-Jun-99 Jan Wieck wrote:\n>:-)\n\nPostgres 111111.001\n\nTo avoid possible portability problems we change touple\nstorage to plain text file.\nAll allowed query is restricted to simple select query:\n\nSo backend looks like \n\npostgres.sh:\n cat $1 \n\nand psql become just simbolic link to vi \n\n;-))))\n\n\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Mon, 07 Jun 1999 17:26:36 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "\nAny plans to integrate MS ActiveGPF into the code base at some point in\nthe future :-) As Stef* says - \"I wan't my GPFs\"\n\n* www.userfriendly.org\n\n->->->->->->->->->->->->->->->->->->---<-<-<-<-<-<-<-<-<-<-<-<-<-<-<-<-<-<\nJames Thompson 138 Cardwell Hall Manhattan, Ks 66506 785-532-0561 \nKansas State University Department of Mathematics\n->->->->->->->->->->->->->->->->->->---<-<-<-<-<-<-<-<-<-<-<-<-<-<-<-<-<-<\n\n\n", "msg_date": "Mon, 7 Jun 1999 08:54:26 -0500 (CDT)", "msg_from": "James Thompson <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "\n*rofl* I like it...when can we implement it?:)\n\nOn Mon, 7 Jun 1999, Dmitry Samersoff wrote:\n\n> On 06-Jun-99 Jan Wieck wrote:\n> >:-)\n> \n> Postgres 111111.001\n> \n> To avoid possible portability problems we change touple\n> storage to plain text file.\n> All allowed query is restricted to simple select query:\n> \n> So backend looks like \n> \n> postgres.sh:\n> cat $1 \n> \n> and psql become just simbolic link to vi \n> \n> ;-))))\n> \n> \n> \n> ---\n> Dmitry Samersoff, [email protected], ICQ:3161705\n> http://devnull.wplus.net\n> * There will come soft rains ...\n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 7 Jun 1999 10:59:37 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "> To avoid possible portability problems we change touple\n> storage to plain text file.\n\nDon't laugh. There was once a database based on Unix standard tools. I\nread about it 10 or 15 years ago. I believe it was called rdb. Maybe it\nstill exists?\n\n", "msg_date": "Mon, 7 Jun 1999 23:14:59 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "> > To avoid possible portability problems we change touple\n> > storage to plain text file.\n> \n> Don't laugh. There was once a database based on Unix standard tools. I\n> read about it 10 or 15 years ago. I believe it was called rdb. Maybe it\n> still exists?\n\nIf it is shql, I wrote it.\n\nIt is attached.\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n\nHere is shql 1.3. 
I am posting the complete source rather than a diff\nfile because of the many patches released since the full posting of shql\n1.0 in October of 1991.\n\nThis new version is primarily a portability release. It also fixes a\nbug in the number of rows reported with the -q option.\n\nSome people using Unixes that lack the USL version of the bourne shell\nand other utilities have reported problems when running shql. With bash\n1.14.4 (not earlier versions) and other PD utilities like sed 1.18 (not\n2.*), gawk 2.15PL5, almost any Unix should be able to run shql.\n\nSHQL is an interactive SQL database engine. Written as a Unix shell\nscript, SHQL interprets SQL commands and manipulates flat files based on\nthose commands. SHQL is limited in its understanding of SQL constructs.\nAll this is outlined in the README file contained in the distribution. \nA demo file is also included to show some examples.\n\n---------------------------------------------------------------------------\n#!/bin/sh\n# This is a shell archive (produced by GNU sharutils 4.1).\n# To extract the files from this archive, save it to some FILE, remove\n# everything before the `!/bin/sh' line above, then type `sh FILE'.\n#\n# Made on 1995-07-09 17:46 EDT by <[email protected]>.\n# Source directory was `/var/local/shql/dist/tmp'.\n#\n# Existing files will *not* be overwritten unless `-c' is specified.\n#\n# This shar contains:\n# length mode name\n# ------ ---------- ------------------------------------------\n# 1041 -rw-r--r-- CHANGES\n# 4520 -rw-r--r-- README\n# 2156 -rw-r--r-- demo.shql\n# 20727 -rwxr-xr-x shql\n#\ntouch -am 1231235999 $$.touch >/dev/null 2>&1\nif test ! -f 1231235999 && test -f $$.touch; then\n shar_touch=touch\nelse\n shar_touch=:\n echo\n echo 'WARNING: not restoring timestamps. 
Consider getting and'\n echo \"installing GNU \\`touch', distributed in GNU File Utilities...\"\n echo\nfi\nrm -f 1231235999 $$.touch\n#\n# ============= CHANGES ==============\nif test -f 'CHANGES' && test X\"$1\" != X\"-c\"; then\n echo 'x - skipping CHANGES (file already exists)'\nelse\n echo 'x - extracting CHANGES (text)'\n sed 's/^X//' << 'SHAR_EOF' > 'CHANGES' &&\nNew to versoin 1.3\n-----------------------------------\nchanged sed handling of sql statements for portability\nfixed count bug with -q(quiet) option\nX\nNew to version 1.2\n-----------------------------------\nchanged sed script to allow spaces at the end of /g lines\nadded backslash to sed script for portability\nX\nNew to version 1.2\n-----------------------------------\nfixed bug where -q option causes first row to always print\nfixed subselect bug on first row\nfixed bug with subselect's with where clauses\nfixed bug in multi-table joins\nX\nX\nNew to version 1.1\n-----------------------------------\nNow runs under ksh as well as sh.\nMulti-table joins possible without creating views\nAggregates now supported\nLooks in your $HOME/shql/ for database name also\nExecution operators are now '\\' and '/',with '/' now possible the end \nX\tof a line\nWhite-space is not required as it was before\nNew -q option removes table headers, so only taking output that begins\nX\twith '|' gets you all the data\nDelete syntax now requires a FROM, as it should have all along\nSHAR_EOF\n $shar_touch -am 0709174395 'CHANGES' &&\n chmod 0644 'CHANGES' ||\n echo 'restore of CHANGES failed'\n shar_count=\"`wc -c < 'CHANGES'`\"\n test 1041 -eq \"$shar_count\" ||\n echo \"CHANGES: original size 1041, current size $shar_count\"\nfi\n# ============= README ==============\nif test -f 'README' && test X\"$1\" != X\"-c\"; then\n echo 'x - skipping README (file already exists)'\nelse\n echo 'x - extracting README (text)'\n sed 's/^X//' << 'SHAR_EOF' > 'README' &&\nX S H Q L version 1.2\nX\nX\tShql is a program that reads SQL commands interactively and\nX\texecutes those commands by creating and manipulating Unix files.\nX\nX\tThis program requires a bourne shell that understands functions,\nX\tas well as awk, grep, cut, sort, uniq, join, wc, and sed.\nX\nX \tThis script can be invoked with the command \nX\nX\t\tshql [-q] {database name}\nX\nX \tA directory must be created for the database before you may use it.\nX\tThis directory will house all data files for a single database.\nX\tAll datafiles are created with mode 666 ('rw-rw-rw-'), so create the\nX\tdirectory with 777 ('rwxrwxrwx') if you want the database to be \nX\tsharable, and 700 ('rwx------') to be private. Of course, multiple\nX\tdatabases are possible. A database called 'mydb' may be created\nX\tas a directory $HOME/mydb, $HOME/shql/mydb, ./mydb, or as \nX\t$SHQL_ROOT/mydb, where $SHQL_ROOT is defined below. The -q\nX\toption turns off the display of headings so the output of shql\nX\tcan be used by other programs by caputuring all lines that begin\nX\tthe pipe symbol.\nX\nX \tThe program is patterned after Ingres's interactive sql terminal\nX\tmonitor program. Terminal monitor commands begin with either a\nX\tforward or backward-slash. Forward slashes may appear at the end of\nX\ta commend line. Back-slashes are accepted for compatability. The /g\nX\tis the 'go' command, /p is print, and /q is quit. Try 'help commands'\nX\tfor a full list. 
Because of this, if you need a slash as the\nX\tsecond to last caracter on a line, you should add a space\nX\tbetween the slash and the last character.\nX\t\nX\tTo get started, invoke shql with a database name. Use the directory \nX\tname you created above. Type\nX\nX\t\tshql mydb\nX\nX\tif the directory you created was 'mydb'. Once shql starts up, you \nX\tshould see the database name displayed, and then a '*'. At this\nX\tpoint, the most valuable thing is to type help,\nX\nX\t\t* help\nX\t\t* /g\nX\nX\tYou may then go on. The command 'help syntax' displays syntax\nX\tfor all SQL operations, and 'help commands' displays all shql\nX\tworkspace commands. Try the demo.\nX\nX\tShql can execute only one operation at a time, but operations can\nX\tbe spread over several lines.\nX\nX\tShql operations are allow 'select' operations on multiple tables.\nX\tTable names are read from left to write in select's 'from'\nX\tsection, so the tables should be ordered with the most central\nX\ttables first. In two-table joins, it doesn't matter. In three\nX\ttable joins, if you join table A-to-B and B-to-C, B must not be\nX\tthe last table in the from clause, because shql will not be able\nX\tto join tables A-C. If you get the message 'Join not found, try\nX\treordering tables', this is probably the problem. Also\nX\tqualified field names are not understood, like tablename.fieldname,\nX\tso if you are joining my_id in table A with my_id in table B, just\nX\tsay 'my_id = my_id'. Views can also be used to create\nX\tmulti-table selects.\nX\nX\tSubselects are implemented, but must be the last operand of a\nX\t'where' clause, most useful with 'in'.\nX\nX\tIn most cases, commas are optional. NULLs are not implemented.\nX\tAggregates like AVG() are implemented, but not with GROUP BY.\nX\nX\tWhen INSERTing strings that contain the characters !,*,=,>,<,(, or ),\nX\tspaces or backslashes may be added during the insert. This is a\nX\tside-effect of the string manipulation needed to properly\nX\tparse the command parameters.\nX\nX\tThis SQL is type-less, so specify just the column width when creating\nX\ttables. This is used only for display purposes. Shql is\nX\tcase-sensitive, and expects SQL key words to be in lower case.\nX\t\nX\tCommands can be piped into shql. The table data files are\nX\ttab delimited, so awk scripts can be used to generate reports \nX\tdirectly from the tables. To operate on non-shql data files,\nX\tcreate a dummy table with the proper fields, then copy your file\nX\tinto your shql data directory, replacing your delimiters with\nX\ttabs, then run shql on the table, and convert the table back to \nX\tits original format. Grave accents (`) may be used to execute \nX\tunix command from with shql. Environment variables may also be\nX\tused. See the demo for an example, i.e. 
\"cat demo.shql | shql mydb\".\nX\nX\tIf you have comments, suggestions, or bug reports contact:\nX\nX\t\tBruce Momjian, [email protected]\nX\n-----BEGIN PGP PUBLIC KEY BLOCK-----\nVersion: 2.6.1\nX\nmQBtAy6pceUAAAEDAM9YKKJiqG7AzjLIyvqmDHvjVnmCG0QlhNZm3EdpcbIZBEUJ\n41zWuGhvusiC99MeXy43kxSF4pJLFWhLDYRk1unVvz5y3B+xyERhig3h/AWWRaxH\nJ4HzFdeKgclqllqhVQAFEbQlQnJ1Y2UgTW9tamlhbiA8cm9vdEBjYW5kbGUucGhh\nLnBhLnVzPg==\n=S8mw\n-----END PGP PUBLIC KEY BLOCK-----\nSHAR_EOF\n $shar_touch -am 0709174395 'README' &&\n chmod 0644 'README' ||\n echo 'restore of README failed'\n shar_count=\"`wc -c < 'README'`\"\n test 4520 -eq \"$shar_count\" ||\n echo \"README: original size 4520, current size $shar_count\"\nfi\n# ============= demo.shql ==============\nif test -f 'demo.shql' && test X\"$1\" != X\"-c\"; then\n echo 'x - skipping demo.shql (file already exists)'\nelse\n echo 'x - extracting demo.shql (text)'\n sed 's/^X//' << 'SHAR_EOF' > 'demo.shql' &&\n# Demo for SHQL\n# Create table customer\ncreate table customer (\nX\tname 30,\nX\tage 3,\nX\tstatus 1)\n/p/g\nX\n# Put one person in the table\ninsert into customer values ( 'Fred', 32, 'G' )/p/g\nX\n# Study the table\nhelp customer\n/p/g\nselect * from customer/p/g\nX\n# Add two more people\ninsert into customer values \n( 'Barney', 29, 'G', 'Wilma', 28, 'D' )\n/p/g\nprint customer\n/p/g\nX\n# Get customers with 'G' status\nselect * from customer\nwhere status = 'G' /p/g\nX\n# Get sorted list of customers by age\nselect * from customer\norder by age num\n/p/g \nX\n# Make a table to hold customer status codes and their descriptions\ncreate table codes ( \nX\tcode 1,\nX\tdescription 10 )\n/p/g\nX\n# Insert status codes\ninsert into codes values \n( 'G', 'Good', 'B', 'Bad', 'D', 'Dead Beat' )\n/p/g\nX\n# Create a view so we can see the customer name and status description\ncreate view custstat ( customer.status = codes.code )\n/p/g\nX\n# Look at the table\nhelp custstat\n/p/g\nselect * from custstat\n/p/g\nX\nselect * \nfrom customer, codes\nwhere status = code\n/p/g\nX\n# Replace 'Barney' with 'Bad Bart'\nupdate customer \nset name = 'Bad Bart', status = 'X' \nwhere age = 29\n/p/g\nX\nprint customer\n/p/g\nX\n# Get all customers that have invalid status'es\nselect * from customer\nwhere status not in select code \nX\t\t from codes\n/p/g\nX\n# Remove 'Fred'\ndelete from customer\nwhere age = 32\n/p/g\nX\n# Get rid of view \ndrop view custstat\n/p/g\nX\n# Create a holding table for old customers\ncreate table oldcust (\nX\tname 30,\nX\tstatus 1 )\n/p/g\nX\n# Copy old customer to new table\ninsert into oldcust ( \nX\tname status )\nselect name status \nfrom customer\nwhere age > 28\n/p/g\nX\nselect avg(age)\nfrom customer\n/p/g\nX\nselect name\nfrom customer\nwhere age = select min(age)\nX\t from customer\n/p/g\nX\n# Look at table\nprint oldcust\n/p/g\nX\n# Delete customers moved over\ndelete from customer\nwhere age > 28\n/p/g\nX\nprint customer\n/p/g\nX\n# Try a union of the two tables\nselect name age\nfrom customer\nunion\nselect name status \nfrom oldcust\n/p/g\nX\n# Show example of executing Unix commands\ninsert into customer \nvalues ( '`date`', `ls / | wc -l`, 'Y' )\n/p/g\nprint customer\n/p/g\n# Clean up\ndrop table codes\n/p/g\ndrop table customer\n/p/g\ndrop table oldcust\n/p/g\n/q\t\nSHAR_EOF\n $shar_touch -am 0709174495 'demo.shql' &&\n chmod 0644 'demo.shql' ||\n echo 'restore of demo.shql failed'\n shar_count=\"`wc -c < 'demo.shql'`\"\n test 2156 -eq \"$shar_count\" ||\n echo \"demo.shql: original size 2156, current size $shar_count\"\nfi\n# 
============= shql ==============\nif test -f 'shql' && test X\"$1\" != X\"-c\"; then\n echo 'x - skipping shql (file already exists)'\nelse\n echo 'x - extracting shql (text)'\n sed 's/^X//' << 'SHAR_EOF' > 'shql' &&\n#!/bin/sh\n# use /bin/sh, /bin/ksh, or /bin/bash\n#\n# shql - version 1.3\n#\n# by Bruce Momjian, [email protected]\n#\nX\n# $Id: shql,v 1.23 1995/06/11 02:23:58 root Exp root $\nX\n# DEFINE THESE\nSHQL_ROOT=\"/u/shql\"\t\t# system-wide database location\nEDITOR=\"${EDITOR:=/usr/bin/vi}\" # default editor if EDITOR not defined\nSHELL=\"${SHELL:=/bin/sh}\" \t# default editor if SHELL not defined\nX\n# Unix table file postfixes: @ is attrib, ~ is data, % is view\nX\nDEBUG=\"N\"\t# set to Y for debugging\nX\n[ \"$DEBUG\" = \"Y\" ] && set -x \t\t# uncomment for debugging\n#set -v\nUMASK=`umask`\numask 0000\t\t# share database\ntrap \"echo \\\"Goodbye\\\" ; \\\nX rm -f /tmp/$$ /tmp/$$row /tmp/$$join*\" 0 1 2 3 15\nset -h\t\t\t# remember functions\nX\nif echo '\\c' | grep -s c ; then\t\t# to adapt to System V vs. BSD 'echo'\nX\tNOCR1='-n'\t\t\t# BSD\nX\tNOCR2=\"\"\nelse\nX\tNOCR1=\"\"\t\t\t# System V\nX\tNOCR2='\\c'\nfi\nNL='\n'\nTAB='\t'\nX\nif [ \"X$1\" = \"X-q\" ] \nthen\tQUIET=\"Y\"\nX\tshift\nfi\nX\n_IFS=\"$IFS\"\nX\nexport _IFS TABLE CMD NOCR1 NOCR2 NL TAB QUIET DEBUG\nX\nif [ \"X$1\" = \"X\" ]\nthen\techo \"Missing database name.\" 1>&2\nX\techo \"The database name must be a directory under $HOME/shql\" 1>&2\nX\techo \"\tor a directory under $SHQL_ROOT\" 1>&2\nX\texit 1\nfi\necho \"Database: $1\"\nX\nif [ -d $HOME/shql/$1 ]\nthen\tcd $HOME/shql/$1\nelif [ -d $SHQL_ROOT/$1 ]\nthen\tcd $SHQL_ROOT/$1\nelif [ -d $HOME/$1 ]\nthen\tcd $HOME/$1\nelif [ -d $1 ]\nthen\tcd $1\nelse \techo \"Unknown database ($1)\" 1>&2\nX\techo \"The database name must be a directory under $HOME/shql\" 1>&2\nX\techo \"\tor a directory under $SHQL_ROOT\" 1>&2\nX\texit 1\nfi\nX\nX\n#\f\n#**************************************************************************\n# syntax\n#**************************************************************************\nsyntax(){\nX\tcase \"$1\" in\nX\t\tcreate)\tcat <<\"END\"\nCREATE TABLE table_name (\nX\tcolumn_name column_width \nX\t{, ...} \n) \t\nor\nCREATE VIEW view_name (\nX\ttable_or_view1.column1 = table_or_view2.column2\n)\nEND\nreturn 0\n;;\nX\t\tdelete) cat <<\"END\"\nDELETE \nFROM table_name\t\n{ WHERE where_clause }\nEND\nreturn 0\n;;\nX\t\tdrop) cat <<\"END\"\nDROP TABLE table_name\nor\nDROP VIEW view_name\nEND\nreturn 0\n;;\nX\t\tedit) cat <<\"END\"\nEDIT table_name\nis a non-standard method of changing a table's field names or display widths.\nEND\nreturn 0\n;;\nX\t\thelp)\tcat <<\"END\"\nHELP ALL\nor\nHELP TABLES \nor\nHELP VIEWS\nor\nHELP COMMANDS\nor\nHELP [CREATE | DELETE | DROP | INSERT | SELECT | UPDATE | WHERE | PRINT | EDIT]\nor\nHELP table_name\nCommands must appear in lower case.\nEND\nreturn 0\n;;\nX\t\tinsert) cat <<\"END\"\nINSERT INTO table_name \nX\t{ ( column_name, ... ) }\nVALUES ( expression, ...)\nor\t\t\nINSERT INTO table_name \nX\t{ ( column_name, ... ) }\nsubselect\nEND\nreturn 0\n;;\nX\t\tprint) cat <<\"END\"\nPRINT table_name \nis a non-standard synonym for SELECT * FROM table_name.\nEND\nreturn 0\n;;\nX\t\tselect) cat <<\"END\"\nSELECT { DISTINCT } \nX\t[ column_name {,...} | * ]\nFROM [ table_name | view_name ]\n{ WHERE where_clause }\n{ ORDER BY column_name { NUM } { ASC | DESC } {, ... 
}\n{ UNION select statement }\n'NUM' is a non-standard method for sorting numeric fields.\nEND\nreturn 0\n;;\nX\t\tupdate) cat <<\"END\"\nUPDATE table_name\nSET column_name = expression {, ... }\n{ WHERE where_clause }\nEND\nreturn 0\n;;\nX\t\twhere) cat <<\"END\"\nWHERE [ column_name | value ] [ =, !=, >, <, >=, <=, and, or, not, in ]\nX [ column_name | value | subselect ]\nParentheses may be used to group expressions. \nEND\nreturn 0\n;;\nX\t\tsyntax)\tsyntax commands; echo\nX\t\t\tsyntax create; echo\nX\t\t\tsyntax delete; echo\nX\t\t\tsyntax drop; echo\nX\t\t\tsyntax insert; echo\nX\t\t\tsyntax select; echo\nX\t\t\tsyntax update; echo\nX\t\t\tsyntax where; echo\nX\t\t\tsyntax print; echo\nX\t\t\tsyntax edit; echo\nX\t\t\treturn 0\nX\t\t\t;;\nX\tesac\nX\treturn 1\n}\nX\n#\f\n#**************************************************************************\n# lookup_field\n#**************************************************************************\nlookup_field(){\nX\tRESULT=\"`grep -n \\\"^$1\t\\\" $TABLE@ | sed 1q`\"\nX\tif [ ! \"$RESULT\" ] \nX\tthen \tOUTFIELD=\"$1\"\nX\t\treturn 1\nX\telse\tOUTFIELDNUM=\"`expr \"$RESULT\" : '\\([^:]*\\)'`\"\nX\t\tOUTFIELD=\"\\$$OUTFIELDNUM\" \nX\t\treturn 0\nX\tfi\n}\nX\n#\f\n#**************************************************************************\n# do_aggreg\n#**************************************************************************\ndo_aggreg(){\nX\tif \t[ \"X$1\" = 'Xsum' ]\nX\tthen\tAGGREG='total'\nX\telif \t[ \"X$1\" = 'Xavg' ]\nX\tthen \tAGGREG='(total/cnt)'\nX\telif \t[ \"X$1\" = 'Xcount' ]\nX\tthen \tAGGREG='cnt'\nX\telif \t[ \"X$1\" = 'Xmin' ]\nX\tthen \tAGGREG='min'\nX\telif \t[ \"X$1\" = 'Xmax' ]\nX\tthen \tAGGREG='max'\nX\telse\treturn 1\nX\tfi\nX\t[ \"X$2\" != \"X(\" -o \"X$4\" != \"X)\" ] && \\\nX\t\techo \"Bad aggregate syntax\" 1>&2 && syntax select && return 1\nX\tAGGFIELD=\"$3\"\nX\tshift 4\nX\tlookup_field \"$AGGFIELD\"\nX\t[ \"$?\" -ne 0 ] && echo \"Bad field name ($1)\" 1>&2 && return 1\nX\twhile [ $# -ne 0 ]\nX\tdo\t\nX\t\t[ \"X$1\" = \"Xwhere\" ] && break;\nX\t\t[ \"X$1\" = \"Xorder\" ] && break;\nX\t\t[ \"X$1\" = \"Xunion\" ] && break;\nX\t\tshift\nX\tdone\nX\nX\tOUTFIELD=`( SUBSELECT=\"Y\" ; AGGREGATE=\"Y\"; \\\nX\t select_ \"select\" \"$AGGFIELD\" \"from\" \"$TABLE\" \"$@\") | \\\nX\t awk -F\"\t\" \\\nX\t\t'NR == 1 { min = $1; max = $1 }\nX\t\t\t { cnt += 1; total += $1 }\nX\t\t$1 < min { min = $1 }\nX\t\t$1 > max { max = $1 }\nX\t\tEND\t { printf \"%s%s%s\", \"\\\"\", '$AGGREG', \"\\\"\" }'`\nX\tif [ `expr \"$RESULT\" : '[^\t]*\t\\(.*\\)'` -lt 10 ]\nX\tthen\tRESULT=\"$AGGFIELD\t10\"\nX\tfi\nX\treturn 0\n}\nX\n#\f\n#**************************************************************************\n# do_join \n#**************************************************************************\ndo_join(){\nX\tupdate_view \"$1\"\nX\tTABLE=\"$1\"\nX\tlookup_field \"$2\" \nX\t[ \"$?\" -ne 0 ] && echo \"Bad view specifcation ($1.$2)\" 1>&2 && return 1\nX\tJFIELD1=\"$OUTFIELDNUM\"\nX\tJFIELD1L1=\"`expr $JFIELD1 - 1`\"\nX\tupdate_view \"$3\"\nX\tTABLE=\"$3\"\nX\tlookup_field \"$4\" \nX\t[ \"$?\" -ne 0 ] && echo \"Bad view specifcation ($3.$4)\" 1>&2 && return 1\nX\tJFIELD2=\"$OUTFIELDNUM\"\nX\tJFIELD2L1=\"`expr $JFIELD2 - 1`\"\nX\nX\t( grep \"^$2\t\" $1@ ;\nX\t grep -v \"^$2\t\" $1@ ;\nX\t grep -v \"^$4\t\" $3@ ) > $5@\nX\tsort -t\\\t +$JFIELD2L1 $3~ > /tmp/$$\nX\tsort -t\\\t +$JFIELD1L1 $1~ | \\\nX\t\tjoin -t\\\t -j1 $JFIELD1 -j2 $JFIELD2 \\\nX\t\t\t\t\t\t- /tmp/$$ > 
$5~\n}\nX\n#\f\n#**************************************************************************\n# update_view\n#**************************************************************************\nupdate_view(){\nX\t[ ! -f \"$1%\" ] && return 1\nX\t( do_join `cat $1%` )\n}\nX\n#\f\n#**************************************************************************\n# where\n#**************************************************************************\nwhere(){\nX\tshift\nX\twhile [ $# -gt 0 -a \"$1\" != \"order\" -a \"$1\" != \"union\" ]\nX\tdo\nX\t\tif [ \"X$1\" = \"Xselect\" ]\nX\t\tthen\nX\t\t\tset X `( SUBSELECT=\"Y\" ;select_ \"$@\")`\nX\t\t\tif [ \"$?\" -eq 0 ]\nX\t\t\tthen \tshift\nX\t\t\telse \treturn 1\nX\t\t\tfi\nX\t\tfi\nX\t\tcase \"$1\" in\nX\t\t\tand) \tWHERE=\"$WHERE && \";;\nX\t\t\tor)\tWHERE=\"$WHERE || \";;\nX\t\t\tnot)\tWHERE=\"$WHERE !\" ;;\nX\t\t\t=)\tWHERE=\"$WHERE == \";;\nX\t\t\t'in') \tshift\nX\t\t\t\tset X `( SUBSELECT='Y';select_ \"$@\" )`\nX\t\t\t\tif [ \"$?\" -eq 0 ]\nX\t\t\t\tthen \tshift\nX\t\t\t\telse \treturn 1\nX\t\t\t\tfi\nX\t\t\t\tINWHERE=\"\"\nX\t\t\t\tCOMP=\"==\"\nX\t\t\t\tLOGIC=\"||\"\nX\t\t\t\t[ \"X$LAST\" = \"Xnot\" ] && COMP=\"=\" && LOGIC=\"&&\"\nX\t\t\t\t[ \"$#\" -eq 0 ] && set \"\\\"__()__\\\"\"\nX\t\t\t\tfor VALUE\nX\t\t\t\tdo\nX\t\t\t\t\t[ \"X$INWHERE\" != \"X\" ] && \nX\t\t\t\t\t\tINWHERE=\"$INWHERE $LOGIC\"\nX\t\t\t\t\tINWHERE=\"$INWHERE ($WHERE$COMP $VALUE) \"\nX\t\t\t\tdone\nX\t\t\t\tWHERE=\"$INWHERE\"\nX\t\t\t\tbreak;;\nX\t\t\t*)\tlookup_field \"$1\"\nX\t\t\t\tWHERE=\"$WHERE $OUTFIELD\";;\nX\t\tesac\nX\t\tLAST=\"$1\"\nX\t\tshift\nX\tdone \nX\t[ \"$WHERE\" ] && WHERE=\" ( $WHERE ) \" && return 0\nX\techo \"Missing 'where' clause\" 1>&2\nX\tsyntax where\nX\treturn 1\n}\nX\n#\f\n#**************************************************************************\n# help\n#**************************************************************************\nhelp(){\nX\tif [ ! 
\"$2\" ]\nX\tthen\techo \"Ambiguous syntax, try:\" 1>&2 ; syntax help\nX\telif [ \"$2\" = \"all\" ]\nX\tthen\tls *@ *% 2>/dev/null | cut -d@ -f1 | cut -d% -f1 | uniq\nX\telif [ \"$2\" = \"tables\" ] \nX\tthen\tls *@ *% 2>/dev/null | cut -d@ -f1 | cut -d% -f1 | uniq -u \nX\telif [ \"$2\" = \"views\" ] \nX\tthen\tls *% 2>/dev/null | cut -d% -f1 \nX\telif [ \"$2\" = \"commands\" ]\nX\tthen\tcat << \"END\"\n/p is print\n/g is go(execute)\n/q is quit\n/e is edit\n/i is include\n/w is write\n/r is reset(clear)\n/s is shell\n/p/g print and go\nThe number sign(#) may be used at the start of a line for comments.\nEND\nX\telse\tsyntax $2 && return\nX\t\tTABLE=\"$2\"\nX\t\tupdate_view \"$TABLE\"\nX\t\tif [ -f \"$2@\" ] \nX\t\tthen\techo \"$NL <$2>\" && cat \"$2@\"\nX\t\t\t[ -f \"${2}%\" ] &&echo $NOCR1 \"$NL View:\t$NOCR2\" && \nX\t\t\t\tset X `cat $2%` && shift &&\nX\t\t\t\techo \"$1.$2 = $3.$4\"\nX\t\t\techo \"$NL Rows:\t\"`cat $TABLE~ | wc -l`\nX\t\telse \techo \"$TABLE does not exist.\" 1>&2\nX\t\t\tsyntax help\nX\t\tfi\nX\tfi\n}\nX\n#\f\n#**************************************************************************\n# create\n#**************************************************************************\ncreate(){\nX\tshift\nX\tif [ -f \"$2@\" -o -f \"$2%\" ]\nX\tthen\techo \"Table already exists.\" 1>&2\nX\telif [ \"X$1\" = \"Xview\" -a $# -gt 2 ]\nX\tthen\tshift\nX\t\tif [ $# -ne 6 ]\nX\t\tthen \tsyntax create\nX\t\telse \t\nX\t\t\t[ \"X$2\" != \"X(\" ] && echo \"Bad syntax\" 1>&2 && \nX\t\t\t\t\t\t\tsyntax create && return\nX\t\t\tTABLE1=\"`expr $3 : '\\([^\\.]*\\)'`\"\nX\t\t\tFIELD1=\"`expr $3 : '[^\\.]*.\\(.*\\)'`\"\nX\t\t\tTABLE=\"$TABLE1\"\nX\t\t\tlookup_field \"$FIELD1\" \nX\t\t\t[ \"$?\" -ne 0 ] && echo \"Bad table or field name\" 1>&2 &&\nX\t\t\t\t\t\t\t\t\treturn\nX\t\t\t[ \"X$4\" != \"X=\" ] && echo \"Bad syntax\" 1>&2 && \nX\t\t\t\t\t\t\tsyntax create && return\nX\t\t\tTABLE2=\"`expr $5 : '\\([^\\.]*\\)'`\"\nX\t\t\tFIELD2=\"`expr $5 : '[^\\.]*.\\(.*\\)'`\"\nX\t\t\tTABLE=\"$TABLE2\"\nX\t\t\tlookup_field \"$FIELD2\" \nX\t\t\t[ \"$?\" -ne 0 ] && echo \"Bad table or field name\" 1>&2 &&\nX\t\t\t\t\t\t\t\t\treturn\nX\t\t\t[ \"X$2\" != \"X(\" ] && echo \"Bad syntax\" 1>&2 && \nX\t\t\t\t\t\t\tsyntax create && return\nX\t\t\techo \"$TABLE1 $FIELD1 $TABLE2 $FIELD2 $1\" > $1%\nX\t\t\tupdate_view \"$1\"\t\t\t\nX\t\tfi\nX\t\techo \"OK\"\nX\telif [ \"X$1\" = \"Xtable\" -a $# -ge 5 ] \nX\tthen\nX\t\t[ \"X$3\" != \"X(\" ] && echo \"Bad syntax\" 1>&2 && \nX\t\t\t\t\t\t\tsyntax create && return\nX\t\tTABLE=\"$2\"\nX\t\tshift 3\nX\t\t> $TABLE@\nX\t\t> $TABLE~\nX\t\twhile [ $# -ge 2 ]\nX\t\tdo\nX\t\t\techo \"$1\t$2\" >> $TABLE@\nX\t\t\tshift 2\nX\t\tdone\nX\t\t[ \"X$1\" != \"X)\" ] && echo \"Bad syntax\" 1>&2 && \nX\t\t\t\t\trm -f $TABLE@ && syntax create && return\nX\t\techo \"OK\"\nX\telse \nX\t\techo \"Improper syntax ($1)\" 1>&2\nX\t\tsyntax create\nX\tfi\nX\treturn\n}\nX\n#\f\n#*************************************************************************\n# drop\n#**************************************************************************\ndrop(){\nX\t[ \"$2\" != \"table\" -a \"$2\" != \"view\" ] && \nX\t\techo \"Syntax error.\" 1>&2 && syntax drop && return\nX\t[ \"$2\" = \"table\" -a -f \"$3%\" ] &&\nX\t\techo \"Can not drop, $2 is a view, not a table\" 1>&2 && return\nX\t[ \"$2\" = \"view\" -a ! 
-f \"$3%\" ] &&\nX\t\techo \"Can not drop, $2 is not a view\" 1>&2 && return\nX\tif [ -f \"$3@\" -o -f \"$3%\" ] \nX\tthen\trm -f $3@ $3~ $3%\nX\t\techo \"OK\"\nX\telse \techo \"No such table\" 1>&2\nX\tfi\n}\nX\n#\f\n#**************************************************************************\n# insert\n#**************************************************************************\ninsert(){\nX\tshift\nX\t[ \"X$1\" != \"Xinto\" ] && echo \"Improper syntax ($1)\" 1>&2 && \nX\t\tsyntax insert && return\nX\tshift\nX\tTABLE=\"$1\"\nX\tupdate_view \"$TABLE\" && echo \"Can not insert into a view\" 1>&2 && return\nX\t[ ! -f \"$TABLE@\" ] && echo \"Table does not exist\" 1>&2 && return\nX\tshift\nX\tATTRIB=\"`cat $TABLE@ | wc -l`\"\nX\tXASGN=\"\"\nX\tXECHO=\"echo \\\"\"\nX\tif [ $# -gt 0 -a \"X$1\" = \"X(\" ]\nX\tthen\tATTRIB2=\"0\"\nX\t\tshift\nX\t\twhile [ $# -gt 0 -a \"X$1\" != \"X)\" ]\nX\t\tdo\nX\t\t\tlookup_field \"$1\" \nX\t\t\t[ \"$?\" -ne 0 ] && echo \"Bad field name. ($1)\" 1>&2 && \nX\t\t\t\t\t\t\t\t\treturn \nX\t\t\tXASGN=\"$XASGN X$OUTFIELDNUM=\\`eval echo \\$1\\` ; shift;\"\nX\t\t\tshift\nX\t\t\tATTRIB2=`expr $ATTRIB2 + 1`\nX\t\tdone\nX\t\t[ \"X$1\" != \"X)\" ] && echo \"Syntax error ($1)\" 1>&2 && \nX\t\t\t\t\t\tsyntax insert && return\nX\t\tshift\nX\t\tPOS=\"1\"\nX\t\twhile [ \"$POS\" -le \"$ATTRIB\" ]\nX\t\tdo\nX\t\t\teval X$POS=\"\"\nX\t\t\t[ \"$POS\" != \"1\" ] && XECHO=\"$XECHO\\$TAB\"\nX\t\t\tXECHO=\"$XECHO\\$X$POS\"\nX\t\t\tPOS=`expr $POS + 1`\nX\t\tdone\nX\t\tXECHO=\"$XECHO\\\"\"\nX\t\tATTRIB=\"$ATTRIB2\"\nX\tfi\t\nX\tif [ \"X$1\" = \"Xselect\" ]\nX\tthen \teval set X \"`( SUBSELECT='Y' ; select_ \"$@\" )` \\)\"\nX\t\tshift\nX\telif [ \"X$1\" != \"Xvalues\" -o \"X$2\" != 'X(' ] \nX\t\tthen\t echo \"Improper syntax ($1)\" 1>&2 && syntax insert && \nX\t\t\t\t\t\t\t\t\treturn\nX\telse\tshift 2\nX\tfi\nX\tfor LAST do \nX\t: ; done\nX\t[ \"X$LAST\" != \"X)\" ] && \nX\t\techo \"Improper syntax\" 1>&2 && syntax insert && return\nX\tif [ \"`expr \\( $# - 1 \\) % $ATTRIB`\" -ne 0 ]\nX\tthen \techo \"Incorrect number of values.\" 1>&2\nX\telse\tROWS=\"`expr \\( $# - 1 \\) / $ATTRIB`\"\nX\t\twhile [ $# -gt 1 ]\nX\t\tdo\t\nX\t\t\tif [ \"$XASGN\" = \"\" ]\nX\t\t\tthen \t\nX\t\t\t\techo $NOCR1 \"`eval echo $1`$NOCR2\" >> $TABLE~ \nX\t\t\t\tshift\nX\t\t\t\twhile [ \"`expr \\( $# - 1 \\) % $ATTRIB`\" -ne 0 ]\nX\t\t\t\tdo\nX\t\t\t\t\techo $NOCR1 \"$TAB`eval echo $1`$NOCR2\"\\\nX\t\t\t\t\t\t\t \t>> $TABLE~\nX\t\t\t\t\tshift\nX\t\t\t\tdone\nX\t\t\t\techo \"\" >> $TABLE~\nX\t\t\telse\teval $XASGN\nX\t\t\t\teval $XECHO >> $TABLE~\nX\t\t\tfi\nX\t\tdone\nX\t\techo \"($ROWS rows)\"\t\t\t\nX\tfi\n}\nX\n#\f\n#*************************************************************************\n# delete\n#**************************************************************************\ndelete(){\nX\tTABLE=\"$3\"\nX\t[ \"X$2\" != \"Xfrom\" ] && echo \"Improper syntax ($2)\" 1>&2 && \nX\t\tsyntax delete && return\nX\tupdate_view \"$TABLE\" && echo \"You can not delete from a view.\" 1>&2 &&\nX\t\t\t\t\t\t\t\t\treturn \nX\t[ ! -f \"$TABLE@\" ] && echo \"$TABLE does not exist.\" 1>&2 && return\nX\tWHERE=\"\"\nX\tif [ \"X$4\" = \"Xwhere\" ]\nX\tthen \tshift 3\nX\t\twhere \"$@\" && \nX\t\tawk -F\"\t\" \"! 
$WHERE { cnt += 1 ; print } \nX\t\t\tEND { printf \\\"( %1d rows)\\\\n\\\", (NR - cnt) \\\nX\t\t\t>\\\"/tmp/$$row\\\" }\" $TABLE~ > /tmp/$$ && \nX\t\t\tmv /tmp/$$ $TABLE~ && cat /tmp/$$row\nX\telse\techo '('`cat $TABLE~ | wc -l`' rows)' \nX\t\t> $TABLE~\nX\tfi\n}\nX\n#\f\n#*************************************************************************\n# update\n#**************************************************************************\nupdate(){\nX\tTABLE=\"$2\"\nX\tupdate_view \"$TABLE\" && echo \"Can not update a view.\" 1>&2 && return\nX\t[ ! -f \"$TABLE@\" ] && echo \"$TABLE does not exit.\" 1>&2 && return\nX\t[ \"X$3\" != \"Xset\" ] && echo \"Improper syntax.\" 1>&2 && syntax update && \nX\t\t\t\t\t\t\t\t\treturn\nX\tshift 3\nX\tASSIGN=\"\"\nX\twhile [ $# -gt 0 -a \"X$1\" != \"Xwhere\" ]\nX\tdo\nX\t\tlookup_field \"$1\" && [ \"X$2\" = \"X=\" ] && ASSIGN=\"$ASSIGN ; \"\nX\t\tASSIGN=\"$ASSIGN $OUTFIELD\"\nX\t\tshift\nX\tdone\nX\tWHERE=\"\"\nX\tif [ \"X$1\" = \"Xwhere\" ] \nX\tthen \twhere \"$@\" || return \nX\tfi\nX\tawk -F\"\t\" \"BEGIN { OFS = \\\"\t\\\" }\nX\t\t$WHERE \t{ $ASSIGN; cnt += 1 }\nX\t\t\t{ print } \nX\t\tEND \t{ printf \\\"( %1d rows)\\\\n\\\", cnt >\\\"/tmp/$$row\\\" }\" \\\nX\t\t$TABLE~ > /tmp/$$ && \nX\t\t\tmv /tmp/$$ $TABLE~ && cat /tmp/$$row\n}\nX\n#\f\n#**************************************************************************\n# select_\n#**************************************************************************\nselect_(){\n[ \"$DEBUG\" = \"Y\" ] && set -x\nX\tUNION=\"Y\"\nX\twhile [ \"$UNION\" != \"\" ]\nX\tdo\nX\t\tINAGG=\"\"\nX\t\tFROM=\"\"\nX\t\tUNION=\"\"\nX\t\tTABLE=\"\"\nX\t\tfor ATABLE\nX\t\tdo\nX\t\t [ \"X$ATABLE\" = \"Xwhere\" ] && break\nX\t\t [ \"X$ATABLE\" = \"Xorder\" ] && break\nX\t\t [ \"X$ATABLE\" = \"Xunion\" ] && break\nX\t\t [ \"X$ATABLE\" = \"Xfrom\" ] && FROM=\"Y\" && continue\nX\t\t if [ \"$FROM\" ]\nX\t\t then\nX\t\t\t[ ! -f \"$ATABLE@\" ] && \\\nX\t\t\techo \"$ATABLE does not exist.\" 1>&2 && return 1\nX\t\t\tif [ ! \"$TABLE\" ]\nX\t\t\tthen \tTABLE=\"$ATABLE\"\nX\t\t\telse\tJTABLE=\"$TABLE\"\nX\t\t\t\tPREV=\"\"\nX\t\t\t\tPPREV=\"\"\nX\t\t\t\tFOUND=\"\"\nX\t\t\t\tfor GETJ\nX\t\t\t\tdo\nX\t\t\t\t if [ \"$PREV\" = \"=\" ]\nX\t\t\t\t then\nX\t\t\t\t\tTABLE=\"$JTABLE\"\nX\t\t\t\t\tlookup_field \"$PPREV\" &&\nX\t\t\t\t\tTABLE=\"$ATABLE\" &&\nX\t\t\t\t\tlookup_field \"$GETJ\" &&\nX\t\t\t\t\tFOUND=\"Y1\" &&\nX\t\t\t\t\tbreak\nX\t\t\t\t\tTABLE=\"$ATABLE\"\nX\t\t\t\t\tlookup_field \"$PPREV\" &&\nX\t\t\t\t\tTABLE=\"$JTABLE\" &&\nX\t\t\t\t\tlookup_field \"$GETJ\" &&\nX\t\t\t\t\tFOUND=\"Y2\" &&\nX\t\t\t\t\tbreak\nX\t\t\t\t fi\nX\t\t\t\t PPREV=\"$PREV\"\nX\t\t\t\t PREV=\"$GETJ\"\nX\t\t\t\tdone\nX\t\t\t\t[ ! \"$FOUND\" ] &&\nX\t\t\t\techo \"Join not found, \\c\" &&\nX\t\t\t\techo \"try reordering tables.\" 1>&2 && return 1\nX\t\t\t\tif [ \"$FOUND\" = \"Y1\" ]\nX\t\t\t\tthen\nX\techo \"$JTABLE\t$PPREV\t$ATABLE\t$GETJ\t/tmp/$$join2\" >/tmp/$$join2%\nX\t\t\t\telse\nX\techo \"$ATABLE\t$PPREV\t$JTABLE\t$GETJ\t/tmp/$$join2\" >/tmp/$$join2%\nX\t\t\t\tfi\nX\t\t\t\tupdate_view /tmp/$$join2\nX\t\t\t\tmv /tmp/$$join2~ /tmp/$$join~\nX\t\t\t\tmv /tmp/$$join2@ /tmp/$$join@\nX\t\t\t\texpr \"$RESULT\" : '[^:]:*\\(.*\\)' >>/tmp/$$join@\nX\t\t\t\tcut -d\\\t -f1 /tmp/$$join~ | \\\nX\t\t\t\t\tpaste /tmp/$$join~ - >/tmp/$$\nX\t\t\t\tmv /tmp/$$ /tmp/$$join~\nX\t\t\t\tTABLE=\"/tmp/$$join\"\nX\t\t\tfi\nX\t\t fi\nX\t\tdone\nX\t\t[ ! 
\"$FROM\" ] && echo \"Syntax error.\" 1>&2 && syntax select &&\nX\t\t\t\t\t\t\t\treturn 1\nX\t\tupdate_view \"$TABLE\"\nX \t\tshift\nX\t\tDISTINCT=\"\"\nX\t\t[ \"X$1\" = \"Xdistinct\" ] && DISTINCT=\"Y\" && shift\nX\t\tFIELDS=\"\"\nX\t\tPRINTF=\"\"\nX\t\twhile [ \"X$1\" != \"Xfrom\" ]\nX\t\tdo\nX\t\t\tif [ \"X$1\" = 'X*' ]\nX\t\t\tthen\tshift\nX\t\t\t\tset X `cat $TABLE@ | cut -d\\\t -f1` \"$@\" \nX\t\t\t\tshift\nX\t\t\telse\tlookup_field \"$1\"\nX\t\t\t\tif [ \"$?\" -ne 0 ]\nX\t\t\t\tthen \tdo_aggreg \"$@\"\nX\t\t\t\t\tif [ \"$?\" -eq 0 ]\nX\t\t\t\t\tthen\tINAGG=\"Y\"\nX\t\t\t\t\t\tshift 3\nX\t\t\t\t\telse \nX\t\t\t\t\t echo \"Bad field name ($1)\" 1>&2\nX\t\t\t\t\t return 1\nX\t\t\t\t\tfi\nX\t\t\t\tfi\nX\t\t\t\t[ \"$FIELDS\" ] && FIELDS=\"$FIELDS,\"\nX\t\t\t\tFIELDS=\"$FIELDS $OUTFIELD\"\nX\t\t\t\tif [ \"$SUBSELECT\" = \"\" ]\nX\t\t\t\tthen \t[ ! \"$PRINTF\" ] && PRINTF=\"|\"\nX\t\t\t\t\tWIDTH=`expr \"$RESULT\" : \\\nX\t\t\t\t\t\t'[^\t]*\t\\(.*\\)'`\nX\t\t\t\t\tPRINTF=\"$PRINTF%-$WIDTH.${WIDTH}s|\"\nX\t\t\t\telse\tif [ ! \"$AGGREGATE\" ]\nX\t\t\t\t\tthen\tPRINTF=\"$PRINTF\\\\\\\"%s\\\\\\\" \"\nX\t\t\t\t\telse\tPRINTF=\"$PRINTF%s\\n\"\nX\t\t\t\t\tfi\nX\t\t\t\tfi\nX\t\t\t\tshift\nX\t\t\tfi\nX\t\tdone\nX\t\tshift 2\nX\t\tWHERE=\"\"\nX\t\tWHERE_USED=\"\"\nX\t\tSORT=\"\"\nX\t\twhile [ $# -ne 0 ]\nX\t\tdo\t\nX\t\t\tif [ \"X$1\" = \"Xwhere\" -a \"$WHERE_USED\" = \"\" ]\nX\t\t\tthen\nX\t\t\t\twhere \"$@\"\nX\t\t\t\t[ \"$?\" -ne 0 ] && return 1 \nX\t\t\t\t[ \"$QUIET\" = \"\" -a \"$SUBSELECT\" = \"\" ] && \nX\t\t\t\t\t\tWHERE=\"$WHERE || NR == 1\"\nX\t\t\t\tWHERE_USED=\"Y\"\nX\t\t\t\tshift\nX\t\t\telif [ \"X$1\" = \"Xorder\" ]\nX\t\t\tthen \t[ \"X$2\" != \"Xby\" ] && \nX\t\t\t\t\techo \"Syntax error ($2)\" 1>&2 && \nX\t\t\t\t\tsyntax select && return 1\nX\t\t\t\tshift 2\nX\t\t\t\twhile [ $# -gt 0 -a \"$1\" != \"union\" ]\nX\t\t\t\tdo\nX\t\t\t\t\tif [ \t\"X$1\" != \"Xasc\" -a \\\nX\t\t\t\t\t\t\"X$1\" != \"Xdesc\" -a \\\nX\t\t\t\t\t\t\"X$1\" != \"Xnum\" ] \nX\t\t\t\t\tthen\tlookup_field \"$1\" \nX\t\t\t\t\t\t[ \"$?\" -ne 0 ] &&\nX\t\t\t\techo \"Bad field name ($1)\" 1>&2 && return 1 \nX\t\t\t\t\t\t[ \"$SORT\" = \"\" ] && \nX\t\t\t\t\t\t\tSORT=\"sort -t\\\"\t\\\" \"\nX\t\t\t\t\t\tSORTL=\"`expr $OUTFIELDNUM - 1`\"\nX\t\t\t\t\t\tSORT=\"$SORT +$SORTL\"\nX\t\t\t\t\t\t[ \"X$2\" = \"Xnum\" ] && \nX\t\t\t\t\t\t\tSORT=\"${SORT}n\"\nX\t\t\t\t\t\t[ \"X$2\" = \"Xdesc\" ] && \nX\t\t\t\t\t\t\tSORT=\"${SORT}r\"\nX\t\t\t\t\t\t[ \"X$3\" = \"Xdesc\" ] && \nX\t\t\t\t\t\t\tSORT=\"${SORT}r\"\nX\t\t\t\t\t\tSORT=\"$SORT -$OUTFIELDNUM\"\nX\t\t\t\t\tfi\nX\t\t\t\t\tshift\nX\t\t\t\tdone\nX\t\t\telif [ \"X$1\" = \"Xunion\" ]\nX\t\t\tthen\tshift\nX\t\t\t\tUNION=\"Y\"\nX\t\t\t\tWHERE_USED=\"\"\nX\t\t\t\tbreak\nX\t\t\telse\tshift\nX\t\t\tfi\nX\t\tdone\nX\t\t[ \"$INAGG\" ] && WHERE=\"NR == 1\"\nX\nX\t\tif [ \"$DISTINCT\" != \"\" ] \nX\t\tthen\tif [ \"$SORT\" = \"\" ]\nX\t\t\tthen\tDIST=\"sort | uniq | tee /tmp/$$row\"\nX\t\t\telse\tDIST=\"uniq | tee /tmp/$$row\"\nX\t\t\tfi\nX\t\telse\tDIST=\"cat\"\nX\t\tfi\t\t\t\t\t\nX\nX\t\tTABLEFILE=\"$TABLE~\"\nX\t\t[ \"$SORT\" != \"\" ] && cat $TABLE~ | eval \"$SORT\" > /tmp/$$ &&\nX\t\t\t\t\t\t\tTABLEFILE=\"/tmp/$$\"\nX\nX\t\tif [ \"$SUBSELECT\" ]\nX\t\tthen\tawk -F\"\t\" \"$WHERE {printf \\\"$PRINTF\\\", $FIELDS }\" \\\nX\t\t\t\t\t\t\t$TABLEFILE |eval \"$DIST\"\nX\t\telse if [ ! 
\"$QUIET\" -o \"$INAGG\" = \"Y\" ]\nX\t\t\tthen \nX\t\t\t( set X `cut -d\\\t -f1 $TABLE@` ; shift \nX\t\t\t echo $NOCR1 \"-$1-$NOCR2\" ; shift \nX\t\t \t for HEADING\nX\t\t\t do \nX\t\t\t\techo $NOCR1 \"$TAB-$HEADING-$NOCR2\" \nX\t \t\t done ; echo \"\" )\nX\t\t\tfi |\nX\t\t\tawk -F\"\t\" \\\nX\t\t\t\"$WHERE { cnt += 1 ; printf \\\"$PRINTF\\\\n\\\", $FIELDS }\nX\t\t\tEND\t{ printf \\\"( %1d rows)\\\\n\\\", (cnt - 1) \\\nX\t\t\t>\\\"/tmp/$$row\\\" }\" - $TABLEFILE | eval \"$DIST\" \\\nX\t\t\t\t&& if [ \"$DISTINCT\" = \"\" ]\nX\t\t\t\t then\tcat /tmp/$$row\nX\t\t\t\t else if [ \"$QUIET\" = \"\" ]\nX\t\t\t\t\tthen\tX=`expr \\`cat /tmp/$$row|wc -l\\` - 1`\nX\t\t\t\t\telse\tX=`expr \\`cat /tmp/$$row|wc -l\\``\nX\t\t\t\t\tfi\nX\t\t\t\t\techo '('$X' rows)' \nX\t\t\t\tfi\nX\t\tfi\nX\tdone\nX\treturn 0\n}\t\nX\n#\f\n#**************************************************************************\n# main\n#**************************************************************************\nwhile :\ndo\nX\twhile :\nX\tdo\nX\t\techo $NOCR1 \"* $NOCR2\"\nX\t\tread LINE || exit \nX\t\tSQLPART=\"`expr \"$LINE\" : '\\(..*\\)/. *$'`\"\nX\t\tif [ \"$SQLPART\" != \"\" ]\nX\t\tthen\nX\t\t\t[ \"$NEW\" = \"Y\" ] && _CMD=\"\"\nX\t\t\tif [ \"`expr \"$LINE\" : '.*/p/g *$'`\" -ne 0 ]\nX\t\t\tthen\nX\t\t\t\t_CMD=\"$_CMD\"`expr \"$LINE\" : '\\(.*\\)/p/g *$'`\"$NL\"\nX\t\t\t\tLINE=\"/p/g\"\nX\t\t\t\tNEW=\"\"\nX\t\t\telse\nX\t\t\t\t_CMD=\"$_CMD\"\"$SQLPART\"\"$NL\"\nX\t\t\t\tLINE=\"`expr \"$LINE\" : '.*\\(/.\\) *$'`\"\nX\t\t\t\tNEW=\"\"\nX\t\t\tfi\nX\t\tfi\nX \t\tcase \"$LINE\" in \nX\t\t\t/p|p) echo \"$_CMD\";;\nX\t\t\t/g|g) break;;\nX\t\t\t/p/g|pg) echo \"$_CMD\" ; break ;;\nX\t\t\t/r|r) echo \"reset\" ; _CMD=\"\";;\nX\t\t\t/s|s) umask $UMASK ; $SHELL ; umask 0000;;\nX\t\t\t/e|e) umask $UMASK ; echo \"$_CMD\" > /tmp/$$\nX\t\t\t\t$EDITOR /tmp/$$; _CMD=\"`cat /tmp/$$`\"\nX\t\t\t\tumask 0000;;\nX\t\t\t/i|i) echo $NOCR1 \"Enter include file: $NOCR2\" \nX\t\t\t\tread LINE \nX\t\t\t\t[ -f \"$LINE\" ] && _CMD=\"$_CMD`cat $LINE`$NL\" &&\nX\t\t\t\techo \"$LINE included\";;\nX\t\t\t/w|w) echo $NOCR1 \"Enter output file: $NOCR2\" \nX\t\t\t\tread LINE \nX\t\t\t\t[ \"$LINE\" ] && umask $UMASK && \nX\t\t\t\techo \"$_CMD\" > \"$LINE\" && umask 0000 && \nX\t\t\t\techo \"$LINE written\";;\nX\t\t\t/q|q) exit 0;; \nX\t\t\t\\#*) \t[ \"$NEW\" = \"Y\" ] && _CMD=\"\" ;;\nX\t\t\t*) \t[ \"$NEW\" = \"Y\" ] && _CMD=\"\"\nX\t\t\t \t_CMD=\"$_CMD$LINE$NL\";;\nX\t\tesac\nX\t\tNEW=\"\"\nX\tdone\nX\nX\tCMD=`echo \"$_CMD\" | sed \\\nX\t\t-e \"s/'/\\\"/g\" \\\nX\t\t-e 's/\\\"\\([^\\\"]*\\)\\\"/\\\"\\\\\\\"\\1\\\\\\\"\\\"/g' \\\nX\t\t-e 's/\\([<>!=][<>!=]*\\)/ \\1 /g' \\\nX\t\t-e 's/</\\\\\\</g' \\\nX\t\t-e 's/>/\\\\\\>/g' \\\nX\t\t-e 's/\\*/\\\\\\*/g' \\\nX\t\t-e 's/(/ \\\\\\( /g' \\\nX\t\t-e 's/)/ \\\\\\) /g'`\nX\t[ ! 
\"$CMD\" ] && continue\nX\tIFS=\"$_IFS,\"\nX\teval set X $CMD\nX\tshift\nX\tIFS=\"$_IFS\"\nX\tNEW=\"Y\"\nX\tcase $1 in\nX\t\tselect) \tselect_ \"$@\";; \nX\t\tcreate) \tcreate \"$@\";;\nX\t\tdelete) \tdelete \"$@\";;\nX\t\tdrop) \t\tdrop \"$@\";;\nX\t\tinsert) \tinsert \"$@\";;\nX\t\tupdate) \tupdate \"$@\";;\nX\t\tedit)\t\t[ \"$2\" ] && $EDITOR $2@;;\nX\t\thelp)\t\thelp \"$@\";;\nX\t\tprint)\t\tselect_ \"select\" '*' \"from\" \"$2\";;\nX\t\t*) \t\techo \"Missing or unrecognized command.\" 1>&2 ;;\nX\tesac\ndone\nSHAR_EOF\n $shar_touch -am 0709174495 'shql' &&\n chmod 0755 'shql' ||\n echo 'restore of shql failed'\n shar_count=\"`wc -c < 'shql'`\"\n test 20727 -eq \"$shar_count\" ||\n echo \"shql: original size 20727, current size $shar_count\"\nfi\nexit 0", "msg_date": "Mon, 7 Jun 1999 19:03:11 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "Hi!\n\nOn Mon, 7 Jun 1999, Kaare Rasmussen wrote:\n> Don't laugh. There was once a database based on Unix standard tools. I\n> read about it 10 or 15 years ago. I believe it was called rdb. Maybe it\n> still exists?\n\n Yet Another One is called NOSQL (no, it is not using SQL, hence the name :)\nand you may find it on freshmeat. I tried it once...\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Tue, 8 Jun 1999 12:02:13 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" } ]
[ { "msg_contents": "hi all,\n\nI want to build subsubtotals of a column (rechnr, for example). this\nfield should be incremented, whenever the field \"baust\" changes.\nI'm able to increment a field by a function:\n\ncreate function add_intsum(varchar,int8) returns int8\n as' update sum_table set intsum=((intsum )+ ($2))where key1 = $1;\n select max(intsum) from sum_table where key1 = $1;'\nlanguage 'sql';\n\nsum_table ist delared as: \ncreate table sum_table(key1 varchar,moneysum money,realsum real,intsum\nint8);\ninsert into sum_table values('baust','0',0,0);\ninsert into sum_table values('contanz', '0',0,0);\ninsert into sum_table values('rechnr', '0',0,0);\n\nnow \"select add_intsum('rechnr',1);\" will increment \"rechnr\" in\n\"sum_table\" and return this field. this works - but there are 2 problems:\n\na) its not very fast\nb) I can't do a conditional-increment, depending on the value \n\tof another field\n\nso I've done the following:\n\nint4 rechnr=0;\nint4 baust=0;\n\nint4 add_rechnr(int4 val)\n{rechnr=rechnr+val;return rechnr;}\n\nint4 add_baust(int4 val)\n{baustelle=baust+val;return baust;}\n\nint4 init_rechnr(int4 val)\n{rechnr=val;return rechnr;}\n\nint4 init_baust(int4 val)\n{baust=val;return baust;}\n\n\nthis works and I'm able to include conditions into the c-code. But what I\nnot fully understand is the followinf behaviour:\n\nCREATE FUNCTION add_rechnr(int4) RETURNS int4\n AS '/usr/local/pgsql/lib/modules/funcs.so' LANGUAGE 'c';\n\nwhenever I add a value to rechnr, the correct result is returned. but if I\ncopy a new file \"funcs.so\" into the direcotry while the backend is\nrunning and a connection is established, I'll get this error:\n\npqReadData() -- backend closed the channel unexpectedly.\n.....\n\nSo I suppose, the backend will load the lib whenever a lib-function is\ninvoced. But if so, why does he remember the last value of the variables\ndefined in this lib?\n\nAny hints ?\n\nbest regards\n\nwicki ;*)\n\n\n\n", "msg_date": "Sat, 5 Jun 1999 07:01:11 +0000 (GMT)", "msg_from": "\"Victoria W.\" <[email protected]>", "msg_from_op": true, "msg_subject": "livetime of a variable defined in a c-procedure (fwd)" }, { "msg_contents": "\"Victoria W.\" <[email protected]> writes:\n> CREATE FUNCTION add_rechnr(int4) RETURNS int4\n> AS '/usr/local/pgsql/lib/modules/funcs.so' LANGUAGE 'c';\n\n> whenever I add a value to rechnr, the correct result is returned. but if I\n> copy a new file \"funcs.so\" into the direcotry while the backend is\n> running and a connection is established, I'll get this error:\n\n> pqReadData() -- backend closed the channel unexpectedly.\n\nOverwriting a shared library that's in active use is a no-no on many\nflavors of Unix. (The one I use won't even let you do it --- you get\na \"text file busy\" error if you try to delete or modify an open SL.)\n\nI don't think there is an \"unload shared library\" function in Postgres\n(it'd be hard or impossible to implement on some Unixes, anyway). So\nthe only way to stop referencing a shared library once it's been opened\nis to terminate the backend process.\n\nIn short, if you've designed a solution that requires you to constantly\nmodify the shared library, you'd better look for another solution ---\nor accept a lot of backend restarts.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 05 Jun 1999 14:33:03 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] livetime of a variable defined in a c-procedure (fwd) " } ]
[ { "msg_contents": "Hiroshi Inoue wrote:\n> \n> I have downloaded snapshot(99/06/04) and retried.\n> But the result didn't change.\n\nI run Hiroshi' test locally and got the same ERROR.\nNow I see that my changes in xact.c can't fix\nXmaxRecent vacuum problem, though they avoid ability\nto get the same tuple twice. \n\nI need in additional time... Jun 9th...\n\nVadim\n", "msg_date": "Sat, 05 Jun 1999 22:39:26 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Open 6.5 items" } ]
[ { "msg_contents": "I have been puzzling out the coding in _bt_binsrch() in\nbackend/access/nbtree/nbtsearch.c, with an eye to speeding it up for\nthe many-equal-keys case. I have finally worked out exactly what it's\ndoing, to wit:\n\n * On a leaf page, we always return the first key >= scan key\n * (which could be the last slot + 1).\n *\n * On a non-leaf page, there are special cases:\n *\n * For an insertion (srchtype != BT_DESCENT and natts == keysz)\n * always return first key >= scan key (which could be off the end).\n *\n * For a standard search (srchtype == BT_DESCENT and natts == keysz)\n * return the first equal key if one exists, else the last lesser key\n * if one exists, else the first slot on the page.\n *\n * For a partial-match search (srchtype == BT_DESCENT and natts < keysz)\n * return the last lesser key if one exists, else the first slot.\n\nThis strikes me as a tad bizarre --- in particular, the discrepancy\nbetween treatment of equal keys in the normal and partial search cases.\n\nI think I understand why the partial-match code works that way: there\ncould be matching keys in the sub-page belonging to the last lesser key.\nFor example, if our scan target is (x = 2) and we have internal keys\n\t(x = 1, y = 2)\n\t(x = 2, y = 1)\nthen we need to look at the first key's subpages, where we might find\nmatching keys like (x = 2, y = 0).\n\nThe full-width case appears to assume that that can't happen: if we\nhave a given key in an upper page, there can be *no* equal keys in\nsubpages to its left. That's a rather strong assumption about how\npage splitting is done; is it correct?\n\nEven more to the point, *should* it be correct? If we always returned\nthe last lesser key, then the code would work for any correctly\nsequenced b-tree, but this code looks like it works only if splits occur\nonly at the leftmost of a group of equal keys. If there are a lot of\nequal keys, that could result in a badly unbalanced tree, no? Maybe\nthat's the real reason why performance seems to be so bad for many\nequal keys... maybe the split algorithm needs to be relaxed?\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 05 Jun 1999 20:40:50 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": true, "msg_subject": "Bizarre coding in _bt_binsrch" }, { "msg_contents": "Tom Lane wrote:\n> \n> The full-width case appears to assume that that can't happen: if we\n> have a given key in an upper page, there can be *no* equal keys in\n> subpages to its left. That's a rather strong assumption about how\n> page splitting is done; is it correct?\n> \n> Even more to the point, *should* it be correct? If we always returned\n> the last lesser key, then the code would work for any correctly\n> sequenced b-tree, but this code looks like it works only if splits occur\n> only at the leftmost of a group of equal keys. If there are a lot of\n> equal keys, that could result in a badly unbalanced tree, no? Maybe\n> that's the real reason why performance seems to be so bad for many\n> equal keys... maybe the split algorithm needs to be relaxed?\n\nOur btree-s use Lehman-Yao algorithm which works in assumption\nthat there is no duplicates at all. It's just reminding.\nIt was ~ 2 years ago when I changed duplicates handling\nto fix some rare bugs (this is why you see BTP_CHAIN stuff\nthere) and now I don't remember many things and so I can't\ncomment. But after I changed btree-s I learned how Oracle\nhandles duplicates problem - it just uses heap tuple id\nas (last) part of index key! So simple! 
Unfortunately,\nI had not time to re-implement btree-s in this way.\nBut this would:\n\n1. get rid of duplicates problem;\n2. simplify code (BTP_CHAIN stuff would be removed);\n3. order index tuples in such way that in scan heap pages \n would be read sequentially (from up of file to down);\n4. speed up finding index tuple which corresponds to heap one\n (good for index cleaning up).\n\nThe main problem is just programistic: you will have to add\nheap tid to the end of index tuples on internal index pages,\nbut on leaf pages heap tid is in the begin of index tuples\n(inside of btitem struct).\n\nSo, if you're going to change btree, please consider ability\nto implement above.\n\nVadim\n", "msg_date": "Sun, 06 Jun 1999 21:32:36 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch" }, { "msg_contents": "\nTom, I assume you have dealt with this, right?\n\n> I have been puzzling out the coding in _bt_binsrch() in\n> backend/access/nbtree/nbtsearch.c, with an eye to speeding it up for\n> the many-equal-keys case. I have finally worked out exactly what it's\n> doing, to wit:\n> \n> * On a leaf page, we always return the first key >= scan key\n> * (which could be the last slot + 1).\n> *\n> * On a non-leaf page, there are special cases:\n> *\n> * For an insertion (srchtype != BT_DESCENT and natts == keysz)\n> * always return first key >= scan key (which could be off the end).\n> *\n> * For a standard search (srchtype == BT_DESCENT and natts == keysz)\n> * return the first equal key if one exists, else the last lesser key\n> * if one exists, else the first slot on the page.\n> *\n> * For a partial-match search (srchtype == BT_DESCENT and natts < keysz)\n> * return the last lesser key if one exists, else the first slot.\n> \n> This strikes me as a tad bizarre --- in particular, the discrepancy\n> between treatment of equal keys in the normal and partial search cases.\n> \n> I think I understand why the partial-match code works that way: there\n> could be matching keys in the sub-page belonging to the last lesser key.\n> For example, if our scan target is (x = 2) and we have internal keys\n> \t(x = 1, y = 2)\n> \t(x = 2, y = 1)\n> then we need to look at the first key's subpages, where we might find\n> matching keys like (x = 2, y = 0).\n> \n> The full-width case appears to assume that that can't happen: if we\n> have a given key in an upper page, there can be *no* equal keys in\n> subpages to its left. That's a rather strong assumption about how\n> page splitting is done; is it correct?\n> \n> Even more to the point, *should* it be correct? If we always returned\n> the last lesser key, then the code would work for any correctly\n> sequenced b-tree, but this code looks like it works only if splits occur\n> only at the leftmost of a group of equal keys. If there are a lot of\n> equal keys, that could result in a badly unbalanced tree, no? Maybe\n> that's the real reason why performance seems to be so bad for many\n> equal keys... maybe the split algorithm needs to be relaxed?\n> \n> \t\t\tregards, tom lane\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 29 Nov 1999 17:24:37 -0500 (EST)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> Tom, I assume you have dealt with this, right?\n\n>> I have been puzzling out the coding in _bt_binsrch() in\n>> backend/access/nbtree/nbtsearch.c, with an eye to speeding it up for\n>> the many-equal-keys case.\n\nI tweaked the code to go faster in the equal-keys case, but Vadim later\npointed out that what we *really* should do is force the algorithms to\nnever consider two index keys equal (eg, by including the heap tuple id\nas the last part of the comparison key). See his pgsql-hackers message\ndated 06 Jun 1999 21:32:36 +0800. Getting the full benefit would\nrequire ripping out the BTP_CHAIN logic and doing some other major\nsurgery, so I don't feel like I know the btree code well enough to\ntackle it. It should be on the TODO list though:\n\n* Include heap CTID in btree index keys, remove equal-key cruft from btree\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 29 Nov 1999 21:07:12 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch " }, { "msg_contents": "> Bruce Momjian <[email protected]> writes:\n> > Tom, I assume you have dealt with this, right?\n> \n> >> I have been puzzling out the coding in _bt_binsrch() in\n> >> backend/access/nbtree/nbtsearch.c, with an eye to speeding it up for\n> >> the many-equal-keys case.\n> \n> I tweaked the code to go faster in the equal-keys case, but Vadim later\n> pointed out that what we *really* should do is force the algorithms to\n> never consider two index keys equal (eg, by including the heap tuple id\n> as the last part of the comparison key). See his pgsql-hackers message\n> dated 06 Jun 1999 21:32:36 +0800. Getting the full benefit would\n> require ripping out the BTP_CHAIN logic and doing some other major\n> surgery, so I don't feel like I know the btree code well enough to\n> tackle it. It should be on the TODO list though:\n> \n> * Include heap CTID in btree index keys, remove equal-key cruft from btree\n\nThanks. That's what I needed.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 29 Nov 1999 21:13:23 -0500 (EST)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch" } ]
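The heap-TID idea in the thread above is worth making concrete. Below is a minimal, self-contained C sketch, not the actual nbtree code, of what Vadim proposes: treat (user key, heap tuple id) as the effective index key, so no two entries ever compare as equal and a plain "first entry >= probe" binary search handles duplicates with no special cases. TidStub, EntryStub, entry_cmp and first_geq are invented names for the illustration.

/*
 * Illustrative sketch only, not the actual nbtree code.  It shows the
 * idea from the thread: treat (user key, heap tuple id) as the effective
 * index key, so no two index entries ever compare as equal and a plain
 * "first entry >= probe" binary search handles duplicates with no special
 * cases.  TidStub, EntryStub, entry_cmp and first_geq are invented names.
 */
#include <stdio.h>

typedef struct TidStub              /* stand-in for ItemPointerData */
{
    unsigned int    blkno;          /* heap block number */
    unsigned short  offno;          /* line pointer within the block */
} TidStub;

typedef struct EntryStub            /* stand-in for a leaf index entry */
{
    int     key;                    /* user-visible key column */
    TidStub tid;                    /* heap tuple id, used as the last key part */
} EntryStub;

static int
tid_cmp(const TidStub *a, const TidStub *b)
{
    if (a->blkno != b->blkno)
        return (a->blkno < b->blkno) ? -1 : 1;
    if (a->offno != b->offno)
        return (a->offno < b->offno) ? -1 : 1;
    return 0;
}

static int
entry_cmp(const EntryStub *a, const EntryStub *b)
{
    if (a->key != b->key)
        return (a->key < b->key) ? -1 : 1;
    return tid_cmp(&a->tid, &b->tid);       /* the heap tid breaks every tie */
}

/* Return the index of the first entry >= probe (may be nentries). */
static int
first_geq(const EntryStub *entries, int nentries, const EntryStub *probe)
{
    int     lo = 0,
            hi = nentries;

    while (lo < hi)
    {
        int     mid = (lo + hi) / 2;

        if (entry_cmp(&entries[mid], probe) < 0)
            lo = mid + 1;
        else
            hi = mid;
    }
    return lo;
}

int
main(void)
{
    /* Three entries share key = 2; their tids keep them strictly ordered. */
    EntryStub   page[] = {
        {1, {10, 3}}, {2, {4, 1}}, {2, {4, 7}}, {2, {9, 2}}, {5, {1, 1}}
    };
    EntryStub   probe = {2, {0, 0}};    /* smallest possible tid for key 2 */
    int         n = (int) (sizeof(page) / sizeof(page[0]));
    int         pos = first_geq(page, n, &probe);

    printf("first entry with key >= 2 is slot %d (tid %u/%u)\n",
           pos,
           (unsigned) page[pos].tid.blkno,
           (unsigned) page[pos].tid.offno);
    return 0;
}

Because the tiebreaker is the heap tuple id, entries with equal user keys come back in heap order, which is what gives Vadim's point 3 (sequential heap reads during a scan) and point 4 (finding the index entry for a given heap tuple by direct descent).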
[ { "msg_contents": "Hello\n\nhave somebody a De-Compiler for C++ or PASCAL\nPLEASE give it to me \n\n\n\n\nMr. Boom from Germany\n", "msg_date": "Sun, 06 Jun 1999 09:59:08 +0200", "msg_from": "Axel Thomas <[email protected]>", "msg_from_op": true, "msg_subject": "!!!!I NEED A DE-COMPILER!!!!" } ]
[ { "msg_contents": "Hello!\n\nPostgreSQL 6.4.2.\n\nran=> create table t (some_number int4, some_string text);\nCREATE\nran=> insert into t values(-1,'a');\nINSERT 20362 1\nran=> insert into t values(1,'d');\nINSERT 20363 1\nran=> create view v as select * from t where some_number>=0;\nCREATE\nran=> insert into v values (17,'q');\nINSERT 20375 1\nran=> select * from t;\nsome_number|some_string\n-----------+-----------\n -1|a \n 1|d \n(2 rows)\n\n What is 20375? Is it real OID of dummy row?\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Sun, 6 Jun 1999 18:08:22 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": true, "msg_subject": "INSERT into VIEW" }, { "msg_contents": ">\n> Hello!\n>\n> PostgreSQL 6.4.2.\n>\n> ran=> create table t (some_number int4, some_string text);\n> CREATE\n> ran=> insert into t values(-1,'a');\n> INSERT 20362 1\n> ran=> insert into t values(1,'d');\n> INSERT 20363 1\n> ran=> create view v as select * from t where some_number>=0;\n> CREATE\n> ran=> insert into v values (17,'q');\n> INSERT 20375 1\n> ran=> select * from t;\n> some_number|some_string\n> -----------+-----------\n> -1|a\n> 1|d\n> (2 rows)\n>\n> What is 20375? Is it real OID of dummy row?\n\n It's the OID of the real row that got stored into relation v.\n\n Do a\n\n DROP RULE \"_RETv\";\n SELECT * FROM v;\n\n and you'll get back your (17,'q') row.\n\n Maybe you should read now some chapters in the programmers\n manual about the implementation of views, the rule system\n etc.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Sun, 6 Jun 1999 21:31:23 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] INSERT into VIEW" } ]
[ { "msg_contents": "I've been following this thread via Usenet and wanted to weigh in as a\nlarge application developer using Postgresql on Linux. I want to thank\neveryone for their wonderful work on getting the database to where it is\ntoday. I haven't seen the following discussed at length anywhere and\nhopefully it's not because I've overlooked some key functionality that\nalready does this...\n\nWith Postgresql, the biggest handicap I run into in day-to-day development\n(as compared to the numerous other database I deal with) is the lack of\nstored procedures that return rows (a la Sybase, MS SQL, etc). These\nprocedures are pre-compiled (pre-planned) and would execute faster than\nfeeding the commands to the parser one at a time and performing any\nconditional logic on the front-end. The ability to store logic equivalent\nto (please forgive any syntax mistakes)...\n\nIF EXISTS(SELECT 1 FROM tblFoo where checkFlag = 1)\n SELECT col1, col2, col3 FROM tblFoo where checkFlag = 1\nELSE\nBEGIN\n SELECT 'No rows were found.' AS errorStr\n RETURN -1\nEND\nRETURN 0\n\nor\n\nBEGIN TRANSACTION\n\nIF EXISTS(SELECT 1 FROM tblFoo WHERE uniqueID = @idparam)\nBEGIN\n\tUPDATE tblFoo SET col1 = @col1, col2 = @col2\n\t\tWHERE uniqueID = @idparam\n\n\tIF @error != 0\n\tBEGIN\n\t\tROLLBACK TRANSACTION\n\t\tSELECT \"Unable to update record.\" AS errorStr\n\t\tRETURN -1\n\tEND\nEND\nELSE BEGIN\n\tINSERT INTO tblFoo (col1, col2) VALUES (@col1, @col2)\n\n\tIF @error != 0\n\tBEGIN\n\t\tROLLBACK TRANSACTION\n\t\tSELECT \"Unable to insert record.\" AS errorStr\n\t\tRETURN -1\n\tEND\n\n\tSELECT @idparam = @@identity\nEND\n\nCOMMIT TRANSACTION\nSELECT @idparam AS rowID\nRETURN 0\n\ninto some sort of compiled procedure at the database would be tremendously\nuseful. Plus, most of the execution time for some multiway joins seems to\nbe spent on the planning of the command, not the actual doing - time which\ncan be recaptured by compiling a procedure once (and perhaps after every\nVACUUM ANALYZE).\n\nThe procedures would also help developers centralize their code across\nplatforms. My application runs PHP for the web interface and Perl for\nbehind-the-scenes processing. I have to re-implement/re-write the same\ncode to do the same thing - once under Perl and once under PHP and\nmaintain both. With stored procedures that return multiple rows, I could\nsimply put that code in the database and simplify my PHP and Perl code by\nan order of magnitude (not a trivial thing for interpreted languages).\n\nFinally, implementing stored procedures using the same language constructs\nas MS SQL and Sybase (virtually identical) would allow existing developers\nto write new applications and/or port existing applications to Postgresql. \nEven if we weren't to add the same language but simply add the ability to\nreturn multiple rows to the existing stored function languages, it would\nbe a giant step forward for myself and other application developers.\n\nThanks for your time...\n\n- K\n\nKristofer Munn * http://www.munn.com/~kmunn/ * ICQ# 352499 * AIM: KrMunn \n\n", "msg_date": "Sun, 6 Jun 1999 14:06:04 -0400 (EDT)", "msg_from": "Kristofer Munn <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Priorities for 6.6" } ]
[ { "msg_contents": "I've been following this thread via Usenet and wanted to weigh in as a\nlarge application developer using Postgresql on Linux. I want to thank\neveryone for their wonderful work on getting the database to where it is\ntoday. I haven't seen the following discussed at length anywhere and\nhopefully it's not because I've overlooked some key functionality that\nalready does this...\n\nWith Postgresql, the biggest handicap I run into in day-to-day development\n(as compared to the numerous other database I deal with) is the lack of\nstored procedures that can return multiple rows (a la Sybase, MS SQL,\netc). These procedures are pre-compiled (pre-planned) and would execute\nfaster than feeding the commands to the parser one at a time and\nperforming any conditional logic on the front-end. The ability to store\nthe equivalent of (please forgive any syntax mistakes)...\n\nIF EXISTS(SELECT 1 FROM tblFoo where checkFlag = 1)\n SELECT col1, col2, col3 FROM tblFoo where checkFlag = 1\nELSE\nBEGIN\n SELECT 'No rows were found.' AS errorStr\n RETURN -1\nEND\nRETURN 0\n\nor\n\nBEGIN TRANSACTION\n\nIF EXISTS(SELECT 1 FROM tblFoo WHERE uniqueID = @idparam)\nBEGIN\n\tUPDATE tblFoo SET col1 = @col1, col2 = @col2\n\t\tWHERE uniqueID = @idparam\n\n\tIF @error != 0\n\tBEGIN\n\t\tROLLBACK TRANSACTION\n\t\tSELECT \"Unable to update record.\" AS errorStr\n\t\tRETURN -1\n\tEND\nEND\nELSE BEGIN\n\tINSERT INTO tblFoo (col1, col2) VALUES (@col1, @col2)\n\n\tIF @error != 0\n\tBEGIN\n\t\tROLLBACK TRANSACTION\n\t\tSELECT \"Unable to insert record.\" AS errorStr\n\t\tRETURN -1\n\tEND\n\n\tSELECT @idparam = @@identity\nEND\n\nCOMMIT TRANSACTION\nSELECT @idparam AS rowID\nRETURN 0\n\ninto some sort of compiled procedure at the database would be tremendously\nuseful. Plus, most of the execution time for some multiway joins seems to\nbe spent on the planning of the command, not the actual doing - time which\ncan be recaptured by compiling a procedure once (and perhaps after every\nVACUUM ANALYZE).\n\nThe procedures would also help developers centralize their code across\nplatforms. My application runs PHP for the web interface and Perl for\nbehind-the-scenes processing. I have to re-implement/re-write the same\ncode to do the same thing - once under Perl and once under PHP and\nmaintain both. With stored procedures that return multiple rows, I could\nsimply put that code in the database and simplify my PHP and Perl code by\nan order of magnitude (not a trivial thing for interpreted languages).\n\nFinally, implementing stored procedures using the same language constructs\nas MS SQL and Sybase (virtually identical) would allow existing developers\nto write new applications and/or port existing applications to Postgresql. \nEven if we weren't to add the same language but simply add the ability to\nreturn multiple rows to the existing stored function languages, it would\nbe a giant step forward for myself and other application developers.\n\nThanks for your time...\n\n- K\n\nKristofer Munn * http://www.munn.com/~kmunn/ * ICQ# 352499 * AIM: KrMunn \n\n", "msg_date": "Sun, 6 Jun 1999 14:27:15 -0400 (EDT)", "msg_from": "Kristofer Munn <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Priorities for 6.6" } ]
[ { "msg_contents": "Greetings once again. I've been following recent threads on problems with\nlarge objects and the possibility of plans for unlimited-size tuples in\nversion 6.6 (as a replacement). I was wondering if there was a list of\nall the Large Object known problems/limitations/drawbacks as of 6.5. \nThis would include everything from any remaining memory leaks to the\nperformance hits resulting from having all the files in one directory to\nthe lack of a way to get a list of all existing LOs for dumping (or\ncleanup). I like to think I've caught all the relevant concerns (I use\nLarge Objects extensively in a large application I will be deploying) but\nit would be nice to have such a list as well as perhaps linking them to\nthe documentation.\n\nThanks...\n\n- K\n\nKristofer Munn * http://www.munn.com/~kmunn/ * ICQ# 352499 * AIM: KrMunn \n\n", "msg_date": "Sun, 6 Jun 1999 19:21:40 -0400 (EDT)", "msg_from": "Kristofer Munn <[email protected]>", "msg_from_op": true, "msg_subject": "LO Problem List (Can we get one)" }, { "msg_contents": "Kristofer Munn <[email protected]> writes:\n> I was wondering if there was a list of\n> all the Large Object known problems/limitations/drawbacks as of 6.5. \n\nNo such list that I know of, but it would be useful to have one. Are\nyou volunteering ;-) ?\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 10:49:09 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] LO Problem List (Can we get one) " }, { "msg_contents": "> Kristofer Munn <[email protected]> writes:\n> > I was wondering if there was a list of\n> > all the Large Object known problems/limitations/drawbacks as of 6.5. \n> \n> No such list that I know of, but it would be useful to have one. Are\n> you volunteering ;-) ?\n\nTODO list. They should all be there under large object heading.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 11:32:50 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] LO Problem List (Can we get one)" } ]
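One concrete workaround for the cleanup/dump item raised above: in this era each large object is stored in its own relation named xinv<oid> (the TODO list below also mentions the xinv prefix), so a client can at least enumerate candidates by querying pg_class. A minimal libpq sketch under that assumption follows; the connection string is a placeholder and the naming convention should be confirmed against the installed version.

/*
 * Minimal libpq sketch: list candidate large objects by looking for the
 * xinv<oid> relations they are stored in under this era's naming scheme.
 * The connection string is a placeholder; adjust dbname/host as needed.
 */
#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
    PGconn     *conn = PQconnectdb("dbname=mydb");
    PGresult   *res;
    int         i;

    if (PQstatus(conn) != CONNECTION_OK)
    {
        fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
        PQfinish(conn);
        return 1;
    }

    res = PQexec(conn,
                 "SELECT relname FROM pg_class "
                 "WHERE relname LIKE 'xinv%' ORDER BY relname");
    if (PQresultStatus(res) != PGRES_TUPLES_OK)
    {
        fprintf(stderr, "query failed: %s", PQerrorMessage(conn));
        PQclear(res);
        PQfinish(conn);
        return 1;
    }

    for (i = 0; i < PQntuples(res); i++)
        printf("large object relation: %s\n", PQgetvalue(res, i, 0));

    PQclear(res);
    PQfinish(conn);
    return 0;
}

The digits after the xinv prefix are the large object's OID, so a cleanup tool can compare this list against the OIDs the application still references and lo_unlink() the orphans.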
[ { "msg_contents": "I have updated the TODO list, rearranging it. Here is the top of the\n\"Enhancements\" list:\n\n * Add referential integrity\n * Add OUTER joins, left and right(Thomas)\n * Allow long tuples by chaining or auto-storing outside db (chaining,large objs)\n * Fix memory leak for expressions?, aggregates?\n\nAnd the top of \"Performance\" is:\n\n * Allow transaction commits with rollback with no-fsync performance\n * Prevent fsync in SELECT-only queries\n\nThis should help. Did I get all the big items just discussed?\n\n---------------------------------------------------------------------------\n\n\nTODO list for PostgreSQL\n========================\nLast updated:\t\tSun Jun 6 22:08:59 EDT 1999\n\nCurrent maintainer:\tBruce Momjian ([email protected])\n\nThe most recent version of this document can be viewed at\nthe PostgreSQL WWW site, http://www.postgreSQL.org.\n\nA dash(-) marks changes to be in the next release.\n\nDevelopers who have claimed items are:\n-------------------------------------\n\t* Billy is Billy G. Allie <[email protected]>\n\t* Brook is Brook Milligan <[email protected]>\n\t* Bruce is Bruce Momjian<[email protected]>\n\t* Bryan is Bryan Henderson<[email protected]>\n\t* D'Arcy is D'Arcy J.M. Cain <[email protected]>\n\t* David is David Hartwig <[email protected]>\n\t* Edmund is Edmund Mergl <[email protected]>\n\t* Goran is Goran Thyni <[email protected]>\n\t* Hiroshi is Hiroshi Inoue<[email protected]>\n\t* Jan is Jan Wieck <[email protected]>\n \t* Marc is Marc Fournier <[email protected]>\n\t* Massimo Dal Zotto <[email protected]>\n\t* Michael is Michael Meskes <[email protected]>\n\t* Oleg is Oleg Bartunov <[email protected]>\n\t* Peter is Peter T Mount <[email protected]>\n\t* Stefan Simkovics <[email protected]>\n\t* Tatsuo is Tatsuo Ishii <[email protected]>\n\t* Tom is Tom Lane <[email protected]>\n\t* Thomas is Thomas Lockhart <[email protected]>\n\t* TomH is Tom I Helbekkmo <[email protected]>\n\n\t* Vadim is \"Vadim B. Mikheev\" <[email protected]>\n\n\nRELIABILITY\n-----------\n* Overhaul mdmgr/smgr to fix double unlinking and double opens, cleanup\n* Overhaul bufmgr/lockmgr/transaction manager\n* Remove EXTEND?\n* Tables that start with xinv confused to be large objects\n* Two and three dimensional arrays display improperly, missing {}\n* Select a[1] FROM test fails, it needs test.a[1]\n* Update table SET table.value = 3 fails\n* User who can create databases can modify pg_database table\n* Elog() does not free all its memory(Jan)\n* Disallow inherited columns with the same name as new columns\n* Recover or force failure when disk space is exhausted\n* Views containing aggregates sometimes fail(Jan)\n* Alter TABLE ADD COLUMN does not honor DEFAULT, add CONSTRAINT\n* Array index references without table name cause problems\n* Views with spaces in view name fail when referenced\n* Plpgsql does not handle quoted mixed-case identifiers\n* Do not allow bpchar column creation without length\n* INSERT INTO ... 
SELECT with AS columns matching result columns problem\n\n\nENHANCEMENTS\n------------\n* Add referential integrity\n* Add OUTER joins, left and right(Thomas)\n* Allow long tuples by chaining or auto-storing outside db (chaining,large objs)\n* Fix memory leak for expressions?, aggregates?\n\nExotic features:\n* Add sql3 recursive unions\n* Add the concept of dataspaces\n* Add replication of distributed databases\n* Allow queries across multiple databases\n\nAdmin:\n* Better interface for adding to pg_group\n* More access control over who can create tables and access the database\n* Add syslog functionality\n* Allow elog() to return error codes, not just messages\n* Allow international error message support and add error codes\n* Generate postmaster pid file and remove flock/fcntl lock code\n* Add ability to specifiy location of lock/socket files\n\nTypes:\n* Add BIT, BIT VARYING\n* Nchar (as distinguished from ordinary varchar),\n* Domain capability\n* Add STDDEV/VARIANCE() function for standard deviation computation/variance\n* Allow compression of large fields or a compressed field type\n* Large objects\n\t* Fix large object mapping scheme, own typeid or reltype(Peter)\n\t* Allow large text type to use large objects(Peter)\n\t* Not to stuff everything as files in a single directory\n* Allow pg_descriptions when creating types, tables, columns, and functions\n* Add IPv6 capability to INET/CIDR types\n* Make a separate SERIAL type?\n* Store binary-compatible type information in the system\n* Allow user to define char1 column\n* Add support for & operator\n\nViews:\n* Allow DISTINCT on views\n* Allow views of aggregate columns\n* Allow views with subselects\n\n* Allow subqueries in target list\n* Put sort files, large objects in their on directory\n* Do autocommit so always in a transaction block\n* Show location of syntax error in query\n* Redesign the function call interface to handle NULLs better(Jan)\n* Document/trigger/rule so changes to pg_shadow create pg_pwd\n* Missing optimizer selectivities for date, etc.\n\nIndexes:\n* Allow CREATE INDEX zman_index ON test (date_trunc( 'day', zman ) datetime_ops)\n\tfails index can't store constant parameters\n* Allow creation of functional indexes to use default types\n* Permissions on indexes - prevent them?\n* Allow SQL function indexes\n* Add FILLFACTOR to index creation\n\nCommands:\n* ALTER TABLE ADD COLUMN to inherited table put column in wrong place\n* Add ALTER TABLE DROP/ALTER COLUMN feature\n* Allow CLUSTER on all tables at once, and improve CLUSTER\n* Generate error on CREATE OPERATOR of ~~, ~ and and ~*\n* Add SIMILAR TO to allow character classes, 'pg_[a-c]%'\n* Auto-destroy sequence on DROP of table with SERIAL\n* Allow LOCK TABLE tab1, tab2, tab3 so all tables locked in unison\n* Allow INSERT/UPDATE of system-generated oid value for a row\n* Allow ESCAPE '\\' at the end of LIKE for ANSI compliance\n* Rewrite the LIKE handling by rewriting the user string with the \n\tsupplied ESCAPE\n* Move LIKE index optimization handling to the optimizer\n \nClients:\n* Make NULL's come out at the beginning or end depending on the \n\tORDER BY direction\n* Allow flag to control COPY input/output of NULLs\n* Update reltuples from COPY command\n* Allow psql \\copy to allow delimiters\n* Add a function to return the last inserted oid, for use in psql scripts\n* Allow psql to print nulls as distinct from \"\"(?)\n\n* Increase identifier length(NAMEDATALEN) if small performance hit\n* Allow row re-use without vacuum, maybe?(Vadim)\n* Add UNIQUE 
capability to non-btree indexes\n* Certain indexes will not shrink, i.e. oid indexes with many inserts\n* Restore unused oid's on backend exit if no one else has gotten oids\n* Have UPDATE/DELETE clean out indexes\n* Allow WHERE restriction on ctid\n* Allow cursors to be DECLAREd/OPENed/CLOSEed outside transactions\n* Allow PQrequestCancel() to terminate when in waiting-for-lock state\n* Transaction log, so re-do log can be on a separate disk\n* Populate backend status area and write program to dump status data\n* Make oid use unsigned int more reliably, pg_atoi()\n\n* Add PL/Perl(Mark Hollomon)\n\n\nPERFORMANCE\n-----------\n* Allow transaction commits with rollback with no-fsync performance\n* Prevent fsync in SELECT-only queries\n\n* Use indexes in ORDER BY for restrictive data sets, min(), max()\n* Pull requested data directly from indexes, bypassing heap data\n* Use index to restrict rows returned by multi-key index when used with\n\tnon-consecutive keys or OR clauses, so fewer heap accesses\n* Use index with constants on functions\n\n* Allow LIMIT ability on single-table queries that have no ORDER BY to use\n\ta matching index\n* Improve LIMIT processing by using index to limit rows processed\n\n* Cache most recent query plan(s?)\n* Shared catalog cache, reduce lseek()'s by caching table size in shared area\n\n* Allow compression of log and meta data\n* Update pg_statistic table to remove operator column\n* Make index creation use psort code, because it is now faster(Vadim)\n* Allow char() not to use variable-sized header to reduce disk size\n* Do async I/O to do better read-ahead of data\n* Fix memory exhaustion when using many OR's\n* Get faster regex() code from Henry Spencer <[email protected]>\n\twhen it is available\n* Use mmap() rather than SYSV shared memory(?)\n* Process const = const parts of OR clause in separate pass\n* Make oid use oidin/oidout not int4in/int4out in pg_type.h\n* Create more system table indexes for faster cache lookups\n* Improve Subplan list handling\n* Allow Subplans to use efficient joins(hash, merge) with upper variable\n\n\nDOCUMENTATION\n-------------\n* Add use of 'const' for varibles in source tree\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 22:43:20 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "6.6 items" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> I have updated the TODO list, rearranging it. Here is the top of the\n> \"Enhancements\" list:\n> \n> * Add referential integrity\n> * Add OUTER joins, left and right(Thomas)\n> * Allow long tuples by chaining or auto-storing outside db (chaining,large objs)\n> * Fix memory leak for expressions?, aggregates?\n> \n> And the top of \"Performance\" is:\n> \n> * Allow transaction commits with rollback with no-fsync performance\n> * Prevent fsync in SELECT-only queries\n> \n> This should help. Did I get all the big items just discussed?\n\nSavepoints.\nError codes.\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 10:59:25 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> > I have updated the TODO list, rearranging it. 
Here is the top of the\n> > \"Enhancements\" list:\n> > \n> > * Add referential integrity\n> > * Add OUTER joins, left and right(Thomas)\n> > * Allow long tuples by chaining or auto-storing outside db (chaining,large objs)\n> > * Fix memory leak for expressions?, aggregates?\n> > \n> > And the top of \"Performance\" is:\n> > \n> > * Allow transaction commits with rollback with no-fsync performance\n> > * Prevent fsync in SELECT-only queries\n> > \n> > This should help. Did I get all the big items just discussed?\n> \n> Savepoints.\n> Error codes.\n\nI tried to pick items that users were complaining about, not items we\nplan to do for 6.6.\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 6 Jun 1999 23:02:51 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> I have updated the TODO list, rearranging it. Here is the top of the\n> \"Enhancements\" list:\n> \n> * Add referential integrity\n> * Add OUTER joins, left and right(Thomas)\n> * Allow long tuples by chaining or auto-storing outside db (chaining,large objs)\n> * Fix memory leak for expressions?, aggregates?\n\nHow hard would it bet to separate PREPARE and EXECUTE for queries \nand provide some way to pass arguments without converting them to some \nascii representation first?\n\nThe main use I need it for is storing small images in ordinary table\nfields.\n\nThe mechanics should already be there as SPI uses it.\n \n----------------\nHannu\n", "msg_date": "Mon, 07 Jun 1999 10:40:36 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> Did I get all the big items just discussed?\n\nEliminating limits on textual query length.\n\n(This is not same thing as increasing tuple size, although in practice\nwe'd want to do both in the same release.)\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 10:58:55 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items " }, { "msg_contents": "Done.\n\n\n> Bruce Momjian <[email protected]> writes:\n> > Did I get all the big items just discussed?\n> \n> Eliminating limits on textual query length.\n> \n> (This is not same thing as increasing tuple size, although in practice\n> we'd want to do both in the same release.)\n> \n> \t\t\tregards, tom lane\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 11:27:34 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> Did I get all the big items just discussed?\n\nMaybe they're too big issues just to add, but when I discuss Postgresql\nwith people responsible for company decisions about databases they\nalways mention\n\n- Scalability. This may come if PostgreSQL INC will introduce\nclusterwide servers\n\n- Security. One main point is the roll forward facility. I think all\nthe commercial db's has this?\n\n- Up time. 
This means hot backup and no locking of the database during\nvacuum.\n\n\n", "msg_date": "Mon, 7 Jun 1999 23:24:29 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" } ]
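On Hannu's PREPARE/EXECUTE point in the thread above: the mechanics he refers to are exposed to backend C functions through SPI, which already separates plan preparation from execution and takes arguments as Datums rather than ASCII strings. The sketch below only compiles inside the backend; the "items" table and the function are hypothetical, and the SPI signatures and old-style C calling convention are quoted from the SPI documentation of this era, so verify the details against the tree before relying on them.

/*
 * Backend-side sketch only: prepare a query once, save the plan, and
 * execute it repeatedly with binary Datum arguments (no ASCII round
 * trip by the caller).  This compiles only against the backend headers;
 * the SPI signatures and the old-style C calling convention are quoted
 * from the SPI documentation of this era and should be verified.  The
 * "items" table and the function itself are hypothetical examples.
 */
#include "postgres.h"
#include "executor/spi.h"
#include "catalog/pg_type.h"        /* INT4OID */

static void *saved_plan = NULL;     /* survives for the life of this backend */

int32
lookup_price(int32 item_id)
{
    Oid     argtypes[1] = {INT4OID};
    Datum   values[1];
    int32   result = 0;

    if (SPI_connect() < 0)
        elog(ERROR, "lookup_price: SPI_connect failed");

    if (saved_plan == NULL)
    {
        void   *plan = SPI_prepare("SELECT price FROM items WHERE id = $1",
                                   1, argtypes);

        if (plan == NULL)
            elog(ERROR, "lookup_price: SPI_prepare failed");
        saved_plan = SPI_saveplan(plan);    /* keep it beyond SPI_finish() */
    }

    values[0] = Int32GetDatum(item_id);

    /* NULL nulls-string means "no argument is NULL"; fetch at most 1 row */
    if (SPI_execp(saved_plan, values, NULL, 1) == SPI_OK_SELECT &&
        SPI_processed > 0)
    {
        bool    isnull;

        result = DatumGetInt32(SPI_getbinval(SPI_tuptable->vals[0],
                                             SPI_tuptable->tupdesc,
                                             1, &isnull));
    }

    SPI_finish();
    return result;
}

Once compiled into a shared library the function would be registered with CREATE FUNCTION ... LANGUAGE 'c' in the usual way; exposing the same prepare/save/execute-with-Datums cycle at the frontend protocol level is the part that is still missing.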
[ { "msg_contents": "\nI am just creating this now...this is to be considered the first *releaes*\ncandidate for v6.5 ...\n\nI expect there to be *no more* commits to the source tree until after a\nformal release is done...\n\nBaring any bugs, this will become the formal release as of 15:30EST on\nWednesday, giving two days for ppl to run her through and make sure\nnothing obvious is outstanding.\n\nConsider the cvs repository *frozen* until I announce otherwise, as I will\nonce more tag things so that we have a -CURRENT vs -STABLE source tree to\nwork with.\n\nThere are to be *no* code changes between now and then, except by having\nsuch changes reviewed by one other committer...about the only thing I'd\nexpect to see is something port specific...\n\nAt this time, I hope everyone will take a second to download the above\n.tar.gz file and run things through to make sure there aren't any\noutstanding problems ...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 7 Jun 1999 00:40:13 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "The Hermit Hacker wrote:\n> \n> I am just creating this now...this is to be considered the first *releaes*\n> candidate for v6.5 ...\n> \n> I expect there to be *no more* commits to the source tree until after a\n> formal release is done...\n\nAh. I wanted to implement proper handling of case when tuple in \nupdate-chain is updated by \"too old\" xaction (part of chain should\nbe deleted) but ok - it's not bug, just unimplemented feature.\n\n(BTW, Mark, I have to update lock.sgml)\n\nHiroshi, please run your test. You'll get NOTICEs instead of ERRORs.\nUnfortunately, I can't get your test finished locally - I always\nget core and gdb says too little:\n\n#0 0x2823452f in strcpy () \n(gdb) bt \n#0 0x2823452f in strcpy () \n#1 0xefbf8944 in ?? () \n#2 0x812d19d in hash_search (hashp=0x1c518bfc, \n keyPtr=0x8901428d <Address 0x8901428d out of bounds>, action=1233853505, \n foundPtr=0x4d89410c <Address 0x4d89410c out of bounds>) at dynahash.c:651 \n#3 0x4d8b0000 in ?? () \nCannot access memory at address 0x3c7. \n(gdb) f 2 \n#2 0x812cfbd in hash_search (hashp=0x1c518bfc, \n keyPtr=0x8901428d <Address 0x8901428d out of bounds>, action=1233853505, \n foundPtr=0x4d89410c <Address 0x4d89410c out of bounds>) at dynahash.c:651 \n651 memmove(destAddr, keyPtr, hctl->keysize); \n ^^^^^^^\n??? memmove shouldn't call strcpy. Something is broken,\nbut maybe due to FreeBSD 3.0 on my box... I'll run test\non 2.2.6 soon.\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 12:11:48 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "On Mon, 7 Jun 1999, Vadim Mikheev wrote:\n\n> The Hermit Hacker wrote:\n> > \n> > I am just creating this now...this is to be considered the first *releaes*\n> > candidate for v6.5 ...\n> > \n> > I expect there to be *no more* commits to the source tree until after a\n> > formal release is done...\n> \n> Ah. I wanted to implement proper handling of case when tuple in \n> update-chain is updated by \"too old\" xaction (part of chain should\n> be deleted) but ok - it's not bug, just unimplemented feature.\n\nCan that wait until *after* 6.5 is released, for inclusion on v6.5.1? 
\n\n> (BTW, Mark, I have to update lock.sgml)\n\nI have no major qualms about docs, if thomas doesn't ...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 7 Jun 1999 01:27:59 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "The Hermit Hacker wrote:\n> \n> On Mon, 7 Jun 1999, Vadim Mikheev wrote:\n> \n> > The Hermit Hacker wrote:\n> > >\n> > > I am just creating this now...this is to be considered the first *releaes*\n> > > candidate for v6.5 ...\n> > >\n> > > I expect there to be *no more* commits to the source tree until after a\n> > > formal release is done...\n> >\n> > Ah. I wanted to implement proper handling of case when tuple in\n> > update-chain is updated by \"too old\" xaction (part of chain should\n> > be deleted) but ok - it's not bug, just unimplemented feature.\n> \n> Can that wait until *after* 6.5 is released, for inclusion on v6.5.1?\n\nYes. And if I'll not implement it in 6.5.1 then I'll not have time\nfor this after -:)\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 12:31:07 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "> -----Original Message-----\n> From: [email protected]\n> [mailto:[email protected]]On Behalf Of Vadim Mikheev\n> Sent: Monday, June 07, 1999 1:12 PM\n> To: The Hermit Hacker\n> Cc: [email protected]\n> Subject: Re: [HACKERS] postgresql-v6.5beta2.tar.gz ...\n> \n> \n> The Hermit Hacker wrote:\n> > \n> > I am just creating this now...this is to be considered the \n> first *releaes*\n> > candidate for v6.5 ...\n> > \n> > I expect there to be *no more* commits to the source tree until after a\n> > formal release is done...\n> \n> Ah. I wanted to implement proper handling of case when tuple in \n> update-chain is updated by \"too old\" xaction (part of chain should\n> be deleted) but ok - it's not bug, just unimplemented feature.\n> \n> (BTW, Mark, I have to update lock.sgml)\n> \n> Hiroshi, please run your test. You'll get NOTICEs instead of ERRORs.\n\nI have run my test and got the following NOTICEs.\n\nNOTICE: Child itemid in update-chain marked as unused - can't \ncontinue vc_rpfheap\n\nThanks.\n\nHiroshi Inoue\[email protected]\n", "msg_date": "Mon, 7 Jun 1999 15:17:49 +0900", "msg_from": "\"Hiroshi Inoue\" <[email protected]>", "msg_from_op": false, "msg_subject": "RE: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "Hiroshi Inoue wrote:\n> \n> >\n> > Hiroshi, please run your test. You'll get NOTICEs instead of ERRORs.\n> \n> I have run my test and got the following NOTICEs.\n> \n> NOTICE: Child itemid in update-chain marked as unused - can't\n> continue vc_rpfheap\n\nI'm still getting troubles when running your test due to\n\n1. bug in cache invalidation code: when we invalidate relcache\n we forget to free MdfdVec in md.c!\n\n Vacuum invalidates a relation tuple in pg_class and concurrent\n xactions invalidate corresponding relcache entry, but don't\n free MdfdVec and so allocate new one for the same relation\n more and more. Each MdfdVed requires own fd.c:Vfd entry -> below\n\n2. fd.c:pg_nofile()->sysconf(_SC_OPEN_MAX) returns in FreeBSD \n near total number of files that can be opened in system\n (by _all_ users/procs). 
With total number of opened files\n ~ 2000 I can run your test with 10-20 simultaneous\n xactions for very short time, -:)\n\n Should we limit fd.c:no_files to ~ 256?\n This is port-specific, of course...\n\nVadim\nP.S. Hiroshi test I run:\n\n10-20 psql < file with 10000 xactions:\n\nBEGIN;\nUPDATE single row in table;\nEND;\n\nA script run \"vacuum table\" each rand(10) + 1 sec.\n", "msg_date": "Mon, 07 Jun 1999 18:49:16 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "> > Ah. I wanted to implement proper handling of case when tuple in \n> > update-chain is updated by \"too old\" xaction (part of chain should\n> > be deleted) but ok - it's not bug, just unimplemented feature.\n> \n> Can that wait until *after* 6.5 is released, for inclusion on v6.5.1? \n> \n> > (BTW, Mark, I have to update lock.sgml)\n> \n> I have no major qualms about docs, if thomas doesn't ...\n\nNo. If you want it, do it now, not in 6.5.1. Adding features in\nminor releases is worse than adding stuff now.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 09:18:47 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "On Mon, 7 Jun 1999, Bruce Momjian wrote:\n\n> > > Ah. I wanted to implement proper handling of case when tuple in \n> > > update-chain is updated by \"too old\" xaction (part of chain should\n> > > be deleted) but ok - it's not bug, just unimplemented feature.\n> > \n> > Can that wait until *after* 6.5 is released, for inclusion on v6.5.1? \n> > \n> > > (BTW, Mark, I have to update lock.sgml)\n> > \n> > I have no major qualms about docs, if thomas doesn't ...\n> \n> No. If you want it, do it now, not in 6.5.1. Adding features in\n> minor releases is worse than adding stuff now.\n\nWith Vadim himself uncertain as to the effect of doing the patch, then it\nshould bve left for 6.6 then...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 7 Jun 1999 10:40:55 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": "> -----Original Message-----\n> From: [email protected] [mailto:[email protected]]On Behalf Of Vadim\n> Mikheev\n> Sent: Monday, June 07, 1999 7:49 PM\n> To: Hiroshi Inoue\n> Cc: The Hermit Hacker; [email protected]\n> Subject: Re: [HACKERS] postgresql-v6.5beta2.tar.gz ...\n> \n> \n> Hiroshi Inoue wrote:\n> > \n> > >\n> > > Hiroshi, please run your test. You'll get NOTICEs instead of ERRORs.\n> > \n> > I have run my test and got the following NOTICEs.\n> > \n> > NOTICE: Child itemid in update-chain marked as unused - can't\n> > continue vc_rpfheap\n> \n> I'm still getting troubles when running your test due to\n> \n> 1. 
bug in cache invalidation code: when we invalidate relcache\n> we forget to free MdfdVec in md.c!\n> \n> Vacuum invalidates a relation tuple in pg_class and concurrent\n> xactions invalidate corresponding relcache entry, but don't\n> free MdfdVec and so allocate new one for the same relation\n\nI have noticed the same thing But at that point my concern was \ntruncation of segmented relations. AFAIC concurrent xactions \ninvalidate relcache entry when StartTransaction() or Command-\nCounterIncrement() is called. Unfortunately vacuum couldn't stop \nother sessions from executing StartTransaction() nor Command-\nCounterIncrement().\n\n> more and more. Each MdfdVed requires own fd.c:Vfd entry -> below\n> \n> 2. fd.c:pg_nofile()->sysconf(_SC_OPEN_MAX) returns in FreeBSD \n> near total number of files that can be opened in system\n> (by _all_ users/procs). With total number of opened files\n> ~ 2000 I can run your test with 10-20 simultaneous\n> xactions for very short time, -:)\n>\n\nI have remembered another thing.\nSomeone has already reported to ML in Japan and I was able to \nfind the cause easily thanks to him.\n\nvacuum calls open() directly to create pg_vlock file.\nSometimes I was not able to open the file because of the lack of \nfile descriptors().\nFileNameOpenFile()/FileClose() etc should be called instead of \nopen()/close() etc ?\n\nRegards.\n\nHiroshi Inoue\[email protected]\n", "msg_date": "Tue, 8 Jun 1999 10:08:57 +0900", "msg_from": "\"Hiroshi Inoue\" <[email protected]>", "msg_from_op": false, "msg_subject": "RE: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": ">vacuum calls open() directly to create pg_vlock file.\n>Sometimes I was not able to open the file because of the lack of \n>file descriptors().\n>FileNameOpenFile()/FileClose() etc should be called instead of \n>open()/close() etc ?\n\nThanks Hiroshi! I forgot to forward his message regarding pg_vlock\nissue. Also note that there are some codes that directly open files in\ncreatedb/destroydb stuffs...\n--\nTatsuo Ishii\n\n", "msg_date": "Tue, 08 Jun 1999 10:17:34 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ... " }, { "msg_contents": "\"Hiroshi Inoue\" <[email protected]> writes:\n> vacuum calls open() directly to create pg_vlock file.\n> Sometimes I was not able to open the file because of the lack of \n> file descriptors().\n> FileNameOpenFile()/FileClose() etc should be called instead of \n> open()/close() etc ?\n\nThat is a good point, but actually I think it should not be a problem.\nvacuum.c just does open() and immediately close() on pg_vlock; it\ndoesn't hold the file descriptor open.\n\nWhen I fixed psort.c a few weeks ago, I looked through all the other\ndirect calls of open() and fopen() in the backend. There are still half\na dozen or so, but none of them open more than one file or will hold the\nfile descriptor for longer than the execution of the function they're\nin. So I felt it was OK to leave them alone.\n\nThe reason it's OK is that fd.c doesn't use up all the available\nfile descriptors --- it tries to leave ten or so unused. That's\nnecessary to ensure that library functions like dlopen() will work,\nbecause they don't know anything about using fd.c's routines.\n\nSo, the occasional short-term file opening in vacuum.c and similar\nplaces should not matter. 
If those do fail for lack of FDs, then the\n*real* problem is that fd.c is not estimating correctly how many file\ndescriptors it can safely use; that's what we need to fix.\n\nBut what I want to know right now is whether this behavior has been\nseen with code from the last week or two. Maybe the report is just\na side-effect of the FD leaks that used to exist in several places...\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 22:34:20 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": ">That is a good point, but actually I think it should not be a problem.\n>vacuum.c just does open() and immediately close() on pg_vlock; it\n>doesn't hold the file descriptor open.\n>\n>When I fixed psort.c a few weeks ago, I looked through all the other\n>direct calls of open() and fopen() in the backend. There are still half\n>a dozen or so, but none of them open more than one file or will hold the\n>file descriptor for longer than the execution of the function they're\n>in. So I felt it was OK to leave them alone.\n>\n>The reason it's OK is that fd.c doesn't use up all the available\n>file descriptors --- it tries to leave ten or so unused. That's\n>necessary to ensure that library functions like dlopen() will work,\n>because they don't know anything about using fd.c's routines.\n>\n>So, the occasional short-term file opening in vacuum.c and similar\n>places should not matter. If those do fail for lack of FDs, then the\n>*real* problem is that fd.c is not estimating correctly how many file\n>descriptors it can safely use; that's what we need to fix.\n>\n>But what I want to know right now is whether this behavior has been\n>seen with code from the last week or two. Maybe the report is just\n>a side-effect of the FD leaks that used to exist in several places...\n\nIf I correctly remember, the report was regarding 6.4.2. I will check\nif it happens with current, and report back soon.\n---\nTatsuo Ishii\n\n", "msg_date": "Tue, 08 Jun 1999 11:42:09 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ... " }, { "msg_contents": "> -----Original Message-----\n> From: [email protected]\n> [mailto:[email protected]]On Behalf Of Tom Lane\n> Sent: Tuesday, June 08, 1999 11:34 AM\n> To: [email protected]\n> Subject: Re: [HACKERS] postgresql-v6.5beta2.tar.gz ...\n> \n> \n> \"Hiroshi Inoue\" <[email protected]> writes:\n> > vacuum calls open() directly to create pg_vlock file.\n> > Sometimes I was not able to open the file because of the lack of \n> > file descriptors().\n> > FileNameOpenFile()/FileClose() etc should be called instead of \n> > open()/close() etc ?\n> \n> That is a good point, but actually I think it should not be a problem.\n> vacuum.c just does open() and immediately close() on pg_vlock; it\n> doesn't hold the file descriptor open.\n> \n> When I fixed psort.c a few weeks ago, I looked through all the other\n> direct calls of open() and fopen() in the backend. There are still half\n> a dozen or so, but none of them open more than one file or will hold the\n> file descriptor for longer than the execution of the function they're\n> in. So I felt it was OK to leave them alone.\n> \n> The reason it's OK is that fd.c doesn't use up all the available\n> file descriptors --- it tries to leave ten or so unused. 
That's\n> necessary to ensure that library functions like dlopen() will work,\n> because they don't know anything about using fd.c's routines.\n> \n> So, the occasional short-term file opening in vacuum.c and similar\n> places should not matter. If those do fail for lack of FDs, then the\n> *real* problem is that fd.c is not estimating correctly how many file\n> descriptors it can safely use; that's what we need to fix.\n> \n> But what I want to know right now is whether this behavior has been\n> seen with code from the last week or two. Maybe the report is just\n> a side-effect of the FD leaks that used to exist in several places...\n>\n\nI don't remember correctly how the phenomenon occured,sorry.\nI couldn't reproduce it now.\n\nRegards.\n\nHiroshi Inoue\[email protected]\n", "msg_date": "Tue, 8 Jun 1999 18:45:23 +0900", "msg_from": "\"Hiroshi Inoue\" <[email protected]>", "msg_from_op": false, "msg_subject": "RE: [HACKERS] postgresql-v6.5beta2.tar.gz ..." }, { "msg_contents": ">>That is a good point, but actually I think it should not be a problem.\n>>vacuum.c just does open() and immediately close() on pg_vlock; it\n>>doesn't hold the file descriptor open.\n>>\n>>When I fixed psort.c a few weeks ago, I looked through all the other\n>>direct calls of open() and fopen() in the backend. There are still half\n>>a dozen or so, but none of them open more than one file or will hold the\n>>file descriptor for longer than the execution of the function they're\n>>in. So I felt it was OK to leave them alone.\n>>\n>>The reason it's OK is that fd.c doesn't use up all the available\n>>file descriptors --- it tries to leave ten or so unused. That's\n>>necessary to ensure that library functions like dlopen() will work,\n>>because they don't know anything about using fd.c's routines.\n>>\n>>So, the occasional short-term file opening in vacuum.c and similar\n>>places should not matter. If those do fail for lack of FDs, then the\n>>*real* problem is that fd.c is not estimating correctly how many file\n>>descriptors it can safely use; that's what we need to fix.\n>>\n>>But what I want to know right now is whether this behavior has been\n>>seen with code from the last week or two. Maybe the report is just\n>>a side-effect of the FD leaks that used to exist in several places...\n>\n>If I correctly remember, the report was regarding 6.4.2. I will check\n>if it happens with current, and report back soon.\n\nDone. With 32/64 users and 100 transactions each, I see no problem so\nfar.\n--\nTatsuo Ishii\n\n\n", "msg_date": "Tue, 08 Jun 1999 19:12:26 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ... " } ]
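Vadim's second point in the thread above (sysconf(_SC_OPEN_MAX) reporting a number far larger than one backend can safely use) and Tom's note about leaving descriptors free for dlopen() suggest clamping the per-backend pool rather than trusting the reported limit. A standalone sketch of that clamping follows; the constants are arbitrary placeholders, not the values fd.c actually uses.

/*
 * Standalone illustration of the clamping discussed in this thread: take
 * what sysconf() claims, subtract a reserve for code that opens files
 * behind our back (dlopen(), system libraries), and cap the result at a
 * conservative per-backend ceiling.  The constants are placeholders, not
 * the values fd.c actually uses.
 */
#include <stdio.h>
#include <unistd.h>

#define RESERVED_FDS    10      /* left free for dlopen() and friends */
#define MAX_SAFE_FDS    256     /* per-backend ceiling being proposed */

static int
usable_fds(void)
{
    long    reported = sysconf(_SC_OPEN_MAX);
    long    usable;

    if (reported < 0)           /* indeterminate limit: fall back to a guess */
        reported = 64;

    usable = reported - RESERVED_FDS;
    if (usable > MAX_SAFE_FDS)
        usable = MAX_SAFE_FDS;
    if (usable < 1)
        usable = 1;

    return (int) usable;
}

int
main(void)
{
    printf("sysconf(_SC_OPEN_MAX) = %ld, descriptor pool used = %d\n",
           sysconf(_SC_OPEN_MAX), usable_fds());
    return 0;
}

With a hard per-backend ceiling, N concurrent backends consume at most N times that ceiling, instead of each one trying to claim the two thousand or so descriptors Vadim saw reported on FreeBSD.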
[ { "msg_contents": "\n========================================================\nFreeBSD thelab.hub.org 4.0-CURRENT FreeBSD 4.0-CURRENT #0: Mon Apr 5\n> grep fail regress.out\nfloat8 .. failed\ngeometry .. failed\n========================================================\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 7 Jun 1999 01:31:19 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Regression Test results for v6.5beta2 ..." } ]
[ { "msg_contents": "\n> \"Victoria W.\" <[email protected]> writes:\n> > CREATE FUNCTION add_rechnr(int4) RETURNS int4\n> > AS '/usr/local/pgsql/lib/modules/funcs.so' LANGUAGE 'c';\n> \n> I don't think there is an \"unload shared library\" function in Postgres\n> (it'd be hard or impossible to implement on some Unixes, anyway). So\n> the only way to stop referencing a shared library once it's been opened\n> is to terminate the backend process.\n> \nIn my experience you can do the psql command:\n\nload '/usr/local/pgsql/lib/modules/funcs.so';\n\nafter you changed the lib to load the new library into the backend.\nUnfortunately I think you will lose the last value in \"rechnr\". \n\nAndreas\n\n", "msg_date": "Mon, 7 Jun 1999 11:18:30 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "AW: [HACKERS] livetime of a variable defined in a c-procedure (fw d) " } ]
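The behaviour described in the thread above comes down to static storage in the loaded library: the variable keeps its value for as long as this backend keeps the .so loaded, each backend (connection) has its own copy, and a LOAD of a new version or a backend exit discards it, which is why the last value in rechnr is lost. A standalone C illustration of just the storage behaviour follows; the function name echoes the thread but is only a demo.

/*
 * Standalone illustration of the storage behaviour behind this thread: a
 * static variable keeps its value across calls within one process (one
 * backend), every process gets its own copy, and the value disappears
 * when the library holding it is replaced or the process exits.  The
 * function name echoes the thread but this is only a demo.
 */
#include <stdio.h>

static int
add_rechnr_demo(int increment)
{
    static int  rechnr = 0;     /* survives between calls in this process */

    rechnr += increment;
    return rechnr;
}

int
main(void)
{
    printf("%d\n", add_rechnr_demo(1));     /* prints 1 */
    printf("%d\n", add_rechnr_demo(1));     /* prints 2 */
    printf("%d\n", add_rechnr_demo(5));     /* prints 7 */
    return 0;
}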
[ { "msg_contents": "\n> The main problem is just programistic: you will have to add\n> heap tid to the end of index tuples on internal index pages,\n> but on leaf pages heap tid is in the begin of index tuples\n> (inside of btitem struct).\n> \nWhile I absolutely like the idea of having the heap tid in the index,\nI don't quite agree, that leaf pages need heap tid at the front of the key.\nThis would lead to Index access beeing not ordered (in terms of key) :-(\n\nHaving it in the front will only lead to \"on disk ordered\" fetches while\nreading \ntuples from one leaf page, when reading the next leaf page you will start \nfrom the beginning.\n\nSo I think the leaf page needs heap tid at the end of each key, same as\nin root pages.\n\nFor performance reasons a totally standalone \"sort to tuple on disk order\"\nnode could be implemented, that could also be handled by the \noptimizer, and would be of wider performance use.\n\nAndreas\n", "msg_date": "Mon, 7 Jun 1999 11:58:39 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch" }, { "msg_contents": "ZEUGSWETTER Andreas IZ5 wrote:\n> \n> > The main problem is just programistic: you will have to add\n> > heap tid to the end of index tuples on internal index pages,\n> > but on leaf pages heap tid is in the begin of index tuples\n> > (inside of btitem struct).\n> >\n> While I absolutely like the idea of having the heap tid in the index,\n> I don't quite agree, that leaf pages need heap tid at the front of the key.\n\nOh no - this is not what I meant to say.\n\nFirst, there is no heap tid in index tuples in internal pages,\nand so we'll have to add it to them. Actually, it doesn't matter\nwhere to add it - just after btitem->bti_itup (i.e. header of\nindex tuple) or after field keys - it will be the last key used\nin comparing.\n\nBut on leaf pages index tuples already keep heap tid - this is\nbtitem->bti_itup.t_tid - and so we shouldn't add heap tid there.\n\nI just wanted to say that we'll have to differentiate \ninternal/leaf index tuples in _bt_compare, _bt_binsrch etc \nto know from what part of index tuples heap tid should be fetched.\n\nSorry.\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 18:19:36 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch" }, { "msg_contents": "ZEUGSWETTER Andreas IZ5 wrote:\n> \n> For performance reasons a totally standalone \"sort to tuple on disk order\"\n> node could be implemented, that could also be handled by the\n> optimizer, and would be of wider performance use.\n\nWe'll get tuples from index scan sorted on [key, disk order].\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 18:23:09 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Bizarre coding in _bt_binsrch" } ]
[ { "msg_contents": "\n> >By the way, may I ask more question regarding Oracle? You mentioned\n> >the magic of no-fsync in Oracle is actually a bug. Ok, I understand. I\n> >also heard that Oracle does some kind of redo-log bufferings. Does\n> >this mean certain committed data might be lost if the system crashed\n> >before the buffered data is written into the disk?\n> \nYes, you might lose a transaction that has been reported committed to the\nclient.\nBut, it guarantees that every transaction is either committed, or rolled\nback as a \nwhole. Thus leaving the database in a consistent state. We have a lot of\napplications\nwhere this is acceptable, and others where this is not. It is the ability to\nchoose \n(in Informix buffered versus unbuffered logging) that makes us happy. \n\nAndreas\n", "msg_date": "Mon, 7 Jun 1999 12:11:49 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Priorities for 6.6 " } ]
[ { "msg_contents": "Hi Guys.\n\nHas anyone heard of Smalltalk interfaces to PostgreSQL? Through ODBC\npossibly?\n\nThanx,\nDuane\n", "msg_date": "Mon, 7 Jun 1999 11:03:14 +0000 (AST)", "msg_from": "Duane Currie <[email protected]>", "msg_from_op": true, "msg_subject": "PGSQL Interface for Smalltalk?" } ]
[ { "msg_contents": "Just experienced, located - this will take 10 mins\nto fix it. Old one...\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 22:14:25 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "deadlock in btree..." } ]
[ { "msg_contents": "> Update of /usr/local/cvsroot/www/html/docs\n> Added Files:\n> Pg.pm.html faq-dev-english.html faq-english.html faq-hpux.html\n> faq-irix.html faq-linux.html\n> Removed Files:\n> Pg.pm.shtml faq-dev-english.shtml faq-english.shtml\n> faq-hpux.shtml faq-irix.shtml faq-linux.shtml\n\nDo we have the new FAQs for AIX and SCO posted yet? I haven't looked,\nbut vaguely recall that Vince wasn't sure where they would come from\nfor the web version...\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Mon, 07 Jun 1999 14:41:21 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [COMMITTERS] [WEBMASTER] 'www/html/docs Pg.pm.html\n\tfaq-dev-english.html faq-english.html faq-hpux.html faq-irix.html\n\tfaq-linux.html index.html Pg.pm.shtml faq-dev-english.shtml\n\tfaq-english.shtml faq-hpux.shtml faq-irix.shtml faq-linux.shtml'" }, { "msg_contents": "> > Update of /usr/local/cvsroot/www/html/docs\n> > Added Files:\n> > Pg.pm.html faq-dev-english.html faq-english.html faq-hpux.html\n> > faq-irix.html faq-linux.html\n> > Removed Files:\n> > Pg.pm.shtml faq-dev-english.shtml faq-english.shtml\n> > faq-hpux.shtml faq-irix.shtml faq-linux.shtml\n> \n> Do we have the new FAQs for AIX and SCO posted yet? I haven't looked,\n> but vaguely recall that Vince wasn't sure where they would come from\n> for the web version...\n\nThey are not on the web site, so I don't have them.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 11:32:19 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] [WEBMASTER] 'www/html/docs Pg.pm.html\n\tfaq-dev-english.html faq-english.html faq-hpux.html faq-irix.h" }, { "msg_contents": "> > Do we have the new FAQs for AIX and SCO posted yet? I haven't looked,\n> > but vaguely recall that Vince wasn't sure where they would come from\n> > for the web version...\n> They are not on the web site, so I don't have them.\n\n?? Right, they are plain text in the cvs tree. How do we get them onto\nthe web site?\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Mon, 07 Jun 1999 15:55:01 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] [WEBMASTER] 'www/html/docs\n\tPg.pm.htmlfaq-dev-english.html faq-english.html faq-hpux.html\n\tfaq-irix.h" }, { "msg_contents": "On Mon, 7 Jun 1999, Thomas Lockhart wrote:\n\n> > > Do we have the new FAQs for AIX and SCO posted yet? I haven't looked,\n> > > but vaguely recall that Vince wasn't sure where they would come from\n> > > for the web version...\n> > They are not on the web site, so I don't have them.\n> \n> ?? Right, they are plain text in the cvs tree. How do we get them onto\n> the web site?\n> \n> - Thomas\n> \n> \n\nIf they're strictly plain text I can toss them in, but formatting will\nbe minimal since they can change and I'd have to redo it. 
If/when/after\nthey're in sgml then of course it won't be a problem.\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n\n", "msg_date": "Mon, 7 Jun 1999 12:04:06 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] [WEBMASTER] 'www/html/docs\n\tPg.pm.htmlfaq-dev-english.html faq-english.html faq-hpux.html\n\tfaq-irix.h" }, { "msg_contents": "> > > Do we have the new FAQs for AIX and SCO posted yet? I haven't looked,\n> > > but vaguely recall that Vince wasn't sure where they would come from\n> > > for the web version...\n> > They are not on the web site, so I don't have them.\n> \n> ?? Right, they are plain text in the cvs tree. How do we get them onto\n> the web site?\n\ncheckout www cvs tree, update docs/index.html, and put them on the web\nsite.\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 12:21:04 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] [WEBMASTER] 'www/html/docs\n\tPg.pm.htmlfaq-dev-english.html faq-english.html faq-hpux.html faq-iri" }, { "msg_contents": "> > > > Do we have the new FAQs for AIX and SCO posted yet?\n> > > They are not on the web site, so I don't have them.\n> > ?? Right, they are plain text in the cvs tree. How do we get them onto\n> > the web site?\n> If they're strictly plain text I can toss them in, but formatting will\n> be minimal since they can change and I'd have to redo it.\n\nafaik *all* the FAQs start as plain text in the cvs tree. How did\nthose make it onto the web site in html?\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Mon, 07 Jun 1999 16:34:17 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] [WEBMASTER] 'www/html/docs\n\tPg.pm.htmlfaq-dev-english.html faq-english.html faq-hpux.html\n\tfaq-irix.h" }, { "msg_contents": "On Mon, 7 Jun 1999, Thomas Lockhart wrote:\n\n> > > > > Do we have the new FAQs for AIX and SCO posted yet?\n> > > > They are not on the web site, so I don't have them.\n> > > ?? Right, they are plain text in the cvs tree. How do we get them onto\n> > > the web site?\n> > If they're strictly plain text I can toss them in, but formatting will\n> > be minimal since they can change and I'd have to redo it.\n> \n> afaik *all* the FAQs start as plain text in the cvs tree. How did\n> those make it onto the web site in html?\n\nThe current web site is in frames. Just pull it in and put the pre tag\nat the top. The new web pages will not be frames so EVERY document will\nneed to be formatted somewhat as they're merged into some template files,\nabout a 5K increase per file. I'm not sure what I'm doing about the\ndocumentation tho since that's regenerated nightly. It'll probably have\nto go to non-bordered pages (IOW no logo, etc.). 
\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n\n", "msg_date": "Mon, 7 Jun 1999 12:49:03 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] [WEBMASTER] 'www/html/docs\n\tPg.pm.htmlfaq-dev-english.html faq-english.html faq-hpux.html\n\tfaq-irix.h" } ]
[ { "msg_contents": "> Update of /usr/local/cvsroot/pgsql/src/backend/parser\n> In directory hub.org:/tmp/cvs-serv80148/parser\n> \n> Modified Files:\n> \tgram.y \n> Log Message:\n> Repair recently-introduced error in makeIndexable for LIKE:\n> a non-leading % would be put into the >=/<= patterns. Also, repair\n> longstanding confusion about whether %% means a literal %%. The SQL92\n> doesn't say any such thing, and textlike() knows that, but gram.y didn't.\n> \n> \n\nHouston, we have a problem. DoMatch has:\n\n case '%':\n /* %% is the same as % according to the SQL standard */\n /* Advance past all %'s */\n while (*p == '%')\n\nDon't we want %% to be %?\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 10:42:07 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [COMMITTERS] 'pgsql/src/backend/parser gram.y'" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n>> Repair recently-introduced error in makeIndexable for LIKE:\n>> a non-leading % would be put into the >=/<= patterns. Also, repair\n>> longstanding confusion about whether %% means a literal %%. The SQL92\n>> doesn't say any such thing, and textlike() knows that, but gram.y didn't.\n\n> Houston, we have a problem. DoMatch has:\n\n> case '%':\n> /* %% is the same as % according to the SQL standard */\n> /* Advance past all %'s */\n> while (*p == '%')\n\n> Don't we want %% to be %?\n\nI looked at the spec, and this piece of code is right: there is nothing\nin the spec that says that %% means anything other than two string\npattern matches (which of course has the same effect as one). So I made\ngram.y agree.\n\nIt could be that people like Microsoft don't follow the spec... can\nanyone check this?\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 11:06:07 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [COMMITTERS] 'pgsql/src/backend/parser gram.y' " }, { "msg_contents": "> Bruce Momjian <[email protected]> writes:\n> >> Repair recently-introduced error in makeIndexable for LIKE:\n> >> a non-leading % would be put into the >=/<= patterns. Also, repair\n> >> longstanding confusion about whether %% means a literal %%. The SQL92\n> >> doesn't say any such thing, and textlike() knows that, but gram.y didn't.\n> \n> > Houston, we have a problem. DoMatch has:\n> \n> > case '%':\n> > /* %% is the same as % according to the SQL standard */\n> > /* Advance past all %'s */\n> > while (*p == '%')\n> \n> > Don't we want %% to be %?\n> \n> I looked at the spec, and this piece of code is right: there is nothing\n> in the spec that says that %% means anything other than two string\n> pattern matches (which of course has the same effect as one). So I made\n> gram.y agree.\n> \n> It could be that people like Microsoft don't follow the spec... can\n> anyone check this?\n\nDoMatch, which implements LIKE clearly thinks %% is %, and I think our\nusers think so too. I would not change it.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 11:35:35 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [COMMITTERS] 'pgsql/src/backend/parser gram.y'" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> DoMatch, which implements LIKE clearly thinks %% is %,\n\nCareful: DoMatch thinks %% is a wildcard (ie, same as %), not literal %.\n\nIt's hard to say what our users think. If you'd written %% it would\nindeed match a % --- but you'd have a problem with it matching other\nstuff too. This might or might not be partially masked by the range\nconditions inserted by the parser. (But how long have we been doing\nthat?)\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 07 Jun 1999 13:28:26 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] 'pgsql/src/backend/parser gram.y' " }, { "msg_contents": "> Bruce Momjian <[email protected]> writes:\n> > DoMatch, which implements LIKE clearly thinks %% is %,\n> \n> Careful: DoMatch thinks %% is a wildcard (ie, same as %), not literal %.\n> \n> It's hard to say what our users think. If you'd written %% it would\n> indeed match a % --- but you'd have a problem with it matching other\n> stuff too. This might or might not be partially masked by the range\n> conditions inserted by the parser. (But how long have we been doing\n> that?)\n\nSorry. I see now. I was confused.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 13:58:06 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Re: [COMMITTERS] 'pgsql/src/backend/parser gram.y'" } ]
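A small illustration of the behaviour this thread settles on, assuming a 6.5-era backend (the literal strings are made-up examples): doubled percent signs are not an escape for a literal %, they are simply two adjacent wildcards equivalent to a single %, so matching a literal % has to wait for an ESCAPE clause.

    -- '%%' behaves exactly like '%'
    SELECT '50%' LIKE '50%%';   -- true
    SELECT '50x' LIKE '50%%';   -- also true, since %% is just a wildcard
    SELECT 'abc' LIKE '%%';     -- true for any string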
[ { "msg_contents": "Deadlock is fixed but seems that I found another one, in btree\nagain, that bit me today all day long.\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 22:43:49 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "new one" }, { "msg_contents": "Vadim Mikheev wrote:\n> \n> Deadlock is fixed but seems that I found another one, in btree\n> again, that bit me today all day long.\n\nFixed...\n\nBut there is still some problem in vacuum:\n\nTRAP: Failed Assertion(\"!((*vpp)->vpd_offsets_used == num_tuples):\", \n File: \"vacuum.c\", Line: 1736)\n\nI hope to fix it tomorrow...\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 23:17:59 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] new one" } ]
[ { "msg_contents": "\n> One reason to do truncate is that if it is a symbolic link to another\n> driver, that link will stay, while unlink will not, and will recreate on\n> on the same drive.\n> \nThat is the first good argument in favor of truncate other than \ncoding issues. I like it. It is actually very good :-)\n\nMy only concern with leaving the files around would be, that there is\na chance for other backends to write where they should not.\n\nAndreas\n", "msg_date": "Mon, 7 Jun 1999 16:56:09 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Open 6.5 items" }, { "msg_contents": "ZEUGSWETTER Andreas IZ5 wrote:\n> \n> > One reason to do truncate is that if it is a symbolic link to another\n> > driver, that link will stay, while unlink will not, and will recreate on\n> > on the same drive.\n> >\n> That is the first good argument in favor of truncate other than\n> coding issues. I like it. It is actually very good :-)\n> \n> My only concern with leaving the files around would be, that there is\n> a chance for other backends to write where they should not.\n\nSeems that shouldn't be a problem here:\n\n- only vacuum truncates relations;\n- vacuum locks a relation in Access Exclusive mode;\n- heap_beginscan (used by both SeqScan and IndexScan)\n tries to lock a relation in Access Share mode _before_ \n call to RelationGetNumberOfBlocks()->smgrnblocks();\n- INSERT acquires Row Exclusive lock on relation\n before doing anything;\n\n- so, anyone should get a valid number of blocks.\n\nBut mdmgr should be tested...\n\nVadim\n", "msg_date": "Mon, 07 Jun 1999 23:40:14 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Open 6.5 items" } ]
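A sketch of the locking behaviour described above, using a hypothetical table t1 (not from this thread): while VACUUM holds its Access Exclusive lock on a table, concurrent readers and writers of that table simply queue behind it and proceed once the lock is released.

    -- session 1: holds an Access Exclusive lock on t1 while it runs
    VACUUM t1;

    -- session 2, issued while session 1 is still running:
    SELECT count(*) FROM t1;     -- waits for its Access Share lock until the vacuum finishes
    INSERT INTO t1 VALUES (1);   -- likewise waits for its Row Exclusive lock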
[ { "msg_contents": "\nOn AIX 4.3.2 the third line in template/aix_42:\nSHARED_LIB:\n\nneeds to be changed to:\nSHARED_LIB:-lc\n\nI think this was also needed on AIX 4.2. Comments Please !!\n\nIf nobody objects, I suggest to make this change, since it cannot\nbreak AIX 4.2 and is necessary on AIX 4.3\n\nAndreas\n", "msg_date": "Mon, 7 Jun 1999 17:53:48 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "AIX port" }, { "msg_contents": "Done.\n\n> \n> On AIX 4.3.2 the third line in template/aix_42:\n> SHARED_LIB:\n> \n> needs to be changed to:\n> SHARED_LIB:-lc\n> \n> I think this was also needed on AIX 4.2. Comments Please !!\n> \n> If nobody objects, I suggest to make this change, since it cannot\n> break AIX 4.2 and is necessary on AIX 4.3\n> \n> Andreas\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 13:18:54 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] AIX port" } ]
[ { "msg_contents": "Can we get the %% to % back in there until we can do a complete fix for\nthis? I promise to add ESCAPE for 6.6.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 13:20:01 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "%% and LIKE" } ]
[ { "msg_contents": "try :\n select '209.100.30.2'::inet<<='209.100.30.0/24';\n\nHey, I know nothing about the inet type or IPv4 or v6 but I found this one\nby using \\do in psql.\n\t-DEJ\n\n> -----Original Message-----\n> From:\[email protected] [SMTP:[email protected]]\n> Sent:\tFriday, June 04, 1999 7:21 PM\n> To:\[email protected]\n> Subject:\t[HACKERS] inet type & select\n> \n> Hello.\n> \n> I'm busy writing an IP accounting DB using nacctd and postgres.\n> The data gets inserted correctly into the database and I can run the\n> select queries that I desire. \n> \n> E.g. (select sum(size) from iptraff where srcip = \"209.100.30.2\") gives\n> me the total bytes that that particular host sent. Now it would be\n> *REALLY* cool if I could do the following: (select sum(size) from\n> iptraff where scrip = \"209.100.30.0/24\")\n> That would tell me the total outgoing traffic for that subnet.\n> \n> From what I understand the relevant code resides in network.c\n> unfortunately I am not a C person :-( Perhaps this feature would be\n> included in the next snapshot/release or someone could help me with my\n> particular installation.\n> \n> Thanks!\n> ChrisG\n> -----!!!-------\n> ---FREE THE SOURCE---\n> -----!!!-------\n> \n", "msg_date": "Mon, 7 Jun 1999 13:50:07 -0500 ", "msg_from": "\"Jackson, DeJuan\" <[email protected]>", "msg_from_op": true, "msg_subject": "RE: [HACKERS] inet type & select" } ]
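Applying the operator suggested above to the original accounting query; this is only a sketch that reuses the table and column names from the question and assumes the srcip column is declared with the inet type (if it is stored as plain text the comparison will not work as shown).

    -- total bytes sent by every host in the 209.100.30.0/24 subnet
    SELECT sum(size) FROM iptraff WHERE srcip <<= '209.100.30.0/24';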
[ { "msg_contents": "I am updating a database from 6.4 to 6.5. This statement was produced\nby 6.4.2's pg_dumpall but is not accepted by 6.5:\n\n CREATE RULE \"rtest_v1_upd\" AS ON UPDATE TO \"rtest_v1\" \n DO INSTEAD UPDATE rtest_t1 SET \"a\" = new.\"a\", \"b\" = new.\"b\"\n WHERE \"a\" = current.\"a\";\n\n ERROR: current: Table does not exist.\n\nThis rule is part of the regression tests and it seems that `current' has\nnow been changed to `old'. Apart from the error messages that occur\nas the script flows past (too fast to read) pg_upgrade does not\noffer any explanations; it merely says that psql failed to run the\nscript, which is contrary to what the user (me) thinks he has just seen.\n\nThis particular problem can be fixed by applying this sed command to\nthe script:\n\n sed -e '/^CREATE RULE /s/current\\./old./g'\n\nwith some smallish risk of breaking rules where some table has a name\nending in `current'.\n\n\nI'm currently trying to fix the automatic upgrade procedure for Debian's\npostgresql package. Are there any other problems like this that people\nmight meet?\n\n\n\n\nI attach a patch for pg_upgrade. This does two things:\n\n1. check whether the program is being executed in $PGDATA/.. This is\n necessary if the data tree is not in the standard place, as is the\n case with the Debian distribution (because of Debian policy).\n\n2. give a clearer error message if the dumped data structure fails to\n be loaded.\n\n===================== start patch =================\n*** src/bin/pg_dump/pg_upgrade~\tMon Jun 7 22:13:00 1999\n--- src/bin/pg_dump/pg_upgrade\tMon Jun 7 22:11:13 1999\n***************\n*** 29,36 ****\n \n # check things\n \n! if [ ! -f \"./lib/global1.bki.source\" ]\n! then\techo \"$0 must be run from the top of the postgres directory tree.\" 1>&2\n \texit 1\n fi\n \n--- 29,37 ----\n \n # check things\n \n! if [ ! -f \"./data/PG_VERSION\" ]\n! then\techo \"`basename $0` must be run from the directory containing\n! the database directory \\`data' (`dirname $PGDATA`.)\" 1>&2\n \texit 1\n fi\n \n***************\n*** 72,78 ****\n psql \"template1\" <\"/tmp/$$\"\n \n if [ $? -ne 0 ]\n! then\techo \"psql failed to execute $INPUT script.\" 1>&2\n \texit 1\n fi\n \n--- 73,80 ----\n psql \"template1\" <\"/tmp/$$\"\n \n if [ $? -ne 0 ]\n! then\techo \"There were errors in the input script $INPUT.\n! $0 aborted.\" 1>&2\n \texit 1\n fi\n \n===================== end patch =================\n\n-- \nOliver Elphick [email protected]\nIsle of Wight http://www.lfix.co.uk/oliver\n PGP key from public servers; key ID 32B8FAA1\n ========================================\n \"There is a way that seems right to a man, but in the \n end it leads to death.\" \n Proverbs 16:25 \n\n\n", "msg_date": "Mon, 07 Jun 1999 22:17:42 +0100", "msg_from": "\"Oliver Elphick\" <[email protected]>", "msg_from_op": true, "msg_subject": "pg_upgrade problem" }, { "msg_contents": "> I am updating a database from 6.4 to 6.5. This statement was produced\n> by 6.4.2's pg_dumpall but is not accepted by 6.5:\n> \n> CREATE RULE \"rtest_v1_upd\" AS ON UPDATE TO \"rtest_v1\" \n> DO INSTEAD UPDATE rtest_t1 SET \"a\" = new.\"a\", \"b\" = new.\"b\"\n> WHERE \"a\" = current.\"a\";\n> \n> ERROR: current: Table does not exist.\n> \n> This rule is part of the regression tests and it seems that `current' has\n> now been changed to `old'. 
Apart from the error messages that occur\n> as the script flows past (too fast to read) pg_upgrade does not\n> offer any explanations; it merely says that psql failed to run the\n> script, which is contrary to what the user (me) thinks he has just seen.\n\nYikes. I forgot to disable pg_upgrade in this release. I don't think\nit is going to work because the on-disk tuple structures has changed for\nMVCC? Vadim, an I correct?\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 18:56:33 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] pg_upgrade problem" }, { "msg_contents": "Applied.\n\n\n> \n> I attach a patch for pg_upgrade. This does two things:\n> \n> 1. check whether the program is being executed in $PGDATA/.. This is\n> necessary if the data tree is not in the standard place, as is the\n> case with the Debian distribution (because of Debian policy).\n> \n> 2. give a clearer error message if the dumped data structure fails to\n> be loaded.\n> \n> ===================== start patch =================\n> *** src/bin/pg_dump/pg_upgrade~\tMon Jun 7 22:13:00 1999\n> --- src/bin/pg_dump/pg_upgrade\tMon Jun 7 22:11:13 1999\n> ***************\n> *** 29,36 ****\n> \n> # check things\n> \n> ! if [ ! -f \"./lib/global1.bki.source\" ]\n> ! then\techo \"$0 must be run from the top of the postgres directory tree.\" 1>&2\n> \texit 1\n> fi\n> \n> --- 29,37 ----\n> \n> # check things\n> \n> ! if [ ! -f \"./data/PG_VERSION\" ]\n> ! then\techo \"`basename $0` must be run from the directory containing\n> ! the database directory \\`data' (`dirname $PGDATA`.)\" 1>&2\n> \texit 1\n> fi\n> \n> ***************\n> *** 72,78 ****\n> psql \"template1\" <\"/tmp/$$\"\n> \n> if [ $? -ne 0 ]\n> ! then\techo \"psql failed to execute $INPUT script.\" 1>&2\n> \texit 1\n> fi\n> \n> --- 73,80 ----\n> psql \"template1\" <\"/tmp/$$\"\n> \n> if [ $? -ne 0 ]\n> ! then\techo \"There were errors in the input script $INPUT.\n> ! $0 aborted.\" 1>&2\n> \texit 1\n> fi\n> \n> ===================== end patch =================\n> \n> -- \n> Oliver Elphick [email protected]\n> Isle of Wight http://www.lfix.co.uk/oliver\n> PGP key from public servers; key ID 32B8FAA1\n> ========================================\n> \"There is a way that seems right to a man, but in the \n> end it leads to death.\" \n> Proverbs 16:25 \n> \n> \n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 18:59:24 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] pg_upgrade problem" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> Yikes. I forgot to disable pg_upgrade in this release. I don't think\n> it is going to work because the on-disk tuple structures has changed for\n> MVCC? Vadim, an I correct?\n\nNot for MVCC, but we get rid t_len from tuple header.\n\nVadim\n", "msg_date": "Tue, 08 Jun 1999 09:54:48 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] pg_upgrade problem" } ]
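For reference, a sketch of how the dumped rule quoted above has to read before 6.5 will accept it, with the pseudo-relation current renamed to old; this is the rewrite the sed command in this thread is aiming to produce.

    CREATE RULE "rtest_v1_upd" AS ON UPDATE TO "rtest_v1"
        DO INSTEAD UPDATE rtest_t1 SET "a" = new."a", "b" = new."b"
        WHERE "a" = old."a";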
[ { "msg_contents": "Hi Kaare,\n\ndo you mean /rdb. I have used it with Coherent. It was distributed 1992\ntogether with Coherent under the copyright of Revolutionary Software,\nInc.\n\nThere should exist one book (Schaffer?).\n\n-Egon\n", "msg_date": "Tue, 08 Jun 1999 00:07:25 +0200", "msg_from": "Egon Schmid <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" }, { "msg_contents": "> do you mean /rdb. I have used it with Coherent. It was distributed 1992\n\nYes, that's the one. Never tried it, but liked the idea. Easy to fix\nproblems :-)\n\n", "msg_date": "Tue, 8 Jun 1999 06:30:50 +0200 (CEST)", "msg_from": "Kaare Rasmussen <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PostgreSQL History(Parody)" } ]
[ { "msg_contents": "Release 6.5beta2\n\nI've used pg_upgrade to import data from 6.4.2.\n\nThe tables are created correctly, as far as I can see, but the data gets\nlost, so that no tuples are visible in any table I have yet looked at.\nThe data is actually present, because I can see it if I use `od -c' on\na table's file, but select * reports no rows.\n\nAny clues, please?\n\n\nNB: If this experience is repeated by others, anyone who had not backed up\nhis data separately would lose it all (if the tables are not repairable).\n\n\n-- \nOliver Elphick [email protected]\nIsle of Wight http://www.lfix.co.uk/oliver\n PGP key from public servers; key ID 32B8FAA1\n ========================================\n \"There is a way that seems right to a man, but in the \n end it leads to death.\" \n Proverbs 16:25 \n\n\n", "msg_date": "Mon, 07 Jun 1999 23:35:42 +0100", "msg_from": "\"Oliver Elphick\" <[email protected]>", "msg_from_op": true, "msg_subject": "pg_upgrade loses all data" }, { "msg_contents": "I have already noted in the Release notes and FAQ that pg_upgrade can\nnot be used in 6.5. Let me disable it now. I forgot to do that, and\nyour report confirms my fears.\n\n\n> Release 6.5beta2\n> \n> I've used pg_upgrade to import data from 6.4.2.\n> \n> The tables are created correctly, as far as I can see, but the data gets\n> lost, so that no tuples are visible in any table I have yet looked at.\n> The data is actually present, because I can see it if I use `od -c' on\n> a table's file, but select * reports no rows.\n> \n> Any clues, please?\n> \n> \n> NB: If this experience is repeated by others, anyone who had not backed up\n> his data separately would lose it all (if the tables are not repairable).\n> \n> \n> -- \n> Oliver Elphick [email protected]\n> Isle of Wight http://www.lfix.co.uk/oliver\n> PGP key from public servers; key ID 32B8FAA1\n> ========================================\n> \"There is a way that seems right to a man, but in the \n> end it leads to death.\" \n> Proverbs 16:25 \n> \n> \n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 7 Jun 1999 21:18:37 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] pg_upgrade loses all data" } ]
[ { "msg_contents": "Marc,\n\nI would like to remind you that the tar ball file name and the top source\ndirectory name for the official release are better assigned following the\nsame convention as previous releases. I mean:\n\ntar ball:\tpostgresql-6.5.tar.gz\ndirectory:\tpostgresql-6.5\n\nwould be nice. If you already decided to do that, please forgive me.\n--\nTatsuo Ishii\n", "msg_date": "Tue, 08 Jun 1999 10:25:47 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ... " }, { "msg_contents": "On Tue, 8 Jun 1999, Tatsuo Ishii wrote:\n\n> Marc,\n> \n> I would like you remind that the tar ball file name and the top source\n> directory name for the official release are better assigned in the\n> same convention as previoius release. I mean:\n> \n> tar ball:\tpostgresql-6.5.tar.gz\n> directory:\tpostgresql-6.5\n> \n> would be nice. If you alredy deicide to do that, please forgive me.\n\nS'alright, it will be done for the release, but the reminder is/will be\nappreciated. I created the beta2.tar.gz file and then clued in that I had\nforgotten to rename the directory :(\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 7 Jun 1999 23:23:36 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgresql-v6.5beta2.tar.gz ... " } ]
[ { "msg_contents": "\n> - Up time. This means hot backup and no locking of the database during\n> vacuum.\n> \nIf Vadim made the change to make pg_dump dump in one transaction then \nPostgreSQL already has \"hot backup\" in 6.5. This will do a consistent\nsnapshot of your database as it was when pg_dump began as long as you \ndon't change the database layout during the dump.\n\nAndreas\n", "msg_date": "Tue, 8 Jun 1999 09:18:33 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "ZEUGSWETTER Andreas IZ5 wrote:\n> \n> > - Up time. This means hot backup and no locking of the database during\n> > vacuum.\n> >\n> If Vadim made the change to make pg_dump dump in one transaction then\n> PostgreSQL already has \"hot backup\" in 6.5. This will do a consistent\n> snapshot of your database as it was when pg_dump began as long as you\n> don't change the database layout during the dump.\n\nThis is done.\n\nVacuum doesn't lock entire database. But it acquires access \nexclusive lock over table being vacuumed and so delay all \n(even SELECT) concurrent query executions. Lock released\nafter table vacuumed.\n\nVadim\n", "msg_date": "Tue, 08 Jun 1999 15:44:54 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "At 03:44 PM 6/8/99 +0800, Vadim Mikheev wrote:\n>ZEUGSWETTER Andreas IZ5 wrote:\n\n>> If Vadim made the change to make pg_dump dump in one transaction then\n>> PostgreSQL already has \"hot backup\" in 6.5. This will do a consistent\n>> snapshot of your database as it was when pg_dump began as long as you\n>> don't change the database layout during the dump.\n\n>This is done.\n\nYou boys need to publicize this. This lacking has been one of the\nbiggest arguments against using Postgres in any environment with\n24-hr access, one example being the web...\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 08 Jun 1999 06:56:57 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "Don Baccus wrote:\n> \n> At 03:44 PM 6/8/99 +0800, Vadim Mikheev wrote:\n> >ZEUGSWETTER Andreas IZ5 wrote:\n> \n> >> If Vadim made the change to make pg_dump dump in one transaction then\n> >> PostgreSQL already has \"hot backup\" in 6.5. This will do a consistent\n> >> snapshot of your database as it was when pg_dump began as long as you\n> >> don't change the database layout during the dump.\n> \n> >This is done.\n> \n> You boys need to publicize this. This lacking has been one of the\n> biggest arguments against using Postgres in any environment with\n> 24-hr access, one example being the web...\n\nWe discussed this issue recently and decided to follow this way,\nso I didn't post message when pg_dump was changed, assuming\nthat it's known by all -:)\n\nOr you talk that this should be mentioned in announcement/release\nnotes? May be... 
Though this is obvious feature of MVCC -\nconsistency read without blocking writers.\n\nVadim\n", "msg_date": "Tue, 08 Jun 1999 22:11:49 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> Or you talk that this should be mentioned in \n> announcement/release notes?\n\nI've added a mention of this in the release notes, and (Bruce) since\nI'll be regenerating INSTALL and HISTORY it will be included in those.\nI *should* be able to retrofit it into admin.ps.gz also...\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Tue, 08 Jun 1999 14:28:42 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "At 10:11 PM 6/8/99 +0800, Vadim Mikheev wrote:\n\n>We discussed this issue recently and decided to follow this way,\n>so I didn't post message when pg_dump was changed, assuming\n>that it's known by all -:)\n\nI don't mean to this group, or any of the postgres groups,\nI mean to the world at large, in which Postgres has a very\nnegative image for web work. Consistent dumps, killing\nof one very bad memory leak (and a bunch of not-so-bad\nones), and moving to mvcc from table-locking - these are\nthree huge improvements for people building web sites.\nFolks outside the normal Postgres community deserve to\nknow this.\n\nAnd the latest download executes my \"group by\" clauses rather\nthan killing the backend, as I found out last night. I've\njust sped up a page that returns a bar graph of monthly\ndata by an order of magnitude, woo-hoo! Before I was\nforced to do a separate select for each month (against\nabout 100,000 records, boo-hiss), now one nice select\ngrouping data by month just like real databases let me\ndo, I'm happy!\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 08 Jun 1999 07:32:41 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> \n> > - Up time. This means hot backup and no locking of the database during\n> > vacuum.\n> > \n> If Vadim made the change to make pg_dump dump in one transaction then \n> PostgreSQL already has \"hot backup\" in 6.5. This will do a consistent\n> snapshot of your database as it was when pg_dump began as long as you \n> don't change the database layout during the dump.\n\nExcellent point.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 11:41:11 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> At 10:11 PM 6/8/99 +0800, Vadim Mikheev wrote:\n> \n> >We discussed this issue recently and decided to follow this way,\n> >so I didn't post message when pg_dump was changed, assuming\n> >that it's known by all -:)\n> \n> I don't mean to this group, or any of the postgres groups,\n> I mean to the world at large, in which Postgres has a very\n> negative image for web work. 
Consistent dumps, killing\n> of one very bad memory leak (and a bunch of not-so-bad\n> ones), and moving to mvcc from table-locking - these are\n> three huge improvements for people building web sites.\n> Folks outside the normal Postgres community deserve to\n> know this.\n> \n> And the latest download executes my \"group by\" clauses rather\n> than killing the backend, as I found out last night. I've\n> just sped up a page that returns a bar graph of monthly\n> data by an order of magnitude, woo-hoo! Before I was\n> forced to do a separate select for each month (against\n> about 100,000 records, boo-hiss), now one nice select\n> grouping data by month just like real databases let me\n> do, I'm happy!\n\nWe really don't have access to web admin channels except through our\nmailing list and software lists. You need to get the word out. We\naddressed these issues escecially for web/high traffic users.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 12:25:33 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> > Or you talk that this should be mentioned in \n> > announcement/release notes?\n> \n> I've added a mention of this in the release notes, and (Bruce) since\n> I'll be regenerating INSTALL and HISTORY it will be included in those.\n> I *should* be able to retrofit it into admin.ps.gz also...\n\nAdded to release notes:\n\n Another big benefit of MVCC is that <application>pg_dump</application>\n can now generate consistent backups of live, active databases, without\n blocking active transactions.\n\nGood idea to add this. I also added to the Enhancements list:\n\npg_dump now can generate consistent snapshots on active databases(Vadim) \n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 12:40:57 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "At 12:25 PM 6/8/99 -0400, Bruce Momjian wrote:\n\n>We really don't have access to web admin channels except through our\n>mailing list and software lists. You need to get the word out. We\n>addressed these issues escecially for web/high traffic users.\n\nI have been, quietly, to people I know. I gave up on 6.4.2 for my\nparticular use and had told folks that, and have now told the same\nsuite of folks that 6.5 appears to be a vast improvement for this\nenvironment, at least for modest web/db projects. Postgres still\ndoesn't have anywhere near the scalability of, say, Oracle and I'm\nnot certain that should be the goal. It's far simpler to administer\nand install largely for this very reason...and that's a very good\nthing for folks wanting to do modest sites.\n\nAny graphics designers out there? How about a small \"Powered\nby Postgres\" GIF with a snazzy, attractive look? I've come\nup with the phrase, but my artistic skills are limited to\nphotography, I'm afraid! 
\n\nI'd certainly use such a graphic to decorate my pages if it\nwere available...\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 08 Jun 1999 09:46:42 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "Don Baccus wrote:\n> \n> Any graphics designers out there? How about a small \"Powered\n> by Postgres\" GIF with a snazzy, attractive look? I've come\n> up with the phrase, but my artistic skills are limited to\n> photography, I'm afraid!\n> \n> I'd certainly use such a graphic to decorate my pages if it\n> were available...\n\nI posted one below a days ago... Any chance to see all such\nthings somewhere in ftp/www postgresql.org servers?\n\nVadim\nP.S. Created by Michael Ivanov <[email protected]>", "msg_date": "Wed, 09 Jun 1999 01:10:55 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "At 01:10 AM 6/9/99 +0800, Vadim Mikheev wrote:\n\n>I posted one below a days ago... Any chance to see all such\n>things somewhere in ftp/www postgresql.org servers?\n\nNice! I missed this when you posted it, there's been a lot\nof traffic on the list with y'all getting ready for release.\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 08 Jun 1999 10:34:11 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "Don Baccus wrote:\n> \n> At 01:10 AM 6/9/99 +0800, Vadim Mikheev wrote:\n> \n> >I posted one below a days ago... Any chance to see all such\n> >things somewhere in ftp/www postgresql.org servers?\n> \n> Nice! I missed this when you posted it, there's been a lot\n> of traffic on the list with y'all getting ready for release.\n\nThis was posted ~ 6 monthes ago -:)\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 02:03:20 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "On Tue, Jun 08, 1999 at 07:32:41AM -0700, Don Baccus wrote:\n> I don't mean to this group, or any of the postgres groups,\n> I mean to the world at large, in which Postgres has a very\n> negative image for web work. Consistent dumps, killing\n> of one very bad memory leak (and a bunch of not-so-bad\n> ones), and moving to mvcc from table-locking - these are\n> three huge improvements for people building web sites.\n> Folks outside the normal Postgres community deserve to\n> know this.\n\nI have to agree with Don, and maybe try to offer an additional couple\nof notes. I think, and this is only me - not supported by any facts,\nthat it would be useful having a highlights section in the release\nnotes, since the efforts of the developers (of which I hope to be able\nto contribute too soon) make sure that each release results in *many*\nupdates. I would propose classifying these updates into a number of\ncategories, that would help users identify is (a) it is worth them\nupdgrading and (maybe more important) (b) if it is worth them trying\nto use PGSQL now that \"XYZ\" has been fixed/implemented.\n\nA quick suggested list:\n\n1. New features\n2. Major Performance Updates\n3. Major Bug Fixes\n4. Security / Reliability Changes\n5. 
Other\n\nListing updates simple in reverse time order is too confusing.\n\nBTW If no one has time, I will volunteer to do this...\n\njust my $.04c (inflation)\n-- \nPeter Galbavy\nKnowledge Matters Ltd\nhttp://www.knowledge.com/\n", "msg_date": "Tue, 8 Jun 1999 19:16:49 +0100", "msg_from": "Peter Galbavy <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "At 02:03 AM 6/9/99 +0800, Vadim Mikheev wrote:\n\n>This was posted ~ 6 monthes ago -:)\n\nOh, I've only been reading the list for a few weeks...\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 08 Jun 1999 11:43:17 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "On 08-Jun-99 Vadim Mikheev wrote:\n> Don Baccus wrote:\n>> \n>> Any graphics designers out there? How about a small \"Powered\n>> by Postgres\" GIF with a snazzy, attractive look? I've come\n>> up with the phrase, but my artistic skills are limited to\n>> photography, I'm afraid!\n>> \n>> I'd certainly use such a graphic to decorate my pages if it\n>> were available...\n> \n> I posted one below a days ago... Any chance to see all such\n> things somewhere in ftp/www postgresql.org servers?\n> \n> Vadim\n> P.S. Created by Michael Ivanov <[email protected]>\n\nI kinda like this one from http://guru.umc.se/imgdb/\nMarc found it last week but I didn't get a chance to put it up.\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================", "msg_date": "Tue, 08 Jun 1999 14:48:59 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "Vince Vielhaber wrote:\n> \n> I kinda like this one from http://guru.umc.se/imgdb/\n> Marc found it last week but I didn't get a chance to put it up.\n\nI like it too.\nSo, any chance to put all these and others somewhere\nin www.postgresql.org?\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 02:59:31 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "\nOn 08-Jun-99 Vadim Mikheev wrote:\n> Vince Vielhaber wrote:\n>> \n>> I kinda like this one from http://guru.umc.se/imgdb/\n>> Marc found it last week but I didn't get a chance to put it up.\n> \n> I like it too.\n> So, any chance to put all these and others somewhere\n> in www.postgresql.org?\n\nSomething that I've been thinking about for the new site, since more than\none of us is thinking this it'll be there (I no longer have to wonder if\nit's a good idea :)\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n", "msg_date": "Tue, 08 Jun 1999 15:01:46 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email 
protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "\nHave you added to our web site? *raised eyebrow*\n\nOn Wed, 9 Jun 1999, Vadim Mikheev wrote:\n\n> Don Baccus wrote:\n> > \n> > Any graphics designers out there? How about a small \"Powered\n> > by Postgres\" GIF with a snazzy, attractive look? I've come\n> > up with the phrase, but my artistic skills are limited to\n> > photography, I'm afraid!\n> > \n> > I'd certainly use such a graphic to decorate my pages if it\n> > were available...\n> \n> I posted one below a days ago... Any chance to see all such\n> things somewhere in ftp/www postgresql.org servers?\n> \n> Vadim\n> P.S. Created by Michael Ivanov <[email protected]>\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 16:12:20 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> On Tue, Jun 08, 1999 at 07:32:41AM -0700, Don Baccus wrote:\n> > I don't mean to this group, or any of the postgres groups,\n> > I mean to the world at large, in which Postgres has a very\n> > negative image for web work. Consistent dumps, killing\n> > of one very bad memory leak (and a bunch of not-so-bad\n> > ones), and moving to mvcc from table-locking - these are\n> > three huge improvements for people building web sites.\n> > Folks outside the normal Postgres community deserve to\n> > know this.\n> \n> I have to agree with Don, and maybe try to offer an additional couple\n> of notes. I think, and this is only me - not supported by any facts,\n> that it would be useful having a highlights section in the release\n> notes, since the efforts of the developers (of which I hope to be able\n> to contribute too soon) make sure that each release results in *many*\n> updates. I would propose classifying these updates into a number of\n> categories, that would help users identify is (a) it is worth them\n> updgrading and (maybe more important) (b) if it is worth them trying\n> to use PGSQL now that \"XYZ\" has been fixed/implemented.\n> \n> A quick suggested list:\n> \n> 1. New features\n> 2. Major Performance Updates\n> 3. Major Bug Fixes\n> 4. Security / Reliability Changes\n> 5. Other\n> \n> Listing updates simple in reverse time order is too confusing.\n> \n\nAlready done, on web site, and release notes. Sections are Bugs,\nEnhancements, Source code changes.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 15:17:50 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "At 02:48 PM 6/8/99 -0400, Vince Vielhaber wrote:\n\n>I kinda like this one from http://guru.umc.se/imgdb/\n>Marc found it last week but I didn't get a chance to put it up.\n\nI do, too! 
A little more subdued and not quite as distracting.\n\nIf you want to see how it looks on a page, it now appears on\nall pages at http://donb.photo.net/tweeterdom.\n\n(AOLServer makes it easy to decorate all pages coming out of\na site so adding things like this is a piece of cake).\n\nYou'll notice I made it a link to http://www.postgresql.org...\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 08 Jun 1999 12:21:16 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "On Tue, 8 Jun 1999, The Hermit Hacker wrote:\n\n> \n> Have you added to our web site? *raised eyebrow*\n\nOops, had meant to respond to Vince's on the umc.se Powered By :)\n\n\n> \n> On Wed, 9 Jun 1999, Vadim Mikheev wrote:\n> \n> > Don Baccus wrote:\n> > > \n> > > Any graphics designers out there? How about a small \"Powered\n> > > by Postgres\" GIF with a snazzy, attractive look? I've come\n> > > up with the phrase, but my artistic skills are limited to\n> > > photography, I'm afraid!\n> > > \n> > > I'd certainly use such a graphic to decorate my pages if it\n> > > were available...\n> > \n> > I posted one below a days ago... Any chance to see all such\n> > things somewhere in ftp/www postgresql.org servers?\n> > \n> > Vadim\n> > P.S. Created by Michael Ivanov <[email protected]>\n> \n> Marc G. Fournier ICQ#7615664 IRC Nick: Scrappy\n> Systems Administrator @ hub.org \n> primary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n> \n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 16:27:46 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "\nOn 08-Jun-99 The Hermit Hacker wrote:\n> On Tue, 8 Jun 1999, The Hermit Hacker wrote:\n> \n>> \n>> Have you added to our web site? *raised eyebrow*\n> \n> Oops, had meant to respond to Vince's on the umc.se Powered By :)\n\nActually I hoped you were typing at Vadim! I totally forgot about it, \nkinda, since I'm more looking to the new stuff (which I'm still waiting\nfor final decisions so we can get going). Also it occurred to me a few\nminutes ago that we never *really* got an approval from the elephant's\nauthor to use that logo - unless core heard something that I didn't.\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n", "msg_date": "Tue, 08 Jun 1999 16:17:24 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Logos-n-stuff Was: Re: [HACKERS] 6.6 items" }, { "msg_contents": "\nI asked Jan about it over a month ago, and his response to me was that we\ncould use it...\n\nOn Tue, 8 Jun 1999, Vince Vielhaber wrote:\n\n> \n> On 08-Jun-99 The Hermit Hacker wrote:\n> > On Tue, 8 Jun 1999, The Hermit Hacker wrote:\n> > \n> >> \n> >> Have you added to our web site? 
*raised eyebrow*\n> > \n> > Oops, had meant to respond to Vince's on the umc.se Powered By :)\n> \n> Actually I hoped you were typing at Vadim! I totally forgot about it, \n> kinda, since I'm more looking to the new stuff (which I'm still waiting\n> for final decisions so we can get going). Also it occurred to me a few\n> minutes ago that we never *really* got an approval from the elephant's\n> author to use that logo - unless core heard something that I didn't.\n> \n> Vince.\n> -- \n> ==========================================================================\n> Vince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n> # include <std/disclaimers.h> TEAM-OS2\n> Online Campground Directory http://www.camping-usa.com\n> Online Giftshop Superstore http://www.cloudninegifts.com\n> ==========================================================================\n> \n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 17:49:59 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Logos-n-stuff Was: Re: [HACKERS] 6.6 items" }, { "msg_contents": "> > > Or you talk that this should be mentioned in\n> > > announcement/release notes?\n> > I've added a mention of this in the release notes, and (Bruce) since\n> > I'll be regenerating INSTALL and HISTORY it will be included in those.\n> > I *should* be able to retrofit it into admin.ps.gz also...\n> Added to release notes:\n> Good idea to add this. I also added to the Enhancements list:\n> pg_dump now can generate consistent snapshots on active databases(Vadim)\n\n?? \"I've added a mention of this in the release notes\" was supposed to\nkeep you from doing anything! Wholesale reorganization isn't likely to\nmake it into hardcopy, and I'm not happy about that. Please remember\nthat I've already generated hardcopy for a release that was supposed\nto happen tomorrow, so gratuitous changes aren't going to go over very\nwell with me :/\n\nI see that lock.sgml has been updated. But I'll probably need ~2 days\nelapsed to finish off the docs, as I mentioned last week. \n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Wed, 09 Jun 1999 01:57:42 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" }, { "msg_contents": "> > > > Or you talk that this should be mentioned in\n> > > > announcement/release notes?\n> > > I've added a mention of this in the release notes, and (Bruce) since\n> > > I'll be regenerating INSTALL and HISTORY it will be included in those.\n> > > I *should* be able to retrofit it into admin.ps.gz also...\n> > Added to release notes:\n> > Good idea to add this. I also added to the Enhancements list:\n> > pg_dump now can generate consistent snapshots on active databases(Vadim)\n> \n> ?? \"I've added a mention of this in the release notes\" was supposed to\n> keep you from doing anything! Wholesale reorganization isn't likely to\n> make it into hardcopy, and I'm not happy about that. Please remember\n> that I've already generated hardcopy for a release that was supposed\n> to happen tomorrow, so gratuitous changes aren't going to go over very\n> well with me :/\n> \n> I see that lock.sgml has been updated. But I'll probably need ~2 days\n> elapsed to finish off the docs, as I mentioned last week. \n\nOK, I am confused. 
What should we do or not do? I thought release\nnotes->HISTORY was OK for changes. I realize lock.sgml is harder. Is\nit because the release notes are also in admin.gz?\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 22:03:19 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" } ]
[ { "msg_contents": "There seems to be a problem with MakeIndexable(), though I haven't\nconfirmed that the problem lies there:\n\npostgres=> select * from t1 where i like '2';\nERROR: pg_atoi: error in \"2�\": can't parse \"�\"\n\nistm that this query should behave itself, or at least fail in some\nother way :(\n\nI'll guess that, even though there isn't a wildcard to pattern match,\nMakeIndexable() is adding a trailing \\377 to the string?\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Tue, 08 Jun 1999 13:53:01 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "I don't like LIKE" }, { "msg_contents": "On Tue, 8 Jun 1999, Thomas Lockhart wrote:\n\n> Date: Tue, 08 Jun 1999 13:53:01 +0000\n> From: Thomas Lockhart <[email protected]>\n> To: Postgres Hackers List <[email protected]>\n> Subject: [HACKERS] I don't like LIKE\n> \n> There seems to be a problem with MakeIndexable(), though I haven't\n> confirmed that the problem lies there:\n> \n> postgres=> select * from t1 where i like '2';\n> ERROR: pg_atoi: error in \"2О©╫\": can't parse \"О©╫\"\n> \n> istm that this query should behave itself, or at least fail in some\n> other way :(\n> \n> I'll guess that, even though there isn't a wildcard to pattern match,\n> MakeIndexable() is adding a trailing \\377 to the string?\n\nJust run on fresh cvs:\ntest=> \\d t1 \nTable = t1\n+----------------------------------+----------------------------------+-------+\n| Field | Type | Length|\n+----------------------------------+----------------------------------+-------+\n| id | int4 not null | 4 |\n| a | varchar() | 4 |\n+----------------------------------+----------------------------------+-------+\nIndex: id_t1\n\ntest=> select * from t1 where id like '2';\nid|a \n--+---\n 2|at1\n(1 row)\n\nIt seems it's run ok, because I used --enable-locale option.\n\n\tRegards,\n\n\t\tOleg\n\n> \n> - Thomas\n> \n> -- \n> Thomas Lockhart\t\t\t\[email protected]\n> South Pasadena, California\n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Tue, 8 Jun 1999 18:19:13 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] I don't like LIKE" }, { "msg_contents": "[Charset iso-8859-1 unsupported, filtering to ASCII...]\n> There seems to be a problem with MakeIndexable(), though I haven't\n> confirmed that the problem lies there:\n> \n> postgres=> select * from t1 where i like '2';\n> ERROR: pg_atoi: error in \"2_\": can't parse \"_\"\n> \n> istm that this query should behave itself, or at least fail in some\n> other way :(\n> \n> I'll guess that, even though there isn't a wildcard to pattern match,\n> MakeIndexable() is adding a trailing \\377 to the string?\n\nPlease post example. Works here:\n\t\n\tselect * from pg_class where rename like '2';\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 13:08:46 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] I don't like LIKE" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> [Charset iso-8859-1 unsupported, filtering to ASCII...]\n> > There seems to be a problem with MakeIndexable(), though I haven't\n> > confirmed that the problem lies there:\n> >\n> > postgres=> select * from t1 where i like '2';\n> > ERROR: pg_atoi: error in \"2_\": can't parse \"_\"\n> >\n> > istm that this query should behave itself, or at least fail in some\n> > other way :(\n> >\n> > I'll guess that, even though there isn't a wildcard to pattern match,\n> > MakeIndexable() is adding a trailing \\377 to the string?\n> \n> Please post example. Works here:\n> \n> select * from pg_class where rename like '2';\n\nMaybe he meant something like this (using 6.4.2)\n\nhannu=> \\d t\n \nTable = t\n+----------------------------------+----------------------------------+-------+\n| Field | Type |\nLength|\n+----------------------------------+----------------------------------+-------+\n| i | int4 \n| 4 |\n+----------------------------------+----------------------------------+-------+\nhannu=> create index indx on t(i);\nCREATE\nhannu=> vacuum;\nVACUUM\nhannu=> select * from t where i like '1';\nERROR: pg_atoi: error in \"1�\": can't parse \"�\"\nhannu=>\n\n---------------------\nHannu\n", "msg_date": "Tue, 08 Jun 1999 23:58:01 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] I don't like LIKE" }, { "msg_contents": "> > Please post example. Works here:\n> > \n> > select * from pg_class where rename like '2';\n> \n> Maybe he meant something like this (using 6.4.2)\n> \n> hannu=> \\d t\n> \n> Table = t\n> +----------------------------------+----------------------------------+-------+\n> | Field | Type |\n> Length|\n> +----------------------------------+----------------------------------+-------+\n> | i | int4 \n> | 4 |\n> +----------------------------------+----------------------------------+-------+\n> hannu=> create index indx on t(i);\n> CREATE\n> hannu=> vacuum;\n> VACUUM\n> hannu=> select * from t where i like '1';\n> ERROR: pg_atoi: error in \"1_\": can't parse \"_\"\n> hannu=>\n\nCan't reproduce:\n\t\n\ttest=> create table tx(i int);\n\tCREATE\n\ttest=> create index xx on tx(i);\n\tCREATE\n\ttest=> select * from tx where i like '3';\n\ti\n\t-\n\t(0 rows)\n\t\n\ttest=> vacuum;\n\tVACUUM\n\ttest=> select * from tx where i like '3';\n\ti\n\t-\n\t(0 rows)\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 17:17:17 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] I don't like LIKE" }, { "msg_contents": "Thomas Lockhart <[email protected]> writes:\n> postgres=> select * from t1 where i like '2';\n> ERROR: pg_atoi: error in \"2�\": can't parse \"�\"\n\n> I'll guess that, even though there isn't a wildcard to pattern match,\n> MakeIndexable() is adding a trailing \\377 to the string?\n\nYup. This is an example of my point the other day: we shouldn't be\nadding those index restriction clauses in the parser, but much later\non after type conversions have settled down and we know what we're\ndealing with. 
I don't think there's a good quick-fix, we'll just have\nto do it right.\n\nIf you use variables to prevent makeIndexable from triggering, you\nwill find that the system will indeed take\n\tint4 like int4\n\tfloat4 like float4\n\tfloat8 like float8\nwhich I find surprising, seeing as how there are no such operators.\nAutomatic anything->text conversion, apparently. I wonder whether\nthis isn't being a little too free with auto conversion.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 08 Jun 1999 18:18:12 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] I don't like LIKE " }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> Can't reproduce:\n\n> \ttest=> select * from tx where i like '3';\n> \ti\n> \t-\n> \t(0 rows)\n\nIf you've built with USE_LOCALE you won't see the failure,\nbecause the parser doesn't add the right-side index qualification\nin that case (at least not in 6.5; 6.4 did).\n\nWe still need a better solution for non-ASCII locales, too...\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 08 Jun 1999 18:32:32 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] I don't like LIKE " }, { "msg_contents": "> If you use variables to prevent makeIndexable from triggering, you\n> will find that the system will indeed take\n> int4 like int4\n> float4 like float4\n> float8 like float8\n> which I find surprising, seeing as how there are no such operators.\n> Automatic anything->text conversion, apparently. I wonder whether\n> this isn't being a little too free with auto conversion.\n\nYeah, maybe. But since there aren't regression tests for it, and no\napparent interest in adding them, it's pretty damn hard to add useful\nfeatures without damaging other things, eh?\n\nHmm, maybe I'd better simmer down about the docs stuff before\nanswering more mail ;)\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Wed, 09 Jun 1999 01:58:03 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] I don't like LIKE" } ]
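A minimal sketch of the rewrite under discussion, using the t1/i names from Thomas's report. The exact clauses makeIndexable() emits are internal to the parser; the SQL below only restates what the thread describes (a lower bound on the pattern plus an upper bound built by appending a \377 byte) and why that upper bound cannot survive pg_atoi when the column is int4.

    -- what the user types; column i is int4 and carries a btree index
    SELECT * FROM t1 WHERE i LIKE '2';

    -- roughly what the parser adds so the index can be used, spelled out as SQL
    -- ('2\377' stands for the pattern with the trailing \377 byte appended)
    SELECT * FROM t1 WHERE i LIKE '2' AND i >= '2' AND i <= '2\377';

    -- for a text column both extra bounds are harmless; for an int4 column the
    -- literal '2\377' is handed to pg_atoi, which fails exactly as reported:
    --   ERROR:  pg_atoi: error in "2\377": can't parse "\377"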
[ { "msg_contents": "I'm talking with the Debian porters for Alpha architecture, who are having\ntrouble getting 6.5beta2 to work properly. \n\nIs there anyone using this setup? If so, do you get a good regression test\nresult? (They don't.)\n\n\nThis patch was necessary for them to get 6.5beta2 to build at all:\n======================================================================\ndiff -ruN pgsql/src/config.sub postgresql-patched/src/config.sub\n--- pgsql/src/config.sub\tTue Apr 20 09:01:00 1999\n+++ postgresql-patched/src/config.sub\tTue Jun 8 17:21:32 1999\n@@ -152,7 +152,7 @@\n \ttahoe | i860 | m32r | m68k | m68000 | m88k | ns32k | arc | arm \\\n \t\t| arme[lb] | pyramid | mn10200 | mn10300 | tron | a29k \\\n \t\t| 580 | i960 | h8300 | hppa | hppa1.0 | hppa1.1 | hppa2.0 \\\n-\t\t| alpha | alphaev5 | alphaev56 | we32k | ns16k | clipper \\\n+\t\t| alpha | alphaev5 | alphaev56 | alphapca5[6,7] | we32k | ns16k | clipper \\\n \t\t| i370 | sh | powerpc | powerpcle | 1750a | dsp16xx | pdp11 \\\n \t\t| mips64 | mipsel | mips64el | mips64orion | mips64orionel \\\n \t\t| mipstx39 | mipstx39el \\\n@@ -176,7 +176,7 @@\n \t | mips-* | pyramid-* | tron-* | a29k-* | romp-* | rs6000-* \\\n \t | power-* | none-* | 580-* | cray2-* | h8300-* | i960-* \\\n \t | xmp-* | ymp-* | hppa-* | hppa1.0-* | hppa1.1-* | hppa2.0*-* \\\n-\t | alpha-* | alphaev5-* | alphaev56-* | we32k-* | cydra-* \\\n+\t | alpha-* | alphaev5-* | alphaev56-* | alphapca5[6,7]-* | we32k-* | cydra-* \\\n \t | ns16k-* | pn-* | np1-* | xps100-* | clipper-* | orion-* \\\n \t | sparclite-* | pdp11-* | sh-* | powerpc-* | powerpcle-* \\\n \t | sparc64-* | mips64-* | mipsel-* \\\ndiff -ruN pgsql/src/template/linux_alpha postgresql-patched/src/template/linux_alpha\n--- pgsql/src/template/linux_alpha\tTue Jun 16 06:04:00 1998\n+++ postgresql-patched/src/template/linux_alpha\tTue Jun 8 17:21:32 1999\n@@ -1,5 +1,5 @@\n AROPT:crs\n-CFLAGS:-O2 -mieee\n+CFLAGS:-mieee\n SHARED_LIB:-fpic\n ALL:\n SRCH_INC:\n\n======================================================================\n\nThis is required because of problems with egcs:\n\n>> Is this patch suitable for forwarding upstream? Do all linux Alphas\n>> have to disable -O2?\n>\n> Yes. At least all that use egcs as a primary compiler (gcc 2.7.x never\n> worked for us, so unless they're using gcc 2.8.x, it won't work with -O2).\n\n...and...\n\n> The patch I mailed you DEFINITELY needs to be included upstream if\n> possible. All Alphas running linux and using egcs REQUIRE it to be\n> compiled without optimisation since it hoses the spinlock code (I'm\n> looking into it with the guy that wrote the spinlock code for Alphas).\n> And the alphapca56,7 machines are fairly common now. Mention to them that\n> it might be helpful to use a config.sub from a more recent autoconf or\n> libtool since it includes another two or three new Alpha types (at least\n> the Debian sources do...I make sure of that :-)\n\n-- \n Vote against SPAM: http://www.politik-digital.de/spam/\n ========================================\nOliver Elphick [email protected]\nIsle of Wight http://www.lfix.co.uk/oliver\n PGP key from public servers; key ID 32B8FAA1\n ========================================\n \"Fear not, for I am with thee; be not dismayed, \n for I am thy God. 
I will strengthen thee and I will \n help thee; yea, I will uphold thee with the right hand\n of my righteousness.\" Isaiah 41:10 \n\n", "msg_date": "Tue, 08 Jun 1999 18:14:36 +0100", "msg_from": "\"Oliver Elphick\" <[email protected]>", "msg_from_op": true, "msg_subject": "Anyone working on linux Alpha?" }, { "msg_contents": " diff -ruN pgsql/src/template/linux_alpha postgresql-patched/src/template/linux_alpha\n> --- pgsql/src/template/linux_alpha\tTue Jun 16 06:04:00 1998\n> +++ postgresql-patched/src/template/linux_alpha\tTue Jun 8 17:21:32 1999\n> @@ -1,5 +1,5 @@\n> AROPT:crs\n> -CFLAGS:-O2 -mieee\n> +CFLAGS:-mieee\n> SHARED_LIB:-fpic\n> ALL:\n> SRCH_INC:\n> \n\n\n> ======================================================================\n> \n> This is required because of problems with egcs:\n> \n> >> Is this patch suitable for forwarding upstream? Do all linux Alphas\n> >> have to disable -O2?\n> >\n> > Yes. At least all that use egcs as a primary compiler (gcc 2.7.x never\n> > worked for us, so unless they're using gcc 2.8.x, it won't work with -O2).\n> \n> ...and...\n> \n> > The patch I mailed you DEFINITELY needs to be included upstream if\n> > possible. All Alphas running linux and using egcs REQUIRE it to be\n> > compiled without optimisation since it hoses the spinlock code (I'm\n> > looking into it with the guy that wrote the spinlock code for Alphas).\n> > And the alphapca56,7 machines are fairly common now. Mention to them that\n> > it might be helpful to use a config.sub from a more recent autoconf or\n> > libtool since it includes another two or three new Alpha types (at least\n> > the Debian sources do...I make sure of that :-)\n\n\nI have just removed optimization on linux/alpha for this reason. Can I\nuse -O optimization on this platform instead of -O2?\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 13:35:02 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PORTS] Anyone working on linux Alpha?" }, { "msg_contents": "\nOn Tue, 8 Jun 1999, Bruce Momjian wrote:\n\n> I have just removed optimization on linux/alpha for this reason. Can I\n> use -O optimization on this platform instead of -O2?\n\nI'll test this and find out. In most cases with egcs, -O or -O0 is\nsufficient, but it's unknown in this situation.\n\nI'll get back to you in less than an hour as to whether or not -O will\nwork. I agree that any optimisation would be better than none.\n\nC\n\n", "msg_date": "Tue, 8 Jun 1999 13:52:41 -0400 (EDT)", "msg_from": "Christopher C Chimelis <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PORTS] Anyone working on linux Alpha?" } ]
[ { "msg_contents": "What about potential deadlocking issues with vacuum? Haven't tried to\ndeadlock vacuum, but wondering if it's possible.\n\t-DEJ\n\n> -----Original Message-----\n> From:\tBruce Momjian [SMTP:[email protected]]\n> Sent:\tTuesday, June 08, 1999 11:41 AM\n> To:\tThomas Lockhart\n> Cc:\tVadim Mikheev; Don Baccus; ZEUGSWETTER Andreas IZ5;\n> [email protected]; '[email protected]'\n> Subject:\tRe: [HACKERS] 6.6 items\n> \n> > > Or you talk that this should be mentioned in \n> > > announcement/release notes?\n> > \n> > I've added a mention of this in the release notes, and (Bruce) since\n> > I'll be regenerating INSTALL and HISTORY it will be included in those.\n> > I *should* be able to retrofit it into admin.ps.gz also...\n> \n> Added to release notes:\n> \n> Another big benefit of MVCC is that\n> <application>pg_dump</application>\n> can now generate consistent backups of live, active databases,\n> without\n> blocking active transactions.\n> \n> Good idea to add this. I also added to the Enhancements list:\n> \n> pg_dump now can generate consistent snapshots on active databases(Vadim) \n> \n> -- \n> Bruce Momjian | http://www.op.net/~candle\n> [email protected] | (610) 853-3000\n> + If your life is a hard drive, | 830 Blythe Avenue\n> + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 12:31:41 -0500 ", "msg_from": "\"Jackson, DeJuan\" <[email protected]>", "msg_from_op": true, "msg_subject": "RE: [HACKERS] 6.6 items" }, { "msg_contents": "\"Jackson, DeJuan\" wrote:\n> \n> What about potential deadlocking issues with vacuum? Haven't tried to\n> deadlock vacuum, but wondering if it's possible.\n\nShouldn't be possible.\n\nVacuum never acquires AccessExclusive lock on more than\none table at once. This lock is released immediately\nafter a relation processed.\n\nSELECT/COPY_TO queries from pg_dump lock relations\nin AccessShare mode for duration of query.\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 01:52:06 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.6 items" } ]
[ { "msg_contents": "In the 6.5beta2 distribution, contrib/spi/preprocessor contains step1.e:\n\n$ file step1.e\nstep1.e: ELF 32-bit LSB executable, Intel 80386, version 1, dynamically linked \n(uses shared libs), not stripped\n\nIt isn't built by the Makefile. \n\nIs there some special reason for including this executable rather than\nbuilding it?\n\n-- \n Vote against SPAM: http://www.politik-digital.de/spam/\n ========================================\nOliver Elphick [email protected]\nIsle of Wight http://www.lfix.co.uk/oliver\n PGP key from public servers; key ID 32B8FAA1\n ========================================\n \"Fear not, for I am with thee; be not dismayed, \n for I am thy God. I will strengthen thee and I will \n help thee; yea, I will uphold thee with the right hand\n of my righteousness.\" Isaiah 41:10 \n\n\n", "msg_date": "Tue, 08 Jun 1999 18:42:47 +0100", "msg_from": "\"Oliver Elphick\" <[email protected]>", "msg_from_op": true, "msg_subject": "contrib/spi/preprocessor" }, { "msg_contents": "\"Oliver Elphick\" <[email protected]> writes:\n> In the 6.5beta2 distribution, contrib/spi/preprocessor contains step1.e:\n> $ file step1.e\n> step1.e: ELF 32-bit LSB executable, Intel 80386, version 1, dynamically linked \n> (uses shared libs), not stripped\n\nLooks like a mistake to me --- that shouldn't be there.\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 08 Jun 1999 18:36:47 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] contrib/spi/preprocessor " } ]
[ { "msg_contents": "\nSELECT * FROM test WHERE test IN (SELECT * FROM test) fails with strange error\nRegression test for new Numeric type\nrefint problems\ninvalidate cache on elog failure, abort is OK\nlarge objects security problem\nspinlock stuck problem\nbenchmark performance problem, index lookups on entries with many dup keys\nadd more detail inref/lock.sgml, ref/set.sgml to reflect MVCC & locking changes\n\nGenerate Admin, User, hardcopy postscript\nGenerate HISTORY from sgml sources.\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 13:49:18 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Open 6.5 items" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> SELECT * FROM test WHERE test IN (SELECT * FROM test) fails with strange error\n> Regression test for new Numeric type\n> refint problems\n> invalidate cache on elog failure, abort is OK\n\nIsn't it fixed by Tom?\n\n> large objects security problem\n> spinlock stuck problem\n\nOld one, possible when elog(FATAL) and elog(ERROR) inside bufmgr.\n\n> benchmark performance problem, index lookups on entries with many dup keys\n> add more detail inref/lock.sgml, ref/set.sgml to reflect MVCC & locking changes\n ^^^^^^^^^\nDoing it right now.\n\n> \n> Generate Admin, User, hardcopy postscript\n> Generate HISTORY from sgml sources.\n\n+ vacuum problems (two assertions) in the case of high update\n activity on the same table\n\n Seems fixed, though I wonder why it's fixed by changes I made, -:(\n\n+ 'Deadlock detected' errors when running a row-locking query\n (SELECT ... ORDER BY FOR UPDATE in PL/pgSQL)\n\n Bug-report was posted today to ports list.\n I hope to address it before release.\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 02:09:52 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Open 6.5 items" }, { "msg_contents": "> Bruce Momjian wrote:\n> > \n> > SELECT * FROM test WHERE test IN (SELECT * FROM test) fails with strange error\n> > Regression test for new Numeric type\n> > refint problems\n> > invalidate cache on elog failure, abort is OK\n> \n> Isn't it fixed by Tom?\n\nRemoved.\n\n> \n> > large objects security problem\n> > spinlock stuck problem\n> \n> Old one, possible when elog(FATAL) and elog(ERROR) inside bufmgr.\n\nRemoved. Right?\n\n> \n> > benchmark performance problem, index lookups on entries with many dup keys\n> > add more detail inref/lock.sgml, ref/set.sgml to reflect MVCC & locking changes\n> ^^^^^^^^^\n> Doing it right now.\n\nOK.\n\n> \n> > \n> > Generate Admin, User, hardcopy postscript\n> > Generate HISTORY from sgml sources.\n> \n> + vacuum problems (two assertions) in the case of high update\n> activity on the same table\n> \n> Seems fixed, though I wonder why it's fixed by changes I made, -:(\n> \n> + 'Deadlock detected' errors when running a row-locking query\n> (SELECT ... ORDER BY FOR UPDATE in PL/pgSQL)\n> \n\nI will leave these off the list.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 15:16:24 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Open 6.5 items" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> > > spinlock stuck problem\n> >\n> > Old one, possible when elog(FATAL) and elog(ERROR) inside bufmgr.\n> \n> Removed. Right?\n\nNo time to fix it for 6.5, leave them for future.\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 03:24:13 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Open 6.5 items" } ]
[ { "msg_contents": "\"Vadim B. Mikheev - CVS\" wrote:\n> \n> Update of /usr/local/cvsroot/pgsql/doc/src/sgml/ref\n> In directory hub.org:/tmp/cvs-serv54505\n> \n> Modified Files:\n> lock.sgml set.sgml\n> Log Message:\n> MVCC updation.\n\nI'll update lock.l and set.l tomorrow.\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 04:14:37 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [COMMITTERS] 'pgsql/doc/src/sgml/ref lock.sgml set.sgml'" } ]
[ { "msg_contents": "What is the release date now? Still tomorrow?\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 16:18:12 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Release schedule" }, { "msg_contents": "On Tue, 8 Jun 1999, Bruce Momjian wrote:\n\n> What is the release date now? Still tomorrow?\n\nDue to all the changes that have gone on in the past 24, I'm going to do a\nquick beta3 up tonight with a release for Thursday ... *just in case* ....\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 17:49:17 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Release schedule" }, { "msg_contents": "> On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> \n> > What is the release date now? Still tomorrow?\n> \n> Due to all the changes that have gone on in the past 24, I'm going to do a\n> quick beta3 up tonight with a release for Thursday ... *just in case* ....\n> \n\nCan we also wait until 6.5.* stabilizes before we split the cvs tree?\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 16:51:32 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Release schedule" }, { "msg_contents": "On Tue, 8 Jun 1999, Bruce Momjian wrote:\n\n> > On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> > \n> > > What is the release date now? Still tomorrow?\n> > \n> > Due to all the changes that have gone on in the past 24, I'm going to do a\n> > quick beta3 up tonight with a release for Thursday ... *just in case* ....\n> > \n> \n> Can we also wait until 6.5.* stabilizes before we split the cvs tree?\n\nThat defeats the purpose of the split...the split is meant to allow Vadim\nand all to keep moving forward while giving us a stable branch to build a\nv6.5.1 off of ...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 17:55:18 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: Release schedule" }, { "msg_contents": "> On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> \n> > > On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> > > \n> > > > What is the release date now? Still tomorrow?\n> > > \n> > > Due to all the changes that have gone on in the past 24, I'm going to do a\n> > > quick beta3 up tonight with a release for Thursday ... *just in case* ....\n> > > \n> > \n> > Can we also wait until 6.5.* stabilizes before we split the cvs tree?\n> \n> That defeats the purpose of the split...the split is meant to allow Vadim\n> and all to keep moving forward while giving us a stable branch to build a\n> v6.5.1 off of ...\n> \n\nYes, but the double patch application is a pain. 
Can you find anyone\nwho wants to start on 6.6 in the next two weeks, rather than working on\n6.5 problems?\n\nIf there is anyone, please speak up.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 17:14:59 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Release schedule" }, { "msg_contents": "Bruce Momjian wrote:\n> \n> Yes, but the double patch application is a pain. Can you find anyone\n> who wants to start on 6.6 in the next two weeks, rather than working on\n ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nNot me. Some issues should be discussed first.\n\n> 6.5 problems?\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 05:45:04 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: Release schedule" }, { "msg_contents": "> Bruce Momjian wrote:\n> > \n> > Yes, but the double patch application is a pain. Can you find anyone\n> > who wants to start on 6.6 in the next two weeks, rather than working on\n> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n> Not me. Some issues should be discussed first.\n> \n> > 6.5 problems?\n> \n> Vadim\n> \n\nAll I am asking is that we wait until things get quiet with 6.5 before\nsplitting. Those first weeks are all double-patching. We also don't\nwant to dilute ourselves with people working on new featues while we try\nto get bugs fixed.\n\nWe don't have to wait for 6.5 to be closed before splitting CVS, though,\njust quiet.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 17:47:58 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Re: Release schedule" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> All I am asking is that we wait until things get quiet with 6.5 before\n> splitting. Those first weeks are all double-patching.\n\nI agree with Bruce on this --- double-patching is a pain, and we could\nprobably save ourselves some work if we waited a couple weeks before\nsplitting the tree. I know I'm not planning on doing anything but\nbug-fixes for a while...\n\n\t\t\tregards, tom lane\n", "msg_date": "Tue, 08 Jun 1999 18:28:43 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: Release schedule " }, { "msg_contents": "\nIf everyone is in agreement, then I can't dispute it :)\n\nOn Tue, 8 Jun 1999, Tom Lane wrote:\n\n> Bruce Momjian <[email protected]> writes:\n> > All I am asking is that we wait until things get quiet with 6.5 before\n> > splitting. Those first weeks are all double-patching.\n> \n> I agree with Bruce on this --- double-patching is a pain, and we could\n> probably save ourselves some work if we waited a couple weeks before\n> splitting the tree. I know I'm not planning on doing anything but\n> bug-fixes for a while...\n> \n> \t\t\tregards, tom lane\n> \n\nMarc G. 
Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 22:30:23 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: Release schedule " }, { "msg_contents": "Tom Lane <[email protected]> writes:\n> Bruce Momjian <[email protected]> writes:\n> > All I am asking is that we wait until things get quiet with 6.5 before\n> > splitting. Those first weeks are all double-patching.\n> I agree with Bruce on this --- double-patching is a pain, and we could\n> probably save ourselves some work if we waited a couple weeks before\n> splitting the tree. I know I'm not planning on doing anything but\n> bug-fixes for a while...\n\nI suspect I'm probably bringing up something that the core development\nteam considered but had a good reason not to do, but on the offhand\nchance it would be helpful, I'll nonetheless venture to point out that\nif you created a branch for each release, patches applied to that\nbranch could be easily merged into the mainline---at least until the\nsources begin to diverge, which probably wouldn't happen for the first\ncouple of weeks (the time period under discussion)---which would\nobviate double-patching.\n\nI'll mention that it seems to work pretty well for the egcs\ndevelopment team---but again, if this has all been discussed before\nand decided against, please forget I said anything.\n\nMike.\n", "msg_date": "08 Jun 1999 21:33:41 -0400", "msg_from": "Michael Alan Dorman <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: Release schedule" } ]
[ { "msg_contents": "Oleg Broytmann <[email protected]> writes:\n> I know exactly 1 (one) program that incorporate (embed) Perl interpreter\n>- - it is editor VIM (well-known vi-clone from www.vim.org). I think anyone\n>who want to learn how to embed perl may start looking int vim sources.\n\nAlso now GIMP.\n\n> Yes, I am biased toward Python, but I cannot say \"I recommend embed\n>Python to construct PL/Python\" - I have no time to lead the development,\n>and I doubt there are many pythoners here (D'Arcy?).\n\nI went down that road a little ways to see what it looked like, and decided\nthat was the wrong road to take.\n\nThe reason people want an embedded procedural language is because procedures\nin such a language have access to the guts of the backend, and can perform\nmany operations much more efficiently than having to go push everything\nthrough the FE->SQL->compiler->executor->tuple list->FE->lather->rinse->repeat\nbottleneck. \n\nI decided that the proper solution was to expose all the internal guts of\nthe backend through a proper CORBA interface. That way, any language with\nan ORB could act as an embedded procedural language.\n\nCurrently, I'm working on imbedding ORBit into Python to get my skills up\nto speed. When that's accomplished, I intend to tackle PostgreSQL. For\na proof of concept, you can look at mod_corba (ORBit embedded in Apache,\nexposing the Apache API).\n\n\t-Michael Robinson\n\n", "msg_date": "Wed, 9 Jun 1999 08:07:07 +0800 (CST)", "msg_from": "Michael Robinson <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "Michael Robinson wrote:\n\n> The reason people want an embedded procedural language is because procedures\n> in such a language have access to the guts of the backend, and can perform\n> many operations much more efficiently than having to go push everything\n> through the FE->SQL->compiler->executor->tuple list->FE->lather->rinse->repeat\n> bottleneck.\n\n That's one reason. Another one is that you can create stored\n procedures that get triggered on events\n (INSERT/UPDATE/DELETE) and can perform referential integrity\n checks and other things then.\n\n Some of them could also be done by constraints (if we\n sometimes have the rule system powerful enought to do it\n correctly). Some can't.\n\n>\n> I decided that the proper solution was to expose all the internal guts of\n> the backend through a proper CORBA interface. That way, any language with\n> an ORB could act as an embedded procedural language.\n\n And how does your CORBA get triggered in the case someone\n uses the good old psql? Or MUST everything then talk CORBA\n and you disable any other kind of access completely? Note\n that for a trigger that has to ensure referential integrity\n it's not enough to say \"it will be triggered if the user uses\n the correct access path\". It has to ensure that the user\n doesn't use the wrong one!\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Wed, 9 Jun 1999 07:52:11 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "[email protected] (Jan Wieck) writes:\n> That's one reason. 
Another one is that you can create stored\n> procedures that get triggered on events\n> (INSERT/UPDATE/DELETE) and can perform referential integrity\n> checks and other things then.\n>\n> Some of them could also be done by constraints (if we\n> sometimes have the rule system powerful enought to do it\n> correctly). Some can't.\n\nYes, this is true. However, between SQL, and PL/PGSQL, I think we have\nthis covered, and I don't see a lot of urgency for adding new languages\njust for this purpose.\n\n> And how does your CORBA get triggered in the case someone\n> uses the good old psql? Or MUST everything then talk CORBA\n> and you disable any other kind of access completely? Note\n> that for a trigger that has to ensure referential integrity\n> it's not enough to say \"it will be triggered if the user uses\n> the correct access path\". It has to ensure that the user\n> doesn't use the wrong one!\n\nWell, that's the nice thing about ORBit. You can link two CORBA-connected\nsystems into one binary, and ORBit will give a clean and efficient in-process\nconnection between the two. So, just as pgsql can expose it's guts via\nCORBA, so, too, can the programming runtime of your choice. All that's\nrequired (in theory) is a one-time wrapper for pgsql's current embedded-\nlanguage API, and you don't have to mess with the pgsql side ever again. Of\ncourse, this win only applies to languages with ORBit bindings (currently C,\nwith C++, Python, Ada, and several others in the pipeline).\n\nBut, again, I don't see a lot of urgency for this kind of solution.\n\n\t-Michael Robinson\n\n\n", "msg_date": "Wed, 9 Jun 1999 17:45:55 +0800 (CST)", "msg_from": "Michael Robinson <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" } ]
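To make the trigger use case concrete: a sketch of a referential integrity check written as a PL/pgSQL trigger procedure in 6.5-era style (body as a quoted string, trigger functions returning opaque). The master/detail tables and names are invented; contrib/spi/refint does the same job in C.

    CREATE FUNCTION check_master_exists() RETURNS opaque AS '
    DECLARE
        mid int4;
    BEGIN
        -- look up the referenced master row
        SELECT id INTO mid FROM master WHERE id = NEW.master_id;
        IF mid IS NULL THEN
            RAISE EXCEPTION ''detail row references a missing master row'';
        END IF;
        RETURN NEW;
    END;
    ' LANGUAGE 'plpgsql';

    CREATE TRIGGER detail_master_check
        BEFORE INSERT OR UPDATE ON detail
        FOR EACH ROW EXECUTE PROCEDURE check_master_exists();

Because the procedure fires on every INSERT or UPDATE of detail, the check holds no matter which access path the client uses, which is the objection raised here against a CORBA-only hook.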
[ { "msg_contents": "At 14:40 8/06/99 +0400, you wrote:\n>Hello!\n>\n>On Tue, 8 Jun 1999, Jan Wieck wrote:\n>> This time, the Perl interpreter has to become a silly little\n>> working slave. Beeing quiet until it's called and quiet\n>> again after having served one function call until the big\n>> master PostgreSQL calls him again.\n>> \n>> This flexibility requires a real good design of the\n>> interpreters internals. And that's what I'm addressing here.\n>\n> I know exactly 1 (one) program that incorporate (embed) Perl interpreter\n>- it is editor VIM (well-known vi-clone from www.vim.org). I think anyone\n\nApache also has Perl very nicely embedded (as opposed to available through indirect CGI calls); when it is embedded it automatically reloads and recompiles changed scripts.\n\nI presume that the Apache code may be useful in this process.\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/\n", "msg_date": "Wed, 09 Jun 1999 11:05:57 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "\nOn 09-Jun-99 Philip Warner wrote:\n> At 14:40 8/06/99 +0400, you wrote:\n>>Hello!\n>>\n>>On Tue, 8 Jun 1999, Jan Wieck wrote:\n>>> This time, the Perl interpreter has to become a silly little\n>>> working slave. Beeing quiet until it's called and quiet\n>>> again after having served one function call until the big\n>>> master PostgreSQL calls him again.\n>>> \n>>> This flexibility requires a real good design of the\n>>> interpreters internals. And that's what I'm addressing here.\n>>\n>> I know exactly 1 (one) program that incorporate (embed) Perl interpreter\n>>- it is editor VIM (well-known vi-clone from www.vim.org). I think anyone\n> \n> Apache also has Perl very nicely embedded (as opposed to available through\n> indirect CGI calls); when it is embedded it automatically reloads and\n> recompiles changed scripts.\n\nIMHO, It's bad practice to embed Perl, C++ and so on into postgres.\nbecause it slow down postgres, increase memory requirement \nand amount of leaks and errors. 
\n\nPostgres should use it's own language like plpgsql, and\nit's better to point your mind to improve and speed up it.\n\nFor example:\n Add pre-compilation and syntax check while create function\n executed\n Add some string and regex manipulation functions.\n Add exception handling.\n\nAll completely non standard thing may (and should) be done outside of postgres\nor in worst case by DYNALOAD mechanic.\n\nYou can look at Apache's mod_perl and mod_php3 to compare two ways\nmentioned above:\n\n First - embedding perl with all it's history and lots of function completely\nunnecessary and inconvenient for web programming.\n\n Second - php3 - language initially developed to embed into apache.\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Wed, 09 Jun 1999 13:34:13 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "At 13:34 9/06/99 +0400, you wrote:\n>\n>IMHO, It's bad practice to embed Perl, C++ and so on into postgres.\n>because it slow down postgres, increase memory requirement \n>and amount of leaks and errors. \n\nYou can't possibly mean to say Perl is a slow, leaky memory hog! ;-} \n\nDo I detect a conspiracy here? (Oleg?)\n\n>Postgres should use it's own language like plpgsql, and\n>it's better to point your mind to improve and speed up it.\n>\n>For example:\n> Add pre-compilation and syntax check while create function\n> executed\n\nAt least a syntax check...\n\n\n> Add some string and regex manipulation functions.\n> Add exception handling.\n>\n\nThese all sound like a good idea. \n\nJan: If I volunteer to attempt any of these, can you provide advice?\n\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/\n", "msg_date": "Wed, 09 Jun 1999 22:23:20 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "On Wed, 9 Jun 1999, Philip Warner wrote:\n> You can't possibly mean to say Perl is a slow, leaky memory hog! ;-} \n> Do I detect a conspiracy here? (Oleg?)\n\n I am not in the language wars, sorry :)\n\n> >For example:\n> > Add pre-compilation and syntax check while create function\n> > executed\n> \n> At least a syntax check...\n\n Python can store compiled bytecodes on disk.\n\n> > Add exception handling.\n\n Python is definetely good on this.\n\n I'll following the discussion.\n\n> ----------------------------------------------------------------\n> Philip Warner | __---_____\n> Albatross Consulting Pty. Ltd. |----/ - \\\n> (A.C.N. 
008 659 498) | /(@) ______---_\n> Tel: +61-03-5367 7422 | _________ \\\n> Fax: +61-03-5367 7430 | ___________ |\n> Http://www.rhyme.com.au | / \\|\n> | --________--\n> PGP key available upon request, | /\n> and from pgp5.ai.mit.edu:11371 |/\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Wed, 9 Jun 1999 16:35:59 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "\nOn 09-Jun-99 Philip Warner wrote:\n> At 13:34 9/06/99 +0400, you wrote:\n>>\n>>IMHO, It's bad practice to embed Perl, C++ and so on into postgres.\n>>because it slow down postgres, increase memory requirement \n>>and amount of leaks and errors. \n> \n> You can't possibly mean to say Perl is a slow, leaky memory hog! ;-} \n\n No - it just a huge bug ;-))\n I like Perl - but it's really too big to be combined with something else.\n\n> \n> Do I detect a conspiracy here? (Oleg?)\n\n I hope not. No masquerading is installed - I'm Dmitry yet. ;-))\n\n> \n>>Postgres should use it's own language like plpgsql, and\n>>it's better to point your mind to improve and speed up it.\n>>\n>>For example:\n>> Add pre-compilation and syntax check while create function\n>> executed\n> \n> At least a syntax check...\n> \n> \n>> Add some string and regex manipulation functions.\n>> Add exception handling.\n>>\n> \n> These all sound like a good idea. \n\nThanks\n\n> \n> Jan: If I volunteer to attempt any of these, can you provide advice?\n> \n \n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Wed, 09 Jun 1999 16:39:21 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "Dmitry Samersoff wrote:\n> \n> IMHO, It's bad practice to embed Perl, C++ and so on into postgres.\n> because it slow down postgres, increase memory requirement\n> and amount of leaks and errors.\n> \n> Postgres should use it's own language like plpgsql, and\n> it's better to point your mind to improve and speed up it.\n> \n> For example:\n> Add pre-compilation and syntax check while create function\n> executed\n> Add some string and regex manipulation functions.\n> Add exception handling.\n\nWhat we really need is a better PL interface, the current one has \nquite a few limitations. Corba may help here, but _not_ a simple \nCorba wrapper of existing api\n\n> All completely non standard thing may (and should) be done outside of postgres\n> or in worst case by DYNALOAD mechanic.\n\nCurrently we are doing it in your \"worst case\" way :)\n\nthe v6.5 has even special scripts to create/destroy PLs. Only SQL and \ninternal (compiled-in C) are \"embedded\" in non-removable way. 
\n\nEven PL/PGSQL must be installed to be used\n\n> You can look at Apache's mod_perl and mod_php3 to compare two ways\n> mentioned above:\n> \n> First - embedding perl with all it's history and lots of function completely\n> unnecessary and inconvenient for web programming.\n> \n> Second - php3 - language initially developed to embed into apache.\n\nCompare this to Zope - using an advanced language to craft a special\ntool\n(in this case application server), which can both be used from other\nservers \nbut also has its own server (also written in python) which can\noutperform \napache in many usage patterns. And it has even a small SQL server\n(gadfly)\nembedded for both example of SQL adapter and for smaller scale work\n(also \nwritten in python)\nI think this achieves both the slickness of php3 and with extendability\nof perl.\n\nNow - what has it to do with embedding languages in PostgreSQL?\n\nIMHO having better support for PLs should go further than just calling \nfunctions in SELECT or triggers - to make it possible for people to\neasily \nadd new types/indexing schemes the PL should also be usable for\ninput/output \nfunctions, access methods and maybe even for prototyping new fe/be\nprotocol.\n\nI hope to get a draft implementation of this in 6.5 before its official\nlaunch :)\n\n-------------------\nHannu\n", "msg_date": "Wed, 09 Jun 1999 22:28:19 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" }, { "msg_contents": "Hannu Krosing wrote:\n\n> What we really need is a better PL interface, the current one has\n> quite a few limitations. Corba may help here, but _not_ a simple\n> Corba wrapper of existing api\n\n Actually I'm a little in doubt if calling it an interface\n isn't gone too far. The only difference (from the call\n handlers point of view) is that the fmgr calls one and the\n same C entry point for different functions and passes\n therefore the OID of the function meant. So in reality it's\n more another call technique than an interface.\n\n On the point on CORBA I say: If Someone can create a general\n interface that makes integration of external\n languages/applications as easy so little old ladies and 12\n year old pacman players can do it, than he should. If that\n new, fantastic, glory technique is based on CORBA or COBOL, I\n wouln't mind - I would shout a welcome!\n\n I'm not able to produce such a magic thing. All I can create\n are things like PL/Tcl, PL/pgSQL, the actual pain of the rule\n system and some other ugly ones. And as long as noone comes\n up with the above peace of magic, I'll go on providing the\n things I can create.\n\n It was hard to write them, so it should be hard to use them.\n I only try to make it not as hard as to learn an entirely new\n language from scratch. In the PL/Tcl tree there is a little\n test suite. I'm sure that a Perl expert could create the\n same suite in a few minutes (like I did it) - but only in\n PL/Perl - never in PL/Tcl, because then he would have a\n larger learning curve ahead.\n\n Let people use the languages they're familiar with, and\n you'll get good, reliable, performat applications. 
Force them\n to use something they've never seen before and they'll \"shoot\n theirself into the foot\".\n\n To understand this one someone must (re)read:\n\n http://www.cs.rpi.edu/~edwardsb/shoot.in.foot.html\n\n>\n> > All completely non standard thing may (and should) be done outside of postgres\n> > or in worst case by DYNALOAD mechanic.\n>\n> Currently we are doing it in your \"worst case\" way :)\n>\n> the v6.5 has even special scripts to create/destroy PLs. Only SQL and\n> internal (compiled-in C) are \"embedded\" in non-removable way.\n>\n> Even PL/PGSQL must be installed to be used\n\n And I don't think that having PL/pgSQL to follow the\n generalized standard way all PL's must go is the worst case.\n IMHO it's the best of all cases. Think of other standards -\n ALL Windows applications must use the Windows API - except\n for those created by M$. Maybe they have a good reason not to\n use the Windows API - they know it's internals.\n\n> IMHO having better support for PLs should go further than just calling\n> functions in SELECT or triggers - to make it possible for people to\n> easily\n> add new types/indexing schemes the PL should also be usable for\n> input/output\n> functions, access methods and maybe even for prototyping new fe/be\n> protocol.\n\n For PL/Tcl, this time will surely come. When Tcl8.1 has\n settled so 8.0 could be considered the release that has to be\n supported for backward compatibility, I'll kick out the 7.6\n support from PL/Tcl. This would again increase it's\n performance.\n\n The actual limitation not to be able to create data type\n input/output functions is because the call handler doesn't\n support it. Definitely, it could! It already has to lookup\n pg_type to find the input/output functions for the return-\n value/arguments. If the OID in the pg_type tuples in-/out-\n function is the one the call handler actually is called for,\n why not acting so?\n\n The PL/Tcl module you're seeing with v6.5 is mainly still the\n one I've developed based on Tcl7.6 (the fact that one and the\n same sources actually compile with 7.6 and 8.0 stands for\n itself). All the above features would have required the\n Tcl_Obj interface which was new to 8.0. But at the time I\n created it, 7.6 was the default in install packages of unices\n and surely there where many 7.5's still in use. I decided it\n would be better to first support most Tcl installations and\n later force those who used it to do an upgrade.\n\n>\n> I hope to get a draft implementation of this in 6.5 before its official\n> launch :)\n\n I'll be back...\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Wed, 9 Jun 1999 23:51:26 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] PL/Lang (was: Priorities for 6.6)" } ]
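As a concrete picture of the standard way all PLs go, and of the call-handler technique described above, this is roughly what the 6.5 install scripts run to register PL/pgSQL; the library path is an assumption and varies per installation. A PL/Perl or PL/Python handler would be wired in through the same two statements.

    -- register the one C entry point for the language; the function manager
    -- calls it for every plpgsql function and passes the function's OID
    CREATE FUNCTION plpgsql_call_handler() RETURNS opaque
        AS '/usr/local/pgsql/lib/plpgsql.so' LANGUAGE 'C';

    -- declare the language itself, pointing at that handler
    CREATE TRUSTED PROCEDURAL LANGUAGE 'plpgsql'
        HANDLER plpgsql_call_handler
        LANCOMPILER 'PL/pgSQL';

Once registered, CREATE FUNCTION ... LANGUAGE 'plpgsql' routes every call through plpgsql_call_handler(), and dropping the language and the handler removes the PL again, which is what is meant above by PLs being installable and removable rather than built in.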
[ { "msg_contents": "\nOnce more, this is a release candidate...I was considering holding off\nuntil Thursday, and if anyone feels that we should, please feel free to\npop up, but unless anyone objects, I'm going to continue working on a\nWed @ 15:30EST release...\n\nNote that if anyone commits any source code changes, this will put things\noff by one day, but docs changes won't affect this...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Tue, 8 Jun 1999 22:40:26 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Beta3 Available ..." } ]
[ { "msg_contents": "I have just talked to Thomas Lockhart, and he will need a few days to\nfinish up the docs. He has said he needs some time once all the changes\nare made. There are over 800 pages of documenation, so it takes some\ntime to complete.\n\nI recommend a Monday, June 14th release date. I know it is later than\nwe wanted, but I am not sure what choice we have unless we want to ship\nwith incomplete documenation, which seems wrong to me.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Tue, 8 Jun 1999 23:31:24 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "6.5 Release date" }, { "msg_contents": "On Tue, 8 Jun 1999, Bruce Momjian wrote:\n\n> I have just talked to Thomas Lockhart, and he will need a few days to\n> finish up the docs. He has said he needs some time once all the changes\n> are made. There are over 800 pages of documenation, so it takes some\n> time to complete.\n> \n> I recommend a Monday, June 14th release date. I know it is later than\n> we wanted, but I am not sure what choice we have unless we want to ship\n> with incomplete documenation, which seems wrong to me.\n\nI concur...let's hold her till June 14th then. No source changes between\nnow and then, unless *absolutely* required...agreed? :)\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 9 Jun 1999 00:59:15 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "The Hermit Hacker wrote:\n> \n> On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> \n> > I have just talked to Thomas Lockhart, and he will need a few days to\n> > finish up the docs. He has said he needs some time once all the changes\n> > are made. There are over 800 pages of documenation, so it takes some\n> > time to complete.\n> >\n> > I recommend a Monday, June 14th release date. I know it is later than\n> > we wanted, but I am not sure what choice we have unless we want to ship\n> > with incomplete documenation, which seems wrong to me.\n> \n> I concur...let's hold her till June 14th then. No source changes between\n> now and then, unless *absolutely* required...agreed? :)\n\nWe'll try... :))\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 12:40:37 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "On Wed, 9 Jun 1999, Vadim Mikheev wrote:\n\n> The Hermit Hacker wrote:\n> > \n> > On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> > \n> > > I have just talked to Thomas Lockhart, and he will need a few days to\n> > > finish up the docs. He has said he needs some time once all the changes\n> > > are made. There are over 800 pages of documenation, so it takes some\n> > > time to complete.\n> > >\n> > > I recommend a Monday, June 14th release date. I know it is later than\n> > > we wanted, but I am not sure what choice we have unless we want to ship\n> > > with incomplete documenation, which seems wrong to me.\n> > \n> > I concur...let's hold her till June 14th then. No source changes between\n> > now and then, unless *absolutely* required...agreed? :)\n> \n> We'll try... 
:))\n\n*If* I see a source change, I will try and build up a new beta that night\nso that ppl can run through it, but if we can avoid that, all the better\n:)\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 9 Jun 1999 01:56:38 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "On Wed, 9 Jun 1999, The Hermit Hacker wrote:\n\n> On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> \n> > I have just talked to Thomas Lockhart, and he will need a few days to\n> > finish up the docs. He has said he needs some time once all the changes\n> > are made. There are over 800 pages of documenation, so it takes some\n> > time to complete.\n> > \n> > I recommend a Monday, June 14th release date. I know it is later than\n> > we wanted, but I am not sure what choice we have unless we want to ship\n> > with incomplete documenation, which seems wrong to me.\n> \n> I concur...let's hold her till June 14th then. No source changes between\n> now and then, unless *absolutely* required...agreed? :)\n\nChange the date on the web page?\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n\n", "msg_date": "Wed, 9 Jun 1999 05:41:08 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "\nGo for her...\n\nOn Wed, 9 Jun 1999, Vince Vielhaber wrote:\n\n> On Wed, 9 Jun 1999, The Hermit Hacker wrote:\n> \n> > On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> > \n> > > I have just talked to Thomas Lockhart, and he will need a few days to\n> > > finish up the docs. He has said he needs some time once all the changes\n> > > are made. There are over 800 pages of documenation, so it takes some\n> > > time to complete.\n> > > \n> > > I recommend a Monday, June 14th release date. I know it is later than\n> > > we wanted, but I am not sure what choice we have unless we want to ship\n> > > with incomplete documenation, which seems wrong to me.\n> > \n> > I concur...let's hold her till June 14th then. No source changes between\n> > now and then, unless *absolutely* required...agreed? :)\n> \n> Change the date on the web page?\n> \n> Vince.\n> -- \n> ==========================================================================\n> Vince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n> # include <std/disclaimers.h> TEAM-OS2\n> Online Campground Directory http://www.camping-usa.com\n> Online Giftshop Superstore http://www.cloudninegifts.com\n> ==========================================================================\n> \n> \n> \n\nMarc G. 
Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 9 Jun 1999 09:36:50 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "> On Wed, 9 Jun 1999, The Hermit Hacker wrote:\n> \n> > On Tue, 8 Jun 1999, Bruce Momjian wrote:\n> > \n> > > I have just talked to Thomas Lockhart, and he will need a few days to\n> > > finish up the docs. He has said he needs some time once all the changes\n> > > are made. There are over 800 pages of documenation, so it takes some\n> > > time to complete.\n> > > \n> > > I recommend a Monday, June 14th release date. I know it is later than\n> > > we wanted, but I am not sure what choice we have unless we want to ship\n> > > with incomplete documenation, which seems wrong to me.\n> > \n> > I concur...let's hold her till June 14th then. No source changes between\n> > now and then, unless *absolutely* required...agreed? :)\n> \n> Change the date on the web page?\n\nYes, please. Any chance for new web look by then? (I am a comedian.)\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 08:37:24 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "\nOn 09-Jun-99 Bruce Momjian wrote:\n>> On Wed, 9 Jun 1999, The Hermit Hacker wrote:\n>> \n>> > On Tue, 8 Jun 1999, Bruce Momjian wrote:\n>> > \n>> > > I have just talked to Thomas Lockhart, and he will need a few days to\n>> > > finish up the docs. He has said he needs some time once all the changes\n>> > > are made. There are over 800 pages of documenation, so it takes some\n>> > > time to complete.\n>> > > \n>> > > I recommend a Monday, June 14th release date. I know it is later than\n>> > > we wanted, but I am not sure what choice we have unless we want to ship\n>> > > with incomplete documenation, which seems wrong to me.\n>> > \n>> > I concur...let's hold her till June 14th then. No source changes between\n>> > now and then, unless *absolutely* required...agreed? :)\n>> \n>> Change the date on the web page?\n> \n> Yes, please. Any chance for new web look by then? (I am a comedian.)\n\nIt didn't look good, but a water main break closed the complex and I'm \nhome today. So I'm working on it now. I made up some templates for the\nnew stuff and we'll see how it goes. I still need to write a couple of \nquick scripts to make future changes easier, but that's only a few min\njob.\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n", "msg_date": "Wed, 09 Jun 1999 08:47:51 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "Sorry Mark,\n\n this late I have now a regression test for the NUMERIC data\n type. 
Should I add it or is it too late?\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Wed, 9 Jun 1999 15:08:56 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "> > Yes, please. Any chance for new web look by then? (I am a comedian.)\n> \n> It didn't look good, but a water main break closed the complex and I'm \n> home today. So I'm working on it now. I made up some templates for the\n> new stuff and we'll see how it goes. I still need to write a couple of \n> quick scripts to make future changes easier, but that's only a few min\n> job.\n\nAw, water main break. That's a shame. :-) (Yes!)\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 09:12:43 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "On Wed, 9 Jun 1999, Jan Wieck wrote:\n\n> Sorry Mark,\n> \n> this late I have now a regression test for the NUMERIC data\n> type. Should I add it or is it too late?\n\nI personally see no problems with adding a new regression test to the\ntree...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 9 Jun 1999 10:31:55 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "> I have now a regression test for the NUMERIC data\n> type. Should I add it or is it too late?\n\nAdd it. I'll help test. We'll ask Mark about it later ;)\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Wed, 09 Jun 1999 13:35:30 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "On Wed, 9 Jun 1999, Bruce Momjian wrote:\n\n> > > Yes, please. Any chance for new web look by then? (I am a comedian.)\n> > \n> > It didn't look good, but a water main break closed the complex and I'm \n> > home today. So I'm working on it now. I made up some templates for the\n> > new stuff and we'll see how it goes. I still need to write a couple of \n> > quick scripts to make future changes easier, but that's only a few min\n> > job.\n> \n> Aw, water main break. That's a shame. :-) (Yes!)\n\nI love the genuine sympathy in this one *rofl*\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 9 Jun 1999 10:42:50 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "Marc G. Fournier wrote:\n\n>\n> On Wed, 9 Jun 1999, Jan Wieck wrote:\n>\n> > Sorry Mark,\n> >\n> > this late I have now a regression test for the NUMERIC data\n> > type. 
Should I add it or is it too late?\n>\n> I personally see no problems with adding a new regression test to the\n> tree...\n\n But I see a little one on it.\n\n The test I've created uses some values that have been\n calcuated by bc(1) with a precision of 1000 digits after the\n decimal point. And it excessively uses LOG, POWER etc.\n\n The SQL script plus the expected .out are about 500K. It\n run's about 1 hour and 13 minutes on a 333 MHz PII system (no\n disk activity during it - real CPU time).\n\n I think it should be somewhat separate from the usual\n regression because some older hardware would need days to\n complete the entire suite.\n\n What about adding a 'make longtest' to the regression and\n printing a hint about it at the end of 'make runtest'? I see\n we're short of time, so I'll do it this way now and maybe\n later turn the wheel back.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Thu, 10 Jun 1999 16:46:11 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": ">\n> > I have now a regression test for the NUMERIC data\n> > type. Should I add it or is it too late?\n>\n> Add it. I'll help test. We'll ask Mark about it later ;)\n\n O.K. - it's in place.\n\n I've added 2 tests for NUMERIC. One that is performed by\n default on target 'runtest'. This one uses 100 digits after\n the decimal point. It causes the entire regression suite to\n take now 2.5 times longer than before.\n\n The other one is mainly the same test, just with other values\n and this time 1000 digits after the decimal point. This one\n is added to the end of the regression suite if the make\n target 'bigtest' is used.\n\n Both stress the CPU havily by calculating square roots,\n logarithms and power to the wanted precision. I don't have\n any clue how many internal add, subtract, multiply and divide\n cycles they cause - all the complex functions are based on\n Taylor/McLaurin. But it must be some hundreds.\n\n All expected results have been precalculated with bc(1) using\n a slightly higher precision. So you shouldn't have to check\n ALL the numerical results in the 250K numeric_big test by\n hand - maybe you do it anyway just to be sure that there is\n not one single difference of 10^^-1000 :-)\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Thu, 10 Jun 1999 20:13:21 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "[email protected] (Jan Wieck) writes:\n> The SQL script plus the expected .out are about 500K. It\n> run's about 1 hour and 13 minutes on a 333 MHz PII system (no\n> disk activity during it - real CPU time).\n\nYipes. 
That's a tad heavyweight for a regression test of *one* feature.\n\nI had in mind something that would help us catch portability bugs,\nnot something that would encourage people to buy new hardware ;-).\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 10 Jun 1999 16:57:00 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] 6.5 Release date " }, { "msg_contents": "> The SQL script plus the expected .out are about 500K. It\n> run's about 1 hour and 13 minutes on a 333 MHz PII system (no\n> disk activity during it - real CPU time).\n> \n> I think it should be somewhat separate from the usual\n> regression because some older hardware would need days to\n> complete the entire suite.\n> \n> What about adding a 'make longtest' to the regression and\n> printing a hint about it at the end of 'make runtest'? I see\n> we're short of time, so I'll do it this way now and maybe\n> later turn the wheel back.\n> \n\nCan we get a short version for 6.5, and add a 'decimal-test' version to\na later release.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Thu, 10 Jun 1999 18:52:24 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.5 Release date" }, { "msg_contents": "\n\n> \n> I concur...let's hold her till June 14th then. No source changes between\n> now and then, unless *absolutely* required...agreed? :)\n>\n\nPlease put the implementaion of mdtruncate() back to old one.\nCurrent implementation is the worst one.\n\nFor example,trying to truncate a\n\t\t1.5G( = 1G + 0.5G ) table to 1.2G,\nvacuum expander would expand the table to\n\t\t1.7G( = 1.2G + 0.5G) .\n\nRegards.\n\nHiroshi Inoue\[email protected]\n", "msg_date": "Fri, 11 Jun 1999 08:46:17 +0900", "msg_from": "\"Hiroshi Inoue\" <[email protected]>", "msg_from_op": false, "msg_subject": "RE: [HACKERS] 6.5 Release date" }, { "msg_contents": "I need a patch to the current tree. I don't see a previous version of\nmd.c, and at this point, I am not going to guess on a fix.\n\n\n> > I concur...let's hold her till June 14th then. No source changes between\n> > now and then, unless *absolutely* required...agreed? :)\n> >\n> \n> Please put the implementaion of mdtruncate() back to old one.\n> Current implementation is the worst one.\n> \n> For example,trying to truncate a\n> \t\t1.5G( = 1G + 0.5G ) table to 1.2G,\n> vacuum expander would expand the table to\n> \t\t1.7G( = 1.2G + 0.5G) .\n> \n> Regards.\n> \n> Hiroshi Inoue\n> [email protected]\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Thu, 10 Jun 1999 20:37:48 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] 6.5 Release date" } ]
[ { "msg_contents": "Dear All,\n\nI have written a STDEV aggregate function which seems to work; the only problem is that I am not sure that the method I am using was intended by the designers of aggregates.\n\nI am using the first parameter of sfunc1 to store a pointer to a context record, and sfunc1 creates and returns that pointer when it is first called (the DB definition specified initcond1 = '0'). In this way I can accumulate all the details I need.\n\nThe problems is that the way I read the code, there is an expectation that the parameters of sfuncX and finalfunc match the basetype. This is clearly not the case, and I have declared them as int4, which will also presumable break on 64 bit implementations...\n\nAnyway, the code is below. Any suggestions as to how I should handle complex context within aggregates would be appreciated.\n\nThanks,\n\nPhilip Warner.\n\n\n-------------- STDEv Func:\ntypedef struct {\n float4 ssq;\n float4 s;\n float4 n;\n} ctx_type;\n\nint4\nsdev_ag1(int4 ctxPtr, float4 *f2)\n{\n ctx_type *ctx;\n int4 *result;\n\n if ((ctxPtr) == 0) {\n ctx = (ctx_type*)palloc(sizeof(ctx_type));\n ctx->ssq = 0;\n ctx->s = 0;\n ctx->n = 0;\n } else {\n ctx = (ctx_type*)(ctxPtr);\n };\n\n ctx->ssq += (*f2) * (*f2);\n ctx->s += (*f2);\n ctx->n++;\n\n return (int4)ctx;\n}\n\nfloat4*\nsdev_fin(int4 ctxPtr, void* p)\n{\n ctx_type *ctx;\n float4 *result;\n float4 avg;\n\n result = (float4*)palloc(sizeof(float4));\n\n if ((ctxPtr) == 0) {\n (*result) = 0;\n } else {\n ctx = (ctx_type*)(ctxPtr);\n avg = ctx->s / ctx->n;\n (*result) = (ctx->ssq - 2*avg*ctx->s)/ctx->n + avg*avg;\n (*result) = sqrt((*result));\n };\n\n pfree(ctx);\n\n return result;\n}\n\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/\n", "msg_date": "Wed, 09 Jun 1999 16:21:03 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": true, "msg_subject": "Aggregates with context - a question" }, { "msg_contents": "Philip Warner wrote:\n\n>\n> Dear All,\n>\n> I have written a STDEV aggregate function which seems to work; the only problem is that I am not sure that the method I am using was intended by the designers of aggregates.\n>\n> I am using the first parameter of sfunc1 to store a pointer to a context record, and sfunc1 creates and returns that pointer when it is first called (the DB definition specified initcond1 = '0'). In this way I can accumulate all the details I need.\n>\n> The problems is that the way I read the code, there is an expectation that the parameters of sfuncX and finalfunc match the basetype. This is clearly not the case, and I have declared them as int4, which will also presumable break on 64 bit implementations...\n>\n> Anyway, the code is below. Any suggestions as to how I should handle complex context within aggregates would be appreciated.\n\n\n Here's another implementation of standard deviation and\n variance I've done some month's ago. It's based on it's own\n datatype holding 2 float64 values as aggregate state\n variable. So it doesn't need to pass pointers of context\n records around.\n\n I wanted to wait for some other statistical functions that\n could be added to the package before putting it into the\n contrib. 
Maybe you have some suggestions what would be nice\n to add it to? I'll add it anyway to the contrib for v6.6.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\nbegin 644 statmath.tar.gz\nM'XL(`/U;7C<\"`^U:>W/;R`WWO]Q/@3@//6)1HF3)&9^=.<567'?D1V5[>IW+\nMC88B*9D-12I\\R,DUU\\]>8)=/DY;EJ^-F4F(2B5PN@`7P`Q9+V?-5?Z[ZU\\V-\nM;T>PW=KI=F$#D#HM_JVTQ7=(+8!>;Z?7;G?Q`Y^V=EJ=#>AN/`$%GJ^Z`!N+\nMF??)6C%/GZ@;/R!Y4?Q/U(_&U+2,;Z!#:;5ZV]MWQK_=ZFY3_)7M7K?;57;H\nMZ7:GMP&M,O[?G)XW'HO8<_8<(A1!>(\\4#TT=%_QK`R+(P=S1`\\N0:>+CK8*=\nM'QT>CV`?9+DIR^QB=\"!N7U3Y@UK3<S7&3%NS`MW`43&A%N-?GEG.1+48.SX]\nM&%X=#L9GYY>P+S6.06[\"!R;15<*5'XDDYQXL'-=OXBK.1I>G_9-!C;&#]\\/^\nMT06\\IL6EM-7P5CP:7PQQVLG9(3'@(B+/\\;&K(0V]J(:/:R^JA\\.+J_?OCW\\1\nM/-QL?#X\\?L?U\"V][C%W\\;7C[&4*?,=6R=B5)3`,^(JYQD&M!A2@9'T0#7*W,\nM9YHVKHWXXYG`KT@Z^8\"KK*%+M`4T%E`PJXD//=\\U%ZFA1&V&,=:;2&XR%K/M\nMXNSY1]UT.<//]\"2<E7ORDJ3L`O^239N68.BPZ7T5>L?G_<N_D+:O!6OZN@E[\nM\\&(/WG))LG#]X&(7,H%XF;HC11HCYQT<)$&N0<.[5EW4VW!0%(ID3#<6AJT#\nM?NW&\\QLG)[=Q4I<U>\"OF,J99AFK3='<.C2G4_WW;R^EP38U/4!6<6R^J-Z:E\nM:ZK+%>)(K1;G1R@<_YM3ME'2C[O_1Q>R[FA/N__#CB+V_VZKLZUT:?_OM'N=\nM<O]_\"KK`L)N>;VJJ!>ILYAHSU<>M.K`UWW1LCV_:YX[GXQ,LHC+N$DO#56=&\nM=6HYJO^F)DGA`\"Q5*S!`<^:+P,=B-OG\"L!9)$L;7#B_&XS%=C2GF\"GW0'BE]\nMQLT9-VZZ?POPF0D>Y**/YAAGFR&_N:\\PQI:J:ZJVEJP@&BC4'2N/M$,[41^?\nM/\\;0IF<>9-92_0S0@,\\U2-;44*)%Q:L*E\\4\\7]>-9>(7Q)6MBZ*Z-%7R9L$\"\nMQ^-P65(3&V$0B]CGE@-XL=X/N+\\Q7!?[JVK#WTU#^PA[_[RA[Y]U8V)Z,DI^\nMRQXG_[5'Q]CJ_%?:7:45Y7]/:2LTM-,K^_\\GH68=4LTSU.$D[/^B5A/J^(]W\nMGKDR(:I#F/];$.<AXAXB]!-SG`!R*\"U&,2G,J$></X]ZCSW,*-.1K]]FARQS\nMDAWCJ,V,&*YK<\\9D;',ABI@G7V^F1@/?M+SF`EM71Z,GC.4<<FAHENKR]7MB\nM_;%SQG1A8.$;FW:U)AW;F-UQ[>3.X1/`=U7;,_D@=]+$,HK%.(&/<LX\"_T\\*\nMTAS+,C22(5W<9HC%^0Z$\\\\#TC;F7D1!&<SSE%KTW;0QX9B%1N8]1D&&/,'`W\nM?X*20@&BBM[-GA33#/]M$/$JW-LNCI2&[3;4\\;!1^XE%UT61B(7@`$[-\"XW<\nMG9ZX!=$=.1?9<EQI%V<Y3=O?!KN()^/7=9E2OKR#I0#NB7'7F*`X'1IAUKYS\nM`EOW@(^;]HR'0^QV8N_W4&[4*W@%0>$U1(.E8^HLIZ6J.P&BF235V+^89$ZA\nM.E4G7I4&<#,^?#<<G_1_H9.@83FSZF`T.AMMX5$M%+0+#KH5EW.SB79Q=EK[\nMLWUHR2UX]0H287M\"V/'I\"F%H:2+MCY5^2B$K=E5!)8C>?-Q*8NJ9=-57P?^R\nM,*@^2I<XT>3\\=!Q&/^/9$/P;)_1UQ0//6%!!PC[BQO2O0:5T1F&5W8I<X/8P\nM].R>5\"\"GAU/Q\"&GX.`']**(B2?Y\\@7<T6Y+JVD+9`OQL(X\"XIY]Q`9PM<&V(\nMT%:#TZOAD\"8UZTS\"E?6IS)(+O(6J)2^$$\"Z!Y=.,)@M58P^42!'EN>J9OQO.\nM-!HFI^$)&-JUE((CP^<2IZ;K^6`'\\XGAAG+YGH!B6V@'6H-7N&C?T:OXM06O\nMT*8(-V0?`0?]\"5^_0LBX#X-1__1HL`(T)E:#&:Y=1`^MPV&HO/0JFUL@JHV4\nMQSVNA1X(LW]M_88+X\\[.V>09FH,;:\\:HC\"&O7_/`H\"GME\"EM;LJ'UM/;HN1M\nM&0F`^!SBON%2>9]@D7>_A\"H2$-\"\\\"(9K)B#6[#@#B_;0=5/P=@:))&%K;!*4\nM1\")))L'T5Z75WOXME41+C,XRE32<(\\Z:!;I5#Q;5S:H=6%:-E['8<>=8\"00,\nMDE)+=\\;GT(MAA*CU$O(X6Y)=Q*\"&Z:>+_`M]O50(0@)\\J'+9CFX5NO46I'E:\nM17L0'R_E^FR7/A`%5$0/CX_0)\"5]TPX1D#8)F>\\MHU%+$@5PK?Z%ART*;;1'\nM$G>Z!2WJ&;RXU-[96,&U8^%VU\\[N<3(@$_&*$F-Z`E7!')RIP#6U5%OIC,U.\nM(%Z:`Y\\\"57?YCKAPL>?6?&^MZKU&S[%&*4]!D.K\",\\'W'53P`W%$YE)MXR;T\nM>K;<A87R-=1IV0^LJBD92BP#%:\\K:W55N^V(]:M8JBN,<T#TO^*M@9I-@LCS\nMN79\\+1`5MJ#!/.X,'X\"@X'^#FS1F^M12@FXN38]<,_D\"OQNN`[@^W.QX/D8%\nMP]!#7;1VFS;\"%JX>C97JL7;L%W]*+(J-_Z,8HZ$G,P\"M4[8W(6QI:_8J5\"5Z\nMOP&FTJ>&AX\"J^(QV'ZCN/Z.LAI6ZG*4WRU#:7=OE]P8UR_!H+U!MW#%686X/\nMZ]V?@)QJ:8&EAJ\"+=SHA&!V7*HM9X,6A1%NCHM>(Y]81O[,:L<0\\^%216]SF\nM/&@C:5GDID/UF/!]Q)/5>N>BU<?EAZ!7R\"JQF\\=NOBM;`\\7\"G?3XD^M7'P3D\nM.Y`L)&9QG`3MX2C^<7[_$S^%/^'O?YV.HL2__[4['?K];Z>CE.__GX(:#4A'\nMGF%QQ*%C\\><<<
6G-O_JGB>'D`]=04QU[7'-7%>?PW34*.!@-^I<#>']U>G!Y\nM?'9:_,;66:B?`@-KSVAP>34ZO<C/8E+_`BJW_FRCPJ1A__3HJG\\T@,I!!3/[\nM?FUTIL^I$P,/TW'YC_-!7CY4L9B&KSXLPY[YUUAXE-X6C=+.ME_H`'SLB!<:\nM^\\5+Q@FJ9<[LN6'3'%$#&=6^7(SB>(I(A94>0Q70W[Z$[Q6R@;[3:]%A-+>F\nM\\$3ZYA%\"EC-@FOE5P(MAYE^[QMHKSYV`WHA]/K5@,?S?`BO3%1?XZ9LH3;4R\nMCZPR%XW4KX%X._<,:YIR?/_H:#0XHJOHH(KXGZB>P0O$?JA[BW9H'%!R(_0R\nMBI9+]Q3Q9,;\"BL:B.::MX1!'!PVG4R45;9YIID]O9DA8I55)C;3%\"$^<G`5Q\nM*WV?\"7F7K[#F=BX]P*HTM')F[:YO6-A=?2]F)>!]@%'E'Z>55%)))9544DDE\nIE51222655%)))9544DDEE51222655%)))9544DG_K_0?^Q8S<`!0````\n`\nend\n", "msg_date": "Wed, 9 Jun 1999 14:29:09 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Aggregates with context - a question" }, { "msg_contents": "Philip Warner <[email protected]> writes:\n> The problems is that the way I read the code, there is an expectation\n> that the parameters of sfuncX and finalfunc match the basetype.\n\nNo, not at all. transfn1 has the signature\n\ttranstype1 = transfn1 (transtype1, basetype)\nie, you take the state object and the next input value, and deliver a\nnew state object. transfn2 has the signature\n\ttranstype2 = transfn2 (transtype2)\nie, it operates on the second state object and delivers a new value.\nAnd finalfn has the signature\n\tfinaltype = finalfn (transtype1, transtype2)\nie, take the two state objects and deliver the desired result.\n(You can also use a finalfn that takes only one state object,\nif you are only using one transfn.)\n\nIf you omit finalfn then you can only supply one transfn, and the\ncorresponding transtype must equal finaltype because the default\nbehavior is just to copy the state value computed by the transfn\nthat's present. But as long as you have a finalfn, all four datatypes\ncan be different.\n\nThis undoubtedly seems overly baroque, and it is overkill when writing\na specialized transfn from scratch; there's no need for transfn2, just\na transfn1 and a finalfn to produce the end result. The reason it was\ndone this way (I assume; I wasn't there) is that you can produce many\nuseful aggregates by combining *existing* operators without having to\nwrite any code at all. For example, avg() for float8 is built from\ntransfn1 = float8pl, transfn2 = float8inc, and finalfn = float8div.\n\nI am not sure whether this is adequately documented; probably not.\nWill revisit the docs for 6.6 (I suppose Thomas will have my head\nif I change 'em now ;-)).\n\n> This is clearly not the case, and I have declared them as int4, which\n> will also presumable break on 64 bit implementations...\n\nThe \"clean\" way to do it is to make a declared datatype that corresponds\nto the state storage you need, but that's overkill if such a datatype\nhasn't got any other use. I think the way you have done it is a\nreasonable cheat, though I agree that using int4 is risky for\nportability. There has been some talk of inventing a type OID\nrepresenting \"C string\", and that (when available) might be a better way\nof declaring transtype1 when it's really a private struct of some sort.\n\nOne thing you have to be very careful about is memory allocation and\nlifetime. The way you are doing it, a palloc in the first transfn1\niteration and a pfree in finalfn, will be fine. However this may change\nin 6.6, since we are going to have to do something to cure memory leaks\nin aggregation. 
(Currently, if the transfns are ones that palloc their\nresult value, as all float8 ops do for example, the storage is not\nreclaimed until end of transaction. That's no good if there are lots of\ntuples...)\n\n> Anyway, the code is below.\n\nLooks OK except you are potentially pfreeing an uninit pointer in the\nfinalfn...\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 10:20:07 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Aggregates with context - a question " }, { "msg_contents": "At 10:20 9/06/99 -0400, Tom Lane wrote:\n\n>> This is clearly not the case, and I have declared them as int4, which\n>> will also presumable break on 64 bit implementations...\n>\n>The \"clean\" way to do it is to make a declared datatype that corresponds\n>to the state storage you need, but that's overkill if such a datatype\n>hasn't got any other use. I think the way you have done it is a\n>reasonable cheat, though I agree that using int4 is risky for\n>portability. There has been some talk of inventing a type OID\n>representing \"C string\", and that (when available) might be a better way\n>of declaring transtype1 when it's really a private struct of some sort.\n\nSounds like a wonderful idea; does this mean that users can be prevented from declaring a column of type 'C String'? Or do you then need to build all the support functions? I suppose the alternative would be to use a 'varbinary' (or varchar?), which has the first word being the structure length. That would at least be standard.\n\n>One thing you have to be very careful about is memory allocation and\n>lifetime. The way you are doing it, a palloc in the first transfn1\n>iteration and a pfree in finalfn, will be fine. However this may change\n>in 6.6, since we are going to have to do something to cure memory leaks\n>in aggregation. (Currently, if the transfns are ones that palloc their\n>result value, as all float8 ops do for example, the storage is not\n>reclaimed until end of transaction. That's no good if there are lots of\n>tuples...)\n>\n>> Anyway, the code is below.\n>\n>Looks OK except you are potentially pfreeing an uninit pointer in the\n>finalfn...\n>\n\nOops - pretty clever, considering I had already checked if it was zero...\n\nThanks for the information,\n\nPhilip\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/\n", "msg_date": "Thu, 10 Jun 1999 12:50:21 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Aggregates with context - a question " }, { "msg_contents": "I have attached the latest attempt at my stats functions. They are defined for int4, float4 and float8, and now use a 'text' type to store context information. This gets rid of the need to pass pointers as int4, but brings me to the next question:\n\nis it OK to store 'binary' data (possibly containing \\0) in a text field, so long as that field is never displayed?\n\nIf it is, then I presume text fields are the best approach to preserving context in complex aggregate functions. 
Is this reasonable?\n\nAny further (polite) suggestions would be welcome.\n\nIf all is OK, my final question is: would people be happy to incorporate such code into PG for general use once all relevant types are supported?\n\n\n\n\n\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/", "msg_date": "Thu, 10 Jun 1999 17:35:09 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Aggregates with context - a question " }, { "msg_contents": "Philip Warner <[email protected]> writes:\n> At 10:20 9/06/99 -0400, Tom Lane wrote:\n>> ... There has been some talk of inventing a type OID\n>> representing \"C string\", and that (when available) might be a better way\n>> of declaring transtype1 when it's really a private struct of some sort.\n\n> Sounds like a wonderful idea; does this mean that users can be\n> prevented from declaring a column of type 'C String'? Or do you then\n> need to build all the support functions?\n\nThe original proposal was to have a type OID that would represent the\ntextual input or output of datatype input/output functions, in order to\nsolve the problems we have now with not being able to typecheck explicit\nuses of these functions. There would be no reason to make it a \"real\"\ntype that could be declared as a column type, AFAICS. You'd have to be\nable to refer to it by name in order to declare user-supplied datatype\nI/O functions, however. Might take a bit of a kluge to make the type\nacceptable for one purpose and not the other...\n\n> I suppose the alternative\n> would be to use a 'varbinary' (or varchar?), which has the first word\n> being the structure length. That would at least be standard.\n\nThat's actually probably a better idea; I'd suggest the existing \"bytea\"\ntype could be used to represent the workspace datatype for aggregates\nthat are really using a private struct. Not sure how you'd get it\ninitialized at aggregate startup, but that's probably doable.\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 10 Jun 1999 19:27:39 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Aggregates with context - a question " } ]
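A concrete illustration of the aggregate machinery discussed in this thread: below is a minimal sketch of a 6.5-style CREATE AGGREGATE declaration, assuming the avg()-style composition described above. The aggregate name is made up; the three built-in functions are the ones named for float8 avg().

    -- SFUNC1 accumulates the running sum, SFUNC2 counts the inputs,
    -- FINALFUNC divides the two state values to produce the average.
    CREATE AGGREGATE my_avg (
        BASETYPE  = float8,
        SFUNC1    = float8pl,  STYPE1 = float8,
        SFUNC2    = float8inc, STYPE2 = float8,
        FINALFUNC = float8div,
        INITCOND1 = '0',
        INITCOND2 = '0'
    );

A hand-written pair such as sdev_ag1/sdev_fin above needs only SFUNC1 and STYPE1 (int4 in the first posting, or the text state type tried later) plus FINALFUNC; SFUNC2 and STYPE2 are simply omitted.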
[ { "msg_contents": "Dear All,\n\nI've been working (a little) on external functions and laguages recently, and was wondering how PG handles updates to libraries and/or stored procedures in the context of transactions. This is probably most relevant to Perl, but really applies to any external function.\n\nIf:\n\n1. user A starts a TX, and calls a user defined function in userlib.so, \n2. user B changes, recompiles, and reloads the shared library\n3. user A calls the same function within the same TX.\n\nDoes PG prevent the 'load' command by user B (via locking)?\nDoes user A get a different result?\nDoes user A's backend not load the new module until after a commit?\n\nIn the extreme case this is obvoiusly totally outside the control of PostgreSQL, but in the case of Perl, which includes all sorts of external files, should the implementor of PL/Perl be careful to NOT update already loaded module? Or to update them ASAP?\n\nThis is probably too unlikely to worry about, but I am (academically) interested...\n\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/\n", "msg_date": "Wed, 09 Jun 1999 17:13:07 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": true, "msg_subject": "External functions/languages and transactions" }, { "msg_contents": "Philip Warner wrote:\n\n>\n> Dear All,\n>\n> I've been working (a little) on external functions and laguages recently, and was wondering how PG handles updates to libraries and/or stored procedures in the context of transactions. This is probably most relevant to Perl, but really applies to any external function.\n>\n> If:\n>\n> 1. user A starts a TX, and calls a user defined function in userlib.so,\n> 2. user B changes, recompiles, and reloads the shared library\n> 3. user A calls the same function within the same TX.\n>\n> Does PG prevent the 'load' command by user B (via locking)?\n> Does user A get a different result?\n> Does user A's backend not load the new module until after a commit?\n>\n> In the extreme case this is obvoiusly totally outside the control of PostgreSQL, but in the case of Perl, which includes all sorts of external files, should the implementor of PL/Perl be careful to NOT update already loaded module? Or to update them ASAP?\n>\n> This is probably too unlikely to worry about, but I am (academically) interested...\n\n\n It's mostly OS dependant. PostgreSQL itself load's a shared\n object only the first time it is required PER BACKEND. It\n remembers which objects have been loaded by path. So for the\n entire lifetime of a connection, PostgreSQL will never reload\n a once dynamically linked library. So the new functionality\n will only show up if you reconnect.\n\n If the OS supports writing into shared objects that are in\n use, and what would happen if it is done - dunno! Some might\n allow it but the backend potentially dumps core - some might\n deny write access - some might whatever.\n\n Under every UNIX operating system, there's a clean way you\n can protect the backend from any danger. Install the shared\n objects in a common place (like PGLIB) and inside the install\n target, first remove an eventually existing old version. 
That\n way, the actual deletion of the old version is delayed by the OS\n until the last backend using it has terminated, while the directory\n is immediately ready to receive the new version.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Wed, 9 Jun 1999 14:41:20 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] External functions/languages and transactions" } ]
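For reference, a sketch of the statements the scenario above involves. The function name and install path are illustrative only, and the example assumes the C symbol carries the same name as the SQL function.

    -- A user function is bound to its shared object by file path; each
    -- backend links that file the first time one of its functions is called.
    CREATE FUNCTION userlib_func(int4) RETURNS int4
        AS '/usr/local/pgsql/lib/userlib.so' LANGUAGE 'c';

    -- The explicit 'load' step from the scenario; without it, a backend
    -- keeps the version it first linked until the connection is closed.
    LOAD '/usr/local/pgsql/lib/userlib.so';

Combined with the install trick described above (remove the old file, then install the new build under the same name), already-running backends keep executing the object they have mapped while new connections pick up the fresh version.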
[ { "msg_contents": "Hello,\n\nSorry that I picked this too late in the release cycle, but other work \nprevented my earlier involvement in the testing.\n\nThere are at least two serious problems that I discovered so far with Postgres \n6.5 beta2 and beta3 (running on BSD/OS 4.0.1):\n\n1. LIKE with indexes works worse than without indexes.\n\nGiven the following schema:\n\nCREATE TABLE \"words\" (\n \"w_key\" text,\n \"w_pages\" text);\nCREATE INDEX \"w_k_words_i\" on \"words\" using btree ( \"w_key\" \"text_ops\" );\n\nThe table words has 117743 records.\n\nthe folowing query:\n\nselect w_key from words where w_key like 'sometext%'\n\nis explained as:\n\nIndex Scan using w_k_words_i on words (cost=3335.38 rows=1 width=12)\n\nand runs for several minutes. If I drop the w_k_words_i index, the explain is:\n\nSeq Scan on words (cost=7609.52 rows=1 width=12)\n\nand the query runs noticeably faster.\n\nUnder 6.4 the behavior is as expected, much better with indexes.\n\n2. Under Postgres 6.4 the following query:\n\nSELECT config.serviceid, custid, datetime_date( updated_at ) as date ,archived \nas a, c.subserviceid as ss, c.usage_price as\n price, c.usage_included as time, service\nFROM a, b, c \nWHERE confid in ( SELECT confid \n \t\t FROM a\n \t\t WHERE archived_at > '30-04-1999' \n \t\t\t AND created_at < '30-04-1999' )\n \tAND not archived\n AND a.serviceid=b.serviceid \n \tAND c.serviceid=a.serviceid\nGROUP BY custid, serviceid, subserviceid;\n\nworks, although runs for indefinitely long time (due to the subselect - but \nthis is not a problem, as it can be rewritten). Under Postgres 6.5 hwoever, it \nis not accepted, because there are no aggregates in the target list. Is this \nincorrect behavior of the 6.4.2 version or 6.5 has different syntax?\n\nRegards,\nDaniel Kalchev\n\n", "msg_date": "Wed, 09 Jun 1999 11:04:28 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "Daniel Kalchev <[email protected]> writes:\n> 1. LIKE with indexes works worse than without indexes.\n\nSince you are using USE_LOCALE, the parser is inserting only a one-sided\nindex restriction; that is\n\tWHERE w_key like 'sometext%'\nbecomes\n\tWHERE w_key like 'sometext%' AND w_key >= 'sometext'\nwhereas without USE_LOCALE it becomes\n\tWHERE w_key like 'sometext%' AND w_key >= 'sometext'\n\t\tAND w_key <= 'sometext\\377'\n6.4 always did the latter, which was wrong in non-ASCII locales because\n\\377 might not be the highest character in the sort order. (Strictly\nspeaking it's wrong in ASCII locale as well...)\n\nOf course, the one-sided index restriction is much less selective than\nthe two-sided; depending on what 'sometext' actually is, you might end\nup scanning most of the table, and since index scan is much slower\nper-tuple-scanned than sequential scan, you lose. That's evidently\nwhat's happening here.\n\nI suspect that the optimizer's cost estimates need refinement;\nit should be able to guess that the sequential scan will be the\nfaster choice here.\n\nOf course what you really want is a two-sided index restriction,\nbut we are not going to be able to fix that until someone figures\nout a locale-independent way of generating a \"slightly larger\"\ncomparison string. 
So far I have not heard any proposals that\nsound like they will work...\n\n> Under Postgres 6.5 hwoever, it\n> is not accepted, because there are no aggregates in the target list.\n\nNo, that's not what it's unhappy about; it's unhappy because there\nare ungrouped fields used in the target list. This is erroneous\nSQL because there's no unique choice of value to return for such an\nfield (if several tuples are grouped together, which one's value\nof the field do you use?) Prior versions of Postgres failed to detect\nthis error, but it's an error. You were getting randomly selected\nvalues for the ungrouped fields, I suppose.\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 10:41:02 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " } ]
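Two short illustrations of the points made in this thread, using simplified, hypothetical names rather than the original schema. The first shows the GROUP BY rule that 6.5 now enforces: every non-aggregated output column must appear in the GROUP BY list. The second writes out by hand the two-sided range restriction for the earlier words/w_key query, which is only safe where the column's byte order matches its collation order.

    -- Hypothetical table t(custid int4, serviceid int4, amount int4).
    -- Rejected in 6.5: amount is neither grouped nor aggregated.
    SELECT custid, serviceid, amount
    FROM t
    GROUP BY custid, serviceid;

    -- Accepted: group by the extra column as well ...
    SELECT custid, serviceid, amount
    FROM t
    GROUP BY custid, serviceid, amount;

    -- ... or reduce it to one value per group with an aggregate.
    SELECT custid, serviceid, sum(amount) AS total
    FROM t
    GROUP BY custid, serviceid;

    -- Hand-written two-sided restriction for the prefix LIKE on words.w_key;
    -- 'sometexu' is 'sometext' with its last byte incremented.
    SELECT w_key
    FROM words
    WHERE w_key LIKE 'sometext%'
      AND w_key >= 'sometext'
      AND w_key < 'sometexu';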
[ { "msg_contents": "Yes,\n\nI do build with --enable-locale, but I do not build with --enable-mb and do \nnot use client_encoding or server_encoding.\n\nThe content of the keys is in cyrillic. I have LC_CTYPE=CP1251 in the \nenvironment in both server and client, and this has worked for me in 6.4.2.\n\nRegards,\nDaniel\n\n>>>\"Hiroshi Inoue\" said:\n > \n > \n > > -----Original Message-----\n > > From: [email protected]\n > > [mailto:[email protected]]On Behalf Of Daniel Kalchev\n > > Sent: Wednesday, June 09, 1999 5:04 PM\n > > To: [email protected]\n > > Subject: [HACKERS] Postgres 6.5 beta2 and beta3 problem\n > > \n > > \n > > Hello,\n > > \n > > Sorry that I picked this too late in the release cycle, but other work \n > > prevented my earlier involvement in the testing.\n > > \n > > There are at least two serious problems that I discovered so far \n > > with Postgres \n > > 6.5 beta2 and beta3 (running on BSD/OS 4.0.1):\n > > \n > > 1. LIKE with indexes works worse than without indexes.\n > >\n > \n > Did you built with --enable-locale ?\n > \n > Regards.\n > \n > Hiroshi Inoue\n > [email protected] \n > \n\n\n", "msg_date": "Wed, 09 Jun 1999 11:58:36 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "> Yes,\n> \n> I do build with --enable-locale, but I do not build with --enable-mb and do \n> not use client_encoding or server_encoding.\n> \n> The content of the keys is in cyrillic. I have LC_CTYPE=CP1251 in the \n> environment in both server and client, and this has worked for me in 6.4.2.\n\n\nThis certainly explains it. With locale enabled, LIKE does not use\nindexes because we can't figure out how to do the indexing trick with\nnon-ASCII character sets because we can't figure out the maximum\ncharacter value for a particular encoding.\n\nWe didn't do the check in 6.4.*, and LIKE was not returning the proper\nresults for queries at those sites that used locale.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 08:43:50 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "On Wed, 9 Jun 1999, Bruce Momjian wrote:\n\n> Date: Wed, 9 Jun 1999 08:43:50 -0400 (EDT)\n> From: Bruce Momjian <[email protected]>\n> To: Daniel Kalchev <[email protected]>\n> Cc: Hiroshi Inoue <[email protected]>, [email protected]\n> Subject: Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem\n> \n> > Yes,\n> > \n> > I do build with --enable-locale, but I do not build with --enable-mb and do \n> > not use client_encoding or server_encoding.\n> > \n> > The content of the keys is in cyrillic. I have LC_CTYPE=CP1251 in the \n> > environment in both server and client, and this has worked for me in 6.4.2.\n> \n> \n> This certainly explains it. With locale enabled, LIKE does not use\n> indexes because we can't figure out how to do the indexing trick with\n> non-ASCII character sets because we can't figure out the maximum\n> character value for a particular encoding.\n\nIf so, why explain reports 'Index Scan ....' 
?\napod=> \\d\nDatabase = apod\n +------------------+----------------------------------+----------+\n | Owner | Relation | Type |\n +------------------+----------------------------------+----------+\n | megera | idx_adate | index |\n | megera | idx_atitle | index |\n | megera | idx_url | index |\n | megera | titles | table |\n +------------------+----------------------------------+----------+\n\napod=> explain select atitle from titles where atitle like 'Sun%';\nNOTICE: QUERY PLAN:\n\nIndex Scan using idx_atitle on titles (cost=33.28 rows=1 width=12)\n\nCurrent cvs, --enable-locale --with-mb=KOI8\n\n\tRegards,\n\n\t\tOleg\n> \n> We didn't do the check in 6.4.*, and LIKE was not returning the proper\n> results for queries at those sites that used locale.\n> \n> -- \n> Bruce Momjian | http://www.op.net/~candle\n> [email protected] | (610) 853-3000\n> + If your life is a hard drive, | 830 Blythe Avenue\n> + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Wed, 9 Jun 1999 17:12:40 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "Bruce,\n\nThis is extremely bad news, because if will make PostgreSQL 6.5 unusable for \nmost of my applications. Perhaps something can be done to resolve this \nsituation?\n\nI understand the problem for 16-bit characters support, but for most of the \nencodings that support only 8 bit characters it should be safe to assume the \nmaximum character value is 255.\n\nAnyway, making this check compile-time defined would certainly fix things \nhere, because in my case the cyrillic letters order match that of the binary \nencoding (that is, the first alphabet letter is before the second etc).\n\nPerhaps the locale data can be used to gather this information?\n\nI will do some testing without using locale to see what happens.\n\nRegards,\nDaniel\n\n>>>Bruce Momjian said:\n > > Yes,\n > > \n > > I do build with --enable-locale, but I do not build with --enable-mb and d\n o \n > > not use client_encoding or server_encoding.\n > > \n > > The content of the keys is in cyrillic. I have LC_CTYPE=CP1251 in the \n > > environment in both server and client, and this has worked for me in 6.4.2\n .\n > \n > \n > This certainly explains it. With locale enabled, LIKE does not use\n > indexes because we can't figure out how to do the indexing trick with\n > non-ASCII character sets because we can't figure out the maximum\n > character value for a particular encoding.\n > \n > We didn't do the check in 6.4.*, and LIKE was not returning the proper\n > results for queries at those sites that used locale.\n > \n > -- \n > Bruce Momjian | http://www.op.net/~candle\n > [email protected] | (610) 853-3000\n > + If your life is a hard drive, | 830 Blythe Avenue\n > + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n\n\n", "msg_date": "Wed, 09 Jun 1999 16:15:58 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "On Wed, 9 Jun 1999, Daniel Kalchev wrote:\n\n> Date: Wed, 09 Jun 1999 16:15:58 +0300\n> From: Daniel Kalchev <[email protected]>\n> To: Bruce Momjian <[email protected]>\n> Cc: [email protected]\n> Subject: Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem \n> \n> Bruce,\n> \n> This is extremely bad news, because if will make PostgreSQL 6.5 unusable for \n> most of my applications. Perhaps something can be done to resolve this \n> situation?\n> \n> I understand the problem for 16-bit characters support, but for most of the \n> encodings that support only 8 bit characters it should be safe to assume the \n> maximum character value is 255.\n> \n> Anyway, making this check compile-time defined would certainly fix things \n> here, because in my case the cyrillic letters order match that of the binary \n> encoding (that is, the first alphabet letter is before the second etc).\n> \n> Perhaps the locale data can be used to gather this information?\n\nIt's certainly there ! locale data contains all information about\nspecific character set and encoding. Is it possible to use it to create\nindices ? It should be slower but benefits of using indices will cover\nexpenses for non-US people. I didn't notice such behaivour in Informix\nand Oracle. Fixing this would be a good point in respect of popularity\nof Postgres. Are there any chance to place it in TODO for 6.6 ?\nAt least explain should reflect such fact !\n\n\n\tRegards,\n\n\t\tOleg\n\n> \n> I will do some testing without using locale to see what happens.\n> \n> Regards,\n> Daniel\n> \n> >>>Bruce Momjian said:\n> > > Yes,\n> > > \n> > > I do build with --enable-locale, but I do not build with --enable-mb and d\n> o \n> > > not use client_encoding or server_encoding.\n> > > \n> > > The content of the keys is in cyrillic. I have LC_CTYPE=CP1251 in the \n> > > environment in both server and client, and this has worked for me in 6.4.2\n> .\n> > \n> > \n> > This certainly explains it. With locale enabled, LIKE does not use\n> > indexes because we can't figure out how to do the indexing trick with\n> > non-ASCII character sets because we can't figure out the maximum\n> > character value for a particular encoding.\n> > \n> > We didn't do the check in 6.4.*, and LIKE was not returning the proper\n> > results for queries at those sites that used locale.\n> > \n> > -- \n> > Bruce Momjian | http://www.op.net/~candle\n> > [email protected] | (610) 853-3000\n> > + If your life is a hard drive, | 830 Blythe Avenue\n> > + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n> \n> \n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Wed, 9 Jun 1999 18:34:50 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> This certainly explains it. 
With locale enabled, LIKE does not use\n> indexes because we can't figure out how to do the indexing trick with\n> non-ASCII character sets because we can't figure out the maximum\n> character value for a particular encoding.\n\nWe don't actually need the *maximum* character value, what we need is\nto be able to generate a *slightly larger* character value.\n\nFor example, what the parser is doing now:\n\tfld LIKE 'abc%' ==> fld <= 'abc\\377'\nis not even really right in ASCII locale, because it will reject a\ndata value like 'abc\\377x'.\n\nI think what we really want is to generate the \"next value of the\nsame length\" and use a < comparison. In ASCII locale this means\n\tfld LIKE 'abc%' ==> fld < 'abd'\nwhich is reliable regardless of what comes after abc in the data.\n\nThe trick is to figure out a \"next\" value without assuming a lot\nabout the local character set and collation sequence. I had\nbeen thinking about a brute force method: generate a string and\ncheck to see whether strcmp claims it's greater than the original\nor not; if not, increment the last byte and try again. You'd\nalso have to be able to back up and increment earlier bytes if\nyou maxed out the last one. For example, in French locale,\n\tfld LIKE 'ab\\376%'\nyou'd first produce 'ab\\377' but discover that it's less than\n'ab\\376' (because \\377 is y-dieresis which sorts like 'y').\nYour next try must be 'ac\\377' which will succeed.\n\nBut I am worried whether this trick will work in multibyte locales ---\nincrementing the last byte might generate an invalid character sequence\nand produce unpredictable results from strcmp. So we need some help\nfrom someone who knows a lot about collation orders and multibyte\ncharacter representations.\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 12:08:52 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "Tom Lane wrote:\n> \n> Bruce Momjian <[email protected]> writes:\n> > This certainly explains it. With locale enabled, LIKE does not use\n> > indexes because we can't figure out how to do the indexing trick with\n> > non-ASCII character sets because we can't figure out the maximum\n> > character value for a particular encoding.\n> \n> We don't actually need the *maximum* character value, what we need is\n> to be able to generate a *slightly larger* character value.\n> \n> For example, what the parser is doing now:\n> fld LIKE 'abc%' ==> fld <= 'abc\\377'\n> is not even really right in ASCII locale, because it will reject a\n> data value like 'abc\\377x'.\n> \n> I think what we really want is to generate the \"next value of the\n> same length\" and use a < comparison. In ASCII locale this means\n> fld LIKE 'abc%' ==> fld < 'abd'\n> which is reliable regardless of what comes after abc in the data.\n> The trick is to figure out a \"next\" value without assuming a lot\n> about the local character set and collation sequence.\n\nin single-byte locales it should be easy:\n\n1. sort a char[256] array from 0-255 using the current locale settings,\n do it once, either at startup or when first needed.\n\n2. 
use binary search on that array to locate the last char before %\n in this sorted array:\n if (it is not the last char in sorted array)\n then (replace that char with the one at index+1)\n else (\n if (it is not the first char in like string)\n then (discard the last char and goto 2.\n else (don't do the end restriction)\n )\n\nsome locales where the string is already sorted may use special \ntreatment (ASCII, CYRILLIC) \n\n> But I am worried whether this trick will work in multibyte locales ---\n> incrementing the last byte might generate an invalid character sequence\n> and produce unpredictable results from strcmp. So we need some help\n> from someone who knows a lot about collation orders and multibyte\n> character representations.\n\nfor double-byte locales something similar should work, but getting\nthe initial array is probably tricky\n\n----------------\nHannu\n", "msg_date": "Wed, 09 Jun 1999 21:32:03 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "\nIt was me who found the bug and supplied the ugly fix, but that was the\nonly\nquick fix we could find until someone figure out how to get the correct\none from\nlocale.\nUSE_LOCALE uses strcoll instead of strcmp for comparasion and indexes\nand it sorts\ncorrectly but does not work with the MakeIndexable trick since \\377\nmight not\nbe the highest char in the alphabet or worse might not be a char of the\nalphabet at\nall (like in sv/fi locales).\n\nThe patch gives a slower correct result instead of a fast incorrect one,\nwhich is better IMHO, but maybe I am old fashion. :-)\n\nAnyway I think a correct solution should be:\n\"a LIKE 'abc%'\" should generate:\n\"a >= 'abc\\0' AND a < 'abd'\"\n ^\nwhere d is the last char before % + 1, or more correct the next char in\nalphabet\nafter the last char before %.\n\nStill looking for a locale guru. Hello!! :-(\n\nregards,\n-- \n-----------------\nG�ran Thyni\nThis is Penguin Country. On a quiet night you can hear Windows NT\nreboot!\n", "msg_date": "Wed, 09 Jun 1999 21:41:29 +0200", "msg_from": "Goran Thyni <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> Bruce,\n> \n> This is extremely bad news, because if will make PostgreSQL 6.5 unusable for \n> most of my applications. Perhaps something can be done to resolve this \n> situation?\n> \n> I understand the problem for 16-bit characters support, but for most of the \n> encodings that support only 8 bit characters it should be safe to assume the \n> maximum character value is 255.\n> \n> Anyway, making this check compile-time defined would certainly fix things \n> here, because in my case the cyrillic letters order match that of the binary \n> encoding (that is, the first alphabet letter is before the second etc).\n> \n> Perhaps the locale data can be used to gather this information?\n> \n> I will do some testing without using locale to see what happens.\n\nThe locale check is near the bottom of parser/gram.y, so you can\ncertainly enable indexing there.\n\nI am told that french does not have 255 as it's max character, and there\nthere is no collating interface to request the highest character. 
I\nsuppose one hack would be to go out and test all the char values to see\nwhich is highest.\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 20:34:24 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> It's certainly there ! locale data contains all information about\n> specific character set and encoding. Is it possible to use it to create\n> indices ? It should be slower but benefits of using indices will cover\n> expenses for non-US people. I didn't notice such behaivour in Informix\n> and Oracle. Fixing this would be a good point in respect of popularity\n> of Postgres. Are there any chance to place it in TODO for 6.6 ?\n> At least explain should reflect such fact !\n> \n> \n\nAdded to TODO:\n\n\t* Allow indexing of LIKE with localle character sets \n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 20:40:28 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "\nYes. This pretty much sums up the problem.\n\n\n> Bruce Momjian <[email protected]> writes:\n> > This certainly explains it. With locale enabled, LIKE does not use\n> > indexes because we can't figure out how to do the indexing trick with\n> > non-ASCII character sets because we can't figure out the maximum\n> > character value for a particular encoding.\n> \n> We don't actually need the *maximum* character value, what we need is\n> to be able to generate a *slightly larger* character value.\n> \n> For example, what the parser is doing now:\n> \tfld LIKE 'abc%' ==> fld <= 'abc\\377'\n> is not even really right in ASCII locale, because it will reject a\n> data value like 'abc\\377x'.\n> \n> I think what we really want is to generate the \"next value of the\n> same length\" and use a < comparison. In ASCII locale this means\n> \tfld LIKE 'abc%' ==> fld < 'abd'\n> which is reliable regardless of what comes after abc in the data.\n> \n> The trick is to figure out a \"next\" value without assuming a lot\n> about the local character set and collation sequence. I had\n> been thinking about a brute force method: generate a string and\n> check to see whether strcmp claims it's greater than the original\n> or not; if not, increment the last byte and try again. You'd\n> also have to be able to back up and increment earlier bytes if\n> you maxed out the last one. For example, in French locale,\n> \tfld LIKE 'ab\\376%'\n> you'd first produce 'ab\\377' but discover that it's less than\n> 'ab\\376' (because \\377 is y-dieresis which sorts like 'y').\n> Your next try must be 'ac\\377' which will succeed.\n> \n> But I am worried whether this trick will work in multibyte locales ---\n> incrementing the last byte might generate an invalid character sequence\n> and produce unpredictable results from strcmp. 
So we need some help\n> from someone who knows a lot about collation orders and multibyte\n> character representations.\n> \n> \t\t\tregards, tom lane\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 20:44:48 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": ">>>Bruce Momjian said:\n > The locale check is near the bottom of parser/gram.y, so you can\n > certainly enable indexing there.\n\nI commented the two ifdefs that had USE_LOCALE in gram.y and now like uses \nindexes for me...\n\n > I am told that french does not have 255 as it's max character, and there\n > there is no collating interface to request the highest character. I\n > suppose one hack would be to go out and test all the char values to see\n > which is highest.\n\nAs I understand the current 'non-locale' way the like indexing works, it \nshould create this problem for the Bulgarian cyrillic (cp1251) too, because \ncharacter code 255 is valid cyrillic character (the last one from the \nalphabet). Therefore, the solution proposed by Hannu Krosing should be \nimplemented.\n\nI believe we can make the assumption, that if you --enable-locale, but do not \nuse --with-mb, then you are using single-byte locales and therefore the hack \nmight work properly. If you use --with-mb you are out of luck until someone \nexplains better how multibyte characters are ordered.\n\nIs there other place than gram.y where this hack needs to be implemented?\n\nWhile I am bashing the locale support... why the ~* operator does not work \nwith locales? That is, I need to use construct like\n\nSELECT key from t WHERE upper(a) ~ upper('somestring');\n\ninstead of\n\nSELECT key FROM t WHERE a ~* 'somestring';\n\nOr store everything in the database in uppercase (works for keys) and upper \nthe string in the frontend. The result is that this construct does not use \nindices. We also cannot create indexes by upper(a). I believe this was \noutstanding problem in pre-6.4.\n\nI found this later problem to be resolved by modifying the \nbackend/regex/makefile to add -funsigned-char to CFLAGS. This is under BSD/OS \n4.0.1 - I found out, that by default characters that are 'alpha' in cyrillic \nare threated by the compiler as negative and therefore isalpha() returns \nzero... I believe this should be fixed as it may be causing other problems \nwith non-ASCII locales.\n\nMy proposal is to add -funsigned-char to all places where routines such as \n'isalpha' are used, and ifdef it for USE_LOCALE.\n\nDaniel\n\n", "msg_date": "Thu, 10 Jun 1999 12:05:45 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "> I believe we can make the assumption, that if you --enable-locale, but do not \n> use --with-mb, then you are using single-byte locales and therefore the hack \n> might work properly. 
\n\nI believe at least one person uses both --enable-locale and --with-mb\nfor single byte locale.\n\n> If you use --with-mb you are out of luck until someone \n> explains better how multibyte characters are ordered.\n\nI think until NATIONAL CHARACTER is fully implemented, we would not be\nable to properly handle sort orders with multi-byte characters.\n(Thomas, I've been think about implementing NCHAR too!)\n\nSo you could do whatever you like as long as they are in #ifndef\nMULTIBYTE:-)\n\n> I found this later problem to be resolved by modifying the \n> backend/regex/makefile to add -funsigned-char to CFLAGS. This is under BSD/OS \n> 4.0.1 - I found out, that by default characters that are 'alpha' in cyrillic \n> are threated by the compiler as negative and therefore isalpha() returns \n> zero... I believe this should be fixed as it may be causing other problems \n> with non-ASCII locales.\n> \n> My proposal is to add -funsigned-char to all places where routines such as \n> 'isalpha' are used, and ifdef it for USE_LOCALE.\n\nOnce I propsed another solution to BSD/OS guy (I don't remember who he\nwas) and asked him to check if it worked. Unfortunately I got no\nanswer from him. Then I merged it into #ifdef MULTIBYTE sections in\nregcomp.c and it seems work at least for cyrillic locale (Thanks Oleg\nfor testing!).\n---\nTatsuo Ishii\n\n", "msg_date": "Thu, 10 Jun 1999 23:30:56 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "Tatsuo Ishii wrote:\n> I think until NATIONAL CHARACTER is fully implemented, we would not be\n> able to properly handle sort orders with multi-byte characters.\n> (Thomas, I've been think about implementing NCHAR too!)\n> \n> So you could do whatever you like as long as they are in #ifndef\n> MULTIBYTE:-)\n\nHmm,\nand the world is moving towards Unicode.\nwe definitely need working support for locales and multichar.\nPerhaps the right way to do it is to store everything in\nsom Unicode format internally and convert the output\naccording to the suggested \"per-column-locale-definition\".\n\nLarry Wall has hacked UTF8 support into perl so it is doable,\nso let see what we can do for 6.6 (time permitting as always).\n\nregards,\nG�ran\n", "msg_date": "Fri, 11 Jun 1999 09:03:01 +0200", "msg_from": "Goran Thyni <[email protected]>", "msg_from_op": false, "msg_subject": "locales and MB (was: Postgres 6.5 beta2 and beta3 problem)" }, { "msg_contents": ">and the world is moving towards Unicode.\n>we definitely need working support for locales and multichar.\n>Perhaps the right way to do it is to store everything in\n>som Unicode format internally and convert the output\n>according to the suggested \"per-column-locale-definition\".\n\nNo. There's nothing perfect in the world. Unicode is not the\nexception too. So we need to keep the freedom of choice of the\ninternal encodings. 
Currently the mb support allows serveral internal\nencodings including Unicode and mule-internal-code.\n(yes, you can do regexp/like to Unicode data if mb support is\nenabled).\n--\nTatsuo Ishii\n", "msg_date": "Fri, 11 Jun 1999 16:14:58 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: locales and MB (was: Postgres 6.5 beta2 and beta3 problem) " }, { "msg_contents": "Hi!\n\nOn Thu, 10 Jun 1999, Tatsuo Ishii wrote:\n> > I believe we can make the assumption, that if you --enable-locale, but do not \n> > use --with-mb, then you are using single-byte locales and therefore the hack \n> > might work properly. \n> \n> I believe at least one person uses both --enable-locale and --with-mb\n> for single byte locale.\n\n I think it's me. But very soon all in Russia will use locale+mb for koi8\nand win1251 locales - all of us are always in need to convert between them.\nMaking Postgres convert on-the-fly is big win.\n\n> Once I propsed another solution to BSD/OS guy (I don't remember who he\n> was) and asked him to check if it worked. Unfortunately I got no\n> answer from him. Then I merged it into #ifdef MULTIBYTE sections in\n\n I remember he raised the issue few times, and finally we agreed on\n-funsigned-char. I think, he got a working Postgres and locale after all.\n\n> regcomp.c and it seems work at least for cyrillic locale (Thanks Oleg\n> for testing!).\n\n Works now pretty good!\n\n> ---\n> Tatsuo Ishii\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Fri, 11 Jun 1999 11:17:22 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "\nOn 11-Jun-99 Goran Thyni wrote:\n> Tatsuo Ishii wrote:\n>> I think until NATIONAL CHARACTER is fully implemented, we would not be\n>> able to properly handle sort orders with multi-byte characters.\n>> (Thomas, I've been think about implementing NCHAR too!)\n>> \n>> So you could do whatever you like as long as they are in #ifndef\n>> MULTIBYTE:-)\n> \n> Hmm,\n> and the world is moving towards Unicode.\n> we definitely need working support for locales and multichar.\n> Perhaps the right way to do it is to store everything in\n> som Unicode format internally and convert the output\n> according to the suggested \"per-column-locale-definition\".\n> \n> Larry Wall has hacked UTF8 support into perl so it is doable,\n> so let see what we can do for 6.6 (time permitting as always).\n\nIMHO, also will be pleasent add charset/unicode for database or possible, for\nsingle table at runtime, not for the whole postgres by configure.\n\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Fri, 11 Jun 1999 11:18:23 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "RE: [HACKERS] locales and MB (was: Postgres 6.5 beta2 and beta3 " }, { "msg_contents": "Tatsuo Ishii <[email protected]> writes:\n> Currently the mb support allows serveral internal\n> encodings including Unicode and mule-internal-code.\n> (yes, you can do regexp/like to Unicode data if mb support is\n> enabled).\n\nOne of the things that bothers me about makeIndexable() is that it\ndoesn't seem to be multibyte-aware; does it really work in MB case?\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 11 Jun 1999 
09:49:51 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: locales and MB (was: Postgres 6.5 beta2 and beta3\n\tproblem)" }, { "msg_contents": "> Tatsuo Ishii <[email protected]> writes:\n> > Currently the mb support allows serveral internal\n> > encodings including Unicode and mule-internal-code.\n> > (yes, you can do regexp/like to Unicode data if mb support is\n> > enabled).\n> \n> One of the things that bothers me about makeIndexable() is that it\n> doesn't seem to be multibyte-aware; does it really work in MB case?\n\nYes. This is because I carefully choose multibyte encodings for\nthe backend that have following characteristics:\n\no if the 8th bit of a byte is off then it is a ascii character\no otherwise it is part of non ascii multibyte characters\n\nWith these assumptions, makeIndexable() works very well with multibyte\nchars.\n\nNot all multibyte encodings satisfy above conditions. For example,\nSJIS (an encoding for Japanese) and Big5 (for traditional Chinese)\ndoes not satisfies those requirements. In these encodings the first\nbyte of the double byte is always 8th bit on. However in second byte\nsometimes 8th bit is off: this means we cannot distinguish it from\nascii since it may accidentally matches a bit pattern of an ascii\nchar. This is why I do not allow SJIS and Big5 as the server\nencodings. Users can use SJIS and Big5 for the client encoding,\nhowever.\n\nYou might ask why I don't make makeIndexable() multibyte-aware. It\ndefinitely possible. But you should know there are many places that\nneed to be multibyte-aware in this sence. The parser is one of the\ngood example. Making everything in the backend multibyte-aware is not\nworse to do, in my opinion.\n---\nTatsuo Ishii\n", "msg_date": "Sat, 12 Jun 1999 00:14:55 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: locales and MB (was: Postgres 6.5 beta2 and beta3\n\tproblem)" }, { "msg_contents": "Tatsuo Ishii <[email protected]> writes:\n> Yes. This is because I carefully choose multibyte encodings for\n> the backend that have following characteristics:\n> o if the 8th bit of a byte is off then it is a ascii character\n> o otherwise it is part of non ascii multibyte characters\n\nAh so.\n\n> You might ask why I don't make makeIndexable() multibyte-aware. It\n> definitely possible. But you should know there are many places that\n> need to be multibyte-aware in this sence. The parser is one of the\n> good example.\n\nRight, it's much easier to dodge the problem by restricting backend\nencodings, and since we have conversions that doesn't hurt anyone.\nNow that I think about it, all the explicitly MB-aware code that\nI've seen is in frontend stuff.\n\nThanks for the clue...\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 11 Jun 1999 17:26:25 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: locales and MB (was: Postgres 6.5 beta2 and beta3\n\tproblem)" } ]
[ { "msg_contents": "I have the fix and it must be applied, but at the moment I'm\nnot able to do cvs update (network problems?).\n\nI'll apply (and explain) it tomorrow.\n\nVadim\n", "msg_date": "Wed, 09 Jun 1999 19:56:50 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "bug in execMain.c:EvalPlanQual()" } ]
[ { "msg_contents": "> >> well, right? I compile alot on an alphapca56-* machine and the compile\n> >> will fail without it unless I add a --host= configure option otherwise.\n> >> Plus, there are alot of these Alphas out there now, so I wanted to make\n> >> sure all of them were covered. The alphapca57-class should also cover the\n> >> new DS20's, btw, so we're set for the future as well...\n> \n> The full patch is attached. I see that there is (in beta3) now a comment\n> in src/template/linux_alpha about removing -O2, but it is not, in fact,\n> removed.\n\nDone. Removed -O2 for linux_alpha and made it -O. I can not make the\nother changes because that is generated by GNU autoconf.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 08:39:08 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [PORTS] Anyone working on linux Alpha?" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> Done. Removed -O2 for linux_alpha and made it -O. I can not make the\n> other changes because that is generated by GNU autoconf.\n\nconfig.sub and config.guess are not auto-generated.\n\nWhat we have already done a couple of times is to apply suggested\nfixes to our current copies of these files, but to ask the submitter\nto send the bug report to the GNU folks as well ([email protected]\nand [email protected]) so that it gets into future GNU\nreleases. We do not want to maintain a divergent version of these\nfiles.\n\nI think someone remarked a few days ago that they had access to a\nmore up-to-date version of the config scripts than was present in\nthe last Autoconf release (2.13). Maybe we should try to grab the\nlatest GNU sources of these files?\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 10:48:22 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [PORTS] Anyone working on linux Alpha? " }, { "msg_contents": "On Wed, 9 Jun 1999, Tom Lane wrote:\n\n> Bruce Momjian <[email protected]> writes:\n> > Done. Removed -O2 for linux_alpha and made it -O. I can not make the\n> > other changes because that is generated by GNU autoconf.\n> \n> config.sub and config.guess are not auto-generated.\n> \n> What we have already done a couple of times is to apply suggested\n> fixes to our current copies of these files, but to ask the submitter\n> to send the bug report to the GNU folks as well ([email protected]\n> and [email protected]) so that it gets into future GNU\n> releases. We do not want to maintain a divergent version of these\n> files.\n> \n> I think someone remarked a few days ago that they had access to a\n> more up-to-date version of the config scripts than was present in\n> the last Autoconf release (2.13). Maybe we should try to grab the\n> latest GNU sources of these files?\n\nConsidering that we are pretty much requiring the use of 2.13, this does\nmake sense...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 9 Jun 1999 13:22:24 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Re: [PORTS] Anyone working on linux Alpha? " } ]
[ { "msg_contents": "Further to my cyrillic charset (--enable-locale) introduced woes, I would like \nto add, that when recompiling Postgres 6.5 without --enable-locale now the \nquery runs reasonably fast and explain gives:\n\nIndex Scan using w_k_words_i on words (cost=1112.13 rows=1 width=12)\n\ninstead of \n\nIndex Scan using w_k_words_i on words (cost=3335.38 rows=1 width=12)\n\nThe problem as it seems is that the restrictions to use indexes when locale is \nenabled are not consistently applied - explain shows that indices will be \nused, and the behavior with indexes and without indexes is different (with \nindexes it's noticeably slower :-) so indexes are apparently being used...\n\nApparently (for my current tests at least) the Bulgarian cyrillic \n(windows-1251) is handled reasonably well without locale support - untill now \n~* didn't work anyway.\n\nDaniel Kalchev\n\n", "msg_date": "Wed, 09 Jun 1999 16:53:19 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "Daniel Kalchev <[email protected]> writes:\n> The problem as it seems is that the restrictions to use indexes when\n> locale is enabled are not consistently applied - explain shows that\n> indices will be used, and the behavior with indexes and without\n> indexes is different (with indexes it's noticeably slower :-) so\n> indexes are apparently being used...\n\nRight, but what EXPLAIN doesn't show you (unless you can read the\nmuch uglier EXPLAIN VERBOSE output) is what index restrictions are\nbeing used.\n\nLIKE doesn't know anything about indexes, nor vice versa. What the\nindex-scan machinery *does* know about is <, <=, etc. If you have\nWHERE clauses like \"x >= 33\" and \"x <= 54\" then an index scan knows\nto only scan the part of the index from 33 to 54. So you never even\nvisit a large fraction of the table. This is why an index scan can\nbe faster than a sequential scan even though the per-tuple overhead\nof consulting the index is larger.\n\nSo, there is a special hack in the parser for LIKE (also for regexp\nmatches): if the parser sees that the match pattern has a fixed initial\nsubstring, it inserts some >= and <= clauses that are designed to\nexploit what the index scanner can do.\n\nOur immediate problem is that we had to drop the <= clause in non-ASCII\nlocales because it was wrong. So now an index scan driven by LIKE\nrestrictions is not nearly as restrictive as it was, and has to visit\nmany tuples (about half the table on average) whereas before it was\nlikely to visit only a few, if you had a reasonably long fixed initial\nstring.\n\nThere are some other problems (notably, that the extra clauses are\ninserted even if there's no index and thus no way that they will be\nhelpful) but those we know how to fix, and I hope to address them for\n6.6. Fixing the <= problem requires knowledge about non-ASCII character\nsets, and I for one don't know enough to fix it...\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 13:10:34 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "> LIKE doesn't know anything about indexes, nor vice versa. What the\n> index-scan machinery *does* know about is <, <=, etc. If you have\n> WHERE clauses like \"x >= 33\" and \"x <= 54\" then an index scan knows\n> to only scan the part of the index from 33 to 54. 
So you never even\n> visit a large fraction of the table. This is why an index scan can\n> be faster than a sequential scan even though the per-tuple overhead\n> of consulting the index is larger.\n> \n\nYou know, everyone beats me up for that LIKE indexing hack, but every\nmonth that goes by where someone does not come up with a better solution\nmakes me feel a little better about the criticism.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 20:51:04 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" } ]
[ { "msg_contents": "\nThe following query produces the following error\n\nBUT...\n\nif I take out the \"*\" from category* it starts to work. Unfortunately I need the \"*\" for my purposes. Bug?\n\nThis is from CVS of about a week ago.\n\nSELECT question.title, count(comment.oid) FROM question, category*,comment WHERE comment.webobject = question.oid and question.category = category.oid GROUP BY question.title;\nERROR: replace_vars_with_subplan_refs: variable not in target list\n\n-- \nChris Bitmead\nhttp://www.bigfoot.com/~chris.bitmead\nmailto:[email protected]\n", "msg_date": "Thu, 10 Jun 1999 00:43:22 +1000", "msg_from": "[email protected]", "msg_from_op": true, "msg_subject": "BUG in 6.5 - GROUP BY inheritance" } ]
[ { "msg_contents": "Hi!\nCan somebody answer my question:\nI wanna hack a WINNT System with Novel Netware installed.\nI think the only way to do this is copying the sam. file and to decrypt\nthe passwords in it. But the problem is: this file is in use while\nrunning WINNT and so you have no access. But the computer has also no\nCDROM or floppy drive, so that I could read the NT Partition in DOS\nmode.\nCan someone tell my, how I can copy the sam. file in spite of all this?\nThanx a lot\n\nCU, Matze\n\nPS: Anwers per personal mail to: [email protected], please\n\n\n\n", "msg_date": "Wed, 09 Jun 1999 19:15:56 +0200", "msg_from": "Matthias Frank <[email protected]>", "msg_from_op": true, "msg_subject": "Trying to hack NT" } ]
[ { "msg_contents": "\n> > Perhaps the locale data can be used to gather this information?\n> \n> It's certainly there ! locale data contains all information about\n> specific character set and encoding. Is it possible to use it to create\n> indices ? It should be slower but benefits of using indices will cover\n> expenses for non-US people. I didn't notice such behaivour in Informix\n> and Oracle. \n> \nInformix has the national character handling, and it is indexable in\nInformix.\nBut it is not done for the standard types char and varchar. In Informix\nyou use nchar and nvarchar, and have one locale defined per database. \nIn Oracle you have national characters, but access is not indexable. \nActually the SQL standard has something to say about national char \nand varchar. I think it is wrong that char and varchar change their behavior\nin postgresql, if you enable locale. A locale sensitive column needs to be\nspecified\nas such in the create table statement according to the standard.\nI never enable locale.\n\nAndreas\n", "msg_date": "Wed, 9 Jun 1999 19:16:14 +0200 ", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "On Wed, 9 Jun 1999, ZEUGSWETTER Andreas IZ5 wrote:\n\n> Date: Wed, 9 Jun 1999 19:16:14 +0200 \n> From: ZEUGSWETTER Andreas IZ5 <[email protected]>\n> To: 'Oleg Bartunov' <[email protected]>\n> Cc: \"'[email protected]'\" <[email protected]>\n> Subject: Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem \n> \n> \n> > > Perhaps the locale data can be used to gather this information?\n> > \n> > It's certainly there ! locale data contains all information about\n> > specific character set and encoding. Is it possible to use it to create\n> > indices ? It should be slower but benefits of using indices will cover\n> > expenses for non-US people. I didn't notice such behaivour in Informix\n> > and Oracle. \n> > \n> Informix has the national character handling, and it is indexable in\n> Informix.\n> But it is not done for the standard types char and varchar. In Informix\n> you use nchar and nvarchar, and have one locale defined per database. \n> In Oracle you have national characters, but access is not indexable. \n> Actually the SQL standard has something to say about national char \n> and varchar. I think it is wrong that char and varchar change their behavior\n> in postgresql, if you enable locale. A locale sensitive column needs to be\n> specified\n> as such in the create table statement according to the standard.\n\nThanks for explanations. It would be great if I could specify for specific\ncolumns if it needs locale. 
For now I have to pay decreasing in speed\njust to enable locale for the only column of one database !!!\nI always dream to be able to specify on fly 'SET LOCALE to ON|OFF'\nIt's a bit separate problem, but probably more easy to implement.\n\n\tRegards,\n \tOleg\n\n\n> I never enable locale.\n> \n> Andreas\n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Wed, 9 Jun 1999 21:37:44 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "Oleg Bartunov wrote:\n> \n> \n> Thanks for explanations. It would be great if I could specify for specific\n> columns if it needs locale. For now I have to pay decreasing in speed\n> just to enable locale for the only column of one database !!!\n> I always dream to be able to specify on fly 'SET LOCALE to ON|OFF'\n> It's a bit separate problem, but probably more easy to implement.\n\nActually the locale should be part of field definition (like length and \nprecision currently are) with default of ascii. \n\nAnd also it may be probably safest to maintain our own locale defs (as\nOracle \ncurrently does)so that things don't break if somebody changes the system\nones.\n\n-------------\nHannu\n", "msg_date": "Wed, 09 Jun 1999 22:04:00 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> Oleg Bartunov wrote:\n> > \n> > \n> > Thanks for explanations. It would be great if I could specify for specific\n> > columns if it needs locale. For now I have to pay decreasing in speed\n> > just to enable locale for the only column of one database !!!\n> > I always dream to be able to specify on fly 'SET LOCALE to ON|OFF'\n> > It's a bit separate problem, but probably more easy to implement.\n> \n> Actually the locale should be part of field definition (like length and \n> precision currently are) with default of ascii. \n> \n> And also it may be probably safest to maintain our own locale defs (as\n> Oracle \n> currently does)so that things don't break if somebody changes the system\n> ones.\n\nAdded to TODO:\n\n\t* Allow LOCALE on a per-column basis, default to ASCII\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 20:54:38 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> Actually the SQL standard has something to say about national char\n> and varchar. I think it is wrong that char and varchar change their \n> behavior in postgresql, if you enable locale. A locale sensitive column \n> needs to be specified as such in the create table statement according to \n> the standard. I never enable locale.\n\nI had some discussions on the list a while ago (6 months?) 
about this\ntopic, but never really got feedback from \"locale-using\" people that\nNATIONAL CHARACTER and collation sequences are an acceptable solution.\nistm that Postgres' extensibility would make this *very* easy to\nimplement and extend, and that then everyone would get the same\nbehavior from CHAR while being able to get the behaviors they need\nfrom a variety of other character sets.\n\nI do have an interest in implementing or helping with something, but\nsince I don't have to live with the consequences of the results\n(coming from an ASCII country :) it seemed to be poor form to push it\nwithout feedback from others...\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Thu, 10 Jun 1999 01:37:00 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "On Thu, 10 Jun 1999, Thomas Lockhart wrote:\n> I had some discussions on the list a while ago (6 months?) about this\n> topic, but never really got feedback from \"locale-using\" people that\n> NATIONAL CHARACTER and collation sequences are an acceptable solution.\n\n What feedback do you want? I am pretty sure two Olegs on this list are\nready to continue the discussion.\n\n> istm that Postgres' extensibility would make this *very* easy to\n> implement and extend, and that then everyone would get the same\n> behavior from CHAR while being able to get the behaviors they need\n> from a variety of other character sets.\n> \n> I do have an interest in implementing or helping with something, but\n> since I don't have to live with the consequences of the results\n> (coming from an ASCII country :) it seemed to be poor form to push it\n> without feedback from others...\n> \n> - Thomas\n> \n> -- \n> Thomas Lockhart\t\t\t\[email protected]\n> South Pasadena, California\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Thu, 10 Jun 1999 13:13:11 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> What feedback do you want? I am pretty sure two Olegs on this list are\n> ready to continue the discussion.\n\nistm that the Russian and Japanese contingents could represent the\nneeds of multibyte and locale concerns very well. So, we should ask\nourselves some questions to see if we can make *progress* in evolving\nour text handling, rather than just staying the same forever.\n\nSQL92 suggests some specific text handling features to help with\nnon-ascii applications. \"national character\" is, afaik, the feature\nwhich would hold an installation-wide local text type. \"collations\"\nwould allow other text types in the same installation, but SQL92 is a\nbit fuzzier about how to make them work.\n\nWould these mechanisms work for people? 
Or are they so fundamentally\nflawed or non-standard (it is from a standard, but I'm not sure who\nimplements it)?\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Thu, 10 Jun 1999 15:14:43 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> istm that the Russian and Japanese contingents could represent the\n> needs of multibyte and locale concerns very well. So, we should ask\n> ourselves some questions to see if we can make *progress* in evolving\n> our text handling, rather than just staying the same forever.\n> \n> SQL92 suggests some specific text handling features to help with\n> non-ascii applications. \"national character\" is, afaik, the feature\n> which would hold an installation-wide local text type. \"collations\"\n> would allow other text types in the same installation, but SQL92 is a\n> bit fuzzier about how to make them work.\n> \n> Would these mechanisms work for people? Or are they so fundamentally\n> flawed or non-standard (it is from a standard, but I'm not sure who\n> implements it)?\n\nIn my opinion, introducing NCHAR is useful at least for single byte\ncodes. Although I'm not familiar with single byte languages, I see\nstrong demands for NCHAR through recent discussions.\n\nI don't mean it's useless for multibyte, however. As far as I know\nlocales for multibyte provided by any OS are totally broken especially\nin COLLATE(I seriously doubt existing locale framework work for\nmultibyte). It would be nice to have our own locale data for COLLATE\nsomewhere in our system like some commercial dbms do, or even better\nuser defined collations allowed (this is already in the standard).\n\nI have a feeling that I'm going to implement CREATE CHARSET through\nslightly modifying the multibyte support code that currently only\nallows predefined charset. That will be the first step toward CREATE\nCOLLATE, NCHAR etc....\n---\nTatsuo Ishii\n", "msg_date": "Fri, 11 Jun 1999 09:54:08 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "On Thu, 10 Jun 1999, Thomas Lockhart wrote:\n> istm that the Russian and Japanese contingents could represent the\n> needs of multibyte and locale concerns very well. So, we should ask\n> ourselves some questions to see if we can make *progress* in evolving\n> our text handling, rather than just staying the same forever.\n\n Ok, we are here.\n And what a pros and cons for NCHAR?\n\n> SQL92 suggests some specific text handling features to help with\n> non-ascii applications. \"national character\" is, afaik, the feature\n\n What the help?\n\n> which would hold an installation-wide local text type. \"collations\"\n> would allow other text types in the same installation, but SQL92 is a\n> bit fuzzier about how to make them work.\n> \n> Would these mechanisms work for people? 
Or are they so fundamentally\n> flawed or non-standard (it is from a standard, but I'm not sure who\n> implements it)?\n> \n> - Thomas\n> \n> -- \n> Thomas Lockhart\t\t\t\[email protected]\n> South Pasadena, California\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Fri, 11 Jun 1999 11:12:36 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "\nOn 11-Jun-99 Oleg Broytmann wrote:\n> On Thu, 10 Jun 1999, Thomas Lockhart wrote:\n>> istm that the Russian and Japanese contingents could represent the\n>> needs of multibyte and locale concerns very well. So, we should ask\n>> ourselves some questions to see if we can make *progress* in evolving\n>> our text handling, rather than just staying the same forever.\n> \n> Ok, we are here.\n> And what a pros and cons for NCHAR?\n> \n>> SQL92 suggests some specific text handling features to help with\n>> non-ascii applications. \"national character\" is, afaik, the feature\n> \n> What the help?\n> \n>> which would hold an installation-wide local text type. \"collations\"\n>> would allow other text types in the same installation, but SQL92 is a\n>> bit fuzzier about how to make them work.\n>> \n>> Would these mechanisms work for people? Or are they so fundamentally\n>> flawed or non-standard (it is from a standard, but I'm not sure who\n>> implements it)?\n\nThere are two different problems under \"locale\" cover.\n\nFirst is national charset handling, mostly sorting,\nI try to find official recomendation for it.\n \nAnother one is encoding conversion - \n it's unlimited field for discussion.\n\nIMHO, conversion table have to be stored on per host basic, \nsome where near from HBA. \n\nSort table is to be placed near from createdb. \nCustom sort table is also very attractive for me.\n\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Fri, 11 Jun 1999 11:27:49 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "Hi!\n\nOn Fri, 11 Jun 1999, Dmitry Samersoff wrote:\n> IMHO, conversion table have to be stored on per host basic, \n> some where near from HBA. \n\n Currently you can do this with per-host (SQL command) SET\nCLIENT_ENCODING or (env var) export PGCLIENTENCODING=...\n\n> ---\n> Dmitry Samersoff, [email protected], ICQ:3161705\n> http://devnull.wplus.net\n> * There will come soft rains ...\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Fri, 11 Jun 1999 11:35:38 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> > istm that the Russian and Japanese contingents could represent the\n> > needs of multibyte and locale concerns very well. So, we should ask\n> > ourselves some questions to see if we can make *progress* in evolving\n> > our text handling, rather than just staying the same forever.\n> Ok, we are here.\n> And what a pros and cons for NCHAR?\n\nI was hoping you would tell me! 
:)\n\n> > SQL92 suggests some specific text handling features to help with\n> > non-ascii applications.\n> What the help?\n\nOK, SQL92 defines two kinds of native character sets: those we already\nhave (char, varchar) and those which can be locale customized (nchar,\nnational character varying, and others). char and varchar always\ndefault to the \"SQL\" behavior (which I think corresponds to ascii\n(called \"SQL_TEXT\") but I didn't bother looking for the details).\n\nSo, at its simplest, there would be two sets of character types, with\nchar, varchar, etc, always the same on every system (just like\nPostgres w/o multibyte or locale), and nchar, nvarchar, etc configured\nas your locale/multibyte environment would want.\n\nHowever, there are many more features in SQL92 to deal with text\ncustomization. I'll mention a few (well, most of them, but not in\ndetail):\n\no You can define a \"character set\" and, independently, a \"collation\".\nThe syntax for the type definition is\n CHARACTER [ VARYING ] [ (length) ]\n [ CHARACTER SET your-character-set ]\n [ COLLATE your-collation-sequence ]\n\no You can specify a character type for string literals:\n _your-character-set 'literal string' e.g. _ESPANOL 'Que pasa?'\n (forgive my omission of a leading upside down question mark :)\nWe already have some support for this in that character string\nliterals can have a type specification (e.g. \"DATETIME 'now'\") and\npresumably we can use the required underscore to convert the\n\"_ESPANOL\" to a character set and collation, all within the existing\nPostgres type system.\n\no You can specify collation behavior in a strange way:\n 'Yo dude!' COLLATE collation-method\n(which we could convert in the parser to a function call).\n\no You can translate between character sets, *if* there is a reasonable\nmapping available:\n TRANSLATE(string USING method)\nand you can define translations in a vague way (since no one but\nPostgres implemented a type system back then):\n CREATE TRANSLATION translation\n FOR source-charset\n TO target-charset\n FROM { EXTERNAL('external-translation') | IDENTITY |\nexisting-translation }\n DROP TRANSLATION translation\n\no You can convert character strings which have the same character\n\"repertoire\" from one to the other:\n CONVERT(string USING conversion-method)\n(e.g. we could define a method \"EBCDIC_TO_ASCII\" once we have an\n\"EBCDIC\" character set).\n\no You can specify identifiers (column names, etc) with a specific\ncharacter set/collation by:\n _charset colname (e.g. _FRANCAIS Francais where the second \"c\" is\nallowed to be \"c-cedilla\", a character in the French/latin character\nset; sorry I didn't type it).\n\n> > Would these mechanisms work for people? Or are they so fundamentally\n> > flawed or non-standard (it is from a standard, but I'm not sure who\n> > implements it)?\n\nFully implementing these features (or a reasonable subset) would give\nus more capabilities than we have now, and imho can be fit into our\nexisting type system. *Only* implementing NCHAR etc gives us the\nability to carry SQL_TEXT and multibyte/locale types in the same\ndatabase, which may not be a huge benefit to those who never want to\nmix them in the same installation. 
I don't know who those folks might\nbe but Tatsuo and yourself probably do.\n\nComments?\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Fri, 11 Jun 1999 15:23:57 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "Hi!\n\nOn Fri, 11 Jun 1999, Thomas Lockhart wrote:\n> > And what a pros and cons for NCHAR?\n> I was hoping you would tell me! :)\n\n I can see only one advantage for NCHAR - those fields that aren't NCHAR\nwill not use strcoll() for comparison.\n But I cannot remember one filed in my database that does not contain\nrussian characters. Even my WWW logs contain them.\n So in any case I am forced to make all my fields NCHAR, and this is\nexactly what we have now - postgres compiled with --enable-locale makes all\nchar NCHAR.\n\n> - Thomas\n> -- \n> Thomas Lockhart\t\t\t\[email protected]\n> South Pasadena, California\n\nOleg.\n---- \n Oleg Broytmann http://members.xoom.com/phd2/ [email protected]\n Programmers don't die, they just GOSUB without RETURN.\n\n", "msg_date": "Sat, 12 Jun 1999 18:32:05 +0400 (MSD)", "msg_from": "Oleg Broytmann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> I can see only one advantage for NCHAR - those fields that aren't NCHAR\n> will not use strcoll() for comparison.\n> But I cannot remember one filed in my database that does not contain\n> russian characters. Even my WWW logs contain them.\n> So in any case I am forced to make all my fields NCHAR, and this is\n> exactly what we have now - postgres compiled with --enable-locale makes \n> all char NCHAR.\n\nYes, and that is how we got the implementation we have. Implementing\nNCHAR is a step on the road toward having fully flexible character set\ncapabilities in a single database. By itself, NCHAR probably does not\noffer tremendous advantages for anyone running a fully \"localized\"\ndatabase.\n\nSo some the questions really might be:\n1) Is implementing NCHAR, and reverting CHAR back to the SQL-standard\nascii-ish behavior, acceptable, or does it introduce fatal flaws for\nimplementers? e.g. do any third party tools know about NCHAR? I would\nassume that the odbc interface could just map NCHAR to CHAR if odbc\nknows nothing about NCHAR...\n\n2) Solving various problems for specific datasets will require new\nspecialized support routines. If this is true, then isn't the Postgres\ntype system the way to introduce these specialized capabilities?\nDoesn't Unicode, for example, work well as a new data type, as opposed\nto shoehorning it into all areas of the backend with #ifdefs?\n\n3) Do the SQL92-defined features help us solve the problem, or do they\njust get in the way? 
istm that they address some of the features we\nwould need, and have sufficient fuzz around the edges to allow a\nsuccessful implementation.\n\nAn example of what we could do would be to have both Russian/Cyrillic\nand Japanese regression tests in the main regression suite, since they\ncould coexist with the other tests.\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Sat, 12 Jun 1999 15:29:05 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "Oleg Broytmann wrote:\n> \n> Hi!\n> \n> On Fri, 11 Jun 1999, Thomas Lockhart wrote:\n> > > And what a pros and cons for NCHAR?\n> > I was hoping you would tell me! :)\n> \n> I can see only one advantage for NCHAR - those fields that aren't NCHAR\n> will not use strcoll() for comparison.\n> But I cannot remember one filed in my database that does not contain\n> russian characters. Even my WWW logs contain them.\n\nwhat about the tables beginning with pg_ ?\n\nAre the system tables currently affected by --enable-locale ?\n\n> So in any case I am forced to make all my fields NCHAR, and this is\n> exactly what we have now - postgres compiled with --enable-locale makes all\n> char NCHAR.\n\nWell, the problem is that while I do occasionally need cyrillic chars, \nI also need English, Estonian, Finnish/Swedish, Latvian and Lithuanian.\n\nThe only two of them that don't have overlapping character codes are\nRussian (all chars >127) and English (all < 128)\n\nMy current solution is to run without --enable-locale and do all the\nsorting\nin the client. But it would be often useful to have language specific\ncolumns.\n\n--------------------\nHannu\n", "msg_date": "Sat, 12 Jun 1999 21:16:53 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> Well, the problem is that while I do occasionally need cyrillic chars,\n> I also need English, Estonian, Finnish/Swedish, Latvian and Lithuanian.\n> The only two of them that don't have overlapping character codes are\n> Russian (all chars >127) and English (all < 128)\n> My current solution is to run without --enable-locale and do all the\n> sorting\n> in the client. But it would be often useful to have language specific\n> columns.\n\nGreat! You're our representative for multi-charset usage :)\n\nPerhaps you can think about and comment on the SQL92 features which\nsupport this, and whether that kind of capability would suit your\nneeds. Of course, we would need to start getting specific about how to\nmap those generic features into a Postgres implementation...\n\nbtw, istm that we could make things somewhat backward compatible by\nallowing the backend to be built with CHAR and NCHAR always mapped to\nNCHAR.\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Sun, 13 Jun 1999 15:53:49 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> OK, SQL92 defines two kinds of native character sets: those we already\n> have (char, varchar) and those which can be locale customized (nchar,\n> national character varying, and others). 
char and varchar always\n> default to the \"SQL\" behavior (which I think corresponds to ascii\n> (called \"SQL_TEXT\") but I didn't bother looking for the details).\n\nThis seems to be a little bit different from the standard. First,\nSQL_TEXT is not equal to ascii. It's a subset of ascii. Second, the\ndefault charset for char and varchar might be implemenation dependent,\nnot neccesarily limited to SQL_TEXT. The only requirement is the\ncharset must contain the repertoire SQL_TEXT has. I think any charsets\nincluding ascii I've ever seen satisfies the requirement. Third, the\nstandards says nothing about locale.\n---\nTatsuo Ishii\n", "msg_date": "Mon, 14 Jun 1999 23:07:08 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "> Well, the problem is that while I do occasionally need cyrillic chars, \n> I also need English, Estonian, Finnish/Swedish, Latvian and Lithuanian.\n\nProbably this is the first example ever appeared on this list for the\ndemand of i18n database, that should be the future direction of\nPostgreSQL, in my opinion.\n\nCurrently MB has two encodings for this kind of purpose: Unicode and\nmule-internal-code. Both of them allows mixed languages even in the\nsame column. This might give you a partial solution.\n\n> The only two of them that don't have overlapping character codes are\n> Russian (all chars >127) and English (all < 128)\n> \n> My current solution is to run without --enable-locale and do all the\n> sorting\n> in the client. But it would be often useful to have language specific\n> columns.\n\nThis is another reason why we cannot rely on the locale mechanism\nsupported by os. Basically locale is a global data for the entire\nprocess. If each column has different language, we have to switch\nlocales. That would be inefficient and painful.\n---\nTatsuo Ishii\n", "msg_date": "Mon, 14 Jun 1999 23:07:15 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "\n> This seems to be a little bit different from the standard. First,\n> SQL_TEXT is not equal to ascii. It's a subset of ascii.\n\nYes, sorry. I was lazy in my posting.\n\n> Second, the\n> default charset for char and varchar might be implemenation dependent,\n> not neccesarily limited to SQL_TEXT. The only requirement is the\n> charset must contain the repertoire SQL_TEXT has. I think any charsets\n> including ascii I've ever seen satisfies the requirement.\n\nYow! I certainly misremembered the definition. Date and Darwen, 1997,\npoint out that the SQL implementation *must* support at least one\ncharacter set, SQL_TEXT, whose repertoire must contain:\n\n1) Every character that is used in the SQL language itself (this is\nthe part I remembered), and\n\n2) Every character that is included in *any other character set*\nsupported by the SQL implementation (Postgres).\n\nThis second requirement is presumably to enable text handling of\nmultiple character sets, but would seem to put severe restrictions on\nhow we would implement things. Or can it act only as a placeholder,\nallowing us to define new character sets as different types in\nPostgres? 
Otherwise, we would have to retrofit capabilities into\nSQL_TEXT anytime we defined a new character set??\n\n> Third, the\n> standards says nothing about locale.\n\nYou are referring to the Unix-style system support for \"locale\"?\nCertainly the NCHAR and character set support in SQL92 would qualify\nas locale support in the generic sense...\n\nRegards.\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Mon, 14 Jun 1999 15:10:07 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" }, { "msg_contents": "> > Second, the\n> > default charset for char and varchar might be implemenation dependent,\n> > not neccesarily limited to SQL_TEXT. The only requirement is the\n> > charset must contain the repertoire SQL_TEXT has. I think any charsets\n> > including ascii I've ever seen satisfies the requirement.\n> \n> Yow! I certainly misremembered the definition. Date and Darwen, 1997,\n> point out that the SQL implementation *must* support at least one\n> character set, SQL_TEXT, whose repertoire must contain:\n> \n> 1) Every character that is used in the SQL language itself (this is\n> the part I remembered), and\n> \n> 2) Every character that is included in *any other character set*\n> supported by the SQL implementation (Postgres).\n> \n> This second requirement is presumably to enable text handling of\n> multiple character sets, but would seem to put severe restrictions on\n> how we would implement things. Or can it act only as a placeholder,\n> allowing us to define new character sets as different types in\n> Postgres? Otherwise, we would have to retrofit capabilities into\n> SQL_TEXT anytime we defined a new character set??\n\nI don't think so. 2) can be read as:\n\nAny other character set must contain every character included in\nSQL_TEXT.\n\nThis seems extremely easy to implement. We could define SQL_TEXT be a\nsubset of ASCII and almost any character set contains ASCII chars. As\na result, any character set satisfies above that is logically same as\n2). No?\n\n> > Third, the\n> > standards says nothing about locale.\n> \n> You are referring to the Unix-style system support for \"locale\"?\n\nYes.\n\n> Certainly the NCHAR and character set support in SQL92 would qualify\n> as locale support in the generic sense...\n---\nTatsuo Ishii\n", "msg_date": "Thu, 17 Jun 1999 09:53:21 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " }, { "msg_contents": "> > SQL_TEXT ... repertoire must contain:\n> > 1) Every character that is used in the SQL language itself (this is\n> > the part I remembered), and\n> > 2) Every character that is included in *any other character set*\n> > supported by the SQL implementation (Postgres).\n> > This second requirement is presumably to enable text handling of\n> > multiple character sets, but would seem to put severe restrictions on\n> > how we would implement things. Or can it act only as a placeholder,\n> > allowing us to define new character sets as different types in\n> > Postgres? Otherwise, we would have to retrofit capabilities into\n> > SQL_TEXT anytime we defined a new character set??\n> I don't think so. 
2) can be read as:\n> Any other character set must contain every character included in\n> SQL_TEXT.\n\nHere is the text from the July, 1992 SQL92 draft standard:\n\n The <implementation-defined character repertoire name>\nSQL_TEXT\n specifies the name of a character repertoire and implied\nform-of-\n use that can represent every character that is in <SQL\nlanguage\n character> and all other characters that are in character\nsets\n supported by the implementation.\n\nand later in the same doc:\n\n 11)The character set named SQL_TEXT is an\nimplementation-defined\n character set whose character repertoire is SQL_TEXT.\n\nI'm reading this to say that SQL_TEXT must contain the union of all\ncharacters in the character sets in the implementation, rather than an\nintersection between that union and the characters required by the SQL\nlanguage itself.\n\nBut I'm not really sure what they mean by this, or whether it is a\nproblem or not. Clearly different character sets and collations can be\nmixed only when that can preserve meaning, so saying that SQL_TEXT has\na repertoire which contains ASCII characters and Japanese characters\ndoesn't seem to help much.\n\nSo istm that \"SQL_TEXT\" might be just a container class for all\ncharacters in the installation, which still doesn't make complete\nsense to me wrt a Postgres implementation.\n\n - Tom\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Thu, 17 Jun 1999 06:11:42 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem" } ]
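One point from this thread worth making concrete is why per-column collation is so awkward as long as it leans on the operating system's locale machinery: LC_COLLATE is global to the process, so comparing values from columns in different languages means switching it around every comparison. A purely hypothetical helper (not proposed code; the locale name in the comment is only an example) shows the save/switch/restore dance each strcoll() call would need:

    #include <string.h>
    #include <stdlib.h>
    #include <locale.h>

    /*
     * Compare two strings under an explicitly named collation by swapping
     * the process-wide LC_COLLATE setting in and out.  No error checking;
     * the point is just the per-call overhead being discussed.
     */
    static int
    coll_compare(const char *a, const char *b, const char *locale_name)
    {
        char   *saved = setlocale(LC_COLLATE, NULL);
        char   *saved_copy = saved ? strdup(saved) : NULL;
        int     result;

        setlocale(LC_COLLATE, locale_name);     /* e.g. "ru_RU.KOI8-R" for one column */
        result = strcoll(a, b);
        if (saved_copy)
        {
            setlocale(LC_COLLATE, saved_copy);  /* restore for the rest of the backend */
            free(saved_copy);
        }
        return result;
    }

That per-call cost is one more argument for carrying character set and collation definitions inside the database itself, as suggested earlier in this thread, rather than relying on whatever locales the OS happens to provide.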
[ { "msg_contents": "Hi,\n\nI have a problem with large queries: I have a table with 300000 rows and\nwhen I try the following query the backends runs out of memory:\n\n select upper(name) from my_table;\n\nThe following queries without funcs or with funcs of int4 work fine:\n\n select name from my_table;\n select max(id,0) from my_table;\n\nso I suspect that the trouble is with memory allocated by functions\nreturning data by address, which is not released until the end of the\ntransaction. With more then 300000 rows you eat a lot of memory.\n\nThis means that postgres is currently unable to execute large queries\nthat involve functions on text fields. A pretty bad limitation IMHO.\n\nI tried to look at the code but haven't found a way to release the\nstorage allocated for each tuple and the context allocation code is\nnot very documented.\n\nAny suggestion?\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Wed, 9 Jun 1999 22:36:00 +0200 (MET DST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "out of memory with large queries" }, { "msg_contents": "Massimo Dal Zotto <[email protected]> writes:\n> This means that postgres is currently unable to execute large queries\n> that involve functions on text fields. A pretty bad limitation IMHO.\n\nYup. This is one of the major projects that I was proposing for 6.6.\nI do not think the fix will be easy, but we need to do it.\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 17:14:12 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] out of memory with large queries " }, { "msg_contents": "At 17:14 9/06/99 -0400, you wrote:\n>Massimo Dal Zotto <[email protected]> writes:\n>> This means that postgres is currently unable to execute large queries\n>> that involve functions on text fields. A pretty bad limitation IMHO.\n>\n>Yup. This is one of the major projects that I was proposing for 6.6.\n>I do not think the fix will be easy, but we need to do it.\n>\n\nIn yet another attempt to go over my head, wouldn't it be possible to pass three parameters to the aggregate state functions, so their signatures become:\n\nint (or void) = transfn1 (transtype1, basetype, *transtype1)\nint (or void) = transfn2 (transtype2, *transtype2)\nint (or void) = finalfn (transtype1, transtype2, *finaltype)\n\nwhere the int return value *may* give status information, or may just be ignored.\n\nThis clearly breaks the ability to use the existing float8pl etc functions, but the cost is reasonably controlled: the creation of xxxxx3 wrappers (eg. float8pl3) which take three parameters and call the original function, then release the memory.\n\nA better solution would be to have float8pl accept 2 or 3 parameters, and modify how it returns information based on the parameter count, but I *presume* that this would break on many platforms/compilers (is this right?), in any case it seems pretty nasty returning an int OR a float8*, depending on your parameters. 
Perhaps if the functions were modified as:\n\ntranstype1 = transfn1 (transtype1, basetype [, *transtype1])\ntranstype2 = transfn2 (transtype2 [, *transtype2])\nfinaltype = finalfn (transtype1, transtype2 [, *finaltype])\n\nThen they are completely source-compatible with existing functions, but will accept an optional pointer to the storage for their return value (probably a local variable in the caller).\n\nIf it worked, then each of the aggregates could avoid memory allocation completely, which has to be a performance gain as well. Unfortunately, even if this worked on all platforms, I have no idea what it would do to the internals of PG.\n\nAm I missing something (again?).\n\n\n----------------------------------------------------------------\nPhilip Warner | __---_____\nAlbatross Consulting Pty. Ltd. |----/ - \\\n(A.C.N. 008 659 498) | /(@) ______---_\nTel: +61-03-5367 7422 | _________ \\\nFax: +61-03-5367 7430 | ___________ |\nHttp://www.rhyme.com.au | / \\|\n | --________--\nPGP key available upon request, | /\nand from pgp5.ai.mit.edu:11371 |/\n", "msg_date": "Thu, 10 Jun 1999 12:38:48 +1000", "msg_from": "Philip Warner <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] out of memory with large queries " } ]
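A workaround sketch for the memory problem discussed above, not something proposed in the thread: since the per-tuple results of functions like upper() are described there as being held until the end of the transaction, the scan can be split into slices so that no single statement evaluates the function over all 300000 rows. The slice bounds are illustrative and assume the id column mentioned in the thread is roughly evenly distributed; run the statements outside an explicit BEGIN block so each one is its own transaction.

    SELECT upper(name) FROM my_table WHERE id >=      0 AND id < 100000;
    SELECT upper(name) FROM my_table WHERE id >= 100000 AND id < 200000;
    SELECT upper(name) FROM my_table WHERE id >= 200000 AND id < 300000;

Peak memory is then bounded by the slice size rather than by the whole table.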
[ { "msg_contents": "I noticed a couple of days ago that the current sources coredump if you\ntry to use GROUP BY on the first sub-SELECT of a UNION, eg\n\ncreate table category (name text, image text, url text, parent oid);\n\nselect name from category group by name\nunion select image from category;\n\n=> kerboom\n\n(It works if you put a GROUP BY on the second select, though. 6.4.2\ndidn't coredump in a cursory test, but it didn't produce the right\nanswers either.)\n\nA check of the mail archives shows that Bill Carlson reported this\nbug to pgsql-sql on 22 April, but I'd not picked up on it at the time.\n\nThe cause is that plan_union_queries() is failing to clear out the\ngroupclause before it returns control to union_planner, so the GROUP BY\ngets applied twice, once to the subselect and once (incorrectly) to the\nresult of the UNION. (This wouldn't have happened with a less klugy\nrepresentation for UNION parsetrees, but I digress.) You can see this\nhappening if you look at the EXPLAIN output; the coredump only happens\nat execution time.\n\nThis patch fixes it:\n\n\n*** backend/optimizer/prep/prepunion.c.orig\tSun Jun 6 13:38:11 1999\n--- backend/optimizer/prep/prepunion.c\tWed Jun 9 20:38:48 1999\n***************\n*** 192,197 ****\n--- 192,204 ----\n \t\t/* needed so we don't take the flag from the first query */\n \t\tparse->uniqueFlag = NULL;\n \n+ \t/* Make sure we don't try to apply the first query's grouping stuff\n+ \t * to the Append node, either. Basically we don't want union_planner\n+ \t * to do anything when we return control, except add the top sort/unique\n+ \t * nodes for DISTINCT processing if this wasn't UNION ALL, or the top\n+ \t * sort node if it was UNION ALL with a user-provided sort clause.\n+ \t */\n+ \tparse->groupClause = NULL;\n \tparse->havingQual = NULL;\n \tparse->hasAggs = false;\n \n\n\nI feel fairly confident that this is a low-risk patch; certainly\nit cannot break anything that doesn't involve GROUP BY and UNION.\nIs there any objection to my committing it at this late hour?\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 09 Jun 1999 20:56:42 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": true, "msg_subject": "UNION + GROUP BY bug located" }, { "msg_contents": "> The cause is that plan_union_queries() is failing to clear out the\n> groupclause before it returns control to union_planner, so the GROUP BY\n> gets applied twice, once to the subselect and once (incorrectly) to the\n> result of the UNION. (This wouldn't have happened with a less klugy\n> representation for UNION parsetrees, but I digress.) You can see this\n> happening if you look at the EXPLAIN output; the coredump only happens\n> at execution time.\n\nIs going to be similar to LIKE indexing, where everyone beats me up\nabout it, but the code remains unchanged because no one can think of a\nbetter/cleaner idea? But i digress... :-)\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 9 Jun 1999 21:13:34 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] UNION + GROUP BY bug located" } ]
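For installations that cannot apply the patch above, the thread's observation that only a GROUP BY on the first sub-SELECT triggers the crash suggests a stopgap: put the grouped branch last. This assumes the caller does not care which branch is written first, since UNION removes duplicates either way.

    SELECT image FROM category
    UNION
    SELECT name FROM category GROUP BY name;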
[ { "msg_contents": "Hannu Krosing <[email protected]> writes:\n>in single-byte locales it should be easy:\n\nIf you do it right, in double-byte locales as well.\n\n>1. sort a char[256] array from 0-255 using the current locale settings,\n> do it once, either at startup or when first needed.\n\nOr, alternatively, maintain per-locale table files and mmap them.\n\n>2. use binary search on that array to locate the last char before %\n> in this sorted array:\n\nOr, alternatively, maintain another table that maps char values to\nlexicographic order (and a per-locale constant for maximum character\norder value):\n\n> if (it is not the last char in sorted array)\n> then (replace that char with the one at index+1)\n> else (\n> if (it is not the first char in like string)\n> then (discard the last char and goto 2.\n> else (don't do the end restriction)\n> )\n\n\tif ( (nextindex = charorder[c]+1) <= maxchar ) {\n\t\tnextchar = charmap[nextindex];\n\t} else {\n\t\tno nextchar, append charmap[maxchar] to LIKE base string\n\t}\n\nI don't see where the pain is, but I may be missing something.\n\n\t-Michael Robinson\n\n", "msg_date": "Thu, 10 Jun 1999 10:14:10 +0800 (CST)", "msg_from": "Michael Robinson <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres 6.5 beta2 and beta3 problem " } ]
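For context, the point of the sorted charmap/charorder lookup sketched above is to turn a LIKE prefix into a range that an ordinary btree index can use. A minimal illustration (table t and column name are hypothetical; 'abd' is a correct upper bound only when 'd' sorts immediately after 'c' in the active locale, which is exactly what the next-character computation is meant to guarantee):

    -- prefix match as written:
    SELECT * FROM t WHERE name LIKE 'abc%';
    -- index-friendly rewrite the optimization aims for:
    SELECT * FROM t WHERE name >= 'abc' AND name < 'abd';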
[ { "msg_contents": "Well, I fixed one of two problems shown by Mark Wright\napplication (below) and found that subj doesn't work correctly:\n\n--session1:\ncreate table t (x int, y int);\ninsert into t values (1, 1);\ninsert into t values (2, 2);\nbegin;\nupdate t set x = 3 where x = 1;\n\n--session2:\nselect * from t order by x for update; -- waits\n\n--session1:\ncommit;\n\nsession2 returns:\nx|y\n-+-\n3|1\n2|2\n\n: FOR UPDATE in subj must be handled before sorting.\nNo ability to fix this in 6.5.X. Is there the \"known-bug\"\ndoc? \n\nThere is another problem in subj - sometimes application gets\nERROR: EvalPlanQual: t_xmin is uncommitted ?!\nI'll try to find why. Mark (Wright), could you avoid\norder by in PL function? If you really need in ordered\nupdates then try to create index on id_number and add\nid_number >= 0 to WHERE in select for update.\n\nVadim\n======================================================\nTest Case:\n----------\nThe following SQL script will create the tables, indices and function necessary\nto reproduce the error. If you then execute these commands, it should re-create\nthe problem:\n perl -e 'for ($i=0;$i<200;++$i){system \\\n \"psql -c \\\"select get_next_test_attendee();\\\" >> $$.txt\";}'&\n perl -e 'for ($i=0;$i<200;++$i){system \\\n \"psql -c \\\"select get_next_test_attendee();\\\" >> $$.txt\";}'&\n perl -e 'for ($i=0;$i<200;++$i){system \\\n \"psql -c \\\"select get_next_test_attendee();\\\" >> $$.txt\";}'&\n perl -e 'for ($i=0;$i<200;++$i){system \\\n \"psql -c \\\"select get_next_test_attendee();\\\" >> $$.txt\";}'&\n\n(The same error occurs if I use DBI+DBD::Pg in a Perl script instead of psql.)\n\n===============================================================================\n Begin Script\n===============================================================================\ndrop table test_attendees;\ndrop sequence test_attendees_id_number_seq;\ncreate table test_attendees\n(\n id_number serial,\n print_status char default 'R',\n name varchar(20)\n);\ncreate index idx_test_attendees_name on test_attendees(name);\n\n\nDROP FUNCTION get_next_test_attendee ();\nCREATE FUNCTION get_next_test_attendee() returns int4 AS '\nDECLARE\n test_attendee_rec RECORD;\nBEGIN\n FOR test_attendee_rec IN SELECT * FROM test_attendees \n WHERE print_status = ''R''\n ORDER BY id_number \n FOR UPDATE OF test_attendees\n LOOP\n -- If more changes in test_attendee are to be made than just setting\n -- status to P, do them all in one UPDATE. The record is\n -- locked now and the lock will release only when our entire\n -- transaction commits or rolls back - not when we update it.\n UPDATE test_attendees SET print_status = ''Y'' \n WHERE id_number = test_attendee_rec.id_number;\n\n -- Now we return from inside the loop at the first\n -- row processed. This ensures we will process one\n -- row at max per call.\n RETURN test_attendee_rec.id_number;\n END LOOP;\n\n -- If we reach here, we did not find any row (left) with\n -- print_status = R\n return -1;\n\nEND;' LANGUAGE 'plpgsql';\n\ninsert into test_attendees (name) values ('name1');\ninsert into test_attendees (name) values ('name2');\n...\nI used 500 rows.\n", "msg_date": "Thu, 10 Jun 1999 11:14:33 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "select order by for update " }, { "msg_contents": "> There is another problem in subj - sometimes application gets\n> ERROR: EvalPlanQual: t_xmin is uncommitted ?!\n> I'll try to find why. 
Mark (Wright), could you avoid\n> order by in PL function? If you really need in ordered\n> updates then try to create index on id_number and add\n> id_number >= 0 to WHERE in select for update.\n\nOps, this will not work in all cases. Try to rewrite select:\n\nSELECT * FROM test_attendees\nWHERE print_status = 'R'\nAND id_number = (select min(id_number) from test_attendees)\nFOR UPDATE OF test_attendees\n\nand run it in loop.\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 11:33:32 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] select order by for update" } ]
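A sketch of Vadim's rewritten query, with one assumption added that is not in the thread: the inner min() is restricted to print_status = 'R', since an unrestricted min(id_number) keeps returning rows that have already been set to 'Y'. With concurrent callers the statement can still come back empty when two sessions pick the same id, so the caller needs a retry loop.

    SELECT * FROM test_attendees
     WHERE print_status = 'R'
       AND id_number = (SELECT min(id_number) FROM test_attendees
                         WHERE print_status = 'R')
       FOR UPDATE OF test_attendees;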
[ { "msg_contents": "\nThe following query produces the following error\n\nBUT...\n\nif I take out the \"*\" from category* it starts to work. Unfortunately I\nneed the \"*\" for my purposes. Bug?\n\nThis is from CVS of about a week ago.\n\nSELECT question.title, count(comment.oid) FROM question,\ncategory*,comment WHERE comment.webobject = question.oid and\nquestion.category = category.oid GROUP BY question.title;\nERROR: replace_vars_with_subplan_refs: variable not in target list\n\n-- \nChris Bitmead\nhttp://www.bigfoot.com/~chris.bitmead\nmailto:[email protected]\n", "msg_date": "Thu, 10 Jun 1999 13:48:01 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "BUG in 6.5 - GROUP BY inheritance" }, { "msg_contents": "Chris Bitmead <[email protected]> writes:\n> This is from CVS of about a week ago.\n\n> SELECT question.title, count(comment.oid) FROM question,\n> category*,comment WHERE comment.webobject = question.oid and\n> question.category = category.oid GROUP BY question.title;\n\nSee my message from Sunday, \"inherited GROUP BY is busted\":\n\n: It turns out that pretty much *anything* involving grouping or\n: aggregation would fail if the query used inheritance, because the\n\nThis query seems to work with current sources (although EXPLAIN\nfails for it --- I think that explain.c is out of sync with the\nexecutor).\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 10 Jun 1999 11:06:22 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] BUG in 6.5 - GROUP BY inheritance " }, { "msg_contents": "\nOk, I upgraded my CVS sources yesterday, and simple inherited/GROUP BY\nqueries are working again. But a more complex one still seems broken.\nThis is the query...\n\n\n\n\nhttpd=> SELECT question.oid, question.title, question.summary,\nquestion.datetime, category.oid, category.title, category.image, email,\nwebuser.name, count(comment.oid) FROM question, webuser,\ncategory*,comment WHERE question.webuser = webuser.oid and\nquestion.category = category.oid AND comment.webobject = question.oid\nGROUP BY question.oid, question.title, question.summary,\nquestion.datetime, category.oid, category.title, category.image, email,\nwebuser.name UNION SELECT question.oid, question.title,\nquestion.summary, question.datetime, category.oid, category.title,\ncategory.image, webuser.email, webuser.name, 0 FROM question, webuser,\ncategory* WHERE question.webuser = webuser.oid and question.category =\ncategory.oid;\nERROR: replace_vars_with_subplan_refs: variable not in target list\n\n\nTom Lane wrote:\n> \n> Chris Bitmead <[email protected]> writes:\n> > This is from CVS of about a week ago.\n> \n> > SELECT question.title, count(comment.oid) FROM question,\n> > category*,comment WHERE comment.webobject = question.oid and\n> > question.category = category.oid GROUP BY question.title;\n> \n> See my message from Sunday, \"inherited GROUP BY is busted\":\n> \n> : It turns out that pretty much *anything* involving grouping or\n> : aggregation would fail if the query used inheritance, because the\n> \n> This query seems to work with current sources (although EXPLAIN\n> fails for it --- I think that explain.c is out of sync with the\n> executor).\n> \n> regards, tom lane\n\n-- \nChris Bitmead\nmailto:[email protected]\nhttp://www.techphoto.org - Photography News, Stuff that Matters\n", "msg_date": "Fri, 11 Jun 1999 23:12:56 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "Re: 
[HACKERS] BUG in 6.5 - GROUP BY inheritance" }, { "msg_contents": "Chris Bitmead <[email protected]> writes:\n> httpd=> SELECT question.oid, question.title, question.summary,\n> question.datetime, category.oid, category.title, category.image, email,\n> webuser.name, count(comment.oid) FROM question, webuser,\n> category*,comment WHERE question.webuser = webuser.oid and\n> question.category = category.oid AND comment.webobject = question.oid\n> GROUP BY question.oid, question.title, question.summary,\n> question.datetime, category.oid, category.title, category.image, email,\n> webuser.name UNION SELECT question.oid, question.title,\n> question.summary, question.datetime, category.oid, category.title,\n> category.image, webuser.email, webuser.name, 0 FROM question, webuser,\n> category* WHERE question.webuser = webuser.oid and question.category =\n> category.oid;\n> ERROR: replace_vars_with_subplan_refs: variable not in target list\n\nWould it be possible to have some CREATE TABLE statements for these\ntables, so that the problem can be reproduced without so much\nreverse-engineering?\n\nBTW, I do not know when \"yesterday\" your time might have been,\nbut I committed a critical fix for UNION + GROUP BY in\nbackend/optimizer/prep/prepunion.c at 10 Jun 02:55 EDT (-0400).\nIf you don't have that version please update and try again.\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 11 Jun 1999 10:02:56 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] BUG in 6.5 - GROUP BY inheritance " }, { "msg_contents": "\nJust wondering, when I do a COPY in the latest CVS into a file, it makes\nthe file both where I ask it to AND in\n/home/postgres/data/base/<filename>. I've tried removing both files and\ndoing it again, and it still happens. The one in /home/postgres looks\nlike some kind of binary file.\n\nThis is latest CVS.\n", "msg_date": "Sun, 13 Jun 1999 22:21:59 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "COPY bug?" }, { "msg_contents": "Chris Bitmead <[email protected]> writes:\n> Just wondering, when I do a COPY in the latest CVS into a file, it makes\n> the file both where I ask it to AND in\n> /home/postgres/data/base/<filename>. I've tried removing both files and\n> doing it again, and it still happens. The one in /home/postgres looks\n> like some kind of binary file.\n\nCan't reproduce it here. Anyone else seeing this?\n\nIf you do something like\n\tCOPY int4_tbl TO 'int4out';\nyou will get the file dumped into the database directory, because that\nis the current working directory of the backend; you need to give a\nfull path in this form of the COPY command to get the file put someplace\nmore useful. But I don't see how COPY could produce two output files\nwhen it's only doing one fopen()...\n\nIt occurs to me that it's a very dangerous thing for server-side COPY\nto default to storing into the DB directory; pick the name of an\nexisting table and boom, you just overwrote your table. Maybe the\nserver-side COPY command ought to insist on being given an absolute\npath?\n\n\t\t\tregards, tom lane\n", "msg_date": "Sun, 13 Jun 1999 16:09:42 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] COPY bug? " }, { "msg_contents": "\nIs it really necessary that the mailing lists block \"non-member\nsubmissions\"?. 
I have several email addresses and this is rather\ninconvenient.\n\n-- \nChris Bitmead\nmailto:[email protected]\nhttp://www.techphoto.org - Photography News, Stuff that Matters\n", "msg_date": "Wed, 16 Jun 1999 14:16:12 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "Postgres mailing lists" }, { "msg_contents": "At 02:16 PM 6/16/99 +1000, Chris Bitmead wrote:\n>\n>Is it really necessary that the mailing lists block \"non-member\n>submissions\"?. I have several email addresses and this is rather\n>inconvenient.\n\nIn my experiences with other mailing lists - yes. If the\nmailing list address shows up anywhere accessible by \nweb spiders, it will be found, and the list will be hit\nby spam.\n\nSo I guess the question is really \"which is worse, getting\nhit by occassional spam or having to post from a subscribed\ne-mail address?\"\n\nI have two e-mail addresses, one which I use for personal\ne-mail and lists, so the answer's easy for me. Keep it\nclosed to subscribers.\n\nIf no one anywhere has the e-mail address of the list on\na spider-accesible web page, then opening it *might*\nwork unless...\n\nAnyone ever posts the list address to Usenet.\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Tue, 15 Jun 1999 21:43:38 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "On Wed, 16 Jun 1999, Chris Bitmead wrote:\n\n> \n> Is it really necessary that the mailing lists block \"non-member\n> submissions\"?. I have several email addresses and this is rather\n> inconvenient.\n\nThere is a pgsql-loopback address you can subscribe to, so that you don't\nhave to be a member of every list, but, yes, it is required...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 16 Jun 1999 01:46:04 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "Well I reckon there should be a pseudo-subscription mechanism, where you\ncan register yourself as a contributor without actually getting a copy\nof the email. I could subscribe under my several email addresses just so\nthat I can mail from all accounts but I don't really want to receive X\ncopies of everything. Is there a way to subscribe without getting\nanything sent? A kind of suspended account I guess you would call it.\n\nThe Hermit Hacker wrote:\n> \n> On Wed, 16 Jun 1999, Chris Bitmead wrote:\n> \n> >\n> > Is it really necessary that the mailing lists block \"non-member\n> > submissions\"?. I have several email addresses and this is rather\n> > inconvenient.\n> \n> There is a pgsql-loopback address you can subscribe to, so that you don't\n> have to be a member of every list, but, yes, it is required...\n> \n> Marc G. 
Fournier ICQ#7615664 IRC Nick: Scrappy\n> Systems Administrator @ hub.org\n> primary: [email protected] secondary: scrappy@{freebsd|postgresql}.org\n\n-- \nChris Bitmead\nmailto:[email protected]\nhttp://www.techphoto.org - Photography News, Stuff that Matters\n", "msg_date": "Wed, 16 Jun 1999 16:11:37 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "> At 02:16 PM 6/16/99 +1000, Chris Bitmead wrote:\n> >\n> >Is it really necessary that the mailing lists block \"non-member\n> >submissions\"?. I have several email addresses and this is rather\n> >inconvenient.\n> \n> In my experiences with other mailing lists - yes. If the\n> mailing list address shows up anywhere accessible by \n> web spiders, it will be found, and the list will be hit\n> by spam.\n> \n> So I guess the question is really \"which is worse, getting\n> hit by occassional spam or having to post from a subscribed\n> e-mail address?\"\n> \n> I have two e-mail addresses, one which I use for personal\n> e-mail and lists, so the answer's easy for me. Keep it\n> closed to subscribers.\n> \n> If no one anywhere has the e-mail address of the list on\n> a spider-accesible web page, then opening it *might*\n> work unless...\n> \n> Anyone ever posts the list address to Usenet.\n\nI think there is a way to add all your e-mail addresses to the list\nwithout getting mail to each address. Not sure how, though.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 16 Jun 1999 06:11:42 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "> Well I reckon there should be a pseudo-subscription mechanism, where you\n> can register yourself as a contributor without actually getting a copy\n> of the email. I could subscribe under my several email addresses just so\n> that I can mail from all accounts but I don't really want to receive X\n> copies of everything. Is there a way to subscribe without getting\n> anything sent? A kind of suspended account I guess you would call it.\n\nThat's what loopback does.\n\n> \n> The Hermit Hacker wrote:\n> > \n> > On Wed, 16 Jun 1999, Chris Bitmead wrote:\n> > \n> > >\n> > > Is it really necessary that the mailing lists block \"non-member\n> > > submissions\"?. I have several email addresses and this is rather\n> > > inconvenient.\n> > \n> > There is a pgsql-loopback address you can subscribe to, so that you don't\n> > have to be a member of every list, but, yes, it is required...\n> > \n> > Marc G. Fournier ICQ#7615664 IRC Nick: Scrappy\n> > Systems Administrator @ hub.org\n> > primary: [email protected] secondary: scrappy@{freebsd|postgresql}.org\n> \n> -- \n> Chris Bitmead\n> mailto:[email protected]\n> http://www.techphoto.org - Photography News, Stuff that Matters\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Wed, 16 Jun 1999 06:12:55 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "On Wed, 16 Jun 1999, Bruce Momjian wrote:\n\n> > At 02:16 PM 6/16/99 +1000, Chris Bitmead wrote:\n> > >\n> > >Is it really necessary that the mailing lists block \"non-member\n> > >submissions\"?. I have several email addresses and this is rather\n> > >inconvenient.\n> > \n> > In my experiences with other mailing lists - yes. If the\n> > mailing list address shows up anywhere accessible by \n> > web spiders, it will be found, and the list will be hit\n> > by spam.\n> > \n> > So I guess the question is really \"which is worse, getting\n> > hit by occassional spam or having to post from a subscribed\n> > e-mail address?\"\n> > \n> > I have two e-mail addresses, one which I use for personal\n> > e-mail and lists, so the answer's easy for me. Keep it\n> > closed to subscribers.\n> > \n> > If no one anywhere has the e-mail address of the list on\n> > a spider-accesible web page, then opening it *might*\n> > work unless...\n> > \n> > Anyone ever posts the list address to Usenet.\n> \n> I think there is a way to add all your e-mail addresses to the list\n> without getting mail to each address. Not sure how, though.\n\nSubscribe all addresses to loopback. It doesn't send mail out, it's \njust a place for majordomo to look for ok addresses.\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n\n", "msg_date": "Wed, 16 Jun 1999 06:48:45 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "Thus spake Chris Bitmead\n> Is it really necessary that the mailing lists block \"non-member\n> submissions\"?. I have several email addresses and this is rather\n> inconvenient.\n\nSpam is inconvenient. The term for mailing lists that don't do this\nsort of blocking is \"spam amplifier.\"\n\n-- \nD'Arcy J.M. Cain <darcy@{druid|vex}.net> | Democracy is three wolves\nhttp://www.druid.net/darcy/ | and a sheep voting on\n+1 416 424 2871 (DoD#0082) (eNTP) | what's for dinner.\n", "msg_date": "Wed, 16 Jun 1999 08:00:55 -0400 (EDT)", "msg_from": "\"D'Arcy\" \"J.M.\" Cain <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "On Wed, 16 Jun 1999, Chris Bitmead wrote:\n\n> Well I reckon there should be a pseudo-subscription mechanism, where you\n> can register yourself as a contributor without actually getting a copy\n> of the email. I could subscribe under my several email addresses just so\n> that I can mail from all accounts but I don't really want to receive X\n> copies of everything. Is there a way to subscribe without getting\n> anything sent? 
A kind of suspended account I guess you would call it.\n> \n\n> > There is a pgsql-loopback address you can subscribe to, so that you\n> > don't have to be a member of every list, but, yes, it is required...\n\n\n> The Hermit Hacker wrote:\n> > \n> > On Wed, 16 Jun 1999, Chris Bitmead wrote:\n> > \n> > >\n> > > Is it really necessary that the mailing lists block \"non-member\n> > > submissions\"?. I have several email addresses and this is rather\n> > > inconvenient.\n> > \n> > There is a pgsql-loopback address you can subscribe to, so that you don't\n> > have to be a member of every list, but, yes, it is required...\n> > \n> > Marc G. Fournier ICQ#7615664 IRC Nick: Scrappy\n> > Systems Administrator @ hub.org\n> > primary: [email protected] secondary: scrappy@{freebsd|postgresql}.org\n> \n> -- \n> Chris Bitmead\n> mailto:[email protected]\n> http://www.techphoto.org - Photography News, Stuff that Matters\n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Wed, 16 Jun 1999 10:19:01 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "\"D'Arcy\" \"J.M.\" Cain <[email protected]> writes:\n> Spam is inconvenient. The term for mailing lists that don't do this\n> sort of blocking is \"spam amplifier.\"\n\nAnd, in fact, we were getting spammed pretty heavily until the blocking\nsolution was put in place last summer. See the mailing list archives.\nWe've been mostly spam-free since then, though.\n\nI for one would object to any thought of taking out the block. The\nloopback list seems to me to solve any problems that legitimate users\nwould have ...\n\n\t\t\tregards, tom lane\n", "msg_date": "Wed, 16 Jun 1999 10:13:15 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists " }, { "msg_contents": "At 9:16 PM -0700 6/15/99, Chris Bitmead wrote:\n>Is it really necessary that the mailing lists block \"non-member\n>submissions\"?. I have several email addresses and this is rather\n>inconvenient.\n\nI don't know how postgres does it exactly, but NetBSD has open lists and\nsome aggressive kind of spam filtering based on known spammers. I think\nit's been very nice to have discussions, for example, which span both the\[email protected] list and e.g. a MkLinux list. The spam filtering\nseems to be very effective since I only see a piece of spam on the netbsd\nlists once every 3 months or so. I would suggest you consider cooperating\nwith them to use whatever mechanism they use.\n\nAs another specific example there was a person on port-mac68k who was\ngetting postgres up on a Mac 68k system and having some problems. I tried\nto cross-post my responses to the postgres-ports lists, but they never\nappeared. I was doing it from this very email address on this very\nmachine. Aside from the 'now' - 'current' problem I reported earlier, his\nwere mostly related to interactions among the NetBSD installation, the\nNetBSD package system and our install instructions rather than to any\nfundamental deficiencies in postgres. However I think he had valid\nproblems which I think the Postgres documenters could address, and which\nwould improve our product. 
I think it is unfortunate that I was not able\nto involve one of the postgres lists in the discussion.\n\nSignature failed Preliminary Design Review.\nFeasibility of a new signature is currently being evaluated.\[email protected], or [email protected]\n", "msg_date": "Wed, 16 Jun 1999 16:25:15 -0700", "msg_from": "\"Henry B. Hotz\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "On Wed, 16 Jun 1999, Henry B. Hotz wrote:\n\n> As another specific example there was a person on port-mac68k who was\n> getting postgres up on a Mac 68k system and having some problems. I tried\n> to cross-post my responses to the postgres-ports lists, but they never\n> appeared.\n\nCurious about this, since if its rejected as \"non-member submission\", it\nwill get sent back to you...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Thu, 17 Jun 1999 09:23:34 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "This is a test to make sure I really can post to the ports list.\n\nAt 5:23 AM -0700 6/17/99, The Hermit Hacker wrote:\n>On Wed, 16 Jun 1999, Henry B. Hotz wrote:\n>\n>> As another specific example there was a person on port-mac68k who was\n>> getting postgres up on a Mac 68k system and having some problems. I tried\n>> to cross-post my responses to the postgres-ports lists, but they never\n>> appeared.\n>\n>Curious about this, since if its rejected as \"non-member submission\", it\n>will get sent back to you...\n>\n\nI didn't get a bounce message.\n\nThe ports list should allow bug reports from anybody. Also the rest of\nwhat I said still applies: I think it is nice to allow cross-posted\ndiscussions among normally unrelated lists (the person with the problem\nwouldn't have been a known subscriber and someone with a solution might be\na subscriber only of the non-postgres lists) and NetBSD has (or had a year\nago) a filtering mechanism that allows it while still effectively blocking\nspam.\n\nSignature failed Preliminary Design Review.\nFeasibility of a new signature is currently being evaluated.\[email protected], or [email protected]\n", "msg_date": "Thu, 17 Jun 1999 11:35:37 -0700", "msg_from": "\"Henry B. Hotz\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres mailing lists" }, { "msg_contents": "What is the status of nested structures?\nLike\n\nCREATE TABLE foo (x int4, y text);\nCREATE TABLE bar (z foo, y foo);\n\nPostgres doesn't complain but then again it doesn't seem to do anything\ngood with it. I can't remember seeing it mentioned in the docs anywhere\nbut I had the idea from somewhere that the postgres of old supported it.\n", "msg_date": "Thu, 24 Jun 1999 17:16:23 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": false, "msg_subject": "Nested structures" }, { "msg_contents": "Using the very latest CVS I'm encountering a bug in SUBSELECTs.\nThis query returns 22 rows...\n \nSELECT id,title FROM question;\n\nThis query returns 15 rows...\n\nSELECT id,title FROM question WHERE question.id IN (SELECT webobject\nFROM comment);\n\nTherefore I would expect this query to return 7 rows. 
Instead it returns\nnone....\n\nSELECT id,title FROM question WHERE question.id NOT IN (SELECT webobject\nFROM comment);\n\nI've tried it with a newly dumped and created database and it still\nhappens. However I can't get it to happen on a new and empty database\nwith simple data. In other words I can't come up with a simple test\ncase. All I can think to do therefore is put my data in a file for\ndownload. It is 100k.\n\nftp://ftp.tech.com.au/pub/datadump.gz\n\nAs an aside, while creating this dump I tried to load it into another\ndatabase and drop a few tables to make it smaller. I then tried to dump\nit again and got the following error...\n\ndumpRules(): SELECT failed for table productv. Explanation from\nbackend: 'ERROR: cache lookup of attribute 1 in relation 1864370 failed\n\n-- \nChris Bitmead\nmailto:[email protected]\n", "msg_date": "Sat, 26 Jun 1999 00:58:20 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "Severe SUBSELECT bug in 6.5 CVS" }, { "msg_contents": "\nOk, I've come up with a test case now. It's got to do with nulls so I\ndon't know if someone's going to come back and say that this is the way\nit's meant to work. It sure doesn't seem intuitive but perhaps someone\ncan tell me if it's correct behaviour. I feel sure it can't be because\nit means one spurious record in the database could destroy lots of\npreviously working queries. In other words you could have a whole lot of\nqueries that work. Then if some joker puts a record in the database with\na null, all the other records will no longer be returned. Anyway, here\nis the simple test case...\n\n\nhttpd=> create table a (i int, aa text); \nCREATE\nhttpd=> create table b (i int, bb text);\nCREATE\nhttpd=> insert into a values(1, 'foo');\nINSERT 1878534 1\nhttpd=> insert into b values(null, 'bar');\nINSERT 1878535 1\nhttpd=> select * from a where i not in (select i from b);\ni|aa\n-+--\n(0 rows)\n\nI would expect the single record in a to be returned here. Imagine I\nhave thousands of records in the database that this query returns. Then\nsomeone adds a record to b with a null. Now all those previous return\nvalues will no longer be returned. Seems really dangerous but maybe\nthat is how nulls work???\n\n\n\nChris Bitmead wrote:\n> \n> Using the very latest CVS I'm encountering a bug in SUBSELECTs.\n> This query returns 22 rows...\n> \n> SELECT id,title FROM question;\n> \n> This query returns 15 rows...\n> \n> SELECT id,title FROM question WHERE question.id IN (SELECT webobject\n> FROM comment);\n> \n> Therefore I would expect this query to return 7 rows. Instead it returns\n> none....\n> \n> SELECT id,title FROM question WHERE question.id NOT IN (SELECT webobject\n> FROM comment);\n> \n> I've tried it with a newly dumped and created database and it still\n> happens. However I can't get it to happen on a new and empty database\n> with simple data. In other words I can't come up with a simple test\n> case. All I can think to do therefore is put my data in a file for\n> download. It is 100k.\n> \n> ftp://ftp.tech.com.au/pub/datadump.gz\n> \n> As an aside, while creating this dump I tried to load it into another\n> database and drop a few tables to make it smaller. I then tried to dump\n> it again and got the following error...\n> \n> dumpRules(): SELECT failed for table productv. 
Explanation from\n> backend: 'ERROR: cache lookup of attribute 1 in relation 1864370 failed\n> \n> --\n> Chris Bitmead\n> mailto:[email protected]\n\n-- \nChris Bitmead\nmailto:[email protected]\n", "msg_date": "Sun, 27 Jun 1999 01:29:45 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Severe SUBSELECT bug in 6.5 CVS" }, { "msg_contents": "Chris Bitmead <[email protected]> writes:\n> httpd=> select * from a where i not in (select i from b);\n> [ returns nothing if b contains any nulls in column i ]\n\nOf course, what's happening here is that the NOT IN is (in effect)\ntransformed to\n\ta.i != b.i1 AND a.i != b.i2 AND a.i != b.i3 ...\n(writing i1, i2, ... for the values extracted from b). Then, since\nany comparison involving NULL returns FALSE, the where-clause fails\nfor all values of a.i.\n\nI think this actually is a bug, not because it's wrong for \"x != NULL\"\nto be false, but because the SQL spec defines \"a NOT IN t\" as equivalent\nto \"NOT (a IN t)\". IN is implemented as\n\ta.i = b.i1 OR a.i = b.i2 OR a.i = b.i3 ...\nwhich will effectively ignore nulls in b --- it'll return true if and\nonly if a.i matches one of the non-null values in b. Our implementation\nfails to maintain the equivalence that NOT IN is the negation of this.\n\nIt appears to me that to follow the SQL spec, a NULL found in a.i\nshould return NULL for both IN and NOT IN (the spec appears to say that\nthe result of IN is \"unknown\" in that case, and we are using NULL to\nrepresent \"unknown\"):\n c) If the implied <comparison predicate> is true for at least\n one row RT in T, then \"R <comp op> <some> T\" is true.\n d) If T is empty or if the implied <comparison predicate> is\n false for every row RT in T, then \"R <comp op> <some> T\" is\n false.\n e) If \"R <comp op> <quantifier> T\" is neither true nor false,\n then it is unknown.\n(recall that null compared to anything yields unknown, not false).\nI don't believe we currently have that behavior, but it seems\nreasonable.\n\nMore subtly, it looks like for a non-null a.i, IN should return TRUE\nif there is a match in b, even if b also contains nulls (fine), but if\nthere is no match in b and b contains nulls then the spec seems to\nrequire NULL, *not* FALSE, to be returned! I'm not sure I like that\nconclusion...\n\nIn the meantime, a workaround for Chris is to use NOT (i IN ...) instead\nof NOT IN. That should work as he expects, at least for nulls in b.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 26 Jun 1999 13:22:50 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Severe SUBSELECT bug in 6.5 CVS " }, { "msg_contents": "> Chris Bitmead <[email protected]> writes:\n> > httpd=> select * from a where i not in (select i from b);\n> > [ returns nothing if b contains any nulls in column i ]\n> \n> Of course, what's happening here is that the NOT IN is (in effect)\n> transformed to\n> \ta.i != b.i1 AND a.i != b.i2 AND a.i != b.i3 ...\n> (writing i1, i2, ... for the values extracted from b). Then, since\n> any comparison involving NULL returns FALSE, the where-clause fails\n> for all values of a.i.\n> \n> I think this actually is a bug, not because it's wrong for \"x != NULL\"\n> to be false, but because the SQL spec defines \"a NOT IN t\" as equivalent\n> to \"NOT (a IN t)\". 
IN is implemented as\n> \ta.i = b.i1 OR a.i = b.i2 OR a.i = b.i3 ...\n> which will effectively ignore nulls in b --- it'll return true if and\n> only if a.i matches one of the non-null values in b. Our implementation\n> fails to maintain the equivalence that NOT IN is the negation of this.\n> \n> It appears to me that to follow the SQL spec, a NULL found in a.i\n> should return NULL for both IN and NOT IN (the spec appears to say that\n> the result of IN is \"unknown\" in that case, and we are using NULL to\n> represent \"unknown\"):\n\nI would be interested to see how other databases handle this.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 26 Jun 1999 14:55:03 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Severe SUBSELECT bug in 6.5 CVS" }, { "msg_contents": "Bruce Momjian ha scritto:\n\n> > Chris Bitmead <[email protected]> writes:\n> > > httpd=> select * from a where i not in (select i from b);\n> > > [ returns nothing if b contains any nulls in column i ]\n> >\n> > Of course, what's happening here is that the NOT IN is (in effect)\n> > transformed to\n> > a.i != b.i1 AND a.i != b.i2 AND a.i != b.i3 ...\n> > (writing i1, i2, ... for the values extracted from b). Then, since\n> > any comparison involving NULL returns FALSE, the where-clause fails\n> > for all values of a.i.\n> >\n> > I think this actually is a bug, not because it's wrong for \"x != NULL\"\n> > to be false, but because the SQL spec defines \"a NOT IN t\" as equivalent\n> > to \"NOT (a IN t)\". IN is implemented as\n> > a.i = b.i1 OR a.i = b.i2 OR a.i = b.i3 ...\n> > which will effectively ignore nulls in b --- it'll return true if and\n> > only if a.i matches one of the non-null values in b. 
Our implementation\n> > fails to maintain the equivalence that NOT IN is the negation of this.\n> >\n> > It appears to me that to follow the SQL spec, a NULL found in a.i\n> > should return NULL for both IN and NOT IN (the spec appears to say that\n> > the result of IN is \"unknown\" in that case, and we are using NULL to\n> > represent \"unknown\"):\n>\n> I would be interested to see how other databases handle this.\n>\n\n----------------------------------------------\ncreate table a (i int, aa char(10));\ncreate table b (i int, bb char(10));\ninsert into a values(1, 'foo');\ninsert into b values(null, 'bar');\nselect * from a where i not in (select i from b);\n-----------------------------------------------\nI tried the above script on:\n\n Informix-SE\n Oracle8\n\nand both of them return 0 rows, like PostgreSQL.\n\n______________________________________________________________\nPostgreSQL 6.5.0 on i586-pc-linux-gnu, compiled by gcc 2.7.2.3\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJose'\n\n\n", "msg_date": "Mon, 28 Jun 1999 14:52:42 +0200", "msg_from": "=?iso-8859-1?Q?Jos=E9?= Soares <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Severe SUBSELECT bug in 6.5 CVS" }, { "msg_contents": "> > > It appears to me that to follow the SQL spec, a NULL found in a.i\n> > > should return NULL for both IN and NOT IN (the spec appears to say that\n> > > the result of IN is \"unknown\" in that case, and we are using NULL to\n> > > represent \"unknown\"):\n> >\n> > I would be interested to see how other databases handle this.\n> >\n> \n> ----------------------------------------------\n> create table a (i int, aa char(10));\n> create table b (i int, bb char(10));\n> insert into a values(1, 'foo');\n> insert into b values(null, 'bar');\n> select * from a where i not in (select i from b);\n> -----------------------------------------------\n> I tried the above script on:\n> \n> Informix-SE\n> Oracle8\n> \n> and both of them return 0 rows, like PostgreSQL.\n> \n\nYes, this is how I remembered Informix doing it. Returning a NULL in\nthe subselect does not match anything, so hopefully we don't have a bug.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 28 Jun 1999 14:39:28 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Severe SUBSELECT bug in 6.5 CVS" }, { "msg_contents": "Bruce Momjian wrote:\n\n> > Informix-SE\n> > Oracle8\n> >\n> > and both of them return 0 rows, like PostgreSQL.\n> >\n> \n> Yes, this is how I remembered Informix doing it. \n> Returning a NULL in\n> the subselect does not match anything, so hopefully we \n> don't have a bug.\n\nWhat is the general policy? Follow the SQL standard, or do what all the\nother databases do?\n", "msg_date": "Tue, 29 Jun 1999 09:51:40 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Severe SUBSELECT bug in 6.5 CVS" }, { "msg_contents": "Several problems - Java and CVS.\n\nCVS has stopped working for me. I get the error...\nFatal error, aborting.\n: no such user\n\nI've tried logging in and out to no avail. It was working for me before.\nAs an aside I did an strace cvs update and saw \"I love you\" in the\ntrace. (??!)\n\nJava - I tried to build JDBC to teach myself Java. I'm getting the\nfollowing build errors. 
While I'm only teaching myself Java the brackets\ndon't even seem to match. I'm using Java 1.2 Linux.\n\njavac postgresql/Driver.java\npostgresql/Driver.java:107: Identifier expected.\n } catch(PSQLException(ex1) {\n ^\npostgresql/Driver.java:111: 'catch' without 'try'.\n } catch(Exception ex2) {\n ^\n2 errors\nmake[1]: *** [postgresql/Driver.class] Error 1\nmake[1]: Leaving directory\n`/usr/local/src/postgres-cvs/pgsql/src/interfaces/jdbc'\nmake: *** [all] Error 2\n", "msg_date": "Tue, 06 Jul 1999 22:24:36 +1000", "msg_from": "Chris Bitmead <[email protected]>", "msg_from_op": false, "msg_subject": "CVS, Java etc" }, { "msg_contents": "Chris Bitmead wrote:\n> \n> \n> CVS has stopped working for me. I get the error...\n> Fatal error, aborting.\n> : no such user\n\nI have been seeing this as well.\nI started seeing this just after doing a restore of my hard drive,\nso I thought it was just me. Anybody got any clues?\n\n-- \n\nMark Hollomon\[email protected]\n", "msg_date": "Wed, 07 Jul 1999 08:10:27 -0400", "msg_from": "\"Mark Hollomon\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] CVS, Java etc" }, { "msg_contents": "Mark Hollomon wrote:\n> \n> Chris Bitmead wrote:\n> >\n> >\n> > CVS has stopped working for me. I get the error...\n> > Fatal error, aborting.\n> > : no such user\n> \n> I have been seeing this as well.\n> I started seeing this just after doing a restore of my hard drive,\n> so I thought it was just me. Anybody got any clues?\n\nIt is failing for me as well. Sorry, no clue.\n\nClark\n", "msg_date": "Wed, 07 Jul 1999 08:25:36 -0400", "msg_from": "Clark Evans <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] CVS, Java etc" } ]
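One more workaround sketch for the NOT IN behaviour discussed earlier in this thread, in addition to Tom's NOT (i IN ...) form: strip the NULLs out of the subquery explicitly. The IS NOT NULL condition is an addition here, not something proposed in the thread, but it restores the intuitive result because the list being tested then contains only comparable values.

    SELECT * FROM a
     WHERE i NOT IN (SELECT i FROM b WHERE i IS NOT NULL);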
[ { "msg_contents": "We have an application which imports a significant\namount of data from a mainframe on a nightly basis.\nThe application which does this wraps its code in \na BEGIN/END transacation. During the process, \nindexes are dropped and recreated. If the process is \nnot successful, the indexes are left in an unusable \nstate. Postgres always yeilds a:\n\nERROR: Index k_vendors2 is not a btree\n\nmessage. Therefore, we changed the code to look like\nthis:\n\n1. BEGIN\n ... do some stuff ...\n END\n\n2. DROP index k_foo;\n COPY foo from 'import.txt'\n CREATE index k_foo;\n\n3. BEGIN\n ... do some stuff ...\n END\n\nThe application, due to errors in the import data, \nis not completing to termination. It is exitting\nin block 3. However, the indexes on foo are left\ncorrupted. We've had to go in and recreate them\nby hand. The DROP/COPY/CREATE sequence in step 2\nis performed on multiple tables...and all of them are\ncorrupted following the abnormal termination of the\nprocess.\n\nAny help with this issue would be greatly appreciated.\n\nMarcus Mascari ([email protected])\n\n\n\n\n\n_________________________________________________________\nDo You Yahoo!?\nGet your free @yahoo.com address at http://mail.yahoo.com\n\n", "msg_date": "Wed, 9 Jun 1999 23:24:23 -0700 (PDT)", "msg_from": "Marcus Mascari <[email protected]>", "msg_from_op": true, "msg_subject": "Index corruption under PostgreSQL 6.5beta3" }, { "msg_contents": "Marcus Mascari wrote:\n> \n> The application, due to errors in the import data,\n> is not completing to termination. It is exitting\n> in block 3. However, the indexes on foo are left\n\nHow is it exitting? ERROR, FATAL, ABORT?\n\n> corrupted. We've had to go in and recreate them\n> by hand. The DROP/COPY/CREATE sequence in step 2\n> is performed on multiple tables...and all of them are\n> corrupted following the abnormal termination of the\n> process.\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 14:29:32 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Index corruption under PostgreSQL 6.5beta3" } ]
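A sketch of one way to keep the live table's index out of the failure path in the nightly import described above; none of this comes from the thread. It assumes SELECT ... INTO can be used to clone foo's structure, foo_stage is a made-up staging name, and the indexed column below is a stand-in for the real column list; whether the final step appends or replaces rows is application-specific.

    SELECT * INTO TABLE foo_stage FROM foo WHERE 1 = 0;
    COPY foo_stage FROM 'import.txt';
    -- only reached when the COPY above has succeeded:
    DROP INDEX k_foo;
    INSERT INTO foo SELECT * FROM foo_stage;
    CREATE INDEX k_foo ON foo (name);   -- "name" is a stand-in; use the original column list
    DROP TABLE foo_stage;

A bad import file now fails while loading foo_stage, before k_foo has been dropped.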
[ { "msg_contents": "Hi all\n\nI hope no one finds this the wrong Mailing List, but I would like to\nrequest/propose a change in PostGres. (I may even hack this one in\nmyself if no one objects.)\n\nIt seems that ANY error in postgres voids the entire transaction. I.e.\nif an error occurs, one must immediately commit decide to commit or\nrollback the current transaction. No further updates will be accepted\nuntil this is done. I am almost postive that most commercial databases\ndo not behave this way. (I do not think that DB2 does, but I have had\nvery little to do with other databases since discovering Postgresql! ;-)\nAt the VERY least it would be nice to have a user option whereby errors\nwould not affect the current transaction state.\n\n\n\n--\n===\nPeace,\nPeter\n\nWe are Microsoft of Borg, you will be assimilated!!!\nResistance is fut... ***BZZZRT*** THUD!!!\n[General Protection Fault in MSBorg32.DLL]\nPlease contact the Vendor of this Borg for more Information\n\n--\n", "msg_date": "Thu, 10 Jun 1999 06:45:50 EDT", "msg_from": "\"Peter Garner\" <[email protected]>", "msg_from_op": true, "msg_subject": "transaction rollbacks on error" }, { "msg_contents": "Peter Garner wrote:\n> \n> Hi all\n> \n> I hope no one finds this the wrong Mailing List, but I would like to\n> request/propose a change in PostGres. (I may even hack this one in\n> myself if no one objects.)\n> \n> It seems that ANY error in postgres voids the entire transaction. I.e.\n> if an error occurs, one must immediately commit decide to commit or\n> rollback the current transaction. No further updates will be accepted\n> until this is done. I am almost postive that most commercial databases\n> do not behave this way. (I do not think that DB2 does, but I have had\n> very little to do with other databases since discovering Postgresql! ;-)\n> At the VERY least it would be nice to have a user option whereby errors\n> would not affect the current transaction state.\n\nI'm going to implement savepoints (and _implicit_ savepoints)\nin 6.6.\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 18:57:22 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] transaction rollbacks on error" }, { "msg_contents": ">From man begin:\n\n This command begins a user transaction which Postgres will\n guarantee is serializable with respect to all concurrently\n executing transactions. Postgres uses two-phase locking\n to perform this task. If the transaction is committed,\n Postgres will ensure that all updates are done or none of\n them are done. Transactions have the standard ACID\n (atomic, consistent, isolatable, and durable) property.\n\n\nPeter Garner ha scritto:\n\n> Hi all\n>\n> I hope no one finds this the wrong Mailing List, but I would like to\n> request/propose a change in PostGres. (I may even hack this one in\n> myself if no one objects.)\n>\n> It seems that ANY error in postgres voids the entire transaction. I.e.\n> if an error occurs, one must immediately commit decide to commit or\n> rollback the current transaction. No further updates will be accepted\n> until this is done. I am almost postive that most commercial databases\n> do not behave this way. (I do not think that DB2 does, but I have had\n> very little to do with other databases since discovering Postgresql! 
;-)\n> At the VERY least it would be nice to have a user option whereby errors\n> would not affect the current transaction state.\n>\n> --\n> ===\n> Peace,\n> Peter\n>\n> We are Microsoft of Borg, you will be assimilated!!!\n> Resistance is fut... ***BZZZRT*** THUD!!!\n> [General Protection Fault in MSBorg32.DLL]\n> Please contact the Vendor of this Borg for more Information\n>\n> --\n\n--\n______________________________________________________________\nPostgreSQL 6.5.0 on i586-pc-linux-gnu, compiled by gcc 2.7.2.3\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nJose'\n\n\n", "msg_date": "Thu, 10 Jun 1999 14:50:57 +0200", "msg_from": "=?iso-8859-1?Q?Jos=E9?= Soares <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] transaction rollbacks on error" }, { "msg_contents": "Jos� Soares wrote:\n> \n> >From man begin:\n> \n> This command begins a user transaction which Postgres will\n> guarantee is serializable with respect to all concurrently\n ^^^^^^^^^^^^\n> executing transactions. Postgres uses two-phase locking\n ^^^^^^^^^^^^^^^^^\n> to perform this task. If the transaction is committed,\n> Postgres will ensure that all updates are done or none of\n> them are done. Transactions have the standard ACID\n> (atomic, consistent, isolatable, and durable) property.\n\nOps. This man page must be changed too. I'll do it tomorrow.\n\nSeems that it's too late to update begin.sgml as well?\nThomas?\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 21:33:21 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] transaction rollbacks on error" }, { "msg_contents": "> Seems that it's too late to update begin.sgml as well?\n> Thomas?\n\nIt is ok to update since it is in docs not yet formatted. I'll start\nthe User's Guide tomorrow, or perhaps tonight if I see your changes.\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Thu, 10 Jun 1999 14:29:25 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] transaction rollbacks on error" }, { "msg_contents": "Thomas Lockhart wrote:\n> \n> > Seems that it's too late to update begin.sgml as well?\n> > Thomas?\n> \n> It is ok to update since it is in docs not yet formatted. I'll start\n> the User's Guide tomorrow, or perhaps tonight if I see your changes.\n\nPlease do it tomorrow - I'm going home soon...\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 22:32:45 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] transaction rollbacks on error" } ]
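A small psql-style illustration of the behaviour under discussion (the table and values are made up): once any statement inside the block fails, the remaining statements are refused and the closing COMMIT discards the whole block, so today the only recovery is to end the transaction and redo the work in a fresh one. The savepoints mentioned above would allow rolling back to a marker inside the block instead.

    CREATE TABLE t (i int4);
    BEGIN;
    INSERT INTO t VALUES (1);
    INSERT INTO t VALUES ('oops');   -- type error; the transaction is now aborted
    INSERT INTO t VALUES (2);        -- refused until the block is closed
    COMMIT;                          -- nothing from this block is stored
    BEGIN;
    INSERT INTO t VALUES (1);
    INSERT INTO t VALUES (2);
    COMMIT;                          -- the retry in a new transaction succeeds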
[ { "msg_contents": "Hi, all.\n\nI have found what seems to be a 64/32-bit problem while testing the latest\ncvsup'ed source on an alpha running Digital Unix 4.0d. After fixing a bug\nin the 'money' type that prevented the rules regression test from working\n(I sent a patch to pgsql-patches but was told by Bruce that it will have\nto wait after the release), the test ran up to a point where the backend\ndied dumping core. Here is what I've seen using gdb:\n\npgbeta:nodes> gdb /usr/local/pgsql.beta/bin/postgres\n/usr/local/pgsql.beta/data/base/regression/core \nGDB is free software and you are welcome to distribute copies of it\n under certain conditions; type \"show copying\" to see the conditions.\nThere is absolutely no warranty for GDB; type \"show warranty\" for details.\nGDB 4.16 (alpha-dec-osf3.2), Copyright 1996 Free Software Foundation, Inc...\nCore was generated by `postgres'.\nProgram terminated with signal 11, Segmentation fault.\nReading symbols from /usr/shlib/libm.so...done.\nReading symbols from /usr/shlib/libcurses.so...done.\nReading symbols from /usr/shlib/libc.so...done.\nReading symbols from /usr/lib/nls/loc//es_ES.ISO8859-1...done.\n#0 replace_opid (oper=0x4015aad0) at nodeFuncs.c:95\n95 oper->opid = get_opcode(oper->opno);\n(gdb) where\n#0 replace_opid (oper=0x4015aad0) at nodeFuncs.c:95\n#1 0x1201208b0 in fix_opid (clause=0x14015aaa0) at clauses.c:554\n#2 0x12011e214 in preprocess_targetlist (tlist=0x14015a4c0, command_type=2, \n result_relation=5, range_table=0x14015b1b0) at preptlist.c:84\n#3 0x120118808 in union_planner (parse=0x14015a100) at planner.c:162\n#4 0x1201185d4 in planner (parse=0x14015a100) at planner.c:83\n#5 0x120159d90 in pg_parse_and_plan (\n query_string=0x11fffa918 \"update rtest_v1 set a = rtest_t3.a + 20\nwhere b = rtest_t3.b;\", typev=0x0, nargs=0, queryListP=0x11fffa868,\ndest=Remote, \n aclOverride=0 '\\000') at postgres.c:590\n#6 0x12015a034 in pg_exec_query_dest (\n query_string=0x11fffa918 \"update rtest_v1 set a = rtest_t3.a + 20\nwhere b = rtest_t3.b;\", dest=Remote, aclOverride=0 '\\000') at\npostgres.c:678\n#7 0x120159f80 in pg_exec_query (\n query_string=0x11fffa918 \"update rtest_v1 set a = rtest_t3.a + 20\nwhere b = rtest_t3.b;\") at postgres.c:656\n#8 0x12015baa0 in PostgresMain (argc=10, argv=0x11fffee90, real_argc=9, \n real_argv=0x11ffffc28) at postgres.c:1658\n#9 0x12012d02c in DoBackend (port=0x1400d9a00) at postmaster.c:1628\n#10 0x12012c778 in BackendStartup (port=0x1400d9a00) at postmaster.c:1373\n#11 0x12012b5d8 in ServerLoop () at postmaster.c:823\n#12 0x12012ae00 in PostmasterMain (argc=9, argv=0x11ffffc28)\n at postmaster.c:616\n#13 0x1200e0b30 in main (argc=9, argv=0x11ffffc28) at main.c:93\n(gdb) \n\nAlthough my knowledge of postgres internals is null, I have done a bit of\ninvestigation. 'replace_opid' is called from 'fix_opid', line 554 of file \nbackend/optimizer/utils/clauses.c:\n\nreplace_opid((Oper *) ((Expr *) clause)->oper);\n\n'clause' is a pointer to Node. The actual value is tagged as T_Expr, so it\nseems to be used right. If you look at the contents of 'clause':\n\n(gdb) p *((Expr *) clause)\n$3 = {type = T_Expr, typeOid = 23, opType = OP_EXPR, oper = 0x4015aad0, \n args = 0x14015ab30}\n\nHere is the problem. ((Expr*) clause)->oper is a pointer to Node, which\n(from the name of the field) I think that should be tagged as T_Oper. But,\nif you look carefully, it has the same value as 'clause' but *truncated to\n32 bits*. 
This is a problem that I've seen many times when you store a\npointer in an int (which still is 32 bits long in an alpha) and later you\nuse it again as a pointer.\n\nI don't know if ((Expr*) clause)->oper should point to itself as it seems\nto do, but certainly its value is passed though an int variable and is\ntruncated.\n\nIf someone points me to the right place to look, I can play a bit more\nwith gdb and try to find the cause. You can find the query that crashes\nthe backend at the stack trace above.\n\nCheers,\n\n\tPedro.\n\n-- \n-------------------------------------------------------------------\nPedro Jos� Lobo Perea Tel: +34 91 336 78 19\nCentro de C�lculo Fax: +34 91 331 92 29\nE.U.I.T. Telecomunicaci�n e-mail: [email protected]\nUniversidad Polit�cnica de Madrid\nCtra. de Valencia, Km. 7 E-28031 Madrid - Espa�a / Spain\n\n", "msg_date": "Thu, 10 Jun 1999 09:47:27 +0200 (MET DST)", "msg_from": "\"Pedro J. Lobo\" <[email protected]>", "msg_from_op": true, "msg_subject": "Postgres dies in the rules regression test (64-bit problem)" }, { "msg_contents": "\"Pedro J. Lobo\" <[email protected]> writes:\n> #0 replace_opid (oper=0x4015aad0) at nodeFuncs.c:95\n> #1 0x1201208b0 in fix_opid (clause=0x14015aaa0) at clauses.c:554\n\n> (gdb) p *((Expr *) clause)\n> $3 = {type = T_Expr, typeOid = 23, opType = OP_EXPR, oper = 0x4015aad0, \n> args = 0x14015ab30}\n\n> I don't know if ((Expr*) clause)->oper should point to itself as it seems\n> to do,\n\nIt shouldn't ever point to itself, but it looks to me like it's not ---\nthe low order bits of clause are ...aaa0 and oper is ...aad0.\n\n> but certainly its value is passed though an int variable and is\n> truncated.\n\nLooks that way doesn't it :-(. I did some quick scratching around in\nthe sources and couldn't find any obvious mistakes of that ilk. Most of\nthe code that touches Oper nodes would have been exercised heavily long\nbefore we get to the rules regression test, so I'm not sure what to think.\n\n> If someone points me to the right place to look, I can play a bit more\n> with gdb and try to find the cause.\n\nThe Expr node was presumably made by make_op() in\nbackend/parser/parse_oper.c, although the tree might have been copied at\nleast once using the functions in backend/nodes/copyfuncs.c. Good luck!\n\n\t\t\tregards, tom lane\n", "msg_date": "Thu, 10 Jun 1999 16:53:48 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres dies in the rules regression test (64-bit\n\tproblem)" }, { "msg_contents": "On Thu, 10 Jun 1999, Tom Lane wrote:\n\n>\"Pedro J. Lobo\" <[email protected]> writes:\n>> #0 replace_opid (oper=0x4015aad0) at nodeFuncs.c:95\n>> #1 0x1201208b0 in fix_opid (clause=0x14015aaa0) at clauses.c:554\n>\n>> (gdb) p *((Expr *) clause)\n>> $3 = {type = T_Expr, typeOid = 23, opType = OP_EXPR, oper = 0x4015aad0, \n>> args = 0x14015ab30}\n>\n>> I don't know if ((Expr*) clause)->oper should point to itself as it seems\n>> to do,\n>\n>It shouldn't ever point to itself, but it looks to me like it's not ---\n>the low order bits of clause are ...aaa0 and oper is ...aad0.\n\nOoops, you are right! Now I am using a bigger font :-)\n\n>> but certainly its value is passed though an int variable and is\n>> truncated.\n>\n>Looks that way doesn't it :-(. I did some quick scratching around in\n>the sources and couldn't find any obvious mistakes of that ilk. 
Most of\n>the code that touches Oper nodes would have been exercised heavily long\n>before we get to the rules regression test, so I'm not sure what to think.\n\nI have also looked at the warnings that arise in the compilation process,\nand haven't found any that could be related to that.\n\n>> If someone points me to the right place to look, I can play a bit more\n>> with gdb and try to find the cause.\n>\n>The Expr node was presumably made by make_op() in\n>backend/parser/parse_oper.c, although the tree might have been copied at\n>least once using the functions in backend/nodes/copyfuncs.c. Good luck!\n\nOk, I'll let you know if/when I find something (or, more probably, when I\nhave more questions ;-)\n\nRegards,\n\n\tPedro.\n\n-- \n-------------------------------------------------------------------\nPedro Jos� Lobo Perea Tel: +34 91 336 78 19\nCentro de C�lculo Fax: +34 91 331 92 29\nE.U.I.T. Telecomunicaci�n e-mail: [email protected]\nUniversidad Polit�cnica de Madrid\nCtra. de Valencia, Km. 7 E-28031 Madrid - Espa�a / Spain\n\n", "msg_date": "Fri, 11 Jun 1999 10:19:00 +0200 (MET DST)", "msg_from": "\"Pedro J. Lobo\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres dies in the rules regression test (64-bit\n\tproblem)" }, { "msg_contents": "On Fri, 11 Jun 1999, Pedro J. Lobo wrote:\n\n>On Thu, 10 Jun 1999, Tom Lane wrote:\n>\n>>\"Pedro J. Lobo\" <[email protected]> writes:\n>>>\n>>> If someone points me to the right place to look, I can play a bit more\n>>> with gdb and try to find the cause.\n>>\n>>The Expr node was presumably made by make_op() in\n>>backend/parser/parse_oper.c, although the tree might have been copied at\n>>least once using the functions in backend/nodes/copyfuncs.c. Good luck!\n>\n>Ok, I'll let you know if/when I find something (or, more probably, when I\n>have more questions ;-)\n\nWell, I have found something. As you said, the node is made by make_op()\n(which, btw, is in parse_node.c ;-) and later copied using copyObject().\nBut those functions are correct and work as expected.\n\nI have located the point where the pointer is overwritten. 
Here is the\nstack trace:\n\n(gdb) where\n#0 ResolveNew (info=0x140169ac0, targetlist=0x140169f70, nodePtr=0x14016aac8, \n sublevels_up=0) at rewriteManip.c:719\n#1 0x12013ea38 in ResolveNew (info=0x140169ac0, targetlist=0x140169f70, \n nodePtr=0x14016ab38, sublevels_up=0) at rewriteManip.c:670\n#2 0x12013ea58 in ResolveNew (info=0x140169ac0, targetlist=0x140169f70, \n nodePtr=0x140169b80, sublevels_up=0) at rewriteManip.c:730\n#3 0x12013ead0 in FixNew (info=0x140169ac0, parsetree=0x1401692d0)\n at rewriteManip.c:753\n#4 0x12013cc5c in fireRules (parsetree=0x1401692d0, rt_index=1, \n event=CMD_UPDATE, instead_flag=0x11fffa6c0 \"\\001\", locks=0x14016a8c0, \n qual_products=0x11fffa6b8) at rewriteHandler.c:2612\n#5 0x12013ce90 in RewriteQuery (parsetree=0x1401692d0, \n instead_flag=0x11fffa6c0 \"\\001\", qual_products=0x11fffa6b8)\n at rewriteHandler.c:2697\n#6 0x12013cf38 in deepRewriteQuery (parsetree=0x1401692d0)\n at rewriteHandler.c:2742\n#7 0x12013d008 in QueryRewriteOne (parsetree=0x1401692d0)\n at rewriteHandler.c:2791\n#8 0x12013d128 in BasicQueryRewrite (parsetree=0x1401692d0)\n at rewriteHandler.c:2862\n#9 0x12013d230 in QueryRewrite (parsetree=0x1401692d0)\n at rewriteHandler.c:2916\n#10 0x1200b3ce4 in ExplainQuery (query=0x1401692d0, verbose=0 '\\000', \n dest=Remote) at explain.c:68\n#11 0x12015dce0 in ProcessUtility (parsetree=0x140169110, dest=Remote)\n at utility.c:651\n#12 0x12015a138 in pg_exec_query_dest (\n query_string=0x11fffa918 \"explain update rtest_v1 set a = rtest_t3.a +\n 20 where b = rtest_t3.b;\", dest=Remote, aclOverride=0 '\\000') at\n postgres.c:727\n#13 0x120159f80 in pg_exec_query (\n query_string=0x11fffa918 \"explain update rtest_v1 set a = rtest_t3.a +\n 20 where b = rtest_t3.b;\") at postgres.c:656\n#14 0x12015baa0 in PostgresMain (argc=10, argv=0x11fffee90, real_argc=9, \n real_argv=0x11ffffc28) at postgres.c:1658\n#15 0x12012d02c in DoBackend (port=0x1400d9a00) at postmaster.c:1628\n#16 0x12012c778 in BackendStartup (port=0x1400d9a00) at postmaster.c:1373\n#17 0x12012b5d8 in ServerLoop () at postmaster.c:823\n#18 0x12012ae00 in PostmasterMain (argc=9, argv=0x11ffffc28)\n at postmaster.c:616\n#19 0x1200e0b30 in main (argc=9, argv=0x11ffffc28) at main.c:93\n(gdb)\n\nResolveNew() is a recursive function in backend/rewrite/rewriteManip.c.\nThe problem appears one time that it is called with nodePtr pointing to a\nVar node. This is the node:\n\n(gdb) p *((Var *) *nodePtr)\n$4 = {type = T_Var, varno = 4, varattno = 1, vartype = 23, vartypmod = -1, \n varlevelsup = 0, varnoold = 4, varoattno = 1}\n\nIn the beginning of the function, there is a 'switch(nodeTag(node))'\n('node' is assigned the value of '*nodePtr'), that jumps to the\ncorresponding 'case T_Var:' at line 695. There you see a call to\nFindMatchingNew() (a static function of the same c file), that returns a\npointer to a node that, from what I have seen in that function, is\nexpected to be of type Expr, and indeed it is. This pointer is stored in\na variable named 'n'. Right after that, you can see this code:\n\nif (n == NULL)\n{\n ... some code that isn't executed because n != NULL\n}\nelse\n{\n *nodePtr = copyObject(n);\n ((Var *) *nodePtr)->varlevelsup = this_varlevelsup;\n}\n\nWell, this *can't* be correct. 'n' points to a node of type Expr, and of\ncourse copyObject returns a node of that type. But that node is treated in\nthe following line as if it were of type Var! 
The value of\n'this_varlevelsup' is 0, and the offset of the field 'varlevelsup' of\nnode Var is the same as that of the field 'oper' of node Expr, and the \npointer is overwritten.\n\nI'd bet that the copy of the Expr node is not supposed to be stored in\n*nodePtr (which points to a node of type Var), but I don't know what would\nbe the right action. I've found the bug, now I'll let the wise ones fix it\n;-)\n\nThis problem has been hidden until now (for me, at least) because of a bug\nin the 'money' data type that broke the rules test before it reached that\npoint. That bug sowed up only when you had locale enabled and your\ncurrency didn't use cents (like spanish pesetas). On 32-bit systems it\nis likely that the pointer isn't overwritten, so the bug didn't showed up,\neither.\n\nCheers,\n\n\tPedro.\n\n-- \n-------------------------------------------------------------------\nPedro Jos� Lobo Perea Tel: +34 91 336 78 19\nCentro de C�lculo Fax: +34 91 331 92 29\nE.U.I.T. Telecomunicaci�n e-mail: [email protected]\nUniversidad Polit�cnica de Madrid\nCtra. de Valencia, Km. 7 E-28031 Madrid - Espa�a / Spain\n\n", "msg_date": "Fri, 11 Jun 1999 14:09:46 +0200 (MET DST)", "msg_from": "\"Pedro J. Lobo\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Postgres dies in the rules regression test (64-bit\n\tproblem)" }, { "msg_contents": "\"Pedro J. Lobo\" <[email protected]> writes:\n> I have located the point where the pointer is overwritten.\n\n> *nodePtr = copyObject(n);\n> ((Var *) *nodePtr)->varlevelsup = this_varlevelsup;\n\n> Well, this *can't* be correct. 'n' points to a node of type Expr,\n\nGood sleuthing! It looks like on a 32-bit machine, the overwritten\narea will be just past the last field that's part of Expr, and because\nof the memory manager's habit of rounding up object sizes that area\njust happens to be available space rather than the start of the next\nobject. So that's why we hadn't found it before.\n\nI believe the copied TLE entry could be any of several other node types\nbesides Var and Expr, so it's probably possible to see related failures\neven on a 32-bit machine.\n\nJan, what should really be happening here?\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 11 Jun 1999 09:38:09 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Postgres dies in the rules regression test (64-bit\n\tproblem)" } ]
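The corruption Pedro tracked down - a pointer pushed through a 32-bit integer somewhere and coming back without its high bits - can be reproduced outside the backend. The stand-alone sketch below (an illustration, not PostgreSQL code) shows why such a round-trip only looks harmless where sizeof(int) == sizeof(void *), i.e. why 32-bit platforms never noticed:

--- cut here ---
/* trunc_demo.c - storing a pointer in an int loses the high bits on LP64
 * machines such as the alpha in the report above.  Illustration only.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void	   *p = malloc(16);
	int			i = (int) (long) p;		/* pointer squeezed through a 32-bit int */
	void	   *q = (void *) (long) i;	/* ... and "recovered" from it           */

	printf("sizeof(int) = %d, sizeof(void *) = %d\n",
		   (int) sizeof(int), (int) sizeof(void *));
	printf("original  %p\n", p);
	printf("recovered %p\n", q);

	if (p != q)
		printf("high bits lost - the 0x14015.... -> 0x4015.... style damage "
			   "seen in the gdb output above\n");
	else
		printf("no damage this time: the allocation happened to fit in 32 bits, "
			   "which is why such bugs stay invisible on 32-bit platforms\n");

	free(p);
	return 0;
}
--- cut here ---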
[ { "msg_contents": "What's the situation with foreign keys? Do we have them in 6.5?\n\nMichael\n-- \nMichael Meskes | Go SF 49ers!\nTh.-Heuss-Str. 61, D-41812 Erkelenz | Go Rhein Fire!\nTel.: (+49) 2431/72651 | Use Debian GNU/Linux!\nEmail: [email protected] | Use PostgreSQL!\n", "msg_date": "Thu, 10 Jun 1999 11:40:27 +0200", "msg_from": "Michael Meskes <[email protected]>", "msg_from_op": true, "msg_subject": "Foreign Keys" }, { "msg_contents": "> What's the situation with foreign keys? Do we have them \n> in 6.5?\n\nNot natively. You can enforce referential integrity using procedures;\nI think the contrib area has examples (but I'll bet you already knew\nthat).\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Thu, 10 Jun 1999 14:25:06 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Foreign Keys" }, { "msg_contents": "Thomas Lockhart wrote:\n> \n> > What's the situation with foreign keys? Do we have them\n> > in 6.5?\n> \n> Not natively. You can enforce referential integrity using procedures;\n> I think the contrib area has examples (but I'll bet you already knew\n> that).\n\nBut unfortunately, in 6.5 one should use SHARE/SHARE ROW EXCLISIVE\nlocks when using refint.c, as described in release notes.\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 22:37:13 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Foreign Keys" } ]
[ { "msg_contents": "While playing with --enable-locale and the like expressions I came across this \nphenomenon ('somestring' is in cyrillic - last 64 codes from 0xa0 to 0xff)\n\nSELECT key FROM t WHERE key ~* 'somestring'\n\nreturns the correct answers, properly matching upper/lower case characters - \nthis with --enable-locale, --with-mb=WIN and commenting out the USE_LOCALE \nrestrictions in gram.y. Explain shows Sequential scan, as expected...\n\nHowever\n\nSELECT key FROM t WHERE key ~* '^somestring'\n\nreturns no tuples and explain says it will use the index on key. Why is this?\n\nIf 'somestring' is ASCII characters, explain always gives sequential scan and \nin both cases returns the proper results.\n\nI am willing to do some testing if anyone has ideas how to patch the regex \ncode (I hate it :-).\n\nRegards,\nDaniel\n\n", "msg_date": "Thu, 10 Jun 1999 17:17:25 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "another locale problem" }, { "msg_contents": "> While playing with --enable-locale and the like expressions I came across this \n> phenomenon ('somestring' is in cyrillic - last 64 codes from 0xa0 to 0xff)\n> \n> SELECT key FROM t WHERE key ~* 'somestring'\n> \n> returns the correct answers, properly matching upper/lower case characters - \n> this with --enable-locale, --with-mb=WIN and commenting out the USE_LOCALE \n> restrictions in gram.y. Explain shows Sequential scan, as expected...\n> \n> However\n> \n> SELECT key FROM t WHERE key ~* '^somestring'\n> \n> returns no tuples and explain says it will use the index on key. Why is this?\n\nThat's strange. It should be seq scan in this case?\n\n> If 'somestring' is ASCII characters, explain always gives sequential scan and \n> in both cases returns the proper results.\n> \n> I am willing to do some testing if anyone has ideas how to patch the regex \n> code (I hate it :-).\n\nCan you test following case:\n\nSELECT key FROM t WHERE key ~* '^Xsomestring'\n\nwhere X is one of an ASCII character.\n---\nTatsuo Ishii\n", "msg_date": "Fri, 11 Jun 1999 00:25:21 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": ">>>Tatsuo Ishii said:\n > > SELECT key FROM t WHERE key ~* '^somestring'\n > > \n > > returns no tuples and explain says it will use the index on key. Why is th\n is?\n > \n > That's strange. It should be seq scan in this case?\n\nI forgot to mention, that if 'somestring' is all uppercase, everything works \n(the key field in the table is all uppercase). It still says index scan will \nbe used.\n\nTo summarize the problem. If key contains (equivalent cyrillic letters) 'ABC', \n'ABCD', 'DAB' and 'ABX' and the query is:\n\nSELECT key FROM t WHERE key ~* '^AB';\n\nindex scan will be used and the correct tuples ('ABC', 'ABCD' and 'ABX') will \nbe returned. If the query is\n\nSELECT key FROM t WHERE key ~* '^ab';\n\nindex scan will be used and no tuples will be returned. 
With the query\n\nSELECT key FROM t WHERE key ~* 'ab';\n\nsequential scan will be used and the correct tuples will be returned (all of \nthe above).\n\n > Can you test following case:\n > \n > SELECT key FROM t WHERE key ~* '^Xsomestring'\n > \n > where X is one of an ASCII character.\n\nExplain says it will use sequential scan and if I insert proper key in the \ntable it will be returned.\n\nDaniel\n\n", "msg_date": "Fri, 11 Jun 1999 10:06:06 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "> > > SELECT key FROM t WHERE key ~* '^somestring'\n> > > \n> > > returns no tuples and explain says it will use the index on key. Why is th\n> is?\n> > \n> > That's strange. It should be seq scan in this case?\n>\n>I forgot to mention, that if 'somestring' is all uppercase, everything works \n>(the key field in the table is all uppercase). It still says index scan will \n>be used.\n\nHmm... I am totally confused by this. Sould be a bug.\n\n>To summarize the problem. If key contains (equivalent cyrillic letters) 'ABC', \n>'ABCD', 'DAB' and 'ABX' and the query is:\n>\n>SELECT key FROM t WHERE key ~* '^AB';\n>\n>index scan will be used and the correct tuples ('ABC', 'ABCD' and 'ABX') will \n>be returned. If the query is\n>\n>SELECT key FROM t WHERE key ~* '^ab';\n>\n>index scan will be used and no tuples will be returned. With the query\n\nShould be bug.\n\n>SELECT key FROM t WHERE key ~* 'ab';\n>\n>sequential scan will be used and the correct tuples will be returned (all of \n>the above).\n\nSeems correct result.\n\n> > Can you test following case:\n> > \n> > SELECT key FROM t WHERE key ~* '^Xsomestring'\n> > \n> > where X is one of an ASCII character.\n>\n>Explain says it will use sequential scan and if I insert proper key in the \n>table it will be returned.\n\nExpected result.\n\n>From line 5388 of parser/gram.y:\n\t\t\t\t\t\t(strcmp(opname,\"~*\") == 0 && isalpha(n->val.val.str[pos])))\n\nI suspect isalpha() does not return true if n->val.val.str[pos] is a\nnon ascii char. Probably that's why \n\n\tSELECT key FROM t WHERE key ~* '^somestring'\n\ndoes not work.\n\nIf its argument is a cyrillic char and cyrillic locale enabled then\nisalpha() should return true. Can you check this?\n\nOr:\n\t\t\t\t\t\t(strcmp(opname,\"~*\")\n== 0 && isalpha((unsigned char)n->val.val.str[pos])))\n\nworks for you?\n--\nTatsuo Ishii\n", "msg_date": "Fri, 11 Jun 1999 16:42:13 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": ">>>Tatsuo Ishii said:\n[...]\n > >From line 5388 of parser/gram.y:\n > \t\t\t\t\t\t(strcmp(opname,\"~*\") == 0 && is\n alpha(n->val.val.str[pos])))\n > \n > I suspect isalpha() does not return true if n->val.val.str[pos] is a\n > non ascii char. 
Probably that's why \n[...]\n\nI have tried the following program under BSD/OS 4.0.1:\n\n--- cut here ---\n#include <string.h>\n#include <locale.h>\n#include <rune.h>\n#include <errno.h>\n\n\nmain()\n{\n int i;\n\n printf (\"%s\\n\", setlocale(LC_CTYPE, \"CP1251\"));\n printf (\"--- strcoll ---\\n\");\n printf (\"%d\\n\", strcoll(\"���\", \"���\"));\n printf (\"%d\\n\", strcoll(\"���\", \"���\"));\n printf (\"%d\\n\", strcoll(\"���\", \"���\"));\n printf (\"--- isalpha ---\\n\");\n i = '�';\n printf (\"%c, %d\\n\", i, i);\n printf (\"%d\\n\", isalpha('�'));\n printf (\"%d\\n\", isalpha('a'));\n}\n--- cut here ---\n\nWhere the cyrillic letter is the 'L' lowercase in cyrillic, the other are \nfirst characters from the alphabet....\n\nThe strcoll work always. This is because BSD/OS does NOT support the \nLC_COLLATE and strcoll is effectively strcmp (confirmed by the libc sources).\n\nThere are two cases:\n\n1. cc -o test test.c\n\nReturns 0 for isalpha() on cyrillic characers.\n\n2. cc -funsigned-char -o test test.c\n\nReturns 1 for isalpha() on cyrillic characters!\n\nIf I substitute the '�' character constant with it's code (235), isalpha() \nreturns 1.\n\nBSD/OS 4.0.1 uses gcc 2.7.2.1. Apparently somewhere in the code 'char' is used \n(I did use both --enable-locale and --with-mb=WIN). I am still searching for \nit, but the regex code is not very readable... :-(\n\nDaniel\n\n", "msg_date": "Fri, 11 Jun 1999 11:36:51 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "Ok. Now I almost certain that:\n\n\t\t\t\t\t\t(strcmp(opname,\"~*\")\n== 0 && isalpha((unsigned char)n->val.val.str[pos])))\n\nshould work for you. Can you confirm this?\n--\nTatsuo Ishii\n", "msg_date": "Fri, 11 Jun 1999 17:46:03 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "On Fri, 11 Jun 1999, Tatsuo Ishii wrote:\n\n> \n> Ok. Now I almost certain that:\n> \n> \t\t\t\t\t\t(strcmp(opname,\"~*\")\n> == 0 && isalpha((unsigned char)n->val.val.str[pos])))\n> \n> should work for you. Can you confirm this?\n> --\n> Tatsuo Ishii\n> \n\n\nIt did for me , BSD/OS is veeeery finicky with the way it treats chars. It \nupgrades them to ints and from then on isalpha isupper do not work.\nI have tried with --unsigned-char gcc switch the result was the same, only\nwith a cast to (unsigned char) do all the char related functions work\nas advertised.\n\nI am sorry to say I have not had any input from other internationals using\nBSDI with a non-english character set.\n\n\n-- \nIncredible Networks LTD Angelos Karageorgiou\n20 Karea st, +30.1.92.12.312 (voice)\n116 36 Athens, Greece. +30.1.92.12.314 (fax)\nhttp://www.incredible.com [email protected] (e-mail)\n\n", "msg_date": "Fri, 11 Jun 1999 11:49:50 +0300 (EEST)", "msg_from": "Angelos Karageorgiou <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "This worked, however it made all selects of ~* '^sometext' sequential.\n\nWhy can't we use index searches in this case? I believe it is :-)\n\nBut this\n\nSELECT key FROM t WHERE key ~* '^ sometext';\n\n(note the space after the carret) still does not work! It says index scan will \nbe used and finds nothng. If 'sometext' is all uppercase the proper result is \nreturned. The problem seems to be in the regex handling anyway. 
I tried \ncompiling regcomp.c with -funsigned-char, but that was not enough. Further \nhacking...\n\nDaniel\n\n>>>Tatsuo Ishii said:\n > Ok. Now I almost certain that:\n > \n > \t\t\t\t\t\t(strcmp(opname,\"~*\")\n > == 0 && isalpha((unsigned char)n->val.val.str[pos])))\n > \n > should work for you. Can you confirm this?\n > --\n > Tatsuo Ishii\n\n\n", "msg_date": "Fri, 11 Jun 1999 12:32:09 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "This is not because of BSD/OS, but because of gcc. I found some 'char' \nreferences in the regex routines and am working on that currently.\n\nI can confirm that other software on BSD/OS works correctly with locales, such \nas vi, less etc. Apparently we need unsigned char in order to do anything \nsensible with char values over 127.\n\nDaniel\n\n>>>Angelos Karageorgiou said:\n> > \t\t\t\t\t\t(strcmp(opname,\"~*\")\n > > == 0 && isalpha((unsigned char)n->val.val.str[pos])))\n > > \n > > should work for you. Can you confirm this?\n > > --\n > > Tatsuo Ishii\n > \n > It did for me , BSD/OS is veeeery finicky with the way it treats chars. It \n > upgrades them to ints and from then on isalpha isupper do not work.\n > I have tried with --unsigned-char gcc switch the result was the same, only\n > with a cast to (unsigned char) do all the char related functions work\n > as advertised.\n > \n > I am sorry to say I have not had any input from other internationals using\n > BSDI with a non-english character set.\n > \n > \n > -- \n > Incredible Networks LTD Angelos Karageorgiou\n > 20 Karea st, +30.1.92.12.312 (voice)\n > 116 36 Athens, Greece. +30.1.92.12.314 (fax)\n > http://www.incredible.com [email protected] (e-mail)\n > \n\n\n", "msg_date": "Fri, 11 Jun 1999 12:36:31 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": ">This worked, however it made all selects of ~* '^sometext' sequential.\n\nThat's correct behavior.\n\n>Why can't we use index searches in this case? I believe it is :-)\n\nNo. It's due to the nature of the Btree index.\n\n>But this\n>\n>SELECT key FROM t WHERE key ~* '^ sometext';\n>\n>(note the space after the carret) still does not work! It says index scan will \n>be used and finds nothng. If 'sometext' is all uppercase the proper result is \n>returned. The problem seems to be in the regex handling anyway. I tried \n>compiling regcomp.c with -funsigned-char, but that was not enough. Further \n>hacking...\n\nAgain, that should not be index scan.\n\nBTW, if you want to play with regex, you might find retest.c be\nuseful. You can build the test tool by:\n\nmake retest\n\nenjoy:-)\n--\nTatsuo Ishii\n", "msg_date": "Fri, 11 Jun 1999 18:42:36 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": ">>>Tatsuo Ishii said:\n > >This worked, however it made all selects of ~* '^sometext' sequential.\n > \n > That's correct behavior.\n > \n > >Why can't we use index searches in this case? I believe it is :-)\n > \n > No. 
It's due to the nature of the Btree index.\n\nBut why it did use index scan when the case of the field and the expression \nmatched and did find the correct results?\n\n > >But this\n > >\n > >SELECT key FROM t WHERE key ~* '^ sometext';\n > \n > Again, that should not be index scan.\n\nSELECT key FROM t WHERE key ~* '^ ';\n\n(space only) uses index and works correctly.... weird!\n\n > BTW, if you want to play with regex, you might find retest.c be\n > useful. You can build the test tool by:\n > \n > make retest\n > \n > enjoy:-)\n\n\nIn a wild guess I changed all 'char' to 'unsigned char' in regcomp.c, with no \npositive or negative results. Maybe there are other places in regex where this \nshould be done, such as regexec.c? Is there an regex guru over here ... :-)\n\nMy long-time wondering about regex in Postgres has always been - isn't there \nsomething better than this old regex code that we can use?\n\nDaniel\n\n", "msg_date": "Fri, 11 Jun 1999 13:38:57 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "Daniel Kalchev <[email protected]> writes:\n> To summarize the problem. If key contains (equivalent cyrillic\n> letters) 'ABC', 'ABCD', 'DAB' and 'ABX' and the query is:\n\n> SELECT key FROM t WHERE key ~* '^AB';\n\n> index scan will be used and the correct tuples ('ABC', 'ABCD' and\n> 'ABX') will be returned. If the query is\n\n> SELECT key FROM t WHERE key ~* '^ab';\n\n> index scan will be used and no tuples will be returned.\n\nHm. Is it possible that isalpha() is doing the wrong thing on your\nmachine? makeIndexable() currently assumes that isalpha() returns true\nfor any character that is subject to case conversion, but I wonder\nwhether that's a good enough test.\n\nThe other possibility is that regexp's internal handling of\ncase-insensitive matching is not right.\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 11 Jun 1999 09:47:56 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "> >>>Tatsuo Ishii said:\n> > > SELECT key FROM t WHERE key ~* '^somestring'\n> > > \n> > > returns no tuples and explain says it will use the index on key. Why is th\n> is?\n> > \n> > That's strange. It should be seq scan in this case?\n> \n> I forgot to mention, that if 'somestring' is all uppercase, everything works \n> (the key field in the table is all uppercase). It still says index scan will \n> be used.\n> \n> To summarize the problem. If key contains (equivalent cyrillic letters) 'ABC', \n> 'ABCD', 'DAB' and 'ABX' and the query is:\n> \n> SELECT key FROM t WHERE key ~* '^AB';\n\nOops, forgot ~* is case-insensitive. The conditions we adde for this\nrequire the query to be between > A and < a, which is not very\nrestrictive.\n\nNo real better way to do this.\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Fri, 11 Jun 1999 10:22:46 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem" }, { "msg_contents": ">>>Tom Lane said:\n > Daniel Kalchev <[email protected]> writes:\n > > To summarize the problem. 
If key contains (equivalent cyrillic\n > > letters) 'ABC', 'ABCD', 'DAB' and 'ABX' and the query is:\n > \n > > SELECT key FROM t WHERE key ~* '^AB';\n > \n > > index scan will be used and the correct tuples ('ABC', 'ABCD' and\n > > 'ABX') will be returned. If the query is\n > \n > > SELECT key FROM t WHERE key ~* '^ab';\n > \n > > index scan will be used and no tuples will be returned.\n > \n > Hm. Is it possible that isalpha() is doing the wrong thing on your\n > machine? makeIndexable() currently assumes that isalpha() returns true\n > for any character that is subject to case conversion, but I wonder\n > whether that's a good enough test.\n\nIn fact, after giving it some though... the expression in gram.y\n\n\n\t\t\t\t\t\t(strcmp(opname,\"~*\")\n== 0 && isalpha(n->val.val.str[pos])))\n\nis wrong. The statement in my view decides that a regular expression is not \nindexable if it contains special characters or if it contains non-alpha \ncharacters. Therefore, the statement should be written as:\n\n\t\t\t\t\t\t(strcmp(opname,\"~*\")\n== 0 && !isalpha((unsigned char)n->val.val.str[pos])))\n\n(two fixes :) This makes indexes work for '^abc' (lowercase ASCII). But does \nnot find anything, which means regex does not work. It does not work for both \nASCII and non-ASCII text/patterns. :-(\n\n > The other possibility is that regexp's internal handling of\n > case-insensitive matching is not right.\n\nI believe it to be terribly wrong, and some releases ago it worked with 8-bit \ncharacters by just compiling it with -funsigned-char. Now this breaks things...\n\nDaniel\n\n", "msg_date": "Fri, 11 Jun 1999 19:38:56 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "Daniel Kalchev <[email protected]> writes:\n> In fact, after giving it some though... the expression in gram.y\n\n> \t\t\t\t\t\t(strcmp(opname,\"~*\")\n> == 0 && isalpha(n->val.val.str[pos])))\n\n> is wrong. The statement in my view decides that a regular expression is not \n> indexable if it contains special characters or if it contains non-alpha \n> characters. Therefore, the statement should be written as:\n\n> \t\t\t\t\t\t(strcmp(opname,\"~*\")\n> == 0 && !isalpha((unsigned char)n->val.val.str[pos])))\n\nNo, it's not wrong, at least not in that way! You've missed the point\nentirely. ~* is the *case insensitive* regexp match operator.\nTherefore if I have a pattern like '^abc' it can match anything\nbeginning with either 'a' or 'A'. If the index restriction were to\ninclude the letter 'a' then it would exclude valid matches starting with\n'A'. 
The simplest solution, which is what's in makeIndexable(), is\nto exclude case-foldable characters from the index restriction pattern.\nIn this particular case you end up getting no index restriction at all,\nbut that is indeed what's supposed to happen.\n\nI am not sure that isalpha() is an adequate test for case-foldable\ncharacters in non-ASCII locales, but inverting it is definitely wrong ;-)\n\n\t\t\tregards, tom lane\n", "msg_date": "Fri, 11 Jun 1999 17:41:13 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] another locale problem " }, { "msg_contents": "Tom,\n\nSo you say that this check prevents the use of indexes, when we use the ~* \noperator and have alpha characters in the pattern, because apparently the \nindex cannot do case insensitive matching.\n\nI was under the (apparently wrong impression) that it was possible to use \nindexes for case insensitive matching. Dreaming... :-)\n\nFor your information, isalpha() is the correct match for case foldable \ncharacters, at least in the cp1251 (windows-1251) locale. I believe a more \ncorrect test could be to access the locale's MAPLOWER and MAPUPPER tables.\n\nIt is not the case in Bulgarian, but there might be languages where an letter \ndoes not exist in both upper and lower cases and therefore requires more \ncomplex handling. Perhaps such situation exists in the multibyte locales.\n\nPlease excuse my confusion. :-)\n\nDaniel\n\n>>>Tom Lane said:\n > Daniel Kalchev <[email protected]> writes:\n > > In fact, after giving it some though... the expression in gram.y\n > \n > > \t\t\t\t\t\t(strcmp(opname,\"~*\")\n > > == 0 && isalpha(n->val.val.str[pos])))\n > \n > > is wrong. The statement in my view decides that a regular expression is no\n t \n > > indexable if it contains special characters or if it contains non-alpha \n > > characters. Therefore, the statement should be written as:\n > \n > > \t\t\t\t\t\t(strcmp(opname,\"~*\")\n > > == 0 && !isalpha((unsigned char)n->val.val.str[pos])))\n > \n > No, it's not wrong, at least not in that way! You've missed the point\n > entirely. ~* is the *case insensitive* regexp match operator.\n > Therefore if I have a pattern like '^abc' it can match anything\n > beginning with either 'a' or 'A'. If the index restriction were to\n > include the letter 'a' then it would exclude valid matches starting with\n > 'A'. The simplest solution, which is what's in makeIndexable(), is\n > to exclude case-foldable characters from the index restriction pattern.\n > In this particular case you end up getting no index restriction at all,\n > but that is indeed what's supposed to happen.\n > \n > I am not sure that isalpha() is an adequate test for case-foldable\n > characters in non-ASCII locales, but inverting it is definitely wrong ;-)\n > \n > \t\t\tregards, tom lane\n\n\n", "msg_date": "Mon, 14 Jun 1999 10:39:52 +0300", "msg_from": "Daniel Kalchev <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] another locale problem " } ]
[ { "msg_contents": "...as well as another bug described below.\nSo, Mark, you can use select order by for update now, \nbut be aware that such queries may return unordered\ndata if sort-keys will be changed by concurrent xactions.\n\nMy hands are mostly away from sources now, though I still\nget Assert in vacuum (FreeBSD 2.2.6) when run Hiroshi test.\n\nVadim\n\n======================= CVS log message ==================\n\n1. Fix for elog(ERROR, \"EvalPlanQual: t_xmin is uncommitted ?!\")\n and possibly for other cases too:\n \n DO NOT cache status of transaction in unknown state\n (i.e. non-committed and non-aborted ones)\n \n Example:\n T1 reads row updated/inserted by running T2 and cache T2 status.\n T2 commits.\n Now T1 reads a row updated by T2 and with HEAP_XMAX_COMMITTED\n in t_infomask (so cached T2 status is not changed).\n Now T1 EvalPlanQual gets updated row version without HEAP_XMIN_COMMITTED\n -> TransactionIdDidCommit(t_xmin) and TransactionIdDidAbort(t_xmin)\n return FALSE and T2 decides that t_xmin is not committed and gets\n ERROR above.\n \n It's too late to find more smart way to handle such cases and so\n I just changed xact status caching and got rid TransactionIdFlushCache()\n from code.\n \n Changed: transam.c, xact.c, lmgr.c and transam.h - last three\n just because of TransactionIdFlushCache() is removed.\n\n2. heapam.c:\n\n T1 marked a row for update. T2 waits for T1 commit/abort.\n T1 commits. T3 updates the row before T2 locks row page.\n Now T2 sees that new row t_xmax is different from xact id (T1)\n T2 was waiting for. Old code did Assert here. New one goes to\n HeapTupleSatisfiesUpdate. Obvious changes too.\n", "msg_date": "Thu, 10 Jun 1999 22:52:51 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": true, "msg_subject": "elog(ERROR, \"EvalPlanQual: t_xmin is uncommitted ?!\") fixed" } ]
[ { "msg_contents": "From: Vadim Mikheev <[email protected]>\n\n|> There is another problem in subj - sometimes application gets\n|> ERROR: EvalPlanQual: t_xmin is uncommitted ?!\n|> I'll try to find why. Mark (Wright), could you avoid\n|> order by in PL function? If you really need in ordered\n|> updates then try to create index on id_number and add\n|> id_number >= 0 to WHERE in select for update.\n|\n|Ops, this will not work in all cases. Try to rewrite select:\n|\n|SELECT * FROM test_attendees\n|WHERE print_status = 'R'\n|AND id_number = (select min(id_number) from test_attendees)\n|FOR UPDATE OF test_attendees\n|\n|and run it in loop.\n\nThat would only work the first time, since after updating print_status to\n'C', the record where id_number = (select min(id_number) from\ntest_attendees) would no longer have print_status = 'R', so no records would\nmatch the query.\n\nThe solution would appear to be to replace the clause '(select\nmin(id_number) from test_attendees)' with '(select min(id_number) from\ntest_attendees where print_status = 'R')'. However, that would not work,\nsince the subselect doesn't block (see the pgsql mailing list for an\nexplanation from Jan Wieck -\nhttp://www.postgresql.org/mhonarc/pgsql-sql/1999-06/msg00049.html - my\ncurrent solution is from his suggestion).\n\nI need the ordered select, since I'm trying to create a FIFO. I have one\nset of clients who are entering records into the table, and another set of\nclients who are taking those records and sending them to a printer. I need\nthe printers to output records in more or less the same order that they were\nentered.\n\n---\nMark Wright\[email protected]\[email protected]\n\n\n", "msg_date": "Thu, 10 Jun 1999 10:08:17 -0500", "msg_from": "\"Mark Wright\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] select order by for update" }, { "msg_contents": "Mark Wright wrote:\n> \n> |SELECT * FROM test_attendees\n> |WHERE print_status = 'R'\n> |AND id_number = (select min(id_number) from test_attendees )\n ^^^\nOps - add \"AND print_status = 'R'\" here!\n\n> |FOR UPDATE OF test_attendees\n> |\n> |and run it in loop.\n> \n> That would only work the first time, since after updating print_status to\n> 'C', the record where id_number = (select min(id_number) from\n> test_attendees) would no longer have print_status = 'R', so no records would\n> match the query.\n\nAnd this is why I said \"run select in loop\": if no one row will\nbe returned (i.e. row returned by subselect is already updated\nby concurrent xaction) then re-run select!\n\nVadim\n", "msg_date": "Thu, 10 Jun 1999 23:15:19 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] select order by for update" } ]
[ { "msg_contents": "In message \"Re: Real Programmers (was: [HACKERS] Priorities for 6.6)\", \nyou write:\n\n>>\n>>\n>> Hey, why don't you just overwrite the jmp instruction with a nop....\n>>\n>\n> Hmmmm - this would require that the code segment is writable\n> what it isn't on most modern systems.\n>\n> But the shared objects are usually compiled with -fPIC\n> (position independent code), so it should be possible to copy\n> the code segment part of the PL handlers into an malloc()'ed\n> area to get it into writable memory and execute it there over\n> function pointers...\n>\n> Nice idea, we'll try it with the upcoming PL/Perl handler.\n>\n> On second thought, there maybe is another tricky way to\n> prevent it all. Copy the entire Perl interpreter into\n> malloc()'ed memory and modify it's calls to malloc(), free()\n> redirecting them to private ones. Then we have total control\n> over it's allocations, can create an image copy of it after\n> each some successful calls into another area and in the case\n> of a transaction abort reset it to the last valid state by\n> restoring the copy.\n>\n> On third thought, we could also do it the Microsoft way. Hook\n> into the kernel's virtual memory control and trace every\n> first write operation into a page. At this time we copy the\n> old pages state to somewhere else. This will save some\n> allocated memory because we only need restorable copies of\n> the pages modified since the last save cycle. Requires to\n> hack down ways to get around access restrictions so the\n> postmaster is able to patch the OS kernel at startup (only\n> requires root permissions so /dev/kmem can get opened for\n> writing), but since this is definitely the best way to do it,\n> it's worth the efford.\n>\n> The result from this work then will become the base for more\n> changes. If the postmaster is already patching the kernel,\n> it can also take over the process scheduling to optimize the\n> system for PostgreSQL performance and we could get rid of\n> these damned SYSV IPC semaphores. Finally the postmaster will\n> control a new type of block cache, by mapping part's of the\n> relations into virtual memory pages of the backends on demand\n> avoiding SYSV shared memories too.\n\n\nOr, embed postgreSQL directly in the OS using the OS toolkit.\n\n\n\n", "msg_date": "Thu, 10 Jun 1999 13:40:45 -0400 (EDT)", "msg_from": "\"Mark Hollomon\" <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Real Programmers (was: [HACKERS] Priorities for 6.6)" } ]
[ { "msg_contents": "\nWith the recent changes, here's the new release candidate for Monday ...\n\nftp://ftp.postgresql.org/pub/Beta/postgresql-v6.5beta4.tar.gz\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Fri, 11 Jun 1999 00:37:42 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Beta4 Available ..." }, { "msg_contents": "The Hermit Hacker wrote:\n> \n> With the recent changes, here's the new release candidate for Monday ...\n> \n> ftp://ftp.postgresql.org/pub/Beta/postgresql-v6.5beta4.tar.gz\n\nUnfortunately, I made yet two another changes... -:(\n\nVadim\n", "msg_date": "Fri, 11 Jun 1999 18:08:17 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Beta4 Available ..." } ]
[ { "msg_contents": "\nOne Beeg Table:\n\n---snip---\n/dev/hdc1 6033323 5414189 306672 95% /var/lib/pgsql\n---snip---\n\n---snip---\nlocal-mon:/var/lib/pgsql/data/base/corona # ls -l iptraffic*\n-rw------- 1 postgres daemon 2147475456 Jun 11 03:16 iptraffic\n-rw------- 1 postgres daemon 2147475456 Jun 11 03:16 iptraffic.1\n-rw------- 1 postgres daemon 1225385984 Jun 11 08:09 iptraffic.2\n---snip---\n\nOne Query:\n\n---snip---\nselect sum(size) from iptraffic where date = '19990601';\npqReadData() -- backend closed the channel unexpectedly.\n This probably means the backend terminated abnormally before or\nwhile processing the request.\nWe have lost the connection to the backend, so further processing is\nimpossible. Terminating.\n---snip---\n\nOne `tail /var/log/postgres.log`\n\n---snip---\nDEBUG: --Relation pg_indexes--\nDEBUG: Pages 0: Changed 0, Reapped 0, Empty 0, New 0; Tup 0: Vac 0, Crash\n0, Un\nUsed 0, MinLen 0, MaxLen 0; Re-using: Free/Avail. Space 0/0;\nEndEmpty/Avail. Pag\nes 0/0. Elapsed 0/0 sec.\nERROR: parser: parse error at or near \"*\"\nERROR: parser: parse error at or near \"*\"\nERROR: parser: parse error at or near \"\"\"\nERROR: parser: parse error at or near \"*\"\nERROR: iptraffic: cannot extend\nTRAP: Failed Assertion(\"!((*(&(buf->io_in_progress_lock)) == 0)):\", File:\n\"bufmg\nr.c\", Line: 656)\n\n!((*(&(buf->io_in_progress_lock)) == 0)) (0)\nERROR: parser: parse error at or near \";\"\n---\n\n\nWhy the \"-\" sign?\n\n---snip---\n>select sum(size) from iptraffic where date = '19990601';\n sum\n----------\n-393538142\n(1 row)\n---snip---\n\nThanks!\nChrisG\n\n", "msg_date": "Fri, 11 Jun 1999 08:18:15 +0200 (SAST)", "msg_from": "[email protected]", "msg_from_op": true, "msg_subject": "Crash on select?" } ]
[ { "msg_contents": "Hi!\nI have following problem:\n\nI use php with postgres as backend. Every time, I run some queries,\npostgres creates a lot of processes - this causes extremely high processor\nusage. \nI execute some queries in quasi-parallel way, cause I need its results.\nBut other are executing and every time I free result - all of those\nqueries are executing on the same connection. So is it normal, that I get\nso much processes? And if there is some way to limit it? Or may be change\nprocess live time?\n\tTIA\n\tRem\n\n\n-------------------------------------------------------------------*------------\nRemigiusz Sokolowski e-mail: [email protected] * *\t\t\n-----------------------------------------------------------------*****----------\n\n", "msg_date": "Fri, 11 Jun 1999 10:28:57 +0200 (MET DST)", "msg_from": "Remigiusz Sokolowski <[email protected]>", "msg_from_op": true, "msg_subject": "postgres processes" }, { "msg_contents": "Remigiusz Sokolowski wrote:\n\n>\n> Hi!\n> I have following problem:\n>\n> I use php with postgres as backend. Every time, I run some queries,\n> postgres creates a lot of processes - this causes extremely high processor\n> usage.\n> I execute some queries in quasi-parallel way, cause I need its results.\n> But other are executing and every time I free result - all of those\n> queries are executing on the same connection. So is it normal, that I get\n> so much processes? And if there is some way to limit it? Or may be change\n> process live time?\n\n That's a general problem when using PostgreSQL in the\n background for Apache CGI or php scripts.\n\n The defaults in the Apache configuration are\n\n StartServers 5\n MaxClients 256\n MinSpareServers 5\n MaxSpareServers 10\n\n This means, that at startup Apache will create 5 server\n processes that can handle requests simultaneously. When the\n site gets busy and some of them take longer to handle\n requests (especially scripting requests), it will start new\n servers (max one per second) until the limit of 256 parallel\n server processes is reached. If they finish their requests\n and become idle again, some of them get killed if there are\n more than 10 idle Apache processes.\n\n This is normally a good policy. It ensures that small file\n requests can still get served while some long running CGI's\n block their server process.\n\n In the case of having PostgreSQL in the background, any such\n CGI request causes another backend to get started too. So\n when there's a peak of such requests, PostgreSQL will have to\n serve more parallel queries, Apache will start more servers\n to handle the incoming requests, causing more PostgreSQL\n connections and more parallel queries...\n\n What you can try is to take down the MaxClients directive in\n the Apache configuration. But that would mean, that a plain\n html file request, that could be served in milliseconds, will\n have to wait if all servers are blocked waiting for CGI's.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. 
#\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Fri, 11 Jun 1999 12:44:05 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [HACKERS] postgres processes" }, { "msg_contents": "> The defaults in the Apache configuration are\n> \n> StartServers 5\n> MaxClients 256\n> MinSpareServers 5\n> MaxSpareServers 10\n> \n> This means, that at startup Apache will create 5 server\n> processes that can handle requests simultaneously. When the\n> site gets busy and some of them take longer to handle\n> requests (especially scripting requests), it will start new\n> servers (max one per second) until the limit of 256 parallel\n> server processes is reached. If they finish their requests\n> and become idle again, some of them get killed if there are\n> more than 10 idle Apache processes.\n> \n> This is normally a good policy. It ensures that small file\n> requests can still get served while some long running CGI's\n> block their server process.\n\n\nMy problem is, that server is used not only as database server, but also\n(and in general) as mail server - I think that tehre are some other\nservices too.\nI've used persistent connections to database (and I think I now understand\nwhy so big processor usage), so postgres processes haven't die after\nserve requests but wait for another. \nHmm... I have one question more - every postgres process takes about 5% of\nprocessor time ( I've used to measure top command ) - it is normal or may\nbe processor is too slow?\n\tRem\n\n", "msg_date": "Fri, 11 Jun 1999 13:45:57 +0200 (MET DST)", "msg_from": "Remigiusz Sokolowski <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] postgres processes" }, { "msg_contents": "\n From: Remigiusz Sokolowski <[email protected]>\n cc: [email protected], [email protected]\n Content-Type: TEXT/PLAIN; charset=US-ASCII\n Sender: [email protected]\n Precedence: bulk\n\n > The defaults in the Apache configuration are\n > \n > StartServers 5\n > MaxClients 256\n > MinSpareServers 5\n > MaxSpareServers 10\n > \n > This means, that at startup Apache will create 5 server\n > processes that can handle requests simultaneously. When the\n > site gets busy and some of them take longer to handle\n > requests (especially scripting requests), it will start new\n > servers (max one per second) until the limit of 256 parallel\n > server processes is reached. If they finish their requests\n > and become idle again, some of them get killed if there are\n > more than 10 idle Apache processes.\n > \n > This is normally a good policy. It ensures that small file\n > requests can still get served while some long running CGI's\n > block their server process.\n\n\n My problem is, that server is used not only as database server, but also\n (and in general) as mail server - I think that tehre are some other\n services too.\n I've used persistent connections to database (and I think I now understand\n why so big processor usage), so postgres processes haven't die after\n serve requests but wait for another. \n Hmm... 
I have one question more - every postgres process takes about 5% of\n processor time ( I've used to measure top command ) - it is normal or may\n be processor is too slow?\n\t Rem\n\n\n\nWe use a similar configuration, and initially had similar problems.\nWe just don't use persistent connections in php anymore, and things\nwork fine - In our case, the number of reconnects saved by pconnect\nwould be small anyway.\n\nAnother strategy would be to start a second apache server on a\ndifferent port or different machine, use it only for redirects to the\npages that call postgres (assuming this is not your whole site). Then\nthrottle the second server back as described above (we haven't\nactually done this - but it seems it should work).\n\n-- \nKarl DeBisschop <[email protected]>\n617.832.0332 (Fax: 617.956.2696)\n\nInformation Please - your source for FREE online reference\nhttp://www.infoplease.com - Your Ultimate Fact Finder\nhttp://kids.infoplease.com - The Great Homework Helper\n", "msg_date": "Fri, 11 Jun 1999 09:02:41 -0400", "msg_from": "Karl DeBisschop <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [GENERAL] Re: [HACKERS] postgres processes" }, { "msg_contents": "Karl DeBisschop wrote:\n\n> From: Remigiusz Sokolowski <[email protected]>\n>\n> My problem is, that server is used not only as database server, but also\n> (and in general) as mail server - I think that tehre are some other\n> services too.\n> I've used persistent connections to database (and I think I now understand\n> why so big processor usage), so postgres processes haven't die after\n> serve requests but wait for another.\n> Hmm... I have one question more - every postgres process takes about 5% of\n> processor time ( I've used to measure top command ) - it is normal or may\n> be processor is too slow?\n>\n> We use a similar configuration, and initially had similar problems.\n> We just don't use persistent connections in php anymore, and things\n> work fine - In our case, the number of reconnects saved by pconnect\n> would be small anyway.\n>\n> Another strategy would be to start a second apache server on a\n> different port or different machine, use it only for redirects to the\n> pages that call postgres (assuming this is not your whole site). Then\n> throttle the second server back as described above (we haven't\n> actually done this - but it seems it should work).\n\n I don't know anything about the internals of php, but I have\n similar problems with an Apache module I'm (still) writing.\n\n Using Apache 1.3.3 I've tried to use persistent connections\n to a content server. The methods available for a module\n include a call AtExit, which is called just before the actual\n Apache server process will be terminated (due to Apache\n shutdown or because too many spare servers).\n\n But there's nothing that tells about a timeout. In the case\n of a communication or scripting timeout, Apache does a\n longjmp() back into the mainloop and clears the pool. 
That's\n the reason why sockets, files and memory allocations under\n Apache should be done via the Apache pool utilities, and not\n using the low level functions (as in PostgreSQL with\n palloc()).\n\n It MIGHT be the case, that php has that problem when using\n persistent database connections and that after a timeout, an\n unclosed connection is hanging around causing another hanging\n PostgreSQL backend.\n\n\nJan\n\n--\n\n#======================================================================#\n# It's easier to get forgiveness for being wrong than for being right. #\n# Let's break this rule - forgive me. #\n#========================================= [email protected] (Jan Wieck) #\n\n\n", "msg_date": "Mon, 14 Jun 1999 12:10:01 +0200 (MET DST)", "msg_from": "[email protected] (Jan Wieck)", "msg_from_op": false, "msg_subject": "Re: [GENERAL] Re: [HACKERS] postgres processes" }, { "msg_contents": "On Mon, Jun 14, 1999 at 12:10:01PM +0200, Jan Wieck wrote:\n> Karl DeBisschop wrote:\n> \n> > From: Remigiusz Sokolowski <[email protected]>\n> >\n> > My problem is, that server is used not only as database server, but also\n> > (and in general) as mail server - I think that tehre are some other\n> > services too.\n> > I've used persistent connections to database (and I think I now understand\n> > why so big processor usage), so postgres processes haven't die after\n> > serve requests but wait for another.\n> > Hmm... I have one question more - every postgres process takes about 5% of\n> > processor time ( I've used to measure top command ) - it is normal or may\n> > be processor is too slow?\n> >\n> > We use a similar configuration, and initially had similar problems.\n> > We just don't use persistent connections in php anymore, and things\n> > work fine - In our case, the number of reconnects saved by pconnect\n> > would be small anyway.\n> >\n> > Another strategy would be to start a second apache server on a\n> > different port or different machine, use it only for redirects to the\n> > pages that call postgres (assuming this is not your whole site). Then\n> > throttle the second server back as described above (we haven't\n> > actually done this - but it seems it should work).\n> \n> I don't know anything about the internals of php, but I have\n> similar problems with an Apache module I'm (still) writing.\n> \n> Using Apache 1.3.3 I've tried to use persistent connections\n> to a content server. The methods available for a module\n> include a call AtExit, which is called just before the actual\n> Apache server process will be terminated (due to Apache\n> shutdown or because too many spare servers).\n\nPHP3's module version uses the standard ap_register_cleanup()\ncall to register its shutdown function. This function calls then\nall PHP module specific shutdown functions, so that PostgreSQL\npersistent connection should be closed properly.\n\nI don't know of any situation where Apache would not call these\nregistered handlers (I'm not familiar with the Apache\ninternals...). Additionally, I've been running a site for a\nclient for about two years now which uses PHP and persistent\nconnections to PostgreSQL heavily - I don't think we have ever\nseen such problems as described previously.\n\n> \n> But there's nothing that tells about a timeout. In the case\n> of a communication or scripting timeout, Apache does a\n> longjmp() back into the mainloop and clears the pool. 
That's\n> the reason why sockets, files and memory allocations under\n> Apache should be done via the Apache pool utilities, and not\n> using the low level functions (as in PostgreSQL with\n> palloc()).\n> \n> It MIGHT be the case, that php has that problem when using\n> persistent database connections and that after a timeout, an\n> unclosed connection is hanging around causing another hanging\n> PostgreSQL backend.\n> \n> \n> Jan\n> \n> --\n> \n> #======================================================================#\n> # It's easier to get forgiveness for being wrong than for being right. #\n> # Let's break this rule - forgive me. #\n> #========================================= [email protected] (Jan Wieck) #\n> \n> \n> \n\n-- \n\n Regards,\n\n Sascha Schumann\n Consultant\n", "msg_date": "Mon, 14 Jun 1999 13:05:31 +0200", "msg_from": "Sascha Schumann <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [GENERAL] Re: [HACKERS] postgres processes" } ]
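The cleanup-registration pattern Jan and Sascha describe above can be made concrete with a short sketch. This is not taken from php or from any real module -- the function names and the way the connection string is passed in are invented for illustration; only pool, ap_register_cleanup(), ap_null_cleanup, PQconnectdb(), PQstatus() and PQfinish() are real Apache 1.3 / libpq calls. The point is simply that a connection tied to a pool gets closed even when Apache longjmp()s out of a timed-out request and clears that pool, so no orphaned backend is left behind:

    #include "httpd.h"        /* Apache 1.3 API: pool, ap_register_cleanup() */
    #include "libpq-fe.h"     /* PostgreSQL client library */

    /* pool cleanup: close the libpq connection so the backend exits */
    static void example_conn_cleanup(void *data)
    {
        PGconn *conn = (PGconn *) data;

        if (conn != NULL)
            PQfinish(conn);
    }

    /* open a connection and tie its lifetime to the given pool */
    static PGconn *example_open_conn(pool *p, const char *conninfo)
    {
        PGconn *conn = PQconnectdb(conninfo);

        if (conn == NULL || PQstatus(conn) == CONNECTION_BAD)
        {
            if (conn)
                PQfinish(conn);
            return NULL;
        }

        /*
         * Even if the request times out and Apache clears the pool on its
         * longjmp() path, this cleanup still runs and the backend goes
         * away with the connection.
         */
        ap_register_cleanup(p, (void *) conn, example_conn_cleanup,
                            ap_null_cleanup);
        return conn;
    }

A module doing persistent connections would register the cleanup on a longer-lived pool (the child's pool rather than the per-request one), which, judging from Sascha's description, is roughly what php's pconnect handling does.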
[ { "msg_contents": "\n Does anybody use postgres on this animal?\n\n UNIX_SV its-sp 4.2MP 2.1.2 i386 x86at SCO UNIX_SVR4\n UX:cc: INFO: Optimizing C Compilation System (CCS) 3.0 09/26/96 (u211mr1)\n Postgres 6.5 current CVS\n\nI have problem to configure:\n\n>> dms@its-sp:~/current/pgsql/src>./configure\n>> --with-template=unixware --without-CXX\n>> checking whether the C compiler (cc -O -K i486,host,inline,loop_unroll,alloca\n>> -Dsvr4 ) works... no\n>> configure: error: installation or configuration problem: C compiler cannot\n>> create executables.\n\nether:\n\u001b\n>> dms@its-sp:~/current/pgsql/src>./configure --with-template=unixware\n>> --without-CXX\n>> include/config.h is unchanged\n>> linking ./backend/port/tas/dummy.s to backend/port/tas.s\n>> linking ./backend/port/dynloader/unknown.c to backend/port/dynloader.c\n>> configure: error: ./backend/port/dynloader/unknown.c: File not found\n\n\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Fri, 11 Jun 1999 16:55:43 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": true, "msg_subject": "UnixWare" } ]
[ { "msg_contents": " All,\n\nAre there separate UNIX client package to build it \nwithout whole postgres ?\n\n---\nDmitry Samersoff, [email protected], ICQ:3161705\nhttp://devnull.wplus.net\n* There will come soft rains ...\n", "msg_date": "Fri, 11 Jun 1999 17:15:57 +0400 (MSD)", "msg_from": "Dmitry Samersoff <[email protected]>", "msg_from_op": true, "msg_subject": "Client" } ]
[ { "msg_contents": "Hi,\n\nplease apply the included patch. It corrects the headers in src/win32 -\nthere are some missing #endif.\n\n\t\t\tDan\n\n----------------------------------------------\nDaniel Horak\nnetwork and system administrator\ne-mail: [email protected]\nprivat e-mail: [email protected] ICQ:36448176\n----------------------------------------------", "msg_date": "Fri, 11 Jun 1999 15:43:19 +0200", "msg_from": "Horak Daniel <[email protected]>", "msg_from_op": true, "msg_subject": "missing #endif in win32 specific headers" }, { "msg_contents": "Applied.\n\n[Charset iso-8859-2 unsupported, filtering to ASCII...]\n> Hi,\n> \n> please apply the included patch. It corrects the headers in src/win32 -\n> there are some missing #endif.\n> \n> \t\t\tDan\n> \n> ----------------------------------------------\n> Daniel Horak\n> network and system administrator\n> e-mail: [email protected]\n> privat e-mail: [email protected] ICQ:36448176\n> ----------------------------------------------\n> \n\n[Attachment, skipping...]\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Fri, 11 Jun 1999 11:00:39 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] missing #endif in win32 specific headers" } ]
[ { "msg_contents": "> Applied. You man want to give us a fix for /template/.similar for that\n> platform, if needed to configure guesses the proper platform.\n> \n> > This patch should enable 6.5 to build on Motorola 68000 architecture. It comes\n> > from Roman Hodek <[email protected]>.\n> > \n> > \n> Content-Description: ol\n\nOK, I have a big question on this. The template/linux_m68k uses\nCFLAGS:-O2. Now, with egcs, which I think the person using, this -O2\ncauses some problems with the function pointers we call. It is a known\nproblem.\n\nCan you please test with -O instead, and see if the fmgr_ptr change made\nto postgres.h is needed in this case?\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Fri, 11 Jun 1999 10:27:01 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [PORTS] Patch for m68k architecture" } ]
[ { "msg_contents": "There are a couple of places in the docs where the term \"DML\" is used,\nbut it is not defined anywhere. What exactly does it stand for and how\nwould you define it?\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Fri, 11 Jun 1999 15:38:38 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "\"DML\"" }, { "msg_contents": "> There are a couple of places in the docs where the term \"DML\" is used,\n> but it is not defined anywhere. What exactly does it stand for and how\n> would you define it?\n\nHmm. Thanks to Don and Jay for a definition. The def (\"Data\nManipulation Language\") was what I thought, but I find it confusing\nthat it is used to refer to a subset of \"SQL\", which also has\n\"Language\" in the acronym.\n\nWould it be acceptable to replace \"DML statement\" with something like\n\"data-altering statement\"? The phrase shows up in only two places in\nour ~700 pages of docs and out of context it doesn't seem to add\nvalue...\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Fri, 11 Jun 1999 16:28:00 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": ">\n>Hmm. Thanks to Don and Jay for a definition. The def (\"Data\n>Manipulation Language\") was what I thought, but I find it confusing\n>that it is used to refer to a subset of \"SQL\", which also has\n>\"Language\" in the acronym.\n\nNO!!!! SQL doesn't stand for *anything*! (Though common practice calls\nit Structured Query Language, it was officially called SQL because the\noriginal name of the language was copyrighted.)\n\nSQL is composed of three languages (DML, DDL -- Data Definition Language,\nand some TLA that I've forgotten).\n\n(OK, I admit to being persnickity about this.) :)\n-- \nD. Jay Newman ! For the pleasure and the profit it derives\[email protected] ! I arrange things, like furniture, and\nhttp://www.sprucegrove.com/~jay/ ! daffodils, and ...lives. -- Hello Dolly\n", "msg_date": "Fri, 11 Jun 1999 12:46:24 -0400 (EDT)", "msg_from": "\"D. Jay Newman\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "At 04:28 PM 6/11/99 +0000, Thomas Lockhart wrote:\n\n>Would it be acceptable to replace \"DML statement\" with something like\n>\"data-altering statement\"? The phrase shows up in only two places in\n>our ~700 pages of docs and out of context it doesn't seem to add\n>value...\n\nIt would probably be OK. DML seems to be one of those terms of\nart, not using it would seem odd to db geeks but they'd understand\nwhat you're saying. Using it is incomprehensible to the rest of\nus. So I'd vote for writing something non-experts can understand.\n\nExperts don't need your docs anyway :)\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Fri, 11 Jun 1999 09:49:44 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "On Fri, 11 Jun 1999, Don Baccus wrote:\n\n> At 04:28 PM 6/11/99 +0000, Thomas Lockhart wrote:\n> \n> >Would it be acceptable to replace \"DML statement\" with something like\n> >\"data-altering statement\"? 
The phrase shows up in only two places in\n> >our ~700 pages of docs and out of context it doesn't seem to add\n> >value...\n> \n> It would probably be OK. DML seems to be one of those terms of\n> art, not using it would seem odd to db geeks but they'd understand\n> what you're saying. Using it is incomprehensible to the rest of\n> us. So I'd vote for writing something non-experts can understand.\n\nI dunno about that one Don. DML and its cousins (DDL and DCL) are\nvery much in common usage these days. DDL (Data Definintion Language)\nwould be CREATE TABLE and so on whilst DML is INSERT INTO, UPDATE\nand DELETE FROM...DCL (Data Control Language) usually deals with\npermissions (GRANT/REVOKE) and often gets lumped in with DDL.\n\nIf you have a look at any comprehensive text that tells neophytes\n(like yers truly) how to get a handle on SQL the subsets are defined\nand referred to by their acronyms (e.g., Groff and Weinberg, 'LAN\nTimes Guide to SQL' which I keep handy...)\n\nI'm not sure why they originally split up SQL but I know this:\nproprietary databases like PROGRESS that claim to `support'\nSQL generally don't support *all* the subsets.\n\nPROGRESS, for example, does not support DCL at all. One must use\nthe `data dictionary', an awkward user interface. It claims to\nsupport DDL but you can't access the tables you make with DDL cmds\nvia the dictionary. However, DML is fairly well supported, for what\nthat's worth...\n\n\n\n------- North Richmond Community Mental Health Center -------\nAnyway, hope I haven't muddied the waters but I wanted to say that\neven non-SQL databases use these terms...and almost all SQL textbooks\ncover them in detail.\n\nCheers,\nTom\n\nThomas Good MIS Coordinator, Senior DBA\nVital Signs: tomg@ { admin | q8 } .nrnet.org\n Phone: 718-354-5528 \n Fax: 718-354-5056 \n \n/* Member: Computer Professionals For Social Responsibility */ \n\n", "msg_date": "Fri, 11 Jun 1999 13:12:56 -0400 (EDT)", "msg_from": "Thomas Good <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "On Fri, 11 Jun 1999, Don Baccus wrote:\n\n> Hmmm...it appears \"The Practical SQL Handbook\" gets it wrong, \n> then, as it lumps \"select\" with data modification statements\n> when it makes its breakdown between DML, data definition, \n> and what they call data administration (clearly they mean\n> the same things as your DCL definition, i.e. grant/revoke\n> type stuff).\n> \n> And the AOLServer guys get it right as they talk about \n> \"ns_db dml\" working on insert/update/delete and \"also\n> data definition\" statements, i.e. they recognize the\n> difference in their documentation.\n> \n> OK, in the DML, DCL, and DDL decomposition of things, just what\n> *is* a select statement?\n\nHee hee...according to Guy Harrison who wrote 'Oracle SQL High \nPerformance Tuning' (a great book) SELECT is NOT part of DML.\n\nIt is a QUERY. \nHarrison separates SELECT from DML because it does not alter data.\n\nIs this getting murkier or do I really need some coffee?\n \n> You've given a simple definition of the decomposition, why\n> not bottle it and pour it into the docs?\n\nI would give it a try, if Thomas wanted it...usually tho I sit back\nand try to learn from following this list. 
Anyway the two books that\nI've come to rely on are literally on my coffee table at home, so \nmaybe tonight I'll have a pint and try to write up a coherent def.\n\nTom\n\n------- North Richmond Community Mental Health Center -------\n\nThomas Good MIS Coordinator\nVital Signs: tomg@ { admin | q8 } .nrnet.org\n Phone: 718-354-5528 \n Fax: 718-354-5056 \n \n/* Member: Computer Professionals For Social Responsibility */ \n\n", "msg_date": "Fri, 11 Jun 1999 13:36:26 -0400 (EDT)", "msg_from": "Thomas Good <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "At 01:12 PM 6/11/99 -0400, Thomas Good wrote:\n\n>I dunno about that one Don. DML and its cousins (DDL and DCL) are\n>very much in common usage these days. DDL (Data Definintion Language)\n>would be CREATE TABLE and so on whilst DML is INSERT INTO, UPDATE\n>and DELETE FROM...DCL (Data Control Language) usually deals with\n>permissions (GRANT/REVOKE) and often gets lumped in with DDL.\n\n>If you have a look at any comprehensive text that tells neophytes\n>(like yers truly) how to get a handle on SQL...\n\nHmmm...it appears \"The Practical SQL Handbook\" gets it wrong, \nthen, as it lumps \"select\" with data modification statements\nwhen it makes its breakdown between DML, data definition, \nand what they call data administration (clearly they mean\nthe same things as your DCL definition, i.e. grant/revoke\ntype stuff).\n\nAnd the AOLServer guys get it right as they talk about \n\"ns_db dml\" working on insert/update/delete and \"also\ndata definition\" statements, i.e. they recognize the\ndifference in their documentation.\n\nOK, in the DML, DCL, and DDL decomposition of things, just what\n*is* a select statement?\n\nYou've given a simple definition of the decomposition, why\nnot bottle it and pour it into the docs?\n\n...\n\n>PROGRESS, for example, does not support DCL at all. One must use\n>the `data dictionary', an awkward user interface.\n\nBarf\n\n> It claims to\n>support DDL but you can't access the tables you make with DDL cmds\n>via the dictionary.\n\nAnd Codds hasn't struck them dead with lightning? :)\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Fri, 11 Jun 1999 11:28:41 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "At 01:36 PM 6/11/99 -0400, Thomas Good wrote:\n\n>Hee hee...according to Guy Harrison who wrote 'Oracle SQL High \n>Performance Tuning' (a great book) SELECT is NOT part of DML.\n\n>It is a QUERY. \n>Harrison separates SELECT from DML because it does not alter data.\n\n>Is this getting murkier or do I really need some coffee?\n\nMurkier :)\n\nOK, so SQL can be decomposed into: queries, DML, DDL, DCL.\n\nThe queries are ... DQL? :)\n\n> Anyway the two books that\n>I've come to rely on are literally on my coffee table at home, so \n>maybe tonight I'll have a pint ....\n\nTaking my \"bottle and pour it\" advice literally, I see!\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Fri, 11 Jun 1999 12:42:04 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "Thus spake Thomas Good\n> I'm not sure why they originally split up SQL but I know this:\n\nActually, I think it was the other way around. 
The term SQL didn't\ncome into use until DDL, DML and DCL had been in common use. I'm\npretty sure Date didn't mention SQL in his original paper.\n\n-- \nD'Arcy J.M. Cain <darcy@{druid|vex}.net> | Democracy is three wolves\nhttp://www.druid.net/darcy/ | and a sheep voting on\n+1 416 424 2871 (DoD#0082) (eNTP) | what's for dinner.\n", "msg_date": "Fri, 11 Jun 1999 22:34:46 -0400 (EDT)", "msg_from": "\"D'Arcy\" \"J.M.\" Cain <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "Thus spake D'Arcy J.M. Cain\n> Thus spake Thomas Good\n> > I'm not sure why they originally split up SQL but I know this:\n> \n> Actually, I think it was the other way around. The term SQL didn't\n> come into use until DDL, DML and DCL had been in common use. I'm\n> pretty sure Date didn't mention SQL in his original paper.\n\nOops. That would be E.F. Codd, not Chris Date.\n\n-- \nD'Arcy J.M. Cain <darcy@{druid|vex}.net> | Democracy is three wolves\nhttp://www.druid.net/darcy/ | and a sheep voting on\n+1 416 424 2871 (DoD#0082) (eNTP) | what's for dinner.\n", "msg_date": "Fri, 11 Jun 1999 22:56:18 -0400 (EDT)", "msg_from": "\"D'Arcy\" \"J.M.\" Cain <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "Thomas Good wrote:\n> \n> > OK, in the DML, DCL, and DDL decomposition of things, just what\n> > *is* a select statement?\n> \n> Hee hee...according to Guy Harrison who wrote 'Oracle SQL High\n> Performance Tuning' (a great book) SELECT is NOT part of DML.\n> \n> It is a QUERY.\n> Harrison separates SELECT from DML because it does not alter data.\n\nBut what would you call yer query when the tables have rules/triggers\nattached that do alter data ? A DML-savvy QUERY ;)\n\nBTW, where do all the triggers and rules fall anyway, or ar they a\ndifferent subset - maybe Data Behaviour Language (DBL)\n \n> Is this getting murkier or do I really need some coffee?\n\nMaybe next we should try to divide english into sublanguages ?\n English for Order Giving - EOG\n English for Describing Things - EDT\n Englisg for Discussing SQL Sublanguages - EDSQLL\n\n------------------\nHannu\n", "msg_date": "Sat, 12 Jun 1999 13:27:11 +0300", "msg_from": "Hannu Krosing <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "Thomas Lockhart wrote:\n> \n> There are a couple of places in the docs where the term \"DML\" is used,\n> but it is not defined anywhere. What exactly does it stand for and how\n> would you define it?\n\nI used it for\nSELECT, INSERT, DELETE, UPDATE, FETCH and COPY_TO statements.\nSeems that I was wrong using \"DML\" for all of them. \n\nThey are statements for data management and querying.\nServer computes snapshot only for them. \nAnytime when I used \"DML\" in docs it was related to this,\ni.e. to the time of snapshot calculation.\n\nVadim\n", "msg_date": "Sat, 12 Jun 1999 19:15:14 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: \"DML\"" }, { "msg_contents": "On Sat, 12 Jun 1999, Hannu Krosing wrote:\n\n> But what would you call yer query when the tables have rules/triggers\n> attached that do alter data ? A DML-savvy QUERY ;)\n> \n> BTW, where do all the triggers and rules fall anyway, or ar they a\n> different subset - maybe Data Behaviour Language (DBL)\n\nRight. Well, in the interests of annoying Tom Lockhart further...\nI've attempted to define SQL and its subsets. 
Preliminary apologies\nare apprently offered to Thomas, Hannu, Don, D'Arcy and Vadim (who\nseems to be cc'd on this foolishness).\n\nHere 'tis:\n\nConversational SQL - A Gratuitous Glossary\n------------------------------------------\n\nIt would appear that one could blame E. F. ('Ted') Codd for\nthe current state of confusion regarding the various subsets\n(DML-DDL-DCL-Queries, et al.) of the SQL language. Of course,\nhe is probably not responsible...in any event:\n\nIn June, 1970 Codd presented a paper on 'A Relational Model of\nData For Large Shared Data Banks.' This act set in motion\na chain reaction the result of which was the preeminence of\nrelational databases and their primary interface - the language\nknown as `SQL'. It also set in motion the entire process of\ndeveloping acronyms to help with the obfuscation of any remaining\npoints of clarity surrounding the language whose very name is \nthe subject of controversy. (The fact that a language which does \nnot have a procedural nature is termed `structured' is an apparently \noxymoronic bit of wordplay along the lines of `English Grammar.')\n\nThe original query language that accompanied IBM's System R database\nwas known as SEQUEL (Structured English QUEry Language). IBM\nis often blamed for later truncating the name to SQL. In any case,\nSQL (pronounced either as sequel or S-Q-L) stuck. At some point\nin the birthing process, SQL was either carved up into subsets\nor perhaps borne of the concatenation of various subsets which may\nor may not have existed within the vernacular of SQL's mischeivous\nmidwives.\n\nThe various subsets are the source of much confusion, apparently \nby design. Although this point is also open to disputation.\n\nThe subsets would seem to be (and these are widely disputed and \nmisrepresented, especially by this writer):\n\nThe QUERY: the basic building block of SQL. The SELECT statement\nis the quintessential QUERY. Except, as was pointed out by Hannu\nKrossing, when the SELECT accesses a TABLE that has rules or triggers\nattached...in this instance (and probably others as well) the SELECT\ncomes dangerously close to DML (a notorious subset covered below).\nHowever, this is probably an inaccurate definition as rules, triggers\nand stored procecures are not actually part of core SQL, if there\nis such a thing. What are they? Procedural Extensions...\n\nMoving along nicely now we come to Data Manipulation Language, another\nsubset of SQL which alleges to provide some functionality whilst defying\ndefinition. DML, as it is known, is composed of INSERT INTO, UPDATE\nand DELETE FROM statements. DML manipulates data unlike SELECT QUERIES\nwhich manipulate data. The difference apparently is that DML can alter\ndata, while SELECT cannot unless the table it is querying has procedural\nextensions somehow glued to it. Or if it is part of an DML statement as\nin an INSERT INTO y SELECT * FROM x; statement or a CREATE TABLE AS SELECT\nstatement. Although there is tremendous overlap here we cling to this\narbitary cutting up in the interests of clarity.\n\nThe next subset would seem to be DDL. Data Definition Language appears to\ngovern the creation and destruction of database objects. It includes\nCREATE TABLE, CREATE SEQUENCE, CREATE INDEX and similar statements. It \nalso includes the destructive counterparts: DROP TABLE and so on.\nUnfortunately DROP ACRONYM is apparently not implemented. 
As DDL seems\nto be clearly defined by comparison with DML and SELECT queries it is\nperhaps time to introduce some ambiguity.\n\nDCL (Data Control Language) may or may not exist. It may or may not be\na part of DDL, depending on which erudite text one reads. It may also be\ncolloquially termed `data administration language' or something similar.\nPerhaps not. In any case it seems to contain the GRANT and REVOKE \nstatements which offer the illusion of control in a language which is \nobviously out of control.\n\nEOT!\n-------------------------------------\nTom Good, New York City, 12 June 1999\n\n------- North Richmond Community Mental Health Center -------\n\nThomas Good MIS Coordinator\nVital Signs: tomg@ { admin | q8 } .nrnet.org\n Phone: 718-354-5528 \n Fax: 718-354-5056 \n \n/* Member: Computer Professionals For Social Responsibility */ \n\n", "msg_date": "Sat, 12 Jun 1999 08:23:49 -0400 (EDT)", "msg_from": "Thomas Good <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"...CREATE ACRONYM statement" }, { "msg_contents": "I like it! :)\n\nOf course, if I include it in the docs then it would be much more\neffective if I also include the previous 20 mail messages on the\nsubject...\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Sat, 12 Jun 1999 14:45:49 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] \"DML\"...CREATE ACRONYM statement" }, { "msg_contents": "\nOn 12-Jun-99 Hannu Krosing wrote:\n> \n> Maybe next we should try to divide english into sublanguages ?\n> English for Order Giving - EOG\n> English for Describing Things - EDT\n> Englisg for Discussing SQL Sublanguages - EDSQLL\n\nAnd don't forget my favorite:\n\n English for Confusing Things - ECT\n\nnot to be confused with it's counterpart:\n\n English That Confuses - ETC\n\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n", "msg_date": "Sat, 12 Jun 1999 10:53:23 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"" }, { "msg_contents": "On Sat, 12 Jun 1999, Thomas Lockhart wrote:\n\n> I like it! :)\n> \n> Of course, if I include it in the docs then it would be much more\n> effective if I also include the previous 20 mail messages on the\n> subject...\n> \n> - Thomas\n\nOk...ok! I sheared off abit of the fluff and here it is...provided\nit meets with everyone's approval you're free to do with it what you\nwill.\n\nCheers,\nTom\n\n-------------------------------------------------------------\n\nSQL subsets - QUERIES, DML, DDL and DCL\n---------------------------------------\n\nThe original query language that accompanied IBM's System R database\nwas known as SEQUEL (Structured English QUEry Language). IBM\nlater truncated the name to SQL - pronounced either as sequel \nor as the phonetic eS-Que-eL. SQL has one basic operation and\n3 subsets:\n\nThe QUERY - SQL has one primordial operation known as a QUERY. \n'SELECT' is the verb in the basic SQL query. 
Queries can become rather\ncomplex and may include elements of DML (Data Manipulation Language).\nQueries may also access tables that contain TRIGGERs and RULEs\nthus invoking more complex database operations. In its most elemental\nform, however, a SELECT statement is a simple QUERY.\n\nDML - the Data Manipulation Languge subset of SQL - is composed of \nINSERT INTO, UPDATE and DELETE FROM statements. DML manipulates data\n(altering it) while SELECT, at least in its purest form, does not.\n\nDDL - the Data Definition Language subset of SQL - is a collection of\nstatements that create and destroy database objects. It includes\nCREATE TABLE, CREATE SEQUENCE, CREATE INDEX and similar statements. It \nalso includes the destructive counterparts: DROP TABLE and so on.\n(Unfortunately DROP ACRONYM is apparently yet to be implemented.)\n\nDCL - the Data Control Language subset of SQL - is that part of the SQL\nlanguage that controls user access. It contains the GRANT and REVOKE \nstatements which govern permissions.\n\n------- North Richmond Community Mental Health Center -------\n\nThomas Good MIS Coordinator\nVital Signs: tomg@ { admin | q8 } .nrnet.org\n Phone: 718-354-5528 \n Fax: 718-354-5056 \n \n/* Member: Computer Professionals For Social Responsibility */ \n\n\n------- North Richmond Community Mental Health Center -------\n\nThomas Good MIS Coordinator\nVital Signs: tomg@ { admin | q8 } .nrnet.org\n Phone: 718-354-5528 \n Fax: 718-354-5056 \n \n/* Member: Computer Professionals For Social Responsibility */ \n\n", "msg_date": "Sat, 12 Jun 1999 14:32:42 -0400 (EDT)", "msg_from": "Thomas Good <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] \"DML\"...CREATE ACRONYM statement" } ]
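Since the glossary is aimed at readers meeting these terms for the first time, one statement from each subset may make the split more concrete. The table and column names are invented for the example; the statements themselves are ordinary PostgreSQL syntax:

    -- DDL: create (and later destroy) the object
    CREATE TABLE staff (id int4, name text);

    -- DML: change the data held in it
    INSERT INTO staff VALUES (1, 'Ada');
    UPDATE staff SET name = 'Grace' WHERE id = 1;
    DELETE FROM staff WHERE id = 1;

    -- DCL: say who may do any of the above
    GRANT ALL ON staff TO PUBLIC;
    REVOKE ALL ON staff FROM PUBLIC;

    -- the plain QUERY, which reads but does not alter
    SELECT name FROM staff;

    -- DDL again, to clean up
    DROP TABLE staff;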
[ { "msg_contents": "Hi Hackers,\n\nI don't like last minute patches before the final freeze, but I believe that\nthis one could be useful for people experiencing out-of-memory crashes while\nexecuting queries which retrieve or use a very large number of tuples.\n\nThe problem happens when storage is allocated for functions results used in\na large query, for example:\n\n select upper(name) from big_table;\n select big_table.array[1] from big_table;\n select count(upper(name)) from big_table;\n\nThis patch is a dirty hack that fixes the out-of-memory problem for the most\ncommon cases, like the above ones. It is not the final solution for the\nproblem but it can work for some people, so I'm posting it.\n\nThe patch should be safe because all changes are under #ifdef. Furthermore\nthe feature can be enabled or disabled at runtime by the `free_tuple_memory'\noptions in the pg_options file. The option is disabled by default and must\nbe explicitly enabled at runtime to have any effect.\n\nTo enable the patch add the follwing line to Makefile.custom:\n\nCUSTOM_COPT += -DFREE_TUPLE_MEMORY\n\nTo enable the option at runtime add the following line to pg_option:\n\nfree_tuple_memory=1\n\nHere is the patch:\n\n*** src/include/utils/portal.h.orig\tWed May 26 09:07:16 1999\n--- src/include/utils/portal.h\tFri Jun 11 18:06:07 1999\n***************\n*** 80,85 ****\n--- 80,89 ----\n extern PortalVariableMemory PortalGetVariableMemory(Portal portal);\n extern PortalHeapMemory PortalGetHeapMemory(Portal portal);\n \n+ #ifdef FREE_TUPLE_MEMORY\n+ bool PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer);\n+ #endif\n+ \n /* estimate of the maximum number of open portals a user would have,\n * used in initially sizing the PortalHashTable in\tEnablePortalManager()\n */\n*** src/backend/utils/mmgr/portalmem.c.orig\tWed May 26 09:06:02 1999\n--- src/backend/utils/mmgr/portalmem.c\tFri Jun 11 18:06:28 1999\n***************\n*** 289,294 ****\n--- 289,312 ----\n \t}\n }\n \n+ #ifdef FREE_TUPLE_MEMORY\n+ /*\n+ * PortalHeapMemoryIsValid --\n+ *\n+ * Check if a pointer is allocated in a memory context.\n+ *\n+ */\n+ bool\n+ PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer)\n+ {\n+ \tHeapMemoryBlock block = HEAPMEMBLOCK((PortalHeapMemory) context);\n+ \n+ \tAssertState(PointerIsValid(block));\n+ \n+ \treturn (AllocSetContains(&block->setData, pointer));\n+ }\n+ #endif\n+ \n /* ----------------\n *\t\tPortalHeapMemoryRealloc\n * ----------------\n*** src/include/utils/trace.h.orig\tSat Jun 5 09:00:40 1999\n--- src/include/utils/trace.h\tFri Jun 11 19:01:30 1999\n***************\n*** 64,69 ****\n--- 64,72 ----\n \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n+ #ifdef FREE_TUPLE_MEMORY\n+ \tOPT_FREE_TUPLE_MEMORY,\t\t/* try to pfree memory for each tuple */\n+ #endif\n \n \tNUM_PG_OPTIONS\t\t\t\t/* must be the last item of enum */\n };\n***************\n*** 83,91 ****\n #endif\t /* TRACE_H */\n \n /*\n! * Local variables:\n! *\ttab-width: 4\n! *\tc-indent-level: 4\n! *\tc-basic-offset: 4\n * End:\n */\n--- 86,94 ----\n #endif\t /* TRACE_H */\n \n /*\n! * Local Variables:\n! * tab-width: 4\n! * c-indent-level: 4\n! 
* c-basic-offset: 4\n * End:\n */\n*** src/backend/utils/misc/trace.c.orig\tSat Jun 5 09:00:37 1999\n--- src/backend/utils/misc/trace.c\tFri Jun 11 19:01:31 1999\n***************\n*** 73,78 ****\n--- 73,81 ----\n \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n+ #ifdef FREE_TUPLE_MEMORY\n+ \t\"free_tuple_memory\",\t\t/* try to pfree memory for each tuple */\n+ #endif\n \n \t/* NUM_PG_OPTIONS */\t\t/* must be the last item of enum */\n };\n***************\n*** 404,412 ****\n }\n \n /*\n! * Local variables:\n! *\ttab-width: 4\n! *\tc-indent-level: 4\n! *\tc-basic-offset: 4\n * End:\n */\n--- 407,415 ----\n }\n \n /*\n! * Local Variables:\n! * tab-width: 4\n! * c-indent-level: 4\n! * c-basic-offset: 4\n * End:\n */\n*** src/backend/access/common/heaptuple.c.orig\tWed May 26 09:01:59 1999\n--- src/backend/access/common/heaptuple.c\tFri Jun 11 19:08:33 1999\n***************\n*** 27,32 ****\n--- 27,37 ----\n #include <storage/bufpage.h>\n #include <utils/memutils.h>\n \n+ #ifdef FREE_TUPLE_MEMORY\n+ #include <utils/portal.h>\n+ #include <utils/trace.h>\n+ #endif\n+ \n #ifndef HAVE_MEMMOVE\n #include <regex/utils.h>\n #else\n***************\n*** 93,98 ****\n--- 98,106 ----\n \tint\t\t\ti;\n \tint\t\t\tnumberOfAttributes = tupleDesc->natts;\n \tForm_pg_attribute *att = tupleDesc->attrs;\n+ #ifdef FREE_TUPLE_MEMORY\n+ \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n+ #endif\n \n \tif (bit != NULL)\n \t{\n***************\n*** 131,136 ****\n--- 139,152 ----\n \t\t\t\t*infomask |= HEAP_HASVARLENA;\n \t\t\t\tdata_length = VARSIZE(DatumGetPointer(value[i]));\n \t\t\t\tmemmove(data, DatumGetPointer(value[i]), data_length);\n+ #ifdef FREE_TUPLE_MEMORY\n+ \t\t\t\t/* try to pfree value[i] - dz */\n+ \t\t\t\tif (free_tuple_memory &&\n+ \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n+ \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n+ \t\t\t\t\tpfree(value[i]);\n+ \t\t\t\t}\n+ #endif\n \t\t\t\tbreak;\n \t\t\tcase sizeof(char):\n \t\t\t\t*data = att[i]->attbyval ?\n***************\n*** 147,154 ****\n \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n \t\t\t\tbreak;\n \t\t\tdefault:\n! \t\t\t\tmemmove(data, DatumGetPointer(value[i]),\n! \t\t\t\t\t\tatt[i]->attlen);\n \t\t\t\tbreak;\n \t\t}\n \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n--- 163,177 ----\n \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n \t\t\t\tbreak;\n \t\t\tdefault:\n! \t\t\t\tmemmove(data, DatumGetPointer(value[i]), att[i]->attlen);\n! #ifdef FREE_TUPLE_MEMORY\n! \t\t\t\t/* try to pfree value[i] - dz */\n! \t\t\t\tif (free_tuple_memory &&\n! \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n! \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n! \t\t\t\t\tpfree(value[i]);\n! \t\t\t\t}\n! #endif\n \t\t\t\tbreak;\n \t\t}\n \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n*** src/backend/executor/nodeAgg.c.orig\tWed May 26 09:02:57 1999\n--- src/backend/executor/nodeAgg.c\tFri Jun 11 19:10:27 1999\n***************\n*** 31,36 ****\n--- 31,41 ----\n #include \"utils/syscache.h\"\n #include \"optimizer/clauses.h\"\n \n+ #ifdef FREE_TUPLE_MEMORY\n+ #include <utils/portal.h>\n+ #include <utils/trace.h>\n+ #endif\n+ \n /*\n * AggFuncInfo -\n *\t keeps the transition functions information around\n***************\n*** 113,119 ****\n \t\t\t\tisNull1 = FALSE,\n \t\t\t\tisNull2 = FALSE;\n \tbool\t\tqual_result;\n! 
\n \n \t/* ---------------------\n \t *\tget state info from node\n--- 118,126 ----\n \t\t\t\tisNull1 = FALSE,\n \t\t\t\tisNull2 = FALSE;\n \tbool\t\tqual_result;\n! #ifdef FREE_TUPLE_MEMORY\n! \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n! #endif\n \n \t/* ---------------------\n \t *\tget state info from node\n***************\n*** 241,246 ****\n--- 248,257 ----\n \t\tfor (;;)\n \t\t{\n \t\t\tTupleTableSlot *outerslot;\n+ #ifdef FREE_TUPLE_MEMORY\n+ \t\t\tOid\t\t\t\tvalueType;\n+ \t\t\tbool\t\t\tisByValue = 0;\n+ #endif\n \n \t\t\tisNull = isNull1 = isNull2 = 0;\n \t\t\touterslot = ExecProcNode(outerPlan, (Plan *) node);\n***************\n*** 293,298 ****\n--- 304,334 ----\n \t\t\t\t\tnewVal = ExecEvalExpr(aggref->target, econtext,\n \t\t\t\t\t\t\t\t\t\t &isNull, &isDone);\n \t\t\t\t}\n+ #ifdef FREE_TUPLE_MEMORY\n+ \t\t\t\tif (free_tuple_memory) {\n+ \t\t\t\t\tswitch (nodeTag(aggref->target)) {\n+ \t\t\t\t\tcase T_Const:\n+ \t\t\t\t\t\tisByValue = ((Const*) (aggref->target))->constbyval;\n+ \t\t\t\t\t\tbreak;\n+ \t\t\t\t\tcase T_Var:\n+ \t\t\t\t\t\tvalueType = ((Var*) (aggref->target))->vartype;\n+ \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n+ \t\t\t\t\t\tbreak;\n+ \t\t\t\t\tcase T_Array:\n+ \t\t\t\t\t\tisByValue = ((Array*)(aggref->target))->arrayelembyval;\n+ \t\t\t\t\t\tbreak;\n+ \t\t\t\t\tcase T_ArrayRef:\n+ \t\t\t\t\t\tisByValue =((ArrayRef*)(aggref->target))->refelembyval;\n+ \t\t\t\t\t\tbreak;\n+ \t\t\t\t\tcase T_Expr:\n+ \t\t\t\t\t\tvalueType = ((Expr*) (aggref->target))->typeOid;\n+ \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n+ \t\t\t\t\t\tbreak;\n+ \t\t\t\t\tdefault:\n+ \t\t\t\t\t\tbreak;\n+ \t\t\t\t\t}\n+ \t\t\t\t}\n+ #endif\n \n \t\t\t\tif (isNull && !aggref->usenulls)\n \t\t\t\t\tcontinue;\t/* ignore this tuple for this agg */\n***************\n*** 353,358 ****\n--- 389,404 ----\n \t\t\t\t\t\t\t\t\t\t (FmgrValues *) args, &isNull2);\n \t\t\t\t\tAssert(!isNull2);\n \t\t\t\t}\n+ \n+ #ifdef FREE_TUPLE_MEMORY\n+ \t\t\t\t/* try to pfree newVal if not isByValue - dz */\n+ \t\t\t\tif (free_tuple_memory && !isByValue && \n+ \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n+ \t\t\t\t\t\t\t\t\t\t\t(Pointer) newVal))\n+ \t\t\t\t{\n+ \t\t\t\t\tpfree(newVal);\n+ \t\t\t\t}\n+ #endif\n \t\t\t}\n \n \t\t\t/*\n\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Fri, 11 Jun 1999 19:49:19 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "patch for large queries" }, { "msg_contents": "This looks like a great patch, and it is one of our major issues to fix\nfor 6.6.\n\nCan I hold on to it, and put it in 6.5.1? I am pretty sure we will have\none. 
I am hesitant to put it in now because Marc would have to put out\na new beta for the change.\n\nIt looks really good.\n\n\n> Hi Hackers,\n> \n> I don't like last minute patches before the final freeze, but I believe that\n> this one could be useful for people experiencing out-of-memory crashes while\n> executing queries which retrieve or use a very large number of tuples.\n> \n> The problem happens when storage is allocated for functions results used in\n> a large query, for example:\n> \n> select upper(name) from big_table;\n> select big_table.array[1] from big_table;\n> select count(upper(name)) from big_table;\n> \n> This patch is a dirty hack that fixes the out-of-memory problem for the most\n> common cases, like the above ones. It is not the final solution for the\n> problem but it can work for some people, so I'm posting it.\n> \n> The patch should be safe because all changes are under #ifdef. Furthermore\n> the feature can be enabled or disabled at runtime by the `free_tuple_memory'\n> options in the pg_options file. The option is disabled by default and must\n> be explicitly enabled at runtime to have any effect.\n> \n> To enable the patch add the follwing line to Makefile.custom:\n> \n> CUSTOM_COPT += -DFREE_TUPLE_MEMORY\n> \n> To enable the option at runtime add the following line to pg_option:\n> \n> free_tuple_memory=1\n> \n> Here is the patch:\n> \n> *** src/include/utils/portal.h.orig\tWed May 26 09:07:16 1999\n> --- src/include/utils/portal.h\tFri Jun 11 18:06:07 1999\n> ***************\n> *** 80,85 ****\n> --- 80,89 ----\n> extern PortalVariableMemory PortalGetVariableMemory(Portal portal);\n> extern PortalHeapMemory PortalGetHeapMemory(Portal portal);\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + bool PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer);\n> + #endif\n> + \n> /* estimate of the maximum number of open portals a user would have,\n> * used in initially sizing the PortalHashTable in\tEnablePortalManager()\n> */\n> *** src/backend/utils/mmgr/portalmem.c.orig\tWed May 26 09:06:02 1999\n> --- src/backend/utils/mmgr/portalmem.c\tFri Jun 11 18:06:28 1999\n> ***************\n> *** 289,294 ****\n> --- 289,312 ----\n> \t}\n> }\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + /*\n> + * PortalHeapMemoryIsValid --\n> + *\n> + * Check if a pointer is allocated in a memory context.\n> + *\n> + */\n> + bool\n> + PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer)\n> + {\n> + \tHeapMemoryBlock block = HEAPMEMBLOCK((PortalHeapMemory) context);\n> + \n> + \tAssertState(PointerIsValid(block));\n> + \n> + \treturn (AllocSetContains(&block->setData, pointer));\n> + }\n> + #endif\n> + \n> /* ----------------\n> *\t\tPortalHeapMemoryRealloc\n> * ----------------\n> *** src/include/utils/trace.h.orig\tSat Jun 5 09:00:40 1999\n> --- src/include/utils/trace.h\tFri Jun 11 19:01:30 1999\n> ***************\n> *** 64,69 ****\n> --- 64,72 ----\n> \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n> \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n> \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tOPT_FREE_TUPLE_MEMORY,\t\t/* try to pfree memory for each tuple */\n> + #endif\n> \n> \tNUM_PG_OPTIONS\t\t\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 83,91 ****\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 86,94 ----\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! 
* Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/utils/misc/trace.c.orig\tSat Jun 5 09:00:37 1999\n> --- src/backend/utils/misc/trace.c\tFri Jun 11 19:01:31 1999\n> ***************\n> *** 73,78 ****\n> --- 73,81 ----\n> \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n> \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n> \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\"free_tuple_memory\",\t\t/* try to pfree memory for each tuple */\n> + #endif\n> \n> \t/* NUM_PG_OPTIONS */\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 404,412 ****\n> }\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 407,415 ----\n> }\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/access/common/heaptuple.c.orig\tWed May 26 09:01:59 1999\n> --- src/backend/access/common/heaptuple.c\tFri Jun 11 19:08:33 1999\n> ***************\n> *** 27,32 ****\n> --- 27,37 ----\n> #include <storage/bufpage.h>\n> #include <utils/memutils.h>\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> #ifndef HAVE_MEMMOVE\n> #include <regex/utils.h>\n> #else\n> ***************\n> *** 93,98 ****\n> --- 98,106 ----\n> \tint\t\t\ti;\n> \tint\t\t\tnumberOfAttributes = tupleDesc->natts;\n> \tForm_pg_attribute *att = tupleDesc->attrs;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> + #endif\n> \n> \tif (bit != NULL)\n> \t{\n> ***************\n> *** 131,136 ****\n> --- 139,152 ----\n> \t\t\t\t*infomask |= HEAP_HASVARLENA;\n> \t\t\t\tdata_length = VARSIZE(DatumGetPointer(value[i]));\n> \t\t\t\tmemmove(data, DatumGetPointer(value[i]), data_length);\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree value[i] - dz */\n> + \t\t\t\tif (free_tuple_memory &&\n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> + \t\t\t\t\tpfree(value[i]);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t\tbreak;\n> \t\t\tcase sizeof(char):\n> \t\t\t\t*data = att[i]->attbyval ?\n> ***************\n> *** 147,154 ****\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]),\n> ! \t\t\t\t\t\tatt[i]->attlen);\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> --- 163,177 ----\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]), att[i]->attlen);\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \t\t\t\t/* try to pfree value[i] - dz */\n> ! \t\t\t\tif (free_tuple_memory &&\n> ! \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> ! \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> ! \t\t\t\t\tpfree(value[i]);\n> ! \t\t\t\t}\n> ! 
#endif\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> *** src/backend/executor/nodeAgg.c.orig\tWed May 26 09:02:57 1999\n> --- src/backend/executor/nodeAgg.c\tFri Jun 11 19:10:27 1999\n> ***************\n> *** 31,36 ****\n> --- 31,41 ----\n> #include \"utils/syscache.h\"\n> #include \"optimizer/clauses.h\"\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> /*\n> * AggFuncInfo -\n> *\t keeps the transition functions information around\n> ***************\n> *** 113,119 ****\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! \n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> --- 118,126 ----\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> ! #endif\n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> ***************\n> *** 241,246 ****\n> --- 248,257 ----\n> \t\tfor (;;)\n> \t\t{\n> \t\t\tTupleTableSlot *outerslot;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\tOid\t\t\t\tvalueType;\n> + \t\t\tbool\t\t\tisByValue = 0;\n> + #endif\n> \n> \t\t\tisNull = isNull1 = isNull2 = 0;\n> \t\t\touterslot = ExecProcNode(outerPlan, (Plan *) node);\n> ***************\n> *** 293,298 ****\n> --- 304,334 ----\n> \t\t\t\t\tnewVal = ExecEvalExpr(aggref->target, econtext,\n> \t\t\t\t\t\t\t\t\t\t &isNull, &isDone);\n> \t\t\t\t}\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\tif (free_tuple_memory) {\n> + \t\t\t\t\tswitch (nodeTag(aggref->target)) {\n> + \t\t\t\t\tcase T_Const:\n> + \t\t\t\t\t\tisByValue = ((Const*) (aggref->target))->constbyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Var:\n> + \t\t\t\t\t\tvalueType = ((Var*) (aggref->target))->vartype;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Array:\n> + \t\t\t\t\t\tisByValue = ((Array*)(aggref->target))->arrayelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_ArrayRef:\n> + \t\t\t\t\t\tisByValue =((ArrayRef*)(aggref->target))->refelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Expr:\n> + \t\t\t\t\t\tvalueType = ((Expr*) (aggref->target))->typeOid;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tdefault:\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\t}\n> + \t\t\t\t}\n> + #endif\n> \n> \t\t\t\tif (isNull && !aggref->usenulls)\n> \t\t\t\t\tcontinue;\t/* ignore this tuple for this agg */\n> ***************\n> *** 353,358 ****\n> --- 389,404 ----\n> \t\t\t\t\t\t\t\t\t\t (FmgrValues *) args, &isNull2);\n> \t\t\t\t\tAssert(!isNull2);\n> \t\t\t\t}\n> + \n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree newVal if not isByValue - dz */\n> + \t\t\t\tif (free_tuple_memory && !isByValue && \n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) newVal))\n> + \t\t\t\t{\n> + \t\t\t\t\tpfree(newVal);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t}\n> \n> \t\t\t/*\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian 
| http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n \n", "msg_date": "Sat, 12 Jun 1999 00:18:16 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: patch for large queries" }, { "msg_contents": "Applied.\n\n\n> Hi Hackers,\n> \n> I don't like last minute patches before the final freeze, but I believe that\n> this one could be useful for people experiencing out-of-memory crashes while\n> executing queries which retrieve or use a very large number of tuples.\n> \n> The problem happens when storage is allocated for functions results used in\n> a large query, for example:\n> \n> select upper(name) from big_table;\n> select big_table.array[1] from big_table;\n> select count(upper(name)) from big_table;\n> \n> This patch is a dirty hack that fixes the out-of-memory problem for the most\n> common cases, like the above ones. It is not the final solution for the\n> problem but it can work for some people, so I'm posting it.\n> \n> The patch should be safe because all changes are under #ifdef. Furthermore\n> the feature can be enabled or disabled at runtime by the `free_tuple_memory'\n> options in the pg_options file. The option is disabled by default and must\n> be explicitly enabled at runtime to have any effect.\n> \n> To enable the patch add the follwing line to Makefile.custom:\n> \n> CUSTOM_COPT += -DFREE_TUPLE_MEMORY\n> \n> To enable the option at runtime add the following line to pg_option:\n> \n> free_tuple_memory=1\n> \n> Here is the patch:\n> \n> *** src/include/utils/portal.h.orig\tWed May 26 09:07:16 1999\n> --- src/include/utils/portal.h\tFri Jun 11 18:06:07 1999\n> ***************\n> *** 80,85 ****\n> --- 80,89 ----\n> extern PortalVariableMemory PortalGetVariableMemory(Portal portal);\n> extern PortalHeapMemory PortalGetHeapMemory(Portal portal);\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + bool PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer);\n> + #endif\n> + \n> /* estimate of the maximum number of open portals a user would have,\n> * used in initially sizing the PortalHashTable in\tEnablePortalManager()\n> */\n> *** src/backend/utils/mmgr/portalmem.c.orig\tWed May 26 09:06:02 1999\n> --- src/backend/utils/mmgr/portalmem.c\tFri Jun 11 18:06:28 1999\n> ***************\n> *** 289,294 ****\n> --- 289,312 ----\n> \t}\n> }\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + /*\n> + * PortalHeapMemoryIsValid --\n> + *\n> + * Check if a pointer is allocated in a memory context.\n> + *\n> + */\n> + bool\n> + PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer)\n> + {\n> + \tHeapMemoryBlock block = HEAPMEMBLOCK((PortalHeapMemory) context);\n> + \n> + \tAssertState(PointerIsValid(block));\n> + \n> + \treturn (AllocSetContains(&block->setData, pointer));\n> + }\n> + #endif\n> + \n> /* ----------------\n> *\t\tPortalHeapMemoryRealloc\n> * ----------------\n> *** src/include/utils/trace.h.orig\tSat Jun 5 09:00:40 1999\n> --- src/include/utils/trace.h\tFri Jun 11 19:01:30 1999\n> ***************\n> *** 64,69 ****\n> --- 64,72 ----\n> \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n> \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n> \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tOPT_FREE_TUPLE_MEMORY,\t\t/* try to pfree memory for each tuple */\n> + #endif\n> \n> \tNUM_PG_OPTIONS\t\t\t\t/* must be the last item of enum 
*/\n> };\n> ***************\n> *** 83,91 ****\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 86,94 ----\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/utils/misc/trace.c.orig\tSat Jun 5 09:00:37 1999\n> --- src/backend/utils/misc/trace.c\tFri Jun 11 19:01:31 1999\n> ***************\n> *** 73,78 ****\n> --- 73,81 ----\n> \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n> \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n> \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\"free_tuple_memory\",\t\t/* try to pfree memory for each tuple */\n> + #endif\n> \n> \t/* NUM_PG_OPTIONS */\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 404,412 ****\n> }\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 407,415 ----\n> }\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/access/common/heaptuple.c.orig\tWed May 26 09:01:59 1999\n> --- src/backend/access/common/heaptuple.c\tFri Jun 11 19:08:33 1999\n> ***************\n> *** 27,32 ****\n> --- 27,37 ----\n> #include <storage/bufpage.h>\n> #include <utils/memutils.h>\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> #ifndef HAVE_MEMMOVE\n> #include <regex/utils.h>\n> #else\n> ***************\n> *** 93,98 ****\n> --- 98,106 ----\n> \tint\t\t\ti;\n> \tint\t\t\tnumberOfAttributes = tupleDesc->natts;\n> \tForm_pg_attribute *att = tupleDesc->attrs;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> + #endif\n> \n> \tif (bit != NULL)\n> \t{\n> ***************\n> *** 131,136 ****\n> --- 139,152 ----\n> \t\t\t\t*infomask |= HEAP_HASVARLENA;\n> \t\t\t\tdata_length = VARSIZE(DatumGetPointer(value[i]));\n> \t\t\t\tmemmove(data, DatumGetPointer(value[i]), data_length);\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree value[i] - dz */\n> + \t\t\t\tif (free_tuple_memory &&\n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> + \t\t\t\t\tpfree(value[i]);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t\tbreak;\n> \t\t\tcase sizeof(char):\n> \t\t\t\t*data = att[i]->attbyval ?\n> ***************\n> *** 147,154 ****\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]),\n> ! \t\t\t\t\t\tatt[i]->attlen);\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> --- 163,177 ----\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]), att[i]->attlen);\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \t\t\t\t/* try to pfree value[i] - dz */\n> ! \t\t\t\tif (free_tuple_memory &&\n> ! \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> ! \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> ! \t\t\t\t\tpfree(value[i]);\n> ! \t\t\t\t}\n> ! 
#endif\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> *** src/backend/executor/nodeAgg.c.orig\tWed May 26 09:02:57 1999\n> --- src/backend/executor/nodeAgg.c\tFri Jun 11 19:10:27 1999\n> ***************\n> *** 31,36 ****\n> --- 31,41 ----\n> #include \"utils/syscache.h\"\n> #include \"optimizer/clauses.h\"\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> /*\n> * AggFuncInfo -\n> *\t keeps the transition functions information around\n> ***************\n> *** 113,119 ****\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! \n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> --- 118,126 ----\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> ! #endif\n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> ***************\n> *** 241,246 ****\n> --- 248,257 ----\n> \t\tfor (;;)\n> \t\t{\n> \t\t\tTupleTableSlot *outerslot;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\tOid\t\t\t\tvalueType;\n> + \t\t\tbool\t\t\tisByValue = 0;\n> + #endif\n> \n> \t\t\tisNull = isNull1 = isNull2 = 0;\n> \t\t\touterslot = ExecProcNode(outerPlan, (Plan *) node);\n> ***************\n> *** 293,298 ****\n> --- 304,334 ----\n> \t\t\t\t\tnewVal = ExecEvalExpr(aggref->target, econtext,\n> \t\t\t\t\t\t\t\t\t\t &isNull, &isDone);\n> \t\t\t\t}\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\tif (free_tuple_memory) {\n> + \t\t\t\t\tswitch (nodeTag(aggref->target)) {\n> + \t\t\t\t\tcase T_Const:\n> + \t\t\t\t\t\tisByValue = ((Const*) (aggref->target))->constbyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Var:\n> + \t\t\t\t\t\tvalueType = ((Var*) (aggref->target))->vartype;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Array:\n> + \t\t\t\t\t\tisByValue = ((Array*)(aggref->target))->arrayelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_ArrayRef:\n> + \t\t\t\t\t\tisByValue =((ArrayRef*)(aggref->target))->refelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Expr:\n> + \t\t\t\t\t\tvalueType = ((Expr*) (aggref->target))->typeOid;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tdefault:\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\t}\n> + \t\t\t\t}\n> + #endif\n> \n> \t\t\t\tif (isNull && !aggref->usenulls)\n> \t\t\t\t\tcontinue;\t/* ignore this tuple for this agg */\n> ***************\n> *** 353,358 ****\n> --- 389,404 ----\n> \t\t\t\t\t\t\t\t\t\t (FmgrValues *) args, &isNull2);\n> \t\t\t\t\tAssert(!isNull2);\n> \t\t\t\t}\n> + \n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree newVal if not isByValue - dz */\n> + \t\t\t\tif (free_tuple_memory && !isByValue && \n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) newVal))\n> + \t\t\t\t{\n> + \t\t\t\t\tpfree(newVal);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t}\n> \n> \t\t\t/*\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian 
| http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 12 Jun 1999 10:04:31 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: patch for large queries" }, { "msg_contents": "Sorry, someone objected, and I have to reverse out the patch. I will\nkeep it for later use when we address the issue.\n\n> Hi Hackers,\n> \n> I don't like last minute patches before the final freeze, but I believe that\n> this one could be useful for people experiencing out-of-memory crashes while\n> executing queries which retrieve or use a very large number of tuples.\n> \n> The problem happens when storage is allocated for functions results used in\n> a large query, for example:\n> \n> select upper(name) from big_table;\n> select big_table.array[1] from big_table;\n> select count(upper(name)) from big_table;\n> \n> This patch is a dirty hack that fixes the out-of-memory problem for the most\n> common cases, like the above ones. It is not the final solution for the\n> problem but it can work for some people, so I'm posting it.\n> \n> The patch should be safe because all changes are under #ifdef. Furthermore\n> the feature can be enabled or disabled at runtime by the `free_tuple_memory'\n> options in the pg_options file. The option is disabled by default and must\n> be explicitly enabled at runtime to have any effect.\n> \n> To enable the patch add the follwing line to Makefile.custom:\n> \n> CUSTOM_COPT += -DFREE_TUPLE_MEMORY\n> \n> To enable the option at runtime add the following line to pg_option:\n> \n> free_tuple_memory=1\n> \n> Here is the patch:\n> \n> *** src/include/utils/portal.h.orig\tWed May 26 09:07:16 1999\n> --- src/include/utils/portal.h\tFri Jun 11 18:06:07 1999\n> ***************\n> *** 80,85 ****\n> --- 80,89 ----\n> extern PortalVariableMemory PortalGetVariableMemory(Portal portal);\n> extern PortalHeapMemory PortalGetHeapMemory(Portal portal);\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + bool PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer);\n> + #endif\n> + \n> /* estimate of the maximum number of open portals a user would have,\n> * used in initially sizing the PortalHashTable in\tEnablePortalManager()\n> */\n> *** src/backend/utils/mmgr/portalmem.c.orig\tWed May 26 09:06:02 1999\n> --- src/backend/utils/mmgr/portalmem.c\tFri Jun 11 18:06:28 1999\n> ***************\n> *** 289,294 ****\n> --- 289,312 ----\n> \t}\n> }\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + /*\n> + * PortalHeapMemoryIsValid --\n> + *\n> + * Check if a pointer is allocated in a memory context.\n> + *\n> + */\n> + bool\n> + PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer)\n> + {\n> + \tHeapMemoryBlock block = HEAPMEMBLOCK((PortalHeapMemory) context);\n> + \n> + \tAssertState(PointerIsValid(block));\n> + \n> + \treturn (AllocSetContains(&block->setData, pointer));\n> + }\n> + #endif\n> + \n> /* ----------------\n> *\t\tPortalHeapMemoryRealloc\n> * ----------------\n> *** src/include/utils/trace.h.orig\tSat Jun 5 09:00:40 1999\n> --- src/include/utils/trace.h\tFri Jun 11 19:01:30 1999\n> ***************\n> *** 64,69 ****\n> --- 64,72 ----\n> \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n> \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n> \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tOPT_FREE_TUPLE_MEMORY,\t\t/* try to 
pfree memory for each tuple */\n> + #endif\n> \n> \tNUM_PG_OPTIONS\t\t\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 83,91 ****\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 86,94 ----\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/utils/misc/trace.c.orig\tSat Jun 5 09:00:37 1999\n> --- src/backend/utils/misc/trace.c\tFri Jun 11 19:01:31 1999\n> ***************\n> *** 73,78 ****\n> --- 73,81 ----\n> \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n> \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n> \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\"free_tuple_memory\",\t\t/* try to pfree memory for each tuple */\n> + #endif\n> \n> \t/* NUM_PG_OPTIONS */\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 404,412 ****\n> }\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 407,415 ----\n> }\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/access/common/heaptuple.c.orig\tWed May 26 09:01:59 1999\n> --- src/backend/access/common/heaptuple.c\tFri Jun 11 19:08:33 1999\n> ***************\n> *** 27,32 ****\n> --- 27,37 ----\n> #include <storage/bufpage.h>\n> #include <utils/memutils.h>\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> #ifndef HAVE_MEMMOVE\n> #include <regex/utils.h>\n> #else\n> ***************\n> *** 93,98 ****\n> --- 98,106 ----\n> \tint\t\t\ti;\n> \tint\t\t\tnumberOfAttributes = tupleDesc->natts;\n> \tForm_pg_attribute *att = tupleDesc->attrs;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> + #endif\n> \n> \tif (bit != NULL)\n> \t{\n> ***************\n> *** 131,136 ****\n> --- 139,152 ----\n> \t\t\t\t*infomask |= HEAP_HASVARLENA;\n> \t\t\t\tdata_length = VARSIZE(DatumGetPointer(value[i]));\n> \t\t\t\tmemmove(data, DatumGetPointer(value[i]), data_length);\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree value[i] - dz */\n> + \t\t\t\tif (free_tuple_memory &&\n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> + \t\t\t\t\tpfree(value[i]);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t\tbreak;\n> \t\t\tcase sizeof(char):\n> \t\t\t\t*data = att[i]->attbyval ?\n> ***************\n> *** 147,154 ****\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]),\n> ! \t\t\t\t\t\tatt[i]->attlen);\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> --- 163,177 ----\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]), att[i]->attlen);\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \t\t\t\t/* try to pfree value[i] - dz */\n> ! \t\t\t\tif (free_tuple_memory &&\n> ! \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> ! \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> ! \t\t\t\t\tpfree(value[i]);\n> ! \t\t\t\t}\n> ! 
#endif\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> *** src/backend/executor/nodeAgg.c.orig\tWed May 26 09:02:57 1999\n> --- src/backend/executor/nodeAgg.c\tFri Jun 11 19:10:27 1999\n> ***************\n> *** 31,36 ****\n> --- 31,41 ----\n> #include \"utils/syscache.h\"\n> #include \"optimizer/clauses.h\"\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> /*\n> * AggFuncInfo -\n> *\t keeps the transition functions information around\n> ***************\n> *** 113,119 ****\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! \n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> --- 118,126 ----\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> ! #endif\n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> ***************\n> *** 241,246 ****\n> --- 248,257 ----\n> \t\tfor (;;)\n> \t\t{\n> \t\t\tTupleTableSlot *outerslot;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\tOid\t\t\t\tvalueType;\n> + \t\t\tbool\t\t\tisByValue = 0;\n> + #endif\n> \n> \t\t\tisNull = isNull1 = isNull2 = 0;\n> \t\t\touterslot = ExecProcNode(outerPlan, (Plan *) node);\n> ***************\n> *** 293,298 ****\n> --- 304,334 ----\n> \t\t\t\t\tnewVal = ExecEvalExpr(aggref->target, econtext,\n> \t\t\t\t\t\t\t\t\t\t &isNull, &isDone);\n> \t\t\t\t}\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\tif (free_tuple_memory) {\n> + \t\t\t\t\tswitch (nodeTag(aggref->target)) {\n> + \t\t\t\t\tcase T_Const:\n> + \t\t\t\t\t\tisByValue = ((Const*) (aggref->target))->constbyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Var:\n> + \t\t\t\t\t\tvalueType = ((Var*) (aggref->target))->vartype;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Array:\n> + \t\t\t\t\t\tisByValue = ((Array*)(aggref->target))->arrayelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_ArrayRef:\n> + \t\t\t\t\t\tisByValue =((ArrayRef*)(aggref->target))->refelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Expr:\n> + \t\t\t\t\t\tvalueType = ((Expr*) (aggref->target))->typeOid;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tdefault:\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\t}\n> + \t\t\t\t}\n> + #endif\n> \n> \t\t\t\tif (isNull && !aggref->usenulls)\n> \t\t\t\t\tcontinue;\t/* ignore this tuple for this agg */\n> ***************\n> *** 353,358 ****\n> --- 389,404 ----\n> \t\t\t\t\t\t\t\t\t\t (FmgrValues *) args, &isNull2);\n> \t\t\t\t\tAssert(!isNull2);\n> \t\t\t\t}\n> + \n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree newVal if not isByValue - dz */\n> + \t\t\t\tif (free_tuple_memory && !isByValue && \n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) newVal))\n> + \t\t\t\t{\n> + \t\t\t\t\tpfree(newVal);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t}\n> \n> \t\t\t/*\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian 
| http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 12 Jun 1999 10:06:57 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: patch for large queries" }, { "msg_contents": "\nI think it was agreed that this patch was too ugly to apply. We have\naddressed the aggregate memory case already.\n\n> Hi Hackers,\n> \n> I don't like last minute patches before the final freeze, but I believe that\n> this one could be useful for people experiencing out-of-memory crashes while\n> executing queries which retrieve or use a very large number of tuples.\n> \n> The problem happens when storage is allocated for functions results used in\n> a large query, for example:\n> \n> select upper(name) from big_table;\n> select big_table.array[1] from big_table;\n> select count(upper(name)) from big_table;\n> \n> This patch is a dirty hack that fixes the out-of-memory problem for the most\n> common cases, like the above ones. It is not the final solution for the\n> problem but it can work for some people, so I'm posting it.\n> \n> The patch should be safe because all changes are under #ifdef. Furthermore\n> the feature can be enabled or disabled at runtime by the `free_tuple_memory'\n> options in the pg_options file. The option is disabled by default and must\n> be explicitly enabled at runtime to have any effect.\n> \n> To enable the patch add the follwing line to Makefile.custom:\n> \n> CUSTOM_COPT += -DFREE_TUPLE_MEMORY\n> \n> To enable the option at runtime add the following line to pg_option:\n> \n> free_tuple_memory=1\n> \n> Here is the patch:\n> \n> *** src/include/utils/portal.h.orig\tWed May 26 09:07:16 1999\n> --- src/include/utils/portal.h\tFri Jun 11 18:06:07 1999\n> ***************\n> *** 80,85 ****\n> --- 80,89 ----\n> extern PortalVariableMemory PortalGetVariableMemory(Portal portal);\n> extern PortalHeapMemory PortalGetHeapMemory(Portal portal);\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + bool PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer);\n> + #endif\n> + \n> /* estimate of the maximum number of open portals a user would have,\n> * used in initially sizing the PortalHashTable in\tEnablePortalManager()\n> */\n> *** src/backend/utils/mmgr/portalmem.c.orig\tWed May 26 09:06:02 1999\n> --- src/backend/utils/mmgr/portalmem.c\tFri Jun 11 18:06:28 1999\n> ***************\n> *** 289,294 ****\n> --- 289,312 ----\n> \t}\n> }\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + /*\n> + * PortalHeapMemoryIsValid --\n> + *\n> + * Check if a pointer is allocated in a memory context.\n> + *\n> + */\n> + bool\n> + PortalHeapMemoryIsValid(MemoryContext context, Pointer pointer)\n> + {\n> + \tHeapMemoryBlock block = HEAPMEMBLOCK((PortalHeapMemory) context);\n> + \n> + \tAssertState(PointerIsValid(block));\n> + \n> + \treturn (AllocSetContains(&block->setData, pointer));\n> + }\n> + #endif\n> + \n> /* ----------------\n> *\t\tPortalHeapMemoryRealloc\n> * ----------------\n> *** src/include/utils/trace.h.orig\tSat Jun 5 09:00:40 1999\n> --- src/include/utils/trace.h\tFri Jun 11 19:01:30 1999\n> ***************\n> *** 64,69 ****\n> --- 64,72 ----\n> \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n> \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n> \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tOPT_FREE_TUPLE_MEMORY,\t\t/* try to pfree 
memory for each tuple */\n> + #endif\n> \n> \tNUM_PG_OPTIONS\t\t\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 83,91 ****\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 86,94 ----\n> #endif\t /* TRACE_H */\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/utils/misc/trace.c.orig\tSat Jun 5 09:00:37 1999\n> --- src/backend/utils/misc/trace.c\tFri Jun 11 19:01:31 1999\n> ***************\n> *** 73,78 ****\n> --- 73,81 ----\n> \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n> \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n> \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\"free_tuple_memory\",\t\t/* try to pfree memory for each tuple */\n> + #endif\n> \n> \t/* NUM_PG_OPTIONS */\t\t/* must be the last item of enum */\n> };\n> ***************\n> *** 404,412 ****\n> }\n> \n> /*\n> ! * Local variables:\n> ! *\ttab-width: 4\n> ! *\tc-indent-level: 4\n> ! *\tc-basic-offset: 4\n> * End:\n> */\n> --- 407,415 ----\n> }\n> \n> /*\n> ! * Local Variables:\n> ! * tab-width: 4\n> ! * c-indent-level: 4\n> ! * c-basic-offset: 4\n> * End:\n> */\n> *** src/backend/access/common/heaptuple.c.orig\tWed May 26 09:01:59 1999\n> --- src/backend/access/common/heaptuple.c\tFri Jun 11 19:08:33 1999\n> ***************\n> *** 27,32 ****\n> --- 27,37 ----\n> #include <storage/bufpage.h>\n> #include <utils/memutils.h>\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> #ifndef HAVE_MEMMOVE\n> #include <regex/utils.h>\n> #else\n> ***************\n> *** 93,98 ****\n> --- 98,106 ----\n> \tint\t\t\ti;\n> \tint\t\t\tnumberOfAttributes = tupleDesc->natts;\n> \tForm_pg_attribute *att = tupleDesc->attrs;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> + #endif\n> \n> \tif (bit != NULL)\n> \t{\n> ***************\n> *** 131,136 ****\n> --- 139,152 ----\n> \t\t\t\t*infomask |= HEAP_HASVARLENA;\n> \t\t\t\tdata_length = VARSIZE(DatumGetPointer(value[i]));\n> \t\t\t\tmemmove(data, DatumGetPointer(value[i]), data_length);\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree value[i] - dz */\n> + \t\t\t\tif (free_tuple_memory &&\n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> + \t\t\t\t\tpfree(value[i]);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t\tbreak;\n> \t\t\tcase sizeof(char):\n> \t\t\t\t*data = att[i]->attbyval ?\n> ***************\n> *** 147,154 ****\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]),\n> ! \t\t\t\t\t\tatt[i]->attlen);\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> --- 163,177 ----\n> \t\t\t\t\t\t\t\t *((int32 *) value[i]));\n> \t\t\t\tbreak;\n> \t\t\tdefault:\n> ! \t\t\t\tmemmove(data, DatumGetPointer(value[i]), att[i]->attlen);\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \t\t\t\t/* try to pfree value[i] - dz */\n> ! \t\t\t\tif (free_tuple_memory &&\n> ! \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> ! \t\t\t\t\t\t\t\t\t\t\t(Pointer) value[i])) {\n> ! \t\t\t\t\tpfree(value[i]);\n> ! \t\t\t\t}\n> ! 
#endif\n> \t\t\t\tbreak;\n> \t\t}\n> \t\tdata = (char *) att_addlength((long) data, att[i]->attlen, value[i]);\n> *** src/backend/executor/nodeAgg.c.orig\tWed May 26 09:02:57 1999\n> --- src/backend/executor/nodeAgg.c\tFri Jun 11 19:10:27 1999\n> ***************\n> *** 31,36 ****\n> --- 31,41 ----\n> #include \"utils/syscache.h\"\n> #include \"optimizer/clauses.h\"\n> \n> + #ifdef FREE_TUPLE_MEMORY\n> + #include <utils/portal.h>\n> + #include <utils/trace.h>\n> + #endif\n> + \n> /*\n> * AggFuncInfo -\n> *\t keeps the transition functions information around\n> ***************\n> *** 113,119 ****\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! \n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> --- 118,126 ----\n> \t\t\t\tisNull1 = FALSE,\n> \t\t\t\tisNull2 = FALSE;\n> \tbool\t\tqual_result;\n> ! #ifdef FREE_TUPLE_MEMORY\n> ! \tbool\t\tfree_tuple_memory = pg_options[OPT_FREE_TUPLE_MEMORY];\n> ! #endif\n> \n> \t/* ---------------------\n> \t *\tget state info from node\n> ***************\n> *** 241,246 ****\n> --- 248,257 ----\n> \t\tfor (;;)\n> \t\t{\n> \t\t\tTupleTableSlot *outerslot;\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\tOid\t\t\t\tvalueType;\n> + \t\t\tbool\t\t\tisByValue = 0;\n> + #endif\n> \n> \t\t\tisNull = isNull1 = isNull2 = 0;\n> \t\t\touterslot = ExecProcNode(outerPlan, (Plan *) node);\n> ***************\n> *** 293,298 ****\n> --- 304,334 ----\n> \t\t\t\t\tnewVal = ExecEvalExpr(aggref->target, econtext,\n> \t\t\t\t\t\t\t\t\t\t &isNull, &isDone);\n> \t\t\t\t}\n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\tif (free_tuple_memory) {\n> + \t\t\t\t\tswitch (nodeTag(aggref->target)) {\n> + \t\t\t\t\tcase T_Const:\n> + \t\t\t\t\t\tisByValue = ((Const*) (aggref->target))->constbyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Var:\n> + \t\t\t\t\t\tvalueType = ((Var*) (aggref->target))->vartype;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Array:\n> + \t\t\t\t\t\tisByValue = ((Array*)(aggref->target))->arrayelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_ArrayRef:\n> + \t\t\t\t\t\tisByValue =((ArrayRef*)(aggref->target))->refelembyval;\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tcase T_Expr:\n> + \t\t\t\t\t\tvalueType = ((Expr*) (aggref->target))->typeOid;\n> + \t\t\t\t\t\tisByValue = typeByVal(typeidType(valueType));\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\tdefault:\n> + \t\t\t\t\t\tbreak;\n> + \t\t\t\t\t}\n> + \t\t\t\t}\n> + #endif\n> \n> \t\t\t\tif (isNull && !aggref->usenulls)\n> \t\t\t\t\tcontinue;\t/* ignore this tuple for this agg */\n> ***************\n> *** 353,358 ****\n> --- 389,404 ----\n> \t\t\t\t\t\t\t\t\t\t (FmgrValues *) args, &isNull2);\n> \t\t\t\t\tAssert(!isNull2);\n> \t\t\t\t}\n> + \n> + #ifdef FREE_TUPLE_MEMORY\n> + \t\t\t\t/* try to pfree newVal if not isByValue - dz */\n> + \t\t\t\tif (free_tuple_memory && !isByValue && \n> + \t\t\t\t\tPortalHeapMemoryIsValid(CurrentMemoryContext,\n> + \t\t\t\t\t\t\t\t\t\t\t(Pointer) newVal))\n> + \t\t\t\t{\n> + \t\t\t\t\tpfree(newVal);\n> + \t\t\t\t}\n> + #endif\n> \t\t\t}\n> \n> \t\t\t/*\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian 
| http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 27 Sep 1999 16:31:34 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: patch for large queries" } ]
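Distilled from the patch quoted above, with the diff markers stripped: the per-attribute cleanup it adds to heap_formtuple() boils down to the fragment below. This is only a readability paraphrase of Massimo's code (same identifiers as the patch), not separate or additional source.

    /* In the pass-by-reference branches, after the attribute value has
     * been copied into the tuple being formed: if the source Datum was
     * palloc'd in the current portal heap context, release it right away
     * instead of letting it accumulate until end of query.
     */
    if (free_tuple_memory &&
        PortalHeapMemoryIsValid(CurrentMemoryContext, (Pointer) value[i]))
        pfree((Pointer) value[i]);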
[ { "msg_contents": "Hi,\n\nwhile playing with aggregates I found this bug in the planner:\n\ndz=> select count(1) from my_table;\npqReadData() -- backend closed the channel unexpectedly.\n This probably means the backend terminated abnormally\n before or while processing the request.\nWe have lost the connection to the backend, so further processing is impossible. Terminating.\n\nThe debugger prints the following information:\n\n(xxgdb) cont\nProgram received signal SIGSEGV, Segmentation fault.\n0x80d93cf in set_agg_tlist_references (aggNode=0x82a4310) at setrefs.c:765\n(xxgdb) info stack\n#0 0x80d93cf in set_agg_tlist_references (aggNode=0x82a4310) at setrefs.c:765\n#1 0x80d80ac in union_planner (parse=0x82a40a0) at planner.c:319\n#2 0x80d7d05 in planner (parse=0x82a40a0) at planner.c:83\n#3 0x80fd344 in pg_parse_and_plan (query_string=0xbffef2d8 \"select count(1) from my_table;\", typev=0x0, nargs=0, queryListP=0xbffef268, dest=Remote, aclOverride=0 '\\000') at postgres.c:590\n#4 0x80fd4a3 in pg_exec_query_dest (query_string=0xbffef2d8 \"select count(1) from my_table;\", dest=Remote, aclOverride=0) at postgres.c:678\n#5 0x80fd454 in pg_exec_query (query_string=0xbffef2d8 \"select count(1) from my_table;\") at postgres.c:656\n#6 0x80fe6c8 in PostgresMain (argc=9, argv=0xbffff850, real_argc=6, real_argv=0xbffffd6c) at postgres.c:1658\n#7 0x80e32ec in DoBackend (port=0x8235ca8) at postmaster.c:1628\n(xxgdb) print *aggNode\n$2 = {\n plan = {\n type = T_Agg, \n cost = 0, \n plan_size = 0, \n plan_width = 0, \n plan_tupperpage = 0, \n state = 0x0, \n targetlist = 0x82a44f8, \n qual = 0x0, \n lefttree = 0x0, \n righttree = 0x0, \n extParam = 0x0, \n locParam = 0x0, \n chgParam = 0x0, \n initPlan = 0x0, \n subPlan = 0x0, \n nParamExec = 0\n }, \n aggs = 0x0, \n aggstate = 0x0\n}\n(xxgdb) \n\nThe problem is caused by a null plan.lefttree in set_agg_tlist_references()\n(setrefs.c:765), but I don't know what it means:\n\n\tsubplanTargetList = aggNode->plan.lefttree->targetlist;\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Fri, 11 Jun 1999 21:52:21 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "bug with aggregates" }, { "msg_contents": "Massimo Dal Zotto <[email protected]> writes:\n> dz=> select count(1) from my_table;\n> pqReadData() -- backend closed the channel unexpectedly.\n\nOops. Probably not a big enough bug to delay 6.5 release for,\nbut I'll look into it and commit a fix shortly after the release.\nI think the parser may be doing the wrong thing here. 
Thanks!\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 12 Jun 1999 12:21:21 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] bug with aggregates " }, { "msg_contents": "Massimo Dal Zotto <[email protected]> writes:\n> dz=> select count(1) from my_table;\n> pqReadData() -- backend closed the channel unexpectedly.\n\nFurther notes --- I find that you can get the same crash with no table\nat all,\n\t\tselect count(1);\n\n6.4.2 executes both queries --- but curiously enough, it produces \"1\"\nregardless of the size of the table you mention, which is not surprising\nwhen you look at its plan ... it optimizes out the scan of the table\nentirely. But if you do\n\t\tselect a,count(1) from table group by a;\nthen you get a count of the number of rows in each group, which is more\nor less what I'd expect. This behavior is not consistent with the\nungrouped case.\n\nAfter a quick gander at the SQL spec, I see no evidence that either of\nthese queries is allowed by the spec. I'm inclined to think that\n\"select count(1);\" ought to be disallowed and \"select count(1) from\nmy_table;\" ought to be treated the same as \"select count(*) from\nmy_table;\", like it is in the grouped case. Comments?\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 12 Jun 1999 12:42:32 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] bug with aggregates " }, { "msg_contents": "Massimo Dal Zotto <[email protected]> writes:\n> dz=> select count(1) from my_table;\n> pqReadData() -- backend closed the channel unexpectedly.\n\nPoking into this failure revealed a potentially serious problem in\nexecQual.c, so I decided it would be wise to fix it now rather than\nwait till after 6.5. In the situation where ExecTargetList() is asked\nto generate a null tuple --- which arises in the case above, and\nevidently in other cases judging from the comments there and the\nmultiple bogus ways that people have tried to fix it before ---\nit was handing back a palloc'd but uninitialized chunk of memory.\nThis would result in unpredictable behavior if anyone actually tried\nto do anything with the tuple. In the case above, nodeAgg.c tried to\ncopy the tuple, leading to coredumps some of the time. I fixed\nExecTargetList to generate a valid tuple containing zero attributes,\nwhich should work reliably.\n\nI had managed to break the planner's handling of this case too, so I\nfigured I would fix that as long as I was annoying Marc anyway ;-).\n\nThe behavior is now back to that of 6.4.2: you get \"1\" when the query is\nnot grouped and row counts when it is. I still think that that's wrong,\nbut I will not risk trying to change it just before release.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 12 Jun 1999 15:55:42 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] bug with aggregates " } ]
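Pulling the behaviour described in the messages above together as queries (the results are those reported in the thread, not re-verified here; my_table and column a are just the thread's example names):

    -- Unpatched 6.5 tree: both of these crashed the backend in
    -- set_agg_tlist_references().
    SELECT count(1) FROM my_table;
    SELECT count(1);

    -- 6.4.2, and 6.5 after Tom's fix:
    SELECT count(1) FROM my_table;                -- returns 1, whatever the table size
    SELECT a, count(1) FROM my_table GROUP BY a;  -- returns the row count of each group
    SELECT count(*) FROM my_table;                -- returns the table's total row count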
[ { "msg_contents": "What does this error message means ?\n\nI did a query:\ndiscovery=> select a.msg_id,b.key_id into qq from publications a, keywords b where a.title ~* b.name and b.key_id in ( select key_id from keywords);\nERROR: regcomp failed with error empty (sub)expression\n\nI found what's the problem:\n\nin a.title ~* b.name b.name = '' (NOT NULL)\n\ndiscovery=> select * from keywords where key_id=449;\nkey_id|name\n------+----\n 449| \n(1 row)\n\n\nI consider this as a bug in regcomp code. rexx, perl match empty string ! \n\nThis happens with slightly outdated (week ago) 6.5 cvs\n\n\tRegards,\n\n\t\tOleg\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Sat, 12 Jun 1999 05:51:58 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": true, "msg_subject": "BUG: ERROR: regcomp failed with error empty (sub)expression" } ]
[ { "msg_contents": "Two small patches:\n\n1)\tmake default NBuffers = DEF_MAXBACKENDS*2 as required by check in\n\tPostmasterMain().\n\n2)\tcheck for QueryCancel in the copy command. Maybe we should do the\n\tsame in vacuum command (Vadim?). \n\n\n*** src/include/miscadmin.h.orig\tWed May 26 09:06:39 1999\n--- src/include/miscadmin.h\tSat Jun 12 20:01:10 1999\n***************\n*** 106,112 ****\n *\t\tdefault number of buffers in buffer pool\n *\n */\n! #define NDBUFS 64\n \n /*****************************************************************************\n *\t pdir.h --\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t *\n--- 106,112 ----\n *\t\tdefault number of buffers in buffer pool\n *\n */\n! #define NDBUFS (2*DEF_MAXBACKENDS)\n \n /*****************************************************************************\n *\t pdir.h --\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t *\n*** src/backend/commands/copy.c.orig\tSun May 30 09:01:01 1999\n--- src/backend/commands/copy.c\tSat Jun 12 20:23:51 1999\n***************\n*** 18,23 ****\n--- 18,24 ----\n \n #include <access/heapam.h>\n #include <tcop/dest.h>\n+ #include \"tcop/tcopprot.h\"\n #include <fmgr.h>\n #include <miscadmin.h>\n #include <utils/builtins.h>\n***************\n*** 253,259 ****\n \t */\n \tif (file_opened)\n \t{\n! \t\tFreeFile(fp);\n \t\tfile_opened = false;\n \t}\n \n--- 254,265 ----\n \t */\n \tif (file_opened)\n \t{\n! \t\t/*\n! \t\t * This is unnecessary: files are closed automatically by\n! \t\t * AtEOXact_Files() at transaction abort. -- dz\n! \t\t */\n! \n! \t\t/* FreeFile(fp); */\n \t\tfile_opened = false;\n \t}\n \n***************\n*** 419,424 ****\n--- 425,432 ----\n \n \twhile (HeapTupleIsValid(tuple = heap_getnext(scandesc, 0)))\n \t{\n+ \t\tif (QueryCancel)\n+ \t\t\tCancelQuery();\n \n \t\tif (oids && !binary)\n \t\t{\n***************\n*** 691,696 ****\n--- 699,707 ----\n \tlineno = 0;\n \twhile (!done)\n \t{\n+ \t\tif (QueryCancel)\n+ \t\t\tCancelQuery();\n+ \n \t\tif (!binary)\n \t\t{\n #ifdef COPY_PATCH\n\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Sat, 12 Jun 1999 20:42:24 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "new patches" }, { "msg_contents": "Massimo Dal Zotto <[email protected]> writes:\n> Two small patches:\n> 1)\tmake default NBuffers = DEF_MAXBACKENDS*2 as required by check in\n> \tPostmasterMain().\n\nI had proposed moving NDBUFS into config.h and fixing the default a few\ndays ago, but then forgot to do it. As things stand, if you increase\nDEF_MAXBACKENDS at configure time, you'll get a postmaster that won't\nstart unless you give it a -B setting larger than default. This is bad,\nand I agree with Massimo that we ought to make sure the default NBuffers\nis one that will work with the default MaxBackends.\n\nThis patch is not quite right though, since it doesn't account for the\nother part of PostmasterMain's condition (NBuffers >= 16). Will fix.\n\n> 2)\tcheck for QueryCancel in the copy command. Maybe we should do the\n> \tsame in vacuum command (Vadim?). 
\n\nI'm not too excited about adding QueryCancel support so soon before the\nrelease, but the part of your patch that you didn't mention (diking out\nthe \"file_opened\" hack) is really a critical fix --- as the code stood\nit would try to fclose() the same stdio file twice, which is disastrous\nin most stdio libraries. I applied that part of it... good catch!\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 12 Jun 1999 16:49:25 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] new patches " }, { "msg_contents": "Massimo Dal Zotto wrote:\n> \n> 2) check for QueryCancel in the copy command. Maybe we should do the\n> same in vacuum command (Vadim?).\n\nYes, as well as in other places - QueryCancel doesn't work when\nbackend waits for lock. But not now...\n\nVadim\n", "msg_date": "Sun, 13 Jun 1999 17:11:03 +0800", "msg_from": "Vadim Mikheev <[email protected]>", "msg_from_op": false, "msg_subject": "Re: new patches" }, { "msg_contents": "> Two small patches:\n> \n> 1)\tmake default NBuffers = DEF_MAXBACKENDS*2 as required by check in\n> \tPostmasterMain().\n\nSeems this line is gone in the current sources.\n> \n> 2)\tcheck for QueryCancel in the copy command. Maybe we should do the\n> \tsame in vacuum command (Vadim?). \n\nApplied.\n\n> \n> \n> *** src/include/miscadmin.h.orig\tWed May 26 09:06:39 1999\n> --- src/include/miscadmin.h\tSat Jun 12 20:01:10 1999\n> ***************\n> *** 106,112 ****\n> *\t\tdefault number of buffers in buffer pool\n> *\n> */\n> ! #define NDBUFS 64\n> \n> /*****************************************************************************\n> *\t pdir.h --\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t *\n> --- 106,112 ----\n> *\t\tdefault number of buffers in buffer pool\n> *\n> */\n> ! #define NDBUFS (2*DEF_MAXBACKENDS)\n> \n> /*****************************************************************************\n> *\t pdir.h --\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t *\n> *** src/backend/commands/copy.c.orig\tSun May 30 09:01:01 1999\n> --- src/backend/commands/copy.c\tSat Jun 12 20:23:51 1999\n> ***************\n> *** 18,23 ****\n> --- 18,24 ----\n> \n> #include <access/heapam.h>\n> #include <tcop/dest.h>\n> + #include \"tcop/tcopprot.h\"\n> #include <fmgr.h>\n> #include <miscadmin.h>\n> #include <utils/builtins.h>\n> ***************\n> *** 253,259 ****\n> \t */\n> \tif (file_opened)\n> \t{\n> ! \t\tFreeFile(fp);\n> \t\tfile_opened = false;\n> \t}\n> \n> --- 254,265 ----\n> \t */\n> \tif (file_opened)\n> \t{\n> ! \t\t/*\n> ! \t\t * This is unnecessary: files are closed automatically by\n> ! \t\t * AtEOXact_Files() at transaction abort. -- dz\n> ! \t\t */\n> ! \n> ! 
\t\t/* FreeFile(fp); */\n> \t\tfile_opened = false;\n> \t}\n> \n> ***************\n> *** 419,424 ****\n> --- 425,432 ----\n> \n> \twhile (HeapTupleIsValid(tuple = heap_getnext(scandesc, 0)))\n> \t{\n> + \t\tif (QueryCancel)\n> + \t\t\tCancelQuery();\n> \n> \t\tif (oids && !binary)\n> \t\t{\n> ***************\n> *** 691,696 ****\n> --- 699,707 ----\n> \tlineno = 0;\n> \twhile (!done)\n> \t{\n> + \t\tif (QueryCancel)\n> + \t\t\tCancelQuery();\n> + \n> \t\tif (!binary)\n> \t\t{\n> #ifdef COPY_PATCH\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 27 Sep 1999 16:30:35 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: new patches" }, { "msg_contents": "\nTom, any comment on this?\n\n\n> Massimo Dal Zotto <[email protected]> writes:\n> > Two small patches:\n> > 1)\tmake default NBuffers = DEF_MAXBACKENDS*2 as required by check in\n> > \tPostmasterMain().\n> \n> I had proposed moving NDBUFS into config.h and fixing the default a few\n> days ago, but then forgot to do it. As things stand, if you increase\n> DEF_MAXBACKENDS at configure time, you'll get a postmaster that won't\n> start unless you give it a -B setting larger than default. This is bad,\n> and I agree with Massimo that we ought to make sure the default NBuffers\n> is one that will work with the default MaxBackends.\n> \n> This patch is not quite right though, since it doesn't account for the\n> other part of PostmasterMain's condition (NBuffers >= 16). Will fix.\n> \n> > 2)\tcheck for QueryCancel in the copy command. Maybe we should do the\n> > \tsame in vacuum command (Vadim?). \n> \n> I'm not too excited about adding QueryCancel support so soon before the\n> release, but the part of your patch that you didn't mention (diking out\n> the \"file_opened\" hack) is really a critical fix --- as the code stood\n> it would try to fclose() the same stdio file twice, which is disastrous\n> in most stdio libraries. I applied that part of it... good catch!\n> \n> \t\t\tregards, tom lane\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 29 Nov 1999 17:25:57 -0500 (EST)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PATCHES] Re: [HACKERS] new patches" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> Tom, any comment on this?\n\nI believe all those patches are applied long since in current sources\n(Massimo might want to check though).\n\nI even did something about QueryCancel in vacuum yesterday...\n\n\t\t\tregards, tom lane\n\n\n>> Massimo Dal Zotto <[email protected]> writes:\n>>>> Two small patches:\n>>>> 1)\tmake default NBuffers = DEF_MAXBACKENDS*2 as required by check in\n>>>> PostmasterMain().\n>> \n>> I had proposed moving NDBUFS into config.h and fixing the default a few\n>> days ago, but then forgot to do it. As things stand, if you increase\n>> DEF_MAXBACKENDS at configure time, you'll get a postmaster that won't\n>> start unless you give it a -B setting larger than default. This is bad,\n>> and I agree with Massimo that we ought to make sure the default NBuffers\n>> is one that will work with the default MaxBackends.\n>> \n>> This patch is not quite right though, since it doesn't account for the\n>> other part of PostmasterMain's condition (NBuffers >= 16). Will fix.\n>> \n>>>> 2)\tcheck for QueryCancel in the copy command. Maybe we should do the\n>>>> same in vacuum command (Vadim?). \n>> \n>> I'm not too excited about adding QueryCancel support so soon before the\n>> release, but the part of your patch that you didn't mention (diking out\n>> the \"file_opened\" hack) is really a critical fix --- as the code stood\n>> it would try to fclose() the same stdio file twice, which is disastrous\n>> in most stdio libraries. I applied that part of it... good catch!\n", "msg_date": "Mon, 29 Nov 1999 21:12:45 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [PATCHES] Re: [HACKERS] new patches " } ]
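A note on why the duplicate-close fix Tom applied from this patch was critical: once fclose() has returned, the FILE structure may already have been freed, so a second fclose() on the same pointer is undefined behaviour and in most C libraries corrupts the allocator or crashes outright. A minimal standalone illustration, unrelated to the PostgreSQL sources (the file name is arbitrary):

    #include <stdio.h>

    int
    main(void)
    {
        FILE *fp = fopen("/tmp/copy_test", "w");

        if (fp == NULL)
            return 1;

        fclose(fp);     /* fine: the stream is flushed and released */
        fclose(fp);     /* undefined behaviour: fp is now a dangling pointer,
                         * and most stdio implementations will touch freed
                         * memory here */
        return 0;
    }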
[ { "msg_contents": "I have Web site where I use persistent connection between\nhttpd (Apache) and database (postgres,6.5). I noticed strange\nresults I got after reloading page with results from query\nwhen I destroydb , createdb, fill db ( with the same data ).\nIt seems backend doesn't close connection when db is destroyed\nand this produces unpredictable results. My application is\nwritten in Perl and uses DBI/DBD for persistent connection.\nI don't know is it DBI/DBD problem or backend must close\nall connections to DB when it destroyed.\n\n\tRegards,\n\t\tOleg\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Sat, 12 Jun 1999 22:57:38 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": true, "msg_subject": "destroydb doesn't close connection with client (httpd <-> pg)" }, { "msg_contents": "Oleg Bartunov wrote:\n> \n> I have Web site where I use persistent connection between\n> httpd (Apache) and database (postgres,6.5). I noticed strange\n> results I got after reloading page with results from query\n> when I destroydb , createdb, fill db ( with the same data ).\n> It seems backend doesn't close connection when db is destroyed\n> and this produces unpredictable results. My application is\n> written in Perl and uses DBI/DBD for persistent connection.\n> I don't know is it DBI/DBD problem or backend must close\n> all connections to DB when it destroyed.\n> \n> Regards,\n> Oleg\n> _____________________________________________________________\n> Oleg Bartunov, sci.researcher, hostmaster of AstroNet,\n> Sternberg Astronomical Institute, Moscow University (Russia)\n> Internet: [email protected], http://www.sai.msu.su/~megera/\n> phone: +007(095)939-16-83, +007(095)939-23-83\n\n\n\nThis is not DBI/DBD-Pg specific.\n\nA short test with psql shows, that this seems to be \na bug of postgresql itself.\n\nCreate and fill a database. Connect to this database with psql\nand perform some query. Without disconnecting destroy and re-create \nthe database but insert this time different data. Performing\nthe same query a second time will retrieve the same data as before \n\n\nEdmund\n\n\n\n-- \nEdmund Mergl mailto:[email protected]\nIm Haldenhau 9 http://www.bawue.de/~mergl\n70565 Stuttgart fon: +49 711 747503\nGermany\n", "msg_date": "Sat, 12 Jun 1999 21:46:45 +0200", "msg_from": "Edmund Mergl <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "Oleg Bartunov <[email protected]> writes:\n> I don't know is it DBI/DBD problem or backend must close\n> all connections to DB when it destroyed.\n\nYou cannot destroy a DB while there are backends connected to it;\nall hell breaks loose if you do. See thread \"How to destroy your entire\nPostgres installation\" in pg-hackers in late Sept. 1998.\n\nThe correct fix is to add an interlock that prevents \"destroydb\" when\nthere are connected backends. 
I don't know just how that might be done,\nhowever.\n\nThere should be a TODO item for this, but I don't see one:\n * Prevent destroydb when there are backends active in that database\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 12 Jun 1999 16:04:30 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "I also think this is the bug in postgres. I didn't check if this \nbehaivour is the same with 6.4.2 though. It could be nice if\nbackend could communicate with 'persistent' client to tell him\nthat database is destroyed and client probably could decide\nif he wants to try to reestablish connection. So in case database\nrestored nothing dangerous would happens to client (pprobably\njust informational message in that case ).\n\n\tRegards,\n\t\tOleg\n\nOn Sat, 12 Jun 1999, Edmund Mergl wrote:\n\n> Date: Sat, 12 Jun 1999 21:46:45 +0200\n> From: Edmund Mergl <[email protected]>\n> To: Oleg Bartunov <[email protected]>\n> Cc: [email protected]\n> Subject: Re: [HACKERS] destroydb doesn't close connection with client (httpd <-> pg)\n> \n> Oleg Bartunov wrote:\n> > \n> > I have Web site where I use persistent connection between\n> > httpd (Apache) and database (postgres,6.5). I noticed strange\n> > results I got after reloading page with results from query\n> > when I destroydb , createdb, fill db ( with the same data ).\n> > It seems backend doesn't close connection when db is destroyed\n> > and this produces unpredictable results. My application is\n> > written in Perl and uses DBI/DBD for persistent connection.\n> > I don't know is it DBI/DBD problem or backend must close\n> > all connections to DB when it destroyed.\n> > \n> > Regards,\n> > Oleg\n> > _____________________________________________________________\n> > Oleg Bartunov, sci.researcher, hostmaster of AstroNet,\n> > Sternberg Astronomical Institute, Moscow University (Russia)\n> > Internet: [email protected], http://www.sai.msu.su/~megera/\n> > phone: +007(095)939-16-83, +007(095)939-23-83\n> \n> \n> \n> This is not DBI/DBD-Pg specific.\n> \n> A short test with psql shows, that this seems to be \n> a bug of postgresql itself.\n> \n> Create and fill a database. Connect to this database with psql\n> and perform some query. Without disconnecting destroy and re-create \n> the database but insert this time different data. 
Performing\n> the same query a second time will retrieve the same data as before \n> \n> \n> Edmund\n> \n> \n> \n> -- \n> Edmund Mergl mailto:[email protected]\n> Im Haldenhau 9 http://www.bawue.de/~mergl\n> 70565 Stuttgart fon: +49 711 747503\n> Germany\n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Sun, 13 Jun 1999 00:04:43 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "On Sat, 12 Jun 1999, Tom Lane wrote:\n\n> Date: Sat, 12 Jun 1999 16:04:30 -0400\n> From: Tom Lane <[email protected]>\n> To: Oleg Bartunov <[email protected]>\n> Cc: [email protected]\n> Subject: Re: [HACKERS] destroydb doesn't close connection with client (httpd <-> pg) \n> \n> Oleg Bartunov <[email protected]> writes:\n> > I don't know is it DBI/DBD problem or backend must close\n> > all connections to DB when it destroyed.\n> \n> You cannot destroy a DB while there are backends connected to it;\n> all hell breaks loose if you do. See thread \"How to destroy your entire\n> Postgres installation\" in pg-hackers in late Sept. 1998.\n\nI did it without any problem :-)\n\n> \n> The correct fix is to add an interlock that prevents \"destroydb\" when\n> there are connected backends. I don't know just how that might be done,\n> however.\n\nI'm not sure preventing \"destroydb\" is a good idea. Imagine some\nuser connects to db (via psql) and hold it for a weekend.\nYou will not be able to do some dbadmin work ? \nWhat's wrong if backend close connection, cleanup buffers etc.\nand inform client that db was destroyed. Client could decide -\nexit or try to reestablish connection. If you recreate that db \nnothing will happens. \n\n> \n> There should be a TODO item for this, but I don't see one:\n> * Prevent destroydb when there are backends active in that database\n\nThe problem would be worse in case of 24*7 life-cycle of Web-database \nconnectivity if you decide \"prevent destroydb\" feature !\nI prefer to allow destroydb but close all active connections with\nthis db and inform client. Client will decide what to do.\nAs I understand DBI/DBD is already ready for such behaivour because\nit has ping method. I'm no sure about psql.\n\n\tOleg\n\n> \n\n> \t\t\tregards, tom lane\n> \n\n_____________________________________________________________\nOleg Bartunov, sci.researcher, hostmaster of AstroNet,\nSternberg Astronomical Institute, Moscow University (Russia)\nInternet: [email protected], http://www.sai.msu.su/~megera/\nphone: +007(095)939-16-83, +007(095)939-23-83\n\n", "msg_date": "Sun, 13 Jun 1999 00:41:32 +0400 (MSD)", "msg_from": "Oleg Bartunov <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "Oleg Bartunov <[email protected]> writes:\n>> You cannot destroy a DB while there are backends connected to it;\n>> all hell breaks loose if you do. See thread \"How to destroy your entire\n>> Postgres installation\" in pg-hackers in late Sept. 1998.\n\n> I did it without any problem :-)\n\nYou were lucky. 
I suffered severe database corruption in *other*\ndatabases when I did it by accident last year.\n\n>> The correct fix is to add an interlock that prevents \"destroydb\" when\n>> there are connected backends. I don't know just how that might be done,\n>> however.\n\n> I'm not sure preventing \"destroydb\" is a good idea. Imagine some\n> user connects to db (via psql) and hold it for a weekend.\n\nIf you cannot stop the connected backends first, you should not be able\nto kill the database. I suppose a different way to look at it is that\ndestroydb should kill any backends connected to the database before it\ndoes anything else. I'd suggest that that only happen with user\nconfirmation though, since destroying an active database seems to me\nto be something you'd never really want to do.\n\n> The problem would be worse in case of 24*7 life-cycle of Web-database \n> connectivity if you decide \"prevent destroydb\" feature !\n\nIf you have an active website using a database, what are you doing\ndestroying the database? We're not talking about dropping a temp table\nhere... I find it hard to imagine what you'd use this \"feature\" for,\nexcept for shooting yourself in the foot.\n\n\t\t\tregards, tom lane\n", "msg_date": "Sat, 12 Jun 1999 16:58:47 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "At 04:58 PM 6/12/99 -0400, Tom Lane wrote:\n\n>If you have an active website using a database, what are you doing\n>destroying the database? We're not talking about dropping a temp table\n>here... I find it hard to imagine what you'd use this \"feature\" for,\n>except for shooting yourself in the foot.\n\nOr upgrading Postgres :)\n\nJust joking...Tom, you're absolutely right, by definition a web\nsite being backed by a db that's frequently created and destroyed\nis not a 24/7 site. It it's not 24/7, then you can knock it down,\ndestroy and re-create the database, then coax the postmaster and\nwebserver back into service.\n\nMost active web services will be denying some level of service\nif you simply drop a (non-temp) table. The destroy the whole db?\n\nOmigosh!\n\n\n\n- Don Baccus, Portland OR <[email protected]>\n Nature photos, on-line guides, and other goodies at\n http://donb.photo.net\n", "msg_date": "Sat, 12 Jun 1999 14:27:47 -0700", "msg_from": "Don Baccus <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client\n\t(httpd <-> pg)" }, { "msg_contents": "\nAdded to TODO.\n\n\n> Oleg Bartunov <[email protected]> writes:\n> > I don't know is it DBI/DBD problem or backend must close\n> > all connections to DB when it destroyed.\n> \n> You cannot destroy a DB while there are backends connected to it;\n> all hell breaks loose if you do. See thread \"How to destroy your entire\n> Postgres installation\" in pg-hackers in late Sept. 1998.\n> \n> The correct fix is to add an interlock that prevents \"destroydb\" when\n> there are connected backends. I don't know just how that might be done,\n> however.\n> \n> There should be a TODO item for this, but I don't see one:\n> * Prevent destroydb when there are backends active in that database\n> \n> \t\t\tregards, tom lane\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. 
| Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 12 Jun 1999 20:54:16 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "> Oleg Bartunov <[email protected]> writes:\n> >> You cannot destroy a DB while there are backends connected to it;\n> >> all hell breaks loose if you do. See thread \"How to destroy your entire\n> >> Postgres installation\" in pg-hackers in late Sept. 1998.\n> \n> > I did it without any problem :-)\n> \n> You were lucky. I suffered severe database corruption in *other*\n> databases when I did it by accident last year.\n\nDestroydb is reworked in 6.5 because it flushes all its buffers. I\nspecifically fixed something like this.\n\n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sat, 12 Jun 1999 20:57:37 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "\nCan someone comment on where we are on this?\n\n[Charset iso-8859-2 unsupported, filtering to ASCII...]\n> Oleg Bartunov wrote:\n> > \n> > I have Web site where I use persistent connection between\n> > httpd (Apache) and database (postgres,6.5). I noticed strange\n> > results I got after reloading page with results from query\n> > when I destroydb , createdb, fill db ( with the same data ).\n> > It seems backend doesn't close connection when db is destroyed\n> > and this produces unpredictable results. My application is\n> > written in Perl and uses DBI/DBD for persistent connection.\n> > I don't know is it DBI/DBD problem or backend must close\n> > all connections to DB when it destroyed.\n> > \n> > Regards,\n> > Oleg\n> > _____________________________________________________________\n> > Oleg Bartunov, sci.researcher, hostmaster of AstroNet,\n> > Sternberg Astronomical Institute, Moscow University (Russia)\n> > Internet: [email protected], http://www.sai.msu.su/~megera/\n> > phone: +007(095)939-16-83, +007(095)939-23-83\n> \n> \n> \n> This is not DBI/DBD-Pg specific.\n> \n> A short test with psql shows, that this seems to be \n> a bug of postgresql itself.\n> \n> Create and fill a database. Connect to this database with psql\n> and perform some query. Without disconnecting destroy and re-create \n> the database but insert this time different data. Performing\n> the same query a second time will retrieve the same data as before \n> \n> \n> Edmund\n> \n> \n> \n> -- \n> Edmund Mergl mailto:[email protected]\n> Im Haldenhau 9 http://www.bawue.de/~mergl\n> 70565 Stuttgart fon: +49 711 747503\n> Germany\n> \n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 29 Nov 1999 17:25:02 -0500 (EST)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" }, { "msg_contents": "Bruce Momjian <[email protected]> writes:\n> Can someone comment on where we are on this?\n\nProblem's gone: you cannot destroy a DB containing active backends\nanymore. 
That may not be quite the solution Edmund wanted ;-), but\nit's effective.\n\n>> Create and fill a database. Connect to this database with psql\n>> and perform some query. Without disconnecting destroy and re-create \n>> the database but insert this time different data. Performing\n>> the same query a second time will retrieve the same data as before \n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 29 Nov 1999 21:09:29 -0500", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] destroydb doesn't close connection with client (httpd\n\t<-> pg)" } ]
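Tom's later follow-up above only says that the interlock now exists. As a sketch of what "refuse destroydb while backends are connected" means in practice: every name below (BackendSlot, slots, NumBackendSlots) is invented for illustration, and none of this is the actual PostgreSQL data structure or code.

    #include <stdbool.h>

    typedef struct
    {
        bool         inUse;         /* slot holds a live backend */
        unsigned int databaseId;    /* OID of the database it is attached to */
    } BackendSlot;

    /* Shared, postmaster-maintained table of per-backend slots (invented). */
    extern BackendSlot slots[];
    extern int         NumBackendSlots;

    /* destroydb would call something like this and refuse to proceed,
     * with an error, while it returns true. */
    bool
    database_has_active_backends(unsigned int dbId)
    {
        int i;

        for (i = 0; i < NumBackendSlots; i++)
        {
            if (slots[i].inUse && slots[i].databaseId == dbId)
                return true;
        }
        return false;
    }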
[ { "msg_contents": "It appears the Postgres Web site is down right now, so I can't\nlook this up in the archives.\n\nAnyone know how to fix this?\n\nThanks.\n\n_______\n\n\naasc24>createdb doda\naasc24>psql -c \"select datetime 'now'\" doda\n?column? \n----------------------------\nSun Jun 13 10:15:07 1999 CDT\n(1 row)\n\naasc24>destroydb doda\nERROR: typeidTypeRelid: Invalid type - oid = 0\ndestroydb: database destroy failed on doda.\n", "msg_date": "Sun, 13 Jun 1999 10:56:35 -0500", "msg_from": "\"David R. Favor\" <[email protected]>", "msg_from_op": true, "msg_subject": "PostgreSQL 6.5beta 3 broken - typeidTypeRelid error" } ]
[ { "msg_contents": "*** src/bin/psql/psql.c.orig\tSat Jun 5 09:00:38 1999\n--- src/bin/psql/psql.c\tSun Jun 13 10:34:05 1999\n***************\n*** 2875,2880 ****\n--- 2875,2887 ----\n \t\tpqsignal(SIGINT, handle_sigint);\t\t/* control-C => cancel */\n #ifdef USE_READLINE\n \t\tsettings.useReadline = 1;\n+ \t\t{\n+ \t\t\t/*\n+ \t\t\t * Set the application name, used for parsing .inputrc -- dz\n+ \t\t\t */\n+ \t\t\tchar *progname = rindex(argv[0], SEP_CHAR);\n+ \t\t\trl_readline_name = (progname ? progname : argv[0]);\n+ \t\t}\n #endif\n \t}\n #ifdef PSQL_ALWAYS_GET_PASSWORDS\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Sun, 13 Jun 1999 22:01:16 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "new patch" }, { "msg_contents": "Massimo Dal Zotto <[email protected]> writes:\n> + \t\t\tchar *progname = rindex(argv[0], SEP_CHAR);\n> + \t\t\trl_readline_name = (progname ? progname : argv[0]);\n\n1. rindex => strrchr, please.\n\n2. Shouldn't that be (progname ? progname+1 : argv[0]) ?\n\n\t\t\tregards, tom lane\n", "msg_date": "Sun, 13 Jun 1999 16:26:42 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] new patch " }, { "msg_contents": "> \n> Massimo Dal Zotto <[email protected]> writes:\n> > + \t\t\tchar *progname = rindex(argv[0], SEP_CHAR);\n> > + \t\t\trl_readline_name = (progname ? progname : argv[0]);\n> \n> 1. rindex => strrchr, please.\n> \n> 2. Shouldn't that be (progname ? progname+1 : argv[0]) ?\n> \n> \t\t\tregards, tom lane\n> \n\nYes, I had already added the +1, but for same obscure reason it worked\nalso with the original code, so I didn't notice the bug in my first test.\nI suspect that the readline uses only the basename of the argument.\nOk also for strrchr, this is the final patch:\n\n*** src/bin/psql/psql.c.orig\tSat Jun 5 09:00:38 1999\n--- src/bin/psql/psql.c\tMon Jun 14 09:04:15 1999\n***************\n*** 2875,2880 ****\n--- 2875,2887 ----\n \t\tpqsignal(SIGINT, handle_sigint);\t\t/* control-C => cancel */\n #ifdef USE_READLINE\n \t\tsettings.useReadline = 1;\n+ \t\t{\n+ \t\t\t/*\n+ \t\t\t * Set the application name, used for parsing .inputrc -- dz\n+ \t\t\t */\n+ \t\t\tchar *progname = strrchr(argv[0], SEP_CHAR);\n+ \t\t\trl_readline_name = (progname ? progname+1 : argv[0]);\n+ \t\t}\n #endif\n \t}\n #ifdef PSQL_ALWAYS_GET_PASSWORDS\n\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Mon, 14 Jun 1999 22:55:03 +0200 (MET DST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] new patch" }, { "msg_contents": "Applied.\n\n[Charset ISO-8859-1 unsupported, filtering to ASCII...]\n> > \n> > Massimo Dal Zotto <[email protected]> writes:\n> > > + \t\t\tchar *progname = rindex(argv[0], SEP_CHAR);\n> > > + \t\t\trl_readline_name = (progname ? 
progname : argv[0]);\n> > \n> > 1. rindex => strrchr, please.\n> > \n> > 2. Shouldn't that be (progname ? progname+1 : argv[0]) ?\n> > \n> > \t\t\tregards, tom lane\n> > \n> \n> Yes, I had already added the +1, but for same obscure reason it worked\n> also with the original code, so I didn't notice the bug in my first test.\n> I suspect that the readline uses only the basename of the argument.\n> Ok also for strrchr, this is the final patch:\n> \n> *** src/bin/psql/psql.c.orig\tSat Jun 5 09:00:38 1999\n> --- src/bin/psql/psql.c\tMon Jun 14 09:04:15 1999\n> ***************\n> *** 2875,2880 ****\n> --- 2875,2887 ----\n> \t\tpqsignal(SIGINT, handle_sigint);\t\t/* control-C => cancel */\n> #ifdef USE_READLINE\n> \t\tsettings.useReadline = 1;\n> + \t\t{\n> + \t\t\t/*\n> + \t\t\t * Set the application name, used for parsing .inputrc -- dz\n> + \t\t\t */\n> + \t\t\tchar *progname = strrchr(argv[0], SEP_CHAR);\n> + \t\t\trl_readline_name = (progname ? progname+1 : argv[0]);\n> + \t\t}\n> #endif\n> \t}\n> #ifdef PSQL_ALWAYS_GET_PASSWORDS\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 19 Jul 1999 12:45:51 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] new patch" } ]
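As a hedged aside on the idiom this thread settles on (the sketch below is an illustration, not code from the archived patch): GNU Readline's documented rl_readline_name variable names the application for the purpose of `$if appname` sections in ~/.inputrc, and the strrchr()-plus-one step is simply how the basename is peeled off argv[0]. The '/' separator and the main() wrapper are assumptions made for the sake of a standalone example; psql itself uses its SEP_CHAR macro, as shown above. Build with -lreadline.

#include <stdio.h>
#include <string.h>
#include <readline/readline.h>

int
main(int argc, char **argv)
{
	/* strrchr() returns a pointer to the last separator; step one past it */
	char	   *slash = strrchr(argv[0], '/');
	char	   *progname = slash ? slash + 1 : argv[0];

	/* readline consults this name while parsing ~/.inputrc */
	rl_readline_name = progname;

	printf("readline application name: %s\n", rl_readline_name);
	return 0;
}

Setting the variable before the first readline() call is what matters, which is why the patch does it right where psql decides to enable readline support.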
[ { "msg_contents": "\nI just packaged up a Beta5 that represents what is planned for release\ntomorrow. I'll bundle up one last bundle tomorrow night *is* anything\n*really* has to get added before we release, but, otherwise, consider that\none what is to be released at 15:30EST tomorrow...\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Sun, 13 Jun 1999 22:08:41 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Beta5 == Tomorrow's Release .." }, { "msg_contents": "> \n> I just packaged up a Beta5 that represents what is planned for release\n> tomorrow. I'll bundle up one last bundle tomorrow night *is* anything\n> *really* has to get added before we release, but, otherwise, consider that\n> one what is to be released at 15:30EST tomorrow...\n> \n\nGo.\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Sun, 13 Jun 1999 21:29:57 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Beta5 == Tomorrow's Release .." }, { "msg_contents": "\nOn 14-Jun-99 The Hermit Hacker wrote:\n> \n> I just packaged up a Beta5 that represents what is planned for release\n> tomorrow. I'll bundle up one last bundle tomorrow night *is* anything\n> *really* has to get added before we release, but, otherwise, consider that\n> one what is to be released at 15:30EST tomorrow...\n\nI still have a ways to go on the web stuff but I'm hoping to be there\nin the next couple of days.\n\nVince.\n-- \n==========================================================================\nVince Vielhaber -- KA8CSH email: [email protected] flame-mail: /dev/null\n # include <std/disclaimers.h> TEAM-OS2\n Online Campground Directory http://www.camping-usa.com\n Online Giftshop Superstore http://www.cloudninegifts.com\n==========================================================================\n\n\n", "msg_date": "Sun, 13 Jun 1999 21:47:45 -0400 (EDT)", "msg_from": "Vince Vielhaber <[email protected]>", "msg_from_op": false, "msg_subject": "RE: [HACKERS] Beta5 == Tomorrow's Release .." }, { "msg_contents": "> I just packaged up a Beta5 that represents what is planned for release\n> tomorrow. I'll bundle up one last bundle tomorrow night *is* anything\n> *really* has to get added before we release, but, otherwise, consider that\n> one what is to be released at 15:30EST tomorrow...\n\nI'm working on the User's Guide right now. I'm half-way through, but\nhave some formatting bugs in Applix to work through. I should get\nthere by tomorrow morning, but would like a commit of user.ps.gz,\nHISTORY, and INSTALL to be a prerequisite for the official release.\n\nTIA\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Mon, 14 Jun 1999 02:05:06 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Beta5 == Tomorrow's Release .." }, { "msg_contents": "On Mon, 14 Jun 1999, Thomas Lockhart wrote:\n\n> > I just packaged up a Beta5 that represents what is planned for release\n> > tomorrow. 
I'll bundle up one last bundle tomorrow night *is* anything\n> > *really* has to get added before we release, but, otherwise, consider that\n> > one what is to be released at 15:30EST tomorrow...\n> \n> I'm working on the User's Guide right now. I'm half-way through, but\n> have some formatting bugs in Applix to work through. I should get\n> there by tomorrow morning, but would like a commit of user.ps.gz,\n> HISTORY, and INSTALL to be a prerequisite for the official release.\n\nNo probs...pop me off a quick email when you are ready with this, and I'll\nre-package and release ...\n\nAnyone with source code changes, make sure you wear steel gloves, mind you\n*grin*\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 14 Jun 1999 00:41:12 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": true, "msg_subject": "Re: [HACKERS] Beta5 == Tomorrow's Release .." } ]
[ { "msg_contents": "The conversion tables are declared as \"char []\" not \"unsigned char[]\",\nwhile they are passed to \"unsigned char*\".\nIs there any reason to prevent declaring as \"unsigned char[]\"?\n\nA patch is following my signature.\n--------\nTomoaki Nishiyama\n e-mail:[email protected]\n Department of Biological Sciences,\nGraduate School of Science, The University of Tokyo\n\n--- conv.orig\tThu May 27 16:00:39 1999\n+++ conv.c\tMon Jun 14 16:34:03 1999\n@@ -724,7 +724,7 @@\n static void\n iso2mic(unsigned char *l, unsigned char *p, int len)\n {\n-\tstatic char iso2koi[] = {\n+\tstatic unsigned char iso2koi[] = {\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n@@ -750,7 +750,7 @@\n static void\n mic2iso(unsigned char *mic, unsigned char *p, int len)\n {\n-\tstatic char koi2iso[] = {\n+\tstatic unsigned char koi2iso[] = {\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n@@ -776,7 +776,7 @@\n static void\n win2mic(unsigned char *l, unsigned char *p, int len)\n {\n-\tstatic char win2koi[] = {\n+\tstatic unsigned char win2koi[] = {\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n@@ -802,7 +802,7 @@\n static void\n mic2win(unsigned char *mic, unsigned char *p, int len)\n {\n-\tstatic char koi2win[] = {\n+\tstatic unsigned char koi2win[] = {\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n@@ -828,7 +828,7 @@\n static void\n alt2mic(unsigned char *l, unsigned char *p, int len)\n {\n-\tstatic char alt2koi[] = {\n+\tstatic unsigned char alt2koi[] = {\n \t\t0xe1, 0xe2, 0xf7, 0xe7, 0xe4, 0xe5, 0xf6, 0xfa,\n \t\t0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0,\n \t\t0xf2, 0xf3, 0xf4, 0xf5, 0xe6, 0xe8, 0xe3, 0xfe,\n@@ -854,7 +854,7 @@\n static void\n mic2alt(unsigned char *mic, unsigned char *p, int len)\n {\n-\tstatic char koi2alt[] = {\n+\tstatic unsigned char koi2alt[] = {\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n \t\t0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n", "msg_date": "Mon, 14 Jun 1999 16:50:59 +0900", "msg_from": "Tomoaki NISHIYAMA <[email protected]>", "msg_from_op": true, "msg_subject": "libpq/conv.c" }, { "msg_contents": ">The conversion tables are declared as \"char []\" not \"unsigned char[]\",\n>while they are passed to \"unsigned char*\".\n>Is there any reason to prevent declaring as \"unsigned char[]\"?\n\nSounds resonable, but it's too late for 6.5. I'm going to take care of \nit for 6.5.1...\n--\nTatsuo Ishii\n", "msg_date": "Mon, 14 Jun 1999 17:19:42 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] libpq/conv.c " }, { "msg_contents": "> The conversion tables are declared as \"char []\" not \"unsigned char[]\",\n> while they are passed to \"unsigned char*\".\n> Is there any reason to prevent declaring as \"unsigned char[]\"?\n> \n> A patch is following my signature.\n\nDone.\n---\nTatsuo Ishii\n", "msg_date": "Mon, 12 Jul 1999 07:58:04 +0900", "msg_from": "Tatsuo Ishii <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] libpq/conv.c " } ]
[ { "msg_contents": "\n> The change was:\n> #if defined(__mc68000__)\n> to:\n> #if defined(__mc68000__) && defined(__linux__)\n> in s_lock.h.\n\n> I think the change I made in s_lock.h should make both NetBSD/m68k\n> and Linux/m68k happy. Can some Linux/m68k folks confirm it?\n\nSure, this can't cause any trouble on Linux/m68k.\n\nRoman\n", "msg_date": "Mon, 14 Jun 1999 10:13:01 +0200 (MET DST)", "msg_from": "Roman Hodek <[email protected]>", "msg_from_op": true, "msg_subject": "Re: Patch for m68k architecture (fwd)" } ]
[ { "msg_contents": "It looks like the counts for records found are happening, but no\ncontent is returned.\n", "msg_date": "Mon, 14 Jun 1999 05:40:44 -0500", "msg_from": "\"David R. Favor\" <[email protected]>", "msg_from_op": true, "msg_subject": "Postgres mailing list search engine is down" } ]
[ { "msg_contents": "Hi,\nDose anyone know if www.flex.ro is still around? I've tried several time\nto gain access\nto the server with no luck. Sorry for the off topic traffic.\nWayne\n\n", "msg_date": "Mon, 14 Jun 1999 07:33:23 -0400", "msg_from": "Wayne <[email protected]>", "msg_from_op": true, "msg_subject": "off-topic: pgaccess?" } ]
[ { "msg_contents": "None of the shared libraries appear to get generated properly.\n\nAs a result lib*.a end up in /usr/local/pgsql/lib, so the regression\ntests can't run.\n\nMailing list archive search is still down, so I can't check if this\nhas been fixed.\n\nI'm using egcs (gcc 2.95) because the native xlC compiler has other problems.\n\nSuggestions?\n", "msg_date": "Mon, 14 Jun 1999 07:38:54 -0500", "msg_from": "\"David R. Favor\" <[email protected]>", "msg_from_op": true, "msg_subject": "AIX 432 shared library problem" }, { "msg_contents": "\nJust checked...what is wrong with the archive search? It gave me back\nresults when I just tried it seconds ago...\n\nOn Mon, 14 Jun 1999, David R. Favor wrote:\n\n> None of the shared libraries appear to get generated properly.\n> \n> As a result lib*.a end up in /usr/local/pgsql/lib, so the regression\n> tests can't run.\n> \n> Mailing list archive search is still down, so I can't check if this\n> has been fixed.\n> \n> I'm using egcs (gcc 2.95) because the native xlC compiler has other problems.\n> \n> Suggestions?\n> \n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 14 Jun 1999 14:08:15 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] AIX 432 shared library problem" } ]
[ { "msg_contents": "\n> I'm using egcs (gcc 2.95) because the native xlC compiler has other\n> problems.\n> \nI have not been successful with shared lib support using any version of gcc.\nThe native IBM compiler works. See the FAQ_AIX.\nIf you have readline and others in /usr/local use: (change /usr/postgres to\nyour preference)\n\nfix inttypes.h in /usr/include/ (comment out int8 to int64 typedefs\nIBM says that these are BSD, but obviously no other unix defines those (not\neven BSD))\n\ncd src\n./configure --prefix=/usr/postgres --without-CXX \\\n\t--with-includes=/usr/local/include --with-libraries=/usr/local/lib\ngmake all\ngmake install\n\ncd pl/plpgsql/src\n/usr/postgres/src/backend/port/aix/mkldexport.sh \\\n\tlibplpgsql.a /usr/postgres/lib > libplpgsql.exp\nld -H512 -bM:SRE -bI:/usr/postgres/lib/postgres.imp \\\n\t-bE:libplpgsql.exp -o libplpgsql.so libplpgsql.a -lc\ncp libplpgsql.so /usr/postgres/lib/plpgsql.so\n\nAndreas\n\n", "msg_date": "Mon, 14 Jun 1999 15:32:12 +0200", "msg_from": "ZEUGSWETTER Andreas IZ5 <[email protected]>", "msg_from_op": true, "msg_subject": "AW: [HACKERS] AIX 432 shared library problem" } ]
[ { "msg_contents": "Is anyone willing to tell me what this means:\n\nNOTICE: SIAssignBackendId: discarding tag 2147449930\nConnection to database 'db_domain' failed.\nFATAL 1: Backend cache invalidation initialization failed\n\n\nAnd how do I fix this:\n\nNOTICE: Index tbl_mail_archive_pkey: NUMBER OF INDEX' TUPLES (2238849) IS\nNOT THE SAME AS HEAP' (2241252)\n\nThanks,\n\nTim Perdue\nPHPBuilder.com / GotoCity.com / Geocrawler.com\n\n\n\n", "msg_date": "Mon, 14 Jun 1999 08:36:43 -0500", "msg_from": "\"Tim Perdue, Geocrawler.com\" <[email protected]>", "msg_from_op": true, "msg_subject": "Backend Cache Invalidation" } ]
[ { "msg_contents": "Is anyone willing to tell me what this means:\n\nNOTICE: SIAssignBackendId: discarding tag 2147449930\nConnection to database 'db_domain' failed.\nFATAL 1: Backend cache invalidation initialization failed\n\n\nAnd how do I fix this:\n\nNOTICE: Index tbl_mail_archive_pkey: NUMBER OF INDEX' TUPLES (2238849) IS\nNOT THE SAME AS HEAP' (2241252)\n\nThanks,\n\nTim Perdue\nPHPBuilder.com / GotoCity.com / Geocrawler.com\n\n\n\n\n\n", "msg_date": "Mon, 14 Jun 1999 08:38:56 -0500", "msg_from": "\"Tim Perdue\" <[email protected]>", "msg_from_op": true, "msg_subject": "Backend Cache Invalidation" }, { "msg_contents": "\"Tim Perdue\" <[email protected]> writes:\n> Is anyone willing to tell me what this means:\n> NOTICE: SIAssignBackendId: discarding tag 2147449930\n> Connection to database 'db_domain' failed.\n> FATAL 1: Backend cache invalidation initialization failed\n\nThis is coming from SIAssignBackendId() in\nbackend/storage/ipc/sinvaladt.c. Maybe I'm missing something,\nbut the logic in that routine (as of current CVS sources)\nsure looks wrong.\n\nLines 130-138 look like they are checking for no-longer-used\nprocState entries that can be recycled, but (a) the test is\nnot obviously correct (and I'm always suspicious of code that's\nboth this complicated and comment-free); (b) lines 151-158\nreject any entry that doesn't have tag InvalidBackendTag or\nbackendTag, meaning that lines 130-138 could be diked out with\nno loss of functionality.\n\nSomething is wrong here...\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 14 Jun 1999 10:40:26 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Backend Cache Invalidation " } ]
[ { "msg_contents": "As far as I can tell, I'm done with the docs for this release. Please\nlet me know if you notice any errors or omissions, and I'll be\navailable to repair it about 8 hours hence (~1:00 UTC).\n\n - Thomas\n\n-- \nThomas Lockhart\t\t\t\[email protected]\nSouth Pasadena, California\n", "msg_date": "Mon, 14 Jun 1999 16:55:35 +0000", "msg_from": "Thomas Lockhart <[email protected]>", "msg_from_op": true, "msg_subject": "Docs done?" }, { "msg_contents": "Jan didn't seem to be in a hurry to fix that $1 => $portname typo\nin regress.sh, but I thought it was worth fixing before 6.5 release.\nTested and committed. (If you want to break my fingers, you know where\nto find me.)\n\nI think we're ready to release...\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 14 Jun 1999 13:51:20 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Docs done? " }, { "msg_contents": "On Mon, 14 Jun 1999, Tom Lane wrote:\n\n> Jan didn't seem to be in a hurry to fix that $1 => $portname typo\n> in regress.sh, but I thought it was worth fixing before 6.5 release.\n> Tested and committed. (If you want to break my fingers, you know where\n> to find me.)\n\nThere was something you brought up that seemed to concern you about the\nrelease...a section of code that you felt was wrong? Are you fine with\nthat for the release?\n\nMarc G. Fournier ICQ#7615664 IRC Nick: Scrappy\nSystems Administrator @ hub.org \nprimary: [email protected] secondary: scrappy@{freebsd|postgresql}.org \n\n", "msg_date": "Mon, 14 Jun 1999 16:00:47 -0300 (ADT)", "msg_from": "The Hermit Hacker <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Docs done? " }, { "msg_contents": "The Hermit Hacker <[email protected]> writes:\n> There was something you brought up that seemed to concern you about the\n> release...a section of code that you felt was wrong? Are you fine with\n> that for the release?\n\nI am worried about that init code in sinvaladt.c, but I am not saying\nit's broken; maybe I just don't understand it. In any case I see no\nreason to hold up the release for it. There'll always be another bug ;-)\n\n\t\t\tregards, tom lane\n", "msg_date": "Mon, 14 Jun 1999 16:57:53 -0400", "msg_from": "Tom Lane <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Docs done? " }, { "msg_contents": "On Mon, 14 Jun 1999, Tom Lane wrote:\n\n>Jan didn't seem to be in a hurry to fix that $1 => $portname typo\n>in regress.sh, but I thought it was worth fixing before 6.5 release.\n>Tested and committed. (If you want to break my fingers, you know where\n>to find me.)\n\nThis may break the customized expected results for alpha/Digital Unix I\nsent in my last patch. I thought that the change was intentional, and\nchanged the name of the files.\n\nI am cvsup'ing the latest source right now, and will do a test ASAP.\n\nCheers,\n\n\tPedro.\n\n--\n-------------------------------------------------------------------\nPedro Jos� Lobo Perea Tel: +34 91 336 78 19\nCentro de C�lculo Fax: +34 91 331 92 29\nE.U.I.T. Telecomunicaci�n e-mail: [email protected]\nUniversidad Polit�cnica de Madrid\nCtra. de Valencia, Km. 7 E-28031 Madrid - Espa�a / Spain\n\n", "msg_date": "Tue, 15 Jun 1999 10:29:57 +0200 (MET DST)", "msg_from": "\"Pedro J. Lobo\" <[email protected]>", "msg_from_op": false, "msg_subject": "Re: [HACKERS] Docs done? " } ]
[ { "msg_contents": "Hi,\n\nI have added the pg_options variable to the SET command. It can be executed\nonly by the superuser and is intended mainly as a debugging aid.\nI have also moved the disableFsync variable to the pg_options array, so it\ncan now be set in the pg_options file or interactively with the commands:\n\n SET pg_options to 'nofsinc=1';\n SET pg_options to 'nofsinc=0';\n\nThere are obviously also the SHOW and RESET pg_options commands.\n\n\n*** src/backend/commands/variable.c.orig\tWed May 26 09:02:48 1999\n--- src/backend/commands/variable.c\tMon Jun 14 20:45:58 1999\n***************\n*** 15,20 ****\n--- 15,22 ----\n #include \"commands/variable.h\"\n #include \"utils/builtins.h\"\n #include \"optimizer/internal.h\"\n+ #include \"utils/trace.h\"\n+ #include \"catalog/pg_shadow.h\"\n #include \"access/xact.h\"\n #ifdef MULTIBYTE\n #include \"mb/pg_wchar.h\"\n***************\n*** 581,589 ****\n \tExecutorLimit(ALL_TUPLES);\n \treturn (TRUE);\n }\n- \n #endif\n \n /*-----------------------------------------------------------------------*/\n \n struct VariableParsers\n--- 583,622 ----\n \tExecutorLimit(ALL_TUPLES);\n \treturn (TRUE);\n }\n #endif\n \n+ /*\n+ *\n+ * Pg_options\n+ *\n+ */\n+ static bool\n+ parse_pg_options(const char *value)\n+ {\n+ \tif (!superuser()) {\n+ \t\telog(ERROR, \"Only users with Postgres superuser can set pg_options\");\n+ \t}\n+ \tparse_options((char *) value, TRUE);\n+ \treturn (TRUE);\n+ }\n+ \n+ static bool\n+ show_pg_options(void)\n+ {\n+ \tshow_options();\n+ \treturn (TRUE);\n+ }\n+ \n+ static bool\n+ reset_pg_options(void)\n+ {\n+ \tif (!superuser()) {\n+ \t\telog(ERROR, \"Only users with Postgres superuser can set pg_options\");\n+ \t}\n+ \tread_pg_options(0);\n+ \treturn (TRUE);\n+ }\n+ \n /*-----------------------------------------------------------------------*/\n \n struct VariableParsers\n***************\n*** 629,634 ****\n--- 662,670 ----\n \t\t\"query_limit\", parse_query_limit, show_query_limit, reset_query_limit\n \t},\n #endif\n+ \t{\n+ \t\t\"pg_options\", parse_pg_options, show_pg_options, reset_pg_options\n+ \t},\n \t{\n \t\tNULL, NULL, NULL, NULL\n \t}\n*** src/backend/utils/init/globals.c.orig\tWed May 26 09:05:49 1999\n--- src/backend/utils/init/globals.c\tMon Jun 14 20:56:34 1999\n***************\n*** 82,88 ****\n \t\t\t\t\t\t\t\t\t\t\t\t * malloc? XXX */\n char\t\tFloatFormat[20] = \"%f\";\n \n! bool\t\tdisableFsync = false;\n bool\t\tallowSystemTableMods = false;\n int\t\t\tSortMem = 512;\n \n--- 82,88 ----\n \t\t\t\t\t\t\t\t\t\t\t\t * malloc? XXX */\n char\t\tFloatFormat[20] = \"%f\";\n \n! /* bool\t\tdisableFsync = false; */\n bool\t\tallowSystemTableMods = false;\n int\t\t\tSortMem = 512;\n \n*** src/include/miscadmin.h.orig\tWed May 26 09:06:39 1999\n--- src/include/miscadmin.h\tMon Jun 14 20:01:27 1999\n***************\n*** 22,27 ****\n--- 22,29 ----\n #ifndef MISCADMIN_H\n #define MISCADMIN_H\n \n+ #include \"utils/trace.h\"\n+ \n /*****************************************************************************\n *\t globals.h --\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t *\n *****************************************************************************/\n***************\n*** 93,99 ****\n extern char FloatFormat[];\n extern char DateFormat[];\n \n! extern bool disableFsync;\n extern bool allowSystemTableMods;\n extern int\tSortMem;\n \n--- 95,103 ----\n extern char FloatFormat[];\n extern char DateFormat[];\n \n! /* extern bool disableFsync; */\n! #define disableFsync\tpg_options[OPT_NOFSYNC]\n! 
\n extern bool allowSystemTableMods;\n extern int\tSortMem;\n \n*** src/bin/psql/psqlHelp.h.orig\tFri Jun 4 09:00:17 1999\n--- src/bin/psql/psqlHelp.h\tMon Jun 14 20:17:45 1999\n***************\n*** 297,303 ****\n \t\t\"set run-time environment back to default\",\n \t\"\\\n \\tRESET DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n! TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING\"},\n \t{\"revoke\",\n \t\t\"revoke access control from a user or group\",\n \t\"\\\n--- 297,303 ----\n \t\t\"set run-time environment back to default\",\n \t\"\\\n \\tRESET DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n! \\t TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING|PG_OPTIONS\"},\n \t{\"revoke\",\n \t\t\"revoke access control from a user or group\",\n \t\"\\\n***************\n*** 331,336 ****\n--- 331,337 ----\n \\tSET KSQO TO 'ON'|'OFF'\\n\\\n \\tSET QUERY_LIMIT TO #\\n\\\n \\tSET TIMEZONE TO 'value'\\n\\\n+ \\tSET PG_OPTIONS TO 'value'\\n\\\n \\tSET TRANSACTION ISOLATION LEVEL 'SERIALIZABLE'|'READ COMMITTED'\\n\\\n \\tSET CLIENT_ENCODING|NAMES TO 'EUC_JP'|'SJIS'|'EUC_CN'|'EUC_KR'|'EUC_TW'|\\n\\\n \\t 'BIG5'|'MULE_INTERNAL'|'LATIN1'|'LATIN2'|'LATIN3'|'LATIN4'|'LATIN5'|\\n\\\n***************\n*** 342,348 ****\n \t\t\"show current run-time environment\",\n \t\"\\\n \\tSHOW DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n! TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING\"},\n \t{\"unlisten\",\n \t\t\"stop listening for notification on a condition name\",\n \t\"\\\n--- 343,349 ----\n \t\t\"show current run-time environment\",\n \t\"\\\n \\tSHOW DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n! \\t TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING|PG_OPTIONS\"},\n \t{\"unlisten\",\n \t\t\"stop listening for notification on a condition name\",\n \t\"\\\n*** src/include/utils/trace.h.orig\tSat Jun 12 22:45:23 1999\n--- src/include/utils/trace.h\tMon Jun 14 20:50:56 1999\n***************\n*** 30,38 ****\n--- 30,39 ----\n extern int\ttprintf(int flag, const char *fmt,...);\n extern int\teprintf(const char *fmt,...);\n extern void write_syslog(int level, char *line);\n+ extern void show_options(void);\n extern void parse_options(char *str, bool secure);\n extern void read_pg_options(SIGNAL_ARGS);\n \n /*\n * Trace options, used as index into pg_options.\n * Must match the constants in pg_options[].\n***************\n*** 61,66 ****\n--- 61,67 ----\n \tTRACE_LOCKRELATION,\n \tOPT_LOCKREADPRIORITY,\t\t/* lock priority, see lock.c */\n \tOPT_DEADLOCKTIMEOUT,\t\t/* deadlock timeout, see proc.c */\n+ \tOPT_NOFSYNC,\t\t\t\t/* turn fsync off */\n \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n*** src/backend/utils/misc/trace.c.orig\tSat Jun 12 22:47:32 1999\n--- src/backend/utils/misc/trace.c\tMon Jun 14 20:50:06 1999\n***************\n*** 70,75 ****\n--- 70,76 ----\n \t\"lock_debug_relid\",\n \t\"lock_read_priority\",\t\t/* lock priority, see lock.c */\n \t\"deadlock_timeout\",\t\t\t/* deadlock timeout, see proc.c */\n+ \t\"nofsync\",\t\t\t\t\t/* turn fsync off */\n \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n***************\n*** 407,412 ****\n--- 407,422 ----\n \tclose(fd);\n }\n \n+ void\n+ show_options(void)\n+ {\n+ \tint\t\ti;\n+ \n+ \tfor (i=0; 
i<NUM_PG_OPTIONS; i++) {\n+ \t\telog(NOTICE, \"%s=%d\", opt_names[i], pg_options[i]);\n+ \t}\n+ }\n+ \n /*\n * Local Variables:\n * tab-width: 4\n*** src/backend/bootstrap/bootstrap.c.orig\tWed May 26 09:02:25 1999\n--- src/backend/bootstrap/bootstrap.c\tMon Jun 14 20:06:00 1999\n***************\n*** 182,188 ****\n Form_pg_attribute attrtypes[MAXATTR];\t/* points to attribute info */\n static char *values[MAXATTR];\t/* cooresponding attribute values */\n int\t\t\tnumattr;\t\t\t/* number of attributes for cur. rel */\n! extern bool disableFsync;\t\t/* do not fsync the database */\n \n int\t\t\tDebugMode;\n static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem\n--- 182,188 ----\n Form_pg_attribute attrtypes[MAXATTR];\t/* points to attribute info */\n static char *values[MAXATTR];\t/* cooresponding attribute values */\n int\t\t\tnumattr;\t\t\t/* number of attributes for cur. rel */\n! /* extern bool disableFsync; */\t\t/* do not fsync the database */\n \n int\t\t\tDebugMode;\n static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem\n\n\n-- \nMassimo Dal Zotto\n\n+----------------------------------------------------------------------+\n| Massimo Dal Zotto email: [email protected] |\n| Via Marconi, 141 phone: ++39-0461534251 |\n| 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n| Italy pgp: finger [email protected] |\n+----------------------------------------------------------------------+\n", "msg_date": "Mon, 14 Jun 1999 22:32:50 +0200 (MEST)", "msg_from": "Massimo Dal Zotto <[email protected]>", "msg_from_op": true, "msg_subject": "new patches..." }, { "msg_contents": "\nApplied. Thanks.\n\n\n> Hi,\n> \n> I have added the pg_options variable to the SET command. It can be executed\n> only by the superuser and is intended mainly as a debugging aid.\n> I have also moved the disableFsync variable to the pg_options array, so it\n> can now be set in the pg_options file or interactively with the commands:\n> \n> SET pg_options to 'nofsinc=1';\n> SET pg_options to 'nofsinc=0';\n> \n> There are obviously also the SHOW and RESET pg_options commands.\n> \n> \n> *** src/backend/commands/variable.c.orig\tWed May 26 09:02:48 1999\n> --- src/backend/commands/variable.c\tMon Jun 14 20:45:58 1999\n> ***************\n> *** 15,20 ****\n> --- 15,22 ----\n> #include \"commands/variable.h\"\n> #include \"utils/builtins.h\"\n> #include \"optimizer/internal.h\"\n> + #include \"utils/trace.h\"\n> + #include \"catalog/pg_shadow.h\"\n> #include \"access/xact.h\"\n> #ifdef MULTIBYTE\n> #include \"mb/pg_wchar.h\"\n> ***************\n> *** 581,589 ****\n> \tExecutorLimit(ALL_TUPLES);\n> \treturn (TRUE);\n> }\n> - \n> #endif\n> \n> /*-----------------------------------------------------------------------*/\n> \n> struct VariableParsers\n> --- 583,622 ----\n> \tExecutorLimit(ALL_TUPLES);\n> \treturn (TRUE);\n> }\n> #endif\n> \n> + /*\n> + *\n> + * Pg_options\n> + *\n> + */\n> + static bool\n> + parse_pg_options(const char *value)\n> + {\n> + \tif (!superuser()) {\n> + \t\telog(ERROR, \"Only users with Postgres superuser can set pg_options\");\n> + \t}\n> + \tparse_options((char *) value, TRUE);\n> + \treturn (TRUE);\n> + }\n> + \n> + static bool\n> + show_pg_options(void)\n> + {\n> + \tshow_options();\n> + \treturn (TRUE);\n> + }\n> + \n> + static bool\n> + reset_pg_options(void)\n> + {\n> + \tif (!superuser()) {\n> + \t\telog(ERROR, \"Only users with Postgres superuser can set pg_options\");\n> + \t}\n> + \tread_pg_options(0);\n> + \treturn (TRUE);\n> + }\n> + \n> 
/*-----------------------------------------------------------------------*/\n> \n> struct VariableParsers\n> ***************\n> *** 629,634 ****\n> --- 662,670 ----\n> \t\t\"query_limit\", parse_query_limit, show_query_limit, reset_query_limit\n> \t},\n> #endif\n> + \t{\n> + \t\t\"pg_options\", parse_pg_options, show_pg_options, reset_pg_options\n> + \t},\n> \t{\n> \t\tNULL, NULL, NULL, NULL\n> \t}\n> *** src/backend/utils/init/globals.c.orig\tWed May 26 09:05:49 1999\n> --- src/backend/utils/init/globals.c\tMon Jun 14 20:56:34 1999\n> ***************\n> *** 82,88 ****\n> \t\t\t\t\t\t\t\t\t\t\t\t * malloc? XXX */\n> char\t\tFloatFormat[20] = \"%f\";\n> \n> ! bool\t\tdisableFsync = false;\n> bool\t\tallowSystemTableMods = false;\n> int\t\t\tSortMem = 512;\n> \n> --- 82,88 ----\n> \t\t\t\t\t\t\t\t\t\t\t\t * malloc? XXX */\n> char\t\tFloatFormat[20] = \"%f\";\n> \n> ! /* bool\t\tdisableFsync = false; */\n> bool\t\tallowSystemTableMods = false;\n> int\t\t\tSortMem = 512;\n> \n> *** src/include/miscadmin.h.orig\tWed May 26 09:06:39 1999\n> --- src/include/miscadmin.h\tMon Jun 14 20:01:27 1999\n> ***************\n> *** 22,27 ****\n> --- 22,29 ----\n> #ifndef MISCADMIN_H\n> #define MISCADMIN_H\n> \n> + #include \"utils/trace.h\"\n> + \n> /*****************************************************************************\n> *\t globals.h --\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t *\n> *****************************************************************************/\n> ***************\n> *** 93,99 ****\n> extern char FloatFormat[];\n> extern char DateFormat[];\n> \n> ! extern bool disableFsync;\n> extern bool allowSystemTableMods;\n> extern int\tSortMem;\n> \n> --- 95,103 ----\n> extern char FloatFormat[];\n> extern char DateFormat[];\n> \n> ! /* extern bool disableFsync; */\n> ! #define disableFsync\tpg_options[OPT_NOFSYNC]\n> ! \n> extern bool allowSystemTableMods;\n> extern int\tSortMem;\n> \n> *** src/bin/psql/psqlHelp.h.orig\tFri Jun 4 09:00:17 1999\n> --- src/bin/psql/psqlHelp.h\tMon Jun 14 20:17:45 1999\n> ***************\n> *** 297,303 ****\n> \t\t\"set run-time environment back to default\",\n> \t\"\\\n> \\tRESET DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n> ! TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING\"},\n> \t{\"revoke\",\n> \t\t\"revoke access control from a user or group\",\n> \t\"\\\n> --- 297,303 ----\n> \t\t\"set run-time environment back to default\",\n> \t\"\\\n> \\tRESET DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n> ! \\t TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING|PG_OPTIONS\"},\n> \t{\"revoke\",\n> \t\t\"revoke access control from a user or group\",\n> \t\"\\\n> ***************\n> *** 331,336 ****\n> --- 331,337 ----\n> \\tSET KSQO TO 'ON'|'OFF'\\n\\\n> \\tSET QUERY_LIMIT TO #\\n\\\n> \\tSET TIMEZONE TO 'value'\\n\\\n> + \\tSET PG_OPTIONS TO 'value'\\n\\\n> \\tSET TRANSACTION ISOLATION LEVEL 'SERIALIZABLE'|'READ COMMITTED'\\n\\\n> \\tSET CLIENT_ENCODING|NAMES TO 'EUC_JP'|'SJIS'|'EUC_CN'|'EUC_KR'|'EUC_TW'|\\n\\\n> \\t 'BIG5'|'MULE_INTERNAL'|'LATIN1'|'LATIN2'|'LATIN3'|'LATIN4'|'LATIN5'|\\n\\\n> ***************\n> *** 342,348 ****\n> \t\t\"show current run-time environment\",\n> \t\"\\\n> \\tSHOW DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n> ! 
TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING\"},\n> \t{\"unlisten\",\n> \t\t\"stop listening for notification on a condition name\",\n> \t\"\\\n> --- 343,349 ----\n> \t\t\"show current run-time environment\",\n> \t\"\\\n> \\tSHOW DATESTYLE|COST_HEAP|COST_INDEX|GEQO|KSQO|QUERY_LIMIT|\\n\\\n> ! \\t TIMEZONE|XACTISOLEVEL|CLIENT_ENCODING|SERVER_ENCODING|PG_OPTIONS\"},\n> \t{\"unlisten\",\n> \t\t\"stop listening for notification on a condition name\",\n> \t\"\\\n> *** src/include/utils/trace.h.orig\tSat Jun 12 22:45:23 1999\n> --- src/include/utils/trace.h\tMon Jun 14 20:50:56 1999\n> ***************\n> *** 30,38 ****\n> --- 30,39 ----\n> extern int\ttprintf(int flag, const char *fmt,...);\n> extern int\teprintf(const char *fmt,...);\n> extern void write_syslog(int level, char *line);\n> + extern void show_options(void);\n> extern void parse_options(char *str, bool secure);\n> extern void read_pg_options(SIGNAL_ARGS);\n> \n> /*\n> * Trace options, used as index into pg_options.\n> * Must match the constants in pg_options[].\n> ***************\n> *** 61,66 ****\n> --- 61,67 ----\n> \tTRACE_LOCKRELATION,\n> \tOPT_LOCKREADPRIORITY,\t\t/* lock priority, see lock.c */\n> \tOPT_DEADLOCKTIMEOUT,\t\t/* deadlock timeout, see proc.c */\n> + \tOPT_NOFSYNC,\t\t\t\t/* turn fsync off */\n> \tOPT_SYSLOG,\t\t\t\t\t/* use syslog for error messages */\n> \tOPT_HOSTLOOKUP,\t\t\t\t/* enable hostname lookup in ps_status */\n> \tOPT_SHOWPORTNUMBER,\t\t\t/* show port number in ps_status */\n> *** src/backend/utils/misc/trace.c.orig\tSat Jun 12 22:47:32 1999\n> --- src/backend/utils/misc/trace.c\tMon Jun 14 20:50:06 1999\n> ***************\n> *** 70,75 ****\n> --- 70,76 ----\n> \t\"lock_debug_relid\",\n> \t\"lock_read_priority\",\t\t/* lock priority, see lock.c */\n> \t\"deadlock_timeout\",\t\t\t/* deadlock timeout, see proc.c */\n> + \t\"nofsync\",\t\t\t\t\t/* turn fsync off */\n> \t\"syslog\",\t\t\t\t\t/* use syslog for error messages */\n> \t\"hostlookup\",\t\t\t\t/* enable hostname lookup in ps_status */\n> \t\"showportnumber\",\t\t\t/* show port number in ps_status */\n> ***************\n> *** 407,412 ****\n> --- 407,422 ----\n> \tclose(fd);\n> }\n> \n> + void\n> + show_options(void)\n> + {\n> + \tint\t\ti;\n> + \n> + \tfor (i=0; i<NUM_PG_OPTIONS; i++) {\n> + \t\telog(NOTICE, \"%s=%d\", opt_names[i], pg_options[i]);\n> + \t}\n> + }\n> + \n> /*\n> * Local Variables:\n> * tab-width: 4\n> *** src/backend/bootstrap/bootstrap.c.orig\tWed May 26 09:02:25 1999\n> --- src/backend/bootstrap/bootstrap.c\tMon Jun 14 20:06:00 1999\n> ***************\n> *** 182,188 ****\n> Form_pg_attribute attrtypes[MAXATTR];\t/* points to attribute info */\n> static char *values[MAXATTR];\t/* cooresponding attribute values */\n> int\t\t\tnumattr;\t\t\t/* number of attributes for cur. rel */\n> ! extern bool disableFsync;\t\t/* do not fsync the database */\n> \n> int\t\t\tDebugMode;\n> static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem\n> --- 182,188 ----\n> Form_pg_attribute attrtypes[MAXATTR];\t/* points to attribute info */\n> static char *values[MAXATTR];\t/* cooresponding attribute values */\n> int\t\t\tnumattr;\t\t\t/* number of attributes for cur. rel */\n> ! 
/* extern bool disableFsync; */\t\t/* do not fsync the database */\n> \n> int\t\t\tDebugMode;\n> static GlobalMemory nogc = (GlobalMemory) NULL; /* special no-gc mem\n> \n> \n> -- \n> Massimo Dal Zotto\n> \n> +----------------------------------------------------------------------+\n> | Massimo Dal Zotto email: [email protected] |\n> | Via Marconi, 141 phone: ++39-0461534251 |\n> | 38057 Pergine Valsugana (TN) www: http://www.cs.unitn.it/~dz/ |\n> | Italy pgp: finger [email protected] |\n> +----------------------------------------------------------------------+\n> \n\n\n-- \n Bruce Momjian | http://www.op.net/~candle\n [email protected] | (610) 853-3000\n + If your life is a hard drive, | 830 Blythe Avenue\n + Christ can be your backup. | Drexel Hill, Pennsylvania 19026\n", "msg_date": "Mon, 27 Sep 1999 16:32:33 -0400 (EDT)", "msg_from": "Bruce Momjian <[email protected]>", "msg_from_op": false, "msg_subject": "Re: new patches..." } ]
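Two hedged notes on the patch above. First, the option it actually registers is spelled "nofsync" (see the additions to opt_names and the OPT_NOFSYNC enum member, plus the disableFsync define), so the 'nofsinc' spelling in the description looks like a typo in the prose; the working commands would presumably be SET PG_OPTIONS TO 'nofsync=1' and 'nofsync=0'. Second, for readers curious about the shape of such an option string, the standalone sketch below illustrates the general comma-separated name=value parsing idea in isolation; it is a simplified stand-in, not the backend's parse_options() implementation, and its "bare name means 1" convention is its own.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUM_OPTS 2

static const char *opt_names[NUM_OPTS] = {"nofsync", "syslog"};
static int	opt_values[NUM_OPTS];

static void
parse_option_string(const char *str)
{
	char	   *copy = strdup(str);
	char	   *tok;

	for (tok = strtok(copy, ","); tok != NULL; tok = strtok(NULL, ","))
	{
		char	   *eq = strchr(tok, '=');
		int			value = 1;	/* a bare option name switches it on */
		int			i;

		if (eq)
		{
			*eq = '\0';
			value = atoi(eq + 1);
		}
		for (i = 0; i < NUM_OPTS; i++)
			if (strcmp(tok, opt_names[i]) == 0)
				opt_values[i] = value;
	}
	free(copy);
}

int
main(void)
{
	parse_option_string("nofsync=1,syslog=0");
	printf("nofsync=%d syslog=%d\n", opt_values[0], opt_values[1]);
	return 0;
}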
[ { "msg_contents": "I'm trying to write some trigger functions that fill in the values of\nsome fields with functions of other fields. Everything is fine,\nexcept that I am unsure how to create Datum representations of the new\nfields. Presumably I need to do something like the following:\n\n\tPoint f (int32 i, int32 j);\n\tint32 i, j;\n\tPoint p;\n\tDatum d;\n\tp = f (i, j);\t\t\t/* ??? - how to create/access Point? */\n\td = PointGetDatum (&p);\t\t/* ??? - how to convert to Datum? */\n\trettuple = SPI_modifytuple (rel, rettuple, 1, attnum, &d, NULL);\n\nBut I don't know how to get at the internal representations I need.\nIn this case I need to create a Point data type. I can use the\nstructure in geo_decls.h fine, but is that the \"correct\" procedure?\nOnce I have a Point structure created, how do I convert that to a\nDatum (or Datum*) for an argument to SPI_modifytuple?\n\nSo that I haven't missed any details, I'm including the full version\nof my trigger below.\n\nThanks for your help.\n\nCheers,\nBrook\n\n===========================================================================\n\n/*\n * plot_corners.c\n *\n * trigger for keeping corners in sync with distances\n */\n\n\n#include \"executor/spi.h\"\t/* this is what you need to work with SPI */\n#include \"commands/trigger.h\"\t/* and triggers */\n#include \"utils/geo_decls.h\"\t/* this is needed for Point type */\n\nHeapTuple plot_corners (void);\n\n\t\t\t\t/* coordinants of corners */\ntypedef struct _corners\n{\n Point LL;\n Point LR;\n Point UL;\n Point UR;\n} Corners;\n\n\t\t\t\t/* calculate corner positions */\nstatic Corners calc_corners (int n_distances, const int32 * d);\n\n#define N_DISTANCES 6\nstatic char * distance_names [N_DISTANCES] = { \"d12\", \"d13\", \"d14\", \"d23\", \"d24\", \"d34\" };\n\n#define N_CORNERS 4\nstatic char * corner_names [N_CORNERS] = { \"ll\", \"lr\", \"ul\", \"ur\" };\n\nHeapTuple plot_corners ()\n{\n Relation rel;\t\t\t/* triggered relation */\n char *relname;\t\t/* triggered relation name */\n TupleDesc tupdesc;\t\t/* tuple description */\n\n HeapTuple rettuple = NULL;\t/* tuple to return */\n\n int d_attnum [N_DISTANCES];\t/* distance attribute numbers */\n int32 d [N_DISTANCES];\t/* distances between corners */\n\n int c_attnum [N_CORNERS];\t/* corner attribute numbers */\n Corners corners;\t\t/* coordinants of corners */\n Datum c [N_CORNERS];\t\t/* coordinants of corners */\n\n bool isnull;\n int i;\n\n if (!CurrentTriggerData)\n elog (ERROR, \"plot_corners: triggers are not initialized\");\n\n rel = CurrentTriggerData -> tg_relation;\n relname = SPI_getrelname (rel);\n tupdesc = rel -> rd_att;\n\n if (CurrentTriggerData -> tg_trigger -> tgnargs != 0)\n elog (ERROR, \"plot_corners (%s): no arguments were expected\", relname);\n\n if (!TRIGGER_FIRED_FOR_ROW (CurrentTriggerData -> tg_event))\n elog (ERROR, \"plot_corners (%s): must process ROW events\", relname);\n if (TRIGGER_FIRED_FOR_STATEMENT (CurrentTriggerData -> tg_event))\n elog (ERROR, \"plot_corners (%s): can't process STATEMENT events\", relname);\n\n if (!TRIGGER_FIRED_BEFORE (CurrentTriggerData -> tg_event))\n elog (ERROR, \"plot_corners (%s): must be fired before event\", relname);\n if (TRIGGER_FIRED_AFTER (CurrentTriggerData -> tg_event))\n elog (ERROR, \"plot_corners (%s): can't be fired after event\", relname);\n\n if (TRIGGER_FIRED_BY_INSERT (CurrentTriggerData -> tg_event))\n rettuple = CurrentTriggerData -> tg_trigtuple;\n else if (TRIGGER_FIRED_BY_UPDATE (CurrentTriggerData -> tg_event))\n rettuple = CurrentTriggerData 
-> tg_newtuple;\n else if (TRIGGER_FIRED_BY_DELETE (CurrentTriggerData -> tg_event))\n elog (ERROR, \"plot_corners (%s): can't process DELETE events\", relname);\n else\n elog (ERROR, \"plot_corners (%s): unknown trigger event\", relname);\n\n CurrentTriggerData = NULL;\n\n for (i = 0; i < N_DISTANCES; i++)\n {\n d_attnum [i] = SPI_fnumber (tupdesc, distance_names [i]);\n if (d_attnum [i] < 0)\n\telog (ERROR, \"plot_corners (%s): there is no attribute %s\", relname, distance_names [i]);\n if (SPI_gettypeid (tupdesc, d_attnum [i]) != INT4OID)\n\telog (ERROR, \"plot_corners (%s): attribute %s must be of INT4 type\",\n\t relname, distance_names [i]);\n d [i] = DatumGetInt32 (SPI_getbinval (rettuple, tupdesc, d_attnum [i], &isnull));\n if (isnull)\n\telog (ERROR, \"plot_corners (%s): attribute %s must not be NULL\",\n\t relname, distance_names [i]);\n }\n\n for (i = 0; i < N_CORNERS; i++)\n {\n c_attnum [i] = SPI_fnumber (tupdesc, corner_names [i]);\n if (c_attnum [i] < 0)\n\telog (ERROR, \"plot_corners (%s): there is no attribute %s\", relname, corner_names [i]);\n if (SPI_gettypeid (tupdesc, c_attnum [i]) != POINTOID)\n\telog (ERROR, \"plot_corners (%s): attribute %s must be of POINT type\",\n\t relname, distance_names [i]);\n }\n\n corners = calc_corners (N_DISTANCES, d);\n\n c [0] = PointGetDatum (&corners.LL); /* ??? - how to convert Point to Datum? */\n c [1] = PointGetDatum (&corners.LR);\n c [2] = PointGetDatum (&corners.UL);\n c [3] = PointGetDatum (&corners.UR);\n\n rettuple = SPI_modifytuple (rel, rettuple, N_CORNERS, c_attnum, c, NULL);\n if (rettuple == NULL)\n elog (ERROR, \"plot_corners (%s): %d returned by SPI_modifytuple\",\n\t relname, SPI_result);\n\n pfree (relname);\n\n return (rettuple);\n}\n\n\t\t\t\t/* calculate corner positions */\nCorners calc_corners (int n_distances, const int32 * d)\n{\n Corners c;\n\n c.LL.x = 0;\t\t\t/* ??? - how to initialize/access Point values? */\n c.LL.y = 0;\n\n c.LR.x = 1;\n c.LR.y = 0;\n\n c.UL.x = 0;\n c.UL.y = 1;\n\n c.UR.x = 1;\n c.UR.y = 1;\n\n return c;\n}\n\n", "msg_date": "Mon, 14 Jun 1999 15:32:11 -0600 (MDT)", "msg_from": "Brook Milligan <[email protected]>", "msg_from_op": true, "msg_subject": "trigger functions and access to datatypes" } ]
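One way to answer the "how to convert Point to Datum?" questions embedded in the code above, offered here as a hedged sketch rather than a definitive patch: Point (the struct from utils/geo_decls.h that the file already includes) is a pass-by-reference type, so its Datum form is simply a pointer wrapped with PointerGetDatum(); allocating the struct with palloc() ties its lifetime to the backend memory context instead of the C stack frame. The helper name below is invented for illustration, and the includes mirror the ones the question's own file uses.

#include "executor/spi.h"		/* brings in Datum, palloc() and friends */
#include "utils/geo_decls.h"	/* Point */

/* build a Datum carrying a freshly allocated Point */
static Datum
point_to_datum(double x, double y)
{
	Point	   *pt = (Point *) palloc(sizeof(Point));

	pt->x = x;
	pt->y = y;
	/* pass-by-reference types travel through the executor as pointers */
	return PointerGetDatum(pt);
}

With something like that in place, calc_corners()-style code could fill c[0] through c[3] via calls such as c[0] = point_to_datum(0, 0), and the existing SPI_modifytuple(rel, rettuple, N_CORNERS, c_attnum, c, NULL) call can be left exactly as written.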