diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index a1faab4b609aa92dfd0d20f9264f7a56de381b24..8af0133de19d4c9e55ac35927239465d13ae312a 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -15,7 +15,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.25 2001/04/25 07:03:19 pjw Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.26 2001/05/12 01:03:59 pjw Exp $
  *
  * Modifications - 28-Jun-2000 - pjw@rhyme.com.au
  *
@@ -51,6 +51,12 @@
  *	- Treat OIDs with more respect (avoid using ints, use macros for
  *	  conversion & comparison).
  *
+ * Modifications - 10-May-2001 - pjw@rhyme.com.au
+ *	- Treat SEQUENCE SET TOC entries as data entries rather than schema
+ *	  entries.
+ *	- Make allowance for data entries that did not have a data dumper
+ *	  routine (eg. SEQUENCE SET)
+ *
  *-------------------------------------------------------------------------
  */
 
@@ -154,6 +160,7 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 	int			reqs;
 	OutputContext sav;
 	int			impliedDataOnly;
+	bool		defnDumped;
 
 	AH->ropt = ropt;
 
@@ -276,6 +283,8 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 			}
 		}
 
+		defnDumped = false;
+
 		if ((reqs & 1) != 0)	/* We want the schema */
 		{
 			/* Reconnect if necessary */
@@ -283,6 +292,7 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 			ahlog(AH, 1, "Creating %s %s\n", te->desc, te->name);
 			_printTocEntry(AH, te, ropt, false);
+			defnDumped = true;
 
 			/* If we created a DB, connect to it... */
 			if (strcmp(te->desc, "DATABASE") == 0)
 			{
@@ -290,64 +300,82 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 				ahlog(AH, 1, "Connecting to new DB '%s' as %s\n", te->name, te->owner);
 				_reconnectAsUser(AH, te->name, te->owner);
 			}
-		}
+		} 
 
 		/*
-		 * If we want data, and it has data, then restore that too
+		 * If we have a data component, then process it
 		 */
-		if (AH->PrintTocDataPtr !=NULL && (reqs & 2) != 0)
+		if ( (reqs & 2) != 0 )
 		{
-#ifndef HAVE_LIBZ
-			if (AH->compression != 0)
-				die_horribly(AH, "%s: Unable to restore data from a compressed archive\n", progname);
-#endif
-
-			_printTocEntry(AH, te, ropt, true);
-
-			/*
-			 * Maybe we can't do BLOBS, so check if this node is for BLOBS
+			/* hadDumper will be set if there is genuine data component for this
+			 * node. Otherwise, we need to check the defn field for statements
+			 * that need to be executed in data-only restores.
 			 */
-			if ((strcmp(te->desc, "BLOBS") == 0) && !_canRestoreBlobs(AH))
+			if (te->hadDumper)
 			{
-				ahprintf(AH, "--\n-- SKIPPED \n--\n\n");
-
 				/*
-				 * This is a bit nasty - we assume, for the moment, that
-				 * if a custom output is used, then we don't want
+				 * If we can output the data, then restore it.
-				 * warnings.
 				 */
-				if (!AH->CustomOutPtr)
-					fprintf(stderr, "%s: WARNING - skipping BLOB restoration\n", progname);
-
-			}
-			else
-			{
-
-				_disableTriggersIfNecessary(AH, te, ropt);
-
-				/*
-				 * Reconnect if necessary (_disableTriggers may have
-				 * reconnected)
-				 */
-				_reconnectAsOwner(AH, "-", te);
-
-				ahlog(AH, 1, "Restoring data for %s \n", te->name);
-
-				/*
-				 * If we have a copy statement, use it. As of V1.3, these
-				 * are separate to allow easy import from withing a
-				 * database connection. Pre 1.3 archives can not use DB
-				 * connections and are sent to output only.
-				 *
-				 * For V1.3+, the table data MUST have a copy statement so
-				 * that we can go into appropriate mode with libpq.
-				 */
-				if (te->copyStmt && strlen(te->copyStmt) > 0)
-					ahprintf(AH, te->copyStmt);
-
-				(*AH->PrintTocDataPtr) (AH, te, ropt);
+				if (AH->PrintTocDataPtr !=NULL && (reqs & 2) != 0)
+				{
+#ifndef HAVE_LIBZ
+					if (AH->compression != 0)
+						die_horribly(AH, "%s: Unable to restore data from a compressed archive\n",
+									 progname);
+#endif
 
-				_enableTriggersIfNecessary(AH, te, ropt);
+					_printTocEntry(AH, te, ropt, true);
+
+					/*
+					 * Maybe we can't do BLOBS, so check if this node is for BLOBS
+					 */
+					if ((strcmp(te->desc, "BLOBS") == 0) && !_canRestoreBlobs(AH))
+					{
+						ahprintf(AH, "--\n-- SKIPPED \n--\n\n");
+
+						/*
+						 * This is a bit nasty - we assume, for the moment, that
+						 * if a custom output is used, then we don't want
+						 * warnings.
+						 */
+						if (!AH->CustomOutPtr)
+							fprintf(stderr, "%s: WARNING - skipping BLOB restoration\n", progname);
+
+					}
+					else
+					{
+
+						_disableTriggersIfNecessary(AH, te, ropt);
+
+						/*
+						 * Reconnect if necessary (_disableTriggers may have
+						 * reconnected)
+						 */
+						_reconnectAsOwner(AH, "-", te);
+
+						ahlog(AH, 1, "Restoring data for %s \n", te->name);
+
+						/*
+						 * If we have a copy statement, use it. As of V1.3, these
+						 * are separate to allow easy import from withing a
+						 * database connection. Pre 1.3 archives can not use DB
+						 * connections and are sent to output only.
+						 *
+						 * For V1.3+, the table data MUST have a copy statement so
+						 * that we can go into appropriate mode with libpq.
+						 */
+						if (te->copyStmt && strlen(te->copyStmt) > 0)
+							ahprintf(AH, te->copyStmt);
+
+						(*AH->PrintTocDataPtr) (AH, te, ropt);
+
+						_enableTriggersIfNecessary(AH, te, ropt);
+					}
+				}
+			} else if (!defnDumped) {
+				/* If we haven't already dumped the defn part, do so now */
+				ahlog(AH, 1, "Executing %s %s\n", te->desc, te->name);
+				_printTocEntry(AH, te, ropt, false);
 			}
 		}
 		te = te->next;
@@ -1829,26 +1857,22 @@ _tocEntryRequired(TocEntry *te, RestoreOptions *ropt)
 			return 0;
 	}
 
-	/* Special Case: If 'SEQUENCE SET' and schemaOnly, then not needed */
-	if (ropt->schemaOnly && (strcmp(te->desc, "SEQUENCE SET") == 0))
-		return 0;
+	/* Special Case: If 'SEQUENCE SET' then it is considered a data entry */
+	if (strcmp(te->desc, "SEQUENCE SET") == 0)
+		res = res & 2;
 
 	/* Mask it if we only want schema */
 	if (ropt->schemaOnly)
 		res = res & 1;
 
 	/* Mask it we only want data */
-	if (ropt->dataOnly && (strcmp(te->desc, "SEQUENCE SET") != 0))
+	if (ropt->dataOnly)
 		res = res & 2;
 
 	/* Mask it if we don't have a schema contribition */
 	if (!te->defn || strlen(te->defn) == 0)
 		res = res & 2;
 
-	/* Mask it if we don't have a possible data contribition */
-	if (!te->hadDumper)
-		res = res & 1;
-
 	/* Finally, if we used a list, limit based on that as well */
 	if (ropt->limitToList && !ropt->idWanted[te->id - 1])
 		return 0;
diff --git a/src/bin/pg_dump/pg_backup_archiver.h b/src/bin/pg_dump/pg_backup_archiver.h
index bb5b03ae1f711948f128dfacbacce3f05ed0983f..fd36acba9fb57f2dfc4ac0249757584b4d1e82e7 100644
--- a/src/bin/pg_dump/pg_backup_archiver.h
+++ b/src/bin/pg_dump/pg_backup_archiver.h
@@ -17,7 +17,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.32 2001/04/25 07:03:19 pjw Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.33 2001/05/12 01:03:59 pjw Exp $
  *
 * Modifications - 28-Jun-2000 - pjw@rhyme.com.au
 *	- Initial version.
@@ -68,7 +68,7 @@ typedef z_stream *z_streamp;
 
 #define K_VERS_MAJOR 1
 #define K_VERS_MINOR 5
-#define K_VERS_REV 5
+#define K_VERS_REV 6
 
 /* Data block types */
 #define BLK_DATA 1
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index b06ee7f58bfbec6b4a43867231b362603e156305..36e2cd1e3780589b99d3d6412c80afa68553779a 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -22,7 +22,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.205 2001/04/25 07:03:19 pjw Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.206 2001/05/12 01:03:59 pjw Exp $
  *
  * Modifications - 6/10/96 - dave@bensoft.com - version 1.13.dhb
  *
@@ -127,6 +127,12 @@
  *	- Don't dump CHECK constraints with same source and names both
  *	  starting with '$'.
  *
+ * Modifications - 10-May-2001 - pjw@rhyme.com.au
+ *
+ *	- Don't dump COMMENTs in data-only dumps
+ *	- Fix view dumping SQL for V7.0
+ *	- Fix bug when getting view oid with long view names
+ *
  *-------------------------------------------------------------------------
  */
 
@@ -2047,15 +2053,37 @@ getTables(int *numTables, FuncInfo *finfo, int numFuncs)
 	 * (sequence) or 'v' (view).
 	 */
 
-	appendPQExpBuffer(query,
+	if (g_fout->remoteVersion >= 70100)
+	{
+		appendPQExpBuffer(query,
 					  "SELECT pg_class.oid, relname, relkind, relacl, "
-					  "(select usename from pg_user where relowner = usesysid) as usename, "
+					  "(select usename from pg_user where relowner = usesysid) as usename, "
 					  "relchecks, reltriggers, relhasindex "
 					  "from pg_class "
 					  "where relname !~ '^pg_' "
 					  "and relkind in ('%c', '%c', '%c') "
 					  "order by oid",
 					  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW);
+	} else {
+		/*
+		 * In 7.1, view relkind was not set to 'v', so we fake this by checking
+		 * if we have a view by looking up pg_class & pg_rewrite.
+		 */
+		appendPQExpBuffer(query,
+					  "SELECT c.oid, relname, relacl, "
+					  "CASE WHEN relhasrules and relkind = 'r' "
+					  "  And EXISTS(SELECT r.rulename FROM pg_rewrite r WHERE "
+					  "             r.ev_class = c.oid AND r.ev_type = '1'::\"char\") "
+					  "THEN 'v'::\"char\" "
+					  "ELSE relkind End AS relkind,"
+					  "relacl, (select usename from pg_user where relowner = usesysid) as usename, "
+					  "relchecks, reltriggers, relhasindex "
+					  "from pg_class c "
+					  "where relname !~ '^pg_' "
+					  "and relkind in ('%c', '%c', '%c') "
+					  "order by oid",
+					  RELKIND_RELATION, RELKIND_SEQUENCE, RELKIND_VIEW);
+	}
 
 	res = PQexec(g_conn, query->data);
 	if (!res ||
@@ -2103,9 +2131,9 @@ getTables(int *numTables, FuncInfo *finfo, int numFuncs)
 			resetPQExpBuffer(query);
 			appendPQExpBuffer(query, "SELECT definition as viewdef, ");
 			/* XXX 7.2 - replace with att from pg_views or some other generic source */
-			appendPQExpBuffer(query, "(select oid from pg_rewrite where rulename='_RET'"
-							  " || viewname) as view_oid from pg_views"
-							  " where viewname = ");
+			appendPQExpBuffer(query, "(select oid from pg_rewrite where "
+							  " rulename=('_RET' || viewname)::name) as view_oid"
+							  " from pg_views where viewname = ");
 			formatStringLiteral(query, tblinfo[i].relname, CONV_ALL);
 			appendPQExpBuffer(query, ";");
 
@@ -2974,6 +3002,10 @@ dumpComment(Archive *fout, const char *target, const char *oid)
 	PQExpBuffer query;
 	int			i_description;
 
+	/* Comments are SCHEMA not data */
+	if (dataOnly)
+		return;
+
 	/*** Build query to find comment ***/
 	query = createPQExpBuffer();
 
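
Illustrative note, not part of the patch: after this change, _tocEntryRequired() clears the schema bit of a SEQUENCE SET entry up front, so the entry survives a data-only restore and is dropped from a schema-only one; RestoreArchive() then executes the entry's defn (the setval statement) through the new "else if (!defnDumped)" branch, since such entries have no data-dumper routine. The standalone C sketch below mimics only the bit masking, assuming bit 1 = schema and bit 2 = data as in the patched code; the type SketchTocEntry and the function sketch_required are invented names for illustration and do not exist in pg_dump.

/*
 * Standalone sketch of the requirement-masking logic added to
 * _tocEntryRequired(); the struct and function names are hypothetical
 * and exist only to illustrate the bit arithmetic.
 */
#include <stdio.h>
#include <string.h>

typedef struct
{
	const char *desc;		/* e.g. "TABLE", "TABLE DATA", "SEQUENCE SET" */
	const char *defn;		/* schema (or setval) statement, may be empty */
	int			hadDumper;	/* non-zero if a data-dumper routine exists   */
} SketchTocEntry;

static int
sketch_required(const SketchTocEntry *te, int schemaOnly, int dataOnly)
{
	int			res = 3;	/* bit 1 = schema wanted, bit 2 = data wanted */

	/* SEQUENCE SET is now treated purely as a data entry */
	if (strcmp(te->desc, "SEQUENCE SET") == 0)
		res = res & 2;

	if (schemaOnly)
		res = res & 1;
	if (dataOnly)
		res = res & 2;

	/* no schema contribution without a definition */
	if (!te->defn || strlen(te->defn) == 0)
		res = res & 2;

	/*
	 * The old "mask if no data dumper" step is gone, so a defn-only
	 * SEQUENCE SET entry keeps its data bit.
	 */
	return res;
}

int
main(void)
{
	SketchTocEntry seqset = {"SEQUENCE SET", "SELECT setval('s', 42, 't');", 0};

	printf("data-only restore:   reqs = %d\n", sketch_required(&seqset, 0, 1));
	printf("schema-only restore: reqs = %d\n", sketch_required(&seqset, 1, 0));
	return 0;
}

Compiled on its own, this prints reqs = 2 for the data-only case (the setval statement would be executed) and reqs = 0 for the schema-only case (the entry is skipped entirely), which is the behaviour the archiver changes above are aiming for.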