From fdf07fe14f20ed22cba4d7da2544522778de71d8 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter_e@gmx.net>
Date: Fri, 21 Sep 2001 21:58:30 +0000
Subject: [PATCH] For consistency with the rest of PostgreSQL, rename BLOBs to
 large objects in messages and documentation.

---
 doc/src/sgml/ref/pg_dump.sgml        | 12 ++++++------
 doc/src/sgml/ref/pg_restore.sgml     | 16 ++++++++--------
 src/bin/pg_dump/pg_backup_archiver.c | 28 ++++++++++++++--------------
 src/bin/pg_dump/pg_backup_custom.c   |  6 +++---
 src/bin/pg_dump/pg_backup_db.c       | 16 ++++++++--------
 src/bin/pg_dump/pg_backup_files.c    | 16 ++++++++--------
 src/bin/pg_dump/pg_backup_tar.c      |  6 +++---
 src/bin/pg_dump/pg_dump.c            | 12 ++++++------
 8 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/doc/src/sgml/ref/pg_dump.sgml b/doc/src/sgml/ref/pg_dump.sgml
index 9712ed3aa74..5d031d66f2e 100644
--- a/doc/src/sgml/ref/pg_dump.sgml
+++ b/doc/src/sgml/ref/pg_dump.sgml
@@ -1,5 +1,5 @@
 <!--
-$Header: /cvsroot/pgsql/doc/src/sgml/ref/pg_dump.sgml,v 1.36 2001/09/03 12:57:50 petere Exp $
+$Header: /cvsroot/pgsql/doc/src/sgml/ref/pg_dump.sgml,v 1.37 2001/09/21 21:58:29 petere Exp $
 Postgres documentation
 -->
 
@@ -144,7 +144,7 @@
       <term>--blobs</term>
       <listitem>
        <para>
-        Dump data and <acronym>BLOB</acronym> data.
+        Include large objects in dump.
        </para>
       </listitem>
      </varlistentry>
@@ -616,8 +616,8 @@ connectDBStart() -- connect() failed: No such file or directory
   </para>
 
   <para>
-   To dump a database called mydb that contains
-   <acronym>BLOB</acronym>s to a <filename>tar</filename> file:
+   To dump a database called <literal>mydb</> that contains
+   large objects to a <filename>tar</filename> file:
 
 <screen>
 <prompt>$</prompt> <userinput>pg_dump -Ft -b mydb > db.tar</userinput>
@@ -625,8 +625,8 @@ connectDBStart() -- connect() failed: No such file or directory
   </para>
 
   <para>
-   To reload this database (with <acronym>BLOB</acronym>s) to an
-   existing database called newdb:
+   To reload this database (with large objects) to an
+   existing database called <literal>newdb</>:
 
 <screen>
 <prompt>$</prompt> <userinput>pg_restore -d newdb db.tar</userinput>
diff --git a/doc/src/sgml/ref/pg_restore.sgml b/doc/src/sgml/ref/pg_restore.sgml
index e498d76048a..c048ebbe8c0 100644
--- a/doc/src/sgml/ref/pg_restore.sgml
+++ b/doc/src/sgml/ref/pg_restore.sgml
@@ -1,4 +1,4 @@
-<!-- $Header: /cvsroot/pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.16 2001/09/13 15:55:24 petere Exp $ -->
+<!-- $Header: /cvsroot/pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.17 2001/09/21 21:58:30 petere Exp $ -->
 
 <refentry id="APP-PGRESTORE">
 <docinfo>
@@ -213,7 +213,7 @@
     <listitem>
      <para>
       Connect to database <replaceable class="parameter">dbname</replaceable> and restore
-      directly into the database. BLOBs can only be restored by using a direct database connection.
+      directly into the database. Large objects can only be restored by using a direct database connection.
      </para>
     </listitem>
    </varlistentry>
@@ -585,8 +585,8 @@ connectDBStart() -- connect() failed: No such file or directory
 
    <listitem>
     <para>
-     <command>pg_restore</command> will not restore BLOBs for a single table. If
-     an archive contains BLOBs, then all BLOBs will be restored.
+     <command>pg_restore</command> will not restore large objects for a single table. If
+     an archive contains large objects, then all large objects will be restored.
     </para>
    </listitem>
 
@@ -620,8 +620,8 @@ connectDBStart() -- connect() failed: No such file or directory
   </para>
 
   <para>
-   To dump a database called mydb that contains
-   <acronym>BLOB</acronym>s to a <filename>tar</filename> file:
+   To dump a database called <literal>mydb</> that contains
+   large objects to a <filename>tar</filename> file:
 
 <screen>
 <prompt>$</prompt> <userinput>pg_dump -Ft -b mydb > db.tar</userinput>
@@ -629,8 +629,8 @@ connectDBStart() -- connect() failed: No such file or directory
   </para>
 
   <para>
-   To reload this database (with <acronym>BLOB</acronym>s) to an
-   existing database called newdb:
+   To reload this database (with large objects) to an
+   existing database called <literal>newdb</>:
 
 <screen>
 <prompt>$</prompt> <userinput>pg_restore -d newdb db.tar</userinput>
diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c
index 4ada30994f2..0e7cba50a19 100644
--- a/src/bin/pg_dump/pg_backup_archiver.c
+++ b/src/bin/pg_dump/pg_backup_archiver.c
@@ -15,7 +15,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.32 2001/08/22 20:23:23 petere Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_archiver.c,v 1.33 2001/09/21 21:58:30 petere Exp $
  *
  * Modifications - 28-Jun-2000 - pjw@rhyme.com.au
  *
@@ -333,7 +333,7 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 				 * warnings.
 				 */
 				if (!AH->CustomOutPtr)
-					write_msg(modulename, "WARNING: skipping BLOB restoration\n");
+					write_msg(modulename, "WARNING: skipping large object restoration\n");
 
 			}
 			else
@@ -398,12 +398,12 @@ RestoreArchive(Archive *AHX, RestoreOptions *ropt)
 
 			if ((reqs & 2) != 0)	/* We loaded the data */
 			{
-				ahlog(AH, 1, "fixing up BLOB reference for %s\n", te->name);
+				ahlog(AH, 1, "fixing up large object cross-reference for %s\n", te->name);
 				FixupBlobRefs(AH, te->name);
 			}
 		}
 		else
-			ahlog(AH, 2, "ignoring BLOB cross-references for %s %s\n", te->desc, te->name);
+			ahlog(AH, 2, "ignoring large object cross-references for %s %s\n", te->desc, te->name);
 
 		te = te->next;
 	}
@@ -717,7 +717,7 @@ StartBlob(Archive *AHX, Oid oid)
 	ArchiveHandle *AH = (ArchiveHandle *) AHX;
 
 	if (!AH->StartBlobPtr)
-		die_horribly(AH, modulename, "BLOB output not supported in chosen format\n");
+		die_horribly(AH, modulename, "large object output not supported in chosen format\n");
 
 	(*AH->StartBlobPtr) (AH, AH->currToc, oid);
 
@@ -757,14 +757,14 @@ EndRestoreBlobs(ArchiveHandle *AH)
 {
 	if (AH->txActive)
 	{
-		ahlog(AH, 2, "committing BLOB transactions\n");
+		ahlog(AH, 2, "committing large object transactions\n");
 		CommitTransaction(AH);
 	}
 
 	if (AH->blobTxActive)
 		CommitTransactionXref(AH);
 
-	ahlog(AH, 1, "restored %d BLOBs\n", AH->blobCount);
+	ahlog(AH, 1, "restored %d large objects\n", AH->blobCount);
 }
 
 
@@ -781,7 +781,7 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid)
 	if (!AH->createdBlobXref)
 	{
 		if (!AH->connection)
-			die_horribly(AH, modulename, "cannot restore BLOBs without a database connection\n");
+			die_horribly(AH, modulename, "cannot restore large objects without a database connection\n");
 
 		CreateBlobXrefTable(AH);
 		AH->createdBlobXref = 1;
@@ -792,7 +792,7 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid)
 	 */
 	if (!AH->txActive)
 	{
-		ahlog(AH, 2, "starting BLOB transactions\n");
+		ahlog(AH, 2, "starting large object transactions\n");
 		StartTransaction(AH);
 	}
 	if (!AH->blobTxActive)
@@ -800,15 +800,15 @@ StartRestoreBlob(ArchiveHandle *AH, Oid oid)
 
 	loOid = lo_creat(AH->connection, INV_READ | INV_WRITE);
 	if (loOid == 0)
-		die_horribly(AH, modulename, "could not create BLOB\n");
+		die_horribly(AH, modulename, "could not create large object\n");
 
-	ahlog(AH, 2, "restoring BLOB oid %u as %u\n", oid, loOid);
+	ahlog(AH, 2, "restoring large object with oid %u as %u\n", oid, loOid);
 
 	InsertBlobXref(AH, oid, loOid);
 
 	AH->loFd = lo_open(AH->connection, loOid, INV_WRITE);
 	if (AH->loFd == -1)
-		die_horribly(AH, modulename, "could not open BLOB\n");
+		die_horribly(AH, modulename, "could not open large object\n");
 
 	AH->writingBlob = 1;
 }
@@ -824,7 +824,7 @@ EndRestoreBlob(ArchiveHandle *AH, Oid oid)
 	 */
 	if (((AH->blobCount / BLOB_BATCH_SIZE) * BLOB_BATCH_SIZE) == AH->blobCount)
 	{
-		ahlog(AH, 2, "committing BLOB transactions\n");
+		ahlog(AH, 2, "committing large object transactions\n");
 		CommitTransaction(AH);
 		CommitTransactionXref(AH);
 	}
@@ -1198,7 +1198,7 @@ ahwrite(const void *ptr, size_t size, size_t nmemb, ArchiveHandle *AH)
 	if (AH->writingBlob)
 	{
 		res = lo_write(AH->connection, AH->loFd, (void *) ptr, size * nmemb);
-		ahlog(AH, 5, "wrote %d bytes of BLOB data (result = %d)\n", size * nmemb, res);
+		ahlog(AH, 5, "wrote %d bytes of large object data (result = %d)\n", size * nmemb, res);
 		if (res < size * nmemb)
 			die_horribly(AH, modulename, "could not write to large object (result: %d, expected: %d)\n",
 						 res, size * nmemb);
diff --git a/src/bin/pg_dump/pg_backup_custom.c b/src/bin/pg_dump/pg_backup_custom.c
index 7aeffac1d1c..76d4a78ff78 100644
--- a/src/bin/pg_dump/pg_backup_custom.c
+++ b/src/bin/pg_dump/pg_backup_custom.c
@@ -19,7 +19,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.14 2001/08/19 22:17:03 petere Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.15 2001/09/21 21:58:30 petere Exp $
  *
  * Modifications - 28-Jun-2000 - pjw@rhyme.com.au
  *
@@ -386,7 +386,7 @@ static void
 _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 {
 	if (oid == 0)
-		die_horribly(AH, modulename, "invalid OID for BLOB\n");
+		die_horribly(AH, modulename, "invalid OID for large object\n");
 
 	WriteInt(AH, oid);
 	_StartDataCompressor(AH, te);
@@ -503,7 +503,7 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
 
 			case BLK_BLOBS:
 				if (!AH->connection)
-					die_horribly(AH, modulename, "BLOBs cannot be loaded without a database connection\n");
+					die_horribly(AH, modulename, "large objects cannot be loaded without a database connection\n");
 
 				_LoadBlobs(AH);
 				break;
diff --git a/src/bin/pg_dump/pg_backup_db.c b/src/bin/pg_dump/pg_backup_db.c
index 4ba29058b3e..ed325db2045 100644
--- a/src/bin/pg_dump/pg_backup_db.c
+++ b/src/bin/pg_dump/pg_backup_db.c
@@ -5,7 +5,7 @@
  *	Implements the basic DB functions used by the archiver.
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.25 2001/09/17 02:07:51 inoue Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.26 2001/09/21 21:58:30 petere Exp $
  *
  * NOTES
  *
@@ -738,7 +738,7 @@ FixupBlobRefs(ArchiveHandle *AH, char *tablename)
 		{
 			attr = PQgetvalue(res, i, 0);
 
-			ahlog(AH, 1, "fixing BLOB cross-references for %s.%s\n", tablename, attr);
+			ahlog(AH, 1, "fixing large object cross-references for %s.%s\n", tablename, attr);
 
 			resetPQExpBuffer(tblQry);
 
@@ -785,16 +785,16 @@ CreateBlobXrefTable(ArchiveHandle *AH)
 	if (!AH->blobConnection)
 		AH->blobConnection = _connectDB(AH, NULL, NULL);
 
-	ahlog(AH, 1, "creating table for BLOB cross-references\n");
+	ahlog(AH, 1, "creating table for large object cross-references\n");
 
 	appendPQExpBuffer(qry, "Create Temporary Table %s(oldOid oid, newOid oid);", BLOB_XREF_TABLE);
 
-	ExecuteSqlCommand(AH, qry, "could not create BLOB cross reference table", true);
+	ExecuteSqlCommand(AH, qry, "could not create large object cross-reference table", true);
 
 	resetPQExpBuffer(qry);
 
 	appendPQExpBuffer(qry, "Create Unique Index %s_ix on %s(oldOid)", BLOB_XREF_TABLE, BLOB_XREF_TABLE);
-	ExecuteSqlCommand(AH, qry, "could not create index on BLOB cross reference table", true);
+	ExecuteSqlCommand(AH, qry, "could not create index on large object cross-reference table", true);
 
 	destroyPQExpBuffer(qry);
 }
@@ -806,7 +806,7 @@ InsertBlobXref(ArchiveHandle *AH, int old, int new)
 	appendPQExpBuffer(qry,
 					  "Insert Into %s(oldOid, newOid) Values (%d, %d);",
 					  BLOB_XREF_TABLE, old, new);
-	ExecuteSqlCommand(AH, qry, "could not create BLOB cross reference entry", true);
+	ExecuteSqlCommand(AH, qry, "could not create large object cross-reference entry", true);
 
 	destroyPQExpBuffer(qry);
 }
@@ -832,7 +832,7 @@ StartTransactionXref(ArchiveHandle *AH)
 	appendPQExpBuffer(qry, "Begin;");
 
 	ExecuteSqlCommand(AH, qry,
-					  "could not start transaction for BLOB cross references", true);
+					  "could not start transaction for large object cross-references", true);
 	AH->blobTxActive = true;
 
 	destroyPQExpBuffer(qry);
@@ -858,7 +858,7 @@ CommitTransactionXref(ArchiveHandle *AH)
 
 	appendPQExpBuffer(qry, "Commit;");
 
-	ExecuteSqlCommand(AH, qry, "could not commit transaction for BLOB cross references", true);
+	ExecuteSqlCommand(AH, qry, "could not commit transaction for large object cross-references", true);
 	AH->blobTxActive = false;
 
 	destroyPQExpBuffer(qry);
diff --git a/src/bin/pg_dump/pg_backup_files.c b/src/bin/pg_dump/pg_backup_files.c
index 47a224fb89f..db54ca0fd99 100644
--- a/src/bin/pg_dump/pg_backup_files.c
+++ b/src/bin/pg_dump/pg_backup_files.c
@@ -20,7 +20,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.12 2001/07/03 20:21:48 petere Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.13 2001/09/21 21:58:30 petere Exp $
 *
 * Modifications - 28-Jun-2000 - pjw@rhyme.com.au
 *
@@ -363,7 +363,7 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
 	ctx->blobToc = fopen("blobs.toc", PG_BINARY_R);
 
 	if (ctx->blobToc == NULL)
-		die_horribly(AH, modulename, "could not open BLOB TOC for input: %s\n", strerror(errno));
+		die_horribly(AH, modulename, "could not open large object TOC for input: %s\n", strerror(errno));
 
 	_getBlobTocEntry(AH, &oid, fname);
 
@@ -376,7 +376,7 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
 	}
 
 	if (fclose(ctx->blobToc) != 0)
-		die_horribly(AH, modulename, "could not close BLOB TOC file: %s\n", strerror(errno));
+		die_horribly(AH, modulename, "could not close large object TOC file: %s\n", strerror(errno));
 
 	EndRestoreBlobs(AH);
 }
@@ -474,7 +474,7 @@ _StartBlobs(ArchiveHandle *AH, TocEntry *te)
 
 	if (ctx->blobToc == NULL)
 		die_horribly(AH, modulename,
-					 "could not open BLOB TOC for output: %s\n", strerror(errno));
+					 "could not open large object TOC for output: %s\n", strerror(errno));
 
 }
 
@@ -495,7 +495,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 	char	   *sfx;
 
 	if (oid == 0)
-		die_horribly(AH, modulename, "invalid OID for BLOB (%u)\n", oid);
+		die_horribly(AH, modulename, "invalid OID for large object (%u)\n", oid);
 
 	if (AH->compression != 0)
 		sfx = ".gz";
@@ -514,7 +514,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 #endif
 
 	if (tctx->FH == NULL)
-		die_horribly(AH, modulename, "could not open BLOB file\n");
+		die_horribly(AH, modulename, "could not open large object file\n");
 }
 
 /*
@@ -529,7 +529,7 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 	lclTocEntry *tctx = (lclTocEntry *) te->formatData;
 
 	if (GZCLOSE(tctx->FH) != 0)
-		die_horribly(AH, modulename, "could not close BLOB file\n");
+		die_horribly(AH, modulename, "could not close large object file\n");
 }
 
 /*
@@ -547,7 +547,7 @@ _EndBlobs(ArchiveHandle *AH, TocEntry *te)
 	/* WriteInt(AH, 0); */
 
 	if (fclose(ctx->blobToc) != 0)
-		die_horribly(AH, modulename, "could not close BLOB TOC file: %s\n", strerror(errno));
+		die_horribly(AH, modulename, "could not close large object TOC file: %s\n", strerror(errno));
 
 }
 
diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c
index f75dca1474b..35d8e9b344e 100644
--- a/src/bin/pg_dump/pg_backup_tar.c
+++ b/src/bin/pg_dump/pg_backup_tar.c
@@ -16,7 +16,7 @@
  *
  *
  * IDENTIFICATION
- *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.17 2001/07/03 20:21:48 petere Exp $
+ *		$Header: /cvsroot/pgsql/src/bin/pg_dump/pg_backup_tar.c,v 1.18 2001/09/21 21:58:30 petere Exp $
 *
 * Modifications - 28-Jun-2000 - pjw@rhyme.com.au
 *
@@ -715,7 +715,7 @@ _LoadBlobs(ArchiveHandle *AH, RestoreOptions *ropt)
 
 		if (strncmp(th->targetFile, "blob_", 5) == 0 && oid != 0)
 		{
-			ahlog(AH, 1, "restoring BLOB oid %u\n", oid);
+			ahlog(AH, 1, "restoring large object OID %u\n", oid);
 
 			StartRestoreBlob(AH, oid);
 
@@ -911,7 +911,7 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
 	char	   *sfx;
 
 	if (oid == 0)
-		die_horribly(AH, modulename, "invalid OID for BLOB (%u)\n", oid);
+		die_horribly(AH, modulename, "invalid OID for large object (%u)\n", oid);
 
 	if (AH->compression != 0)
 		sfx = ".gz";
diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c
index 590ded6fc01..4f7004392fa 100644
--- a/src/bin/pg_dump/pg_dump.c
+++ b/src/bin/pg_dump/pg_dump.c
@@ -22,7 +22,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.229 2001/09/07 01:11:50 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/bin/pg_dump/pg_dump.c,v 1.230 2001/09/21 21:58:30 petere Exp $
 *
 *-------------------------------------------------------------------------
 */
@@ -141,7 +141,7 @@ help(const char *progname)
 #ifdef HAVE_GETOPT_LONG
 	puts(gettext(
 		"  -a, --data-only          dump only the data, not the schema\n"
-		"  -b, --blobs              include BLOB data in dump\n"
+		"  -b, --blobs              include large objects in dump\n"
 		"  -c, --clean              clean (drop) schema prior to create\n"
 		"  -C, --create             include commands to create database in dump\n"
 		"  -d, --inserts            dump data as INSERT, rather than COPY, commands\n"
@@ -175,7 +175,7 @@ help(const char *progname)
 #else
 	puts(gettext(
 		"  -a                       dump only the data, not the schema\n"
-		"  -b                       include BLOB data in dump\n"
+		"  -b                       include large objects in dump\n"
 		"  -c                       clean (drop) schema prior to create\n"
 		"  -C                       include commands to create database in dump\n"
 		"  -d                       dump data as INSERT, rather than COPY, commands\n"
@@ -961,7 +961,7 @@ main(int argc, char **argv)
 
 	if (outputBlobs && tablename != NULL && strlen(tablename) > 0)
 	{
-		write_msg(NULL, "BLOB output is not supported for a single table.\n");
+		write_msg(NULL, "Large object output is not supported for a single table.\n");
 		write_msg(NULL, "Use all tables or a full dump instead.\n");
 		exit(1);
 	}
@@ -975,7 +975,7 @@ main(int argc, char **argv)
 
 	if (outputBlobs == true && (format[0] == 'p' || format[0] == 'P'))
 	{
-		write_msg(NULL, "BLOB output is not supported for plain text dump files.\n");
+		write_msg(NULL, "large object output is not supported for plain text dump files.\n");
 		write_msg(NULL, "(Use a different output format.)\n");
 		exit(1);
 	}
@@ -1216,7 +1216,7 @@ dumpBlobs(Archive *AH, char *junkOid, void *junkVal)
 	Oid			blobOid;
 
 	if (g_verbose)
-		write_msg(NULL, "saving BLOBs\n");
+		write_msg(NULL, "saving large objects\n");
 
 	/* Cursor to get all BLOB tables */
 	if (AH->remoteVersion >= 70100)
-- 
GitLab
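
For reference, the per-object restore sequence that the archiver code above performs (lo_creat, then lo_open, then lo_write, inside a transaction) can be sketched as a standalone libpq program. This is a minimal illustration, not code from the patch: the connection string "dbname=newdb" and the sample data are assumptions, and error handling is abbreviated.

	/*
	 * Sketch of the libpq large-object calls used by pg_restore's
	 * StartRestoreBlob()/ahwrite() path.  Large-object operations
	 * must run inside a transaction block.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <libpq-fe.h>
	#include <libpq/libpq-fs.h>		/* INV_READ, INV_WRITE */

	int
	main(void)
	{
		PGconn	   *conn = PQconnectdb("dbname=newdb");	/* assumed target db */
		const char *data = "example large object contents";	/* assumed payload */
		Oid			loOid;
		int			loFd;

		if (PQstatus(conn) != CONNECTION_OK)
		{
			fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
			return 1;
		}

		PQclear(PQexec(conn, "BEGIN"));

		/* create a new large object, as StartRestoreBlob() does */
		loOid = lo_creat(conn, INV_READ | INV_WRITE);
		if (loOid == 0)
			fprintf(stderr, "could not create large object\n");

		/* open it for writing and copy the data in */
		loFd = lo_open(conn, loOid, INV_WRITE);
		if (loFd == -1)
			fprintf(stderr, "could not open large object\n");

		if (lo_write(conn, loFd, data, strlen(data)) < (int) strlen(data))
			fprintf(stderr, "could not write to large object\n");

		lo_close(conn, loFd);
		PQclear(PQexec(conn, "COMMIT"));

		printf("created large object with OID %u\n", loOid);
		PQfinish(conn);
		return 0;
	}

Compile against libpq (for example, cc lo_demo.c -I$(pg_config --includedir) -L$(pg_config --libdir) -lpq). These are the same lo_* client calls pg_restore issues when given -d, which is why the documentation changed here notes that large objects can only be restored over a direct database connection.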