

C++ prep_status function code examples

This article collects typical usage examples of the prep_status function in C++. If you are looking for concrete answers to questions such as what prep_status does, how to call it, or what real-world uses of prep_status look like, the hand-picked code examples below should help.


The following shows 15 code examples of the prep_status function, sorted by popularity by default.
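All of the examples share the same progress-reporting pattern: prep_status() prints a fixed-width "Checking for ..." / "Creating ..." status line without a trailing newline, the function does its work, and check_ok() (or pg_fatal()/pg_log(PG_FATAL, ...) on error) completes the line. A minimal sketch of that pattern is shown below; check_old_cluster_thing() and cluster_has_problem() are hypothetical names used only for illustration, while prep_status(), check_ok() and pg_fatal() are the real pg_upgrade utilities that appear throughout the examples.

/*
 * Minimal sketch of the prep_status()/check_ok() pattern (illustration
 * only; not taken from the PostgreSQL sources).
 */
static void
check_old_cluster_thing(ClusterInfo *cluster)
{
	prep_status("Checking for <some condition> in the old cluster");

	if (cluster_has_problem(cluster))	/* hypothetical helper */
		pg_fatal("Your installation has <some condition>; fix it and retry the upgrade.\n");

	check_ok();					/* finishes the status line with "ok" */
}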

Example 1: dump_new_oids

/*
 * Dump OIDs of all objects, after upgrading the QD cluster.
 */
void
dump_new_oids(migratorContext *ctx)
{
	PGconn	   *conn;
	char		filename[MAXPGPATH];
	FILE	   *oid_dump;
	int			dbnum;

	prep_status(ctx, "Exporting object OIDs from the new cluster");

	/* Dump OIDs of global objects */
	snprintf(filename, sizeof(filename), "%s/%s", ctx->cwd, GLOBAL_OIDS_DUMP_FILE);
	oid_dump = fopen(filename, "w");
	if (!oid_dump)
		pg_log(ctx, PG_FATAL, "Could not create necessary file:  %s\n", filename);

	conn = connectToServer(ctx, "template1", CLUSTER_NEW);
	PQclear(executeQueryOrDie(ctx, conn, "set search_path='pg_catalog';"));

	dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, fsname FROM pg_filespace", "preassign_filespace_oid");
	dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, spcname FROM pg_tablespace", "preassign_tablespace_oid");
	dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, rsqname FROM pg_resqueue", "preassign_resqueue_oid");
	dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, resqueueid, restypid FROM pg_resqueuecapability", "preassign_resqueuecb_oid");
	dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, rolname FROM pg_authid", "preassign_authid_oid");
	dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, datname FROM pg_database", "preassign_database_oid");

	PQfinish(conn);
	fclose(oid_dump);

	/*
	 * We use the old database OIDs, because we won't have convenient access
	 * to the new OIDs when we read these back in. It doesn't really matter
	 * which ones we use, as long as we're consistent.
	 */
	for (dbnum = 0; dbnum < ctx->old.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *olddb = &ctx->old.dbarr.dbs[dbnum];

		snprintf(filename, sizeof(filename), "%s/" DB_OIDS_DUMP_FILE_MASK, ctx->cwd, olddb->db_oid);
		oid_dump = fopen(filename, "w");
		if (!oid_dump)
			pg_log(ctx, PG_FATAL, "Could not create necessary file:  %s\n", filename);

		conn = connectToServer(ctx, olddb->db_name, CLUSTER_NEW);

		PQclear(executeQueryOrDie(ctx, conn, "set search_path='pg_catalog';"));

		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, typname, typnamespace FROM pg_type", "preassign_type_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, oprnamespace, oprname FROM pg_operator", "preassign_operator_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, nspname FROM pg_namespace", "preassign_namespace_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, lanname FROM pg_language", "preassign_language_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, castsource, casttarget FROM pg_cast", "preassign_cast_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, connamespace, conname FROM pg_conversion", "preassign_conversion_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, ev_class, rulename FROM pg_rewrite", "preassign_rule_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, opfname, opfnamespace FROM pg_opfamily", "preassign_opfam_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, opcname, opcnamespace FROM pg_opclass", "preassign_opclass_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, prsnamespace, prsname FROM pg_ts_parser", "preassign_tsparser_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, dictnamespace, dictname FROM pg_ts_dict", "preassign_tsdict_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, tmplnamespace, tmplname FROM pg_ts_template", "preassign_tstemplate_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, cfgnamespace, cfgname FROM pg_ts_config", "preassign_tsconfig_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, extname FROM pg_extension", "preassign_extension_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, enumtypid, enumlabel FROM pg_enum", "preassign_enum_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, connamespace, conname, conrelid, contypid FROM pg_constraint", "preassign_constraint_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, ptcname FROM pg_extprotocol", "preassign_extprotocol_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, adrelid, adnum FROM pg_attrdef", "preassign_attrdef_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, relname, relnamespace FROM pg_class", "preassign_relation_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, proname, pronamespace FROM pg_proc", "preassign_procedure_oid");
		dump_rows(ctx, NULL, oid_dump, conn, "SELECT oid, amopmethod FROM pg_amop", "preassign_amop_oid");

		PQfinish(conn);

		fclose(oid_dump);
	}

	check_ok(ctx);
}
Developer: phan-pivotal, Project: gpdb, Lines of code: 79, Source file: oid_dump.c

Example 2: check_for_isn_and_int8_passing_mismatch

/*
 *	check_for_isn_and_int8_passing_mismatch()
 *
 *	contrib/isn relies on data type int8, and in 8.4 int8 can now be passed
 *	by value.  The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
 *	it must match for the old and new servers.
 */
static void
check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for contrib/isn with bigint-passing mismatch");

	if (old_cluster.controldata.float8_pass_by_value ==
		new_cluster.controldata.float8_pass_by_value)
	{
		/* no mismatch */
		check_ok();
		return;
	}

	snprintf(output_path, sizeof(output_path), "%s/contrib_isn_and_int8_pass_by_value.txt",
			 os_info.cwd);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_proname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any functions coming from contrib/isn */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, p.proname "
								"FROM	pg_catalog.pg_proc p, "
								"		pg_catalog.pg_namespace n "
								"WHERE	p.pronamespace = n.oid AND "
								"		p.probin = '$libdir/isn'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_proname = PQfnumber(res, "proname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "Could not create necessary file:  %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "Database:  %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_proname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "| Your installation contains \"contrib/isn\" functions\n"
			   "| which rely on the bigint data type.  Your old and\n"
			   "| new clusters pass bigint values differently so this\n"
			   "| cluster cannot currently be upgraded.  You can\n"
			   "| manually upgrade data that use \"contrib/isn\"\n"
			   "| facilities and remove \"contrib/isn\" from the\n"
			   "| old cluster and restart the upgrade.  A list\n"
			   "| of the problem functions is in the file:\n"
			   "| \t%s\n\n", output_path);
	}
	else
		check_ok();
}
Developer: colinet, Project: sqlix, Lines of code: 90, Source file: check.c
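Examples 2, 6, 8, 9 and 10 all follow the same per-database scan skeleton: connect to each database, run a catalog query for problematic objects, write any hits to a report file, and end with either pg_fatal() or check_ok(). A condensed sketch of that skeleton follows; the query text, file name and message are placeholders rather than code from any of the projects above, and error handling is trimmed for brevity.

/*
 * Condensed sketch of the per-database "check_for_*" loop used by several
 * examples on this page (placeholder query and file name).
 */
static void
check_for_problem_objects(ClusterInfo *cluster)
{
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];
	int			dbnum;

	prep_status("Checking for <problem objects>");
	snprintf(output_path, sizeof(output_path), "problem_objects.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
		PGresult   *res = executeQueryOrDie(conn,
											"SELECT ..."); /* placeholder catalog query */
		int			rowno;

		for (rowno = 0; rowno < PQntuples(res); rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n",
						 output_path, strerror(errno));
			fprintf(script, "  %s\n", PQgetvalue(res, rowno, 0));
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
		pg_fatal("<problem objects> were found; a list is in the file:\n    %s\n\n",
				 output_path);
	else
		check_ok();
}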

Example 3: check_loadable_libraries

/*
 * check_loadable_libraries()
 *
 *	Check that the new cluster contains all required libraries.
 *	We do this by actually trying to LOAD each one, thereby testing
 *	compatibility as well as presence.
 */
void
check_loadable_libraries(void)
{
	PGconn	   *conn = connectToServer(&new_cluster, "template1");
	int			libnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for presence of required libraries");

	snprintf(output_path, sizeof(output_path), "loadable_libraries.txt");

	for (libnum = 0; libnum < os_info.num_libraries; libnum++)
	{
		char	   *lib = os_info.libraries[libnum];
		int			llen = strlen(lib);
		/* room for "LOAD '", a worst-case doubled escaped name, "'" and NUL */
		char		cmd[7 + 2 * MAXPGPATH + 1];
		PGresult   *res;

		/*
		 * In Postgres 9.0, Python 3 support was added, and to do that, a
		 * plpython2u language was created with library name plpython2.so as a
		 * symbolic link to plpython.so.  In Postgres 9.1, only the
		 * plpython2.so library was created, and both plpythonu and plpython2u
		 * pointing to it.  For this reason, any reference to library name
		 * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
		 * the new cluster.
		 *
		 * For this case, we could check pg_pltemplate, but that only works
		 * for languages, and does not help with function shared objects, so
		 * we just do a general fix.
		 */
		if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
			strcmp(lib, "$libdir/plpython") == 0)
		{
			lib = "$libdir/plpython2";
			llen = strlen(lib);
		}

		strcpy(cmd, "LOAD '");
		PQescapeStringConn(conn, cmd + strlen(cmd), lib, llen, NULL);
		strcat(cmd, "'");

		res = PQexec(conn, cmd);

		if (PQresultStatus(res) != PGRES_COMMAND_OK)
		{
			found = true;

			/* exit and report missing support library with special message */
			if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0)
				pg_log(PG_FATAL,
					   "The pg_upgrade_support module must be created and installed in the new cluster.\n");

			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
					   output_path, getErrorText(errno));
			fprintf(script, "Could not load library \"%s\"\n%s\n",
					lib,
					PQerrorMessage(conn));
		}

		PQclear(res);
	}

	PQfinish(conn);

	if (found)
	{
		fclose(script);
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "Your installation references loadable libraries that are missing from the\n"
			   "new installation.  You can add these libraries to the new installation,\n"
			   "or remove the functions using them from the old installation.  A list of\n"
			   "problem libraries is in the file:\n"
			   "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Developer: d, Project: gpdb, Lines of code: 89, Source file: function.c

Example 4: old_8_3_create_sequence_script

/*
 * old_8_3_create_sequence_script()
 *	8.3 -> 8.4
 *	8.4 added the column "start_value" to all sequences.  For this reason,
 *	we don't transfer sequence files but instead use the CREATE SEQUENCE
 *	command from the schema dump, and use setval() to restore the sequence
 *	value and 'is_called' from the old database.  This is safe to run
 *	by pg_upgrade because sequence files are not transferred from the old
 *	server, even in link mode.
 */
char *
old_8_3_create_sequence_script(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path;

	output_path = pg_strdup("adjust_sequences.sql");

	prep_status("Creating script to adjust sequences");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any sequences */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n "
								"WHERE	c.relkind = 'S' AND "
								"		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								"		n.nspname !~ '^pg_temp_' AND "
						 "		n.nspname !~ '^pg_toast_temp_' AND "
								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			PGresult   *seq_res;
			int			i_last_value,
						i_is_called;
			const char *nspname = PQgetvalue(res, rowno, i_nspname);
			const char *relname = PQgetvalue(res, rowno, i_relname);

			found = true;

			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "\\connect %s\n\n",
						quote_identifier(active_db->db_name));
				db_used = true;
			}

			/* Find the desired sequence */
			seq_res = executeQueryOrDie(conn,
										"SELECT s.last_value, s.is_called "
										"FROM	%s.%s s",
										quote_identifier(nspname),
										quote_identifier(relname));

			assert(PQntuples(seq_res) == 1);
			i_last_value = PQfnumber(seq_res, "last_value");
			i_is_called = PQfnumber(seq_res, "is_called");

			fprintf(script, "SELECT setval('%s.%s', %s, '%s');\n",
					quote_identifier(nspname), quote_identifier(relname),
					PQgetvalue(seq_res, 0, i_last_value), PQgetvalue(seq_res, 0, i_is_called));
			PQclear(seq_res);
		}
		if (db_used)
			fprintf(script, "\n");

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	check_ok();

	if (found)
		return output_path;
	else
	{
//......... the remainder of this function is omitted here .........
Developer: aakashgoel, Project: postgres, Lines of code: 101, Source file: version_old_8_3.c

Example 5: main

int
main(int argc, char **argv)
{
	char	   *sequence_script_file_name = NULL;
	char	   *analyze_script_file_name = NULL;
	char	   *deletion_script_file_name = NULL;
	bool		live_check = false;

	parseCommandLine(argc, argv);

	adjust_data_dir(&old_cluster);
	adjust_data_dir(&new_cluster);

	setup(argv[0], &live_check);

	output_check_banner(live_check);

	check_cluster_versions();

	get_sock_dir(&old_cluster, live_check);
	get_sock_dir(&new_cluster, false);

	check_cluster_compatibility(live_check);

	check_and_dump_old_cluster(live_check, &sequence_script_file_name);


	/* -- NEW -- */
	start_postmaster(&new_cluster, true);

	check_new_cluster();
	report_clusters_compatible();

	pg_log(PG_REPORT, "\nPerforming Upgrade\n");
	pg_log(PG_REPORT, "------------------\n");

	prepare_new_cluster();

	stop_postmaster(false);

	/*
	 * Destructive Changes to New Cluster
	 */

	copy_clog_xlog_xid();

	/* New now using xids of the old system */

	/* -- NEW -- */
	start_postmaster(&new_cluster, true);

	prepare_new_databases();

	create_new_objects();

	stop_postmaster(false);

	/*
	 * Most failures happen in create_new_objects(), which has completed at
	 * this point.	We do this here because it is just before linking, which
	 * will link the old and new cluster data files, preventing the old
	 * cluster from being safely started once the new cluster is started.
	 */
	if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
		disable_old_cluster();

	transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
								 old_cluster.pgdata, new_cluster.pgdata);

	/*
	 * Assuming OIDs are only used in system tables, there is no need to
	 * restore the OID counter because we have not transferred any OIDs from
	 * the old system, but we do it anyway just in case.  We do it late here
	 * because there is no need to have the schema load use new oids.
	 */
	prep_status("Setting next OID for new cluster");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/pg_resetxlog\" -o %u \"%s\"",
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid,
			  new_cluster.pgdata);
	check_ok();

	prep_status("Sync data directory to disk");
	exec_prog(UTILITY_LOG_FILE, NULL, true,
			  "\"%s/initdb\" --sync-only \"%s\"", new_cluster.bindir,
			  new_cluster.pgdata);
	check_ok();

	create_script_for_cluster_analyze(&analyze_script_file_name);
	create_script_for_old_cluster_deletion(&deletion_script_file_name);

	issue_warnings(sequence_script_file_name);

	pg_log(PG_REPORT, "\nUpgrade Complete\n");
	pg_log(PG_REPORT, "----------------\n");

	output_completion_banner(analyze_script_file_name,
							 deletion_script_file_name);

	pg_free(analyze_script_file_name);
//......... the remainder of this function is omitted here .........
Developer: dimitri, Project: postgres, Lines of code: 101, Source file: pg_upgrade.c

Example 6: old_8_3_check_ltree_usage

/*
 *	old_8_3_check_ltree_usage()
 *	8.3 -> 8.4
 *	The internal ltree structure was changed in 8.4 so upgrading is impossible.
 */
void
old_8_3_check_ltree_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for contrib/ltree");

	snprintf(output_path, sizeof(output_path), "contrib_ltree.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_proname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any functions coming from contrib/ltree */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, p.proname "
								"FROM	pg_catalog.pg_proc p, "
								"		pg_catalog.pg_namespace n "
								"WHERE	p.pronamespace = n.oid AND "
								"		p.probin = '$libdir/ltree'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_proname = PQfnumber(res, "proname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("Could not open file \"%s\": %s\n",
					   output_path, getErrorText(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_proname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"ltree\" data type.  This data type\n"
			   "changed its internal storage format between your old and new clusters so this\n"
			   "cluster cannot currently be upgraded.  You can manually upgrade databases\n"
			   "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
			   "cluster and restart the upgrade.  A list of the problem functions is in the\n"
			   "file:\n"
			   "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Developer: aakashgoel, Project: postgres, Lines of code: 77, Source file: version_old_8_3.c

Example 7: old_8_3_rebuild_tsvector_tables

/*
 * old_8_3_rebuild_tsvector_tables()
 *	8.3 -> 8.4
 * 8.3 sorts lexemes by their length, and if lengths are the same it uses
 * alphabetic order;  8.4 sorts lexemes in lexicographical order, e.g.
 *
 * => SELECT 'c bb aaa'::tsvector;
 *	   tsvector
 * ----------------
 *	'aaa' 'bb' 'c'		   -- 8.4
 *	'c' 'bb' 'aaa'		   -- 8.3
 */
void
old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for tsvector user columns");

	snprintf(output_path, sizeof(output_path), "rebuild_tsvector_tables.sql");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		char		nspname[NAMEDATALEN] = "",
					relname[NAMEDATALEN] = "";
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* Find any user-defined tsvector columns */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
		/* materialized views didn't exist in 8.3, so no need to check 'm' */
								"WHERE	c.relkind = 'r' AND "
								"		c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								"		n.nspname !~ '^pg_temp_' AND "
						 "		n.nspname !~ '^pg_toast_temp_' AND "
								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

/*
 *	This macro is used below to avoid reindexing indexes already rebuilt
 *	because of tsvector columns.
 */
#define SKIP_TSVECTOR_TABLES \
								"i.indrelid NOT IN ( "					\
								"SELECT DISTINCT c.oid "				\
								"FROM	pg_catalog.pg_class c, "		\
								"		pg_catalog.pg_namespace n, "	\
								"		pg_catalog.pg_attribute a "		\
		/* materialized views didn't exist in 8.3, so no need to check 'm' */ \
								"WHERE	c.relkind = 'r' AND "			\
								"		c.oid = a.attrelid AND "		\
								"		NOT a.attisdropped AND "		\
								"		a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " \
								"		c.relnamespace = n.oid AND "	\
								"       n.nspname !~ '^pg_' AND "		\
								"		n.nspname != 'information_schema') "

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno));
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n\n",
							quote_identifier(active_db->db_name));
					db_used = true;
				}

				/* Rebuild all tsvector columns with one ALTER TABLE command */
				if (strcmp(PQgetvalue(res, rowno, i_nspname), nspname) != 0 ||
					strcmp(PQgetvalue(res, rowno, i_relname), relname) != 0)
				{
					if (strlen(nspname) != 0 || strlen(relname) != 0)
						fprintf(script, ";\n\n");
					fprintf(script, "ALTER TABLE %s.%s\n",
						 quote_identifier(PQgetvalue(res, rowno, i_nspname)),
//......... the remainder of this function is omitted here .........
Developer: aakashgoel, Project: postgres, Lines of code: 101, Source file: version_old_8_3.c

Example 8: old_8_3_check_for_tsquery_usage

/*
 * old_8_3_check_for_tsquery_usage()
 *	8.3 -> 8.4
 *	A new 'prefix' field was added to the 'tsquery' data type in 8.4
 *	so migration of such fields is impossible.
 */
void
old_8_3_check_for_tsquery_usage(Cluster whichCluster)
{
	ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for tsquery user columns");

	snprintf(output_path, sizeof(output_path), "%s/tables_using_tsquery.txt",
			 os_info.cwd);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(active_db->db_name, whichCluster);

		/* Find any user-defined tsquery columns */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.relkind = 'r' AND "
								"		c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
							  "		n.nspname != 'pg_catalog' AND "
						 "		n.nspname != 'information_schema'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "Could not create necessary file:  %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "Database:  %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (found)
	{
		fclose(script);
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "| Your installation contains the \"tsquery\" data type.\n"
			   "| This data type added a new internal field between\n"
			   "| your old and new clusters so this cluster cannot\n"
			   "| currently be upgraded.  You can remove the problem\n"
			   "| columns and restart the migration.  A list of the\n"
			   "| problem columns is in the file:\n"
			   "| \t%s\n\n", output_path);
	}
	else
		check_ok();
}
Developer: markwkm, Project: postgres, Lines of code: 87, Source file: version_old_8_3.c

Example 9: old_8_3_check_for_name_data_type_usage

/*
 * old_8_3_check_for_name_data_type_usage()
 *	8.3 -> 8.4
 *	Alignment for the 'name' data type changed to 'char' in 8.4;
 *	checks tables and indexes.
 */
void
old_8_3_check_for_name_data_type_usage(Cluster whichCluster)
{
	ClusterInfo *active_cluster = ACTIVE_CLUSTER(whichCluster);
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid 'name' user columns");

	snprintf(output_path, sizeof(output_path), "%s/tables_using_name.txt",
			 os_info.cwd);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(active_db->db_name, whichCluster);

		/*
		 * With a smaller alignment in 8.4, 'name' cannot be used in a
		 * non-pg_catalog table, except as the first column. (We could tighten
		 * that condition with enough analysis, but it seems not worth the
		 * trouble.)
		 */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		a.attnum > 1 AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
								"		c.relnamespace = n.oid AND "
							  "		n.nspname != 'pg_catalog' AND "
						 "		n.nspname != 'information_schema'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(PG_FATAL, "Could not create necessary file:  %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "Database:  %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (found)
	{
		fclose(script);
		pg_log(PG_REPORT, "fatal\n");
		pg_log(PG_FATAL,
			   "| Your installation contains the \"name\" data type in\n"
			   "| user tables.  This data type changed its internal\n"
			   "| alignment between your old and new clusters so this\n"
			   "| cluster cannot currently be upgraded.  You can\n"
			   "| remove the problem tables and restart the migration.\n"
			   "| A list of the problem columns is in the file:\n"
			   "| \t%s\n\n", output_path);
	}
	else
		check_ok();
}
Developer: markwkm, Project: postgres, Lines of code: 92, Source file: version_old_8_3.c

Example 10: old_9_6_check_for_unknown_data_type_usage

/*
 * old_9_6_check_for_unknown_data_type_usage()
 *	9.6 -> 10
 *	It's no longer allowed to create tables or views with "unknown"-type
 *	columns.  We do not complain about views with such columns, because
 *	they should get silently converted to "text" columns during the DDL
 *	dump and reload; it seems unlikely to be worth making users do that
 *	by hand.  However, if there's a table with such a column, the DDL
 *	reload will fail, so we should pre-detect that rather than failing
 *	mid-upgrade.  Worse, if there's a matview with such a column, the
 *	DDL reload will silently change it to "text" which won't match the
 *	on-disk storage (which is like "cstring").  So we *must* reject that.
 *	Also check composite types, in case they are used for table columns.
 *	We needn't check indexes, because "unknown" has no opclasses.
 */
void
old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status("Checking for invalid \"unknown\" user columns");

	snprintf(output_path, sizeof(output_path), "tables_using_unknown.txt");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_namespace n, "
								"		pg_catalog.pg_attribute a "
								"WHERE	c.oid = a.attrelid AND "
								"		NOT a.attisdropped AND "
								"		a.atttypid = 'pg_catalog.unknown'::pg_catalog.regtype AND "
								"		c.relkind IN ("
								CppAsString2(RELKIND_RELATION) ", "
								CppAsString2(RELKIND_COMPOSITE_TYPE) ", "
								CppAsString2(RELKIND_MATVIEW) ") AND "
								"		c.relnamespace = n.oid AND "
		/* exclude possible orphaned temp tables */
								"		n.nspname !~ '^pg_temp_' AND "
								"		n.nspname !~ '^pg_toast_temp_' AND "
								"		n.nspname NOT IN ('pg_catalog', 'information_schema')");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
				pg_fatal("could not open file \"%s\": %s\n", output_path,
						 strerror(errno));
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, "  %s.%s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_relname),
					PQgetvalue(res, rowno, i_attname));
		}

		PQclear(res);

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		pg_log(PG_REPORT, "fatal\n");
		pg_fatal("Your installation contains the \"unknown\" data type in user tables.  This\n"
				 "data type is no longer allowed in tables, so this cluster cannot currently\n"
				 "be upgraded.  You can remove the problem tables and restart the upgrade.\n"
				 "A list of the problem columns is in the file:\n"
				 "    %s\n\n", output_path);
	}
	else
		check_ok();
}
Developer: AmiGanguli, Project: postgres, Lines of code: 98, Source file: version.c

Example 11: old_9_6_invalidate_hash_indexes

/*
 * old_9_6_invalidate_hash_indexes()
 *	9.6 -> 10
 *	Hash index binary format has changed from 9.6->10.0
 */
void
old_9_6_invalidate_hash_indexes(ClusterInfo *cluster, bool check_mode)
{
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char	   *output_path = "reindex_hash.sql";

	prep_status("Checking for hash indexes");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(cluster, active_db->db_name);

		/* find hash indexes */
		res = executeQueryOrDie(conn,
								"SELECT n.nspname, c.relname "
								"FROM	pg_catalog.pg_class c, "
								"		pg_catalog.pg_index i, "
								"		pg_catalog.pg_am a, "
								"		pg_catalog.pg_namespace n "
								"WHERE	i.indexrelid = c.oid AND "
								"		c.relam = a.oid AND "
								"		c.relnamespace = n.oid AND "
								"		a.amname = 'hash'"
			);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
					pg_fatal("could not open file \"%s\": %s\n", output_path,
							 strerror(errno));
				if (!db_used)
				{
					PQExpBufferData connectbuf;

					initPQExpBuffer(&connectbuf);
					appendPsqlMetaConnect(&connectbuf, active_db->db_name);
					fputs(connectbuf.data, script);
					termPQExpBuffer(&connectbuf);
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && db_used)
		{
			/* mark hash indexes as invalid */
			PQclear(executeQueryOrDie(conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET	indisvalid = false "
									  "FROM	pg_catalog.pg_class c, "
									  "		pg_catalog.pg_am a, "
									  "		pg_catalog.pg_namespace n "
									  "WHERE	i.indexrelid = c.oid AND "
									  "		c.relam = a.oid AND "
									  "		c.relnamespace = n.oid AND "
									  "		a.amname = 'hash'"));
		}

		PQfinish(conn);
	}

	if (script)
		fclose(script);

	if (found)
	{
		report_status(PG_WARNING, "warning");
		if (check_mode)
			pg_log(PG_WARNING, "\n"
				   "Your installation contains hash indexes.  These indexes have different\n"
				   "internal formats between your old and new clusters, so they must be\n"
				   "reindexed with the REINDEX command.  After upgrading, you will be given\n"
				   "REINDEX instructions.\n\n");
		else
			pg_log(PG_WARNING, "\n"
//......... the remainder of this function is omitted here .........
Developer: AmiGanguli, Project: postgres, Lines of code: 101, Source file: version.c

Example 12: main

int
main(int argc, char **argv)
{
	char	   *sequence_script_file_name = NULL;
	char	   *deletion_script_file_name = NULL;
	bool		live_check = false;

	parseCommandLine(argc, argv);

	output_check_banner(&live_check);

	setup(argv[0], live_check);

	check_cluster_versions();
	check_cluster_compatibility(live_check);

	check_old_cluster(live_check, &sequence_script_file_name);


	/* -- NEW -- */
	start_postmaster(&new_cluster);

	check_new_cluster();
	report_clusters_compatible();

	pg_log(PG_REPORT, "\nPerforming Upgrade\n");
	pg_log(PG_REPORT, "------------------\n");

	disable_old_cluster();
	prepare_new_cluster();

	stop_postmaster(false);

	/*
	 * Destructive Changes to New Cluster
	 */

	copy_clog_xlog_xid();

	/* New now using xids of the old system */

	/* -- NEW -- */
	start_postmaster(&new_cluster);

	prepare_new_databases();

	create_new_objects();

	stop_postmaster(false);

	transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
						 old_cluster.pgdata, new_cluster.pgdata);

	/*
	 * Assuming OIDs are only used in system tables, there is no need to
	 * restore the OID counter because we have not transferred any OIDs from
	 * the old system, but we do it anyway just in case.  We do it late here
	 * because there is no need to have the schema load use new oids.
	 */
	prep_status("Setting next OID for new cluster");
	exec_prog(true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > "
			  DEVNULL SYSTEMQUOTE,
			  new_cluster.bindir, old_cluster.controldata.chkpnt_nxtoid, new_cluster.pgdata);
	check_ok();

	create_script_for_old_cluster_deletion(&deletion_script_file_name);

	issue_warnings(sequence_script_file_name);

	pg_log(PG_REPORT, "\nUpgrade complete\n");
	pg_log(PG_REPORT, "----------------\n");

	output_completion_banner(deletion_script_file_name);

	pg_free(deletion_script_file_name);
	pg_free(sequence_script_file_name);

	cleanup();

	return 0;
}
Developer: mjw56, Project: postgres, Lines of code: 81, Source file: pg_upgrade.c

Example 13: create_new_objects

static void
create_new_objects(void)
{
	int			dbnum;

	prep_status("Adding support functions to new cluster");

	/*
	 * Technically, we only need to install these support functions in new
	 * databases that also exist in the old cluster, but for completeness we
	 * process all new databases.
	 */
	for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *new_db = &new_cluster.dbarr.dbs[dbnum];

		/* skip db we already installed */
		if (strcmp(new_db->db_name, "template1") != 0)
			install_support_functions_in_new_db(new_db->db_name);
	}
	check_ok();

	prep_status("Restoring database schemas in the new cluster\n");

	for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
	{
		char		sql_file_name[MAXPGPATH],
					log_file_name[MAXPGPATH];
		DbInfo	   *old_db = &old_cluster.dbarr.dbs[dbnum];

		pg_log(PG_STATUS, "%s", old_db->db_name);
		snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
		snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);

		/*
		 * pg_dump only produces its output at the end, so there is little
		 * parallelism if using the pipe.
		 */
		parallel_exec_prog(log_file_name,
						   NULL,
						   "\"%s/pg_restore\" %s --exit-on-error --verbose --dbname \"%s\" \"%s\"",
						   new_cluster.bindir,
						   cluster_conn_opts(&new_cluster),
						   old_db->db_name,
						   sql_file_name);
	}

	/* reap all children */
	while (reap_child(true) == true)
		;

	end_progress_output();
	check_ok();

	/*
	 * We don't have minmxids for databases or relations in pre-9.3
	 * clusters, so set those after we have restored the schemas.
	 */
	if (GET_MAJOR_VERSION(old_cluster.major_version) < 903)
		set_frozenxids(true);

	optionally_create_toast_tables();

	/* regenerate now that we have objects in the databases */
	get_db_and_rel_infos(&new_cluster);

	uninstall_support_functions_from_new_cluster();
}
Developer: PJMODOS, Project: postgres, Lines of code: 68, Source file: pg_upgrade.c

Example 14: transfer_all_new_dbs

/*
 * transfer_all_new_dbs()
 *
 * Responsible for upgrading all databases.  Invokes routines to generate mappings and then
 * physically link the databases.
 */
const char *
transfer_all_new_dbs(DbInfoArr *old_db_arr,
				   DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
{
	int			old_dbnum,
				new_dbnum;
	const char *msg = NULL;

	prep_status("%s user relation files\n",
	  user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");

	/* Scan the old cluster databases and transfer their files */
	for (old_dbnum = new_dbnum = 0;
		 old_dbnum < old_db_arr->ndbs;
		 old_dbnum++, new_dbnum++)
	{
		DbInfo	   *old_db = &old_db_arr->dbs[old_dbnum],
				   *new_db = NULL;
		FileNameMap *mappings;
		int			n_maps;
		pageCnvCtx *pageConverter = NULL;

		/*
		 * Advance past any databases that exist in the new cluster but not in
		 * the old, e.g. "postgres".  (The user might have removed the
		 * 'postgres' database from the old cluster.)
		 */
		for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
		{
			new_db = &new_db_arr->dbs[new_dbnum];
			if (strcmp(old_db->db_name, new_db->db_name) == 0)
				break;
		}

		if (new_dbnum >= new_db_arr->ndbs)
			pg_log(PG_FATAL, "old database \"%s\" not found in the new cluster\n",
				   old_db->db_name);

		n_maps = 0;
		mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
									new_pgdata);

		if (n_maps)
		{
			print_maps(mappings, n_maps, new_db->db_name);

#ifdef PAGE_CONVERSION
			msg = setupPageConverter(&pageConverter);
#endif
			transfer_single_new_db(pageConverter, mappings, n_maps);

			pg_free(mappings);
		}
	}

	prep_status(" ");			/* in case nothing printed; pass a space so
								 * gcc doesn't complain about empty format
								 * string */
	check_ok();

	return msg;
}
Developer: HunterChen, Project: postgres-xc, Lines of code: 68, Source file: relfilenode.c
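Note the two call shapes of prep_status() visible in this example: a message without a trailing newline (the common case, as in "Setting next OID for new cluster") is later completed on the same line by check_ok(), whereas a message ending in "\n" (as in the "Linking/Copying user relation files" call above) closes the header line itself so that per-database progress can be printed underneath; the final prep_status(" ") call exists mainly so the trailing check_ok() has a status line to complete (the space avoids an empty-format-string warning, per the in-code comment). A short, purely illustrative contrast of the two shapes, inferred from the examples on this page:

/*
 * Illustration (not project code) of the two prep_status() call shapes
 * seen in the examples above.
 */
static void
status_line_shapes(void)
{
	/* single-line status: check_ok() appends "ok" on the same line */
	prep_status("Setting next OID for new cluster");
	/* ... do the work ... */
	check_ok();

	/* multi-line status: the trailing newline ends the header line, and
	 * per-database progress (e.g. pg_log(PG_STATUS, ...)) follows below */
	prep_status("Copying user relation files\n");
	/* ... per-database output, then a final status line and check_ok() ... */
}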

Example 15: create_script_for_cluster_analyze

/*
 * create_script_for_cluster_analyze()
 *
 *	This incrementally generates better optimizer statistics
 */
void
create_script_for_cluster_analyze(char **analyze_script_file_name)
{
	FILE	   *script = NULL;

	*analyze_script_file_name = pg_malloc(MAXPGPATH);

	prep_status("Creating script to analyze new cluster");

	snprintf(*analyze_script_file_name, MAXPGPATH, "analyze_new_cluster.%s",
			 SCRIPT_EXT);

	if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
		pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
			   *analyze_script_file_name, getErrorText(errno));

#ifndef WIN32
	/* add shebang header */
	fprintf(script, "#!/bin/sh\n\n");
#endif

	fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %swith increasing accuracy.  When it is done, your system will%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %shave the default level of optimizer statistics.%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo\n\n");

	fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo\n\n");

	fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sthis script and run:%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s    vacuumdb --all %s%s\n", ECHO_QUOTE,
	/* Did we copy the free space files? */
			(GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
			"--analyze-only" : "--analyze", ECHO_QUOTE);
	fprintf(script, "echo\n\n");

#ifndef WIN32
	fprintf(script, "sleep 2\n");
	fprintf(script, "PGOPTIONS='-c default_statistics_target=1 -c vacuum_cost_delay=0'\n");
	/* only need to export once */
	fprintf(script, "export PGOPTIONS\n");
#else
	fprintf(script, "REM simulate sleep 2\n");
	fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
	fprintf(script, "SET PGOPTIONS=-c default_statistics_target=1 -c vacuum_cost_delay=0\n");
#endif

	fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s--------------------------------------------------%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "vacuumdb --all --analyze-only\n");
	fprintf(script, "echo\n");
	fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo\n\n");

#ifndef WIN32
	fprintf(script, "sleep 2\n");
	fprintf(script, "PGOPTIONS='-c default_statistics_target=10'\n");
#else
	fprintf(script, "REM simulate sleep\n");
	fprintf(script, "PING 1.1.1.1 -n 1 -w 2000 > nul\n");
	fprintf(script, "SET PGOPTIONS=-c default_statistics_target=10\n");
#endif

	fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "echo %s---------------------------------------------------%s\n",
			ECHO_QUOTE, ECHO_QUOTE);
	fprintf(script, "vacuumdb --all --analyze-only\n");
	fprintf(script, "echo\n\n");

#ifndef WIN32
	fprintf(script, "unset PGOPTIONS\n");
#else
	fprintf(script, "SET PGOPTIONS\n");
#endif

	fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n",
//......... the remainder of this function is omitted here .........
Developer: hqinnus, Project: postgres, Lines of code: 101, Source file: check.c


Note: The prep_status function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce without permission.