Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Connect using Connection Service File #87

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ By default, pg_view tries to autodetect all PostgreSQL clusters running on the s
- checks all arguments, picking the first that allows it to establish a connection
- if pg_view can't get either the port/host or port/socket_directory pair, bail out

If the program can't detect your connection arguments using the algorithm above, you can specify those arguments manually using the configuration file supplied with the -c option. This file should consist of one or more sections, each containing a key = value pair.
If the program can't detect your connection arguments using the algorithm above, you can specify those arguments manually using the configuration file supplied with the -c option. This file should consist of one or more sections, each containing a key = value pair. You can also connect using a PostgreSQL connection service file.

The title of each section represents a database cluster name (this name is for display purposes only). The dbname parameter is `postgres` by default, and specifies the actual name of the database to connect to. The key-value pairs should contain connection parameters.

Expand Down
33 changes: 19 additions & 14 deletions pg_view/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,9 @@ def parse_args():
parser.add_option('-H', '--help', help='show_help', action='help')
parser.add_option('-v', '--verbose', help='verbose mode', action='store_true', dest='verbose')
parser.add_option('-i', '--instance', help='name of the instance to monitor', action='store', dest='instance')
parser.add_option('-s', '--use-service',
help='query the service file for the instance name provided',
action='store_true', dest='use_service')
parser.add_option('-t', '--tick', help='tick length (in seconds)',
action='store', dest='tick', type='int', default=1)
parser.add_option('-o', '--output-method', help='send output to the following source', action='store',
Expand Down Expand Up @@ -184,16 +187,14 @@ def main():
# set basic logging
setup_logger(options)

user_dbname = options.instance
user_dbver = options.version
clusters = []

# now try to read the configuration file
config = read_configuration(options.config_file) if options.config_file else None
dbver = None
dbversion = None
# configuration file takes priority over the rest of database connection information sources.
if config:
for instance in config:
if user_dbname and instance != user_dbname:
if options.instance and instance != options.instance:
continue
# pass already acquired connections to make sure we only list unique clusters.
host = config[instance].get('host')
Expand All @@ -205,27 +206,31 @@ def main():
logger.error('failed to acquire details about ' +
'the database cluster {0}, the server will be skipped'.format(instance))
elif options.host:
# try to connect to the database specified by command-line options
# connect to the database using the connection string supplied from command-line
conn = build_connection(options.host, options.port, options.username, options.dbname)
instance = options.instance or "default"
if not establish_user_defined_connection(instance, conn, clusters):
logger.error("unable to continue with cluster {0}".format(instance))
elif options.use_service and options.instance:
# connect to the database using the service name
if not establish_user_defined_connection(options.instance, {'service': options.instance}, clusters):
logger.error("unable to continue with cluster {0}".format(options.instance))
else:
# do autodetection
postmasters = get_postmasters_directories()

# get all PostgreSQL instances
for result_work_dir, data in postmasters.items():
(ppid, dbver, dbname) = data
(ppid, dbversion, dbname) = data
# if user requested a specific database name and version - don't try to connect to others
if user_dbname:
if dbname != user_dbname or not result_work_dir or not ppid:
if options.instance:
if dbname != options.instance or not result_work_dir or not ppid:
continue
if user_dbver is not None and dbver != user_dbver:
if options.version is not None and dbversion != options.version:
continue
try:
conndata = detect_db_connection_arguments(
result_work_dir, ppid, dbver, options.username, options.dbname)
result_work_dir, ppid, dbversion, options.username, options.dbname)
if conndata is None:
continue
host = conndata['host']
Expand All @@ -236,7 +241,7 @@ def main():
logger.error('PostgreSQL exception {0}'.format(e))
pgcon = None
if pgcon:
desc = make_cluster_desc(name=dbname, version=dbver, workdir=result_work_dir,
desc = make_cluster_desc(name=dbname, version=dbversion, workdir=result_work_dir,
pid=ppid, pgcon=pgcon, conn=conn)
clusters.append(desc)
collectors = []
Expand All @@ -251,9 +256,9 @@ def main():
# initialize the disks stat collector process and create an exchange queue
q = JoinableQueue(1)
work_directories = [cl['wd'] for cl in clusters if 'wd' in cl]
dbver = dbver or clusters[0]['ver']
dbversion = dbversion or clusters[0]['ver']

collector = DetachedDiskStatCollector(q, work_directories, dbver)
collector = DetachedDiskStatCollector(q, work_directories, dbversion)
collector.start()
consumer = DiskCollectorConsumer(q)

Expand Down