Skip to content

Commit

Permalink
增加oss_info 支持;升级cms版本
Browse files Browse the repository at this point in the history
  • Loading branch information
韩云峰 committed Aug 19, 2020
1 parent 5ae4df9 commit ef209d6
Show file tree
Hide file tree
Showing 4 changed files with 80 additions and 32 deletions.
3 changes: 2 additions & 1 deletion aliyun-exporter.yml
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,8 @@ info_metrics:
- redis
- slb
- mongodb
- polardb
# - polardb
# - oss

do_info_region:
- "cn-zhangjiakou"
Expand Down
37 changes: 24 additions & 13 deletions aliyun_exporter/collector.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,8 @@
from prometheus_client import Summary
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from aliyunsdkcore.client import AcsClient
from aliyunsdkcms.request.v20180308 import QueryMetricLastRequest
# from aliyunsdkcms.request.v20190101 import QueryMetricLastRequest
from aliyunsdkcms.request.v20190101 import DescribeMetricLastRequest
from aliyunsdkrds.request.v20140815 import DescribeDBInstancePerformanceRequest
from ratelimiter import RateLimiter

Expand All @@ -16,11 +17,13 @@

rds_performance = 'rds_performance'
special_projects = {
rds_performance: lambda collector : RDSPerformanceCollector(collector),
rds_performance: lambda collector: RDSPerformanceCollector(collector),
}

requestSummary = Summary('cloudmonitor_request_latency_seconds', 'CloudMonitor request latency', ['project'])
requestFailedSummary = Summary('cloudmonitor_failed_request_latency_seconds', 'CloudMonitor failed request latency', ['project'])
requestFailedSummary = Summary('cloudmonitor_failed_request_latency_seconds', 'CloudMonitor failed request latency',
['project'])


class CollectorConfig(object):
def __init__(self,
Expand Down Expand Up @@ -57,6 +60,7 @@ def __init__(self,
self.credential['access_key_secret'] is None:
raise Exception('Credential is not fully configured.')


class AliyunCollector(object):
def __init__(self, config: CollectorConfig):
self.config = config
Expand All @@ -68,19 +72,25 @@ def __init__(self, config: CollectorConfig):
# region_id=config.credential['region_id'] #在获取监控指标metrics时貌似不需要region
)
self.rateLimiter = RateLimiter(max_calls=config.rate_limit)
self.info_provider = InfoProvider()
self.info_provider = InfoProvider(ak=config.credential['access_key_id'],
secret=config.credential['access_key_secret'],
region_id=config.credential['region_id'])
self.special_collectors = dict()
for k, v in special_projects.items():
if k in self.metrics:
self.special_collectors[k] = v(self)


def query_metric(self, project: str, metric: str, period: int):
with self.rateLimiter:
req = QueryMetricLastRequest.QueryMetricLastRequest()
req.set_Project(project)
req.set_Metric(metric)
# req = QueryMetricLastRequest.QueryMetricLastRequest()
# req.set_Project(project)
# req.set_Metric(metric)
# req.set_Period(period)
req = DescribeMetricLastRequest.DescribeMetricLastRequest()
req.set_Namespace(project)
req.set_MetricName(metric)
req.set_Period(period)

start_time = time.time()
try:
resp = self.client.do_action_with_exception(req)
Expand All @@ -95,8 +105,10 @@ def query_metric(self, project: str, metric: str, period: int):
points = json.loads(data['Datapoints'])
return points
else:
logging.error('Error query metrics for {}_{}, the response body don not have Datapoints field, please check you permission or workload' .format(project, metric))
return points
logging.error(
'Error query metrics for {}_{}, the response body don not have Datapoints field, please check you permission or workload'.format(
project, metric))
return None

def parse_label_keys(self, point):
return [k for k in point if k not in ['timestamp', 'Maximum', 'Minimum', 'Average']]
Expand Down Expand Up @@ -169,7 +181,6 @@ def collect(self):
yield from v.collect()



def metric_up_gauge(resource: str, succeeded=True):
metric_name = resource + '_up'
description = 'Did the {} fetch succeed.'.format(resource)
Expand Down Expand Up @@ -200,12 +211,12 @@ def collect(self):
secret=self.parent.config.credential['access_key_secret'],
region_id=a_region
)
for id in [s.labels['DBInstanceId'] for s in self.parent.info_provider.get_metrics('rds', client).samples]:
for id in [s.labels['DBInstanceId'] for s in
self.parent.info_provider.get_metrics('rds', client).samples]:
metrics = self.query_rds_performance_metrics(id)
for metric in metrics:
yield from self.parse_rds_performance(id, metric)


def parse_rds_performance(self, id, value):
value_format: str = value['ValueFormat']
metric_name = value['Key']
Expand Down
67 changes: 51 additions & 16 deletions aliyun_exporter/info_provider.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
import aliyunsdkslb.request.v20140515.DescribeLoadBalancersRequest as DescribeSLB
import aliyunsdkdds.request.v20151201.DescribeDBInstancesRequest as Mongodb
import aliyunsdkpolardb.request.v20170801.DescribeDBClustersRequest as Polardb
import oss2

from aliyun_exporter.utils import try_or_else

Expand All @@ -27,29 +28,35 @@
labels of metric, and handle nested attribute specially. If a nested
attribute is not handled explicitly, it will be dropped.
'''


class InfoProvider():

def __init__(self):
    def __init__(self, ak, secret, region_id):
        """Hold Aliyun credentials for resource-info collection.

        :param ak: Aliyun access key id
        :param secret: Aliyun access key secret
        :param region_id: region id; used to build the OSS service endpoint
        """
        # AcsClient is injected per call via get_metrics(), not created here.
        self.client = None
        self.ak = ak
        self.secret = secret
        self.region_id = region_id

@cached(cache)
def get_metrics(self, resource: str, client: AcsClient) -> GaugeMetricFamily:
self.client = client
return {
'ecs': lambda : self.ecs_info(),
'rds': lambda : self.rds_info(),
'redis': lambda : self.redis_info(),
'slb':lambda : self.slb_info(),
'mongodb':lambda : self.mongodb_info(),
'polardb':lambda : self.polardb_info()
'ecs': lambda: self.ecs_info(),
'rds': lambda: self.rds_info(),
'redis': lambda: self.redis_info(),
'slb': lambda: self.slb_info(),
'mongodb': lambda: self.mongodb_info(),
'polardb': lambda: self.polardb_info(),
'oss': lambda: self.oss_info()
}[resource]()

def ecs_info(self) -> GaugeMetricFamily:
req = DescribeECS.DescribeInstancesRequest()
nested_handler = {
'InnerIpAddress': lambda obj : try_or_else(lambda : obj['IpAddress'][0], ''),
'PublicIpAddress': lambda obj : try_or_else(lambda : obj['IpAddress'][0], ''),
'VpcAttributes': lambda obj : try_or_else(lambda : obj['PrivateIpAddress']['IpAddress'][0], ''),
'InnerIpAddress': lambda obj: try_or_else(lambda: obj['IpAddress'][0], ''),
'PublicIpAddress': lambda obj: try_or_else(lambda: obj['IpAddress'][0], ''),
'VpcAttributes': lambda obj: try_or_else(lambda: obj['PrivateIpAddress']['IpAddress'][0], ''),
}
return self.info_template(req, 'aliyun_meta_ecs_info', nested_handler=nested_handler)

Expand All @@ -59,23 +66,52 @@ def rds_info(self) -> GaugeMetricFamily:

def redis_info(self) -> GaugeMetricFamily:
req = DescribeRedis.DescribeInstancesRequest()
return self.info_template(req, 'aliyun_meta_redis_info', to_list=lambda data: data['Instances']['KVStoreInstance'])
return self.info_template(req, 'aliyun_meta_redis_info',
to_list=lambda data: data['Instances']['KVStoreInstance'])

def slb_info(self) -> GaugeMetricFamily:
req = DescribeSLB.DescribeLoadBalancersRequest()
return self.info_template(req, 'aliyun_meta_slb_info', to_list=lambda data: data['LoadBalancers']['LoadBalancer'])
return self.info_template(req, 'aliyun_meta_slb_info',
to_list=lambda data: data['LoadBalancers']['LoadBalancer'])

def mongodb_info(self) -> GaugeMetricFamily:
req = Mongodb.DescribeDBInstancesRequest()
return self.info_template(req, 'aliyun_meta_mongodb_info', to_list=lambda data: data['DBInstances']['DBInstance'])
return self.info_template(req, 'aliyun_meta_mongodb_info',
to_list=lambda data: data['DBInstances']['DBInstance'])

    def polardb_info(self) -> GaugeMetricFamily:
        """Collect PolarDB cluster metadata as the 'aliyun_meta_polardb_info' gauge.

        Each cluster returned under Items.DBCluster becomes one sample with
        its attributes exposed as labels (value is always 1).
        """
        req = Polardb.DescribeDBClustersRequest()
        return self.info_template(req, 'aliyun_meta_polardb_info', to_list=lambda data: data['Items']['DBCluster'])

def oss_info(self) -> GaugeMetricFamily:
auth = oss2.Auth(self.ak, self.secret)
service = oss2.Service(auth, 'http://oss-{resion_id}.aliyuncs.com'.format(resion_id=self.region_id))
nested_handler = None
gauge = None
label_keys = None
for instance in oss2.BucketIterator(service):
bucket = oss2.Bucket(auth, 'http://oss-cn-beijing.aliyuncs.com', instance.name)
bucket_info = bucket.get_bucket_info()
instance_dict = {'name': bucket_info.name,
'storage_class': bucket_info.storage_class,
'creation_date': bucket_info.creation_date,
'intranet_endpoint': bucket_info.intranet_endpoint,
'extranet_endpoint': bucket_info.extranet_endpoint,
'owner': bucket_info.owner.id,
'grant': bucket_info.acl.grant,
'data_redundancy_type': bucket_info.data_redundancy_type,
}
if gauge == None:
label_keys = self.label_keys(instance_dict, nested_handler)
gauge = GaugeMetricFamily('aliyun_meta_oss_info', '', labels=label_keys)
gauge.add_metric(labels=self.label_values(instance_dict, label_keys, nested_handler), value=1.0)
return gauge


'''
Template method to retrieve resource information and transform to metric.
'''

def info_template(self,
req,
name,
Expand Down Expand Up @@ -115,7 +151,6 @@ def label_keys(self, instance, nested_handler=None):
def label_values(self, instance, label_keys, nested_handler=None):
if nested_handler is None:
nested_handler = {}
return map(lambda k: str(nested_handler[k](instance[k])) if k in nested_handler else try_or_else(lambda: str(instance[k]), ''),
return map(lambda k: str(nested_handler[k](instance[k])) if k in nested_handler else try_or_else(
lambda: str(instance[k]), ''),
label_keys)


5 changes: 3 additions & 2 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
package_data={'aliyun_exporter': ['static/*','templates/*']},
install_requires=[
'prometheus-client',
'aliyun-python-sdk-cms==6.0.13',
'aliyun-python-sdk-cms==7.0.13',
'aliyun-python-sdk-core-v3==2.13.3',
'pyyaml',
'ratelimiter',
Expand All @@ -42,7 +42,8 @@
'aliyun-python-sdk-r-kvstore==2.0.5',
'aliyun-python-sdk-slb==3.2.8',
"aliyun-python-sdk-dds==2.0.4",
"aliyun-python-sdk-polardb==1.7.2"
"aliyun-python-sdk-polardb==1.7.2",
"oss2==2.12.1",
],
entry_points={
'console_scripts': [
Expand Down

0 comments on commit ef209d6

Please sign in to comment.