#!/usr/bin/env python

##
## A simple Ceilometer script that runs within a Cloudlab OpenStack
## experiment and writes several resource utilization files into
## /root/openstack-slothd .
##
## This script runs every N minutes (default 10), and reports its
## metrics over 5 time periods: last 10 minutes, hour, 6 hours, day,
## week.  For each period, for each physical host in the experiment, it
## reports the number of distinct VMs that existed, CPU utilization for
## each VM, and network traffic for each VM.  
##

import os
import time
import sys
import hashlib
import logging
import traceback
import pprint
import json
import shutil
from ceilometerclient import client
from ceilometerclient.v2.query import QueryManager

27 28
VERSION = 1

29 30 31 32 33 34 35 36 37 38 39 40 41 42 43
CLOUDLAB_AUTH_FILE = '/root/setup/admin-openrc.py'
KEYSTONE_OPTS = [ 'OS_PROJECT_DOMAIN_ID','OS_USER_DOMAIN_ID',
                  'OS_PROJECT_NAME','OS_TENANT_NAME',
                  'OS_USERNAME','OS_PASSWORD','OS_AUTH_URL' ]
#'OS_IDENTITY_API_VERSION'

#
# We often want to see "everything", and ceilometer limits us by
# default, so assume "everything" falls into UINT32_MAX.  What a mess.
#
LIMIT = 0xffffffff
MINUTE = 60
HOUR = MINUTE * 60
DAY = HOUR * 24
WEEK = DAY * 7
44
EPOCH = '__EPOCH__'
45

46
PERIODS = [10*MINUTE,HOUR,6*HOUR,DAY,WEEK,EPOCH]
47

48 49 50
INTERVALS = { DAY  : 5 * MINUTE,
              WEEK : HOUR }

51 52 53 54 55
OURDIR = '/root/setup'
OUTDIR = '/root/setup'
OUTBASENAME = 'cloudlab-openstack-stats.json'
OURDOMAIN = None

56 57 58
USE_PRELOAD_RESOURCES = False
USE_UUID_MAP = False

59 60
projects = {}
resources = {}
61 62
vhostnames = {}
phostnames = {}
63 64
r_hostnames = {}

65 66 67
uuidmap = {}
uuidmap_counter = 0

68 69
LOG = logging.getLogger(__name__)
# Define a default handler at INFO logging level
70 71
logging.basicConfig(level=logging.INFO)

72 73
pp = pprint.PrettyPrinter(indent=2)

74 75
DMETERS = ['cpu_util','network.incoming.bytes.rate',
           'network.outgoing.bytes.rate']
76 77 78 79
# We no longer collect these meters for periods; for periods,
# all we collect are the event meters.  We collect the DMETERS
# only for intervals now.
PERIOD_DMETERS = [ 'instance' ]
80 81 82 83 84 85 86 87 88 89
# NB: very important that the .delete meters come first, for
# each resource type.  Why?  Because we only put the resource
# details into the info dict one time (because we don't know
# how to merge details for a given resource if we see it again
# later and it differs) -- and sometimes we know if a resource
# is deleted based on if the delete method has been called for
# it (i.e. for network resources); for other resources like
# images, there's a deleted bit in the metadata we can just read.
EMETERS = [ 'network.delete','network.create','network.update',
            'subnet.delete','subnet.create','subnet.update',
90
#            'port.delete','port.create','port.update',
91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156
            'router.delete','router.create','router.update',
            'image.upload','image.update' ]

HELP = {
    'Summary': \
      'This is a summary of OpenStack resource usage in your experiment over' \
      ' several prior time periods (the last 10 minutes, hour, 6 hours, day,' \
      ' and week).  It is collected by a simple Ceilometer script that' \
      ' requests statistics from several OpenStack Ceilometer meters.' \
      ' Because we\'re primarily interested in resource usage on a' \
      ' per-physical-node basis, metrics and events are grouped by the' \
      ' physical node that used the resource in question or originated the' \
      ' event, and we show totals for the physical machine, as well as the' \
      ' per-resource fine-grained metric value.  We collect several meter' \
      ' values, including CPU utilization, network traffic, and API events.' \
      ' These are described in more detail in the subsequent keys.  Finally,' \
      ' some detailed metadata (i.e., VM disk image, name, etc) each' \
      ' OpenStack resource measured by a meter during one of our time' \
      ' periods is placed in the top-level dict under the \'info\' key.',
    
    'cpu_util': \
      'An average of CPU utilization (percent) over the given time period.' \
      ' OpenStack polls each VM\'s real CPU usage time at intervals, and the' \
      ' difference in usage between polling intervals is used to calculate' \
      ' the average CPU utilization over the interval.',
    'network.incoming.bytes.rate': \
      'The average rate of incoming network traffic to VMs.  OpenStack' \
      ' collects cumulative samples at intervals of VM bandwidth usage,' \
      ' and these samples are used to calculate a rate.  We then take the ' \
      ' average of all rate "samples" over our time periods.',
    'network.outgoing.bytes.rate': \
      'The average rate of outgoing network traffic from VMs.  OpenStack' \
      ' collects cumulative samples at intervals of VM bandwidth usage,' \
      ' and these samples are used to calculate a rate.  We then take the ' \
      ' average of all rate "samples" over our time periods.',

    'network.delete': \
      'The number of OpenStack virtual networks deleted during the period.',
    'network.create': \
      'The number of OpenStack virtual networks created during the period.',
    'network.update': \
      'The number of OpenStack virtual networks updated during the period.',
    'subnet.delete': \
      'The number of OpenStack virtual subnets deleted during the period.',
    'subnet.create': \
      'The number of OpenStack virtual subnets created during the period.',
    'subnet.update': \
      'The number of OpenStack virtual subnets updated during the period.',
    'port.delete': \
      'The number of OpenStack virtual network ports deleted during the period.',
    'port.create': \
      'The number of OpenStack virtual network ports created during the period.',
    'port.update': \
      'The number of OpenStack virtual network ports updated during the period.',
    'router.delete': \
      'The number of OpenStack virtual network routers deleted during the period.',
    'router.create': \
      'The number of OpenStack virtual network routers created during the period.',
    'router.update': \
      'The number of OpenStack virtual network routers updated during the period.',
    'image.upload': \
      'The number of images uploaded during the period.',
    'image.update': \
      'The number of images updated during the period.',
}

157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186
def build_keystone_args():
    global KEYSTONE_OPTS, CLOUDLAB_AUTH_FILE
    
    ret = dict()
    # First, see if they're in the env:
    for opt in KEYSTONE_OPTS:
        if opt in os.environ:
            ret[opt.lower()] = os.environ[opt]
        pass
    # Second, see if they're in a special Cloudlab file:
    if os.geteuid() == 0 and os.path.exists(CLOUDLAB_AUTH_FILE):
        try:
            f = open(CLOUDLAB_AUTH_FILE,'r')
            while True:
                line = f.readline()
                if not line:
                    break
                line = line.rstrip('\n')
                vva = line.split('=')
                if not vva or len(vva) != 2:
                    continue
                
                ret[vva[0].lower()] = eval(vva[1])

                pass
            f.close()
        except:
            LOG.exception("could not build keystone args!")
        pass
    elif os.geteuid() != 0:
187
        LOG.warn("you are not root (%d); not checking %s",os.geteuid(),CLOUDLAB_AUTH_FILE)
188
    elif not os.path.exists(CLOUDLAB_AUTH_FILE):
189
        LOG.warn("%s does not exist; not loading auth opts from it",CLOUDLAB_AUTH_FILE)
190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239

    return ret

def get_resource(client,resource_id):
    global resources,projects
    
    r = None
    if not resource_id in resources:
        r = client.resources.get(resource_id)
        resources[resource_id] = r
    else:
        r = resources[resource_id]
        pass
    
    return r

def get_hypervisor_hostname(client,resource):
    global resources,projects,r_hostnames
    
    #
    # This is yucky.  I have seen two different cases: one where the
    # resource.metadata.host field is a hash of the project_id and
    # hypervisor hostname -- and a second one (after a reboot) where the
    # 'host' field looks like 'compute.<HYPERVISOR-FQDN>' and there is a
    # 'node' field that has the hypervisor FQDN.  So we try to place
    # nice for both cases... use the 'node' field if it exists --
    # otherwise assume that the 'host' field has a hash.  Ugh!
    #
    # Ok, I see how this works.  If you call client.resources.list(),
    # you are shown a hash for the 'host' field.  And if you call
    # client.resources.get(resource_id) (presumably as admin, like we
    # do), you get more info.  Now, why can't they just do the same for
    # client.resources.list()?!  Anyway, we just choose not to
    # pre-initialize the resources list above, at startup, and pull them
    # all on-demand.
    #
    # Well, that was a nice theory, but it doesn't seem deterministic.  I
    # wonder if there's some kind of race.  Anyway, have to leave all this
    # hash crap in here for now.
    #
    if 'node' in resource.metadata \
      and resource.metadata['node'].endswith(OURDOMAIN):
        hostname = resource.metadata['node']
    elif 'host' in resource.metadata \
      and resource.metadata['host'].startswith('compute.') \
      and resource.metadata['host'].endswith(OURDOMAIN):
        hostname = resource.metadata['host'].lstrip('compute.')
    else:
        if not resource.project_id in projects:
            projects[resource.project_id] = resource.project_id
240
            for hostname in vhostnames.keys():
241 242 243 244 245
                shash = hashlib.sha224(resource.project_id + hostname)
                hh = shash.hexdigest()
                r_hostnames[hh] = hostname
                pass
            pass
246
        #LOG.debug("resource: " + pp.pformat(resource))
247 248 249 250 251
        hh = None
        try:
            hh = resource.metadata['host']
        except:
            if 'instance_id' in resource.metadata:
252
                LOG.debug("no hostname info for resource %s; trying instance_id" % (str(resource),))
253 254 255 256
                return get_hypervisor_hostname(client,get_resource(client,resource.metadata['instance_id']))
            else:
                LOG.exception("no 'host' field in metadata for resource %s" % (str(resource,)))
            pass
257 258 259 260 261 262 263
        if not hh in r_hostnames.keys():
            LOG.error("hostname hash %s doesn't map to a known hypervisor hostname!" % (hh,))
            return None
        hostname = r_hostnames[hh]
        pass
    return hostname

264 265 266 267 268 269 270 271 272 273 274
def get_api_hostname(client,resource):
    if 'host' in resource.metadata:
        if resource.metadata['host'].startswith('compute.') \
          and resource.metadata['host'].endswith(OURDOMAIN):
            return resource.metadata['host'].lstrip('compute.')
        elif resource.metadata['host'].startswith('network.') \
          and resource.metadata['host'].endswith(OURDOMAIN):
            return resource.metadata['host'].lstrip('network.')
        pass
    return None

275 276 277 278 279 280 281 282 283 284 285 286 287
def get_short_uuid(uuid):
    global uuidmap,uuidmap_counter

    if not USE_UUID_MAP:
        return uuid
    
    if uuid in uuidmap:
        return uuidmap[uuid]

    uuidmap_counter += 1
    uuidmap[uuid] = "uu" + str(uuidmap_counter)
    return uuidmap[uuid]

288 289 290 291
def fetchall(client):
    tt = time.gmtime()
    ct = time.mktime(tt)
    cts = time.strftime('%Y-%m-%dT%H:%M:%S',tt)
292 293

    periods = {}
294
    intervals = {}
295 296
    info = {}
    #datadict = {}
297
    vm_dict = dict() #count=vm_0,list=[])
298 299 300 301 302 303
    
    #
    # Ok, collect all the statistics, grouped by VM, for the period.  We
    # have to specify this duration
    #
    for period in PERIODS:
304 305 306 307 308 309
        periodkey = period
        if period == EPOCH:
            period = time.time()
            pass
        
        periods[periodkey] = {}
310 311 312 313 314 315
        cpu_util_dict = dict()
        
        daylightfactor = 0
        if time.daylight:
            daylightfactor -= HOUR
            pass
316
        
317 318 319 320 321
        pct = ct - period + daylightfactor
        ptt = time.localtime(pct)
        pcts = time.strftime('%Y-%m-%dT%H:%M:%S',ptt)
        q = [{'field':'timestamp','value':pcts,'op':'ge',},
             {'field':'timestamp','value':cts,'op':'lt',}]
322 323

        # First, query some rate meters for avg stats:
324 325 326
        for meter in PERIOD_DMETERS:
            LOG.info("getting statistics for meter %s during period %s"
                     % (meter,str(period)))
327 328
            mdict = {}
            statistics = client.statistics.list(meter,#period=period,
329
                                                groupby=['resource_id'],
330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352
                                                q=q)
            LOG.debug("Statistics for %s during period %d (len %d): %s"
                      % (meter,period,len(statistics),pp.pformat(statistics)))
            for stat in statistics:
                rid = stat.groupby['resource_id']
                resource = get_resource(client,rid)
                # For whatever reason, the resource_id for the network.*
                # meters prefixes the VM UUIDs with instance-%d- ...
                # so strip that out.
                vmrid = rid
                if rid.startswith('instance-'):
                    vmrid = rid.lstrip('instance-')
                    vidx = vmrid.find('-')
                    vmrid = vmrid[(vidx+1):]
                    pass
                # Then, for the network.* meters, the results are
                # per-interface, so strip that off too so we can
                # report one number per VM.
                vidx = vmrid.find('-tap')
                if vidx > -1:
                    vmrid = vmrid[:vidx]
                    pass

353 354
                vmrid = get_short_uuid(vmrid)
                
355
                hostname = get_hypervisor_hostname(client,resource)
356 357
                LOG.debug("%s for %s on %s = %f (resource=%s)"
                          % (meter,rid,hostname,stat.avg,pp.pformat(resource)))
358 359 360
                if not hostname in vm_dict:
                    vm_dict[hostname] = {}
                    pass
361 362 363 364 365 366 367 368 369 370 371
                if not vmrid in vm_dict[hostname]:
                    vm_dict[hostname][vmrid] = {}
                if not 'name' in vm_dict[hostname][vmrid] \
                   and 'display_name' in resource.metadata:
                    vm_dict[hostname][vmrid]['name'] = resource.metadata['display_name']
                if not 'image' in vm_dict[hostname][vmrid] \
                   and 'image.name' in resource.metadata:
                    vm_dict[hostname][vmrid]['image'] = resource.metadata['image.name']
                if not 'status' in vm_dict[hostname][vmrid] \
                   and 'status' in resource.metadata:
                    vm_dict[hostname][vmrid]['status'] = resource.metadata['status']
372 373 374 375
                    pass
                if not hostname in mdict:
                    mdict[hostname] = dict(total=0.0,vms={})
                    pass
376
                mdict[hostname]['total'] += round(stat.avg,4)
377
                if not vmrid in mdict[hostname]['vms']:
378
                    mdict[hostname]['vms'][vmrid] = round(stat.avg,4)
379
                else:
380
                    mdict[hostname]['vms'][vmrid] += round(stat.avg,4)
381
                    pass
382
                pass
383
            periods[periodkey][meter] = mdict
384 385
            pass
        
386
        
387
        info['vms'] = vm_dict
388 389 390

        # Now also query the API delta meters:
        rdicts = dict()
391
        for meter in EMETERS:
392 393
            LOG.info("getting statistics for event meter %s during period %s"
                     % (meter,str(period)))
394 395 396 397 398 399 400 401 402 403 404
            idx = meter.find('.')
            if idx > -1:
                rplural = "%s%s" % (meter[0:idx],'s')
            else:
                rplural = None
                pass
            if rplural and not rplural in rdicts:
                rdicts[rplural] = dict()
                pass
            mdict = {}
            statistics = client.statistics.list(meter,#period=period,
405
                                                groupby=['resource_id'],
406 407 408 409 410 411
                                                q=q)
            LOG.debug("Statistics for %s during period %d (len %d): %s"
                      % (meter,period,len(statistics),pp.pformat(statistics)))
            for stat in statistics:
                rid = stat.groupby['resource_id']
                resource = get_resource(client,rid)
412
                rid = get_short_uuid(rid)
413
                hostname = get_api_hostname(client,resource)
414 415 416 417 418
                if not hostname:
                    hostname = 'UNKNOWN'
                    pass
                LOG.debug("%s for %s on %s = %f (%s)"
                          % (meter,rid,hostname,stat.sum,pp.pformat(resource)))
419
                if rplural and not rid in rdicts[rplural]:
420 421 422 423 424 425 426
                    deleted = False
                    if meter.endswith('.delete') \
                       or ('deleted' in resource.metadata \
                           and resource.metadata['deleted'] \
                                 in ['True','true',True]):
                        deleted = True
                        pass
427 428 429 430
                    rmname = None
                    if 'name' in resource.metadata:
                        rmname = resource.metadata['name']
                    rdicts[rplural][rid] = dict(name=rmname,
431 432 433 434 435 436 437 438 439 440
                                                deleted=deleted)
                    status = None
                    if 'state' in resource.metadata:
                        status = resource.metadata['state']
                    elif 'status' in resource.metadata:
                        status = resource.metadata['status']
                        pass
                    if not status is None:
                        rdicts[rplural][rid]['status'] = str(status).lower()
                        pass
441 442 443 444 445 446 447 448 449 450 451 452
                    pass
                if rplural:
                    rname = rplural
                else:
                    rname = 'resources'
                    pass
                if not hostname in mdict:
                    mdict[hostname] = { 'total':0.0,rname:{} }
                    pass
                mdict[hostname]['total'] += stat.sum
                mdict[hostname][rname][rid] = stat.sum
                pass
453
            periods[periodkey][meter] = mdict
454
            pass
455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471
        for (res,infodict) in rdicts.iteritems():
            # If we haven't seen this resource before, slap all
            # the info we've found for all those resource ids
            # into our info dict for this resource type.  Else,
            # carefully merge -- if we've already collected info
            # for a specific resource, don't overwrite that.
            # The theory for the Else case is that newer info
            # is better, if the older info differs (which I can't
            # think right now it would... should be the same for
            # all periods).
            if not res in info:
                info[res] = infodict
            else:
                for (resid,resinfodict) in infodict.iteritems():
                    if not resid in info[res]:
                        info[res][resid] = resinfodict
                pass
472
            pass
473
        pass
474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495
        
    #
    # Now query the same meters, but for 5-minute intervals over the
    # past 24 hours, and for 1-hr intervals over the past week.
    #
    for (period,interval) in INTERVALS.iteritems():
        intervals[period] = {}
        cpu_util_dict = dict()
        
        daylightfactor = 0
        if time.daylight:
            daylightfactor -= HOUR
            pass
        
        pct = ct - period + daylightfactor
        ptt = time.localtime(pct)
        pcts = time.strftime('%Y-%m-%dT%H:%M:%S',ptt)
        q = [{'field':'timestamp','value':pcts,'op':'ge',},
             {'field':'timestamp','value':cts,'op':'lt',}]

        # First, query some rate meters for avg stats:
        for meter in DMETERS:
496 497
            LOG.info("getting statistics for meter %s during period %s interval %s"
                     % (meter,str(period),str(interval)))
498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525
            mdict = {}
            statistics = client.statistics.list(meter,period=interval,
                                                groupby=['resource_id'],
                                                q=q)
            LOG.debug("Statistics (interval %d) for %s during period %d"
                      " (len %d): %s"
                      % (interval,meter,period,len(statistics),
                         pp.pformat(statistics)))
            for stat in statistics:
                rid = stat.groupby['resource_id']
                resource = get_resource(client,rid)
                # For whatever reason, the resource_id for the network.*
                # meters prefixes the VM UUIDs with instance-%d- ...
                # so strip that out.
                vmrid = rid
                if rid.startswith('instance-'):
                    vmrid = rid.lstrip('instance-')
                    vidx = vmrid.find('-')
                    vmrid = vmrid[(vidx+1):]
                    pass
                # Then, for the network.* meters, the results are
                # per-interface, so strip that off too so we can
                # report one number per VM.
                vidx = vmrid.find('-tap')
                if vidx > -1:
                    vmrid = vmrid[:vidx]
                    pass

526 527
                vmrid = get_short_uuid(vmrid)
                
528
                hostname = get_hypervisor_hostname(client,resource)
529 530
                LOG.debug("%s for %s on %s = %f (resource=%s)"
                          % (meter,rid,hostname,stat.avg,pp.pformat(resource)))
531 532 533
                if not hostname in vm_dict:
                    vm_dict[hostname] = {}
                    pass
534 535 536 537 538 539 540 541 542 543 544
                if not vmrid in vm_dict[hostname]:
                    vm_dict[hostname][vmrid] = {}
                if not 'name' in vm_dict[hostname][vmrid] \
                   and 'display_name' in resource.metadata:
                    vm_dict[hostname][vmrid]['name'] = resource.metadata['display_name']
                if not 'image' in vm_dict[hostname][vmrid] \
                   and 'image.name' in resource.metadata:
                    vm_dict[hostname][vmrid]['image'] = resource.metadata['image.name']
                if not 'status' in vm_dict[hostname][vmrid] \
                   and 'status' in resource.metadata:
                    vm_dict[hostname][vmrid]['status'] = resource.metadata['status']
545 546 547 548 549 550
                    pass
                if not hostname in mdict:
                    mdict[hostname] = dict(vms={}) #dict(total=0.0,vms={})
                    pass
                #mdict[hostname]['total'] += stat.avg
                if not vmrid in mdict[hostname]['vms']:
551 552
                    mdict[hostname]['vms'][vmrid] = {}
                    #mdict[hostname]['vms'][vmrid] = {'__FLATTEN__':True}
553 554 555 556 557 558 559 560 561
                    pass
                pet = time.strptime(stat.period_end,'%Y-%m-%dT%H:%M:%S')
                pes = time.mktime(pet)
                mdict[hostname]['vms'][vmrid][pes] = \
                  dict(avg=round(stat.avg,4),max=round(stat.max,4),n=round(stat.count,4))
                pass
            intervals[period][meter] = mdict
            pass
        pass
562
    
563
    info['vms'] = vm_dict
564 565
    info['host2vname'] = vhostnames
    info['host2pnode'] = phostnames
566
    info['uuidmap'] = uuidmap
567 568 569 570 571 572 573 574 575 576 577 578 579 580

    ett = time.gmtime()
    ect = time.mktime(ett)
    ects = time.strftime('%Y-%m-%dT%H:%M:%S',ett)
    gmoffset = time.timezone
    daylight = False
    if time.daylight:
        gmoffset = time.altzone
        daylight = True
        pass

    metadata = dict(start=cts,start_timestamp=ct,
                    end=ects,end_timestamp=ect,
                    duration=(ect-ct),gmoffset=gmoffset,
581 582
                    daylight=daylight,version=VERSION,
                    periods=PERIODS,intervals=INTERVALS)
583
    
584
    return dict(periods=periods,intervals=intervals,info=info,META=metadata,HELP=HELP)
585 586 587 588 589 590 591 592 593 594 595 596

def preload_resources(client):
    global resources

    resourcelist = client.resources.list(limit=LIMIT)
    LOG.debug("Resources: " + pp.pformat(resourcelist))
    for r in resourcelist:
        resources[r.id] = r
        pass
    pass

def reload_hostnames():
597 598 599
    global vhostnames,phostnames,OURDOMAIN
    newvhostnames = {}
    newphostnames = {}
600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
    
    try:
        f = file(OURDIR + "/fqdn.map")
        i = 0
        for line in f:
            i += 1
            if len(line) == 0 or line[0] == '#':
                continue
            line = line.rstrip('\n')
            la = line.split("\t")
            if len(la) != 2:
                LOG.warn("bad FQDN line %d; skipping" % (i,))
                continue
            vname = la[0].lower()
            fqdn = la[1].lower()
615
            newvhostnames[fqdn] = vname
616 617 618 619 620 621
            if OURDOMAIN is None or OURDOMAIN == '':
                idx = fqdn.find('.')
                if idx > -1:
                    OURDOMAIN = fqdn[idx+1:]
                pass
            pass
622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639
        vhostnames = newvhostnames
        
        f = file(OURDIR + "/fqdn.physical.map")
        i = 0
        for line in f:
            i += 1
            if len(line) == 0 or line[0] == '#':
                continue
            line = line.rstrip('\n')
            la = line.split("\t")
            if len(la) != 2:
                LOG.warn("bad FQDN line %d; skipping" % (i,))
                continue
            pname = la[0].lower()
            fqdn = la[1].lower()
            newphostnames[fqdn] = pname
            pass
        phostnames = newphostnames
640 641 642 643 644 645 646 647 648 649 650 651 652 653 654
    except:
        LOG.exception("failed to reload hostnames, returning None")
        pass
    return

def main():
    try:
        os.makedirs(OUTDIR)
    except:
        pass
    kargs = build_keystone_args()
    LOG.debug("keystone args: %s" % (str(kargs)))
    
    cclient = client.get_client(2,**kargs)
    
655
    if USE_PRELOAD_RESOURCES:
656 657 658 659 660 661 662 663 664 665 666 667
        preload_resources(cclient)
        pass
    
    iteration = 0
    outfile = "%s/%s" % (OUTDIR,OUTBASENAME)
    tmpoutfile = outfile + ".NEW"
    while True:
        iteration += 1
        try:
            reload_hostnames()
            newdatadict = fetchall(cclient)
            f = file(tmpoutfile,'w')
668 669
            #,cls=FlatteningJSONEncoder)
            f.write(json.dumps(newdatadict,sort_keys=True,indent=None) + '\n')
670 671 672 673 674 675 676
            f.close()
            shutil.move(tmpoutfile,outfile)
        except:
            LOG.exception("failure during iteration %d; nothing new generated"
                          % (iteration,))
            pass
        
677
        LOG.debug("Sleeping for 5 minutes...")
678
        time.sleep(5 * MINUTE)
679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704
        pass

    #meters = client.meters.list(limit=LIMIT)
    #pp.pprint("Meters: ")
    #pp.pprint(meters)
    
    # Ceilometer meter.list command only allows filtering on
    # ['project', 'resource', 'source', 'user'].
    # q=[{'field':'meter.name','value':'cpu_util','op':'eq','type':'string'}]
    #cpu_util_meters = []
    #for m in meters:
    #    if m.name == 'cpu_util':
    #        cpu_util_meters.append(m)
    #    pass
    #pp.pprint("cpu_util Meters:")
    #pp.pprint(cpu_util_meters)
    
    #for m in cpu_util_meters:
    #    pp.pprint("Resource %s for cpu_util meter %s:" % (m.resource_id,m.meter_id))
    #    pp.pprint(resources[m.resource_id])
    #    pass
    
    return -1

if __name__ == "__main__":
    main()