This patch series just does a bunch of cleanup to the imagefactory, found by code inspection and through the use of pylint. There should be no functional change after this series, just cleaner code. With these patches in place tests still pass and a local upload style build still succeeds. Please review and ACK.
Chris Lalancette
Most of them can't really be used standalone anyway. For the one that can, I'm not sure how useful it is, so remove the main() from it as well.
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/ApplicationConfiguration.py | 1 - imgfac/ImageWarehouse.py | 1 - imgfac/Template.py | 1 - imgfac/VMWare.py | 12 ------------ 4 files changed, 0 insertions(+), 15 deletions(-)
diff --git a/imgfac/ApplicationConfiguration.py b/imgfac/ApplicationConfiguration.py index 9311ed5..7688c3a 100644 --- a/imgfac/ApplicationConfiguration.py +++ b/imgfac/ApplicationConfiguration.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8
# Copyright 2011 Red Hat, Inc. diff --git a/imgfac/ImageWarehouse.py b/imgfac/ImageWarehouse.py index d0c145d..220adb5 100644 --- a/imgfac/ImageWarehouse.py +++ b/imgfac/ImageWarehouse.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8
# Copyright 2011 Red Hat, Inc. diff --git a/imgfac/Template.py b/imgfac/Template.py index 194f675..82b4260 100644 --- a/imgfac/Template.py +++ b/imgfac/Template.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8
# Copyright 2011 Red Hat, Inc. diff --git a/imgfac/VMWare.py b/imgfac/VMWare.py index 733ec55..11ea7fd 100755 --- a/imgfac/VMWare.py +++ b/imgfac/VMWare.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2010 Jonathan Kinred jonathan.kinred@gmail.com # @@ -300,14 +299,3 @@ class VMImport: disk_spec.operation = operation.add
return disk_spec - -def main(): - vmc = VMImport('https://noswhere.com/sdk', 'Administrator', 'changeme') - #nic = {'network_name': 'VM Network', 'type': 'VirtualE1000'} - vmc.import_vm(datastore='datastore1', network_name = 'VM Network', - name=sys.argv[1], disksize_kb = (10*1024*1024 + 2 ), memory=512, num_cpus=1, - guest_id='otherLinux64Guest', imagefilename="/var/tmp/vmware-test.vmdk") - -if __name__ == '__main__': - main() -
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Most of them can't really be used standalone anyway. For the one that can, I'm not sure how useful it is, so remove the main() from it as well.
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/ApplicationConfiguration.py | 1 - imgfac/ImageWarehouse.py | 1 - imgfac/Template.py | 1 - imgfac/VMWare.py | 12 ------------ 4 files changed, 0 insertions(+), 15 deletions(-)
diff --git a/imgfac/ApplicationConfiguration.py b/imgfac/ApplicationConfiguration.py index 9311ed5..7688c3a 100644 --- a/imgfac/ApplicationConfiguration.py +++ b/imgfac/ApplicationConfiguration.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8
# Copyright 2011 Red Hat, Inc. diff --git a/imgfac/ImageWarehouse.py b/imgfac/ImageWarehouse.py index d0c145d..220adb5 100644 --- a/imgfac/ImageWarehouse.py +++ b/imgfac/ImageWarehouse.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8
# Copyright 2011 Red Hat, Inc. diff --git a/imgfac/Template.py b/imgfac/Template.py index 194f675..82b4260 100644 --- a/imgfac/Template.py +++ b/imgfac/Template.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # encoding: utf-8
# Copyright 2011 Red Hat, Inc. diff --git a/imgfac/VMWare.py b/imgfac/VMWare.py index 733ec55..11ea7fd 100755 --- a/imgfac/VMWare.py +++ b/imgfac/VMWare.py @@ -1,4 +1,3 @@ -#!/usr/bin/env python # # Copyright 2010 Jonathan Kinred jonathan.kinred@gmail.com # @@ -300,14 +299,3 @@ class VMImport: disk_spec.operation = operation.add
return disk_spec
-def main():
- vmc = VMImport('https://noswhere.com/sdk', 'Administrator', 'changeme')
- #nic = {'network_name': 'VM Network', 'type': 'VirtualE1000'}
- vmc.import_vm(datastore='datastore1', network_name = 'VM Network',
name=sys.argv[1], disksize_kb = (10*1024*1024 + 2 ), memory=512, num_cpus=1,
guest_id='otherLinux64Guest', imagefilename="/var/tmp/vmware-test.vmdk")
-if __name__ == '__main__':
- main()
-- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/VMWare.py | 56 ++++---------------------------------- imgfac/qmfagent/BuildAdaptor.py | 4 --- 2 files changed, 6 insertions(+), 54 deletions(-)
diff --git a/imgfac/VMWare.py b/imgfac/VMWare.py index 11ea7fd..559a727 100755 --- a/imgfac/VMWare.py +++ b/imgfac/VMWare.py @@ -45,19 +45,12 @@ class VMImport: self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) self.vim = Vim(url) self.vim.login(username, password) - + def curl_progress(self, download_t, download_d, upload_t, upload_d): - #print "Total to download", download_t - #print "Total downloaded", download_d - #print "Total to upload", upload_t - #print "Total uploaded", upload_d curtime=time() # TODO: Make poke frequency variable # 5 seconds isn't too much and it makes the status bar in the vSphere GUI look nice :-) if (curtime - self.time_at_last_poke) >= 5: - #print "Current time (%s) - time at last poke (%s)" % (curtime, self.time_at_last_poke) - #print "Ten or more seconds since last poke" - #print "Trying to do a poke with progress of %d" % (int(upload_d*100/upload_t)) self.vim.invoke('HttpNfcLeaseProgress', _this=self.lease_mo_ref, percent = int(upload_d*100/upload_t)) self.time_at_last_poke = time()
@@ -69,7 +62,6 @@ class VMImport: # If the host is not set, use the ComputeResource as the target if not host: target = self.vim.find_entity_view(view_type='ComputeResource') -# filter={'name': compute_resource}) target.update_view_data(['name', 'datastore', 'network', 'parent', 'resourcePool']) resource_pool = target.resourcePool @@ -82,9 +74,6 @@ class VMImport: host_cr.update_view_data(properties=['resourcePool']) resource_pool = host_cr.resourcePool
- # Compute image size in KB rounding up - # disksize_kb = int(math.ceil((1.0*os.path.getsize(imagefilename))/(1024.0))) - # A list of devices to be assigned to the VM vm_devices = []
@@ -115,7 +104,7 @@ class VMImport:
disk = self.create_disk(datastore=ds, disksize_kb=disksize_kb) vm_devices.append(disk) - + for nic in nics: nic_spec = self.create_nic(target, nic) if not nic_spec: @@ -146,26 +135,11 @@ class VMImport:
dc.update_view_data(properties=['vmFolder'])
- #print "*************************************************" - #print "dc pprint" - #pprint(dc) - - #print "pool pprint" - #pprint(resource_pool) - - #print "Config spec pprint" - #pprint(vm_config_spec) - - #print "*************************************************" - importspec = self.vim.create_object('VirtualMachineImportSpec') - + importspec.configSpec = vm_config_spec importspec.resPoolEntity = None
- #print "pprint importspec" - #pprint(importspec) - lease_mo_ref = self.vim.invoke('ImportVApp', _this=resource_pool, spec = importspec, folder = dc.vmFolder)
lease = HttpNfcLease(mo_ref=lease_mo_ref, vim=self.vim) @@ -173,40 +147,22 @@ class VMImport: self.lease_mo_ref = lease_mo_ref
for i in range(1000): - lease.update_view_data() + lease.update_view_data() if lease.state == "ready": break sleep(5) - #print "Lease not ready - waiting 5 seconds" - - #print "pprint lease" - #pprint(lease) - - #print "Lease error: " - #pprint(lease.error) - - #print "Lease info: " - #pprint(lease.info) - - #print "Lease progress: %d" % lease.initializeProgress - - #print "Lease state: " - #pprint(lease.state)
url = lease.info.deviceUrl[0]['url']
self.lease_timeout = lease.info.leaseTimeout self.time_at_last_poke = time()
- #print "I will now upload (%s) to (%s)" % (imagefilename, url) - image_file = open(imagefilename) - + # Upload the image itself image_size = os.path.getsize(imagefilename) curl = pycurl.Curl() curl.setopt(pycurl.URL, str(url)) - #curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.SSL_VERIFYPEER, 0) curl.setopt(pycurl.POST, 1) curl.setopt(pycurl.POSTFIELDSIZE, image_size) @@ -244,7 +200,7 @@ class VMImport: connect_info.connected = False connect_info.startConnected = True
- new_nic = self.vim.create_object(nic['type']) + new_nic = self.vim.create_object(nic['type']) new_nic.backing = backing new_nic.key = 2 # TODO: Work out a way to automatically increment this diff --git a/imgfac/qmfagent/BuildAdaptor.py b/imgfac/qmfagent/BuildAdaptor.py index 3d47c5b..0f68a12 100644 --- a/imgfac/qmfagent/BuildAdaptor.py +++ b/imgfac/qmfagent/BuildAdaptor.py @@ -106,7 +106,3 @@ class BuildAdaptor(BuildJob): event.type = failure_type event.info = failure_info self.agent.session.raiseEvent(data=event, severity=SEV_ERROR) - - -# if __name__ == '__main__': -# unittest.main()
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/VMWare.py | 56 ++++---------------------------------- imgfac/qmfagent/BuildAdaptor.py | 4 --- 2 files changed, 6 insertions(+), 54 deletions(-)
diff --git a/imgfac/VMWare.py b/imgfac/VMWare.py index 11ea7fd..559a727 100755 --- a/imgfac/VMWare.py +++ b/imgfac/VMWare.py @@ -45,19 +45,12 @@ class VMImport: self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) self.vim = Vim(url) self.vim.login(username, password)
- def curl_progress(self, download_t, download_d, upload_t, upload_d):
#print "Total to download", download_t
#print "Total downloaded", download_d
#print "Total to upload", upload_t
#print "Total uploaded", upload_d curtime=time() # TODO: Make poke frequency variable # 5 seconds isn't too much and it makes the status bar in the vSphere GUI look nice :-) if (curtime - self.time_at_last_poke) >= 5:
#print "Current time (%s) - time at last poke (%s)" % (curtime, self.time_at_last_poke)
#print "Ten or more seconds since last poke"
#print "Trying to do a poke with progress of %d" % (int(upload_d*100/upload_t)) self.vim.invoke('HttpNfcLeaseProgress', _this=self.lease_mo_ref, percent = int(upload_d*100/upload_t)) self.time_at_last_poke = time()
@@ -69,7 +62,6 @@ class VMImport: # If the host is not set, use the ComputeResource as the target if not host: target = self.vim.find_entity_view(view_type='ComputeResource') -# filter={'name': compute_resource}) target.update_view_data(['name', 'datastore', 'network', 'parent', 'resourcePool']) resource_pool = target.resourcePool @@ -82,9 +74,6 @@ class VMImport: host_cr.update_view_data(properties=['resourcePool']) resource_pool = host_cr.resourcePool
# Compute image size in KB rounding up
# disksize_kb = int(math.ceil((1.0*os.path.getsize(imagefilename))/(1024.0)))
# A list of devices to be assigned to the VM vm_devices = []
@@ -115,7 +104,7 @@ class VMImport:
disk = self.create_disk(datastore=ds, disksize_kb=disksize_kb) vm_devices.append(disk)
for nic in nics: nic_spec = self.create_nic(target, nic) if not nic_spec:
@@ -146,26 +135,11 @@ class VMImport:
dc.update_view_data(properties=['vmFolder'])
#print "*************************************************"
#print "dc pprint"
#pprint(dc)
#print "pool pprint"
#pprint(resource_pool)
#print "Config spec pprint"
#pprint(vm_config_spec)
#print "*************************************************"
importspec = self.vim.create_object('VirtualMachineImportSpec')
importspec.configSpec = vm_config_spec importspec.resPoolEntity = None
#print "pprint importspec"
#pprint(importspec)
lease_mo_ref = self.vim.invoke('ImportVApp', _this=resource_pool, spec = importspec, folder = dc.vmFolder) lease = HttpNfcLease(mo_ref=lease_mo_ref, vim=self.vim)
@@ -173,40 +147,22 @@ class VMImport: self.lease_mo_ref = lease_mo_ref
for i in range(1000):
lease.update_view_data()
lease.update_view_data() if lease.state == "ready": break sleep(5)
#print "Lease not ready - waiting 5 seconds"
#print "pprint lease"
#pprint(lease)
#print "Lease error: "
#pprint(lease.error)
#print "Lease info: "
#pprint(lease.info)
#print "Lease progress: %d" % lease.initializeProgress
#print "Lease state: "
#pprint(lease.state) url = lease.info.deviceUrl[0]['url'] self.lease_timeout = lease.info.leaseTimeout self.time_at_last_poke = time()
#print "I will now upload (%s) to (%s)" % (imagefilename, url)
image_file = open(imagefilename)
# Upload the image itself image_size = os.path.getsize(imagefilename) curl = pycurl.Curl() curl.setopt(pycurl.URL, str(url))
#curl.setopt(pycurl.VERBOSE, 1) curl.setopt(pycurl.SSL_VERIFYPEER, 0) curl.setopt(pycurl.POST, 1) curl.setopt(pycurl.POSTFIELDSIZE, image_size)
@@ -244,7 +200,7 @@ class VMImport: connect_info.connected = False connect_info.startConnected = True
new_nic = self.vim.create_object(nic['type'])
new_nic = self.vim.create_object(nic['type']) new_nic.backing = backing new_nic.key = 2 # TODO: Work out a way to automatically increment this
diff --git a/imgfac/qmfagent/BuildAdaptor.py b/imgfac/qmfagent/BuildAdaptor.py index 3d47c5b..0f68a12 100644 --- a/imgfac/qmfagent/BuildAdaptor.py +++ b/imgfac/qmfagent/BuildAdaptor.py @@ -106,7 +106,3 @@ class BuildAdaptor(BuildJob): event.type = failure_type event.info = failure_info self.agent.session.raiseEvent(data=event, severity=SEV_ERROR)
-# if __name__ == '__main__':
-# unittest.main()
1.7.4.4
1) Remove trailing whitespace 2) Switch some tabs to spaces (like the rest of the code) 3) Add some whitespace between arguments.
Signed-off-by: Chris Lalancette clalance@redhat.com --- Documentation/FAQ.markdown | 24 ++++++++-------- Documentation/INSTALL.markdown | 10 +++--- Documentation/QMF.markdown | 56 +++++++++++++++++++------------------- Documentation/man/imagefactory.1 | 18 +++++------- imgfac/ReservationManager.py | 8 +++--- imgfac/builders/FedoraBuilder.py | 16 +++++----- imgfac/builders/RHELBuilder.py | 17 +++++------ 7 files changed, 72 insertions(+), 77 deletions(-)
diff --git a/Documentation/FAQ.markdown b/Documentation/FAQ.markdown index dd029bd..dad7e4c 100644 --- a/Documentation/FAQ.markdown +++ b/Documentation/FAQ.markdown @@ -1,43 +1,43 @@ # Frequently Asked Questions #
## Installation ## -1. Q: How do I install image_factory? +1. Q: How do I install image_factory? A: The simplest approach is to install from rpm. To read about creating the rpm and/or other installation options, please read the [INSTALL](https://github.com/aeolusproject/image_factory/blob/master/Documentation/INSTALL.markdown) document in the Documentation directory of source distribution.
## Configuration ## -1. Q: Where does the configuration live? +1. Q: Where does the configuration live? A: `/etc/imagefactory.conf`
-2. Q: Where does imagefactory log by default? +2. Q: Where does imagefactory log by default? A: `/var/log/imagefactory.log`
-3. Q: Where does Image Factory put the images that are created? +3. Q: Where does Image Factory put the images that are created? A: There are really three parts to this answer... - + 1. Image Factory stores a local copy of the image in the path pointed to by the configuration option `imgdir`, which defaults to `/var/tmp` 2. Oz caches .iso files in `/var/lib/oz` 3. If an Image Warehouse is configured and the JEOS was created by Image Factory, the resulting image is uploaded to the Image Warehouse in the `images` bucket.
## Running and using Image Factory ## -1. Q: How do I enable Image Factory? +1. Q: How do I enable Image Factory? A: Run `service imagefactory start`
-2. Q: How do I run Image Factory in debug mode? +2. Q: How do I run Image Factory in debug mode? A: Shut the service down with `service imagefactory stop`. Then run `imagefactory --qmf --debug` as root.
-3. Q: How do I interact with the Image Factory daemon? +3. Q: How do I interact with the Image Factory daemon? A: Please read through the QMF guide in the Documentation directory for some examples.
-4. Q: How do I run the tests to know that what I have works? +4. Q: How do I run the tests to know that what I have works? A: The unit tests can be run from the top level of the source tree with the command `python -m unittest discover -v`
## Troubleshooting ## -1. Q: After starting imagefactory, the log shows "Please enter your password" instead of "image_factory has qmf/qpid address...". +1. Q: After starting imagefactory, the log shows "Please enter your password" instead of "image_factory has qmf/qpid address...". A: Put the following in /etc/qpidd.conf - + cluster-mechanism=ANONYMOUS auth=no
-2. Q: The imagefactory log shows an exception "AttributeError: 'NoneType' object has no attribute 'makefile'" +2. Q: The imagefactory log shows an exception "AttributeError: 'NoneType' object has no attribute 'makefile'" A: Please make certain the image warehouse is reachable and responds.
diff --git a/Documentation/INSTALL.markdown b/Documentation/INSTALL.markdown index dc95b86..59198a5 100644 --- a/Documentation/INSTALL.markdown +++ b/Documentation/INSTALL.markdown @@ -5,17 +5,17 @@ Installing from an rpm will install all dependencies needed by Image Factory. F
* __yum__
- The Aeolus project has built rpms available via the testing [repository](http://repos.fedorapeople.org/repos/aeolus/packages-testing/). + The Aeolus project has built rpms available via the testing [repository](http://repos.fedorapeople.org/repos/aeolus/packages-testing/). After adding the appropriate `.repo` file to your `/etc/yum.repos.d` directory, installation is as easy as running: `yum install imagefactory`
* __local rpm__
We have not yet established a download location for built packages. Until that time, one can easily build the rpm from the source as described below.
-__Creating an rpm:__ -After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `setup.py bdist_rpm` +__Creating an rpm:__ +After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `make rpm` A directory named `dist` will be created to store the newly built rpm.
## via setup.py ## -After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `setup.py install` -Be sure to have the dependencies described in the Image Factory [README](https://github.com/aeolusproject/image_factory/blob/master/README.markdown) configured. \ No newline at end of file +After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `setup.py install` +Be sure to have the dependencies described in the Image Factory [README](https://github.com/aeolusproject/image_factory/blob/master/README.markdown) configured. diff --git a/Documentation/QMF.markdown b/Documentation/QMF.markdown index cdcb303..3d8956a 100644 --- a/Documentation/QMF.markdown +++ b/Documentation/QMF.markdown @@ -3,7 +3,7 @@ The script imagefactory can be used to start a daemon running a QMF agent. Using the 'qpidd' option, the host that qpidd is running on can be set. Currently this defaults to the default qpidd port on localhost. Future versions will accept an amqp:// URL to fully configure the agent session.
Example usage: - + ./imagefactory --verbose --qmf --qpidd localhost
Once the agent has opened a session, QMF consoles can send messages to the agent start image builds, get status on builds, etc. @@ -12,10 +12,10 @@ Once the agent has opened a session, QMF consoles can send messages to the agent
## Image Factory Agent ##
-Once a console connects to the same QMF bus as the Image Factory, the agent can be found given the vendor ("redhat.com") and product ("imagefactory") portions of the agent name. +Once a console connects to the same QMF bus as the Image Factory, the agent can be found given the vendor ("redhat.com") and product ("imagefactory") portions of the agent name.
Example 1 - Getting an agent list in python - + In [1]: import cqpid In [2]: from qmf2 import * In [3]: connection = cqpid.Connection("localhost") @@ -23,29 +23,29 @@ Once a console connects to the same QMF bus as the Image Factory, the agent can In [5]: session = ConsoleSession(connection) In [6]: session.open() In [7]: session.getAgents() - Out[7]: + Out[7]: [apache.org:qpidd:41fc7abd-56cc-4090-97a8-011ef1104f7f, redhat.com:imagefactory:5edbf641-78ba-4375-8ec8-f60f555e173a] - + Given an agent, an imagefactory can be fetched:
Example 2 - Querying for an imagefactory - + In [14]: factory = agent.query("{class:ImageFactory, package:'com.redhat.imagefactory'}")[0] - + The imagefactory can then be sent a message to build an image:
Example 3 - Building an image - + In [15]: factory.build_image("", "", "<template></template>", ["mock"]) - Out[15]: + Out[15]: {'build_adaptors': [{'_agent_epoch': 1L, '_agent_name': 'redhat.com:imagefactory:5edbf641-78ba-4375-8ec8-f60f555e173a', '_object_name': 'build_adaptor:db21dd5e-b3b6-49d1-a432-07f9f2d1c3c5'}]} - + The console can poll the build_adaptor for build status or receive events listed below.
Example 4 - Querying for build status - In [20]: response = factory.build_image("", "", "<template></template>", ["mock"]) In [21]: build_addr = DataAddr(response['build_adaptors'][0]) In [22]: query = Query(build_addr) @@ -55,7 +55,7 @@ The console can poll the build_adaptor for build status or receive events listed Out[24]: 'FINISHING' In [25]: agent.query(query)[0].status Out[25]: 'COMPLETED' - + --- ## Schemas for Image Factory classes ##
@@ -64,7 +64,7 @@ The console can poll the build_adaptor for build status or receive events listed package = "com.redhat.imagefactory" #### Methods: #### * `build_image(image, build, template, targets)` - + method name = "build_image" desc = "Build an image for the given targets" arguments: @@ -86,7 +86,7 @@ The console can poll the build_adaptor for build status or receive events listed desc = "the QMF addresses of the build_adaptors instantiated"
* `provider_image(image, build, providers, credentials)` - + method name = "push_image" desc = "Push an image to the given providers." arguments: @@ -108,7 +108,7 @@ The console can poll the build_adaptor for build status or receive events listed desc = "the QMF addresses of the build_adaptors instantiated"
* `import_image(image, build, target_identifier, image_desc, target, provider)` - + method name = "import_image" desc = "Import an image using a target specific image identifier" arguments: @@ -145,7 +145,7 @@ The console can poll the build_adaptor for build status or receive events listed desc = "the UUID of the provider image object"
* `instance_states(class_name)` - + method name = "instance_states" desc = "Returns a dictionary representing the finite state machine for instances." arguments: @@ -156,47 +156,47 @@ The console can poll the build_adaptor for build status or receive events listed name = "states" dtype = SCHEMA_DATA_STRING desc = "string representation of a dictionary describing the workflow" - + ### BuildAdaptor class ### name = "BuildAdaptor" package = "com.redhat.imagefactory"
#### Properties: #### * Image - + name = "image" dtype = SCHEMA_DATA_STRING desc = "the uuid of the image being built or pushed" * Build - + name = "build" dtype = SCHEMA_DATA_STRING desc = "the uuid of the image build being built or pushed" * Status - + name = "status" dtype = SCHEMA_DATA_STRING desc = "string representing the status (see instance_states() on ImageFactory)" * Percentage Complete - + name = "percent_complete" dtype = SCHEMA_DATA_INT desc = "the estimated percentage through an operation" * Target/Provider Image ID - + name = "image_id" dtype = SCHEMA_DATA_STRING desc = "the uuid of the newly created target or provider image"
#### Methods: #### * `abort()` - + name = "abort" desc = "If possible, abort running build."
#### Events: #### * Status Updates - + name = "BuildAdaptorStatusEvent" package = "com.redhat.imagefactory" attributes: @@ -212,9 +212,9 @@ The console can poll the build_adaptor for build status or receive events listed name = "old_status" dtype = SCHEMA_DATA_STRING desc = "string value of the old status" - + * Updates to the Percentage Complete - + name = "BuildAdaptorPercentCompleteEvent" package = "com.redhat.imagefactory" attributes: @@ -227,9 +227,9 @@ The console can poll the build_adaptor for build status or receive events listed name = "percent_complete" dtype = SCHEMA_DATA_INT desc = "the estimated percentage through an operation" - + * Failure Notification - + name = "BuildFailedEvent" package = "com.redhat.imagefactory" attributes: diff --git a/Documentation/man/imagefactory.1 b/Documentation/man/imagefactory.1 index 6f36a0c..ed70b14 100644 --- a/Documentation/man/imagefactory.1 +++ b/Documentation/man/imagefactory.1 @@ -23,7 +23,7 @@ Currently supported private cloud services: RHEV-M, KVM.
.SH OPTIONS
-.B -h, --help +.B -h, --help Display the help page.
.B --version @@ -51,7 +51,7 @@ Set the timeout period for image building in seconds. (default: 3600) Provide a QMFv2 agent interface.
.B --foreground -Stay in the foreground and avoid launching a daemon. +Stay in the foreground and avoid launching a daemon. Launching in foreground allows the user to see what messages are being passed to QMF. For more detailed output use this parameter together with .B --debug. If the parameter is not specified, the QMF agent will be started as a daemon. (default: False) @@ -66,7 +66,7 @@ URL of qpidd to connect to. (default: localhost) Build specified system and exit.
.B --template TEMPLATE -Template to build from. Template can be passed as URL, PATH, UUID or XML. Please refer to the +Template to build from. Template can be passed as URL, PATH, UUID or XML. Please refer to the .BR XML .BR EXAMPLES section at the bottom. @@ -83,9 +83,9 @@ The list of cloud services to target. Examples include ec2, rhevm, vsphere, rack The list of cloud service providers to push the emacs. Examples include ec2-us-east-1, ec2-us-west-1, rackspace.
.B --credentials CREDENTIALS -Cloud provider credentials. Credentials must be passed as XML file. Please refer to the -.BR XML -.BR EXAMPLES +Cloud provider credentials. Credentials must be passed as XML file. Please refer to the +.BR XML +.BR EXAMPLES section at the bottom.
.SS Image Warehouse: @@ -126,7 +126,7 @@ rackspace = "<provider_credentials> \ </rackspace_credentials> \ </provider_credentials>"
-.SS TEMPLATE +.SS TEMPLATE eg. a Windows 2008 64bit example
<template> @@ -160,7 +160,3 @@ eg. a Windows 2008 64bit example </package> </packages> </template> - - - - diff --git a/imgfac/ReservationManager.py b/imgfac/ReservationManager.py index 56e1f80..08edc0d 100644 --- a/imgfac/ReservationManager.py +++ b/imgfac/ReservationManager.py @@ -100,7 +100,7 @@ class ReservationManager(object): self.log.warn('No reservation for %s to cancel!' % filepath) else: raise e - except KeyError,e: + except KeyError, e: if(quiet): self.log.warn('No reservations exist on %s!' % mount_path) else: @@ -148,9 +148,9 @@ class ReservationManager(object): def available_space_for_path(self, path): """ TODO: Docstring for available_space_for_path - - @param path TODO - + + @param path TODO + @return TODO """ mount_path = self._mount_for_path(path) diff --git a/imgfac/builders/FedoraBuilder.py b/imgfac/builders/FedoraBuilder.py index a311bd8..9f559a8 100644 --- a/imgfac/builders/FedoraBuilder.py +++ b/imgfac/builders/FedoraBuilder.py @@ -1143,7 +1143,7 @@ class FedoraBuilder(BaseBuilder):
input_image_compressed_name = input_image_name + ".gz" input_image_compressed=input_image + ".gz" - + if not os.path.isfile(input_image_compressed): self.log.debug("No compressed version of image file found - compressing now") f_out = open(input_image_compressed, 'wb') @@ -1262,7 +1262,7 @@ class FedoraBuilder(BaseBuilder): if volume.status == "available": retcode = 0 break - self.log.debug("Volume status (%s) - waiting for 'available': %d/600" % (volume.status, i*10)) + self.log.debug("Volume status (%s) - waiting for 'available': %d/600" % (volume.status, i*10)) sleep(10)
if retcode: @@ -1322,11 +1322,11 @@ class FedoraBuilder(BaseBuilder): self.log.debug("Registering snapshot (%s) as new EBS AMI" % (snapshot.id)) ebs = EBSBlockDeviceType() ebs.snapshot_id = snapshot.id - block_map = BlockDeviceMapping() - block_map['/dev/sda1'] = ebs - result = conn.register_image(name='ImageFactory created AMI - %s' % (self.new_image_id), + block_map = BlockDeviceMapping() + block_map['/dev/sda1'] = ebs + result = conn.register_image(name='ImageFactory created AMI - %s' % (self.new_image_id), description='ImageFactory created AMI - %s' % (self.new_image_id), - architecture=self.tdlobj.arch, kernel_id=aki, + architecture=self.tdlobj.arch, kernel_id=aki, root_device_name='/dev/sda1', block_device_map=block_map)
ami_id = str(result) @@ -1335,9 +1335,9 @@ class FedoraBuilder(BaseBuilder): self.log.debug("Stopping EC2 instance and deleting temp security group and volume") instance.stop() factory_security_group.delete() - key_file_object.close() + key_file_object.close() conn.delete_key_pair(key_name) - + if volume: self.log.debug("Waiting up to 240 seconds for instance (%s) to shut down" % (instance.id)) retcode = 1 diff --git a/imgfac/builders/RHELBuilder.py b/imgfac/builders/RHELBuilder.py index dabfbca..96548a9 100644 --- a/imgfac/builders/RHELBuilder.py +++ b/imgfac/builders/RHELBuilder.py @@ -17,18 +17,17 @@ import zope from IBuilder import IBuilder from BaseBuilder import BaseBuilder
- class RHELBuilder(BaseBuilder): - """docstring for RHELBuilder""" - zope.interface.implements(IBuilder) + """docstring for RHELBuilder""" + zope.interface.implements(IBuilder)
# Initializer - def __init__(self, template=None, target=None): - super(RHELBuilder, self).__init__(template, target) + def __init__(self, template=None, target=None): + super(RHELBuilder, self).__init__(template, target)
# Image actions - def build_image(self, build_id=None): - pass + def build_image(self, build_id=None): + pass
- def abort(self): - pass + def abort(self): + pass
The only problem I see is in FAQ.markdown. The whitespace is meaningful to the rendering and this changes it by putting the questions and answers on the same line rather than splitting them the way I had it.
I ACK the rest of it.
-steve
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
- Remove trailing whitespace
- Switch some tabs to spaces (like the rest of the code)
- Add some whitespace between arguments.
Signed-off-by: Chris Lalancette clalance@redhat.com
Documentation/FAQ.markdown | 24 ++++++++-------- Documentation/INSTALL.markdown | 10 +++--- Documentation/QMF.markdown | 56 +++++++++++++++++++------------------- Documentation/man/imagefactory.1 | 18 +++++------- imgfac/ReservationManager.py | 8 +++--- imgfac/builders/FedoraBuilder.py | 16 +++++----- imgfac/builders/RHELBuilder.py | 17 +++++------ 7 files changed, 72 insertions(+), 77 deletions(-)
diff --git a/Documentation/FAQ.markdown b/Documentation/FAQ.markdown index dd029bd..dad7e4c 100644 --- a/Documentation/FAQ.markdown +++ b/Documentation/FAQ.markdown @@ -1,43 +1,43 @@ # Frequently Asked Questions #
## Installation ## -1. Q: How do I install image_factory? +1. Q: How do I install image_factory? A: The simplest approach is to install from rpm. To read about creating the rpm and/or other installation options, please read the [INSTALL](https://github.com/aeolusproject/image_factory/blob/master/Documentation/INSTALL.markdown) document in the Documentation directory of source distribution.
## Configuration ## -1. Q: Where does the configuration live? +1. Q: Where does the configuration live? A: `/etc/imagefactory.conf`
-2. Q: Where does imagefactory log by default? +2. Q: Where does imagefactory log by default? A: `/var/log/imagefactory.log`
-3. Q: Where does Image Factory put the images that are created? +3. Q: Where does Image Factory put the images that are created? A: There are really three parts to this answer...
- Image Factory stores a local copy of the image in the path pointed to by the configuration option `imgdir`, which defaults to `/var/tmp`
- Oz caches .iso files in `/var/lib/oz`
- If an Image Warehouse is configured and the JEOS was created by Image Factory, the resulting image is uploaded to the Image Warehouse in the `images` bucket.
## Running and using Image Factory ## -1. Q: How do I enable Image Factory? +1. Q: How do I enable Image Factory? A: Run `service imagefactory start`
-2. Q: How do I run Image Factory in debug mode? +2. Q: How do I run Image Factory in debug mode? A: Shut the service down with `service imagefactory stop`. Then run `imagefactory --qmf --debug` as root.
-3. Q: How do I interact with the Image Factory daemon? +3. Q: How do I interact with the Image Factory daemon? A: Please read through the QMF guide in the Documentation directory for some examples.
-4. Q: How do I run the tests to know that what I have works? +4. Q: How do I run the tests to know that what I have works? A: The unit tests can be run from the top level of the source tree with the command `python -m unittest discover -v`
## Troubleshooting ## -1. Q: After starting imagefactory, the log shows "Please enter your password" instead of "image_factory has qmf/qpid address...". +1. Q: After starting imagefactory, the log shows "Please enter your password" instead of "image_factory has qmf/qpid address...". A: Put the following in /etc/qpidd.conf
cluster-mechanism=ANONYMOUS auth=no
-2. Q: The imagefactory log shows an exception "AttributeError: 'NoneType' object has no attribute 'makefile'" +2. Q: The imagefactory log shows an exception "AttributeError: 'NoneType' object has no attribute 'makefile'" A: Please make certain the image warehouse is reachable and responds.
diff --git a/Documentation/INSTALL.markdown b/Documentation/INSTALL.markdown index dc95b86..59198a5 100644 --- a/Documentation/INSTALL.markdown +++ b/Documentation/INSTALL.markdown @@ -5,17 +5,17 @@ Installing from an rpm will install all dependencies needed by Image Factory. F
- __yum__
- The Aeolus project has built rpms available via the testing [repository](http://repos.fedorapeople.org/repos/aeolus/packages-testing/).
- The Aeolus project has built rpms available via the testing [repository](http://repos.fedorapeople.org/repos/aeolus/packages-testing/). After adding the appropriate `.repo` file to your `/etc/yum.repos.d` directory, installation is as easy as running: `yum install imagefactory`
__local rpm__
We have not yet established a download location for built packages. Until that time, one can easily build the rpm from the source as described below.
-__Creating an rpm:__ -After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `setup.py bdist_rpm` +__Creating an rpm:__ +After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `make rpm` A directory named `dist` will be created to store the newly built rpm.
## via setup.py ## -After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `setup.py install` -Be sure to have the dependencies described in the Image Factory [README](https://github.com/aeolusproject/image_factory/blob/master/README.markdown) configured. \ No newline at end of file +After obtaining a copy of Image Factory from [source control](git://github.com/aeolusproject/image_factory.git), run: `setup.py install` +Be sure to have the dependencies described in the Image Factory [README](https://github.com/aeolusproject/image_factory/blob/master/README.markdown) configured. diff --git a/Documentation/QMF.markdown b/Documentation/QMF.markdown index cdcb303..3d8956a 100644 --- a/Documentation/QMF.markdown +++ b/Documentation/QMF.markdown @@ -3,7 +3,7 @@ The script imagefactory can be used to start a daemon running a QMF agent. Using the 'qpidd' option, the host that qpidd is running on can be set. Currently this defaults to the default qpidd port on localhost. Future versions will accept an amqp:// URL to fully configure the agent session.
Example usage:
- ./imagefactory --verbose --qmf --qpidd localhost
Once the agent has opened a session, QMF consoles can send messages to the agent start image builds, get status on builds, etc. @@ -12,10 +12,10 @@ Once the agent has opened a session, QMF consoles can send messages to the agent
## Image Factory Agent ##
-Once a console connects to the same QMF bus as the Image Factory, the agent can be found given the vendor ("redhat.com") and product ("imagefactory") portions of the agent name. +Once a console connects to the same QMF bus as the Image Factory, the agent can be found given the vendor ("redhat.com") and product ("imagefactory") portions of the agent name.
Example 1 - Getting an agent list in python
- In [1]: import cqpid In [2]: from qmf2 import * In [3]: connection = cqpid.Connection("localhost")
@@ -23,29 +23,29 @@ Once a console connects to the same QMF bus as the Image Factory, the agent can In [5]: session = ConsoleSession(connection) In [6]: session.open() In [7]: session.getAgents()
Out[7]:
[apache.org:qpidd:41fc7abd-56cc-4090-97a8-011ef1104f7f, redhat.com:imagefactory:5edbf641-78ba-4375-8ec8-f60f555e173a]Out[7]:
Given an agent, an imagefactory can be fetched:
Example 2 - Querying for an imagefactory
- In [14]: factory = agent.query("{class:ImageFactory, package:'com.redhat.imagefactory'}")[0]
The imagefactory can then be sent a message to build an image:
Example 3 - Building an image
- In [15]: factory.build_image("", "", "<template></template>", ["mock"])
Out[15]:
{'build_adaptors': [{'_agent_epoch': 1L, '_agent_name': 'redhat.com:imagefactory:5edbf641-78ba-4375-8ec8-f60f555e173a', '_object_name': 'build_adaptor:db21dd5e-b3b6-49d1-a432-07f9f2d1c3c5'}]}Out[15]:
The console can poll the build_adaptor for build status or receive events listed below.
Example 4 - Querying for build status
- In [20]: response = factory.build_image("", "" "<template></template>", ["mock"]) In [21]: build_addr = DataAddr(response['build_adaptors'][0]) In [22]: query = Query(build_addr)
@@ -55,7 +55,7 @@ The console can poll the build_adaptor for build status or receive events listed Out[24]: 'FINISHING' In [25]: agent.query(query)[0].status Out[25]: 'COMPLETED'
## Schemas for Image Factory classes ##
@@ -64,7 +64,7 @@ The console can poll the build_adaptor for build status or receive events listed package = "com.redhat.imagefactory" #### Methods: ####
- `build_image(image, build, template, targets)`
method name = "build_image" desc = "Build an image for the given targets" arguments:
@@ -86,7 +86,7 @@ The console can poll the build_adaptor for build status or receive events listed desc = "the QMF addresses of the build_adaptors instantiated"
- `provider_image(image, build, providers, credentials)`
method name = "push_image" desc = "Push an image to the given providers." arguments:
@@ -108,7 +108,7 @@ The console can poll the build_adaptor for build status or receive events listed desc = "the QMF addresses of the build_adaptors instantiated"
- `import_image(image, build, target_identifier, image_desc, target, provider)`
method name = "import_image" desc = "Import an image using a target specific image identifier" arguments:
@@ -145,7 +145,7 @@ The console can poll the build_adaptor for build status or receive events listed desc = "the UUID of the provider image object"
- `instance_states(class_name)`
method name = "instance_states" desc = "Returns a dictionary representing the finite state machine for instances." arguments:
@@ -156,47 +156,47 @@ The console can poll the build_adaptor for build status or receive events listed name = "states" dtype = SCHEMA_DATA_STRING desc = "string representation of a dictionary describing the workflow"
### BuildAdaptor class ### name = "BuildAdaptor" package = "com.redhat.imagefactory"
#### Properties: ####
- Image
name = "image" dtype = SCHEMA_DATA_STRING desc = "the uuid of the image being built or pushed"
- Build
name = "build" dtype = SCHEMA_DATA_STRING desc = "the uuid of the image build being built or pushed"
- Status
name = "status" dtype = SCHEMA_DATA_STRING desc = "string representing the status (see instance_states() on ImageFactory)"
- Percentage Complete
name = "percent_complete" dtype = SCHEMA_DATA_INT desc = "the estimated percentage through an operation"
- Target/Provider Image ID
name = "image_id" dtype = SCHEMA_DATA_STRING desc = "the uuid of the newly created target or provider image"
#### Methods: ####
- `abort()`
name = "abort" desc = "If possible, abort running build."
#### Events: ####
- Status Updates
name = "BuildAdaptorStatusEvent" package = "com.redhat.imagefactory" attributes:
@@ -212,9 +212,9 @@ The console can poll the build_adaptor for build status or receive events listed name = "old_status" dtype = SCHEMA_DATA_STRING desc = "string value of the old status"
- Updates to the Percentage Complete
name = "BuildAdaptorPercentCompleteEvent" package = "com.redhat.imagefactory" attributes:
@@ -227,9 +227,9 @@ The console can poll the build_adaptor for build status or receive events listed name = "percent_complete" dtype = SCHEMA_DATA_INT desc = "the estimated percentage through an operation"
- Failure Notification
name = "BuildFailedEvent" package = "com.redhat.imagefactory" attributes:
diff --git a/Documentation/man/imagefactory.1 b/Documentation/man/imagefactory.1 index 6f36a0c..ed70b14 100644 --- a/Documentation/man/imagefactory.1 +++ b/Documentation/man/imagefactory.1 @@ -23,7 +23,7 @@ Currently supported private cloud services: RHEV-M, KVM.
.SH OPTIONS
-.B -h, --help +.B -h, --help Display the help page.
.B --version @@ -51,7 +51,7 @@ Set the timeout period for image building in seconds. (default: 3600) Provide a QMFv2 agent interface.
.B --foreground -Stay in the foreground and avoid launching a daemon. +Stay in the foreground and avoid launching a daemon. Launching in foreground allows the user to see what messages are being passed to QMF. For more detailed output use this parameter together with .B --debug. If the parameter is not specified, the QMF agent will be started as a daemon. (default: False) @@ -66,7 +66,7 @@ URL of qpidd to connect to. (default: localhost) Build specified system and exit.
.B --template TEMPLATE -Template to build from. Template can be passed as URL, PATH, UUID or XML. Please refer to the +Template to build from. Template can be passed as URL, PATH, UUID or XML. Please refer to the .BR XML .BR EXAMPLES section at the bottom. @@ -83,9 +83,9 @@ The list of cloud services to target. Examples include ec2, rhevm, vsphere, rack The list of cloud service providers to push the emacs. Examples include ec2-us-east-1, ec2-us-west-1, rackspace.
.B --credentials CREDENTIALS -Cloud provider credentials. Credentials must be passed as XML file. Please refer to the -.BR XML -.BR EXAMPLES +Cloud provider credentials. Credentials must be passed as XML file. Please refer to the +.BR XML +.BR EXAMPLES section at the bottom.
.SS Image Warehouse: @@ -126,7 +126,7 @@ rackspace = "<provider_credentials> \ </rackspace_credentials> \ </provider_credentials>"
-.SS TEMPLATE +.SS TEMPLATE eg. a Windows 2008 64bit example
<template> @@ -160,7 +160,3 @@ eg. a Windows 2008 64bit example </package> </packages> </template> - - - - diff --git a/imgfac/ReservationManager.py b/imgfac/ReservationManager.py index 56e1f80..08edc0d 100644 --- a/imgfac/ReservationManager.py +++ b/imgfac/ReservationManager.py @@ -100,7 +100,7 @@ class ReservationManager(object): self.log.warn('No reservation for %s to cancel!' % filepath) else: raise e - except KeyError,e: + except KeyError, e: if(quiet): self.log.warn('No reservations exist on %s!' % mount_path) else: @@ -148,9 +148,9 @@ class ReservationManager(object): def available_space_for_path(self, path): """ TODO: Docstring for available_space_for_path - - @param path TODO - + + @param path TODO + @return TODO """ mount_path = self._mount_for_path(path) diff --git a/imgfac/builders/FedoraBuilder.py b/imgfac/builders/FedoraBuilder.py index a311bd8..9f559a8 100644 --- a/imgfac/builders/FedoraBuilder.py +++ b/imgfac/builders/FedoraBuilder.py @@ -1143,7 +1143,7 @@ class FedoraBuilder(BaseBuilder):
input_image_compressed_name = input_image_name + ".gz" input_image_compressed=input_image + ".gz"
if not os.path.isfile(input_image_compressed): self.log.debug("No compressed version of image file found - compressing now") f_out = open(input_image_compressed, 'wb')
@@ -1262,7 +1262,7 @@ class FedoraBuilder(BaseBuilder): if volume.status == "available": retcode = 0 break
self.log.debug("Volume status (%s) - waiting for 'available': %d/600" % (volume.status, i*10))
self.log.debug("Volume status (%s) - waiting for 'available': %d/600" % (volume.status, i*10)) sleep(10) if retcode:
@@ -1322,11 +1322,11 @@ class FedoraBuilder(BaseBuilder): self.log.debug("Registering snapshot (%s) as new EBS AMI" % (snapshot.id)) ebs = EBSBlockDeviceType() ebs.snapshot_id = snapshot.id
block_map = BlockDeviceMapping()
block_map['/dev/sda1'] = ebs
result = conn.register_image(name='ImageFactory created AMI - %s' % (self.new_image_id),
block_map = BlockDeviceMapping()
block_map['/dev/sda1'] = ebs
result = conn.register_image(name='ImageFactory created AMI - %s' % (self.new_image_id), description='ImageFactory created AMI - %s' % (self.new_image_id),
architecture=self.tdlobj.arch, kernel_id=aki,
architecture=self.tdlobj.arch, kernel_id=aki, root_device_name='/dev/sda1', block_device_map=block_map) ami_id = str(result)
@@ -1335,9 +1335,9 @@ class FedoraBuilder(BaseBuilder): self.log.debug("Stopping EC2 instance and deleting temp security group and volume") instance.stop() factory_security_group.delete()
key_file_object.close()
key_file_object.close() conn.delete_key_pair(key_name)
if volume: self.log.debug("Waiting up to 240 seconds for instance (%s) to shut down" % (instance.id)) retcode = 1
diff --git a/imgfac/builders/RHELBuilder.py b/imgfac/builders/RHELBuilder.py index dabfbca..96548a9 100644 --- a/imgfac/builders/RHELBuilder.py +++ b/imgfac/builders/RHELBuilder.py @@ -17,18 +17,17 @@ import zope from IBuilder import IBuilder from BaseBuilder import BaseBuilder
class RHELBuilder(BaseBuilder):
- """docstring for RHELBuilder"""
- zope.interface.implements(IBuilder)
- """docstring for RHELBuilder"""
- zope.interface.implements(IBuilder)
# Initializer
- def __init__(self, template=None, target=None):
super(RHELBuilder, self).__init__(template, target)
- def __init__(self, template=None, target=None):
super(RHELBuilder, self).__init__(template, target)
# Image actions
- def build_image(self, build_id=None):
pass
- def build_image(self, build_id=None):
pass
- def abort(self):
pass
- def abort(self):
pass
-- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- Makefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/Makefile b/Makefile index 1535090..afa6eba 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ sdist: python setup.py sdist
signed-rpm: sdist version - rpmbuild -ba imagefactory.spec --sign --define "_sourcedir `pwd`/dist"i + rpmbuild -ba imagefactory.spec --sign --define "_sourcedir `pwd`/dist"
rpm: sdist version rpmbuild -ba imagefactory.spec --define "_sourcedir `pwd`/dist"
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette <clalance@redhat.com>
Makefile | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/Makefile b/Makefile index 1535090..afa6eba 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ sdist: python setup.py sdist
signed-rpm: sdist version
- rpmbuild -ba imagefactory.spec --sign --define "_sourcedir `pwd`/dist"i
- rpmbuild -ba imagefactory.spec --sign --define "_sourcedir `pwd`/dist"
rpm: sdist version rpmbuild -ba imagefactory.spec --define "_sourcedir `pwd`/dist" -- 1.7.4.4
This just makes some of the code easier to read.
Signed-off-by: Chris Lalancette clalance@redhat.com --- imagefactory | 10 +++- imgfac/BuildDispatcher.py | 65 ++++++++++++++++------- imgfac/BuildJob.py | 9 ++- imgfac/ImageWarehouse.py | 103 ++++++++++++++++++++++++----------- imgfac/Template.py | 3 +- imgfac/qmfagent/BuildAdaptor.py | 112 +++++++++++++++++++++++++++----------- setup.py | 5 ++- 7 files changed, 214 insertions(+), 93 deletions(-)
diff --git a/imagefactory b/imagefactory index eb6c797..783c9a6 100755 --- a/imagefactory +++ b/imagefactory @@ -41,7 +41,8 @@ class Application(Singleton):
def _singleton_init(self): super(Application, self)._singleton_init() - self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.log = logging.getLogger('%s.%s' % (__name__, + self.__class__.__name__)) self.daemon = False self.pid_file_path = "/var/run/imagefactory.pid" signal.signal(signal.SIGTERM, self.signal_handler) @@ -57,9 +58,12 @@ class Application(Singleton):
def setup_logging(self): if (self.app_config['foreground']): - logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s') + logging.basicConfig(level=logging.WARNING, + format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s') else: - logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s', filename='/var/log/imagefactory.log') + logging.basicConfig(level=logging.WARNING, + format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s', + filename='/var/log/imagefactory.log') if (self.app_config['debug']): logging.getLogger('').setLevel(logging.DEBUG) elif (self.app_config['verbose']): diff --git a/imgfac/BuildDispatcher.py b/imgfac/BuildDispatcher.py index 6653c5b..9a6f2fe 100644 --- a/imgfac/BuildDispatcher.py +++ b/imgfac/BuildDispatcher.py @@ -30,18 +30,22 @@ class BuildDispatcher(Singleton): self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) self.warehouse = ImageWarehouse(ApplicationConfiguration().configuration['warehouse'])
- def import_image(self, image_id, build_id, target_identifier, image_desc, target, provider): + def import_image(self, image_id, build_id, target_identifier, image_desc, + target, provider): image_id = self._ensure_image(image_id, image_desc) build_id = self._ensure_build(image_id, build_id)
target_image_id = self._ensure_target_image(build_id, target) - provider_image_id = self._ensure_provider_image(target_image_id, provider, target_identifier) + provider_image_id = self._ensure_provider_image(target_image_id, + provider, + target_identifier)
self._commit_build(image_id, build_id)
return (image_id, build_id, target_image_id, provider_image_id)
- def build_image_for_targets(self, image_id, build_id, template, targets, job_cls = BuildJob, *args, **kwargs): + def build_image_for_targets(self, image_id, build_id, template, targets, + job_cls = BuildJob, *args, **kwargs): if image_id and not targets: targets = self._targets_for_image_id(image_id)
@@ -60,17 +64,21 @@ class BuildDispatcher(Singleton):
return jobs
- def push_image_to_providers(self, image_id, build_id, providers, credentials, job_cls = BuildJob, *args, **kwargs): + def push_image_to_providers(self, image_id, build_id, providers, + credentials, job_cls = BuildJob, *args, + **kwargs): if not build_id: build_id = self._latest_unpushed(image_id)
- watcher = PushWatcher(image_id, build_id, len(providers), self.warehouse) + watcher = PushWatcher(image_id, build_id, len(providers), + self.warehouse)
jobs = [] for provider in providers: target = self._map_provider_to_target(provider)
- target_image_id = self._target_image_for_build_and_target(build_id, target) + target_image_id = self._target_image_for_build_and_target(build_id, + target)
template = self._template_for_target_image_id(target_image_id)
@@ -106,17 +114,24 @@ class BuildDispatcher(Singleton): return self.warehouse.store_build(None, dict(image = image_id))
def _ensure_target_image(self, build_id, target): - target_image_id = self._target_image_for_build_and_target(build_id, target) + target_image_id = self._target_image_for_build_and_target(build_id, + target) if target_image_id: return target_image_id - return self.warehouse.store_target_image(None, None, dict(build=build_id, target=target)) + return self.warehouse.store_target_image(None, None, + dict(build=build_id, + target=target))
- def _ensure_provider_image(self, target_image_id, provider, target_identifier): + def _ensure_provider_image(self, target_image_id, provider, + target_identifier): provider_image_id = self._provider_image_for_target_image_and_provider(target_image_id, provider) if provider_image_id: - self._set_provider_image_attr(provider_image_id, 'target_identifier', target_identifier) + self._set_provider_image_attr(provider_image_id, + 'target_identifier', + target_identifier) else: - metadata = dict(target_image=target_image_id, provider=provider, target_identifier=target_identifier) + metadata = dict(target_image=target_image_id, provider=provider, + target_identifier=target_identifier) return self.warehouse.create_provider_image(None, None, metadata)
def _load_template(self, image_id, build_id, template): @@ -134,21 +149,29 @@ class BuildDispatcher(Singleton): self._set_latest_build(image_id, build_id)
def _latest_build(self, image_id): - return self.warehouse.metadata_for_id_of_type(['latest_build'], image_id, 'image')['latest_build'] + return self.warehouse.metadata_for_id_of_type(['latest_build'], + image_id, + 'image')['latest_build']
def _latest_unpushed(self, image_id): - return self.warehouse.metadata_for_id_of_type(['latest_unpushed'], image_id, 'image')['latest_unpushed'] + return self.warehouse.metadata_for_id_of_type(['latest_unpushed'], + image_id, + 'image')['latest_unpushed']
def _set_latest_build(self, image_id, build_id): - self.warehouse.set_metadata_for_id_of_type({'latest_build' : build_id}, image_id, 'image') + self.warehouse.set_metadata_for_id_of_type({'latest_build' : build_id}, + image_id, 'image')
def _set_build_parent(self, build_id, parent_id): - self.warehouse.set_metadata_for_id_of_type({'parent' : parent_id}, build_id, 'build') + self.warehouse.set_metadata_for_id_of_type({'parent' : parent_id}, + build_id, 'build')
def _targets_for_build_id(self, build_id): targets = [] for target_image_id in self._target_images_for_build(build_id): - targets.append(self.warehouse.metadata_for_id_of_type(['target'], target_image_id, 'target_image')['target']) + targets.append(self.warehouse.metadata_for_id_of_type(['target'], + target_image_id, + 'target_image')['target']) return targets
def _targets_for_image_id(self, image_id): @@ -161,10 +184,12 @@ class BuildDispatcher(Singleton): return self.warehouse.query("target_image", "$build == "%s"" % (build_id,))
def _target_image_for_build_and_target(self, build_id, target): - results = self.warehouse.query("target_image", "$build == "%s" && $target == "%s"" % (build_id, target)) + results = self.warehouse.query("target_image", + "$build == "%s" && $target == "%s"" % (build_id, target)) return results[0] if results else None
- def _provider_image_for_target_image_and_provider(self, target_image_id, provider): + def _provider_image_for_target_image_and_provider(self, target_image_id, + provider): results = self.warehouse.query("provider_image", "$target_image == "%s" && $provider == "%s"" % (target_image_id, provider)) return results[0] if results else None
@@ -172,7 +197,9 @@ class BuildDispatcher(Singleton): self.warehouse.set_metadata_for_id_of_type({attr : value}, provider_image_id, "provider_image")
def _template_for_target_image_id(self, target_image_id): - return self.warehouse.metadata_for_id_of_type(['template'], target_image_id, 'target_image')['template'] + return self.warehouse.metadata_for_id_of_type(['template'], + target_image_id, + 'target_image')['template']
def _template_for_build_id(self, build_id): target_image_ids = self._target_images_for_build(build_id) diff --git a/imgfac/BuildJob.py b/imgfac/BuildJob.py index 0cd3773..aae87da 100644 --- a/imgfac/BuildJob.py +++ b/imgfac/BuildJob.py @@ -34,7 +34,8 @@ class BuildJob(object): def __init__(self, template, target, image_id = '', build_id = ''): super(BuildJob, self).__init__()
- self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.log = logging.getLogger('%s.%s' % (__name__, + self.__class__.__name__))
self.template = template if isinstance(template, Template) else Template(template) self.target = target @@ -58,7 +59,8 @@ class BuildJob(object): def push_image(self, target_image_id, provider, credentials, watcher=None): self._watcher = watcher self.provider = provider - kwargs = dict(target_image_id=target_image_id, provider=provider, credentials=credentials) + kwargs = dict(target_image_id=target_image_id, provider=provider, + credentials=credentials) self._start_builder_thread("push_image", arg_dict=kwargs)
def abort(self): @@ -70,7 +72,8 @@ class BuildJob(object): self._watcher.completed() self._watcher = None
- def builder_did_update_percentage(self, builder, original_percentage, new_percentage): + def builder_did_update_percentage(self, builder, original_percentage, + new_percentage): self.percent_complete = new_percentage
def builder_did_fail(self, builder, failure_type, failure_info): diff --git a/imgfac/ImageWarehouse.py b/imgfac/ImageWarehouse.py index 220adb5..8a498fc 100644 --- a/imgfac/ImageWarehouse.py +++ b/imgfac/ImageWarehouse.py @@ -30,19 +30,28 @@ class ImageWarehouse(object): url = props.prop("_url", "The url property.") image_bucket = props.prop("_image_bucket", "The image_bucket property.") build_bucket = props.prop("_build_bucket", "The build_bucket property.") - target_image_bucket = props.prop("_target_image_bucket", "The target_image_bucket property.") - template_bucket = props.prop("_template_bucket", "The template_bucket property.") + target_image_bucket = props.prop("_target_image_bucket", + "The target_image_bucket property.") + template_bucket = props.prop("_template_bucket", + "The template_bucket property.") icicle_bucket = props.prop("_icicle_bucket", "The icicle_bucket property.") - provider_image_bucket = props.prop("_provider_image_bucket", "The provider_image_bucket property.") + provider_image_bucket = props.prop("_provider_image_bucket", + "The provider_image_bucket property.")
def __repr__(self): - return "%s - %r" % (super(ImageWarehouse, self).__repr__(), self.__dict__) + return "%s - %r" % (super(ImageWarehouse, self).__repr__(), + self.__dict__)
def __str__(self): - return "%s - buckets(%s, %s, %s, %s)" % (self.url, self.target_image_bucket, self.template_bucket, self.icicle_bucket, self.provider_image_bucket) + return "%s - buckets(%s, %s, %s, %s)" % (self.url, + self.target_image_bucket, + self.template_bucket, + self.icicle_bucket, + self.provider_image_bucket)
def __init__(self, url=None): - self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.log = logging.getLogger('%s.%s' % (__name__, + self.__class__.__name__))
self.http = httplib2.Http()
@@ -64,9 +73,11 @@ class ImageWarehouse(object):
self.log.debug("Created Image Warehouse instance %s" % (self, ))
- def _http_request(self, url, method, body = None, content_type = 'text/plain'): + def _http_request(self, url, method, body = None, + content_type = 'text/plain'): try: - return self.http.request(url, method, body, headers={'content-type': content_type}) + return self.http.request(url, method, body, + headers={'content-type': content_type}) except Exception, e: raise WarehouseError("Problem encountered trying to reach image warehouse. Please check that iwhd is running and reachable.\nException text: %s" % (e, ))
@@ -101,31 +112,43 @@ class ImageWarehouse(object): return object_url
def query(self, object_type, expression): - object_url = self.__url_for_id_of_type("_query", object_type, create=False) - self.log.debug("Querying (%s) with expression (%s)" % (object_url, expression)) - xml = self._http_post(object_url, expression, 'application/x-www-form-urlencoded') + object_url = self.__url_for_id_of_type("_query", object_type, + create=False) + self.log.debug("Querying (%s) with expression (%s)" % (object_url, + expression)) + xml = self._http_post(object_url, expression, + 'application/x-www-form-urlencoded') if not xml: return [] - return map(lambda n: n.content, libxml2.parseDoc(xml).xpathEval("/objects/object/key")) + return map(lambda n: n.content, + libxml2.parseDoc(xml).xpathEval("/objects/object/key"))
def post_on_object_with_id_of_type(self, object_id, object_type, post_data): - object_url = self.__url_for_id_of_type(object_id, object_type, create=False) - return self._http_post(object_url, urllib.urlencode(post_data), 'application/x-www-form-urlencoded') + object_url = self.__url_for_id_of_type(object_id, object_type, + create=False) + return self._http_post(object_url, urllib.urlencode(post_data), + 'application/x-www-form-urlencoded')
def object_with_id_of_type(self, object_id, object_type, metadata_keys=()): - object_url = self.__url_for_id_of_type(object_id, object_type, create=False) + object_url = self.__url_for_id_of_type(object_id, object_type, + create=False) obj = self._http_get(object_url) if(len(metadata_keys) > 0): - metadata = self.metadata_for_id_of_type(metadata_keys, object_id, object_type) + metadata = self.metadata_for_id_of_type(metadata_keys, object_id, + object_type) else: metadata = {} return obj, metadata
- def object_for_target_image_id(self, target_image_id, object_type, metadata_keys=()): - object_url = self.__url_for_id_of_type(target_image_id, "target_image", create=False) + def object_for_target_image_id(self, target_image_id, object_type, + metadata_keys=()): + object_url = self.__url_for_id_of_type(target_image_id, "target_image", + create=False) object_id = self._http_get("%s/%s" % (object_url, object_type))
- the_object, metadata = self.object_with_id_of_type(object_id, object_type, metadata_keys) + the_object, metadata = self.object_with_id_of_type(object_id, + object_type, + metadata_keys) return object_id, the_object, metadata
def delete_object_at_url(self, object_url): @@ -138,11 +161,13 @@ class ImageWarehouse(object): return False
def delete_object_with_id_of_type(self, object_id, object_type): - object_url = self.__url_for_id_of_type(object_id, object_type, create=False) + object_url = self.__url_for_id_of_type(object_id, object_type, + create=False) return self.delete_object_at_url(object_url)
def set_metadata_for_id_of_type(self, metadata, object_id, object_type): - object_url = self.__url_for_id_of_type(object_id, object_type, create=False) + object_url = self.__url_for_id_of_type(object_id, object_type, + create=False) self.set_metadata_for_object_at_url(metadata, object_url)
def set_metadata_for_object_at_url(self, metadata, object_url): @@ -151,14 +176,17 @@ class ImageWarehouse(object): self._http_put("%s/%s" % (object_url, item), str(metadata[item]))
def metadata_for_id_of_type(self, metadata_keys, object_id, object_type): - object_url = self.__url_for_id_of_type(object_id, object_type, create=False) + object_url = self.__url_for_id_of_type(object_id, object_type, + create=False) return self.metadata_for_object_at_url(metadata_keys, object_url)
def metadata_for_object_at_url(self, metadata_keys, object_url): - self.log.debug("Getting metadata (%s) from %s" % (metadata_keys, object_url)) + self.log.debug("Getting metadata (%s) from %s" % (metadata_keys, + object_url)) metadata = dict() for item in metadata_keys: - metadata.update( { item : self._http_get("%s/%s" % (object_url, item)) } ) + metadata.update( { item : self._http_get("%s/%s" % (object_url, + item)) } ) return metadata
def store_image(self, image_id, image_xml, metadata=None): @@ -197,7 +225,8 @@ class ImageWarehouse(object): image_size = os.path.getsize(image_file_path) curl = pycurl.Curl() curl.setopt(pycurl.URL, object_url) - curl.setopt(pycurl.HTTPHEADER, ["User-Agent: Load Tool (PyCURL Load Tool)"]) + curl.setopt(pycurl.HTTPHEADER, + ["User-Agent: Load Tool (PyCURL Load Tool)"]) curl.setopt(pycurl.PUT, 1) curl.setopt(pycurl.INFILE, image_file) curl.setopt(pycurl.INFILESIZE, image_size) @@ -207,7 +236,8 @@ class ImageWarehouse(object): except Exception, e: raise WarehouseError("Problem encountered trying to reach image warehouse. Please check that iwhd is running and reachable.\nException text: %s" % (e, ))
- def store_target_image(self, target_image_id, image_file_path, metadata=None): + def store_target_image(self, target_image_id, image_file_path, + metadata=None): if(not target_image_id): target_image_id = str(uuid.uuid4()) object_url = self.__url_for_id_of_type(target_image_id, "target_image") @@ -227,7 +257,8 @@ class ImageWarehouse(object): def create_provider_image(self, image_id, txt=None, metadata=None): if(not image_id): image_id = str(uuid.uuid4()) - object_url = self.__url_for_id_of_type(image_id, object_type="provider_image") + object_url = self.__url_for_id_of_type(image_id, + object_type="provider_image") if(not txt): if(metadata): txt = "This object has the following metadata keys: %s" % (metadata.keys(), ) @@ -246,7 +277,8 @@ class ImageWarehouse(object): def store_template(self, template, template_id=None, metadata=None): if(not template_id): template_id = str(uuid.uuid4()) - object_url = self.__url_for_id_of_type(template_id, object_type="template") + object_url = self.__url_for_id_of_type(template_id, + object_type="template")
self._http_put(object_url, template)
@@ -275,16 +307,20 @@ class ImageWarehouse(object): return self.object_with_id_of_type(icicle_id, "icicle", metadata_keys)
def icicle_for_target_image_id(self, target_image_id, metadata_keys=()): - return self.object_for_target_image_id(target_image_id, "icicle", metadata_keys) + return self.object_for_target_image_id(target_image_id, "icicle", + metadata_keys)
def template_with_id(self, template_id, metadata_keys=()): - return self.object_with_id_of_type(template_id, "template", metadata_keys) + return self.object_with_id_of_type(template_id, "template", + metadata_keys)
def template_for_target_image_id(self, target_image_id, metadata_keys=()): - return self.object_for_target_image_id(target_image_id, "template", metadata_keys) + return self.object_for_target_image_id(target_image_id, "template", + metadata_keys)
def target_image_with_id(self, target_image_id, metadata_keys=()): - return self.object_with_id_of_type(target_image_id, "target_image", metadata_keys) + return self.object_with_id_of_type(target_image_id, "target_image", + metadata_keys)
def remove_template_with_id(self, template_id): return self.delete_object_with_id_of_type(template_id, "template") @@ -293,7 +329,8 @@ class ImageWarehouse(object): return self.delete_object_with_id_of_type(icicle_id, "icicle")
def remove_target_image_with_id(self, target_image_id): - return self.delete_object_with_id_of_type(target_image_id, "target_image") + return self.delete_object_with_id_of_type(target_image_id, + "target_image")
class WarehouseError(Exception): """Error related to image warehouse interactions.""" diff --git a/imgfac/Template.py b/imgfac/Template.py index 82b4260..4eba266 100644 --- a/imgfac/Template.py +++ b/imgfac/Template.py @@ -37,7 +37,8 @@ class Template(object): return super(Template, self).__repr__
def __init__(self, template=None, uuid=None, url=None, xml=None): - self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) + self.log = logging.getLogger('%s.%s' % (__name__, + self.__class__.__name__)) self.warehouse = ImageWarehouse(ApplicationConfiguration().configuration["warehouse"])
self.identifier = None diff --git a/imgfac/qmfagent/BuildAdaptor.py b/imgfac/qmfagent/BuildAdaptor.py index 0f68a12..291984d 100644 --- a/imgfac/qmfagent/BuildAdaptor.py +++ b/imgfac/qmfagent/BuildAdaptor.py @@ -24,48 +24,88 @@ from imgfac.BuildJob import BuildJob
class BuildAdaptor(BuildJob): # QMF schema for BuildAdaptor - qmf_schema = Schema(SCHEMA_TYPE_DATA, "com.redhat.imagefactory", "BuildAdaptor") - qmf_schema.addProperty(SchemaProperty("image", SCHEMA_DATA_STRING, desc="UUID of the image")) - qmf_schema.addProperty(SchemaProperty("build", SCHEMA_DATA_STRING, desc="UUD of the build")) - qmf_schema.addProperty(SchemaProperty("status", SCHEMA_DATA_STRING, desc="string representing the status (see instance_states() on ImageFactory)")) - qmf_schema.addProperty(SchemaProperty("percent_complete", SCHEMA_DATA_INT, desc="the estimated percentage through an operation")) - qmf_schema.addProperty(SchemaProperty("image_id", SCHEMA_DATA_STRING, desc="UUID of the newly created target or provider image")) - qmf_schema.addMethod(SchemaMethod("abort", desc = "If possible, abort running build.")) + qmf_schema = Schema(SCHEMA_TYPE_DATA, "com.redhat.imagefactory", + "BuildAdaptor") + qmf_schema.addProperty(SchemaProperty("image", SCHEMA_DATA_STRING, + desc="UUID of the image")) + qmf_schema.addProperty(SchemaProperty("build", SCHEMA_DATA_STRING, + desc="UUD of the build")) + qmf_schema.addProperty(SchemaProperty("status", SCHEMA_DATA_STRING, + desc="string representing the status (see instance_states() on ImageFactory)")) + qmf_schema.addProperty(SchemaProperty("percent_complete", SCHEMA_DATA_INT, + desc="the estimated percentage through an operation")) + qmf_schema.addProperty(SchemaProperty("image_id", SCHEMA_DATA_STRING, + desc="UUID of the newly created target or provider image")) + qmf_schema.addMethod(SchemaMethod("abort", + desc="If possible, abort running build."))
#QMF schema for status change event - qmf_event_schema_status = Schema(SCHEMA_TYPE_EVENT, "com.redhat.imagefactory", "BuildAdaptorStatusEvent") - qmf_event_schema_status.addProperty(SchemaProperty("addr", SCHEMA_DATA_MAP, desc="the address of the object raising this event")) - qmf_event_schema_status.addProperty(SchemaProperty("event", SCHEMA_DATA_STRING, desc="string describing the type of event, in this case 'STATUS'")) - qmf_event_schema_status.addProperty(SchemaProperty("new_status", SCHEMA_DATA_STRING, desc="string value of the new status")) - qmf_event_schema_status.addProperty(SchemaProperty("old_status", SCHEMA_DATA_STRING, desc="string value of the old status")) + qmf_event_schema_status = Schema(SCHEMA_TYPE_EVENT, + "com.redhat.imagefactory", + "BuildAdaptorStatusEvent") + qmf_event_schema_status.addProperty(SchemaProperty("addr", + SCHEMA_DATA_MAP, + desc="the address of the object raising this event")) + qmf_event_schema_status.addProperty(SchemaProperty("event", + SCHEMA_DATA_STRING, + desc="string describing the type of event, in this case 'STATUS'")) + qmf_event_schema_status.addProperty(SchemaProperty("new_status", + SCHEMA_DATA_STRING, + desc="string value of the new status")) + qmf_event_schema_status.addProperty(SchemaProperty("old_status", + SCHEMA_DATA_STRING, + desc="string value of the old status")) #QMF schema for change to percent_complete event - qmf_event_schema_percentage = Schema(SCHEMA_TYPE_EVENT, "com.redhat.imagefactory", "BuildAdaptorPercentCompleteEvent") - qmf_event_schema_percentage.addProperty(SchemaProperty("addr", SCHEMA_DATA_MAP, desc="the address of the object raising this event")) - qmf_event_schema_percentage.addProperty(SchemaProperty("event", SCHEMA_DATA_STRING, desc="string describing the type of event, in this case 'PERCENTAGE'")) - qmf_event_schema_percentage.addProperty(SchemaProperty("percent_complete", SCHEMA_DATA_INT, desc="the estimated percentage through an operation")) + qmf_event_schema_percentage = 
Schema(SCHEMA_TYPE_EVENT, + "com.redhat.imagefactory", + "BuildAdaptorPercentCompleteEvent") + qmf_event_schema_percentage.addProperty(SchemaProperty("addr", + SCHEMA_DATA_MAP, + desc="the address of the object raising this event")) + qmf_event_schema_percentage.addProperty(SchemaProperty("event", + SCHEMA_DATA_STRING, + desc="string describing the type of event, in this case 'PERCENTAGE'")) + qmf_event_schema_percentage.addProperty(SchemaProperty("percent_complete", + SCHEMA_DATA_INT, + desc="the estimated percentage through an operation")) #QMF schema for build failure events - qmf_event_schema_build_failed = Schema(SCHEMA_TYPE_EVENT, "com.redhat.imagefactory", "BuildFailedEvent") - qmf_event_schema_build_failed.addProperty(SchemaProperty("addr", SCHEMA_DATA_MAP, desc="the address of the object raising this event")) - qmf_event_schema_build_failed.addProperty(SchemaProperty("event", SCHEMA_DATA_STRING, desc="string describing the type of event, in this case 'FAILURE'")) - qmf_event_schema_build_failed.addProperty(SchemaProperty("type", SCHEMA_DATA_STRING, desc="short string description of the failure")) - qmf_event_schema_build_failed.addProperty(SchemaProperty("info", SCHEMA_DATA_STRING, desc="longer string description of the failure")) + qmf_event_schema_build_failed = Schema(SCHEMA_TYPE_EVENT, + "com.redhat.imagefactory", + "BuildFailedEvent") + qmf_event_schema_build_failed.addProperty(SchemaProperty("addr", + SCHEMA_DATA_MAP, + desc="the address of the object raising this event")) + qmf_event_schema_build_failed.addProperty(SchemaProperty("event", + SCHEMA_DATA_STRING, + desc="string describing the type of event, in this case 'FAILURE'")) + qmf_event_schema_build_failed.addProperty(SchemaProperty("type", + SCHEMA_DATA_STRING, + desc="short string description of the failure")) + qmf_event_schema_build_failed.addProperty(SchemaProperty("info", + SCHEMA_DATA_STRING, + desc="longer string description of the failure"))
@classmethod def object_states(cls): """Returns a dictionary representing the finite state machine for instances of this class.""" return { - "NEW":({"INITIALIZING":("build_image", "push_image")}, {"PENDING":("build_image", "push_image")}, {"FAILED":("build_image", "push_image")}), - "INITIALIZING":({"PENDING":("_auto_")}, {"FAILED":("_auto_")}), - "PENDING":({"FINISHING":("_auto_")}, {"COMPLETED":("_auto_")}, {"FAILED":("_auto_")}), - "FINISHING":({"COMPLETED":("_auto_")}, {"FAILED":("_auto_")}), - "COMPLETED":() - } + "NEW":({"INITIALIZING":("build_image", "push_image")}, + {"PENDING":("build_image", "push_image")}, + {"FAILED":("build_image", "push_image")}), + "INITIALIZING":({"PENDING":("_auto_")}, {"FAILED":("_auto_")}), + "PENDING":({"FINISHING":("_auto_")}, {"COMPLETED":("_auto_")}, + {"FAILED":("_auto_")}), + "FINISHING":({"COMPLETED":("_auto_")}, {"FAILED":("_auto_")}), + "COMPLETED":() + }
image_id = props.subprop("qmf_object", "image", "The UUID of the image.") build_id = props.subprop("qmf_object", "build", "The UUID of the build.") status = props.subprop("qmf_object", "status", "The status property.") - percent_complete = props.subprop("qmf_object", "percent_complete", "The percent_complete property.") - new_image_id = props.subprop("qmf_object", "image_id", "The image property.") + percent_complete = props.subprop("qmf_object", "percent_complete", + "The percent_complete property.") + new_image_id = props.subprop("qmf_object", "image_id", + "The image property.") qmf_object = props.prop("_qmf_object", "The qmf_object property.")
def __init__(self, template, target, image_id='', build_id='', agent=None): @@ -75,7 +115,9 @@ class BuildAdaptor(BuildJob):
# Builder delegate methods def builder_did_update_status(self, builder, old_status, new_status): - super(BuildAdaptor, self).builder_did_update_status(builder, old_status, new_status) + super(BuildAdaptor, self).builder_did_update_status(builder, + old_status, + new_status) self.log.debug("Raising event with agent handler (%s), changed status from %s to %s" % (self.agent, old_status, new_status)) event = Data(BuildAdaptor.qmf_event_schema_status) event.addr = self.qmf_object.getAddr().asMap() @@ -88,8 +130,11 @@ class BuildAdaptor(BuildJob): self.agent.deregister(self.qmf_object)
- def builder_did_update_percentage(self, builder, original_percentage, new_percentage): - super(BuildAdaptor, self).builder_did_update_percentage(builder, original_percentage, new_percentage) + def builder_did_update_percentage(self, builder, original_percentage, + new_percentage): + super(BuildAdaptor, self).builder_did_update_percentage(builder, + original_percentage, + new_percentage) self.log.debug("Raising event with agent handler (%s), changed percent complete from %s to %s" % (self.agent, original_percentage, new_percentage)) event = Data(BuildAdaptor.qmf_event_schema_percentage) event.addr = self.qmf_object.getAddr().asMap() @@ -98,7 +143,8 @@ class BuildAdaptor(BuildJob): self.agent.session.raiseEvent(data=event, severity=SEV_NOTICE)
def builder_did_fail(self, builder, failure_type, failure_info): - super(BuildAdaptor, self).builder_did_fail(builder, failure_type, failure_info) + super(BuildAdaptor, self).builder_did_fail(builder, failure_type, + failure_info) self.log.debug("Raising event with agent handler (%s), BUILD FAILED: %s - %s" % (self.agent, failure_type, failure_info)) event = Data(BuildAdaptor.qmf_event_schema_build_failed) event.addr = self.qmf_object.getAddr().asMap() diff --git a/setup.py b/setup.py index 6cc6278..c743745 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,10 @@ try: except Exception, e: raise RuntimeError("ERROR: version.txt could not be found. Run 'git describe > version.txt' to get the correct version info.")
-datafiles=[('share/man/man1', ['Documentation/man/imagefactory.1']), ('/etc', ['imagefactory.conf']), ('/etc/pki/imagefactory', ['cert-ec2.pem']), ('/etc/rc.d/init.d', ['scripts/imagefactory'])] +datafiles=[('share/man/man1', ['Documentation/man/imagefactory.1']), + ('/etc', ['imagefactory.conf']), + ('/etc/pki/imagefactory', ['cert-ec2.pem']), + ('/etc/rc.d/init.d', ['scripts/imagefactory'])]
class sdist(_sdist): """ custom sdist command to prepare imagefactory.spec file """
I am always torn on this. I know it's in PEP 8 and I want to be a good little Python developer, but the fact is that displays have advanced quite a bit since the original run of Seinfeld and if we use descriptive method and variable names, we'll be splitting every damn line and still be going over 80 char in many instances.
I'd like some others to chime in here and give an opinion.
-steve
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
This just makes some of the code easier to read.
Signed-off-by: Chris Lalancette clalance@redhat.com
imagefactory | 10 +++- imgfac/BuildDispatcher.py | 65 ++++++++++++++++------- imgfac/BuildJob.py | 9 ++- imgfac/ImageWarehouse.py | 103 ++++++++++++++++++++++++----------- imgfac/Template.py | 3 +- imgfac/qmfagent/BuildAdaptor.py | 112 +++++++++++++++++++++++++++----------- setup.py | 5 ++- 7 files changed, 214 insertions(+), 93 deletions(-)
diff --git a/imagefactory b/imagefactory index eb6c797..783c9a6 100755 --- a/imagefactory +++ b/imagefactory @@ -41,7 +41,8 @@ class Application(Singleton):
def _singleton_init(self): super(Application, self)._singleton_init()
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.log = logging.getLogger('%s.%s' % (__name__,
self.__class__.__name__)) self.daemon = False self.pid_file_path = "/var/run/imagefactory.pid" signal.signal(signal.SIGTERM, self.signal_handler)
@@ -57,9 +58,12 @@ class Application(Singleton):
def setup_logging(self): if (self.app_config['foreground']):
logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s')
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s') else:
logging.basicConfig(level=logging.WARNING, format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s', filename='/var/log/imagefactory.log')
logging.basicConfig(level=logging.WARNING,
format='%(asctime)s %(levelname)s %(name)s pid(%(process)d) Message: %(message)s',
filename='/var/log/imagefactory.log') if (self.app_config['debug']): logging.getLogger('').setLevel(logging.DEBUG) elif (self.app_config['verbose']):
diff --git a/imgfac/BuildDispatcher.py b/imgfac/BuildDispatcher.py index 6653c5b..9a6f2fe 100644 --- a/imgfac/BuildDispatcher.py +++ b/imgfac/BuildDispatcher.py @@ -30,18 +30,22 @@ class BuildDispatcher(Singleton): self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__)) self.warehouse = ImageWarehouse(ApplicationConfiguration().configuration['warehouse'])
- def import_image(self, image_id, build_id, target_identifier, image_desc, target, provider):
def import_image(self, image_id, build_id, target_identifier, image_desc,
target, provider): image_id = self._ensure_image(image_id, image_desc) build_id = self._ensure_build(image_id, build_id) target_image_id = self._ensure_target_image(build_id, target)
provider_image_id = self._ensure_provider_image(target_image_id, provider, target_identifier)
provider_image_id = self._ensure_provider_image(target_image_id,
provider,
target_identifier) self._commit_build(image_id, build_id) return (image_id, build_id, target_image_id, provider_image_id)
- def build_image_for_targets(self, image_id, build_id, template, targets, job_cls = BuildJob, *args, **kwargs):
- def build_image_for_targets(self, image_id, build_id, template, targets,
job_cls = BuildJob, *args, **kwargs): if image_id and not targets: targets = self._targets_for_image_id(image_id)
@@ -60,17 +64,21 @@ class BuildDispatcher(Singleton):
return jobs
- def push_image_to_providers(self, image_id, build_id, providers, credentials, job_cls = BuildJob, *args, **kwargs):
- def push_image_to_providers(self, image_id, build_id, providers,
credentials, job_cls = BuildJob, *args,
**kwargs): if not build_id: build_id = self._latest_unpushed(image_id)
watcher = PushWatcher(image_id, build_id, len(providers), self.warehouse)
watcher = PushWatcher(image_id, build_id, len(providers),
self.warehouse) jobs = [] for provider in providers: target = self._map_provider_to_target(provider)
target_image_id = self._target_image_for_build_and_target(build_id, target)
target_image_id = self._target_image_for_build_and_target(build_id,
target) template = self._template_for_target_image_id(target_image_id)
@@ -106,17 +114,24 @@ class BuildDispatcher(Singleton): return self.warehouse.store_build(None, dict(image = image_id))
def _ensure_target_image(self, build_id, target):
target_image_id = self._target_image_for_build_and_target(build_id, target)
target_image_id = self._target_image_for_build_and_target(build_id,
target) if target_image_id: return target_image_id
return self.warehouse.store_target_image(None, None, dict(build=build_id, target=target))
return self.warehouse.store_target_image(None, None,
dict(build=build_id,
target=target))
- def _ensure_provider_image(self, target_image_id, provider, target_identifier):
- def _ensure_provider_image(self, target_image_id, provider,
target_identifier): provider_image_id = self._provider_image_for_target_image_and_provider(target_image_id, provider) if provider_image_id:
self._set_provider_image_attr(provider_image_id, 'target_identifier', target_identifier)
self._set_provider_image_attr(provider_image_id,
'target_identifier',
target_identifier) else:
metadata = dict(target_image=target_image_id, provider=provider, target_identifier=target_identifier)
metadata = dict(target_image=target_image_id, provider=provider,
target_identifier=target_identifier) return self.warehouse.create_provider_image(None, None, metadata)
def _load_template(self, image_id, build_id, template):
@@ -134,21 +149,29 @@ class BuildDispatcher(Singleton): self._set_latest_build(image_id, build_id)
def _latest_build(self, image_id):
return self.warehouse.metadata_for_id_of_type(['latest_build'], image_id, 'image')['latest_build']
return self.warehouse.metadata_for_id_of_type(['latest_build'],
image_id,
'image')['latest_build']
def _latest_unpushed(self, image_id):
return self.warehouse.metadata_for_id_of_type(['latest_unpushed'], image_id, 'image')['latest_unpushed']
return self.warehouse.metadata_for_id_of_type(['latest_unpushed'],
image_id,
'image')['latest_unpushed']
def _set_latest_build(self, image_id, build_id):
self.warehouse.set_metadata_for_id_of_type({'latest_build' : build_id}, image_id, 'image')
self.warehouse.set_metadata_for_id_of_type({'latest_build' : build_id},
image_id, 'image')
def _set_build_parent(self, build_id, parent_id):
self.warehouse.set_metadata_for_id_of_type({'parent' : parent_id}, build_id, 'build')
self.warehouse.set_metadata_for_id_of_type({'parent' : parent_id},
build_id, 'build')
def _targets_for_build_id(self, build_id): targets = [] for target_image_id in self._target_images_for_build(build_id):
targets.append(self.warehouse.metadata_for_id_of_type(['target'], target_image_id, 'target_image')['target'])
targets.append(self.warehouse.metadata_for_id_of_type(['target'],
target_image_id,
'target_image')['target']) return targets
def _targets_for_image_id(self, image_id):
@@ -161,10 +184,12 @@ class BuildDispatcher(Singleton): return self.warehouse.query("target_image", "$build == "%s"" % (build_id,))
def _target_image_for_build_and_target(self, build_id, target):
results = self.warehouse.query("target_image", "$build == \"%s\" && $target == \"%s\"" % (build_id, target))
results = self.warehouse.query("target_image",
"$build == \"%s\" && $target == \"%s\"" % (build_id, target)) return results[0] if results else None
- def _provider_image_for_target_image_and_provider(self, target_image_id, provider):
- def _provider_image_for_target_image_and_provider(self, target_image_id,
provider): results = self.warehouse.query("provider_image", "$target_image == \"%s\" && $provider == \"%s\"" % (target_image_id, provider)) return results[0] if results else None
@@ -172,7 +197,9 @@ class BuildDispatcher(Singleton): self.warehouse.set_metadata_for_id_of_type({attr : value}, provider_image_id, "provider_image")
def _template_for_target_image_id(self, target_image_id):
return self.warehouse.metadata_for_id_of_type(['template'], target_image_id, 'target_image')['template']
return self.warehouse.metadata_for_id_of_type(['template'],
target_image_id,
'target_image')['template']
def _template_for_build_id(self, build_id): target_image_ids = self._target_images_for_build(build_id)
diff --git a/imgfac/BuildJob.py b/imgfac/BuildJob.py index 0cd3773..aae87da 100644 --- a/imgfac/BuildJob.py +++ b/imgfac/BuildJob.py @@ -34,7 +34,8 @@ class BuildJob(object): def __init__(self, template, target, image_id = '', build_id = ''): super(BuildJob, self).__init__()
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.log = logging.getLogger('%s.%s' % (__name__,
self.__class__.__name__)) self.template = template if isinstance(template, Template) else Template(template) self.target = target
@@ -58,7 +59,8 @@ class BuildJob(object): def push_image(self, target_image_id, provider, credentials, watcher=None): self._watcher = watcher self.provider = provider
kwargs = dict(target_image_id=target_image_id, provider=provider, credentials=credentials)
kwargs = dict(target_image_id=target_image_id, provider=provider,
credentials=credentials) self._start_builder_thread("push_image", arg_dict=kwargs)
def abort(self):
@@ -70,7 +72,8 @@ class BuildJob(object): self._watcher.completed() self._watcher = None
- def builder_did_update_percentage(self, builder, original_percentage, new_percentage):
def builder_did_update_percentage(self, builder, original_percentage,
new_percentage): self.percent_complete = new_percentage
def builder_did_fail(self, builder, failure_type, failure_info):
diff --git a/imgfac/ImageWarehouse.py b/imgfac/ImageWarehouse.py index 220adb5..8a498fc 100644 --- a/imgfac/ImageWarehouse.py +++ b/imgfac/ImageWarehouse.py @@ -30,19 +30,28 @@ class ImageWarehouse(object): url = props.prop("_url", "The url property.") image_bucket = props.prop("_image_bucket", "The image_bucket property.") build_bucket = props.prop("_build_bucket", "The build_bucket property.")
- target_image_bucket = props.prop("_target_image_bucket", "The target_image_bucket property.")
- template_bucket = props.prop("_template_bucket", "The template_bucket property.")
- target_image_bucket = props.prop("_target_image_bucket",
"The target_image_bucket property.")
- template_bucket = props.prop("_template_bucket",
icicle_bucket = props.prop("_icicle_bucket", "The icicle_bucket property.")"The template_bucket property.")
- provider_image_bucket = props.prop("_provider_image_bucket", "The provider_image_bucket property.")
provider_image_bucket = props.prop("_provider_image_bucket",
"The provider_image_bucket property.")
def __repr__(self):
return "%s - %r" % (super(ImageWarehouse, self).__repr__(), self.__dict__)
return "%s - %r" % (super(ImageWarehouse, self).__repr__(),
self.__dict__)
def __str__(self):
return "%s - buckets(%s, %s, %s, %s)" % (self.url, self.target_image_bucket, self.template_bucket, self.icicle_bucket, self.provider_image_bucket)
return "%s - buckets(%s, %s, %s, %s)" % (self.url,
self.target_image_bucket,
self.template_bucket,
self.icicle_bucket,
self.provider_image_bucket)
def __init__(self, url=None):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.log = logging.getLogger('%s.%s' % (__name__,
self.__class__.__name__)) self.http = httplib2.Http()
@@ -64,9 +73,11 @@ class ImageWarehouse(object):
self.log.debug("Created Image Warehouse instance %s" % (self, ))
- def _http_request(self, url, method, body = None, content_type = 'text/plain'):
- def _http_request(self, url, method, body = None,
content_type = 'text/plain'): try:
return self.http.request(url, method, body, headers={'content-type': content_type})
return self.http.request(url, method, body,
headers={'content-type': content_type}) except Exception, e: raise WarehouseError("Problem encountered trying to reach image warehouse. Please check that iwhd is running and reachable.\nException text: %s" % (e, ))
@@ -101,31 +112,43 @@ class ImageWarehouse(object): return object_url
def query(self, object_type, expression):
object_url = self.__url_for_id_of_type("_query", object_type, create=False)
self.log.debug("Querying (%s) with expression (%s)" % (object_url, expression))
xml = self._http_post(object_url, expression, 'application/x-www-form-urlencoded')
object_url = self.__url_for_id_of_type("_query", object_type,
create=False)
self.log.debug("Querying (%s) with expression (%s)" % (object_url,
expression))
xml = self._http_post(object_url, expression,
'application/x-www-form-urlencoded') if not xml: return []
return map(lambda n: n.content, libxml2.parseDoc(xml).xpathEval("/objects/object/key"))
return map(lambda n: n.content,
libxml2.parseDoc(xml).xpathEval("/objects/object/key"))
def post_on_object_with_id_of_type(self, object_id, object_type, post_data):
object_url = self.__url_for_id_of_type(object_id, object_type, create=False)
return self._http_post(object_url, urllib.urlencode(post_data), 'application/x-www-form-urlencoded')
object_url = self.__url_for_id_of_type(object_id, object_type,
create=False)
return self._http_post(object_url, urllib.urlencode(post_data),
'application/x-www-form-urlencoded')
def object_with_id_of_type(self, object_id, object_type, metadata_keys=()):
object_url = self.__url_for_id_of_type(object_id, object_type, create=False)
object_url = self.__url_for_id_of_type(object_id, object_type,
create=False) obj = self._http_get(object_url) if(len(metadata_keys) > 0):
metadata = self.metadata_for_id_of_type(metadata_keys, object_id, object_type)
metadata = self.metadata_for_id_of_type(metadata_keys, object_id,
object_type) else: metadata = {} return obj, metadata
- def object_for_target_image_id(self, target_image_id, object_type, metadata_keys=()):
object_url = self.__url_for_id_of_type(target_image_id, "target_image", create=False)
- def object_for_target_image_id(self, target_image_id, object_type,
metadata_keys=()):
object_url = self.__url_for_id_of_type(target_image_id, "target_image",
create=False) object_id = self._http_get("%s/%s" % (object_url, object_type))
the_object, metadata = self.object_with_id_of_type(object_id, object_type, metadata_keys)
the_object, metadata = self.object_with_id_of_type(object_id,
object_type,
metadata_keys) return object_id, the_object, metadata
def delete_object_at_url(self, object_url):
@@ -138,11 +161,13 @@ class ImageWarehouse(object): return False
def delete_object_with_id_of_type(self, object_id, object_type):
object_url = self.__url_for_id_of_type(object_id, object_type, create=False)
object_url = self.__url_for_id_of_type(object_id, object_type,
create=False) return self.delete_object_at_url(object_url)
def set_metadata_for_id_of_type(self, metadata, object_id, object_type):
object_url = self.__url_for_id_of_type(object_id, object_type, create=False)
object_url = self.__url_for_id_of_type(object_id, object_type,
create=False) self.set_metadata_for_object_at_url(metadata, object_url)
def set_metadata_for_object_at_url(self, metadata, object_url):
@@ -151,14 +176,17 @@ class ImageWarehouse(object): self._http_put("%s/%s" % (object_url, item), str(metadata[item]))
def metadata_for_id_of_type(self, metadata_keys, object_id, object_type):
object_url = self.__url_for_id_of_type(object_id, object_type, create=False)
object_url = self.__url_for_id_of_type(object_id, object_type,
create=False) return self.metadata_for_object_at_url(metadata_keys, object_url)
def metadata_for_object_at_url(self, metadata_keys, object_url):
self.log.debug("Getting metadata (%s) from %s" % (metadata_keys, object_url))
self.log.debug("Getting metadata (%s) from %s" % (metadata_keys,
object_url)) metadata = dict() for item in metadata_keys:
metadata.update( { item : self._http_get("%s/%s" % (object_url, item)) } )
metadata.update( { item : self._http_get("%s/%s" % (object_url,
item)) } ) return metadata
def store_image(self, image_id, image_xml, metadata=None):
@@ -197,7 +225,8 @@ class ImageWarehouse(object): image_size = os.path.getsize(image_file_path) curl = pycurl.Curl() curl.setopt(pycurl.URL, object_url)
curl.setopt(pycurl.HTTPHEADER, ["User-Agent: Load Tool (PyCURL Load Tool)"])
curl.setopt(pycurl.HTTPHEADER,
["User-Agent: Load Tool (PyCURL Load Tool)"]) curl.setopt(pycurl.PUT, 1) curl.setopt(pycurl.INFILE, image_file) curl.setopt(pycurl.INFILESIZE, image_size)
@@ -207,7 +236,8 @@ class ImageWarehouse(object): except Exception, e: raise WarehouseError("Problem encountered trying to reach image warehouse. Please check that iwhd is running and reachable.\nException text: %s" % (e, ))
- def store_target_image(self, target_image_id, image_file_path, metadata=None):
- def store_target_image(self, target_image_id, image_file_path,
metadata=None): if(not target_image_id): target_image_id = str(uuid.uuid4()) object_url = self.__url_for_id_of_type(target_image_id, "target_image")
@@ -227,7 +257,8 @@ class ImageWarehouse(object): def create_provider_image(self, image_id, txt=None, metadata=None): if(not image_id): image_id = str(uuid.uuid4())
object_url = self.__url_for_id_of_type(image_id, object_type="provider_image")
object_url = self.__url_for_id_of_type(image_id,
object_type="provider_image") if(not txt): if(metadata): txt = "This object has the following metadata keys: %s" % (metadata.keys(), )
@@ -246,7 +277,8 @@ class ImageWarehouse(object): def store_template(self, template, template_id=None, metadata=None): if(not template_id): template_id = str(uuid.uuid4())
object_url = self.__url_for_id_of_type(template_id, object_type="template")
object_url = self.__url_for_id_of_type(template_id,
object_type="template") self._http_put(object_url, template)
@@ -275,16 +307,20 @@ class ImageWarehouse(object): return self.object_with_id_of_type(icicle_id, "icicle", metadata_keys)
def icicle_for_target_image_id(self, target_image_id, metadata_keys=()):
return self.object_for_target_image_id(target_image_id, "icicle", metadata_keys)
return self.object_for_target_image_id(target_image_id, "icicle",
metadata_keys)
def template_with_id(self, template_id, metadata_keys=()):
return self.object_with_id_of_type(template_id, "template", metadata_keys)
return self.object_with_id_of_type(template_id, "template",
metadata_keys)
def template_for_target_image_id(self, target_image_id, metadata_keys=()):
return self.object_for_target_image_id(target_image_id, "template", metadata_keys)
return self.object_for_target_image_id(target_image_id, "template",
metadata_keys)
def target_image_with_id(self, target_image_id, metadata_keys=()):
return self.object_with_id_of_type(target_image_id, "target_image", metadata_keys)
return self.object_with_id_of_type(target_image_id, "target_image",
metadata_keys)
def remove_template_with_id(self, template_id): return self.delete_object_with_id_of_type(template_id, "template")
@@ -293,7 +329,8 @@ class ImageWarehouse(object): return self.delete_object_with_id_of_type(icicle_id, "icicle")
def remove_target_image_with_id(self, target_image_id):
return self.delete_object_with_id_of_type(target_image_id, "target_image")
return self.delete_object_with_id_of_type(target_image_id,
"target_image")
class WarehouseError(Exception): """Error related to image warehouse interactions.""" diff --git a/imgfac/Template.py b/imgfac/Template.py index 82b4260..4eba266 100644 --- a/imgfac/Template.py +++ b/imgfac/Template.py @@ -37,7 +37,8 @@ class Template(object): return super(Template, self).__repr__
def __init__(self, template=None, uuid=None, url=None, xml=None):
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
self.log = logging.getLogger('%s.%s' % (__name__,
self.__class__.__name__)) self.warehouse = ImageWarehouse(ApplicationConfiguration().configuration["warehouse"]) self.identifier = None
diff --git a/imgfac/qmfagent/BuildAdaptor.py b/imgfac/qmfagent/BuildAdaptor.py index 0f68a12..291984d 100644 --- a/imgfac/qmfagent/BuildAdaptor.py +++ b/imgfac/qmfagent/BuildAdaptor.py @@ -24,48 +24,88 @@ from imgfac.BuildJob import BuildJob
class BuildAdaptor(BuildJob): # QMF schema for BuildAdaptor
- qmf_schema = Schema(SCHEMA_TYPE_DATA, "com.redhat.imagefactory", "BuildAdaptor")
- qmf_schema.addProperty(SchemaProperty("image", SCHEMA_DATA_STRING, desc="UUID of the image"))
- qmf_schema.addProperty(SchemaProperty("build", SCHEMA_DATA_STRING, desc="UUD of the build"))
- qmf_schema.addProperty(SchemaProperty("status", SCHEMA_DATA_STRING, desc="string representing the status (see instance_states() on ImageFactory)"))
- qmf_schema.addProperty(SchemaProperty("percent_complete", SCHEMA_DATA_INT, desc="the estimated percentage through an operation"))
- qmf_schema.addProperty(SchemaProperty("image_id", SCHEMA_DATA_STRING, desc="UUID of the newly created target or provider image"))
- qmf_schema.addMethod(SchemaMethod("abort", desc = "If possible, abort running build."))
qmf_schema = Schema(SCHEMA_TYPE_DATA, "com.redhat.imagefactory",
"BuildAdaptor")
qmf_schema.addProperty(SchemaProperty("image", SCHEMA_DATA_STRING,
desc="UUID of the image"))
qmf_schema.addProperty(SchemaProperty("build", SCHEMA_DATA_STRING,
desc="UUD of the build"))
qmf_schema.addProperty(SchemaProperty("status", SCHEMA_DATA_STRING,
desc="string representing the status (see instance_states() on ImageFactory)"))
qmf_schema.addProperty(SchemaProperty("percent_complete", SCHEMA_DATA_INT,
desc="the estimated percentage through an operation"))
qmf_schema.addProperty(SchemaProperty("image_id", SCHEMA_DATA_STRING,
desc="UUID of the newly created target or provider image"))
qmf_schema.addMethod(SchemaMethod("abort",
desc="If possible, abort running build."))
#QMF schema for status change event
- qmf_event_schema_status = Schema(SCHEMA_TYPE_EVENT, "com.redhat.imagefactory", "BuildAdaptorStatusEvent")
- qmf_event_schema_status.addProperty(SchemaProperty("addr", SCHEMA_DATA_MAP, desc="the address of the object raising this event"))
- qmf_event_schema_status.addProperty(SchemaProperty("event", SCHEMA_DATA_STRING, desc="string describing the type of event, in this case 'STATUS'"))
- qmf_event_schema_status.addProperty(SchemaProperty("new_status", SCHEMA_DATA_STRING, desc="string value of the new status"))
- qmf_event_schema_status.addProperty(SchemaProperty("old_status", SCHEMA_DATA_STRING, desc="string value of the old status"))
- qmf_event_schema_status = Schema(SCHEMA_TYPE_EVENT,
"com.redhat.imagefactory",
"BuildAdaptorStatusEvent")
- qmf_event_schema_status.addProperty(SchemaProperty("addr",
SCHEMA_DATA_MAP,
desc="the address of the object raising this event"))
- qmf_event_schema_status.addProperty(SchemaProperty("event",
SCHEMA_DATA_STRING,
desc="string describing the type of event, in this case 'STATUS'"))
- qmf_event_schema_status.addProperty(SchemaProperty("new_status",
SCHEMA_DATA_STRING,
desc="string value of the new status"))
- qmf_event_schema_status.addProperty(SchemaProperty("old_status",
SCHEMA_DATA_STRING,
desc="string value of the old status"))
#QMF schema for change to percent_complete event
- qmf_event_schema_percentage = Schema(SCHEMA_TYPE_EVENT, "com.redhat.imagefactory", "BuildAdaptorPercentCompleteEvent")
- qmf_event_schema_percentage.addProperty(SchemaProperty("addr", SCHEMA_DATA_MAP, desc="the address of the object raising this event"))
- qmf_event_schema_percentage.addProperty(SchemaProperty("event", SCHEMA_DATA_STRING, desc="string describing the type of event, in this case 'PERCENTAGE'"))
- qmf_event_schema_percentage.addProperty(SchemaProperty("percent_complete", SCHEMA_DATA_INT, desc="the estimated percentage through an operation"))
- qmf_event_schema_percentage = Schema(SCHEMA_TYPE_EVENT,
"com.redhat.imagefactory",
"BuildAdaptorPercentCompleteEvent")
- qmf_event_schema_percentage.addProperty(SchemaProperty("addr",
SCHEMA_DATA_MAP,
desc="the address of the object raising this event"))
- qmf_event_schema_percentage.addProperty(SchemaProperty("event",
SCHEMA_DATA_STRING,
desc="string describing the type of event, in this case 'PERCENTAGE'"))
- qmf_event_schema_percentage.addProperty(SchemaProperty("percent_complete",
SCHEMA_DATA_INT,
desc="the estimated percentage through an operation"))
#QMF schema for build failure events
- qmf_event_schema_build_failed = Schema(SCHEMA_TYPE_EVENT, "com.redhat.imagefactory", "BuildFailedEvent")
- qmf_event_schema_build_failed.addProperty(SchemaProperty("addr", SCHEMA_DATA_MAP, desc="the address of the object raising this event"))
- qmf_event_schema_build_failed.addProperty(SchemaProperty("event", SCHEMA_DATA_STRING, desc="string describing the type of event, in this case 'FAILURE'"))
- qmf_event_schema_build_failed.addProperty(SchemaProperty("type", SCHEMA_DATA_STRING, desc="short string description of the failure"))
- qmf_event_schema_build_failed.addProperty(SchemaProperty("info", SCHEMA_DATA_STRING, desc="longer string description of the failure"))
qmf_event_schema_build_failed = Schema(SCHEMA_TYPE_EVENT,
"com.redhat.imagefactory",
"BuildFailedEvent")
qmf_event_schema_build_failed.addProperty(SchemaProperty("addr",
SCHEMA_DATA_MAP,
desc="the address of the object raising this event"))
qmf_event_schema_build_failed.addProperty(SchemaProperty("event",
SCHEMA_DATA_STRING,
desc="string describing the type of event, in this case 'FAILURE'"))
qmf_event_schema_build_failed.addProperty(SchemaProperty("type",
SCHEMA_DATA_STRING,
desc="short string description of the failure"))
qmf_event_schema_build_failed.addProperty(SchemaProperty("info",
SCHEMA_DATA_STRING,
desc="longer string description of the failure"))
@classmethod def object_states(cls): """Returns a dictionary representing the finite state machine for instances of this class.""" return {
"NEW":({"INITIALIZING":("build_image", "push_image")}, {"PENDING":("build_image", "push_image")}, {"FAILED":("build_image", "push_image")}),
"INITIALIZING":({"PENDING":("_auto_")}, {"FAILED":("_auto_")}),
"PENDING":({"FINISHING":("_auto_")}, {"COMPLETED":("_auto_")}, {"FAILED":("_auto_")}),
"FINISHING":({"COMPLETED":("_auto_")}, {"FAILED":("_auto_")}),
"COMPLETED":()
}
"NEW":({"INITIALIZING":("build_image", "push_image")},
{"PENDING":("build_image", "push_image")},
{"FAILED":("build_image", "push_image")}),
"INITIALIZING":({"PENDING":("_auto_")}, {"FAILED":("_auto_")}),
"PENDING":({"FINISHING":("_auto_")}, {"COMPLETED":("_auto_")},
{"FAILED":("_auto_")}),
"FINISHING":({"COMPLETED":("_auto_")}, {"FAILED":("_auto_")}),
"COMPLETED":()
}
image_id = props.subprop("qmf_object", "image", "The UUID of the image.") build_id = props.subprop("qmf_object", "build", "The UUID of the build.") status = props.subprop("qmf_object", "status", "The status property.")
- percent_complete = props.subprop("qmf_object", "percent_complete", "The percent_complete property.")
- new_image_id = props.subprop("qmf_object", "image_id", "The image property.")
percent_complete = props.subprop("qmf_object", "percent_complete",
"The percent_complete property.")
new_image_id = props.subprop("qmf_object", "image_id",
"The image property.")
qmf_object = props.prop("_qmf_object", "The qmf_object property.")
def __init__(self, template, target, image_id='', build_id='', agent=None):
@@ -75,7 +115,9 @@ class BuildAdaptor(BuildJob):
# Builder delegate methods def builder_did_update_status(self, builder, old_status, new_status):
super(BuildAdaptor, self).builder_did_update_status(builder, old_status, new_status)
super(BuildAdaptor, self).builder_did_update_status(builder,
old_status,
new_status) self.log.debug("Raising event with agent handler (%s), changed status from %s to %s" % (self.agent, old_status, new_status)) event = Data(BuildAdaptor.qmf_event_schema_status) event.addr = self.qmf_object.getAddr().asMap()
@@ -88,8 +130,11 @@ class BuildAdaptor(BuildJob): self.agent.deregister(self.qmf_object)
- def builder_did_update_percentage(self, builder, original_percentage, new_percentage):
super(BuildAdaptor, self).builder_did_update_percentage(builder, original_percentage, new_percentage)
- def builder_did_update_percentage(self, builder, original_percentage,
new_percentage):
super(BuildAdaptor, self).builder_did_update_percentage(builder,
original_percentage,
new_percentage) self.log.debug("Raising event with agent handler (%s), changed percent complete from %s to %s" % (self.agent, original_percentage, new_percentage)) event = Data(BuildAdaptor.qmf_event_schema_percentage) event.addr = self.qmf_object.getAddr().asMap()
@@ -98,7 +143,8 @@ class BuildAdaptor(BuildJob): self.agent.session.raiseEvent(data=event, severity=SEV_NOTICE)
def builder_did_fail(self, builder, failure_type, failure_info):
super(BuildAdaptor, self).builder_did_fail(builder, failure_type, failure_info)
super(BuildAdaptor, self).builder_did_fail(builder, failure_type,
failure_info) self.log.debug("Raising event with agent handler (%s), BUILD FAILED: %s - %s" % (self.agent, failure_type, failure_info)) event = Data(BuildAdaptor.qmf_event_schema_build_failed) event.addr = self.qmf_object.getAddr().asMap()
diff --git a/setup.py b/setup.py index 6cc6278..c743745 100644 --- a/setup.py +++ b/setup.py @@ -28,7 +28,10 @@ try: except Exception, e: raise RuntimeError("ERROR: version.txt could not be found. Run 'git describe > version.txt' to get the correct version info.")
-datafiles=[('share/man/man1', ['Documentation/man/imagefactory.1']), ('/etc', ['imagefactory.conf']), ('/etc/pki/imagefactory', ['cert-ec2.pem']), ('/etc/rc.d/init.d', ['scripts/imagefactory'])] +datafiles=[('share/man/man1', ['Documentation/man/imagefactory.1']),
('/etc', ['imagefactory.conf']),
('/etc/pki/imagefactory', ['cert-ec2.pem']),
('/etc/rc.d/init.d', ['scripts/imagefactory'])]
class sdist(_sdist): """ custom sdist command to prepare imagefactory.spec file """ -- 1.7.4.4
On 08/03/11 - 03:44:10PM, Steve Loranz wrote:
I am always torn on this. I know it's in PEP008 and I want to be a good little Python developer, but the fact is that displays have advanced quite a bit since the original run of Seinfeld and if we use descriptive method and variable names, we'll be splitting every damn line and still be going over 80 char in many instances.
Wider displays are so you can have multiple 80-column windows open at the same time ;).
I'd like some others to chime in here and give an opinion.
But I understand what you mean. To be clear, I'm not a slave to this, and I only do it in places where it makes sense. But my terminals and my editor default to 80 columns, so splitting it there makes it easier for me to read.
I'm fine with waiting on this patch to see if anyone else has another opinion, either way.
On Aug 3, 2011, at 3:52 PM, Chris Lalancette wrote:
On 08/03/11 - 03:44:10PM, Steve Loranz wrote:
I am always torn on this. I know it's in PEP008 and I want to be a good little Python developer, but the fact is that displays have advanced quite a bit since the original run of Seinfeld and if we use descriptive method and variable names, we'll be splitting every damn line and still be going over 80 char in many instances.
Wider displays are so you can have multiple 80-column windows open at the same time ;).
I knew that was coming!
I'd like some others to chime in here and give an opinion.
But I understand what you mean. To be clear, I'm not a slave to this, and I only do it in places where it makes sense. But my terminals and my editor default to 80 columns, so splitting it there makes it easier for me to read.
I'm fine with waiting on this patch to see if anyone else has another opinion, either way.
I'm fine with it either way as well. I guess I just don't want to become a slave to it either. I do see the 80 column breach in my editor, but I guess I'm not clear on when it makes sense or when it doesn't. If there is grey area to it, I just default to not bothering.
-steve
On Wed, 2011-08-03 at 15:44 -0500, Steve Loranz wrote:
I am always torn on this. I know it's in PEP008 and I want to be a good little Python developer, but the fact is that displays have advanced quite a bit since the original run of Seinfeld and if we use descriptive method and variable names, we'll be splitting every damn line and still be going over 80 char in many instances.
I'd like some others to chime in here and give an opinion.
-steve
Random €0.02
80 chars is a good guideline, but there's no need to be ultra strict about it. Sticking to the guideline can be good or bad for legibility on wider displays, depending on the context.
Looking at the patch, I prefer the original version in most cases.
e.g. some of the lines in BuildDispatcher.py approach 120 chars, which I agree is far too long. I see that as a symptom of too many parameters to methods and too long method names, though. Splitting the code across multiple lines doesn't improve the situation IMHO, it needs more serious refactoring to reduce the length of the lines.
I'd just shut pylint up on this one Chris :-)
Cheers, Mark.
On 08/04/11 - 06:22:38AM, Mark McLoughlin wrote:
On Wed, 2011-08-03 at 15:44 -0500, Steve Loranz wrote:
I am always torn on this. I know it's in PEP008 and I want to be a good little Python developer, but the fact is that displays have advanced quite a bit since the original run of Seinfeld and if we use descriptive method and variable names, we'll be splitting every damn line and still be going over 80 char in many instances.
I'd like some others to chime in here and give an opinion.
-steve
Random €0.02
80 chars is a good guideline, but there's no need to be ultra strict about it. Sticking to the guideline can be good or bad for legibility on wider displays, depending on the context.
Looking at the patch, I prefer the original version in most cases.
e.g. some of the lines in BuildDispatcher.py approach 120 chars, which I agree is far too long. I see that as a symptom of too many parameters to methods and too long method names, though. Splitting the code across multiple lines doesn't improve the situation IMHO, it needs more serious refactoring to reduce the length of the lines.
Yes, I was going to look at this a bit more, to see if I could do some refactoring.
I'd just shut pylint up on this one Chris :-)
While I still think it helps, opinion is against me so I'll drop the patch.
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/props.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imgfac/props.py b/imgfac/props.py index 3db905c..474e9cc 100644 --- a/imgfac/props.py +++ b/imgfac/props.py @@ -41,4 +41,4 @@ def subprop(attr, subattr, doc = None, ro = False): return property(fget, fset if not ro else None, fdel if not ro else None, doc)
def ro_subprop(attr, subattr, doc = None): - return prop(attr, subattr, doc, True) + return subprop(attr, subattr, doc, True)
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/props.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imgfac/props.py b/imgfac/props.py index 3db905c..474e9cc 100644 --- a/imgfac/props.py +++ b/imgfac/props.py @@ -41,4 +41,4 @@ def subprop(attr, subattr, doc = None, ro = False): return property(fget, fset if not ro else None, fdel if not ro else None, doc)
def ro_subprop(attr, subattr, doc = None):
- return prop(attr, subattr, doc, True)
- return subprop(attr, subattr, doc, True)
-- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/BuildDispatcher.py | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/imgfac/BuildDispatcher.py b/imgfac/BuildDispatcher.py index 9a6f2fe..68b31b1 100644 --- a/imgfac/BuildDispatcher.py +++ b/imgfac/BuildDispatcher.py @@ -193,8 +193,10 @@ class BuildDispatcher(Singleton): results = self.warehouse.query("provider_image", "$target_image == "%s" && $provider == "%s"" % (target_image_id, provider)) return results[0] if results else None
- def _set_provider_image_attr(provider_image_id, attr, value): - self.warehouse.set_metadata_for_id_of_type({attr : value}, provider_image_id, "provider_image") + def _set_provider_image_attr(self, provider_image_id, attr, value): + self.warehouse.set_metadata_for_id_of_type({attr : value}, + provider_image_id, + "provider_image")
def _template_for_target_image_id(self, target_image_id): return self.warehouse.metadata_for_id_of_type(['template'],
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/BuildDispatcher.py | 6 ++++-- 1 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/imgfac/BuildDispatcher.py b/imgfac/BuildDispatcher.py index 9a6f2fe..68b31b1 100644 --- a/imgfac/BuildDispatcher.py +++ b/imgfac/BuildDispatcher.py @@ -193,8 +193,10 @@ class BuildDispatcher(Singleton): results = self.warehouse.query("provider_image", "$target_image == "%s" && $provider == "%s"" % (target_image_id, provider)) return results[0] if results else None
- def _set_provider_image_attr(provider_image_id, attr, value):
self.warehouse.set_metadata_for_id_of_type({attr : value}, provider_image_id, "provider_image")
def _set_provider_image_attr(self, provider_image_id, attr, value):
self.warehouse.set_metadata_for_id_of_type({attr : value},
provider_image_id,
"provider_image")
def _template_for_target_image_id(self, target_image_id): return self.warehouse.metadata_for_id_of_type(['template'],
-- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/builders/BaseBuilder.py | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/imgfac/builders/BaseBuilder.py b/imgfac/builders/BaseBuilder.py index 2df9b0d..d65abbd 100644 --- a/imgfac/builders/BaseBuilder.py +++ b/imgfac/builders/BaseBuilder.py @@ -16,8 +16,6 @@ import logging import zope import uuid -import os -import httplib2 from IBuilder import IBuilder from imgfac import props from imgfac.ApplicationConfiguration import ApplicationConfiguration
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/builders/BaseBuilder.py | 2 -- 1 files changed, 0 insertions(+), 2 deletions(-)
diff --git a/imgfac/builders/BaseBuilder.py b/imgfac/builders/BaseBuilder.py index 2df9b0d..d65abbd 100644 --- a/imgfac/builders/BaseBuilder.py +++ b/imgfac/builders/BaseBuilder.py @@ -16,8 +16,6 @@ import logging import zope import uuid -import os -import httplib2 from IBuilder import IBuilder from imgfac import props from imgfac.ApplicationConfiguration import ApplicationConfiguration -- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- imagefactory | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imagefactory b/imagefactory index 783c9a6..4faeb93 100755 --- a/imagefactory +++ b/imagefactory @@ -21,7 +21,7 @@ import os.path import signal import logging from imgfac.ApplicationConfiguration import ApplicationConfiguration -from imgfac.qmfagent.ImageFactoryAgent import * +from imgfac.qmfagent.ImageFactoryAgent import ImageFactoryAgent from imgfac.BuildDispatcher import BuildDispatcher from imgfac.ImageWarehouse import ImageWarehouse from imgfac.Singleton import Singleton
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imagefactory | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imagefactory b/imagefactory index 783c9a6..4faeb93 100755 --- a/imagefactory +++ b/imagefactory @@ -21,7 +21,7 @@ import os.path import signal import logging from imgfac.ApplicationConfiguration import ApplicationConfiguration -from imgfac.qmfagent.ImageFactoryAgent import * +from imgfac.qmfagent.ImageFactoryAgent import ImageFactoryAgent from imgfac.BuildDispatcher import BuildDispatcher from imgfac.ImageWarehouse import ImageWarehouse from imgfac.Singleton import Singleton -- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- imagefactory | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/imagefactory b/imagefactory index 4faeb93..30cec84 100755 --- a/imagefactory +++ b/imagefactory @@ -179,5 +179,4 @@ class Application(Singleton): print("Pushing build %s of image %s to provider %s" % (dispatcher.build_id, dispatcher.image_id, dispatcher.provider))
if __name__ == "__main__": - application = Application() - sys.exit(application.main()) + sys.exit(Application().main())
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imagefactory | 3 +-- 1 files changed, 1 insertions(+), 2 deletions(-)
diff --git a/imagefactory b/imagefactory index 4faeb93..30cec84 100755 --- a/imagefactory +++ b/imagefactory @@ -179,5 +179,4 @@ class Application(Singleton): print("Pushing build %s of image %s to provider %s" % (dispatcher.build_id, dispatcher.image_id, dispatcher.provider))
if __name__ == "__main__":
- application = Application()
- sys.exit(application.main())
- sys.exit(Application().main())
-- 1.7.4.4
As pointed out by pylint, start_builder_thread was using a dangerous default dictionary. Default dictionaries are dangerous because they are initialized at the time that the method is defined, not at the time that it is invoked. This means that if you call the method twice and manipulate the dictionary within the method, it will manipulate the first version of the dictionary, not a brand-new one. Luckily all callers of the method passed the argument, so just remove the default empty dictionary.
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/BuildJob.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imgfac/BuildJob.py b/imgfac/BuildJob.py index aae87da..2fc4dbd 100644 --- a/imgfac/BuildJob.py +++ b/imgfac/BuildJob.py @@ -95,7 +95,7 @@ class BuildJob(object):
return builder_class(self.template, self.target)
- def _start_builder_thread(self, method_name, arg_dict={}): + def _start_builder_thread(self, method_name, arg_dict): thread_name = "%s.%s()" % (self.new_image_id, method_name) # using args to pass the method we want to call on the target object. builder_thread = threading.Thread(target = self._builder, name=thread_name, args=(method_name), kwargs=arg_dict)
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
As pointed out by pylint, start_builder_thread was using a dangerous default dictionary. Default dictionaries are dangerous because they are initialized at the time that the method is defined, not at the time that it is invoked. This means that if you call the method twice and manipulate the dictionary within the method, it will manipulate the first version of the dictionary, not a brand-new one. Luckily all callers of the method passed the argument, so just remove the default empty dictionary.
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/BuildJob.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imgfac/BuildJob.py b/imgfac/BuildJob.py index aae87da..2fc4dbd 100644 --- a/imgfac/BuildJob.py +++ b/imgfac/BuildJob.py @@ -95,7 +95,7 @@ class BuildJob(object):
return builder_class(self.template, self.target)
- def _start_builder_thread(self, method_name, arg_dict={}):
- def _start_builder_thread(self, method_name, arg_dict): thread_name = "%s.%s()" % (self.new_image_id, method_name) # using args to pass the method we want to call on the target object. builder_thread = threading.Thread(target = self._builder, name=thread_name, args=(method_name), kwargs=arg_dict)
-- 1.7.4.4
Signed-off-by: Chris Lalancette clalance@redhat.com --- imgfac/ImageWarehouse.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imgfac/ImageWarehouse.py b/imgfac/ImageWarehouse.py index 8a498fc..9a61413 100644 --- a/imgfac/ImageWarehouse.py +++ b/imgfac/ImageWarehouse.py @@ -67,7 +67,7 @@ class ImageWarehouse(object): self.provider_image_bucket = ApplicationConfiguration().configuration['provider_bucket']
if (url.endswith('/')): - self.url = url[0:len(url)-1] + self.url = url[0:len(url)-1] else: self.url = url
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
Signed-off-by: Chris Lalancette clalance@redhat.com
imgfac/ImageWarehouse.py | 2 +- 1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/imgfac/ImageWarehouse.py b/imgfac/ImageWarehouse.py index 8a498fc..9a61413 100644 --- a/imgfac/ImageWarehouse.py +++ b/imgfac/ImageWarehouse.py @@ -67,7 +67,7 @@ class ImageWarehouse(object): self.provider_image_bucket = ApplicationConfiguration().configuration['provider_bucket']
if (url.endswith('/')):
self.url = url[0:len(url)-1]
self.url = url[0:len(url)-1] else: self.url = url
-- 1.7.4.4
pylint is a utility to statically analyze your python code and produce a report. While it does tend to have false positives, it also discovers many problems so is worth using.
Signed-off-by: Chris Lalancette clalance@redhat.com --- Makefile | 3 + pylint.conf | 238 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 241 insertions(+), 0 deletions(-) create mode 100644 pylint.conf
diff --git a/Makefile b/Makefile index afa6eba..d3eff9f 100644 --- a/Makefile +++ b/Makefile @@ -14,5 +14,8 @@ rpm: sdist version srpm: sdist version rpmbuild -bs imagefactory.spec --define "_sourcedir `pwd`/dist"
+pylint: + pylint --rcfile=pylint.conf imagefactory imgfac + clean: rm -rf MANIFEST build dist imagefactory.spec version.txt diff --git a/pylint.conf b/pylint.conf new file mode 100644 index 0000000..697c782 --- /dev/null +++ b/pylint.conf @@ -0,0 +1,238 @@ +[MASTER] + +# Specify a configuration file. +#rcfile= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Profiled execution. +profile=no + +# Add <file or directory> to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=CVS + +# Pickle collected data for later comparisons. +persistent=yes + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + + +[MESSAGES CONTROL] + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. +#enable= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). +#disable= + + +[REPORTS] + +# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html +output-format=text + +# Include message's id in output +include-ids=no + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells whether to display a full report or only the messages +reports=yes + +# Python expression which should return a note less than 10 (10 is the highest +# note). 
You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Add a comment according to your evaluation note. This is used by the global +# evaluation report (RP0004). +comment=no + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +ignored-classes=SQLObject + +# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. +generated-members=REQUEST,acl_users,aq_parent + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Maximum number of lines in a module +max-module-lines=2000 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching the beginning of the name of dummy variables +# (i.e. not used). +dummy-variables-rgx=_|dummy + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. 
+notes=FIXME,XXX,TODO + + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=filter,apply,input + +# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9_]+))$ + +# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ + +# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$ + +# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,50}$ + +# Regular expression which should only match correct method names +method-rgx=[a-z_][a-zA-Z0-9_]{2,50}$ + +# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$ + +# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-zA-Z0-9_]{0,30}$ + +# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-zA-Z0-9_]{0,30}$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata + +# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__ + + +[SIMILARITIES] + +# Minimum lines number of a similarity. +min-similarity-lines=4 + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. 
+ignore-docstrings=yes + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.* + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of branch for function / method body +max-branchs=12 + +# Maximum number of statements in function / method body +max-statements=50 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + + +[CLASSES] + +# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp
ACK
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
pylint is a utility to statically analyze your python code and produce a report. While it does tend to have false positives, it also discovers many problems, so it is worth using.
Signed-off-by: Chris Lalancette clalance@redhat.com
Makefile | 3 + pylint.conf | 238 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 241 insertions(+), 0 deletions(-) create mode 100644 pylint.conf
diff --git a/Makefile b/Makefile index afa6eba..d3eff9f 100644 --- a/Makefile +++ b/Makefile @@ -14,5 +14,8 @@ rpm: sdist version srpm: sdist version rpmbuild -bs imagefactory.spec --define "_sourcedir `pwd`/dist"
+pylint:
+	pylint --rcfile=pylint.conf imagefactory imgfac
clean: rm -rf MANIFEST build dist imagefactory.spec version.txt diff --git a/pylint.conf b/pylint.conf new file mode 100644 index 0000000..697c782 --- /dev/null +++ b/pylint.conf @@ -0,0 +1,238 @@ +[MASTER]
+# Specify a configuration file. +#rcfile=
+# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook=
+# Profiled execution. +profile=no
+# Add <file or directory> to the black list. It should be a base name, not a +# path. You may set this option multiple times. +ignore=CVS
+# Pickle collected data for later comparisons. +persistent=yes
+# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins=
+[MESSAGES CONTROL]
+# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time. +#enable=
+# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). +#disable=
+[REPORTS]
+# Set the output format. Available formats are text, parseable, colorized, msvs +# (visual studio) and html +output-format=text
+# Include message's id in output +include-ids=no
+# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no
+# Tells whether to display a full report or only the messages +reports=yes
+# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+# Add a comment according to your evaluation note. This is used by the global +# evaluation report (RP0004). +comment=no
+[TYPECHECK]
+# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes
+# List of classes names for which member attributes should not be checked +# (useful for classes with attributes dynamically set). +ignored-classes=SQLObject
+# When zope mode is activated, add a predefined set of Zope acquired attributes +# to generated-members. +zope=no
+# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E0201 when accessed. +generated-members=REQUEST,acl_users,aq_parent
+[FORMAT]
+# Maximum number of characters on a single line. +max-line-length=80
+# Maximum number of lines in a module +max-module-lines=2000
+# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' '
+[VARIABLES]
+# Tells whether we should check for unused import in __init__ files. +init-import=no
+# A regular expression matching the beginning of the name of dummy variables +# (i.e. not used). +dummy-variables-rgx=_|dummy
+# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins=
+[MISCELLANEOUS]
+# List of note tags to take in consideration, separated by a comma. +notes=FIXME,XXX,TODO
+[BASIC]
+# Required attributes for module, separated by a comma +required-attributes=
+# List of builtins function names that should not be used, separated by a comma +bad-functions=filter,apply,input
+# Regular expression which should only match correct module names +module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9_]+))$
+# Regular expression which should only match correct module level names +const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
+# Regular expression which should only match correct class names +class-rgx=[A-Z_][a-zA-Z0-9]+$
+# Regular expression which should only match correct function names +function-rgx=[a-z_][a-z0-9_]{2,50}$
+# Regular expression which should only match correct method names +method-rgx=[a-z_][a-zA-Z0-9_]{2,50}$
+# Regular expression which should only match correct instance attribute names +attr-rgx=[a-z_][a-z0-9_]{2,30}$
+# Regular expression which should only match correct argument names +argument-rgx=[a-z_][a-zA-Z0-9_]{0,30}$
+# Regular expression which should only match correct variable names +variable-rgx=[a-z_][a-zA-Z0-9_]{0,30}$
+# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
+# Good variable names which should always be accepted, separated by a comma +good-names=i,j,k,ex,Run,_
+# Bad variable names which should always be refused, separated by a comma +bad-names=foo,bar,baz,toto,tutu,tata
+# Regular expression which should only match functions or classes name which do +# not require a docstring +no-docstring-rgx=__.*__
+[SIMILARITIES]
+# Minimum lines number of a similarity. +min-similarity-lines=4
+# Ignore comments when computing similarities. +ignore-comments=yes
+# Ignore docstrings when computing similarities. +ignore-docstrings=yes
+[IMPORTS]
+# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,string,TERMIOS,Bastion,rexec
+# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph=
+# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph=
+# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph=
+[DESIGN]
+# Maximum number of arguments for function / method +max-args=5
+# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.*
+# Maximum number of locals for function / method body +max-locals=15
+# Maximum number of return / yield for function / method body +max-returns=6
+# Maximum number of branch for function / method body +max-branchs=12
+# Maximum number of statements in function / method body +max-statements=50
+# Maximum number of parents for a class (see R0901). +max-parents=7
+# Maximum number of attributes for a class (see R0902). +max-attributes=7
+# Minimum number of public methods for a class (see R0903). +min-public-methods=2
+# Maximum number of public methods for a class (see R0904). +max-public-methods=20
+[CLASSES]
+# List of interface methods to ignore, separated by a comma. This is used for +# instance to not check methods defines in Zope's Interface base class. +ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,__new__,setUp
1.7.4.4
Thanks, Chris! -steve
On Aug 3, 2011, at 8:27 AM, Chris Lalancette wrote:
This patch series just does a bunch of cleanup to the imagefactory, found by code inspection and through the use of pylint. There should be no functional change after this series, just cleaner code. With these patches in place tests still pass and a local upload style build still succeeds. Please review and ACK.
Chris Lalancette
aeolus-devel@lists.fedorahosted.org