Repository :
http://git.fedorahosted.org/cgit/copr.git
On branch : skvidal-backend
---------------------------------------------------------------
commit ead83c2231e7a7c90a5a2a881413fd4a0c0fdf34
Author: Seth Vidal <skvidal(a)fedoraproject.org>
Date: Wed Nov 14 01:57:09 2012 -0500
since I was making branches to help out Slavek, figured I'd add one for me,
too
---------------------------------------------------------------
backend/__init__.py | 7 +
backend/dispatcher.py | 148 ++++++++++++
backend/mockremote.py | 633 +++++++++++++++++++++++++++++++++++++++++++++++++
copr-be.py | 139 +++++++++++
4 files changed, 927 insertions(+), 0 deletions(-)
diff --git a/backend/__init__.py b/backend/__init__.py
new file mode 100644
index 0000000..e25106e
--- /dev/null
+++ b/backend/__init__.py
@@ -0,0 +1,7 @@
# part of copr backend
# skvidal(a)fedoraproject.org - seth vidal
# (c) copyright Red Hat, Inc 2012
# gplv2+

# package metadata for the copr backend package
__version__ = "0.1"
__author__ = "Seth Vidal"
diff --git a/backend/dispatcher.py b/backend/dispatcher.py
new file mode 100644
index 0000000..748e519
--- /dev/null
+++ b/backend/dispatcher.py
@@ -0,0 +1,148 @@
+#!/usr/bin/python -tt
+
+
+import sys
+import os
+import glob
+import subprocess
+import multiprocessing
+import time
+import Queue
+import json
+import mockremote
+from bunch import Bunch
+import ansible
+import ansible.playbook
+from ansible import callbacks
+
+
+
+
+
+class SilentPlaybookCallbacks(callbacks.object):
+ ''' playbook callbacks - quietly! '''
+
+ def __init__(self, verbose=False):
+
+ self.verbose = verbose
+
+ def on_start(self):
+ callbacks.call_callback_module('playbook_on_start')
+
+ def on_notify(self, host, handler):
+ callbacks.call_callback_module('playbook_on_notify', host, handler)
+
+ def on_no_hosts_matched(self):
+ callbacks.call_callback_module('playbook_on_no_hosts_matched')
+
+ def on_no_hosts_remaining(self):
+ callbacks.call_callback_module('playbook_on_no_hosts_remaining')
+
+ def on_task_start(self, name, is_conditional):
+ callbacks.call_callback_module('playbook_on_task_start', name,
is_conditional)
+
+ def on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None,
confirm=False, salt_size=None, salt=None):
+ result = None
+ print "***** VARS_PROMPT WILL NOT BE RUN IN THIS KIND OF PLAYBOOK
*****"
+ callbacks.call_callback_module('playbook_on_vars_prompt', varname,
private=private, prompt=prompt, encrypt=encrypt, confirm=confirm, salt_size=salt_size,
salt=None)
+ return result
+
+ def on_setup(self):
+ callbacks.call_callback_module('playbook_on_setup')
+
+ def on_import_for_host(self, host, imported_file):
+ callbacks.call_callback_module('playbook_on_import_for_host', host,
imported_file)
+
+ def on_not_import_for_host(self, host, missing_file):
+ callbacks.call_callback_module('playbook_on_not_import_for_host', host,
missing_file)
+
+ def on_play_start(self, pattern):
+ callbacks.call_callback_module('playbook_on_play_start', pattern)
+
+ def on_stats(self, stats):
+ callbacks.call_callback_module('playbook_on_stats', stats)
+
def spawn_instance(opts, ip):
    '''Ensure a builder instance exists; return its hostname/IP.

    Runs the provisioning playbook, then returns `ip` unchanged if one was
    supplied; otherwise returns the first non-localhost host recorded in
    the playbook's SETUP_CACHE. Implicitly returns None if no such host
    exists. NOTE(review): `opts` is currently unused and the playbook path
    is hard-coded - see the FIXME below.
    '''

    # FIXME - setup silent callbacks
    #       - check errors in setup
    #       - playbook variablized
    stats = callbacks.AggregateStats()
    playbook_cb = SilentPlaybookCallbacks(verbose=False)
    runner_cb = callbacks.DefaultRunnerCallbacks()
    play = ansible.playbook.PlayBook(stats=stats,
          playbook='/srv/copr-work/provision/builderpb.yml',
          callbacks=playbook_cb, runner_callbacks=runner_cb,
          remote_user='root')

    play.run()
    if ip:
        return ip

    # SETUP_CACHE is keyed by hostname; skip ansible's implicit localhost
    for i in play.SETUP_CACHE:
        if i == 'localhost':
            continue
        return i
+
+class Worker(multiprocessing.Process):
+ def __init__(self, opts, jobs, ip=None, create=True, callback=None):
+
+ # base class initialization
+ multiprocessing.Process.__init__(self, name="worker-builder")
+
+ # job management stuff
+ self.jobs = jobs
+ self.ip = ip
+ self.opts = opts
+ self.kill_received = False
+ print 'creating worker: %s' % ip
+
+
+ def parse_job(job):
+ # read the json of the job in
+ # break out what we need return a structured
+ def run(self):
+ # worker should startup and check if it can function
+ # for each job it takes from the jobs queue
+ # run opts.setup_playbook to create the instance
+ # do the build (mockremote)
+ # terminate the instance
+
+ while not self.kill_received:
+ try:
+ job = self.jobs.get()
+ except Queue.Empty:
+ break
+
+ self.cur_job = job
+ f = open(self.opts.get('destdir', '/') + '/' + job,
'w')
+ f.write('')
+ f.close()
+
+ # parse the job json into our info
+ # pkgs
+ # repos
+ # chroot(s)
+ # memory needed
+ # timeout
+ # make up a destdir
+
+ #print 'start up instance %s using %s' % (self.ip,
self.opts.get('playbook', None))
+ ip = spawn_instance(self.opts, ip=ip)
+
+ destdir = construct_something_here
+
+ try:
+ mr = mockremote.MockRemote(builder=ip, timeout=timeout,
+ destdir=destdir, chroot=chroot, cont=True, recurse=True,
+ repos=repos, callback=None)
+ mr.build_pkgs(pkgs)
+ except mockremote.MockRemoteError, e:
+ # record and break
+ print '%s - %s' % (ip, e)
+ break
+
+ # run mockremote to that ip with the args from above
+ print 'mockremote on %s - %s' % (ip, job)
+ time.sleep(30)
+ #print 'terminate-instance %s' % (self. ip)
+
+
diff --git a/backend/mockremote.py b/backend/mockremote.py
new file mode 100755
index 0000000..44e9fd9
--- /dev/null
+++ b/backend/mockremote.py
@@ -0,0 +1,633 @@
+#!/usr/bin/python -tt
+# by skvidal
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# copyright 2012 Red Hat, Inc.
+
+
+# take list of pkgs
+# take single hostname
+# send 1 pkg at a time to host
+# build in remote w/mockchain
+# rsync results back
+# repeat
+# take args from mockchain (more or less)
+
+
+import os
+import sys
+import subprocess
+
+import ansible.runner
+import optparse
+from operator import methodcaller
+import time
+import socket
+import traceback
+
# where we should execute mockchain from on the remote
mockchain='/usr/bin/mockchain'
# rsync path
rsync='/usr/bin/rsync'

# defaults shared by MockRemote and the command-line interface
DEF_REMOTE_BASEDIR='/var/tmp'   # where tempdirs are created on the builder
DEF_TIMEOUT=3600                # ansible connection timeout (seconds)
DEF_REPOS = []                  # extra repo baseurls added to the chroot
DEF_CHROOT= None                # mock chroot config name (must be supplied)
DEF_USER = 'mockbuilder'        # remote user to connect/build as
DEF_DESTDIR = os.getcwd()       # local destination for downloaded results
+
class SortedOptParser(optparse.OptionParser):
    '''OptionParser variant that alphabetizes its options in --help output.'''

    def format_help(self, formatter=None):
        # order registered options by their canonical option string, then
        # let the stock formatter render them
        self.option_list.sort(key=lambda opt: opt.get_opt_string())
        return optparse.OptionParser.format_help(self, formatter=None)
+
+
def createrepo(path):
    """Run /usr/bin/createrepo over path (with --update when repodata
    already exists); return (returncode, stdout, stderr)."""
    comm = ['/usr/bin/createrepo']
    if os.path.exists(path + '/repodata/repomd.xml'):
        # existing repo: refresh it instead of rebuilding from scratch
        comm.append('--update')
    comm.append(path)
    proc = subprocess.Popen(comm,
          stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out, err
+
def read_list_from_file(fn):
    """Return the lines of file fn as a list, skipping '#' comment lines.

    Lines are stripped of surrounding whitespace (which also removes the
    trailing newline - the original's separate replace('\\n','') was
    redundant). Blank lines are kept as empty strings, matching the
    original behavior.
    """
    lst = []
    # FIX: with-block guarantees the handle is closed (it leaked before)
    with open(fn, 'r') as f:
        for line in f:
            line = line.strip()
            if line.startswith('#'):
                continue
            lst.append(line)

    return lst
+
+def log(lf, msg):
+ if lf:
+ now = time.time()
+ try:
+ open(lf, 'a').write(str(now) + ':' + msg + '\n')
+ except (IOError, OSError), e:
+ print 'Could not write to logfile %s - %s' % (lf, str(e))
+ print msg
+
def get_ans_results(results, hostname):
    """Return the per-host result dict for hostname from an ansible
    results structure, preferring 'dark' (unreachable) over 'contacted';
    an empty dict when the host appears in neither bucket."""
    for bucket in ('dark', 'contacted'):
        host_results = results[bucket]
        if hostname in host_results:
            return host_results[hostname]

    return {}
+
def _create_ans_conn(hostname, username, timeout):
    """Return an ansible Runner wired to a single host over ssh."""
    return ansible.runner.Runner(
        remote_user=username,
        host_list=[hostname],
        pattern=hostname,
        forks=1,
        timeout=timeout,
        transport='ssh')
+
def check_for_ans_error(results, hostname, err_codes=None, success_codes=None,
                        return_on_error=None):
    """Inspect ansible results for hostname and decide if they are an error.

    Returns (error_bool, err_results_dict); the dict includes 'msg' on
    error, 'rc' when a return code was reported, and any keys listed in
    return_on_error that appear in the host's results.

    FIX: the defaults were mutable list literals shared across calls; use
    None sentinels and materialize per call (behavior unchanged).
    """
    if err_codes is None:
        err_codes = []
    if success_codes is None:
        success_codes = [0]
    if return_on_error is None:
        return_on_error = ['stdout', 'stderr']

    err_results = {}

    # 'dark' means ansible could not reach the host at all
    if 'dark' in results and hostname in results['dark']:
        err_results['msg'] = "Error: Could not contact/connect to %s." % hostname
        return (True, err_results)

    error = False

    if err_codes or success_codes:
        if hostname in results['contacted']:
            if 'rc' in results['contacted'][hostname]:
                rc = int(results['contacted'][hostname]['rc'])
                err_results['rc'] = rc
                # check for err codes first
                if rc in err_codes:
                    error = True
                    err_results['msg'] = 'rc %s matched err_codes' % rc
                elif rc not in success_codes:
                    error = True
                    err_results['msg'] = 'rc %s not in success_codes' % rc
            elif ('failed' in results['contacted'][hostname] and
                    results['contacted'][hostname]['failed']):
                error = True
                err_results['msg'] = 'results included failed as true'

        if error:
            # copy over whichever requested keys the host reported
            for item in return_on_error:
                if item in results['contacted'][hostname]:
                    err_results[item] = results['contacted'][hostname][item]

    return error, err_results
+
+
class MockRemoteError(Exception):
    """Base error for mockremote failures; str() yields the message."""

    def __init__(self, msg):
        Exception.__init__(self)
        self.msg = msg

    def __str__(self):
        return self.msg
+
class BuilderError(MockRemoteError):
    # raised by Builder when the remote host cannot be set up or used
    pass
+
+class DefaultCallBack(object):
+ def __init__(self, **kwargs):
+ self.quiet = kwargs.get('quiet', False)
+ self.logfn = kwargs.get('logfn', None)
+
+ def start_build(self, pkg):
+ pass
+
+ def end_build(self, pkg):
+ pass
+
+ def start_download(self, pkg):
+ pass
+
+ def end_download(self, pkg):
+ pass
+
+ def error(self, msg):
+ self.log("Error: %s" % msg)
+
+ def log(self, msg):
+ if not self.quiet:
+ print msg
+
+class CliLogCallBack(DefaultCallBack):
+ def __init__(self, **kwargs):
+ DefaultCallBack.__init__(self, **kwargs)
+
+ def start_build(self, pkg):
+ msg = "Start build: %s" % pkg
+ self.log(msg)
+
+
+ def end_build(self, pkg):
+ msg = "End Build: %s" % pkg
+ self.log(msg)
+
+ def start_download(self, pkg):
+ msg = "Start Download: %s" % pkg
+ self.log(msg)
+
+ def end_download(self, pkg):
+ msg = "End Download: %s" % pkg
+ self.log(msg)
+
+ def error(self, msg):
+ self.log("Error: %s" % msg)
+
+ def log(self, msg):
+ if self.logfn:
+ now = time.time()
+ try:
+ open(self.logfn, 'a').write(str(now) + ':' + msg +
'\n')
+ except (IOError, OSError), e:
+ print >>sys.stderr, 'Could not write to logfile %s - %s' %
(self.lf, str(e))
+ if not self.quiet:
+ print msg
+
class Builder(object):
    """Drives a single remote build host over ansible.

    Verifies the host is usable (check()), lazily creates a remote
    tempdir, runs mockchain there (build()), and rsyncs results back
    (download()). All remote state lives under self.tempdir.
    """

    def __init__(self, hostname, username, timeout, mockremote):
        self.hostname = hostname
        self.username = username
        self.timeout = timeout
        self.chroot = mockremote.chroot
        self.repos = mockremote.repos
        self.mockremote = mockremote
        self.checked = False
        self._tempdir = None
        # check out the host - make sure it can build/be contacted/etc;
        # check() raises BuilderError on any problem (it uses its own
        # short-timeout Runner, so self.conn is not needed yet)
        self.check()
        # if we're at this point we've connected and done stuff on the host
        self.conn = _create_ans_conn(self.hostname, self.username, self.timeout)

    @property
    def remote_build_dir(self):
        # root of mockchain's output tree inside the remote tempdir
        return self.tempdir + '/build/'

    @property
    def tempdir(self):
        """Lazily create (and cache) a 755 tempdir on the builder."""
        # an explicitly-configured remote tempdir always wins
        if self.mockremote.remote_tempdir:
            return self.mockremote.remote_tempdir

        if self._tempdir:
            return self._tempdir

        cmd = '/bin/mktemp -d %s/%s-XXXXX' % (self.mockremote.remote_basedir,
              'mockremote')
        self.conn.module_name = "shell"
        self.conn.module_args = str(cmd)
        results = self.conn.run()
        tempdir = None
        for hn, resdict in results['contacted'].items():
            tempdir = resdict['stdout']

        # if still nothing then we've broken
        if not tempdir:
            raise BuilderError('Could not make tmpdir on %s' % self.hostname)

        # world-readable so results can be fetched over rsync+ssh as user
        cmd = "/bin/chmod 755 %s" % tempdir
        self.conn.module_args = str(cmd)
        self.conn.run()
        self._tempdir = tempdir

        return self._tempdir

    @tempdir.setter
    def tempdir(self, value):
        self._tempdir = value

    def _get_remote_pkg_dir(self, pkg):
        # the pkg will build into a dir by mockchain named:
        # $tempdir/build/results/$chroot/$packagename
        s_pkg = os.path.basename(pkg)
        pdn = s_pkg.replace('.src.rpm', '')
        remote_pkg_dir = self.remote_build_dir + '/results/' + \
              self.chroot + '/' + pdn
        return remote_pkg_dir

    def build(self, pkg):
        """Build one srpm on the remote host via mockchain.

        Returns (success_bool, stdout, stderr). Success requires both a
        clean mockchain exit and the per-package 'success' marker file.
        """
        success = False

        # check if pkg is local or http
        dest = None
        if os.path.exists(pkg):
            # local file: copy it up to the remote tempdir first
            dest = self.tempdir + '/' + os.path.basename(pkg)
            self.conn.module_name = "copy"
            margs = 'src=%s dest=%s' % (pkg, dest)
            self.conn.module_args = str(margs)
            self.mockremote.callback.log("Sending %s to %s to build" %
                  (os.path.basename(pkg), self.hostname))

            # FIXME should probably check this but <shrug>
            self.conn.run()
        else:
            dest = pkg

        # construct the mockchain command
        buildcmd = '%s -r %s -l %s ' % (mockchain, self.chroot,
              self.remote_build_dir)
        for r in self.repos:
            buildcmd += '-a %s ' % r

        buildcmd += dest

        # run the mockchain command async
        # this runs it sync - FIXME
        self.conn.module_name = "shell"
        self.conn.module_args = str(buildcmd)
        results = self.conn.run()

        is_err, err_results = check_for_ans_error(results, self.hostname,
              success_codes=[0], return_on_error=['stdout', 'stderr'])
        if is_err:
            return (success, err_results.get('stdout', ''),
                    err_results.get('stderr', ''))

        # we know the command ended successfully but not if the pkg built
        # successfully
        myresults = get_ans_results(results, self.hostname)
        out = myresults.get('stdout', '')
        err = myresults.get('stderr', '')

        # mockchain drops a 'success' marker file into the result dir
        successfile = self._get_remote_pkg_dir(pkg) + '/success'
        testcmd = '/usr/bin/test -f %s' % successfile
        self.conn.module_args = str(testcmd)
        results = self.conn.run()
        is_err, err_results = check_for_ans_error(results, self.hostname,
              success_codes=[0])
        if not is_err:
            success = True

        return success, out, err

    def download(self, pkg, destdir):
        """rsync this pkg's remote result dir into local destdir.

        Returns (success_bool, stdout, stderr) of the rsync run.
        """
        rpd = self._get_remote_pkg_dir(pkg)
        # build rsync command line from the above
        remote_src = '%s@%s:%s' % (self.username, self.hostname, rpd)
        ssh_opts = "'ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no'"
        command = "%s -avH -e %s %s %s/" % (rsync, ssh_opts, remote_src, destdir)
        cmd = subprocess.Popen(command, shell=True,
              stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # rsync results into opts.destdir
        out, err = cmd.communicate()
        success = cmd.returncode == 0

        return success, out, err

    def check(self):
        """Verify the builder host is usable; raise BuilderError if not.

        Checks DNS resolution, ssh connectivity, and that mock, rsync,
        mockchain and the requested chroot config are all present.
        Returns (True, []) on success and caches success in self.checked.
        """
        if self.checked:
            return True, []

        errors = []

        try:
            socket.gethostbyname(self.hostname)
        except socket.gaierror:
            raise BuilderError('%s could not be resolved' % self.hostname)

        # connect as user and look for the tools we need
        ans = ansible.runner.Runner(host_list=[self.hostname], pattern='*',
              remote_user=self.username, forks=1, timeout=20, transport='ssh')
        ans.module_name = "shell"
        ans.module_args = str("/bin/rpm -q mock rsync")
        res = ans.run()
        # check for mock/rsync from results
        is_err, err_results = check_for_ans_error(res, self.hostname,
              success_codes=[0])
        if is_err:
            if 'rc' in err_results:
                errors.append('Warning: %s does not have mock or rsync installed'
                      % self.hostname)
            else:
                errors.append(err_results['msg'])

        # test for path existence for mockchain and chroot config for this
        # chroot
        ans.module_name = "shell"
        ans.module_args = str("/usr/bin/test -f %s && /usr/bin/test -f "
              "/etc/mock/%s.cfg" % (mockchain, self.chroot))
        res = ans.run()

        is_err, err_results = check_for_ans_error(res, self.hostname,
              success_codes=[0])
        if is_err:
            if 'rc' in err_results:
                errors.append('Warning: %s lacks mockchain or the chroot %s' %
                      (self.hostname, self.chroot))
            else:
                errors.append(err_results['msg'])

        if errors:
            raise BuilderError('\n'.join(errors))

        self.checked = True
        # FIX: the success path used to fall off the end returning None;
        # return the documented (success, errors) shape consistently
        return True, []
+
+
class MockRemote(object):
    """Orchestrates building a list of src.rpms on one remote builder.

    Ties together a Builder (remote host driver), a progress callback,
    and a local destdir that is createrepo'd as packages land.
    """

    def __init__(self, builder=None, user=DEF_USER, timeout=DEF_TIMEOUT,
                 destdir=DEF_DESTDIR, chroot=DEF_CHROOT, cont=False,
                 recurse=False, repos=DEF_REPOS, callback=None,
                 remote_basedir=DEF_REMOTE_BASEDIR, remote_tempdir=None):

        self.destdir = destdir
        self.chroot = chroot
        self.repos = repos
        self.cont = cont
        self.recurse = recurse
        self.callback = callback
        self.remote_basedir = remote_basedir
        self.remote_tempdir = remote_tempdir

        if not self.callback:
            self.callback = DefaultCallBack()

        # FIX: validate the chroot *before* constructing the Builder -
        # Builder setup contacts the remote host, which is slow and
        # pointless if we are going to refuse to run anyway
        if not self.chroot:
            raise MockRemoteError("No chroot specified!")

        self.callback.log("Setting up builder: %s" % builder)
        self.builder = Builder(builder, user, timeout, self)

        self.failed = []
        self.finished = []
        self.pkg_list = []

    def _get_pkg_destpath(self, pkg):
        # local result dir for one srpm: destdir/chroot/<name minus .src.rpm>
        s_pkg = os.path.basename(pkg)
        pdn = s_pkg.replace('.src.rpm', '')
        resdir = '%s/%s/%s' % (self.destdir, self.chroot, pdn)
        resdir = os.path.normpath(resdir)
        return resdir

    def build_pkgs(self, pkgs=None):
        """Build pkgs (defaults to self.pkg_list) on the remote builder.

        Skips pkgs already built into destdir; when self.recurse is set,
        failed pkgs are retried once more after the rest; raises
        MockRemoteError on failure unless self.cont ('continue') is set.
        """
        if not pkgs:
            pkgs = self.pkg_list

        built_pkgs = []
        # FIXME: downloaded_pkgs is read in the failure report below but
        # never populated anywhere
        downloaded_pkgs = {}

        try_again = True
        to_be_built = pkgs
        while try_again:
            self.failed = []
            just_built = []
            for pkg in to_be_built:
                if pkg in just_built:
                    self.callback.log("skipping duplicate pkg in this list: %s"
                          % pkg)
                    continue
                else:
                    just_built.append(pkg)

                p_path = self._get_pkg_destpath(pkg)

                # check the destdir to see if these pkgs need to be built
                if os.path.exists(p_path):
                    if os.path.exists(p_path + '/success'):
                        self.callback.log("Skipping already built pkg %s" %
                              os.path.basename(pkg))
                        continue
                    # if we're asking to build it and it is marked as fail -
                    # nuke the failure and try rebuilding it
                    elif os.path.exists(p_path + '/fail'):
                        os.unlink(p_path + '/fail')

                # off to the builder object
                # building
                self.callback.start_build(pkg)
                b_status, b_out, b_err = self.builder.build(pkg)
                self.callback.end_build(pkg)

                # downloading
                self.callback.start_download(pkg)
                # mockchain makes things with the chroot appended - so suck
                # down that pkg subdir from w/i that location
                d_ret, d_out, d_err = self.builder.download(pkg,
                      self.destdir + '/' + self.chroot)
                if not d_ret:
                    msg = "Failure to download %s: %s" % (pkg, d_out + d_err)
                    if not self.cont:
                        # FIX: modern raise form (the py2-only 'raise E, msg'
                        # spelling is deprecated)
                        raise MockRemoteError(msg)
                    self.callback.error(msg)

                self.callback.end_download(pkg)

                # checking where to stick stuff
                if not b_status:
                    if self.recurse:
                        self.failed.append(pkg)
                        self.callback.error("Error building %s, will try again"
                              % os.path.basename(pkg))
                    else:
                        # FIX: message typo 'logs/resultsin' -> 'logs/results in'
                        msg = "Error building %s\nSee logs/results in %s" % \
                              (os.path.basename(pkg), self.destdir)
                        if not self.cont:
                            raise MockRemoteError(msg)
                        self.callback.error(msg)

                else:
                    self.callback.log("Success building %s" %
                          os.path.basename(pkg))
                    built_pkgs.append(pkg)
                    # createrepo with the new pkgs
                    rc, out, err = createrepo(self.destdir)
                    if err.strip():
                        self.callback.error("Error making local repo: %s" %
                              self.destdir)
                        self.callback.error("%s" % err)
                        # FIXME - maybe clean up .repodata and .olddata here?

            if self.failed:
                if len(self.failed) != len(to_be_built):
                    # some pkgs succeeded this pass - retry only the failures
                    to_be_built = self.failed
                    try_again = True
                    self.callback.log('Trying to rebuild %s failed pkgs' %
                          len(self.failed))
                else:
                    self.callback.log("Tried twice - following pkgs could not "
                          "be successfully built:")
                    for pkg in self.failed:
                        msg = pkg
                        if pkg in downloaded_pkgs:
                            msg = downloaded_pkgs[pkg]
                        self.callback.log(msg)

                    try_again = False
            else:
                try_again = False
+
+
+
+def parse_args(args):
+
+ parser = SortedOptParser("mockremote -b hostname -u user -r chroot pkg pkg
pkg")
+ parser.add_option('-r', '--root', default=DEF_CHROOT,
dest='chroot',
+ help="chroot config name/base to use in the mock build")
+ parser.add_option('-c', '--continue', default=False,
action='store_true',
+ dest='cont',
+ help="if a pkg fails to build, continue to the next one")
+ parser.add_option('-a','--addrepo', default=DEF_REPOS,
action='append',
+ dest='repos',
+ help="add these repo baseurls to the chroot's yum config")
+ parser.add_option('--recurse', default=False, action='store_true',
+ help="if more than one pkg and it fails to build, try to build the rest
and come back to it")
+ parser.add_option('--log', default=None, dest='logfile',
+ help="log to the file named by this option, defaults to not
logging")
+ parser.add_option("-b", "--builder", dest='builder',
default=None,
+ help="builder to use")
+ parser.add_option("-u", dest="user", default=DEF_USER,
+ help="user to run as/connect as on builder systems")
+ parser.add_option("-t", "--timeout", dest="timeout",
type="int",
+ default=DEF_TIMEOUT, help="maximum time in seconds a build can take to
run")
+ parser.add_option("--destdir", dest="destdir",
default=DEF_DESTDIR,
+ help="place to download all the results/packages")
+ parser.add_option("--packages", dest="packages_file",
default=None,
+ help="file to read list of packages from")
+ parser.add_option("-q","--quiet", dest="quiet",
default=False, action="store_true",
+ help="output very little to the terminal")
+
+ opts,args = parser.parse_args(args)
+
+ if not opts.builder:
+ print "Must specify a system to build on"
+ sys.exit(1)
+
+ if opts.packages_file and os.path.exists(opts.packages_file):
+ args.extend(read_list_from_file(opts.packages_file))
+
+ #args = list(set(args)) # poor man's 'unique' - this also changes the
order
+ # :(
+
+ if not args:
+ print "Must specify at least one pkg to build"
+ sys.exit(1)
+
+ if not opts.chroot:
+ print "Must specify a mock chroot"
+ sys.exit(1)
+
+ for url in opts.repos:
+ if not (url.startswith('http') or url.startswith('file://')):
+ print "Only http[s] or file urls allowed for repos"
+ sys.exit(1)
+
+ return opts, args
+
+
+#FIXME
+# play with createrepo run at the end of each build
+# need to output the things that actually worked :)
+
+
+def main(args):
+
+ # parse args
+ opts,pkgs = parse_args(args)
+
+ if not os.path.exists(opts.destdir):
+ os.makedirs(opts.destdir)
+
+ try:
+ # setup our callback
+ callback = CliLogCallBack(logfn=opts.logfile, quiet=opts.quiet)
+ # our mockremote instance
+ mr = MockRemote(builder=opts.builder, user=opts.user,
+ timeout=opts.timeout, destdir=opts.destdir, chroot=opts.chroot,
+ cont=opts.cont, recurse=opts.recurse, repos=opts.repos,
+ callback=callback)
+
+ # FIXMES
+ # things to think about doing:
+ # output the remote tempdir when you start up
+ # output the number of pkgs
+ # output where you're writing things to
+ # consider option to sync over destdir to the remote system to use
+ # as a local repo for the build
+ #
+
+ if not opts.quiet:
+ print "Building %s pkgs" % len(pkgs)
+
+ mr.build_pkgs(pkgs)
+
+ if not opts.quiet:
+ print "Output written to: %s" % mr.destdir
+
+ except MockRemoteError, e:
+ print >>sys.stderr, "Error on build:"
+ print >>sys.stderr, str(e)
+ return
+
+
# script entry: run main() and turn any unhandled error into exit code 1
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except Exception, e:
        # unexpected failure: report it (with traceback) and exit non-zero
        print "ERROR: %s - %s" % (str(type(e)), str(e))
        traceback.print_exc()
        sys.exit(1)
diff --git a/copr-be.py b/copr-be.py
new file mode 100644
index 0000000..4e8945b
--- /dev/null
+++ b/copr-be.py
@@ -0,0 +1,139 @@
+#!/usr/bin/python -tt
+
+
+import sys
+import os
+import glob
+import time
+import json
+import multiprocessing
+from backend.dispatcher import Worker
+from bunch import Bunch
+from ConfigParser import ConfigParser
+
+def _get_conf(cp, section, option, default):
+ """to make returning items from config parser less
irritating"""
+ if cp.has_section(section) and cp.has_option(section,option):
+ return cp.get(section, option)
+ return default
+
class CoprBackendError(Exception):
    """Raised for copr backend configuration/runtime failures; str()
    yields the message."""

    def __init__(self, msg):
        Exception.__init__(self)
        self.msg = msg

    def __str__(self):
        return self.msg
+
+class CoprBackend(object):
+ def __init__(self, config_file=None):
+ # read in config file
+ # put all the config items into a single self.opts bunch
+
+ if not config_file:
+ raise CoprBackendError, "Must specify config_file"
+
+ self.config_file = config_file
+ self.opts = self.read_config()
+
+ self.jobs = multiprocessing.Queue()
+ self.workers = []
+ self.added_jobs = []
+
+ # setup a log file to write to
+ self.logfile = self.opts.logfile
+
+ def read_conf(self):
+ "read in config file - return Bunch of config data"
+ opts = Bunch()
+ cp = ConfigParser.ConfigParser()
+ try:
+ cp.read(self.config_file)
+ opts.results_baseurl = _get_conf(cp,'backend',
'results_baseurl', 'http://copr')
+ opts.frontend_url = _get_config(cp, 'backend',
'frontend_url', 'http://coprs/rest/api')
+ opts.frontend_auth = _get_conf(cp,'backend', 'frontend_auth',
'PASSWORDHERE')
+ opts.playbook = _get_conf(cp,'backend','playbook',
'/etc/copr/builder_playbook.yml')
+ opts.jobsdir = _get_conf(cp, 'backend', 'jobsdir', None)
+ opts.destdir = _get_conf(cp, 'backend', 'destdir', None)
+ opts.sleeptime = int(_get_conf(cp, 'backend', 'sleeptime',
10))
+ opts.num_workers = int(_get_conf(cp, 'backend',
'num_workers', 8))
+ opts.timeout = int(_get_conf(cp, 'builder', 'timeout',
1800))
+ opts.logfile = _get_conf(cp, 'backend', 'logfile',
'/var/log/copr-be.log')
+ # thoughts for later
+ # ssh key for connecting to builders?
+ # cloud key stuff?
+ #
+ except ConfigParser.Error, e:
+ raise CoprBackendError, 'Error parsing config file: %s: %s' %
(self.config_file, e)
+
+
+ if not opts.jobsdir or not opts.destdir:
+ raise CoprBackendError, "Incomplete Config - must specify jobsdir and
destdir in configuration"
+
+ return opts
+
+
+ def log(self, msg):
+ if self.logfile:
+ now = time.time()
+ try:
+ open(self.logfn, 'a').write(str(now) + ':' + msg +
'\n')
+ except (IOError, OSError), e:
+ print >>sys.stderr, 'Could not write to logfile %s - %s' %
(self.lf, str(e))
+ if not self.quiet:
+ print msg
+
+
+ def run(self):
+ # start processing builds, etc
+ # setup and run our workers
+ for i in range(opts.num_workers):
+ w = backend.dispatcher.Worker(opts, jobs)
+ workers.append(w)
+ w.start()
+
+ abort = False
+ while not abort:
+ print 'adding jobs'
+ for f in sorted(glob.glob(jobsdir + '/*.json')):
+ n = os.path.basename(f).replace('.json', '')
+ if not is_completed(n) and n not in added:
+ #jobdata = open(f).read()
+ jobs.put(n)
+ added.append(n)
+ print 'adding %s' % n
+
+
+
+ print "# jobs in queue: %s" % jobs.qsize()
+
+
+ # FIXME:
+ # look up number of workers in config
+ # see if it changed and update accordingly?
+ # poison pill? if opts.num_workers < len(workers)?
+ time.sleep(opts.sleeptime)
+
+
def is_completed(jobid, destdir=None):
    """Return True if any result for jobid already exists under destdir.

    FIX: the original read a module global 'destdir' that was never
    defined anywhere (NameError at runtime); accept it as an optional,
    backward-compatible parameter instead. When destdir is not supplied
    we conservatively report the job as not completed.
    """
    if destdir is None:
        # FIXME: callers should pass the backend's configured destdir
        return False
    if glob.glob(destdir + '/' + jobid + '*'):
        return True
    return False
+
def main(args):
    """Entry point placeholder.

    FIX: the original def had an empty body, which is a SyntaxError and
    made the whole script unimportable. FIXME: it should take a config
    file path from args, build a CoprBackend and call run(), e.g.:
        cb = CoprBackend(args[0])
        cb.run()
    """
    pass
+
# script entry point: run main() and translate failures into exit codes
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except Exception, e:
        # unexpected error: report and exit non-zero
        print "ERROR: %s - %s" % (str(type(e)), str(e))
        sys.exit(1)
    except KeyboardInterrupt, e:
        # Ctrl-C: exit cleanly (FIXME: "need cleanup" per the message below)
        print "\nUser cancelled, need cleanup\n"
        sys.exit(0)