extras-buildsys/server ArchJob.py, 1.6, 1.7 BuildMaster.py, 1.18, 1.19 PackageJob.py, 1.12, 1.13 User.py, 1.4, 1.5 UserInterface.py, 1.25, 1.26 main.py, 1.10, 1.11
Daniel Williams (dcbw)
fedora-extras-commits at redhat.com
Fri Jul 22 21:35:29 UTC 2005
Author: dcbw
Update of /cvs/fedora/extras-buildsys/server
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv23772/server
Modified Files:
ArchJob.py BuildMaster.py PackageJob.py User.py
UserInterface.py main.py
Log Message:
2005-07-22 Dan Williams <dcbw at redhat.com>
* builder/builder.py
- Fix traceback when killing jobs on shutdown of the builder
* client/client.py
- Add a job detail command
* server/ArchJob.py
- Store job start and end times in the database
* server/BuildMaster.py
- Remove JobsQuery class, no longer used
- Store more info in the job database to support web front end features
- Don't try to restart jobs in the 'initialize' state since they'll
get restarted anyway
* server/PackageJob.py
- Generalize log URL construction so it can be accessed from the user interface
- Pass more job info to the BuildMaster to be written to the DB
- Don't use tempfile.mkdtemp(), it seems to have issues
- Return 30 lines of log rather than 20
* server/User.py
- Grab a new connection to the database on every access, so that
user addition/modification can happen while the server is running
* server/UserInterface.py
- Add a "job detail" interface that returns information about a single
job
* server/main.py
- Greatly reduce the buffer size for the logfile; now it actually gets written
out in a timely fashion
Index: ArchJob.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/ArchJob.py,v
retrieving revision 1.6
retrieving revision 1.7
diff -u -r1.6 -r1.7
--- ArchJob.py 18 Jul 2005 17:24:39 -0000 1.6
+++ ArchJob.py 22 Jul 2005 21:35:27 -0000 1.7
@@ -51,6 +51,7 @@
self._builder_gone = False
self.downloads = {}
self.starttime = time.time()
+ self.endtime = 0
def _builder_finished(self):
if self.builder_status == 'done' or self.builder_status == 'killed' or self.builder_status == 'failed' or self.builder_status == 'orphaned':
@@ -83,6 +84,8 @@
attrdict['builder_addr'] = host
attrdict['status'] = self.status
attrdict['builder_status'] = self.builder_status
+ attrdict['starttime'] = self.starttime
+ attrdict['endtime'] = self.endtime
return attrdict
def set_builder_status(self, status):
@@ -189,6 +192,7 @@
# All done downloading?
if not undownloaded:
self._print_downloaded_files()
+ self.endtime = time.time()
self._set_status('done')
self.par_job.wake()
Index: BuildMaster.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/BuildMaster.py,v
retrieving revision 1.18
retrieving revision 1.19
diff -u -r1.18 -r1.19
--- BuildMaster.py 20 Jul 2005 18:11:37 -0000 1.18
+++ BuildMaster.py 22 Jul 2005 21:35:27 -0000 1.19
@@ -30,18 +30,6 @@
execfile(CONFIG_LOCATION + "CONFIG.py")
-class JobsQuery:
- """ Wrapper class around DB query results. Since only one
- thread has access to the database (to work around sqlite
- locking limitations), this object wraps requests to and
- from the BuildMaster thread.
- """
- def __init__(self, query):
- self.sql = query
- self.done = False
- self.result = None
-
-
def ensure_job_db_tables(dbcx):
""" Central routine to create the database table structure """
@@ -52,11 +40,22 @@
except Exception, e:
# If DB wasn't created, try to create it
try:
- curs.execute('CREATE TABLE jobs (uid INTEGER PRIMARY KEY, ' \
- 'username VARCHAR(20), package VARCHAR(50), ' \
- 'cvs_tag VARCHAR(255), target VARCHAR(20), ' \
- 'buildreq VARCHAR(75), starttime BIGINT, ' \
- 'endtime BIGINT, status VARCHAR(15))')
+ curs.execute('CREATE TABLE jobs (' \
+ 'uid INTEGER PRIMARY KEY, ' \
+ 'username VARCHAR(20), ' \
+ 'package VARCHAR(50), ' \
+ 'cvs_tag VARCHAR(255), ' \
+ 'target VARCHAR(20), ' \
+ 'buildreq VARCHAR(75), ' \
+ 'starttime BIGINT, ' \
+ 'endtime BIGINT, ' \
+ 'status VARCHAR(15), ' \
+ 'epoch VARCHAR(4), ' \
+ 'version VARCHAR(25), ' \
+ 'release VARCHAR(25), ' \
+ 'archlist VARCHAR(75), ' \
+ 'result_msg TEXT' \
+ ')')
except sqlite.OperationalError, e:
print "Could not access the job database. Reason: '%s'. Exiting..." % e
os._exit(1)
@@ -75,10 +74,16 @@
except Exception, e:
# If DB wasn't created, try to create it
try:
- curs.execute('CREATE TABLE archjobs (jobid VARCHAR(40) PRIMARY KEY, ' \
- 'parent_uid INTEGER, starttime BIGINT, endtime BIGINT, ' \
- 'arch VARCHAR(15), builder_addr VARCHAR(100), ' \
- 'status VARCHAR(15), builder_status VARCHAR(15))')
+ curs.execute('CREATE TABLE archjobs (' \
+ 'jobid VARCHAR(40) PRIMARY KEY, ' \
+ 'parent_uid INTEGER, ' \
+ 'starttime BIGINT, ' \
+ 'endtime BIGINT, ' \
+ 'arch VARCHAR(15), ' \
+ 'builder_addr VARCHAR(100), ' \
+ 'status VARCHAR(15), ' \
+ 'builder_status VARCHAR(15)' \
+ ')')
except sqlite.OperationalError, e:
print "Could not access the job database. Reason: '%s'. Exiting..." % e
os._exit(1)
@@ -111,7 +116,7 @@
self._archjob_status_updates = {}
self._archjob_status_updates_lock = threading.Lock()
- self._building_jobs = []
+ self._building_jobs = {}
self._building_jobs_lock = threading.Lock()
try:
@@ -131,7 +136,7 @@
def _restart_interrupted_jobs(self):
""" Restart interrupted jobs from our db. """
- self.curs.execute('SELECT * FROM jobs WHERE (status!="needsign" AND status!="failed" AND status!="killed")')
+ self.curs.execute('SELECT * FROM jobs WHERE (status!="needsign" AND status!="failed" AND status!="killed" AND status!="initialize")')
self.dbcx.commit()
jobs = self.curs.fetchall()
@@ -139,21 +144,21 @@
return
for row in jobs:
- uniqid = row[0]
+ uid = row[0]
# Kill any archjobs that are left around
- self.curs.execute('DELETE FROM archjobs WHERE parent_uid=%d' % uniqid)
+ self.curs.execute('DELETE FROM archjobs WHERE parent_uid=%d' % uid)
self.dbcx.commit()
# Now requeue the job
try:
repo = self.repos[row[4]]
except KeyError:
- print "%s (%s): Target '%s' not found." % (uniqid, row[2], row[4])
+ print "%s (%s): Target '%s' not found." % (uid, row[2], row[4])
else:
- job = PackageJob.PackageJob(uniqid, row[1], row[2], row[3], repo, self, self.hostname)
- print "%s (%s): Restarting '%s' on target '%s'" % (uniqid, row[2], row[3], row[4])
+ job = PackageJob.PackageJob(uid, row[1], row[2], row[3], repo, self, self.hostname)
+ print "%s (%s): Restarting '%s' on target '%s'" % (uid, row[2], row[3], row[4])
self._building_jobs_lock.acquire()
- self._building_jobs.append(job)
+ self._building_jobs[uid] = job
self._building_jobs_lock.release()
def stop(self):
@@ -188,18 +193,18 @@
self._new_queue.append(job_desc)
self._new_queue_lock.release()
- def queue_job_status_update(self, uid, status):
+ def queue_job_status_update(self, uid, attrdict):
self._status_updates_lock.acquire()
lcl_uid = copy.copy(uid)
- lcl_status = copy.copy(status)
- self._status_updates[lcl_uid] = lcl_status
+ lcl_attrdict = copy.deepcopy(attrdict)
+ self._status_updates[lcl_uid] = lcl_attrdict
self._status_updates_lock.release()
- def queue_archjob_status_update(self, jobid, attrdict):
+ def queue_archjob_status_update(self, uid, attrdict):
self._archjob_status_updates_lock.acquire()
- lcl_jobid = copy.copy(jobid)
+ lcl_uid = copy.copy(uid)
lcl_attrdict = copy.deepcopy(attrdict)
- self._archjob_status_updates[lcl_jobid] = lcl_attrdict
+ self._archjob_status_updates[lcl_uid] = lcl_attrdict
self._archjob_status_updates_lock.release()
def notify_job_done(self, job):
@@ -211,9 +216,10 @@
self._done_queue_lock.acquire()
for job in self._done_queue:
- curstage = job.get_cur_stage()
uid = job.get_uid()
- self._write_job_status_to_db(uid, curstage)
+ attrdict = {}
+ attrdict['status'] = job.get_cur_stage()
+ self._write_job_status_to_db(uid, attrdict)
# Update job end time
try:
@@ -229,36 +235,44 @@
self._status_updates_lock.release()
print "%s (%s): Job finished." % (uid, job.package)
+
self._building_jobs_lock.acquire()
- self._building_jobs.remove(job)
+ self._building_jobs[uid] = None
self._building_jobs_lock.release()
self._done_queue = []
self._done_queue_lock.release()
- def _write_job_status_to_db(self, uid, status):
+ def _write_job_status_to_db(self, uid, attrdict):
+ sql = 'status="%s"' % attrdict['status']
+ if attrdict.has_key('epoch') and attrdict.has_key('version') and attrdict.has_key('release'):
+ sql = sql + ', epoch="%s", version="%s", release="%s"' % (attrdict['epoch'], attrdict['version'], attrdict['release'])
+ if attrdict.has_key('result_msg'):
+ import urllib
+ sql = sql + ', result_msg="%s"' % (urllib.quote(attrdict['result_msg']))
+
+ sql = 'UPDATE jobs SET ' + sql + ' WHERE uid=%d' % uid
try:
- self.curs.execute('UPDATE jobs SET status="%s" WHERE uid=%d' \
- % (status, uid))
+ self.curs.execute(sql)
except sqlite.OperationalError, e:
print "DB Error: could not access jobs database. Reason: '%s'" % e
self.dbcx.commit()
- def _write_archjob_status_to_db(self, jobid, attrdict):
- self.curs.execute('SELECT * FROM archjobs WHERE jobid="%s"' % jobid)
+ def _write_archjob_status_to_db(self, uid, attrdict):
+ self.curs.execute('SELECT * FROM archjobs WHERE jobid="%s"' % uid)
self.dbcx.commit()
if len(self.curs.fetchall()) == 0:
try:
self.curs.execute('INSERT INTO archjobs (jobid, parent_uid, starttime, endtime, arch, builder_addr, status, builder_status) ' \
- 'VALUES ("%s", %d, %d, %d, "%s", "%s", "%s", "%s")' % (jobid, attrdict['parent_uid'], time.time(), 0, attrdict['arch'], \
- attrdict['builder_addr'], attrdict['status'], attrdict['builder_status']))
+ 'VALUES ("%s", %d, %d, %d, "%s", "%s", "%s", "%s")' % (uid, attrdict['parent_uid'], attrdict['starttime'], attrdict['endtime'], \
+ attrdict['arch'], attrdict['builder_addr'], attrdict['status'], attrdict['builder_status']))
except sqlite.OperationalError, e:
print "DB Error: could not access jobs database. Reason: '%s'" % e
else:
try:
- self.curs.execute('UPDATE archjobs SET status="%s", builder_status="%s" ' \
- 'WHERE jobid="%s" AND parent_uid=%d' % (attrdict['status'], attrdict['builder_status'], jobid, attrdict['parent_uid']))
+ self.curs.execute('UPDATE archjobs SET status="%s", builder_status="%s", endtime=%d ' \
+ 'WHERE jobid="%s" AND parent_uid=%d' % (attrdict['status'], attrdict['builder_status'], attrdict['endtime'], uid, attrdict['parent_uid']))
except sqlite.OperationalError, e:
print "DB Error: could not access jobs database. Reason: '%s'" % e
@@ -275,8 +289,8 @@
self._status_updates_lock.release()
self._archjob_status_updates_lock.acquire()
- for jobid in self._archjob_status_updates.keys():
- self._write_archjob_status_to_db(jobid, self._archjob_status_updates[jobid])
+ for uid in self._archjob_status_updates.keys():
+ self._write_archjob_status_to_db(uid, self._archjob_status_updates[uid])
self._archjob_status_updates = {}
self._archjob_status_updates_lock.release()
@@ -319,7 +333,7 @@
item['package'], locator, item['target'])
self._building_jobs_lock.acquire()
- self._building_jobs.append(job)
+ self._building_jobs[row['uid']] = job
self._building_jobs_lock.release()
self._new_queue = []
@@ -354,13 +368,12 @@
return False
- def get_job(self, jobid):
+ def get_job(self, uid):
self._building_jobs_lock.acquire()
- ret_job = None
- for job in self._building_jobs:
- if job.uid == jobid:
- ret_job = job
- break
+ try:
+ ret_job = self._building_jobs[uid]
+ except KeyError:
+ ret_job = None
self._building_jobs_lock.release()
return ret_job
Index: PackageJob.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/PackageJob.py,v
retrieving revision 1.12
retrieving revision 1.13
diff -u -r1.12 -r1.13
--- PackageJob.py 21 Jul 2005 14:36:54 -0000 1.12
+++ PackageJob.py 22 Jul 2005 21:35:27 -0000 1.13
@@ -27,6 +27,7 @@
import shutil
import tempfile
import smtplib
+import copy
from email.MIMEText import MIMEText
import string
import SimpleXMLRPCServer
@@ -98,9 +99,10 @@
return True
return False
-
-# tempfile.mkdtemp seems to have threading issues
-_mkdtemp_lock = threading.Lock()
+def make_job_log_url(target, uid, name, ver, release):
+ if target and uid and name and ver and release:
+ return "%s/%s/%s-%s-%s-%s/" % (config_opts['log_url'], target, uid, name, ver, release)
+ return None
class PackageJob:
@@ -112,14 +114,16 @@
self.curstage = ''
self.bm = buildmaster
self.uid = uid
+ self.package = package
+ self.name = None
+ self.epoch = None
+ self.ver = None
+ self.release = None
pjc = PackageJobController(self, 'initialize', 'waiting')
self.hostname = hostname
self.username = username
self.starttime = time.time()
- self.endtime = None
- self.package = package
- self.name = None
self.target = repo.target()
self.repo = repo
self.no_cvs = config_opts['use_srpm_not_cvs']
@@ -139,14 +143,23 @@
def get_cur_stage(self):
return self.curstage
- def _set_cur_stage(self, stage):
+ def _set_cur_stage(self, stage, result_msg=None):
""" Update our internal job stage, and notify the BuildMaster that
we've changed as well.
"""
oldstage = self.curstage
self.curstage = stage
if oldstage != stage:
- self.bm.queue_job_status_update(self.uid, stage)
+ attrdict = {}
+ attrdict['status'] = copy.copy(stage)
+ if self.name and self.epoch and self.ver and self.release:
+ attrdict['epoch'] = self.epoch
+ attrdict['version'] = self.ver
+ attrdict['release'] = self.release
+ if result_msg:
+ attrdict['result_msg'] = result_msg
+
+ self.bm.queue_job_status_update(self.uid, attrdict)
def get_uid(self):
return self.uid
@@ -252,10 +265,13 @@
def _checkout(self):
self._set_cur_stage('checkout')
- dir_prefix = self.cvs_tag + "-"
- _mkdtemp_lock.acquire()
- self.checkout_tmpdir = tempfile.mkdtemp(prefix=dir_prefix, dir=config_opts['tmpdir'])
- _mkdtemp_lock.release()
+
+ # Create the temporary checkout directory
+ dirname = "%s-%s-%d" % (self.uid, self.cvs_tag, time.time())
+ self.checkout_tmpdir = os.path.join(config_opts['tmpdir'], dirname)
+ if os.path.exists(self.checkout_tmpdir):
+ shutil.rmtree(self.checkout_tmpdir, ignore_errors=True)
+ os.makedirs(self.checkout_tmpdir)
# Checkout the module
cmd = 'cd %s; %s co -r %s %s' % (self.checkout_tmpdir, config_opts['cvs_cmd'], self.cvs_tag, self.package)
@@ -323,6 +339,9 @@
ts = rpmUtils.transaction.initReadOnlyTransaction()
hdr = rpmUtils.miscutils.hdrFromPackage(ts, self.srpm_path)
self.name = hdr['name']
+ self.epoch = hdr['epoch']
+ if not self.epoch:
+ self.epoch = '0'
self.ver = hdr['version']
self.release = hdr['release']
(self.archjobs, pkg_arches, allowed_arches) = self.arch_handling(hdr)
@@ -411,7 +430,11 @@
def die(self, username):
# Kill any building jobs
- self._set_cur_stage('killed')
+ resultstring = "%s (%s): Build on target %s was killed by %s." % (self.uid, self.name, self.target, username)
+
+ self._set_cur_stage('killed', resultstring)
+ self.email_result(resultstring)
+
self._archjobs_lock.acquire()
for job in self.archjobs.values():
if job:
@@ -419,8 +442,6 @@
self.archjobs = {}
self._archjobs_lock.release()
- resultstring = "%s (%s): Build on target %s was killed by %s." % (self.uid, self.name, self.target, username)
- self.email_result(resultstring)
self.bm.notify_job_done(self)
def wake(self):
@@ -451,10 +472,11 @@
shutil.rmtree(self.checkout_tmpdir, ignore_errors=True)
subj = 'Prep Error (Job %s): %s on %s' % (self.uid, self.cvs_tag, self.target)
self.email_result(resultstring=e.args, subject=subj)
- self._failed()
+ self._failed(e.args)
except BuildError, e:
subj = 'Build Error (Job %s): %s on %s' % (self.uid, self.cvs_tag, self.target)
- msg = "%s\n\n Build logs may be found at %s\n\n" % (e.msg, self._make_log_url())
+ log_url = make_job_log_url(self.target, self.uid, self.name, self.ver, self.release)
+ msg = "%s\n\n Build logs may be found at %s\n\n" % (e.msg, log_url)
logtail = self._get_log_tail(e.arch)
msg = "%s\n-------------------------------------------------\n\n%s\n" % (msg, logtail)
self.email_result(resultstring=msg, subject=subj)
@@ -464,7 +486,7 @@
if job:
job.die()
self._archjobs_lock.release()
- self._failed()
+ self._failed(e.msg)
else:
# Wait to be woken up when long-running operations complete
if wait:
@@ -499,8 +521,8 @@
def get_stage_dir(self):
return self.stage_dir
- def _failed(self):
- self._set_cur_stage('failed')
+ def _failed(self, msg=None):
+ self._set_cur_stage('failed', msg)
self.bm.notify_job_done(self)
def _add_to_repo(self):
@@ -535,19 +557,17 @@
self.wake()
def _succeeded(self):
- self._set_cur_stage('needsign')
- resultstring = """ %s (%s): Build on target %s succeeded.
+ resultstring = " %s (%s): Build on target %s succeeded." % (self.uid, self.name, self.target)
+ self._set_cur_stage('needsign', resultstring)
- Build logs may be found at %s
-""" % (self.uid, self.name, self.target, self._make_log_url())
+ log_url = make_job_log_url(self.target, self.uid, self.name, self.ver, self.release)
+ resultstring = resultstring + "\n Build logs may be found at %s\n" % (log_url)
self.email_result(resultstring)
- self.bm.notify_job_done(self)
- def _make_log_url(self, arch=None):
- return "%s/%s/%s-%s-%s-%s/" % (config_opts['log_url'], self.target, self.uid, self.name, self.ver, self.release)
+ self.bm.notify_job_done(self)
def _get_log_tail(self, arch):
- """ Returns the last 20 lines of the most relevant log file """
+ """ Returns the last 30 lines of the most relevant log file """
pkg_dir = "%s-%s-%s-%s" % (self.uid, self.name, self.ver, self.release)
log_dir = os.path.join(config_opts['server_work_dir'], self.target, pkg_dir, arch)
@@ -577,14 +597,14 @@
except:
pass
lines = []
- # Grab the last 20 lines from the file
+ # Grab the last 30 lines from the file
while True:
try:
line = f.next()
except StopIteration:
break
lines.append(line)
- if len(lines) > 20: # only want last 20 lines
+ if len(lines) > 30: # only want last 30 lines
del lines[0]
f.close()
Index: User.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/User.py,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- User.py 10 Jul 2005 03:44:37 -0000 1.4
+++ User.py 22 Jul 2005 21:35:27 -0000 1.5
@@ -34,45 +34,51 @@
self.server_admin = False
+def get_userdb_dbcx():
+ try:
+ dbcx = sqlite.connect(CONFIG_LOCATION + "userdb", encoding="utf-8", timeout=4)
+ curs = dbcx.cursor()
+ return (dbcx, curs)
+ except sqlite.DatabaseError, e:
+ print "sqlite DatabaseError: %s" % str(e)
+ return (None, None)
+
class Authenticator:
"""
Talks to a database of users & capabilities
"""
def __init__(self):
- try:
- self.dbcx = sqlite.connect(CONFIG_LOCATION + "userdb", encoding="utf-8", timeout=2)
- except sqlite.DatabaseError, e:
- s = "%s" % e
- if s == 'unable to open database file':
- print "Unable to open the user database. Exiting..."
- os._exit(1)
- self.curs = self.dbcx.cursor()
+ (dbcx, curs) = get_userdb_dbcx()
+ if not dbcx or not curs:
+ print "Unable to open the user database. Exiting..."
+ os._exit(1)
# Ensure the table exists in the database
create = False
try:
- self.curs.execute('SELECT * FROM users')
- self.dbcx.commit()
+ curs.execute('SELECT * FROM users')
+ dbcx.commit()
except sqlite._sqlite.DatabaseError, e:
create = True
if create:
- self.curs.execute('CREATE TABLE users (email VARCHAR(50), ' \
+ curs.execute('CREATE TABLE users (email VARCHAR(50), ' \
'own_jobs BOOLEAN, kill_any_job BOOLEAN, ' \
'modify_users BOOLEAN, server_admin BOOLEAN)')
- self.dbcx.commit()
+ dbcx.commit()
def new_authed_user(self, email, client_address):
if not email:
return None
+ (dbcx, curs) = get_userdb_dbcx()
user = None
- self.curs.execute('SELECT email, own_jobs, kill_any_job, modify_users, ' \
+ curs.execute('SELECT email, own_jobs, kill_any_job, modify_users, ' \
'server_admin FROM users WHERE email="%s"' % email)
- self.dbcx.commit()
- item = self.curs.fetchone()
+ dbcx.commit()
+ item = curs.fetchone()
if item:
user = User(email, False)
user.own_jobs = item['own_jobs']
Index: UserInterface.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/UserInterface.py,v
retrieving revision 1.25
retrieving revision 1.26
diff -u -r1.25 -r1.26
--- UserInterface.py 22 Jul 2005 02:54:07 -0000 1.25
+++ UserInterface.py 22 Jul 2005 21:35:27 -0000 1.26
@@ -69,6 +69,8 @@
uid = int(uid_in)
except ValueError:
return None
+ except TypeError:
+ return None
if uid < 0:
return None
return uid
@@ -284,6 +286,71 @@
return (0, "Success.", jobs)
+ def detail_job(self, uid):
+ """ Query job information and return it to the user """
+
+ uid = validate_uid(uid)
+ if not uid:
+ return (-1, "Error: Invalid job UID.", {})
+
+ sql = 'SELECT uid, username, package, cvs_tag, target, starttime, endtime, status, '\
+ 'epoch, version, release, archlist, result_msg FROM jobs WHERE uid=%d' % uid
+
+ # Run the query for the job
+ try:
+ dbcx, curs = get_dbcx()
+ except sqlite.DatabaseError, e:
+ return (-1, "Unable to access job database.", {})
+ curs.execute(sql)
+ job = curs.fetchone()
+ if not job:
+ return (-1, "Error: Invalid job UID.", {})
+ jobrec = {}
+ jobrec['uid'] = job[0]
+ jobrec['username'] = job[1]
+ jobrec['package'] = job[2]
+ jobrec['source'] = job[3]
+ jobrec['target'] = job[4]
+ jobrec['starttime'] = job[5]
+ jobrec['endtime'] = job[6]
+ jobrec['status'] = job[7]
+ if job[8]:
+ jobrec['epoch'] = job[8]
+ if job[9]:
+ jobrec['version'] = job[9]
+ if job[10]:
+ jobrec['release'] = job[10]
+ if job[12]:
+ jobrec['result_msg'] = job[12]
+ log_url = PackageJob.make_job_log_url(jobrec['target'], str(uid), jobrec['package'], jobrec['version'], jobrec['release'])
+ if log_url and len(log_url):
+ jobrec['log_url'] = log_url
+ jobrec['archjobs'] = []
+
+ # Get all archjobs for this job
+ sql = "SELECT jobid, parent_uid, starttime, endtime, arch, builder_addr, status, " \
+ "builder_status FROM archjobs WHERE parent_uid=%d " % uid
+ curs.execute(sql)
+ data = curs.fetchall()
+ for row in data:
+ ajrec = {}
+ ajrec['jobid'] = row[0]
+ ajrec['parent_uid'] = row[1]
+ ajrec['starttime'] = row[2]
+ ajrec['endtime'] = row[3]
+ ajrec['arch'] = row[4]
+ ajrec['builder_addr'] = row[5]
+ ajrec['status'] = row[6]
+ ajrec['builder_status'] = row[7]
+ jobrec['archjobs'].append(ajrec)
+
+ del curs
+ del dbcx
+
+ ret_job = copy.deepcopy(jobrec)
+ return (0, "Success.", ret_job)
+
+
def update_builders(self):
execfile("/etc/plague/server/CONFIG.py")
builder_list = self._builder_manager.update_builders()
@@ -337,6 +404,11 @@
return UserInterface.list_jobs(self, args_dict)
+ def detail_job(self, email, jobid):
+ user = AuthedXMLRPCServer.get_authinfo()
+ return UserInterface.detail_job(self, jobid)
+
+
def update_builders(self):
user = AuthedXMLRPCServer.get_authinfo()
if not user or not user.server_admin:
Index: main.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/main.py,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- main.py 20 Jul 2005 14:51:53 -0000 1.10
+++ main.py 22 Jul 2005 21:35:27 -0000 1.11
@@ -86,7 +86,8 @@
open(opts.pidfile, 'w').write('%d\n' % os.getpid())
if opts.logfile:
- log=open(opts.logfile, 'a')
+ # 1 == line buffer the log file
+ log=open(opts.logfile, 'a', 1)
sys.stdout=log
sys.stderr=log
More information about the scm-commits
mailing list