[copr] master: Only display initial packages textarea on copr creation (19adab0)
by bkabrda@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 19adab0d567c68ca9c1880fb6ad1852e8fa3a786
Author: Bohuslav Kabrda <bkabrda(a)redhat.com>
Date: Fri Dec 21 17:26:21 2012 +0100
Only display initial packages textarea on copr creation
>---------------------------------------------------------------
.../coprs/templates/coprs/_coprs_forms.html | 4 +++-
1 files changed, 3 insertions(+), 1 deletions(-)
diff --git a/coprs_frontend/coprs/templates/coprs/_coprs_forms.html b/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
index 5f3baa4..e29060f 100644
--- a/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
+++ b/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
@@ -10,7 +10,9 @@
{{ render_field(form.release) }}
{{ render_field(form.arches, size = 2) }}
{{ render_field(form.repos, rows = 10, cols = 50) }}
- {{ render_field(form.initial_pkgs, rows = 10, cols = 50) }}
+ {% if copr is none %}{# we're creating the copr, so display initial builds area #}
+ {{ render_field(form.initial_pkgs, rows = 10, cols = 50) }}
+ {% endif %}
<dd><input type=submit value=Submit></dd>
</dl>
</form>
10 years, 9 months
[copr] master: Allow specifying a set of usernames that are allowed to log in (4ed4ab6)
by bkabrda@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 4ed4ab64e923aacfc3a59562724ca9e5c192bd92
Author: Bohuslav Kabrda <bkabrda(a)redhat.com>
Date: Fri Dec 21 10:57:15 2012 +0100
Allow specifying a set of usernames that are allowed to log in
>---------------------------------------------------------------
coprs_frontend/coprs/config.py | 4 ++++
coprs_frontend/coprs/views/misc.py | 3 ++-
2 files changed, 6 insertions(+), 1 deletions(-)
diff --git a/coprs_frontend/coprs/config.py b/coprs_frontend/coprs/config.py
index e8a403e..bbc6c0a 100644
--- a/coprs_frontend/coprs/config.py
+++ b/coprs_frontend/coprs/config.py
@@ -7,6 +7,10 @@ class Config(object):
SECRET_KEY = 'THISISNOTASECRETATALL'
BACKEND_PASSWORD = 'thisisbackend'
+ # restrict access to a set of users
+ USE_ALLOWED_USERS = False
+ ALLOWED_USERS = []
+
# SQLAlchemy
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.abspath(DATABASE)
diff --git a/coprs_frontend/coprs/views/misc.py b/coprs_frontend/coprs/views/misc.py
index 3836a90..ca67c75 100644
--- a/coprs_frontend/coprs/views/misc.py
+++ b/coprs_frontend/coprs/views/misc.py
@@ -5,6 +5,7 @@ import flask
from flask.ext.openid import OpenID
from coprs import app
+from coprs import config
from coprs import db
from coprs import models
from coprs import oid
@@ -18,7 +19,7 @@ def login():
return flask.redirect(oid.get_next_url())
if flask.request.method == 'POST':
fasusername = flask.request.form.get('fasuname')
- if fasusername:
+ if fasusername and ((app.config['USE_ALLOWED_USERS'] and fasusername in app.config['ALLOWED_USERS']) or not app.config['USE_ALLOWED_USERS']):
return oid.try_login('http://{0}.id.fedoraproject.org/'.format(fasusername), ask_for = ["email"])
return flask.render_template('login.html',
next=oid.get_next_url(),
10 years, 9 months
[copr] master: Backend doesn't need to know build count (81584a8)
by bkabrda@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 81584a8b8fc8c8ac0674f9ca02855beb829ffd33
Author: Bohuslav Kabrda <bkabrda(a)redhat.com>
Date: Fri Dec 21 10:48:17 2012 +0100
Backend doesn't need to know build count
>---------------------------------------------------------------
.../coprs/views/backend_ns/backend_general.py | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/coprs_frontend/coprs/views/backend_ns/backend_general.py b/coprs_frontend/coprs/views/backend_ns/backend_general.py
index 4c08e88..1c15595 100644
--- a/coprs_frontend/coprs/views/backend_ns/backend_general.py
+++ b/coprs_frontend/coprs/views/backend_ns/backend_general.py
@@ -14,7 +14,7 @@ def waiting_builds():
builds = query[0:10]
return flask.jsonify({'builds': [build.to_dict(options = {'copr': {'owner': {'__columns_except__': ['openid_name', 'proven', 'admin', 'mail'] },
- '__columns_except__': ['chroots', 'repos'],
+ '__columns_except__': ['chroots', 'repos', 'build_count'],
'__included_ids__': False},
'__included_ids__': False}) for build in builds]})
10 years, 9 months
[copr] master: Make results, packages and repos clickable (6401b86)
by bkabrda@fedoraproject.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 6401b86fa4c732a8a6e9a899c4d1eb0c975f7609
Author: Bohuslav Kabrda <bkabrda(a)redhat.com>
Date: Fri Dec 21 10:46:09 2012 +0100
Make results, packages and repos clickable
>---------------------------------------------------------------
.../coprs/templates/coprs/_builds_table.html | 4 ++--
coprs_frontend/coprs/templates/coprs/detail.html | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/coprs_frontend/coprs/templates/coprs/_builds_table.html b/coprs_frontend/coprs/templates/coprs/_builds_table.html
index 8b7082c..f6840d9 100644
--- a/coprs_frontend/coprs/templates/coprs/_builds_table.html
+++ b/coprs_frontend/coprs/templates/coprs/_builds_table.html
@@ -19,7 +19,7 @@
<td>{{ build.started_on|date_from_secs or 'Not yet' }}</td>
<td>{{ build.ended_on|date_from_secs or 'Not yet' }}</td>
<td>{{ build.state }}</td>
- <td>{{ build.results }}</td>
+ <td>{% if build.results %}<a href={{ build.results }}>{% endif %}{{ build.results }}{% if build.results %}</a>{% endif %}</td>
<td>
{% if g.user and g.user == build.user %}
{{ copr_build_cancel_form(build) }}
@@ -34,7 +34,7 @@
<ul class=packages-list>
{% if build.pkgs is not none %}
{% for pkg in build.pkgs.split() %}
- <li>{{ pkg }}</li>
+ <li><a href={{ pkg }}>{{ pkg }}</a></li>
{% endfor %}
{% endif %}
</ul>
diff --git a/coprs_frontend/coprs/templates/coprs/detail.html b/coprs_frontend/coprs/templates/coprs/detail.html
index fb94430..9f54040 100644
--- a/coprs_frontend/coprs/templates/coprs/detail.html
+++ b/coprs_frontend/coprs/templates/coprs/detail.html
@@ -17,7 +17,7 @@
<h3>repos</h3>
<ul class=repos-list>
{% for repo in copr.repos_list %}
- <li>{{ repo }}</li>
+ <li><a href="{{ repo }}">{{ repo }}</a></li>
{% endfor %}
</ul>
{% endif %}
10 years, 9 months
[copr] master: Allow submitting initial packages for copr (9066404)
by bkabrda@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 906640441e831b246a7a0c9c5d8e43a02ea4caee
Author: Bohuslav Kabrda <bkabrda(a)redhat.com>
Date: Fri Dec 21 09:44:43 2012 +0100
Allow submitting initial packages for copr
>---------------------------------------------------------------
coprs_frontend/coprs/forms.py | 3 ++
.../coprs/templates/coprs/_coprs_forms.html | 1 +
.../coprs/views/coprs_ns/coprs_general.py | 15 ++++++++++++-
.../tests/test_views/test_coprs_ns/test_general.py | 23 ++++++++++++++++++-
4 files changed, 39 insertions(+), 3 deletions(-)
diff --git a/coprs_frontend/coprs/forms.py b/coprs_frontend/coprs/forms.py
index 4fe86ac..69e6f69 100644
--- a/coprs_frontend/coprs/forms.py
+++ b/coprs_frontend/coprs/forms.py
@@ -86,6 +86,9 @@ class CoprForm(wtf.Form):
repos = wtf.TextAreaField('Repos',
validators = [UrlListValidator()],
filters = [StringListFilter()])
+ initial_pkgs = wtf.TextAreaField('Initial packages to build',
+ validators = [UrlListValidator()],
+ filters = [StringListFilter()])
@property
def chroots(self):
diff --git a/coprs_frontend/coprs/templates/coprs/_coprs_forms.html b/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
index 98e9b40..5f3baa4 100644
--- a/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
+++ b/coprs_frontend/coprs/templates/coprs/_coprs_forms.html
@@ -10,6 +10,7 @@
{{ render_field(form.release) }}
{{ render_field(form.arches, size = 2) }}
{{ render_field(form.repos, rows = 10, cols = 50) }}
+ {{ render_field(form.initial_pkgs, rows = 10, cols = 50) }}
<dd><input type=submit value=Submit></dd>
</dl>
</form>
diff --git a/coprs_frontend/coprs/views/coprs_ns/coprs_general.py b/coprs_frontend/coprs/views/coprs_ns/coprs_general.py
index 08dc164..f616ccc 100644
--- a/coprs_frontend/coprs/views/coprs_ns/coprs_general.py
+++ b/coprs_frontend/coprs/views/coprs_ns/coprs_general.py
@@ -12,6 +12,7 @@ from coprs.views.misc import login_required
from coprs.views.coprs_ns import coprs_ns
+from coprs.logic import builds_logic
from coprs.logic import coprs_logic
@coprs_ns.route('/', defaults = {'page': 1})
@@ -65,8 +66,20 @@ def copr_new():
created_on = int(time.time()))
coprs_logic.CoprsLogic.new(flask.g.user, copr, check_for_duplicates = False) # form validation checks for duplicates
db.session.commit()
+ flask.flash('New copr was successfully created.')
+
+ if form.initial_pkgs.data:
+ build = models.Build(pkgs = form.initial_pkgs.data.replace('\n', ' '),
+ copr = copr,
+ chroots = copr.chroots,
+ repos = copr.repos,
+ user = flask.g.user,
+ submitted_on = int(time.time()))
+ # no need to check for authorization here
+ builds_logic.BuildsLogic.new(flask.g.user, build, copr, check_authorized = False)
+ db.session.commit()
+ flask.flash('Initial packages were successfully submitted for building.')
- flask.flash('New entry was successfully posted')
return flask.redirect(flask.url_for('coprs_ns.coprs_show'))
else:
return flask.render_template('coprs/add.html', form = form)
diff --git a/coprs_frontend/tests/test_views/test_coprs_ns/test_general.py b/coprs_frontend/tests/test_views/test_coprs_ns/test_general.py
index 6a318bd..cf42e2c 100644
--- a/coprs_frontend/tests/test_views/test_coprs_ns/test_general.py
+++ b/coprs_frontend/tests/test_views/test_coprs_ns/test_general.py
@@ -58,6 +58,8 @@ class TestCoprsAllowed(CoprsTestCase):
assert r.data.count('<div class=copr>') == 1
class TestCoprNew(CoprsTestCase):
+ success_string = 'New copr was successfully created'
+
def test_copr_new_normal(self, f_users):
with self.tc as c:
with c.session_transaction() as s:
@@ -65,7 +67,10 @@ class TestCoprNew(CoprsTestCase):
r = c.post('/coprs/new/', data = {'name': 'foo', 'release': 'fedora-rawhide', 'arches': ['i386']}, follow_redirects = True)
assert self.models.Copr.query.filter(self.models.Copr.name == 'foo').first()
- assert "New entry was successfully posted" in r.data
+ assert self.success_string in r.data
+
+ # make sure no initial build was submitted
+ assert self.models.Build.query.first() == None
def test_copr_new_exists_for_another_user(self, f_users, f_coprs):
with self.tc as c:
@@ -79,7 +84,7 @@ class TestCoprNew(CoprsTestCase):
r = c.post('/coprs/new/', data = {'name': self.c1.name, 'release': 'fedora-rawhide', 'arches': ['i386']}, follow_redirects = True)
self.db.session.add(self.c1)
assert len(self.models.Copr.query.filter(self.models.Copr.name == self.c1.name).all()) == foocoprs + 1
- assert "New entry was successfully posted" in r.data
+ assert self.success_string in r.data
def test_copr_new_exists_for_this_user(self, f_users, f_coprs):
with self.tc as c:
@@ -95,6 +100,20 @@ class TestCoprNew(CoprsTestCase):
assert len(self.models.Copr.query.filter(self.models.Copr.name == self.c1.name).all()) == foocoprs
assert "You already have copr named" in r.data
+ def test_copr_new_with_initial_pkgs(self, f_users):
+ with self.tc as c:
+ with c.session_transaction() as s:
+ s['openid'] = self.u1.openid_name
+
+ r = c.post('/coprs/new/', data = {'name': 'foo', 'release': 'fedora-rawhide', 'arches': ['i386'], 'initial_pkgs': ['http://f', 'http://b']}, follow_redirects = True)
+ copr = self.models.Copr.query.filter(self.models.Copr.name == 'foo').first()
+ assert copr
+ assert self.success_string in r.data
+
+ assert self.models.Build.query.first().copr == copr
+ assert copr.build_count == 1
+ assert 'Initial packages were successfully submitted' in r.data
+
class TestCoprDetail(CoprsTestCase):
def test_copr_detail_not_found(self):
r = self.tc.get('/coprs/detail/foo/bar/')
10 years, 9 months
[copr] master's head updated: Merge branch 'bkabrda-workspace' (95f1599)
by bkabrda@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
Branch 'master' now includes:
48cc349 adding all of Slavek's changes from the coprs openshift repo
4fce30f Make sure that datadir exists if using sqlite
575d718 Bulk of changes to have unique coprs only for each user, not across the whole system (will need some more fixing, probably doesn't work well, yet).
17cc75b Drop copr.name uniqueness. Don't create more alembic revisions until we actually deploy it somewhere, this makes it easier
02c5c76 A little fix to the autogenerated migration
3e1d319 Make sure that user can't create two coprs with the same name
0a6a967 Fix applying for building to use username/coprname scheme
78eb27f Filter user by username, not coprname :)
f742e83 Move TODOs to one file, so that they're not present in source code changes
6d34286 Add build counts to copr model to make things faster and easier to query
9a4f904 Pull the app logic out from views into separate module
91c40fa Enable just creating the data dir in manage.py for easier use with alembic
902b5ec Remove unneeded import
5c0f58a Transfer the builds logic into separate file
ffa1793 Keep commits in views
a8b23b8 Restructured the app to be more consistent
1fcb1a3 Extract logic from waiting builds
e6960cb Enable updating builds by backend
86b8246 Add status for builds, enable updating and displaying data submitted by backend
fde6b4a Use http basic auth for backend
08ae746 Logic for copr permissions
ae1fd8f Reverse the logic for build state a bit for cases when the build start submission may get lost
783c5a8 Only drop data, not tables during tests, to speed them up
ddb1ded Improve the fixtures
96511f1 More tests for copr listing
04e9f59 More test cases
ff3b1f5 Few changes to be able to test forms (no csrf, test modules can't import anything from coprs, as that instantiates new app outside of test env
a23368b Test adding new copr
b2b2415 Some tests for the copr detail
b174ef1 Make the tests more adaptable to possible changes in fixtures
aaafb07 More pythonic way of checking for substring presence :)
d240462 Few more tests and some fixes/alterations
90e7aae Some more tests, properly refresh instances after test client methods
c7ce8df Few more tests and a connected fix in a url route
036c99a Fix the form target in copr_permissions_form
4637aa1 Don't use with_permissions for getting a copr (using .first() on it results in just one permission)
a94e877 More tests
3122b10 Some initial builds tests and removal of unnecessary fixtures
bdfbe81 Fix the query to really be filtered by copr
405cb83 More builds tests and minor fixes
36c1716 Add some more data that we may use in future, regenerate the alembic initial DB setup (noone is using this, so no migrations are needed yet)
e19fa97 Ask fedorapeople for mail, too
e6c9a1e Yet some more information in models
8a2bb64 approved -> copr_builder
f6a2268 Fix tests after previous changes
1776cca Transfer permissions to enums
10ef19c Huge load of changes that make the new permissions work
2e0ada3 Refresh test fixtures, fix one logics function
0af4a30 Typo...
d470a3a Couple of fixes and adapted tests
bdea269 Make the enum fail fast instead of returning None
7b39efd More fixes
c79f63a Yet some more fixes
30aee74 Some more fixes
5508aa7 Last couple of fixes to allow copr_admins to edit copr.
5bf70ae Add epel to CHROOTS and sort the chroots for form select field
2578e93 Don't forget to store the copr creation date
7632011 Get rid of the OpenShift stuff, rename the containing dir to coprs_frontend, include sample apache config file
531bc59 Fix the apache conf example
f2a4319 Pull applying for permissions into logic
d1f6144 Remove this TODO...
8d3ced3 Allow admins to modify copr permissions
966c2fe Change the messages to also contain copr admins
4820c7c Add test that makes sure that admin can update copr attributes
f4ce8a0 More pulling into logic, also removing permissions now moves them to 'No Action' instead of 'Requested' state
7825ad2 Build is waiting if it didn't start AND didn't end
acbfd3d Don't render these attributes for backend
0247a86 Some initial backend api tests
cea72f9 Show packages belonging to builds
0b25123 Say explicitly that there are no permissions
f31b8f8 Update TODO, something was done, more requirements are comming
b2e5368 The update_builds url is now returning information about builds that weren't found while updating (if any)
2d295d0 Rewrite update_builds to be more legible, some more tests for it
422bcaf Make sure build.pkgs is not None before splitting
26e2324 A little simplification...
e9724c2 Fix showing cancel build button
7869d40 Fix displaying the permissions table
95f1599 Merge branch 'bkabrda-workspace'
10 years, 9 months
[copr] master: Merge branch 'bkabrda-workspace' (95f1599)
by bkabrda@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit 95f15998f39801a336b966e20e16d199844f67e6
Merge: d05eb48 7869d40
Author: Bohuslav Kabrda <bkabrda(a)redhat.com>
Date: Wed Dec 19 16:13:31 2012 +0100
Merge branch 'bkabrda-workspace'
>---------------------------------------------------------------
Diff suppressed because of size. To see it, use:
git diff --patch-with-stat --no-color --find-copies-harder --ignore-space-at-eol ^95f15998f39801a336b966e20e16d199844f67e6~1 95f15998f39801a336b966e20e16d199844f67e6
10 years, 9 months
[copr] master's head updated: Merge branch 'skvidal-backend' (d05eb48)
by skvidal@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
Branch 'master' now includes:
ead83c2 since I was making branches to help out Slavek, figured I'd add one for me, too
75b9e88 add errors file for exceptions separately continue to flesh out functionality
9938a2f make it create the dirs we need
ee51389 fix up a few typos
753cefd callbacks fix
f8ae662 when we abort- show why
7aa741d thinkos/cleanups and make sure we reread the config file before starting up builders
947827f only spin up any workers if we have jobs
bf225bb locals:
3ca4f10 log to the right place add a fixme for later
1412938 logs should go to a better place now
f43545a add a few log outputs so I know where it is
81c2267 add some more logging - trying to figure out why the mockremote call is failing
6619282 running from el6 and using transport=ssh ends in tears :)
78b5e37 more logging
2e6ae6d best if you don't cause tracebacks in your logging calls, dumbass
55d6d74 catch the common errors and make sure we're logging as much as we can from mockremote(and the rest is silent)
a3f4780 started_on not startedon
2e29020 should run pyflakes more often
ffcbad7 - move the json job file out to the results dir - more logging output
23ceef0 terminate instances when the build finishes
3d0d368 basename not dirname
452080a move to shutil.move not os.rename b/c of mount boundaries
7080aeb need to record the ip of each worker so we can kill it later
3ab6609 pass the ip of our builder over to terminate()
d29ca18 need a bit more logging detail to see what's breaking and why
a58fc08 make sure out results dir is there
2884bdb typo . instead of _
a70a73c output whatever mockremote sees a little more obviously
0f2031c - output what mockremote is seeing to the chroot log for the pkg - move around where we need to add check against pkgs
7f1ac35 typo
6159878 if any worker dies and we don't know why - raise an exception to kill us off b/c until we know all of the reasons why I'd rather just stop.
31ab388 more logging in error conditions having to do with spawning instances
16acf7c dear world in the future - read your own code
f568f12 more debug output yay!
a1349ac when we're dealing with no repos specified - make sure we don't treat '' as a repo
c881da2 localhost as string to get back debug on heisenbug
8fc0c3e touch up the log outputs to make it easier to know what to do
2840524 - add option parsing for config file, exit_on_worker, daemonize and pidfile - try out daemonize support
c7f2dc7 odd that pyflakes didn't catch a comma missing
4f0bd84 - check daemonize before daemonizing - output time in a more human readable format
15066b2 - make opts behave in face of cli-opts and on config reread - make exit be a bit quieter on ctrl-c (I hope) - prepare for a case where our builders are not dynamically created - obey pyflakes and pychecker suggestions
58c0441 still need something like a useful output when it does break
858a533 spaces in rsync command for name of copr :-/
2a047db - make pid file default to cwd unless specified - better log output for the building - fixme notes
7ae63a9 trying to figure out why the daemonized process dies
9f65c8f when we daemonize() we chdir() so if we don't abspath the config file we fall over and die :-/
6d065fa make sure we're using whatever current umask is and we don't force into lesser levels
2eb8d67 - saving the jobs out per-build - so don't expect a 'builds' dictionary - fetch the jobs from the front end
02f9e18 older python-requests doesn't have attribute :(
9da44f0 when we startup make sure our destdir exists when a worker dies unexpectedly - make sure we kill it and get rid of it
00ccbef move the config reread out of the queuesize change - so we know it will happen in the loop even when no jobs are happening
c4f21e6 log output a little simpler to read
9515168 - touch up how mockremote logs downloads to make it less confusing - note for tomorrow -
707d001 post status to frontend using python-requests
3094750 output what pkgs we're building to the logs
85be22f example config file
cc1f0ba add TODO list from my paper lists
24fbf8d log how long the instance spawn takes
25b9a4f remove/add todo's
04b2f89 add another TODO note
b336c6d more todo's highlighting a bug
7ae3974 rename file to README and explain where to look
d05eb48 Merge branch 'skvidal-backend'
10 years, 9 months
[copr] master: Merge branch 'skvidal-backend' (d05eb48)
by skvidal@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : master
>---------------------------------------------------------------
commit d05eb48433da4dabbe67d58a7fdeb91c2177c1be
Merge: 5fdd930 7ae3974
Author: Seth Vidal <skvidal(a)fedoraproject.org>
Date: Mon Dec 10 17:02:09 2012 -0500
Merge branch 'skvidal-backend'
* skvidal-backend: (63 commits)
rename file to README and explain where to look
...
>---------------------------------------------------------------
TODO-backend | 19 ++
backend/__init__.py | 7 +
backend/dispatcher.py | 300 +++++++++++++++++++++++
backend/errors.py | 12 +
backend/mockremote.py | 645 +++++++++++++++++++++++++++++++++++++++++++++++++
copr-be.conf.example | 16 ++
copr-be.py | 263 ++++++++++++++++++++
7 files changed, 1262 insertions(+), 0 deletions(-)
diff --git a/TODO-backend b/TODO-backend
new file mode 100644
index 0000000..4cc5ca1
--- /dev/null
+++ b/TODO-backend
@@ -0,0 +1,19 @@
+
+- change instance type by build request for more mem/procs/extend timeouts
+ - use extra-vars?
+ - need ansible 0.9?
+- auto-timer/cleanup script for old instances that may have been orphaned
+- prune out builders when we drop the number of them active
+- LOADS of fixme and catching weird conditions
+- make logging from mockremote more sane and coinsistent
+- mock configs should be pushed to instances at creation time
+ - single url to repos, not mirrorlists
+- consider making each worker return job to a completed queue so the primary
+ process can do other kinds of notification
+- email notifications from backend?
+- refactor mockremote/dispatcher.worker together?
+- work on a way to find and cancel a specific build that's happening other than just killing the instance
+- determine if it is properly checking the timeout from a dead instance
+- maybe dump out the PID of the worker that is running so we know which one to kill?
+- failure/success not being returned correctly. Should check for 'fail' in the directories and return based on
+ that. also anything lacking success is a failure.
diff --git a/backend/__init__.py b/backend/__init__.py
new file mode 100644
index 0000000..e25106e
--- /dev/null
+++ b/backend/__init__.py
@@ -0,0 +1,7 @@
+# part of copr backend
+# skvidal(a)fedoraproject.org - seth vidal
+# (c) copyright Red Hat, Inc 2012
+# gplv2+
+
+__version__ = "0.1"
+__author__ = "Seth Vidal"
diff --git a/backend/dispatcher.py b/backend/dispatcher.py
new file mode 100644
index 0000000..9b0399d
--- /dev/null
+++ b/backend/dispatcher.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python -tt
+
+
+import os
+import sys
+import multiprocessing
+import time
+import Queue
+import json
+import mockremote
+from bunch import Bunch
+import errors
+import ansible
+import ansible.playbook
+import ansible.errors
+from ansible import callbacks
+import requests
+
+
+
+
+class SilentPlaybookCallbacks(callbacks.PlaybookCallbacks):
+ ''' playbook callbacks - quietly! '''
+
+ def __init__(self, verbose=False):
+
+ self.verbose = verbose
+
+ def on_start(self):
+ callbacks.call_callback_module('playbook_on_start')
+
+ def on_notify(self, host, handler):
+ callbacks.call_callback_module('playbook_on_notify', host, handler)
+
+ def on_no_hosts_matched(self):
+ callbacks.call_callback_module('playbook_on_no_hosts_matched')
+
+ def on_no_hosts_remaining(self):
+ callbacks.call_callback_module('playbook_on_no_hosts_remaining')
+
+ def on_task_start(self, name, is_conditional):
+ callbacks.call_callback_module('playbook_on_task_start', name, is_conditional)
+
+ def on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None):
+ result = None
+ print "***** VARS_PROMPT WILL NOT BE RUN IN THIS KIND OF PLAYBOOK *****"
+ callbacks.call_callback_module('playbook_on_vars_prompt', varname, private=private, prompt=prompt, encrypt=encrypt, confirm=confirm, salt_size=salt_size, salt=None)
+ return result
+
+ def on_setup(self):
+ callbacks.call_callback_module('playbook_on_setup')
+
+ def on_import_for_host(self, host, imported_file):
+ callbacks.call_callback_module('playbook_on_import_for_host', host, imported_file)
+
+ def on_not_import_for_host(self, host, missing_file):
+ callbacks.call_callback_module('playbook_on_not_import_for_host', host, missing_file)
+
+ def on_play_start(self, pattern):
+ callbacks.call_callback_module('playbook_on_play_start', pattern)
+
+ def on_stats(self, stats):
+ callbacks.call_callback_module('playbook_on_stats', stats)
+
+
+class WorkerCallback(object):
+ def __init__(self, logfile=None):
+ self.logfile = logfile
+
+ def log(self, msg):
+ if not self.logfile:
+ return
+
+ now = time.strftime('%F %T')
+ try:
+ open(self.logfile, 'a').write(str(now) + ': ' + msg + '\n')
+ except (IOError, OSError), e:
+ print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfile, str(e))
+
+
+class Worker(multiprocessing.Process):
+ def __init__(self, opts, jobs, worker_num, ip=None, create=True, callback=None):
+
+ # base class initialization
+ multiprocessing.Process.__init__(self, name="worker-builder")
+
+
+ # job management stuff
+ self.jobs = jobs
+ self.worker_num = worker_num
+ self.ip = ip
+ self.opts = opts
+ self.kill_received = False
+ self.callback = callback
+ self.create = create
+ if not self.callback:
+ self.logfile = self.opts.worker_logdir + '/worker-%s.log' % self.worker_num
+ self.callback = WorkerCallback(logfile = self.logfile)
+
+ if ip:
+ self.callback.log('creating worker: %s' % ip)
+ else:
+ self.callback.log('creating worker: dynamic ip')
+
+ def spawn_instance(self):
+ """call the spawn playbook to startup/provision a building instance"""
+ self.callback.log('spawning instance begin')
+ start = time.time()
+
+ stats = callbacks.AggregateStats()
+ playbook_cb = SilentPlaybookCallbacks(verbose=False)
+ runner_cb = callbacks.DefaultRunnerCallbacks()
+ # fixme - extra_vars to include ip as a var if we need to specify ips
+ # also to include info for instance type to handle the memory requirements of builds
+ play = ansible.playbook.PlayBook(stats=stats, playbook=self.opts.spawn_playbook,
+ callbacks=playbook_cb, runner_callbacks=runner_cb,
+ remote_user='root')
+
+ play.run()
+ self.callback.log('spawning instance end')
+ self.callback.log('Instance spawn/provision took %s sec' % (time.time() - start))
+
+ if self.ip:
+ return self.ip
+
+ for i in play.SETUP_CACHE:
+ if i =='localhost':
+ continue
+ return i
+
+ # if we get here we're in trouble
+ self.callback.log('No IP back from spawn_instance - dumping cache output')
+ self.callback.log(str(play.SETUP_CACHE))
+ self.callback.log(str(play.stats.summarize('localhost')))
+ self.callback.log('Test spawn_instance playbook manually')
+
+ return None
+
+ def terminate_instance(self,ip):
+ """call the terminate playbook to destroy the building instance"""
+ self.callback.log('terminate instance begin')
+
+ stats = callbacks.AggregateStats()
+ playbook_cb = SilentPlaybookCallbacks(verbose=False)
+ runner_cb = callbacks.DefaultRunnerCallbacks()
+ play = ansible.playbook.PlayBook(host_list=[ip], stats=stats, playbook=self.opts.terminate_playbook,
+ callbacks=playbook_cb, runner_callbacks=runner_cb,
+ remote_user='root')
+
+ play.run()
+ self.callback.log('terminate instance end')
+
+ def parse_job(self, jobfile):
+ # read the json of the job in
+ # break out what we need return a bunch of the info we need
+ build = json.load(open(jobfile))
+ jobdata = Bunch()
+ jobdata.pkgs = build['pkgs'].split(' ')
+ jobdata.repos = [r for r in build['repos'].split(' ') if r.strip() ]
+ jobdata.chroots = build['chroots'].split(' ')
+ jobdata.memory_reqs = build['memory_reqs']
+ jobdata.timeout = build['timeout']
+ jobdata.destdir = self.opts.destdir + '/' + build['copr']['owner']['name'] + '/' + build['copr']['name'] + '/'
+ jobdata.build_id = build['id']
+ jobdata.results = self.opts.results_baseurl + '/' + build['copr']['owner']['name'] + '/' + build['copr']['name'] + '/'
+ jobdata.copr_id = build['copr']['id']
+ jobdata.user_id = build['user_id']
+ return jobdata
+
+ # maybe we move this to the callback?
+ def post_to_frontend(self, data):
+ """send data to frontend"""
+
+ headers = {'content-type': 'application/json'}
+ url='%s/update_builds/' % self.opts.frontend_url
+ auth=('user', self.opts.frontend_auth)
+
+ msg = None
+ try:
+ r = requests.post(url, data=json.dumps(data), auth=auth,
+ headers=headers)
+ if r.status_code != 200:
+ msg = 'Failed to submit to frontend: %s: %s' % (r.status_code, r.text)
+ except requests.RequestException, e:
+ msg = 'Post request failed: %s' % e
+
+ if msg:
+ self.callback.log(msg)
+ return False
+
+ return True
+
+ # maybe we move this to the callback?
+ def mark_started(self, job):
+
+ build = {'id':job.build_id,
+ 'started_on': job.started_on,
+ 'results': job.results,
+ }
+ data = {'builds':[build]}
+
+ if not self.post_to_frontend(data):
+ raise errors.CoprWorkerError, "Could not communicate to front end to submit status info"
+
+ # maybe we move this to the callback?
+ def return_results(self, job):
+ self.callback.log('%s status %s. Took %s seconds' % (job.build_id, job.status, job.ended_on - job.started_on))
+
+ build = {'id':job.build_id,
+ 'ended_on': job.ended_on,
+ 'status': job.status,
+ }
+ data = {'builds':[build]}
+
+ if not self.post_to_frontend(data):
+ raise errors.CoprWorkerError, "Could not communicate to front end to submit results"
+
+ os.unlink(job.jobfile)
+
+ def run(self):
+ # worker should startup and check if it can function
+ # for each job it takes from the jobs queue
+ # run opts.setup_playbook to create the instance
+ # do the build (mockremote)
+ # terminate the instance
+
+ while not self.kill_received:
+ try:
+ jobfile = self.jobs.get()
+ except Queue.Empty:
+ break
+
+ # parse the job json into our info
+ job = self.parse_job(jobfile)
+
+ # FIXME
+ # this is our best place to sanity check the job before starting
+ # up any longer process
+
+ job.jobfile = jobfile
+
+ # spin up our build instance
+ if self.create:
+ try:
+ ip = self.spawn_instance()
+ if not ip:
+ raise errors.CoprWorkerError, "No IP found from creating instance"
+
+ except ansible.errors.AnsibleError, e:
+ self.callback.log('failure to setup instance: %s' % e)
+ raise
+
+ status = 1
+ job.started_on = time.time()
+ self.mark_started(job)
+
+ for chroot in job.chroots:
+
+ chroot_destdir = job.destdir + '/' + chroot
+ # setup our target dir locally
+ if not os.path.exists(chroot_destdir):
+ try:
+ os.makedirs(chroot_destdir)
+ except (OSError, IOError), e:
+ msg = "Could not make results dir for job: %s - %s" % (chroot_destdir, str(e))
+ self.callback.log(msg)
+ status = 0
+ continue
+
+ # FIXME
+ # need a plugin hook or some mechanism to check random
+ # info about the pkgs
+ # this should use ansible to download the pkg on the remote system
+ # and run a series of checks on the package before we
+ # start the build - most importantly license checks.
+
+
+ self.callback.log('Starting build: id=%r builder=%r timeout=%r destdir=%r chroot=%r repos=%r' % (job.build_id,ip, job.timeout, job.destdir, chroot, str(job.repos)))
+ self.callback.log('building pkgs: %s' % ' '.join(job.pkgs))
+ try:
+ chrootlogfile = chroot_destdir + '/mockremote.log'
+ mr = mockremote.MockRemote(builder=ip, timeout=job.timeout,
+ destdir=job.destdir, chroot=chroot, cont=True, recurse=True,
+ repos=job.repos,
+ callback=mockremote.CliLogCallBack(quiet=True,logfn=chrootlogfile))
+ mr.build_pkgs(job.pkgs)
+ except mockremote.MockRemoteError, e:
+ # record and break
+ self.callback.log('%s - %s' % (ip, e))
+ status = 0 # failure
+ self.callback.log('Finished build: builder=%r timeout=%r destdir=%r chroot=%r repos=%r' % (ip, job.timeout, job.destdir, chroot, str(job.repos)))
+
+ job.ended_on = time.time()
+ job.status = status
+ self.return_results(job)
+ self.callback.log('worker finished build: %s' % ip)
+ # clean up the instance
+ if self.create:
+ self.terminate_instance(ip)
+
diff --git a/backend/errors.py b/backend/errors.py
new file mode 100644
index 0000000..ae8ac34
--- /dev/null
+++ b/backend/errors.py
@@ -0,0 +1,12 @@
# copr error/exceptions
class CoprBackendError(Exception):
    """Base class for every error raised by the copr backend."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg


class CoprWorkerError(CoprBackendError):
    """Error raised from within a backend worker process."""
    pass
+
diff --git a/backend/mockremote.py b/backend/mockremote.py
new file mode 100755
index 0000000..2c641c8
--- /dev/null
+++ b/backend/mockremote.py
@@ -0,0 +1,645 @@
+#!/usr/bin/python -tt
+# by skvidal
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+# copyright 2012 Red Hat, Inc.
+
+
+# take list of pkgs
+# take single hostname
+# send 1 pkg at a time to host
+# build in remote w/mockchain
+# rsync results back
+# repeat
+# take args from mockchain (more or less)
+
+
+import os
+import sys
+import subprocess
+
+import ansible.runner
+import optparse
+from operator import methodcaller
+import time
+import socket
+import traceback
+
# where we should execute mockchain from on the remote
mockchain='/usr/bin/mockchain'
# rsync path
rsync='/usr/bin/rsync'

# defaults for MockRemote and the command-line options below
DEF_REMOTE_BASEDIR='/var/tmp'  # remote dir under which per-run tempdirs are created
DEF_TIMEOUT=3600  # seconds allowed for remote commands before giving up
DEF_REPOS = []  # extra repo baseurls added to the mock chroot config
DEF_CHROOT= None  # no default chroot - the caller must supply one
DEF_USER = 'mockbuilder'  # account used to ssh into the builder host
DEF_DESTDIR = os.getcwd()  # results are downloaded into cwd unless overridden
+
class SortedOptParser(optparse.OptionParser):
    '''Optparser which sorts the options by opt before outputting --help'''

    def format_help(self, formatter=None):
        # order options by their canonical option string, then let the
        # stock OptionParser render the (now sorted) help text
        self.option_list.sort(key=lambda opt: opt.get_opt_string())
        return optparse.OptionParser.format_help(self, formatter=None)
+
+
def createrepo(path):
    """Run /usr/bin/createrepo over *path*, using --update when repodata
    already exists there.  Returns (returncode, stdout, stderr)."""
    argv = ['/usr/bin/createrepo']
    if os.path.exists(path + '/repodata/repomd.xml'):
        argv.append('--update')
    argv.append(path)
    proc = subprocess.Popen(argv,
               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    return proc.returncode, out, err
+
def read_list_from_file(fn):
    """Read a list of items (package paths/urls) from file *fn*.

    Lines are stripped of surrounding whitespace; lines starting with '#'
    are skipped.  Blank lines are kept as empty strings (historical
    behavior preserved).  Returns the list of remaining lines.
    """
    lst = []
    # use a context manager so the file handle is closed even on error
    # (the original leaked the handle)
    with open(fn, 'r') as f:
        for line in f:
            line = line.strip()
            if line.startswith('#'):
                continue
            lst.append(line)
    return lst
+
def log(lf, msg):
    """Append *msg*, timestamped, to logfile *lf* and echo it to stdout.

    A falsy *lf* disables file logging; failures to write the logfile are
    reported on stdout but are never fatal.
    """
    if lf:
        now = time.time()
        try:
            # open/close per call so the handle is not leaked
            # (the original never closed the file object)
            with open(lf, 'a') as fh:
                fh.write(str(now) + ':' + msg + '\n')
        except (IOError, OSError) as e:
            print('Could not write to logfile %s - %s' % (lf, str(e)))
    print(msg)
+
def get_ans_results(results, hostname):
    """Return the per-host result dict for *hostname* from an ansible
    results structure, checking 'dark' (unreachable) hosts first.
    Returns {} when the host appears in neither section."""
    for section in ('dark', 'contacted'):
        host_map = results[section]
        if hostname in host_map:
            return host_map[hostname]
    return {}
+
def _create_ans_conn(hostname, username, timeout):
    """Build an ansible Runner targeting exactly one host, connecting as
    *username* with the given command *timeout* (seconds)."""
    return ansible.runner.Runner(
        remote_user=username,
        host_list=[hostname],
        pattern=hostname,
        forks=1,
        timeout=timeout)
+
def check_for_ans_error(results, hostname, err_codes=(), success_codes=(0,),
        return_on_error=('stdout', 'stderr')):
    """Inspect an ansible results dict for errors on *hostname*.

    Returns (error, err_results).  *error* is True when the host was
    unreachable ('dark'), when its return code matched *err_codes* or
    missed *success_codes*, or when the module reported failed=True.
    *err_results* always carries 'msg' on error and may also include
    'rc' plus whichever keys named in *return_on_error* the host
    reported (stdout/stderr by default).

    Defaults are tuples rather than lists: the original used mutable
    default arguments (a shared-state pitfall); membership tests are
    unchanged.
    """
    err_results = {}

    if 'dark' in results and hostname in results['dark']:
        err_results['msg'] = "Error: Could not contact/connect to %s." % hostname
        return (True, err_results)

    error = False

    if err_codes or success_codes:
        if hostname in results['contacted']:
            if 'rc' in results['contacted'][hostname]:
                rc = int(results['contacted'][hostname]['rc'])
                err_results['rc'] = rc
                # check for err codes first
                if rc in err_codes:
                    error = True
                    err_results['msg'] = 'rc %s matched err_codes' % rc
                elif rc not in success_codes:
                    error = True
                    err_results['msg'] = 'rc %s not in success_codes' % rc
            elif 'failed' in results['contacted'][hostname] and results['contacted'][hostname]['failed']:
                error = True
                err_results['msg'] = 'results included failed as true'

        if error:
            for item in return_on_error:
                if item in results['contacted'][hostname]:
                    err_results[item] = results['contacted'][hostname][item]

    return error, err_results
+
+
class MockRemoteError(Exception):
    """Base error for mockremote operations; carries the message in .msg."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg


class BuilderError(MockRemoteError):
    """Raised when a remote builder host is unusable or misbehaves."""
    pass
+
class DefaultCallBack(object):
    """Progress-callback base for MockRemote.

    The start/end hooks are no-ops here; subclasses override them.
    Keyword args: quiet (suppress terminal output), logfn (optional log
    file path, unused by this base class).
    """

    def __init__(self, **kwargs):
        self.quiet = kwargs.get('quiet', False)
        self.logfn = kwargs.get('logfn', None)

    def start_build(self, pkg):
        pass

    def end_build(self, pkg):
        pass

    def start_download(self, pkg):
        pass

    def end_download(self, pkg):
        pass

    def error(self, msg):
        self.log("Error: %s" % msg)

    def log(self, msg):
        # print() with a single argument behaves identically on py2/py3
        if not self.quiet:
            print(msg)
+
class CliLogCallBack(DefaultCallBack):
    """Callback that logs build progress to a file and/or the terminal."""

    def __init__(self, **kwargs):
        DefaultCallBack.__init__(self, **kwargs)

    def start_build(self, pkg):
        msg = "Start build: %s" % pkg
        self.log(msg)

    def end_build(self, pkg):
        msg = "End Build: %s" % pkg
        self.log(msg)

    def start_download(self, pkg):
        msg = "Start retrieve results for: %s" % pkg
        self.log(msg)

    def end_download(self, pkg):
        msg = "End retrieve results for: %s" % pkg
        self.log(msg)

    def error(self, msg):
        self.log("Error: %s" % msg)

    def log(self, msg):
        """Append timestamped *msg* to self.logfn (if set) and echo it
        to stdout unless self.quiet."""
        if self.logfn:
            now = time.time()
            try:
                # context manager closes the handle; the original leaked it
                with open(self.logfn, 'a') as lf:
                    lf.write(str(now) + ':' + msg + '\n')
            except (IOError, OSError) as e:
                # bugfix: the failure message referenced self.lf, an
                # attribute that does not exist (raised AttributeError
                # instead of reporting the real problem)
                sys.stderr.write('Could not write to logfile %s - %s\n'
                                 % (self.logfn, str(e)))
        if not self.quiet:
            print(msg)
+
class Builder(object):
    """A single remote build host driven over ansible/ssh.

    The constructor verifies the host (resolvable, reachable as
    *username*, has mock/rsync/mockchain and the requested chroot
    config installed) and raises BuilderError when it is unusable.
    Afterwards build() and download() operate on one src.rpm at a time.
    """

    def __init__(self, hostname, username, timeout, mockremote):
        # mockremote is the owning MockRemote instance; we read its
        # chroot/repos/remote_basedir/remote_tempdir settings from it
        self.hostname = hostname
        self.username = username
        self.timeout = timeout
        self.chroot = mockremote.chroot
        self.repos = mockremote.repos
        self.mockremote = mockremote
        self.checked = False
        self._tempdir = None
        # check out the host - make sure it can build/be contacted/etc
        self.check()
        # if we're at this point we've connected and done stuff on the host
        self.conn = _create_ans_conn(self.hostname, self.username, self.timeout)

    @property
    def remote_build_dir(self):
        # where mockchain is told to put its output on the remote host
        return self.tempdir + '/build/'

    @property
    def tempdir(self):
        """Per-run working directory on the remote host.

        Uses MockRemote's fixed remote_tempdir when one is configured;
        otherwise creates one lazily via mktemp and caches it.
        """
        if self.mockremote.remote_tempdir:
            return self.mockremote.remote_tempdir

        if self._tempdir:
            return self._tempdir

        cmd='/bin/mktemp -d %s/%s-XXXXX' % (self.mockremote.remote_basedir, 'mockremote')
        self.conn.module_name="shell"
        self.conn.module_args = str(cmd)
        results = self.conn.run()
        tempdir = None
        # only one host is targeted, so this loop picks up the single
        # mktemp result (the last one wins if there were ever several)
        for hn, resdict in results['contacted'].items():
            tempdir = resdict['stdout']

        # if still nothing then we've broken
        if not tempdir:
            raise BuilderError('Could not make tmpdir on %s' % self.hostname)

        cmd = "/bin/chmod 755 %s" % tempdir
        self.conn.module_args = str(cmd)
        self.conn.run()
        self._tempdir = tempdir

        return self._tempdir

    @tempdir.setter
    def tempdir(self, value):
        self._tempdir = value

    def _get_remote_pkg_dir(self, pkg):
        # the pkg will build into a dir by mockchain named:
        # $tempdir/build/results/$chroot/$packagename
        s_pkg = os.path.basename(pkg)
        pdn = s_pkg.replace('.src.rpm', '')
        remote_pkg_dir = self.remote_build_dir + '/results/' + self.chroot + '/' + pdn
        return remote_pkg_dir

    def build(self, pkg):
        """Build *pkg* (a local path or a url to a src.rpm) remotely.

        Local packages are copied to the remote tempdir first; urls are
        handed to mockchain as-is.  Success is determined by the
        presence of the 'success' marker file mockchain leaves in the
        per-package results dir.
        """
        # build the pkg passed in
        # add pkg to various lists
        # check for success/failure of build
        # return success/failure,stdout,stderr of build command
        # returns success_bool, out, err

        success = False

        # check if pkg is local or http
        dest = None
        if os.path.exists(pkg):
            dest = self.tempdir + '/' + os.path.basename(pkg)
            self.conn.module_name="copy"
            margs = 'src=%s dest=%s' % (pkg, dest)
            self.conn.module_args = str(margs)
            self.mockremote.callback.log("Sending %s to %s to build" % (os.path.basename(pkg), self.hostname))

            # FIXME should probably check this but <shrug>
            self.conn.run()
        else:
            dest = pkg

        # construct the mockchain command
        buildcmd = '%s -r %s -l %s ' % (mockchain, self.chroot, self.remote_build_dir)
        for r in self.repos:
            buildcmd += '-a %s ' % r

        buildcmd += dest

        #print ' Running %s on %s' % (buildcmd, hostname)
        # run the mockchain command async
        # this runs it sync - FIXME
        self.conn.module_name="shell"
        self.conn.module_args = str(buildcmd)
        results = self.conn.run()

        is_err, err_results = check_for_ans_error(results, self.hostname, success_codes=[0],
                return_on_error=['stdout', 'stderr'])
        if is_err:
            return success, err_results.get('stdout', ''), err_results.get('stderr', '')

        # we know the command ended successfully but not if the pkg built successfully
        myresults = get_ans_results(results, self.hostname)
        out = myresults.get('stdout', '')
        err = myresults.get('stderr', '')

        # mockchain drops a 'success' marker file on a successful build
        successfile = self._get_remote_pkg_dir(pkg) + '/success'
        testcmd = '/usr/bin/test -f %s' % successfile
        self.conn.module_args = str(testcmd)
        results = self.conn.run()
        is_err, err_results = check_for_ans_error(results, self.hostname, success_codes=[0])
        if not is_err:
            success = True

        return success, out, err

    def download(self, pkg, destdir):
        """Rsync the remote results dir for *pkg* into local *destdir*.

        Returns (success_bool, stdout, stderr) from the rsync run.
        """
        # download the pkg to destdir using rsync + ssh
        # return success/failure, stdout, stderr

        success = False
        rpd = self._get_remote_pkg_dir(pkg)
        destdir = "'" + destdir.replace("'", "'\\''") + "'" # make spaces work w/our rsync command below :(
        # build rsync command line from the above
        remote_src = '%s@%s:%s' % (self.username, self.hostname, rpd)
        ssh_opts = "'ssh -o PasswordAuthentication=no -o StrictHostKeyChecking=no'"
        command = "%s -avH -e %s %s %s/" % (rsync, ssh_opts, remote_src, destdir)
        cmd = subprocess.Popen(command, shell=True,
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        # rsync results into opts.destdir
        out, err = cmd.communicate()
        if cmd.returncode:
            success = False
        else:
            success = True

        return success, out, err

    def check(self):
        """Verify the host is resolvable and has the needed tooling.

        Sets self.checked on success; raises BuilderError with the
        collected error messages otherwise.
        """
        # do check of host
        # set checked if successful
        # return success/failure, errorlist

        if self.checked:
            return True, []

        errors = []

        try:
            socket.gethostbyname(self.hostname)
        except socket.gaierror:
            raise BuilderError('%s could not be resolved' % self.hostname)

        # connect as user
        ans = ansible.runner.Runner(host_list=[self.hostname], pattern='*',
                remote_user=self.username, forks=1, timeout=20)
        ans.module_name = "shell"
        ans.module_args = str("/bin/rpm -q mock rsync")
        res = ans.run()
        # check for mock/rsync from results
        is_err, err_results = check_for_ans_error(res, self.hostname, success_codes=[0])
        if is_err:
            # an rc in the results means rpm -q itself failed (pkg missing);
            # no rc means we could not even run the command
            if 'rc' in err_results:
                errors.append('Warning: %s does not have mock or rsync installed' % self.hostname)
            else:
                errors.append(err_results['msg'])


        # test for path existence for mockchain and chroot config for this chroot
        ans.module_name = "shell"
        ans.module_args = str("/usr/bin/test -f %s && /usr/bin/test -f /etc/mock/%s.cfg" % (mockchain, self.chroot))
        res = ans.run()

        is_err, err_results = check_for_ans_error(res, self.hostname, success_codes=[0])
        if is_err:
            if 'rc' in err_results:
                errors.append('Warning: %s lacks mockchain or the chroot %s' % (self.hostname, self.chroot))
            else:
                errors.append(err_results['msg'])

        if not errors:
            self.checked = True
        else:
            msg = '\n'.join(errors)
            raise BuilderError(msg)
+
+
class MockRemote(object):
    """Drive mockchain builds of src.rpms on one remote builder host and
    rsync the results back under *destdir*/<chroot>/.

    cont: keep going after a failed build instead of raising.
    recurse: retry failed packages once the rest of the list has built
    (useful when packages depend on each other).
    """

    def __init__(self, builder=None, user=DEF_USER, timeout=DEF_TIMEOUT,
            destdir=DEF_DESTDIR, chroot=DEF_CHROOT, cont=False, recurse=False,
            repos=DEF_REPOS, callback=None,
            remote_basedir=DEF_REMOTE_BASEDIR, remote_tempdir=None):

        self.destdir = destdir
        self.chroot = chroot
        self.repos = repos
        self.cont = cont
        self.recurse = recurse
        self.callback = callback
        self.remote_basedir = remote_basedir
        self.remote_tempdir = remote_tempdir

        if not self.callback:
            self.callback = DefaultCallBack()

        # bugfix: validate the chroot *before* constructing the Builder.
        # Builder.check() probes /etc/mock/<chroot>.cfg, so a missing
        # chroot used to surface as a confusing BuilderError about
        # '/etc/mock/None.cfg' instead of this clear message.
        if not self.chroot:
            raise MockRemoteError("No chroot specified!")

        self.callback.log("Setting up builder: %s" % builder)
        self.builder = Builder(builder, user, timeout, self)

        self.failed = []
        self.finished = []
        self.pkg_list = []

    def _get_pkg_destpath(self, pkg):
        # local results dir for one pkg: destdir/<chroot>/<name-ver-rel>
        s_pkg = os.path.basename(pkg)
        pdn = s_pkg.replace('.src.rpm', '')
        resdir = '%s/%s/%s' % (self.destdir, self.chroot, pdn)
        resdir = os.path.normpath(resdir)
        return resdir

    def build_pkgs(self, pkgs=None):
        """Build every pkg in *pkgs* (defaults to self.pkg_list).

        Skips packages already marked successful in destdir; with
        recurse=True, failed packages are retried in a second pass as
        long as each pass makes progress.  Raises MockRemoteError on
        download/build failure unless cont=True.
        """
        if not pkgs:
            pkgs = self.pkg_list

        built_pkgs = []
        # NOTE(review): downloaded_pkgs is never populated anywhere - the
        # lookup in the failure report below is effectively dead code
        downloaded_pkgs = {}

        try_again = True
        to_be_built = pkgs
        while try_again:
            self.failed = []
            just_built = []
            for pkg in to_be_built:
                if pkg in just_built:
                    self.callback.log("skipping duplicate pkg in this list: %s" % pkg)
                    continue
                else:
                    just_built.append(pkg)

                p_path = self._get_pkg_destpath(pkg)

                # check the destdir to see if these pkgs need to be built
                if os.path.exists(p_path):
                    if os.path.exists(p_path + '/success'):
                        self.callback.log("Skipping already built pkg %s" % os.path.basename(pkg))
                        continue
                    # if we're asking to build it and it is marked as fail - nuke
                    # the failure and try rebuilding it
                    elif os.path.exists(p_path + '/fail'):
                        os.unlink(p_path + '/fail')

                # off to the builder object
                # building
                self.callback.start_build(pkg)
                b_status, b_out, b_err = self.builder.build(pkg)
                self.callback.end_build(pkg)

                # downloading
                self.callback.start_download(pkg)
                # mockchain makes things with the chroot appended - so suck down
                # that pkg subdir from w/i that location
                d_ret, d_out, d_err = self.builder.download(pkg, self.destdir + '/' + self.chroot)
                if not d_ret:
                    msg = "Failure to download %s: %s" % (pkg, d_out + d_err)
                    if not self.cont:
                        raise MockRemoteError(msg)
                    self.callback.error(msg)

                self.callback.end_download(pkg)
                # write out whatever came from the builder call into the destdir/chroot
                if not os.path.exists(self.destdir + '/' + self.chroot):
                    os.makedirs(self.destdir + '/' + self.chroot)
                r_log = open(self.destdir + '/' + self.chroot + '/mockchain.log', 'a')
                r_log.write('%s\n' % pkg)
                r_log.write(b_out)
                if b_err:
                    r_log.write('\nstderr\n')
                    r_log.write(b_err)
                r_log.close()

                # checking where to stick stuff
                if not b_status:
                    if self.recurse:
                        self.failed.append(pkg)
                        self.callback.error("Error building %s, will try again" % os.path.basename(pkg))
                    else:
                        # bugfix: message previously read 'logs/resultsin'
                        msg = "Error building %s\nSee logs/results in %s" % (os.path.basename(pkg), self.destdir)
                        if not self.cont:
                            raise MockRemoteError(msg)
                        self.callback.error(msg)
                else:
                    self.callback.log("Success building %s" % os.path.basename(pkg))
                    built_pkgs.append(pkg)
                    # createrepo with the new pkgs
                    rc, out, err = createrepo(self.destdir)
                    if err.strip():
                        self.callback.error("Error making local repo: %s" % self.destdir)
                        self.callback.error("%s" % err)
                        # FIXME - maybe clean up .repodata and .olddata here?

            if self.failed:
                # retry only if the last pass built at least one pkg;
                # otherwise we are stuck and report the stragglers
                if len(self.failed) != len(to_be_built):
                    to_be_built = self.failed
                    try_again = True
                    self.callback.log('Trying to rebuild %s failed pkgs' % len(self.failed))
                else:
                    self.callback.log("Tried twice - following pkgs could not be successfully built:")
                    for pkg in self.failed:
                        msg = pkg
                        if pkg in downloaded_pkgs:
                            msg = downloaded_pkgs[pkg]
                        self.callback.log(msg)

                    try_again = False
            else:
                try_again = False
+
+
+
def parse_args(args):
    """Parse mockremote's command line into (opts, pkgs).

    Prints a message and exits with status 1 when required arguments are
    missing or a repo url has an unsupported scheme.
    """
    parser = SortedOptParser("mockremote -b hostname -u user -r chroot pkg pkg pkg")
    parser.add_option('-r', '--root', default=DEF_CHROOT, dest='chroot',
                      help="chroot config name/base to use in the mock build")
    parser.add_option('-c', '--continue', default=False, action='store_true',
                      dest='cont',
                      help="if a pkg fails to build, continue to the next one")
    parser.add_option('-a', '--addrepo', default=DEF_REPOS, action='append',
                      dest='repos',
                      help="add these repo baseurls to the chroot's yum config")
    parser.add_option('--recurse', default=False, action='store_true',
                      help="if more than one pkg and it fails to build, try to build the rest and come back to it")
    parser.add_option('--log', default=None, dest='logfile',
                      help="log to the file named by this option, defaults to not logging")
    parser.add_option("-b", "--builder", dest='builder', default=None,
                      help="builder to use")
    parser.add_option("-u", dest="user", default=DEF_USER,
                      help="user to run as/connect as on builder systems")
    parser.add_option("-t", "--timeout", dest="timeout", type="int",
                      default=DEF_TIMEOUT, help="maximum time in seconds a build can take to run")
    parser.add_option("--destdir", dest="destdir", default=DEF_DESTDIR,
                      help="place to download all the results/packages")
    parser.add_option("--packages", dest="packages_file", default=None,
                      help="file to read list of packages from")
    parser.add_option("-q", "--quiet", dest="quiet", default=False, action="store_true",
                      help="output very little to the terminal")

    opts, args = parser.parse_args(args)

    def _die(msg):
        # shared exit path for all fatal argument errors
        print(msg)
        sys.exit(1)

    if not opts.builder:
        _die("Must specify a system to build on")

    if opts.packages_file and os.path.exists(opts.packages_file):
        args.extend(read_list_from_file(opts.packages_file))

    #args = list(set(args)) # poor man's 'unique' - this also changes the order
    # :(

    if not args:
        _die("Must specify at least one pkg to build")

    if not opts.chroot:
        _die("Must specify a mock chroot")

    for url in opts.repos:
        # startswith accepts a tuple of allowed prefixes
        if not url.startswith(('http', 'file://')):
            _die("Only http[s] or file urls allowed for repos")

    return opts, args
+
+
+#FIXME
+# play with createrepo run at the end of each build
+# need to output the things that actually worked :)
+
+
def main(args):
    """CLI entry point: parse arguments and build every pkg remotely."""
    # parse args
    opts, pkgs = parse_args(args)

    if not os.path.exists(opts.destdir):
        os.makedirs(opts.destdir)

    try:
        # setup our callback
        callback = CliLogCallBack(logfn=opts.logfile, quiet=opts.quiet)
        # our mockremote instance
        mr = MockRemote(builder=opts.builder, user=opts.user,
                timeout=opts.timeout, destdir=opts.destdir, chroot=opts.chroot,
                cont=opts.cont, recurse=opts.recurse, repos=opts.repos,
                callback=callback)

        # FIXMES
        # things to think about doing:
        # output the remote tempdir when you start up
        # output the number of pkgs
        # output where you're writing things to
        # consider option to sync over destdir to the remote system to use
        # as a local repo for the build
        #

        if not opts.quiet:
            print("Building %s pkgs" % len(pkgs))

        mr.build_pkgs(pkgs)

        if not opts.quiet:
            print("Output written to: %s" % mr.destdir)

    except MockRemoteError as e:
        sys.stderr.write("Error on build:\n")
        sys.stderr.write(str(e) + "\n")
        return
+
+
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except Exception as e:
        # report, dump the traceback, and exit non-zero
        print("ERROR: %s - %s" % (str(type(e)), str(e)))
        traceback.print_exc()
        sys.exit(1)
diff --git a/copr-be.conf.example b/copr-be.conf.example
new file mode 100644
index 0000000..ffeb235
--- /dev/null
+++ b/copr-be.conf.example
@@ -0,0 +1,16 @@
+[backend]
+results_baseurl=http://copr-be.cloud.fedoraproject.org/results
+frontend_url=http://copr-fe.cloud.fedoraproject.org/backend
+frontend_auth=backend_password_from_fe_config
+spawn_playbook=/srv/copr-work/provision/builderpb.yml
+terminate_playbook=/srv/copr-work/provision/terminatepb.yml
+jobsdir=/srv/copr-work/jobs
+destdir=/srv/copr-repo/results
+sleeptime=30
+num_workers=5
+logfile=/srv/copr-work/logs/copr.log
+worker_logdir=/srv/copr-work/logs/workers/
+
+
+[builder]
+timeout=3600
diff --git a/copr-be.py b/copr-be.py
new file mode 100644
index 0000000..6631ac4
--- /dev/null
+++ b/copr-be.py
@@ -0,0 +1,263 @@
+#!/usr/bin/python -tt
+
+
+import sys
+import os
+import glob
+import time
+import multiprocessing
+from backend.dispatcher import Worker
+from backend import errors
+from bunch import Bunch
+import ConfigParser
+import optparse
+import json
+import requests
+
+def _get_conf(cp, section, option, default):
+ """to make returning items from config parser less irritating"""
+ if cp.has_section(section) and cp.has_option(section,option):
+ return cp.get(section, option)
+ return default
+
+
class CoprBackend(object):
    """Main copr backend daemon object.

    Polls the frontend for waiting builds, writes each one out as a JSON
    job file under opts.jobsdir, and feeds those files to a pool of
    Worker processes through a multiprocessing queue.
    """

    def __init__(self, config_file=None, ext_opts=None):
        # read in config file
        # put all the config items into a single self.opts bunch

        if not config_file:
            raise errors.CoprBackendError, "Must specify config_file"

        self.config_file = config_file
        self.ext_opts = ext_opts # to stow our cli options for read_conf()
        self.opts = self.read_conf()

        logdir = os.path.dirname(self.opts.logfile)
        if not os.path.exists(logdir):
            os.makedirs(logdir, mode=0750)

        if not os.path.exists(self.opts.destdir):
            os.makedirs(self.opts.destdir, mode=0755)

        # setup a log file to write to
        self.logfile = self.opts.logfile
        self.log("Starting up new copr-be instance")


        if not os.path.exists(self.opts.worker_logdir):
            os.makedirs(self.opts.worker_logdir, mode=0750)

        # jobs: queue of job-file paths consumed by the Worker processes
        # added_jobs: build ids already enqueued, so we never queue twice
        self.jobs = multiprocessing.Queue()
        self.workers = []
        self.added_jobs = []


    def read_conf(self):
        "read in config file - return Bunch of config data"
        opts = Bunch()
        cp = ConfigParser.ConfigParser()
        try:
            cp.read(self.config_file)
            opts.results_baseurl = _get_conf(cp,'backend', 'results_baseurl', 'http://copr')
            opts.frontend_url = _get_conf(cp, 'backend', 'frontend_url', 'http://coprs/rest/api')
            opts.frontend_auth = _get_conf(cp,'backend', 'frontend_auth', 'PASSWORDHERE')
            opts.spawn_playbook = _get_conf(cp,'backend','spawn_playbook', '/etc/copr/builder_playbook.yml')
            opts.terminate_playbook = _get_conf(cp,'backend','terminate_playbook', '/etc/copr/terminate_playbook.yml')
            opts.jobsdir = _get_conf(cp, 'backend', 'jobsdir', None)
            opts.destdir = _get_conf(cp, 'backend', 'destdir', None)
            opts.daemonize = _get_conf(cp, 'backend', 'daemonize', True)
            opts.exit_on_worker = _get_conf(cp, 'backend', 'exit_on_worker', False)
            opts.sleeptime = int(_get_conf(cp, 'backend', 'sleeptime', 10))
            opts.num_workers = int(_get_conf(cp, 'backend', 'num_workers', 8))
            opts.timeout = int(_get_conf(cp, 'builder', 'timeout', 1800))
            opts.logfile = _get_conf(cp, 'backend', 'logfile', '/var/log/copr/backend.log')
            opts.worker_logdir = _get_conf(cp, 'backend', 'worker_logdir', '/var/log/copr/worker/')
            # thoughts for later
            # ssh key for connecting to builders?
            # cloud key stuff?
            #
        except ConfigParser.Error, e:
            raise errors.CoprBackendError, 'Error parsing config file: %s: %s' % (self.config_file, e)


        if not opts.jobsdir or not opts.destdir:
            raise errors.CoprBackendError, "Incomplete Config - must specify jobsdir and destdir in configuration"

        # cli options (ext_opts) override anything from the config file
        if self.ext_opts:
            for v in self.ext_opts:
                setattr(opts, v, self.ext_opts.get(v))
        return opts


    def log(self, msg):
        """Write a timestamped *msg* to the backend logfile (and stdout
        when not daemonized); logging failures are never fatal."""
        now = time.strftime('%F %T')
        output = str(now) + ': ' + msg
        if not self.opts.daemonize:
            print output

        try:
            open(self.logfile, 'a').write(output + '\n')
        except (IOError, OSError), e:
            print >>sys.stderr, 'Could not write to logfile %s - %s' % (self.logfile, str(e))


    def fetch_jobs(self):
        """Ask the frontend for waiting builds and write each new one out
        as a JSON job file under opts.jobsdir."""
        self.log('fetching jobs')
        try:
            r = requests.get('%s/waiting_builds/' % self.opts.frontend_url) # auth stuff here? maybe/maybenot
        except requests.RequestException, e:
            self.log('Error retrieving jobs from %s: %s' % (self.opts.frontend_url, e))
        else:
            r_json = json.loads(r.content) # using old requests on el6 :(
            if 'builds' in r_json:
                self.log('%s jobs returned' % len(r_json['builds']))
                count = 0
                for b in r_json['builds']:
                    if 'id' in b:
                        jobfile = self.opts.jobsdir + '/%s.json' % b['id']
                        # skip jobs we already wrote out or already queued
                        if not os.path.exists(jobfile) and b['id'] not in self.added_jobs:
                            count += 1
                            open(jobfile, 'w').write(json.dumps(b))
                            self.log('Wrote job: %s' % b['id'])
                self.log('New jobs: %s' % count)

    def run(self):
        """Main daemon loop: fetch jobs, enqueue them, keep the worker
        pool at opts.num_workers, and reap dead workers."""
        abort = False
        while not abort:
            self.fetch_jobs()
            for f in sorted(glob.glob(self.opts.jobsdir + '/*.json')):
                n = os.path.basename(f).replace('.json', '')
                if n not in self.added_jobs:
                    self.jobs.put(f)
                    self.added_jobs.append(n)
                    self.log('adding to work queue id %s' % n)

            # re-read config into opts
            self.opts = self.read_conf()

            if self.jobs.qsize():
                self.log("# jobs in queue: %s" % self.jobs.qsize())
                # this handles starting/growing the number of workers
                if len(self.workers) < self.opts.num_workers:
                    self.log("Spinning up more workers for jobs")
                    for i in range(self.opts.num_workers - len(self.workers)):
                        worker_num = len(self.workers) + 1
                        w = Worker(self.opts, self.jobs, worker_num)
                        self.workers.append(w)
                        w.start()
                    self.log("Finished starting worker processes")
                # FIXME - prune out workers
                #if len(self.workers) > self.opts.num_workers:
                #    killnum = len(self.workers) - self.opts.num_workers
                #    for w in self.workers[:killnum]:
                # #insert a poison pill? Kill after something? I dunno.
                # FIXME - if a worker bombs out - we need to check them
                # and startup a new one if it happens
                # check for dead workers and abort
                for w in self.workers:
                    if not w.is_alive():
                        self.log('Worker %d died unexpectedly' % w.worker_num)
                        if self.opts.exit_on_worker:
                            raise errors.CoprBackendError, "Worker died unexpectedly, exiting"
                        else:
                            self.workers.remove(w) # it is not working anymore
                            w.terminate() # kill it with a fire

            time.sleep(self.opts.sleeptime)
+
# lifted from certmaster
def daemonize(pidfile=None):
    """
    Daemonize this process with the UNIX double-fork trick.
    Writes the new PID to the provided file name if not None.
    """

    # read the current umask without changing it (umask() sets-and-returns)
    cur_umask = os.umask(077)
    os.umask(cur_umask)

    pid = os.fork()
    if pid > 0:
        # original parent exits immediately
        sys.exit(0)
    os.chdir("/")
    os.setsid()  # become session leader, detach from the controlling tty
    os.umask(cur_umask)
    # second fork so the daemon can never reacquire a controlling tty
    pid = os.fork()

    os.close(0)
    os.close(1)
    os.close(2)

    # The standard I/O file descriptors are redirected to /dev/null by default.
    if (hasattr(os, "devnull")):
        REDIRECT_TO = os.devnull
    else:
        REDIRECT_TO = "/dev/null"

    # based on http://code.activestate.com/recipes/278731/
    os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)

    os.dup2(0, 1) # standard output (1)
    os.dup2(0, 2) # standard error (2)



    # NOTE(review): both the intermediate parent and the daemon child run
    # the fd redirection above; the intermediate (pid > 0) then records
    # the daemon's pid and exits, while the daemon (pid == 0) returns
    if pid > 0:
        if pidfile is not None:
            open(pidfile, "w").write(str(pid))
        sys.exit(0)
+
def parse_args(args):
    """Parse the copr-be command line; returns a Bunch carrying only the
    options the backend daemon cares about."""
    parser = optparse.OptionParser('\ncopr-be [options]')
    parser.add_option('-c', '--config', default='/etc/copr-be.conf', dest='config_file',
                      help="config file to use for copr-be run")
    parser.add_option('-d', '--daemonize', default=False, dest='daemonize',
                      action='store_true', help="daemonize or not")
    parser.add_option('-p', '--pidfile', default='copr-be.pid', dest='pidfile',
                      help="pid file to use for copr-be if daemonized")
    parser.add_option('-x', '--exit', default=False, dest='exit_on_worker',
                      action='store_true', help="exit on worker failure")

    opts, args = parser.parse_args(args)

    # the config file must exist before we go any further
    if not os.path.exists(opts.config_file):
        print("No config file found at: %s" % opts.config_file)
        sys.exit(1)
    opts.config_file = os.path.abspath(opts.config_file)

    # copy just the fields the backend consumes into a fresh Bunch
    ret_opts = Bunch()
    for name in ('daemonize', 'exit_on_worker', 'pidfile', 'config_file'):
        setattr(ret_opts, name, getattr(opts, name))

    return ret_opts
+
+
+
def main(args):
    """Entry point: build a CoprBackend from CLI options and run it."""
    opts = parse_args(args)

    try:
        cbe = CoprBackend(opts.config_file, ext_opts=opts)
        if opts.daemonize:
            daemonize(opts.pidfile)
        cbe.run()
    except KeyboardInterrupt:
        pass
    except Exception:
        # tear down any workers we managed to start, then re-raise
        print('Killing/Dying')
        if 'cbe' in locals():
            for w in cbe.workers:
                w.terminate()
        raise
+
if __name__ == '__main__':
    try:
        main(sys.argv[1:])
    except KeyboardInterrupt:
        print("\nUser cancelled, may need cleanup\n")
        sys.exit(0)
    except Exception as e:
        print("ERROR: %s - %s" % (str(type(e)), str(e)))
        # FIXME - maybe check on daemonize and do this as a syslog.syslog() call?
        sys.exit(1)
+
10 years, 9 months
[copr] skvidal-backend: rename file to README and explain where to look (7ae3974)
by skvidal@fedorahosted.org
Repository : http://git.fedorahosted.org/cgit/copr.git
On branch : skvidal-backend
>---------------------------------------------------------------
commit 7ae397402e39fba26586207b4d8a6a1501e7c765
Author: Seth Vidal <skvidal(a)fedoraproject.org>
Date: Thu Nov 15 12:20:36 2012 -0500
rename file to README and explain where to look
>---------------------------------------------------------------
README | 7 +++++++
text | 1 -
2 files changed, 7 insertions(+), 1 deletions(-)
diff --git a/README b/README
new file mode 100644
index 0000000..92034f0
--- /dev/null
+++ b/README
@@ -0,0 +1,7 @@
+This is the master copr branch.
+Right now all the work is going on in 2 branches:
+ - skvidal-backend - this is the backend/builder code
+ - bkabrda-workspace - this is the frontend/webinterface code
+
+
+
diff --git a/text b/text
deleted file mode 100644
index 5b9e8fe..0000000
--- a/text
+++ /dev/null
@@ -1 +0,0 @@
-open it up
10 years, 9 months