[Reproducible-commits] [koji] 01/02: Imported Upstream version 1.10.0
Ximin Luo
infinity0 at debian.org
Wed Dec 2 23:28:36 UTC 2015
This is an automated email from the git hooks/post-receive script.
infinity0 pushed a commit to branch master
in repository koji.
commit a9f61153b70bff4e73d8a744ee5c5e32d50b732f
Author: Marek Marczykowski-Górecki <marmarek at invisiblethingslab.com>
Date: Wed Dec 2 15:46:40 2015 +0100
Imported Upstream version 1.10.0
---
.gitignore | 3 +
Authors | 4 +
COPYING | 16 +
LGPL | 458 ++
Makefile | 119 +
builder/Makefile | 42 +
builder/kojid | 4651 ++++++++++++
builder/kojid.conf | 88 +
builder/kojid.init | 99 +
builder/kojid.service | 14 +
builder/kojid.sysconfig | 3 +
builder/mergerepos | 276 +
cli/Makefile | 19 +
cli/koji | 6673 ++++++++++++++++++
cli/koji.conf | 31 +
docs/HOWTO.html | 321 +
docs/Makefile | 4 +
docs/Migrating_to_1.10.txt | 76 +
docs/Migrating_to_1.7.txt | 141 +
docs/Migrating_to_1.8.txt | 112 +
docs/Migrating_to_1.9.txt | 79 +
docs/schema-upgrade-1.2-1.3.sql | 62 +
docs/schema-upgrade-1.3-1.4.sql | 267 +
docs/schema-upgrade-1.4-1.5.sql | 36 +
docs/schema-upgrade-1.6-1.7.sql | 25 +
docs/schema-upgrade-1.7-1.8.sql | 47 +
docs/schema-upgrade-1.8-1.9.sql | 16 +
docs/schema-upgrade-1.9-1.10.sql | 50 +
docs/schema.sql | 813 +++
hub/Makefile | 41 +
hub/httpd.conf | 57 +
hub/hub.conf | 85 +
hub/kojihub.py | 10968 +++++++++++++++++++++++++++++
hub/kojixmlrpc.py | 796 +++
hub/rpmdiff | 248 +
koji.spec | 587 ++
koji/Makefile | 30 +
koji/__init__.py | 2476 +++++++
koji/auth.py | 736 ++
koji/context.py | 112 +
koji/daemon.py | 1190 ++++
koji/db.py | 171 +
koji/plugin.py | 169 +
koji/policy.py | 370 +
koji/server.py | 189 +
koji/ssl/Makefile | 21 +
koji/ssl/SSLCommon.py | 141 +
koji/ssl/SSLConnection.py | 158 +
koji/ssl/XMLRPCServerProxy.py | 178 +
koji/ssl/__init__.py | 1 +
koji/tasks.py | 555 ++
koji/util.py | 611 ++
plugins/Makefile | 24 +
plugins/echo.py | 15 +
plugins/messagebus.conf | 24 +
plugins/messagebus.py | 226 +
plugins/rpm2maven.conf | 5 +
plugins/rpm2maven.py | 107 +
plugins/runroot.conf | 25 +
plugins/runroot.py | 322 +
plugins/runroot_hub.py | 61 +
tests/runtests.py | 32 +
tests/test___init__.py | 67 +
util/Makefile | 40 +
util/koji-gc | 959 +++
util/koji-gc.conf | 43 +
util/koji-shadow | 1330 ++++
util/koji-shadow.conf | 7 +
util/kojira | 822 +++
util/kojira.conf | 44 +
util/kojira.init | 85 +
util/kojira.service | 14 +
util/kojira.sysconfig | 4 +
vm/Makefile | 42 +
vm/fix_kojikamid.sh | 10 +
vm/kojikamid.py | 760 ++
vm/kojivmd | 1119 +++
vm/kojivmd.conf | 57 +
vm/kojivmd.init | 93 +
vm/kojivmd.service | 14 +
vm/kojivmd.sysconfig | 3 +
www/Makefile | 20 +
www/conf/Makefile | 20 +
www/conf/kojiweb.conf | 62 +
www/conf/web.conf | 31 +
www/docs/negotiate/index.html | 78 +
www/kojiweb/Makefile | 24 +
www/kojiweb/archiveinfo.chtml | 147 +
www/kojiweb/archivelist.chtml | 83 +
www/kojiweb/buildinfo.chtml | 195 +
www/kojiweb/buildrootinfo.chtml | 56 +
www/kojiweb/builds.chtml | 173 +
www/kojiweb/buildsbystatus.chtml | 55 +
www/kojiweb/buildsbytarget.chtml | 99 +
www/kojiweb/buildsbyuser.chtml | 73 +
www/kojiweb/buildtargetedit.chtml | 63 +
www/kojiweb/buildtargetinfo.chtml | 30 +
www/kojiweb/buildtargets.chtml | 76 +
www/kojiweb/channelinfo.chtml | 31 +
www/kojiweb/error.chtml | 30 +
www/kojiweb/externalrepoinfo.chtml | 31 +
www/kojiweb/fileinfo.chtml | 64 +
www/kojiweb/hostedit.chtml | 57 +
www/kojiweb/hostinfo.chtml | 91 +
www/kojiweb/hosts.chtml | 96 +
www/kojiweb/imageinfo.chtml | 60 +
www/kojiweb/includes/Makefile | 18 +
www/kojiweb/includes/footer.chtml | 23 +
www/kojiweb/includes/header.chtml | 103 +
www/kojiweb/index.chtml | 161 +
www/kojiweb/index.py | 2241 ++++++
www/kojiweb/notificationedit.chtml | 56 +
www/kojiweb/packageinfo.chtml | 113 +
www/kojiweb/packages.chtml | 116 +
www/kojiweb/packagesbyuser.chtml | 73 +
www/kojiweb/recentbuilds.chtml | 54 +
www/kojiweb/reports.chtml | 17 +
www/kojiweb/rpminfo.chtml | 224 +
www/kojiweb/rpmlist.chtml | 112 +
www/kojiweb/rpmsbyhost.chtml | 105 +
www/kojiweb/search.chtml | 48 +
www/kojiweb/searchresults.chtml | 75 +
www/kojiweb/tagedit.chtml | 65 +
www/kojiweb/taginfo.chtml | 170 +
www/kojiweb/tagparent.chtml | 72 +
www/kojiweb/tags.chtml | 76 +
www/kojiweb/taskinfo.chtml | 447 ++
www/kojiweb/tasks.chtml | 190 +
www/kojiweb/tasksbyhost.chtml | 89 +
www/kojiweb/tasksbyuser.chtml | 73 +
www/kojiweb/userinfo.chtml | 108 +
www/kojiweb/users.chtml | 94 +
www/kojiweb/wsgi_publisher.py | 481 ++
www/lib/Makefile | 20 +
www/lib/kojiweb/Makefile | 30 +
www/lib/kojiweb/__init__.py | 1 +
www/lib/kojiweb/util.py | 583 ++
www/static/Makefile | 24 +
www/static/debug.css | 9 +
www/static/errors/Makefile | 18 +
www/static/errors/unauthorized.html | 37 +
www/static/images/1px.gif | Bin 0 -> 807 bytes
www/static/images/Makefile | 18 +
www/static/images/assigned.png | Bin 0 -> 597 bytes
www/static/images/bkgrnd_greydots.png | Bin 0 -> 234 bytes
www/static/images/building.png | Bin 0 -> 1439 bytes
www/static/images/canceled.png | Bin 0 -> 1137 bytes
www/static/images/closed.png | Bin 0 -> 1115 bytes
www/static/images/complete.png | Bin 0 -> 1115 bytes
www/static/images/deleted.png | Bin 0 -> 1249 bytes
www/static/images/expired.png | Bin 0 -> 1088 bytes
www/static/images/failed.png | Bin 0 -> 648 bytes
www/static/images/free.png | Bin 0 -> 945 bytes
www/static/images/gray-triangle-down.gif | Bin 0 -> 85 bytes
www/static/images/gray-triangle-up.gif | Bin 0 -> 85 bytes
www/static/images/init.png | Bin 0 -> 1056 bytes
www/static/images/initializing.png | Bin 0 -> 1056 bytes
www/static/images/koji.ico | Bin 0 -> 1150 bytes
www/static/images/koji.png | Bin 0 -> 2118 bytes
www/static/images/no.png | Bin 0 -> 648 bytes
www/static/images/open.png | Bin 0 -> 995 bytes
www/static/images/powered-by-koji.png | Bin 0 -> 2809 bytes
www/static/images/ready.png | Bin 0 -> 1115 bytes
www/static/images/unknown.png | Bin 0 -> 663 bytes
www/static/images/waiting.png | Bin 0 -> 945 bytes
www/static/images/yes.png | Bin 0 -> 1115 bytes
www/static/js/Makefile | 18 +
www/static/js/watchlogs.js | 194 +
www/static/koji.css | 448 ++
www/static/themes/Makefile | 18 +
www/static/themes/README | 5 +
171 files changed, 49361 insertions(+)
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4857165
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+*.pyc
+*.pyo
+tests/test.py
diff --git a/Authors b/Authors
new file mode 100644
index 0000000..2a902f0
--- /dev/null
+++ b/Authors
@@ -0,0 +1,4 @@
+Mike McLean <mikem at redhat dot com>
+Dennis Gregorovic <dgregor at redhat dot com>
+Mike Bonnet <mikeb at redhat dot com>
+Jesse Keating <jkeating at redhat dot com>
diff --git a/COPYING b/COPYING
new file mode 100644
index 0000000..918a391
--- /dev/null
+++ b/COPYING
@@ -0,0 +1,16 @@
+ Koji - a system for building and tracking software.
+ Copyright (c) 2007-2014 Red Hat, Inc.
+
+ Koji is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation;
+ version 2.1 of the License.
+
+ This software is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this software; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
diff --git a/LGPL b/LGPL
new file mode 100644
index 0000000..3b473db
--- /dev/null
+++ b/LGPL
@@ -0,0 +1,458 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..d5e9b77
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,119 @@
+NAME=koji
+SPECFILE = $(firstword $(wildcard *.spec))
+SUBDIRS = hub builder koji cli docs util www plugins vm
+
+ifdef DIST
+DIST_DEFINES := --define "dist $(DIST)"
+endif
+
+ifndef VERSION
+VERSION := $(shell rpm $(RPM_DEFINES) $(DIST_DEFINES) -q --qf "%{VERSION}\n" --specfile $(SPECFILE)| head -1)
+endif
+# the release of the package
+ifndef RELEASE
+RELEASE := $(shell rpm $(RPM_DEFINES) $(DIST_DEFINES) -q --qf "%{RELEASE}\n" --specfile $(SPECFILE)| head -1)
+endif
+
+ifndef WORKDIR
+WORKDIR := $(shell pwd)
+endif
+## Override RPM_WITH_DIRS to avoid the usage of these variables.
+ifndef SRCRPMDIR
+SRCRPMDIR = $(WORKDIR)
+endif
+ifndef BUILDDIR
+BUILDDIR = $(WORKDIR)
+endif
+ifndef RPMDIR
+RPMDIR = $(WORKDIR)
+endif
+## SOURCEDIR is special; it has to match the CVS checkout directory,
+## because the CVS checkout directory contains the patch files. So it basically
+## can't be overridden without breaking things. But we leave it a variable
+## for consistency, and in hopes of convincing it to work sometime.
+ifndef SOURCEDIR
+SOURCEDIR := $(shell pwd)
+endif
+
+
+# RPM with all the overrides in place;
+ifndef RPM
+RPM := $(shell if test -f /usr/bin/rpmbuild ; then echo rpmbuild ; else echo rpm ; fi)
+endif
+ifndef RPM_WITH_DIRS
+RPM_WITH_DIRS = $(RPM) --define "_sourcedir $(SOURCEDIR)" \
+ --define "_builddir $(BUILDDIR)" \
+ --define "_srcrpmdir $(SRCRPMDIR)" \
+ --define "_rpmdir $(RPMDIR)"
+endif
+
+# tag to export, defaulting to current tag in the spec file
+ifndef TAG
+TAG=$(NAME)-$(VERSION)-$(RELEASE)
+endif
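+# Example (illustrative): the defaults above can be overridden on the
+# command line, e.g. `make DIST=.fc23 srpm` or `make TAG=koji-1.10.0-1 force-tag`.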
+
+_default:
+ @echo "read the makefile"
+
+clean:
+ rm -f *.o *.so *.pyc *~ koji*.bz2 koji*.src.rpm
+ rm -rf koji-$(VERSION)
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+git-clean:
+ @git clean -d -q -x
+
+subdirs:
+ for d in $(SUBDIRS); do make -C $$d; [ $$? = 0 ] || exit 1; done
+
+test-tarball:
+ @rm -rf .koji-$(VERSION)
+ @mkdir .koji-$(VERSION)
+ @cp -al [A-Za-z]* .koji-$(VERSION)
+ @mv .koji-$(VERSION) koji-$(VERSION)
+ tar --bzip2 --exclude '*.tar.bz2' --exclude '*.rpm' --exclude '.#*' \
+ -cpf koji-$(VERSION).tar.bz2 koji-$(VERSION)
+ @rm -rf koji-$(VERSION)
+
+tarball: clean
+ @git archive --format=tar --prefix=$(NAME)-$(VERSION)/ HEAD |bzip2 > $(NAME)-$(VERSION).tar.bz2
+
+sources: tarball
+
+srpm: tarball
+ $(RPM_WITH_DIRS) $(DIST_DEFINES) -bs $(SPECFILE)
+
+rpm: tarball
+ $(RPM_WITH_DIRS) $(DIST_DEFINES) -bb $(SPECFILE)
+
+test-rpm: tarball
+ $(RPM_WITH_DIRS) $(DIST_DEFINES) --define "testbuild 1" -bb $(SPECFILE)
+
+tag::
+ git tag -a $(TAG)
+ @echo "Tagged with: $(TAG)"
+ @echo
+
+force-tag::
+ git tag -f -a $(TAG)
+ @echo "Tagged with: $(TAG)"
+ @echo
+
+# If and only if "make build" fails, use "make force-tag" to
+# re-tag the version.
+#force-tag: $(SPECFILE)
+# @$(MAKE) tag TAG_OPTS="-F $(TAG_OPTS)"
+
+DESTDIR ?= /
+TYPE = systemd
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)
+
+ for d in $(SUBDIRS); do make DESTDIR=`cd $(DESTDIR); pwd` \
+ -C $$d install TYPE=$(TYPE); [ $$? = 0 ] || exit 1; done
diff --git a/builder/Makefile b/builder/Makefile
new file mode 100644
index 0000000..1de0a9c
--- /dev/null
+++ b/builder/Makefile
@@ -0,0 +1,42 @@
+BINFILES = kojid
+LIBEXECFILES = mergerepos
+SYSTEMDSYSTEMUNITDIR = $(shell pkg-config systemd --variable=systemdsystemunitdir)
+TYPE = systemd
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+
+_install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/usr/sbin
+ install -p -m 755 $(BINFILES) $(DESTDIR)/usr/sbin
+
+ mkdir -p $(DESTDIR)/usr/libexec/kojid
+ install -p -m 755 $(LIBEXECFILES) $(DESTDIR)/usr/libexec/kojid
+
+ mkdir -p $(DESTDIR)/etc/mock/koji
+
+ mkdir -p $(DESTDIR)/etc/kojid
+ install -p -m 644 kojid.conf $(DESTDIR)/etc/kojid/kojid.conf
+
+install-systemd: _install
+ mkdir -p $(DESTDIR)$(SYSTEMDSYSTEMUNITDIR)
+ install -p -m 644 kojid.service $(DESTDIR)$(SYSTEMDSYSTEMUNITDIR)
+
+install-sysv: _install
+ mkdir -p $(DESTDIR)/etc/rc.d/init.d
+ install -p -m 755 kojid.init $(DESTDIR)/etc/rc.d/init.d/kojid
+
+ mkdir -p $(DESTDIR)/etc/sysconfig
+ install -p -m 644 kojid.sysconfig $(DESTDIR)/etc/sysconfig/kojid
+
+install: install-$(TYPE)
diff --git a/builder/kojid b/builder/kojid
new file mode 100755
index 0000000..c0759a8
--- /dev/null
+++ b/builder/kojid
@@ -0,0 +1,4651 @@
+#!/usr/bin/python
+
+# Koji build daemon
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+try:
+ import krbV
+except ImportError:
+ pass
+import koji
+import koji.plugin
+import koji.util
+import koji.tasks
+import glob
+import logging
+import logging.handlers
+from koji.daemon import incremental_upload, log_output, TaskManager, SCM
+from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask
+from koji.util import parseStatus, isSuccess, dslice, dslice_ex
+import os
+import pwd
+import grp
+import random
+import re
+import rpm
+import shutil
+import signal
+import smtplib
+import socket
+import sys
+import time
+import traceback
+import xml.dom.minidom
+import xmlrpclib
+import zipfile
+import copy
+import Cheetah.Template
+from ConfigParser import ConfigParser
+from fnmatch import fnmatch
+from gzip import GzipFile
+from optparse import OptionParser, SUPPRESS_HELP
+from StringIO import StringIO
+from yum import repoMDObject
+
+#imports for LiveCD and Appliance handler
+image_enabled = False
+try:
+ import pykickstart.parser as ksparser
+ import pykickstart.handlers.control as kscontrol
+ import pykickstart.errors as kserrors
+ import hashlib
+ import iso9660 # from pycdio
+ image_enabled = True
+except ImportError:
+ pass
+
+ozif_enabled = False
+try:
+ from imgfac.BuildDispatcher import BuildDispatcher
+ from imgfac.Builder import Builder
+ from imgfac.PluginManager import PluginManager
+ from imgfac.ReservationManager import ReservationManager
+ plugin_mgr = PluginManager('/etc/imagefactory/plugins.d')
+ plugin_mgr.load()
+ from imgfac.ApplicationConfiguration import ApplicationConfiguration
+ from imgfac.PersistentImageManager import PersistentImageManager
+ from imgfac.BaseImage import BaseImage
+ from imgfac.TargetImage import TargetImage
+ ozif_enabled = True
+except ImportError:
+ pass
+
+def main(options, session):
+ logger = logging.getLogger("koji.build")
+ logger.info('Starting up')
+ koji.util.setup_rlimits(options.__dict__, logger)
+ tm = TaskManager(options, session)
+ tm.findHandlers(globals())
+ tm.findHandlers(vars(koji.tasks))
+ if options.plugin:
+ #load plugins
+ pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))
+ for name in options.plugin:
+ logger.info('Loading plugin: %s' % name)
+ tm.scanPlugin(pt.load(name))
+ def shutdown(*args):
+ raise SystemExit
+ def restart(*args):
+ logger.warn("Initiating graceful restart")
+ tm.restart_pending = True
+ signal.signal(signal.SIGTERM,shutdown)
+ signal.signal(signal.SIGUSR1,restart)
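+ # Illustrative: sending SIGUSR1 to the kojid process (e.g. `kill -USR1 <pid>`)
+ # requests a graceful restart, while SIGTERM triggers a clean shutdown via
+ # the handlers registered above.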
+ while 1:
+ try:
+ taken = False
+ tm.updateBuildroots()
+ tm.updateTasks()
+ taken = tm.getNextTask()
+ except (SystemExit,ServerExit,KeyboardInterrupt):
+ logger.warn("Exiting")
+ break
+ except ServerRestart:
+ logger.warn("Restarting")
+ os.execv(sys.argv[0], sys.argv)
+ except koji.AuthExpired:
+ logger.error('Session expired')
+ break
+ except koji.RetryError:
+ raise
+ except:
+ # XXX - this is a little extreme
+ # log the exception and continue
+ logger.error(''.join(traceback.format_exception(*sys.exc_info())))
+ try:
+ if not taken:
+ # Only sleep if we didn't take a task, otherwise retry immediately.
+ # The load-balancing code in getNextTask() will prevent a single builder
+ # from getting overloaded.
+ time.sleep(options.sleeptime)
+ except (SystemExit,KeyboardInterrupt):
+ logger.warn("Exiting")
+ break
+ logger.warn("Shutting down, please wait...")
+ tm.shutdown()
+ session.logout()
+ sys.exit(0)
+
+
+class BuildRoot(object):
+
+ def __init__(self,session,options,*args,**kwargs):
+ self.logger = logging.getLogger("koji.build.buildroot")
+ self.session = session
+ self.options = options
+ if len(args) + len(kwargs) == 1:
+ # manage an existing mock buildroot
+ self._load(*args,**kwargs)
+ else:
+ self._new(*args,**kwargs)
+
+ def _load(self, data):
+ #manage an existing buildroot
+ if isinstance(data, dict):
+ #assume data already pulled from db
+ self.id = data['id']
+ else:
+ self.id = data
+ data = self.session.getBuildroot(self.id)
+ self.task_id = data['task_id']
+ self.tag_id = data['tag_id']
+ self.tag_name = data['tag_name']
+ self.repoid = data['repo_id']
+ self.repo_info = self.session.repoInfo(self.repoid, strict=True)
+ self.event_id = self.repo_info['create_event']
+ self.br_arch = data['arch']
+ self.name = "%(tag_name)s-%(id)s-%(repoid)s" % vars(self)
+ self.config = self.session.getBuildConfig(self.tag_id, event=self.event_id)
+
+ def _new(self, tag, arch, task_id, repo_id=None, install_group='build',
+ setup_dns=False, bind_opts=None, maven_opts=None, maven_envs=None, deps=None):
+ """Create a brand new repo"""
+ if not repo_id:
+ raise koji.BuildrootError, "A repo id must be provided"
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ self.repo_info = repo_info
+ self.repoid = self.repo_info['id']
+ self.event_id = self.repo_info['create_event']
+ self.task_id = task_id
+ self.config = self.session.getBuildConfig(tag, event=self.event_id)
+ if not self.config:
+ raise koji.BuildrootError("Could not get config info for tag: %s" % tag)
+ self.tag_id = self.config['id']
+ self.tag_name = self.config['name']
+ if self.config['id'] != repo_info['tag_id']:
+ raise koji.BuildrootError, "tag/repo mismatch: %s vs %s" \
+ % (self.config['name'], repo_info['tag_name'])
+ repo_state = koji.REPO_STATES[repo_info['state']]
+ if repo_state == 'EXPIRED':
+ # This should be ok. Expired repos are still intact, just not
+ # up-to-date (which may be the point in some cases).
+ self.logger.info("Requested repo (%i) is no longer current" % repo_id)
+ elif repo_state != 'READY':
+ raise koji.BuildrootError, "Requested repo (%i) is %s" % (repo_id, repo_state)
+ self.br_arch = koji.canonArch(arch)
+ # armhfp is not a valid arch according to autoconf
+ if arch == 'armhfp':
+ self.target_arch = 'arm'
+ else:
+ self.target_arch = arch
+ self.logger.debug("New buildroot: %(tag_name)s/%(br_arch)s/%(repoid)s" % vars(self))
+ id = self.session.host.newBuildRoot(self.repoid, self.br_arch, task_id=task_id)
+ if id is None:
+ raise koji.BuildrootError, "failed to get a buildroot id"
+ self.id = id
+ self.name = "%(tag_name)s-%(id)s-%(repoid)s" % vars(self)
+ self.install_group = install_group
+ self.setup_dns = setup_dns
+ self.bind_opts = bind_opts
+ self.maven_opts = maven_opts
+ self.maven_envs = maven_envs
+ self.deps = deps
+ self._writeMockConfig()
+
+ def _writeMockConfig(self):
+ # mock config
+ configdir = '/etc/mock/koji'
+ configfile = "%s/%s.cfg" % (configdir,self.name)
+ self.mockcfg = "koji/%s" % self.name
+
+ opts = {}
+ for k in ('repoid', 'tag_name'):
+ if hasattr(self, k):
+ opts[k] = getattr(self, k)
+ for k in ('mockdir', 'topdir', 'topurl', 'topurls', 'packager', 'vendor', 'distribution', 'mockhost', 'yum_proxy', 'rpmbuild_timeout'):
+ if hasattr(self.options, k):
+ opts[k] = getattr(self.options, k)
+ opts['buildroot_id'] = self.id
+ opts['use_host_resolv'] = self.setup_dns
+ opts['install_group'] = self.install_group
+ opts['maven_opts'] = self.maven_opts
+ opts['maven_envs'] = self.maven_envs
+ opts['bind_opts'] = self.bind_opts
+ opts['target_arch'] = self.target_arch
+ if 'mock.package_manager' in self.config['extra']:
+ opts['package_manager'] = self.config['extra']['mock.package_manager']
+ output = koji.genMockConfig(self.name, self.br_arch, managed=True, **opts)
+
+ #write config
+ fo = file(configfile,'w')
+ fo.write(output)
+ fo.close()
+
+ def _repositoryEntries(self, pi, plugin=False):
+ entries = []
+ if plugin:
+ tag_name = 'pluginRepository'
+ else:
+ tag_name = 'repository'
+ id_suffix = 'repo'
+ name_prefix = 'Repository for Koji'
+ for dep in self.deps:
+ if isinstance(dep, (int, long)):
+ # dep is a task ID, the url points to the task output directory
+ repo_type = 'task'
+ dep_url = pi.task(dep)
+ snapshots = 'true'
+ else:
+ # dep is a build NVR, the url points to the build output directory
+ repo_type = 'build'
+ build = koji.parse_NVR(dep)
+ dep_url = pi.mavenbuild(build)
+ snapshots = 'false'
+ repo_id = 'koji-%(repo_type)s-%(dep)s-%(id_suffix)s' % locals()
+ entry = """
+ <%(tag_name)s>
+ <id>%(repo_id)s</id>
+ <name>%(name_prefix)s %(repo_type)s %(dep)s</name>
+ <url>%(dep_url)s</url>
+ <layout>default</layout>
+ <releases>
+ <enabled>true</enabled>
+ <updatePolicy>never</updatePolicy>
+ <checksumPolicy>fail</checksumPolicy>
+ </releases>
+ <snapshots>
+ <enabled>%(snapshots)s</enabled>
+ <updatePolicy>never</updatePolicy>
+ <checksumPolicy>fail</checksumPolicy>
+ </snapshots>
+ </%(tag_name)s>""" % locals()
+ entries.append((repo_id, entry))
+ return entries
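+ # Illustrative: a task dep of 12345 yields the repository id
+ # 'koji-task-12345-repo', while a build dep of 'foo-1.0-1' yields
+ # 'koji-build-foo-1.0-1-repo' with snapshots disabled.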
+
+ def writeMavenSettings(self, destfile, outputdir):
+ """
+ Write the Maven settings.xml file to the specified destination.
+ """
+ task_id = self.task_id
+ repo_id = self.repoid
+ tag_name = self.tag_name
+ deploy_dir = outputdir[len(self.rootdir()):]
+
+ pi = koji.PathInfo(topdir=self.options.topurl)
+ repourl = pi.repo(repo_id, tag_name) + '/maven'
+
+ mirror_spec = '*'
+ settings = """<settings xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+ http://maven.apache.org/xsd/settings-1.0.0.xsd">
+ <interactiveMode>false</interactiveMode>
+ <mirrors>
+ <mirror>
+ <id>koji-maven-repo-%(tag_name)s-%(repo_id)i</id>
+ <name>Koji-managed Maven repository (%(tag_name)s-%(repo_id)i)</name>
+ <url>%(repourl)s</url>
+ <mirrorOf>%(mirror_spec)s</mirrorOf>
+ </mirror>
+ </mirrors>
+ <profiles>
+ <profile>
+ <id>koji-task-%(task_id)s</id>
+ <properties>
+ <altDeploymentRepository>koji-output::default::file://%(deploy_dir)s</altDeploymentRepository>
+ </properties>"""
+ if self.deps:
+ settings += """
+ <repositories>"""
+ for dep_repo_id, dep_repo_entry in self._repositoryEntries(pi):
+ mirror_spec += ',!' + dep_repo_id
+ settings += dep_repo_entry
+ settings += """
+ </repositories>
+ <pluginRepositories>"""
+ for dep_repo_id, dep_repo_entry in self._repositoryEntries(pi, plugin=True):
+ mirror_spec += ',!' + dep_repo_id
+ settings += dep_repo_entry
+ settings += """
+ </pluginRepositories>"""
+ settings += """
+ </profile>
+ </profiles>
+ <activeProfiles>
+ <activeProfile>koji-task-%(task_id)s</activeProfile>
+ </activeProfiles>
+</settings>
+"""
+ settings = settings % locals()
+ fo = file(self.rootdir() + destfile, 'w')
+ fo.write(settings)
+ fo.close()
+
+ def mock(self, args):
+ """Run mock"""
+ mockpath = getattr(self.options,"mockpath","/usr/bin/mock")
+ cmd = [mockpath, "-r", self.mockcfg]
+ #if self.options.debug_mock:
+ # cmd.append('--debug')
+ # TODO: should we pass something like --verbose --trace instead?
+ cmd.extend(args)
+ self.logger.info(' '.join(cmd))
+ workdir = getattr(self, 'workdir', None)
+ mocklog = 'mock_output.log'
+ pid = os.fork()
+ if pid:
+ resultdir = self.resultdir()
+ uploadpath = self.getUploadPath()
+ logs = {}
+
+ finished = False
+ while not finished:
+ time.sleep(1)
+ status = os.waitpid(pid, os.WNOHANG)
+ if status[0] != 0:
+ finished = True
+
+ try:
+ results = os.listdir(resultdir)
+ except OSError:
+ # will happen when mock hasn't created the resultdir yet
+ results = []
+
+ for fname in results:
+ if fname.endswith('.log') and not logs.has_key(fname):
+ fpath = os.path.join(resultdir, fname)
+ logs[fname] = (None, None, 0, fpath)
+ if workdir and mocklog not in logs:
+ fpath = os.path.join(workdir, mocklog)
+ if os.path.exists(fpath):
+ logs[mocklog] = (None, None, 0, fpath)
+
+ for (fname, (fd, inode, size, fpath)) in logs.items():
+ try:
+ stat_info = os.stat(fpath)
+ if not fd or stat_info.st_ino != inode or stat_info.st_size < size:
+ # either a file we haven't opened before, or mock replaced a file we had open with
+ # a new file and is writing to it, or truncated the file we're reading,
+ # but our fd is pointing to the previous location in the old file
+ if fd:
+ self.logger.info('Rereading %s, inode: %s -> %s, size: %s -> %s' %
+ (fpath, inode, stat_info.st_ino, size, stat_info.st_size))
+ fd.close()
+ fd = file(fpath, 'r')
+ logs[fname] = (fd, stat_info.st_ino, stat_info.st_size, fpath)
+ except:
+ self.logger.error("Error reading mock log: %s", fpath)
+ self.logger.error(''.join(traceback.format_exception(*sys.exc_info())))
+ continue
+
+ incremental_upload(self.session, fname, fd, uploadpath, logger=self.logger)
+ #clean up and return exit status of command
+ for (fname, (fd, inode, size, fpath)) in logs.items():
+ if fd:
+ fd.close()
+ return status[1]
+
+ else:
+ #in no case should exceptions propagate past here
+ try:
+ self.session._forget()
+ if workdir:
+ outfile = os.path.join(workdir, mocklog)
+ flags = os.O_CREAT | os.O_WRONLY | os.O_APPEND
+ fd = os.open(outfile, flags, 0666)
+ os.dup2(fd, 1)
+ os.dup2(fd, 2)
+ if os.getuid() == 0 and hasattr(self.options,"mockuser"):
+ self.logger.info('Running mock as %s' % self.options.mockuser)
+ uid,gid = pwd.getpwnam(self.options.mockuser)[2:4]
+ os.setgroups([grp.getgrnam('mock')[2]])
+ os.setregid(gid,gid)
+ os.setreuid(uid,uid)
+ os.execvp(cmd[0],cmd)
+ except:
+ #diediedie
+ print "Failed to exec mock"
+ print ''.join(traceback.format_exception(*sys.exc_info()))
+ os._exit(1)
+
+ def getUploadPath(self):
+ """Get the path that should be used when uploading files to
+ the hub."""
+ return koji.pathinfo.taskrelpath(self.task_id)
+
+ def uploadDir(self, dirpath, suffix=None):
+ """Upload the contents of the given directory to the
+ task output directory on the hub. If suffix is provided,
+ append '.' + suffix to the filenames, so that successive uploads
+ of the same directory won't overwrite each other, if the files have
+ the same name but different contents."""
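+ # Hypothetical example: uploadDir('/tmp/koji-task/logs', suffix='retry1')
+ # uploads build.log as 'build.log.retry1', so a later upload of the same
+ # directory does not overwrite the first copy on the hub.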
+ if not os.path.isdir(dirpath):
+ return
+ uploadpath = self.getUploadPath()
+ for filename in os.listdir(dirpath):
+ filepath = os.path.join(dirpath, filename)
+ if os.stat(filepath).st_size > 0:
+ if suffix:
+ filename = '%s.%s' % (filename, suffix)
+ self.session.uploadWrapper(filepath, uploadpath, filename)
+
+ def init(self):
+ rv = self.mock(['--init'])
+
+ if rv:
+ self.expire()
+ raise koji.BuildrootError, "could not init mock buildroot, %s" % self._mockResult(rv)
+ self.session.host.setBuildRootList(self.id,self.getPackageList())
+
+ def _mockResult(self, rv, logfile=None):
+ if logfile:
+ pass
+ elif os.WIFEXITED(rv) and os.WEXITSTATUS(rv) == 1:
+ logfile = 'build.log'
+ else:
+ logfile = 'root.log'
+ msg = '; see %s for more information' % logfile
+ return parseStatus(rv, 'mock') + msg
+
+ def build_srpm(self, specfile, sourcedir, source_cmd):
+ self.session.host.setBuildRootState(self.id,'BUILDING')
+ alt_sources_dir = "%s/SOURCES" % sourcedir
+ if self.options.support_rpm_source_layout and os.path.isdir(alt_sources_dir):
+ sources_dir = alt_sources_dir
+ else:
+ sources_dir = sourcedir
+ if source_cmd:
+ # call the command defined by source_cmd in the chroot so any required files not stored in
+ # the SCM can be retrieved
+ chroot_sourcedir = sourcedir[len(self.rootdir()):]
+ args = ['--no-clean', '--unpriv', '--cwd', chroot_sourcedir, '--chroot']
+ args.extend(source_cmd)
+ rv = self.mock(args)
+ if rv:
+ self.expire()
+ raise koji.BuildError, "error retrieving sources, %s" % self._mockResult(rv)
+
+ args = ['--no-clean', '--buildsrpm', '--spec', specfile, '--sources', sources_dir,
+ '--target', 'noarch']
+
+ rv = self.mock(args)
+
+ if rv:
+ self.expire()
+ raise koji.BuildError, "error building srpm, %s" % self._mockResult(rv)
+
+ def build(self,srpm,arch=None):
+ # run build
+ self.session.host.setBuildRootState(self.id,'BUILDING')
+ args = ['--no-clean']
+ if arch:
+ args.extend(['--target', arch])
+ args.extend(['--rebuild', srpm])
+ rv = self.mock(args)
+
+ self.session.host.updateBuildRootList(self.id,self.getPackageList())
+ if rv:
+ self.expire()
+ raise koji.BuildError, "error building package (arch %s), %s" % (arch, self._mockResult(rv))
+
+ def getPackageList(self):
+ """Return a list of packages from the buildroot
+
+ Each member of the list is a dictionary containing the following fields:
+ - name
+ - version
+ - release
+ - epoch
+ - arch
+ - payloadhash
+ - size
+ - buildtime
+ """
+ fields = ('name',
+ 'version',
+ 'release',
+ 'epoch',
+ 'arch',
+ 'sigmd5',
+ 'size',
+ 'buildtime')
+ rpm.addMacro("_dbpath", "%s/var/lib/rpm" % self.rootdir())
+ ret = []
+ try:
+ ts = rpm.TransactionSet()
+ for h in ts.dbMatch():
+ pkg = koji.get_header_fields(h,fields)
+ #skip our fake packages
+ if pkg['name'] in ['buildsys-build', 'gpg-pubkey']:
+ #XXX config
+ continue
+ pkg['payloadhash'] = koji.hex_string(pkg['sigmd5'])
+ del pkg['sigmd5']
+ ret.append(pkg)
+ finally:
+ rpm.delMacro("_dbpath")
+ self.markExternalRPMs(ret)
+ return ret
+
+ def getMavenPackageList(self, repodir):
+ """Return a list of Maven packages that were installed into the local repo
+ to satisfy build requirements.
+
+ Each member of the list is a dictionary containing the following fields:
+ - maven_info: a dict of Maven info containing the groupId, artifactId, and version fields
+ - files: a list of files associated with that POM
+ """
+ packages = []
+ for path, dirs, files in os.walk(repodir):
+ relpath = path[len(repodir) + 1:]
+ maven_files = []
+ for repofile in files:
+ if koji.util.multi_fnmatch(repofile, self.options.maven_repo_ignore) or \
+ koji.util.multi_fnmatch(os.path.join(relpath, repofile), self.options.maven_repo_ignore):
+ continue
+ if relpath == '' and repofile in ['scm-sources.zip', 'patches.zip']:
+ # special-case the archives of the sources and patches, since we drop them in
+ # root of the output directory
+ continue
+ maven_files.append({'path': relpath, 'filename': repofile,
+ 'size': os.path.getsize(os.path.join(path, repofile))})
+ if maven_files:
+ path_comps = relpath.split('/')
+ if len(path_comps) < 3:
+ raise koji.BuildrootError, 'files found in unexpected path in local Maven repo, directory: %s, files: %s' % \
+ (relpath, ', '.join([f['filename'] for f in maven_files]))
+ # extract the Maven info from the path within the local repo
+ maven_info = {'version': path_comps[-1],
+ 'artifact_id': path_comps[-2],
+ 'group_id': '.'.join(path_comps[:-2])}
+ packages.append({'maven_info': maven_info, 'files': maven_files})
+
+ return packages
+
+ def mavenBuild(self, sourcedir, outputdir, repodir,
+ props=None, profiles=None, options=None, goals=None):
+ self.session.host.setBuildRootState(self.id, 'BUILDING')
+ cmd = ['--no-clean', '--chroot', '--unpriv', '--cwd', sourcedir[len(self.rootdir()):], '--',
+ '/usr/bin/mvn', '-C']
+ if options:
+ cmd.extend(options)
+ if profiles:
+ cmd.append('-P%s' % ','.join(profiles))
+ if props:
+ for name, value in props.items():
+ if value is not None:
+ cmd.append('-D%s=%s' % (name, value))
+ else:
+ cmd.append('-D%s' % name)
+ if goals:
+ cmd.extend(goals)
+ cmd.extend(['deploy'])
+ rv = self.mock(cmd)
+
+ # if the deploy command failed, don't raise an error on unknown artifacts, because that
+ # will mask the underlying failure
+ ignore_unknown = False
+ if rv:
+ ignore_unknown = True
+ self.session.host.updateMavenBuildRootList(self.id, self.task_id, self.getMavenPackageList(repodir),
+ ignore=self.getMavenPackageList(outputdir),
+ project=True, ignore_unknown=ignore_unknown,
+ extra_deps=self.deps)
+ if rv:
+ self.expire()
+ raise koji.BuildrootError, 'error building Maven package, %s' % self._mockResult(rv, logfile='root.log')
+
+ def scrub(self):
+ "Non-mock implementation of clean"
+ raise koji.FunctionDeprecated, "no longer needed and deprecated. use clean()"
+
+ def markExternalRPMs(self, rpmlist):
+ """Check rpms against pkgorigins and add external repo data to the external ones
+
+ Modifies rpmlist in place. No return
+ """
+ external_repos = self.session.getExternalRepoList(self.repo_info['tag_id'],
+ event=self.repo_info['create_event'])
+ if not external_repos:
+ #nothing to do
+ return
+ #index external repos by expanded url
+ erepo_idx = {}
+ for erepo in external_repos:
+ # substitute $arch in the url with the arch of the repo we're generating
+ ext_url = erepo['url'].replace('$arch', self.br_arch)
+ erepo_idx[ext_url] = erepo
+ pathinfo = koji.PathInfo(topdir='')
+ #XXX - cheap hack to get relative paths
+ repodir = pathinfo.repo(self.repo_info['id'], self.repo_info['tag_name'])
+ repomdpath = os.path.join(repodir, self.br_arch, 'repodata', 'repomd.xml')
+
+ opts = dict([(k, getattr(self.options, k)) for k in 'topurl','topdir'])
+ fo = koji.openRemoteFile(repomdpath, **opts)
+ try:
+ repodata = repoMDObject.RepoMD('ourrepo', fo)
+ except:
+ raise koji.BuildError, "Unable to parse repomd.xml file for %s" % os.path.join(repodir, self.br_arch)
+ data = repodata.getData('origin')
+ pkgorigins = data.location[1]
+
+ relpath = os.path.join(repodir, self.br_arch, pkgorigins)
+ fo = koji.openRemoteFile(relpath, **opts)
+ #at this point we know there were external repos at the create event,
+ #so there should be an origins file.
+ origin_idx = {}
+ fo2 = GzipFile(fileobj=fo, mode='r')
+ for line in fo2:
+ parts=line.split(None, 2)
+ if len(parts) < 2:
+ continue
+ #first field is formatted by yum as [e:]n-v-r.a
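+ #a typical line might look like (hypothetical values):
+ # bash-4.2.45-5.el7.x86_64 http://mirror.example.com/repo/x86_64/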
+ nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % koji.parse_NVRA(parts[0])
+ origin_idx[nvra] = parts[1]
+ fo2.close()
+ fo.close()
+ # mergerepo starts from a local repo in the task workdir, so internal
+ # rpms have an odd-looking origin that we need to look for
+ localtail = '/repo_%s_premerge/' % self.repo_info['id']
+ for rpm_info in rpmlist:
+ key = "%(name)s-%(version)s-%(release)s.%(arch)s" % rpm_info
+ # src rpms should not show up in rpmlist so we do not have to
+ # worry about fixing the arch for them
+ ext_url = origin_idx.get(key)
+ if not ext_url:
+ raise koji.BuildError, "No origin for %s" % key
+ erepo = erepo_idx.get(ext_url)
+ if not erepo:
+ if ext_url.startswith('file://') and ext_url.endswith(localtail):
+ # internal rpm
+ continue
+ raise koji.BuildError, "Unknown origin for %s: %s" % (key, ext_url)
+ rpm_info['external_repo'] = erepo
+ rpm_info['location'] = erepo['external_repo_id']
+
+ def resultdir(self):
+ return "%s/%s/result" % (self.options.mockdir, self.name)
+
+ def rootdir(self):
+ return "%s/%s/root" % (self.options.mockdir, self.name)
+
+ def expire(self):
+ self.session.host.setBuildRootState(self.id,'EXPIRED')
+
+
+class ChainBuildTask(BaseTaskHandler):
+
+ Methods = ['chainbuild']
+ #mostly just waiting on other tasks
+ _taskWeight = 0.1
+
+ def handler(self, srcs, target, opts=None):
+ """Run a chain build
+
+ target and opts are passed on to the build tasks
+ srcs is a list of "build levels"
+ each build level is a list of strings, each string may be one of:
+ - a build src (SCM url only)
+ - an n-v-r
+ each build level is processed in order
+ successive levels are only started once the previous levels have completed
+ and their builds have made it into the repo.
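+
+ For example (a hypothetical invocation):
+ srcs = [['git://example.com/foo#abc123'],
+ ['git://example.com/baz#def456']]
+ builds foo first, then waits until the resulting build shows up in the
+ repo before building baz.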
+ """
+ if opts.get('scratch'):
+ raise koji.BuildError, "--scratch is not allowed with chain-builds"
+ target_info = self.session.getBuildTarget(target)
+ if not target_info:
+ raise koji.GenericError, 'unknown build target: %s' % target
+ nvrs = []
+ for n_level, build_level in enumerate(srcs):
+ #if there are any nvrs to wait on, do so
+ if nvrs:
+ task_id = self.session.host.subtask(method='waitrepo',
+ arglist=[target_info['build_tag_name'], None, nvrs],
+ label="wait %i" % n_level,
+ parent=self.id)
+ self.wait(task_id, all=True, failany=True)
+ nvrs = []
+ #kick off the builds for this level
+ build_tasks = []
+ for n_src, src in enumerate(build_level):
+ if SCM.is_scm_url(src):
+ task_id = self.session.host.subtask(method='build',
+ arglist=[src, target, opts],
+ label="build %i,%i" % (n_level, n_src),
+ parent=self.id)
+ build_tasks.append(task_id)
+ else:
+ nvrs.append(src)
+ #next pass will wait for these
+ if build_tasks:
+ #the level could have been all nvrs
+ self.wait(build_tasks, all=True, failany=True)
+ #see what builds we created in this batch so the next pass can wait for them also
+ for build_task in build_tasks:
+ builds = self.session.listBuilds(taskID=build_task)
+ if builds:
+ nvrs.append(builds[0]['nvr'])
+
+
+class BuildTask(BaseTaskHandler):
+
+ Methods = ['build']
+ #we mostly just wait on other tasks
+ _taskWeight = 0.2
+
+ def handler(self, src, target, opts=None):
+ """Handler for the master build task"""
+ if opts is None:
+ opts = {}
+ self.opts = opts
+ if opts.get('arch_override') and not opts.get('scratch'):
+ raise koji.BuildError, "arch_override is only allowed for scratch builds"
+ if opts.get('repo_id') is not None:
+ repo_info = self.session.repoInfo(opts['repo_id'])
+ if not repo_info:
+ raise koji.BuildError, 'No such repo: %s' % opts['repo_id']
+ repo_state = koji.REPO_STATES[repo_info['state']]
+ if repo_state not in ('READY', 'EXPIRED'):
+ raise koji.BuildError, 'Bad repo: %s (%s)' % (repo_info['id'], repo_state)
+ self.event_id = repo_info['create_event']
+ else:
+ repo_info = None
+ #we'll wait for a repo later (self.getRepo)
+ self.event_id = None
+ task_info = self.session.getTaskInfo(self.id)
+ target_info = None
+ if target:
+ target_info = self.session.getBuildTarget(target, event=self.event_id)
+ if target_info:
+ dest_tag = target_info['dest_tag']
+ build_tag = target_info['build_tag']
+ if repo_info is not None:
+ #make sure specified repo matches target
+ if repo_info['tag_id'] != target_info['build_tag']:
+ raise koji.BuildError, 'Repo/Target mismatch: %s/%s' \
+ % (repo_info['tag_name'], target_info['build_tag_name'])
+ else:
+ # if repo_id is specified, we can allow the 'target' arg to simply specify
+ # the destination tag (since the repo specifies the build tag).
+ if repo_info is None:
+ raise koji.GenericError, 'unknown build target: %s' % target
+ build_tag = repo_info['tag_id']
+ if target is None:
+ #ok, call it skip-tag for the buildroot tag
+ self.opts['skip_tag'] = True
+ dest_tag = build_tag
+ else:
+ taginfo = self.session.getTag(target, event=self.event_id)
+ if not taginfo:
+ raise koji.GenericError, 'neither tag nor target: %s' % target
+ dest_tag = taginfo['id']
+ #policy checks...
+ policy_data = {
+ 'user_id' : task_info['owner'],
+ 'source' : src,
+ 'task_id' : self.id,
+ 'build_tag' : build_tag, #id
+ 'skip_tag' : bool(self.opts.get('skip_tag')),
+ }
+ if target_info:
+ policy_data['target'] = target_info['id']
+ if not self.opts.get('skip_tag'):
+ policy_data['tag'] = dest_tag #id
+ if not SCM.is_scm_url(src) and not opts.get('scratch'):
+ #let hub policy decide
+ self.session.host.assertPolicy('build_from_srpm', policy_data)
+ if opts.get('repo_id') is not None:
+ # use of this option is governed by policy
+ self.session.host.assertPolicy('build_from_repo_id', policy_data)
+ if not repo_info:
+ repo_info = self.getRepo(build_tag) #(subtask)
+ self.event_id = self.session.getLastEvent()['id']
+ srpm = self.getSRPM(src, build_tag, repo_info['id'])
+ h = self.readSRPMHeader(srpm)
+ data = koji.get_header_fields(h,['name','version','release','epoch'])
+ data['task_id'] = self.id
+ extra_arches = None
+ self.logger.info("Reading package config for %(name)s" % data)
+ pkg_cfg = self.session.getPackageConfig(dest_tag,data['name'],event=self.event_id)
+ self.logger.debug("%r" % pkg_cfg)
+ if pkg_cfg is not None:
+ extra_arches = pkg_cfg.get('extra_arches')
+ if not self.opts.get('skip_tag') and not self.opts.get('scratch'):
+ # Make sure package is on the list for this tag
+ if pkg_cfg is None:
+ raise koji.BuildError, "package %s not in list for tag %s" \
+ % (data['name'], target_info['dest_tag_name'])
+ elif pkg_cfg['blocked']:
+ raise koji.BuildError, "package %s is blocked for tag %s" \
+ % (data['name'], target_info['dest_tag_name'])
+ # TODO - more pre tests
+ archlist = self.getArchList(build_tag, h, extra=extra_arches)
+ #let the system know about the build we're attempting
+ if not self.opts.get('scratch'):
+ #scratch builds do not get imported
+ build_id = self.session.host.initBuild(data)
+ #(initBuild raises an exception if there is a conflict)
+ try:
+ self.extra_information = { "src": src, "data": data, "target": target }
+ srpm,rpms,brmap,logs = self.runBuilds(srpm,build_tag,archlist,repo_info['id'])
+
+ if opts.get('scratch'):
+ #scratch builds do not get imported
+ self.session.host.moveBuildToScratch(self.id,srpm,rpms,logs=logs)
+ else:
+ self.session.host.completeBuild(self.id,build_id,srpm,rpms,brmap,logs=logs)
+ except (SystemExit,ServerExit,KeyboardInterrupt):
+ #we do not trap these
+ raise
+ except:
+ if not self.opts.get('scratch'):
+ #scratch builds do not get imported
+ self.session.host.failBuild(self.id, build_id)
+ # reraise the exception
+ raise
+ if not self.opts.get('skip_tag') and not self.opts.get('scratch'):
+ self.tagBuild(build_id,dest_tag)
+
+ def getSRPM(self, src, build_tag, repo_id):
+ """Get srpm from src"""
+ if isinstance(src,str):
+ if SCM.is_scm_url(src):
+ return self.getSRPMFromSCM(src, build_tag, repo_id)
+ else:
+ #assume this is a path under uploads
+ return src
+ else:
+ raise koji.BuildError, 'Invalid source specification: %s' % src
+ #XXX - other methods?
+
+ def getSRPMFromSCM(self, url, build_tag, repo_id):
+ #TODO - allow different ways to get the srpm
+ task_id = self.session.host.subtask(method='buildSRPMFromSCM',
+ arglist=[url, build_tag, {'repo_id': repo_id}],
+ label='srpm',
+ parent=self.id)
+ # wait for subtask to finish
+ result = self.wait(task_id)[task_id]
+ srpm = result['srpm']
+ return srpm
+
+ def readSRPMHeader(self, srpm):
+ #srpm arg should be a path relative to <BASEDIR>/work
+ self.logger.debug("Reading SRPM")
+ relpath = "work/%s" % srpm
+ opts = dict([(k, getattr(self.options, k)) for k in 'topurl','topdir'])
+ fo = koji.openRemoteFile(relpath, **opts)
+ h = koji.get_rpm_header(fo)
+ fo.close()
+ if h[rpm.RPMTAG_SOURCEPACKAGE] != 1:
+ raise koji.BuildError, "%s is not a source package" % srpm
+ return h
+
+ def getArchList(self, build_tag, h, extra=None):
+ # get list of arches to build for
+ buildconfig = self.session.getBuildConfig(build_tag, event=self.event_id)
+ arches = buildconfig['arches']
+ if not arches:
+ #XXX - need to handle this better
+ raise koji.BuildError, "No arches for tag %(name)s [%(id)s]" % buildconfig
+ tag_archlist = [koji.canonArch(a) for a in arches.split()]
+ self.logger.debug('arches: %s' % arches)
+ if extra:
+ self.logger.debug('Got extra arches: %s' % extra)
+ arches = "%s %s" % (arches,extra)
+ archlist = arches.split()
+ self.logger.debug('base archlist: %r' % archlist)
+ # - adjust arch list based on srpm macros
+ buildarchs = h[rpm.RPMTAG_BUILDARCHS]
+ exclusivearch = h[rpm.RPMTAG_EXCLUSIVEARCH]
+ excludearch = h[rpm.RPMTAG_EXCLUDEARCH]
+ if buildarchs:
+ archlist = buildarchs
+ self.logger.debug('archlist after buildarchs: %r' % archlist)
+ if exclusivearch:
+ archlist = [ a for a in archlist if a in exclusivearch ]
+ self.logger.debug('archlist after exclusivearch: %r' % archlist)
+ if excludearch:
+ archlist = [ a for a in archlist if a not in excludearch ]
+ self.logger.debug('archlist after excludearch: %r' % archlist)
+ #noarch is funny
+ if 'noarch' not in excludearch and \
+ ( 'noarch' in buildarchs or 'noarch' in exclusivearch ):
+ archlist.append('noarch')
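+ # e.g. (hypothetical) ExclusiveArch: x86_64 reduces a tag arch list of
+ # 'i686 x86_64 ppc64' to ['x86_64'], while BuildArch: noarch leaves
+ # just ['noarch'] after the dedup below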
+ override = self.opts.get('arch_override')
+ if self.opts.get('scratch') and override:
+ #only honor override for scratch builds
+ self.logger.debug('arch override: %s' % override)
+ archlist = override.split()
+ archdict = {}
+ for a in archlist:
+ # Filter based on canonical arches for tag
+ # This prevents building for an arch that we can't handle
+ if a == 'noarch' or koji.canonArch(a) in tag_archlist:
+ archdict[a] = 1
+ if not archdict:
+ raise koji.BuildError, "No matching arches were found"
+ return archdict.keys()
+
+ def runBuilds(self, srpm, build_tag, archlist, repo_id):
+ self.logger.debug("Spawning jobs for arches: %r" % (archlist))
+ subtasks = {}
+ keep_srpm = True
+ for arch in archlist:
+ if koji.util.multi_fnmatch(arch, self.options.literal_task_arches):
+ taskarch = arch
+ else:
+ taskarch = koji.canonArch(arch)
+ subtasks[arch] = self.session.host.subtask(method='buildArch',
+ arglist=[srpm, build_tag, arch, keep_srpm, {'repo_id': repo_id}],
+ label=arch,
+ parent=self.id,
+ arch=taskarch)
+ keep_srpm = False
+
+ self.logger.debug("Got subtasks: %r" % (subtasks))
+ self.logger.debug("Waiting on subtasks...")
+
+ # wait for subtasks to finish
+ results = self.wait(subtasks.values(), all=True, failany=True)
+
+ # finalize import
+ # merge data into needed args for completeBuild call
+ rpms = []
+ brmap = {}
+ logs = {}
+ built_srpm = None
+ for (arch, task_id) in subtasks.iteritems():
+ result = results[task_id]
+ self.logger.debug("DEBUG: %r : %r " % (arch,result,))
+ brootid = result['brootid']
+ for fn in result['rpms']:
+ rpms.append(fn)
+ brmap[fn] = brootid
+ for fn in result['logs']:
+ logs.setdefault(arch,[]).append(fn)
+ if result['srpms']:
+ if built_srpm:
+ raise koji.BuildError, "multiple builds returned a srpm. task %i" % self.id
+ else:
+ built_srpm = result['srpms'][0]
+ brmap[result['srpms'][0]] = brootid
+ if built_srpm:
+ srpm = built_srpm
+ else:
+ raise koji.BuildError("could not find a built srpm")
+
+ return srpm,rpms,brmap,logs
+
+ def tagBuild(self,build_id,dest_tag):
+ #XXX - need options to skip tagging and to force tagging
+ #create the tagBuild subtask
+ #this will handle the "post tests"
+ task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[dest_tag,build_id,False,None,True],
+ label='tag',
+ parent=self.id,
+ arch='noarch')
+ self.wait(task_id)
+
+
+class BaseBuildTask(BaseTaskHandler):
+ """Base class for tasks the create a build root"""
+
+ def checkHostArch(self, tag, hostdata, event=None):
+ tagref = tag
+ if isinstance(tag, dict):
+ tagref = tag.get('id') or tag.get('name')
+ opts = {}
+ if event is not None:
+ opts['event'] = event
+ tag = self.session.getBuildConfig(tagref, **opts)
+ if tag and tag['arches']:
+ tag_arches = [koji.canonArch(a) for a in tag['arches'].split()]
+ host_arches = hostdata['arches'].split()
+ if not set(tag_arches).intersection(host_arches):
+ self.logger.info('Task %s (%s): tag arches (%s) and ' \
+ 'host arches (%s) are disjoint' % \
+ (self.id, self.method,
+ ', '.join(tag_arches), ', '.join(host_arches)))
+ return False
+ #otherwise...
+ # This is in principle an error condition, but this is not a good place
+ # to fail. Instead we proceed and let the task fail normally.
+ return True
+
+
+class BuildArchTask(BaseBuildTask):
+
+ Methods = ['buildArch']
+
+ def weight(self):
+ return 1.5
+
+ def updateWeight(self, name):
+ """
+ Update the weight of this task based on the package we're building.
+ weight is scaled from a minimum of 1.5 to a maximum of 6, based on
+ the average duration of a build of this package.
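+
+ For example, an average build duration of two hours (7200s) gives an
+ adjustment of 7200 / 4800.0 = 1.5, so the task weight becomes
+ 1.5 + 1.5 = 3.0.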
+ """
+ avg = self.session.getAverageBuildDuration(name)
+ if not avg:
+ return
+ if avg < 0:
+ self.logger.warn("Negative average build duration for %s: %s", name, avg)
+ return
+ # increase the task weight by 0.75 for every hour of build duration
+ adj = (avg / 4800.0)
+ # cap the adjustment at +4.5
+ weight = self.weight() + min(4.5, adj)
+ self.session.host.setTaskWeight(self.id, weight)
+
+ def checkHost(self, hostdata):
+ tag = self.params[1]
+ return self.checkHostArch(tag, hostdata)
+
+ def srpm_sanity_checks(self, filename):
+ header = koji.get_rpm_header(filename)
+
+ if not header[rpm.RPMTAG_PACKAGER]:
+ raise koji.BuildError, "The build system failed to set the packager tag"
+ if not header[rpm.RPMTAG_VENDOR]:
+ raise koji.BuildError, "The build system failed to set the vendor tag"
+ if not header[rpm.RPMTAG_DISTRIBUTION]:
+ raise koji.BuildError, "The build system failed to set the distribution tag"
+
+ def handler(self, pkg, root, arch, keep_srpm, opts=None):
+ """Build a package in a buildroot for one arch"""
+ ret = {}
+ if opts is None:
+ opts = {}
+ repo_id = opts.get('repo_id')
+ if not repo_id:
+ raise koji.BuildError, "A repo id must be provided"
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ event_id = repo_info['create_event']
+
+ # starting srpm should already have been uploaded by parent
+ self.logger.debug("Reading SRPM")
+ fn = self.localPath("work/%s" % pkg)
+ if not os.path.exists(fn):
+ raise koji.BuildError, "SRPM file missing: %s" % fn
+ # peel E:N-V-R from package
+ h = koji.get_rpm_header(fn)
+ name = h[rpm.RPMTAG_NAME]
+ ver = h[rpm.RPMTAG_VERSION]
+ rel = h[rpm.RPMTAG_RELEASE]
+ epoch = h[rpm.RPMTAG_EPOCH]
+ if h[rpm.RPMTAG_SOURCEPACKAGE] != 1:
+ raise koji.BuildError, "not a source package"
+ # Disable checking for distribution in the initial SRPM because it
+ # might have been built outside of the build system
+ # if not h[rpm.RPMTAG_DISTRIBUTION]:
+ # raise koji.BuildError, "the distribution tag is not set in the original srpm"
+
+ self.updateWeight(name)
+
+ rootopts = {
+ 'repo_id': repo_id
+ }
+ br_arch = self.find_arch(arch, self.session.host.getHost(), self.session.getBuildConfig(root, event=event_id))
+ broot = BuildRoot(self.session, self.options, root, br_arch, self.id, **rootopts)
+ broot.workdir = self.workdir
+
+ self.logger.debug("Initializing buildroot")
+ broot.init()
+
+ # run build
+ self.logger.debug("Running build")
+ broot.build(fn,arch)
+
+ # extract results
+ resultdir = broot.resultdir()
+ rpm_files = []
+ srpm_files = []
+ log_files = []
+ unexpected = []
+ for f in os.listdir(resultdir):
+ # files here should have one of two extensions: .log and .rpm
+ if f[-4:] == ".log":
+ log_files.append(f)
+ elif f[-8:] == ".src.rpm":
+ srpm_files.append(f)
+ elif f[-4:] == ".rpm":
+ rpm_files.append(f)
+ else:
+ unexpected.append(f)
+ self.logger.debug("rpms: %r" % rpm_files)
+ self.logger.debug("srpms: %r" % srpm_files)
+ self.logger.debug("logs: %r" % log_files)
+ self.logger.debug("unexpected: %r" % unexpected)
+
+ # upload files to storage server
+ uploadpath = broot.getUploadPath()
+ for f in rpm_files:
+ self.uploadFile("%s/%s" % (resultdir,f))
+ self.logger.debug("keep srpm %i %s %s" % (self.id, keep_srpm, opts))
+ if keep_srpm:
+ if len(srpm_files) == 0:
+ raise koji.BuildError, "no srpm files found for task %i" % self.id
+ if len(srpm_files) > 1:
+ raise koji.BuildError, "mulitple srpm files found for task %i: %s" % (self.id, srpm_files)
+
+ # Run sanity checks. Any failures will throw a BuildError
+ self.srpm_sanity_checks("%s/%s" % (resultdir,srpm_files[0]))
+
+ self.logger.debug("uploading %s/%s to %s" % (resultdir,srpm_files[0], uploadpath))
+ self.uploadFile("%s/%s" % (resultdir,srpm_files[0]))
+ if rpm_files:
+ ret['rpms'] = [ "%s/%s" % (uploadpath,f) for f in rpm_files ]
+ else:
+ ret['rpms'] = []
+ if keep_srpm:
+ ret['srpms'] = [ "%s/%s" % (uploadpath,f) for f in srpm_files ]
+ else:
+ ret['srpms'] = []
+ ret['logs'] = [ "%s/%s" % (uploadpath,f) for f in log_files ]
+
+ ret['brootid'] = broot.id
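+ # at this point ret looks roughly like (illustrative):
+ # {'rpms': [...], 'srpms': [...], 'logs': [...], 'brootid': <id>},
+ # which is what BuildTask.runBuilds consumes when merging arch results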
+
+ broot.expire()
+ #Let TaskManager clean up
+
+ return ret
+
+class MavenTask(MultiPlatformTask):
+
+ Methods = ['maven']
+
+ _taskWeight = 0.2
+
+ def handler(self, url, target, opts=None):
+ """Use Maven to build the source from the given url"""
+ if opts is None:
+ opts = {}
+ self.opts = opts
+ target_info = self.session.getBuildTarget(target)
+ if not target_info:
+ raise koji.BuildError, 'unknown build target: %s' % target
+ dest_tag = self.session.getTag(target_info['dest_tag'], strict=True)
+ build_tag = self.session.getTag(target_info['build_tag'], strict=True)
+
+ repo_id = opts.get('repo_id')
+ if not repo_id:
+ repo = self.session.getRepo(build_tag['id'])
+ if repo:
+ repo_id = repo['id']
+ else:
+ raise koji.BuildError, 'no repo for tag %s' % build_tag['name']
+
+ build_opts = dslice(opts, ['goals', 'profiles', 'properties', 'envs', 'patches',
+ 'packages', 'jvm_options', 'maven_options', 'deps'],
+ strict=False)
+ build_opts['repo_id'] = repo_id
+
+ self.build_task_id = self.session.host.subtask(method='buildMaven',
+ arglist=[url, build_tag, build_opts],
+ label='build',
+ parent=self.id,
+ arch='noarch')
+ maven_results = self.wait(self.build_task_id)[self.build_task_id]
+ maven_results['task_id'] = self.build_task_id
+
+ build_info = None
+ if not self.opts.get('scratch'):
+ maven_info = maven_results['maven_info']
+ if maven_info['version'].endswith('-SNAPSHOT'):
+ raise koji.BuildError, '-SNAPSHOT versions are only supported in scratch builds'
+ build_info = koji.maven_info_to_nvr(maven_info)
+
+ if not self.opts.get('skip_tag'):
+ dest_cfg = self.session.getPackageConfig(dest_tag['id'], build_info['name'])
+ # Make sure package is on the list for this tag
+ if dest_cfg is None:
+ raise koji.BuildError, "package %s not in list for tag %s" \
+ % (build_info['name'], dest_tag['name'])
+ elif dest_cfg['blocked']:
+ raise koji.BuildError, "package %s is blocked for tag %s" \
+ % (build_info['name'], dest_tag['name'])
+
+ build_info = self.session.host.initMavenBuild(self.id, build_info, maven_info)
+ self.build_id = build_info['id']
+
+ try:
+ rpm_results = None
+ spec_url = self.opts.get('specfile')
+ if spec_url:
+ rpm_results = self.buildWrapperRPM(spec_url, self.build_task_id, target_info, build_info, repo_id)
+
+ if self.opts.get('scratch'):
+ self.session.host.moveMavenBuildToScratch(self.id, maven_results, rpm_results)
+ else:
+ self.session.host.completeMavenBuild(self.id, self.build_id, maven_results, rpm_results)
+ except (SystemExit, ServerExit, KeyboardInterrupt):
+ # we do not trap these
+ raise
+ except:
+ if not self.opts.get('scratch'):
+ #scratch builds do not get imported
+ self.session.host.failBuild(self.id, self.build_id)
+ # reraise the exception
+ raise
+
+ if not self.opts.get('scratch') and not self.opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[dest_tag['id'], self.build_id, False, None, True],
+ label='tag',
+ parent=self.id,
+ arch='noarch')
+ self.wait(tag_task_id)
+
+class BuildMavenTask(BaseBuildTask):
+
+ Methods = ['buildMaven']
+
+ _taskWeight = 1.5
+
+ def _zip_dir(self, rootdir, filename):
+ rootbase = os.path.basename(rootdir)
+ roottrim = len(rootdir) - len(rootbase)
+ zfo = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
+ for dirpath, dirnames, filenames in os.walk(rootdir):
+ for skip in ['CVS', '.svn', '.git']:
+ if skip in dirnames:
+ dirnames.remove(skip)
+ for filename in filenames:
+ filepath = os.path.join(dirpath, filename)
+ if os.path.islink(filepath):
+ content = os.readlink(filepath)
+ st = os.lstat(filepath)
+ mtime = time.localtime(st.st_mtime)
+ info = zipfile.ZipInfo(filepath[roottrim:])
+ info.external_attr |= 0120000 << 16L # symlink file type
+ info.compress_type = zipfile.ZIP_STORED
+ info.date_time = mtime[:6]
+ zfo.writestr(info, content)
+ else:
+ zfo.write(filepath, filepath[roottrim:])
+ zfo.close()
+
+ def checkHost(self, hostdata):
+ tag = self.params[1]
+ return self.checkHostArch(tag, hostdata)
+
+ def handler(self, url, build_tag, opts=None):
+ if opts is None:
+ opts = {}
+ self.opts = opts
+
+ scm = SCM(url)
+ scm.assert_allowed(self.options.allowed_scms)
+
+ repo_id = opts.get('repo_id')
+ if not repo_id:
+ raise koji.BuildError, 'A repo_id must be provided'
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ event_id = repo_info['create_event']
+
+ br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))
+ maven_opts = opts.get('jvm_options')
+ if not maven_opts:
+ maven_opts = []
+ for opt in maven_opts:
+ if opt.startswith('-Xmx'):
+ break
+ else:
+ # Give the JVM 2G to work with by default, if the build isn't specifying its own max. memory
+ maven_opts.append('-Xmx2048m')
+ buildroot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id,
+ install_group='maven-build', setup_dns=True, repo_id=repo_id,
+ maven_opts=maven_opts, maven_envs=opts.get('envs'),
+ deps=opts.get('deps'))
+ buildroot.workdir = self.workdir
+ self.logger.debug("Initializing buildroot")
+ buildroot.init()
+
+ packages = opts.get('packages')
+ if packages:
+ rv = buildroot.mock(['--install'] + packages)
+ self.session.host.setBuildRootState(buildroot.id, 'BUILDING')
+ self.session.host.updateBuildRootList(buildroot.id, buildroot.getPackageList())
+ if rv:
+ buildroot.expire()
+ raise koji.BuildrootError, 'error installing packages, %s' % buildroot._mockResult(rv, logfile='mock_output.log')
+
+ if not os.path.exists('%s/usr/bin/mvn' % buildroot.rootdir()):
+ raise koji.BuildError, '/usr/bin/mvn was not found in the buildroot'
+
+ scmdir = '%s/maven/build' % buildroot.rootdir()
+ outputdir = '%s/maven/output' % buildroot.rootdir()
+ m2dir = '%s/builddir/.m2' % buildroot.rootdir()
+ repodir = '%s/builddir/.m2/repository' % buildroot.rootdir()
+ patchdir = '%s/maven/patches' % buildroot.rootdir()
+
+ koji.ensuredir(scmdir)
+ koji.ensuredir(outputdir)
+ koji.ensuredir(repodir)
+ koji.ensuredir(patchdir)
+
+ logfile = self.workdir + '/checkout.log'
+ uploadpath = self.getUploadDir()
+
+ # Check out sources from the SCM
+ sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)
+
+ # zip up pristine sources for auditing purposes
+ self._zip_dir(sourcedir, os.path.join(outputdir, 'scm-sources.zip'))
+
+ # Check out patches, if present
+ if self.opts.get('patches'):
+ patchlog = self.workdir + '/patches.log'
+ patch_scm = SCM(self.opts.get('patches'))
+ patch_scm.assert_allowed(self.options.allowed_scms)
+ # never try to check out a common/ dir when checking out patches
+ patch_scm.use_common = False
+ patchcheckoutdir = patch_scm.checkout(patchdir, self.session, uploadpath, patchlog)
+ self._zip_dir(patchcheckoutdir, os.path.join(outputdir, 'patches.zip'))
+
+ # Apply patches, if present
+ if self.opts.get('patches'):
+ # keep only regular files ending in .patch (skips directories and scm metadata)
+ patches = [patch for patch in os.listdir(patchcheckoutdir) if \
+ os.path.isfile(os.path.join(patchcheckoutdir, patch)) and \
+ patch.endswith('.patch')]
+ if not patches:
+ raise koji.BuildError, 'no patches found at %s' % self.opts.get('patches')
+ patches.sort()
+ for patch in patches:
+ cmd = ['/usr/bin/patch', '--verbose', '--no-backup-if-mismatch', '-d', sourcedir, '-p1', '-i', os.path.join(patchcheckoutdir, patch)]
+ ret = log_output(self.session, cmd[0], cmd, patchlog, uploadpath, logerror=1, append=1)
+ if ret:
+ raise koji.BuildError, 'error applying patches from %s, see patches.log for details' % self.opts.get('patches')
+
+ # Set ownership of the entire source tree to the mock user
+ uid = pwd.getpwnam(self.options.mockuser)[2]
+ gid = grp.getgrnam('mock')[2]
+ self.chownTree(scmdir, uid, gid)
+ self.chownTree(outputdir, uid, gid)
+ self.chownTree(m2dir, uid, gid)
+ if self.opts.get('patches'):
+ self.chownTree(patchdir, uid, gid)
+
+ settingsfile = '/builddir/.m2/settings.xml'
+ buildroot.writeMavenSettings(settingsfile, outputdir)
+
+ pomfile = 'pom.xml'
+ maven_options = self.opts.get('maven_options', [])
+ for i, opt in enumerate(maven_options):
+ if opt == '-f' or opt == '--file':
+ if len(maven_options) > (i + 1):
+ pomfile = maven_options[i + 1]
+ break
+ else:
+ raise koji.BuildError, '%s option requires a file path' % opt
+ elif opt.startswith('-f=') or opt.startswith('--file='):
+ pomfile = opt.split('=', 1)[1]
+ break
+ elif opt.startswith('-f'):
+ pomfile = opt[2:]
+ break
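+ # e.g. (hypothetical) maven_options=['--file', 'server/pom.xml'] or
+ # ['-fserver/pom.xml'] would both select server/pom.xml as the POM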
+
+ buildroot.mavenBuild(sourcedir, outputdir, repodir,
+ props=self.opts.get('properties'), profiles=self.opts.get('profiles'),
+ options=self.opts.get('maven_options'), goals=self.opts.get('goals'))
+
+ build_pom = os.path.join(sourcedir, pomfile)
+ if not os.path.exists(build_pom):
+ raise koji.BuildError, '%s does not exist' % pomfile
+ pom_info = koji.parse_pom(build_pom)
+ maven_info = koji.pom_to_maven_info(pom_info)
+
+ # give the zip files more descriptive names
+ os.rename(os.path.join(outputdir, 'scm-sources.zip'),
+ os.path.join(outputdir, maven_info['artifact_id'] + '-' +
+ maven_info['version'] + '-scm-sources.zip'))
+ if self.opts.get('patches'):
+ os.rename(os.path.join(outputdir, 'patches.zip'),
+ os.path.join(outputdir, maven_info['artifact_id'] + '-' +
+ maven_info['version'] + '-patches.zip'))
+
+ logs = ['checkout.log']
+ if self.opts.get('patches'):
+ logs.append('patches.log')
+ output_files = {}
+
+ for path, dirs, files in os.walk(outputdir):
+ if not files:
+ continue
+ reldir = path[len(outputdir) + 1:]
+ for filename in files:
+ root, ext = os.path.splitext(filename)
+ if ext == '.log':
+ logs.append(os.path.join(reldir, filename))
+ else:
+ output_files.setdefault(reldir, []).append(filename)
+
+ # upload the build output
+ for filepath in logs:
+ self.uploadFile(os.path.join(outputdir, filepath),
+ relPath=os.path.dirname(filepath))
+ for relpath, files in output_files.iteritems():
+ for filename in files:
+ self.uploadFile(os.path.join(outputdir, relpath, filename),
+ relPath=relpath)
+
+ # Should only find log files in the mock result directory.
+ # Don't upload these log files, they've already been streamed
+ # to the hub.
+ for filename in os.listdir(buildroot.resultdir()):
+ root, ext = os.path.splitext(filename)
+ if ext == '.log':
+ filepath = os.path.join(buildroot.resultdir(), filename)
+ if os.path.isfile(filepath) and os.stat(filepath).st_size > 0:
+ # only files with content get uploaded to the hub
+ logs.append(filename)
+
+ buildroot.expire()
+
+ return {'maven_info': maven_info,
+ 'buildroot_id': buildroot.id,
+ 'logs': logs,
+ 'files': output_files}
+
+class WrapperRPMTask(BaseBuildTask):
+ """Build a wrapper rpm around archives output from a Maven or Windows build.
+ May either be called as a subtask or as a separate
+ top-level task. In the latter case it can either associate the new rpms
+ with the existing build or create a new build."""
+
+ Methods = ['wrapperRPM']
+
+ _taskWeight = 1.5
+
+ def copy_fields(self, src, tgt, *fields):
+ for field in fields:
+ tgt[field] = src.get(field)
+
+ def spec_sanity_checks(self, filename):
+ spec = open(filename).read()
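+ # e.g. a template that expands to a line 'Vendor: Example Corp' or
+ # '%define packager someone' would be rejected here (hypothetical lines)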
+ for tag in ("Packager", "Distribution", "Vendor"):
+ if re.match("%s:" % tag, spec, re.M):
+ raise koji.BuildError, "%s is not allowed to be set in spec file" % tag
+ for tag in ("packager", "distribution", "vendor"):
+ if re.match("%%define\s+%s\s+" % tag, spec, re.M):
+ raise koji.BuildError, "%s is not allowed to be defined in spec file" % tag
+
+ def checkHost(self, hostdata):
+ target = self.params[1]
+ return self.checkHostArch(target['build_tag'], hostdata)
+
+ def handler(self, spec_url, build_target, build, task, opts=None):
+ if not opts:
+ opts = {}
+
+ if not (build or task):
+ raise koji.BuildError, 'build and/or task must be specified'
+
+ values = {}
+
+ if build:
+ maven_info = self.session.getMavenBuild(build['id'], strict=False)
+ win_info = self.session.getWinBuild(build['id'], strict=False)
+ image_info = self.session.getImageBuild(build['id'], strict=False)
+ else:
+ maven_info = None
+ win_info = None
+ image_info = None
+
+ # list of artifact paths relative to kojiroot (not exposed to the specfile)
+ artifact_relpaths = []
+ # map of file extension to a list of files
+ artifacts = {}
+ # list of all files
+ all_artifacts = []
+ # list of all files with their repo path
+ all_artifacts_with_path = []
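+ # e.g. a Maven build producing foo-1.0.jar and foo-1.0.pom (hypothetical)
+ # gives artifacts == {'.jar': ['foo-1.0.jar'], '.pom': ['foo-1.0.pom']}
+ # and all_artifacts == ['foo-1.0.jar', 'foo-1.0.pom']; these values are
+ # later exposed to the spec file template via the searchList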
+
+ # makes generating relative paths easier
+ self.pathinfo = koji.PathInfo(topdir='')
+
+ if task:
+ # called as a subtask of a build
+ artifact_paths = self.session.listTaskOutput(task['id'])
+
+ for artifact_path in artifact_paths:
+ artifact_name = os.path.basename(artifact_path)
+ base, ext = os.path.splitext(artifact_name)
+ if ext == '.log':
+ # Exclude log files for consistency with the output of listArchives() used below
+ continue
+ relpath = os.path.join(self.pathinfo.task(task['id']), artifact_path)[1:]
+ artifact_relpaths.append(relpath)
+ artifacts.setdefault(ext, []).append(artifact_name)
+ all_artifacts.append(artifact_name)
+ all_artifacts_with_path.append(artifact_path)
+ else:
+ # called as a top-level task to create wrapper rpms for an existing build
+ # verify that the build is complete
+ if not build['state'] == koji.BUILD_STATES['COMPLETE']:
+ raise koji.BuildError, 'cannot call wrapperRPM on a build that did not complete successfully'
+
+ # get the list of files from the build instead of the task, because the task output directory may
+ # have already been cleaned up
+ if maven_info:
+ build_artifacts = self.session.listArchives(buildID=build['id'], type='maven')
+ elif win_info:
+ build_artifacts = self.session.listArchives(buildID=build['id'], type='win')
+ elif image_info:
+ build_artifacts = self.session.listArchives(buildID=build['id'], type='image')
+ else:
+ raise koji.BuildError, 'unsupported build type'
+
+ for artifact in build_artifacts:
+ artifact_name = artifact['filename']
+ base, ext = os.path.splitext(artifact_name)
+ artifacts.setdefault(ext, []).append(artifact_name)
+ all_artifacts.append(artifact_name)
+ if ext == '.log':
+ # listArchives() should never return .log files, but we check for completeness
+ continue
+ if maven_info:
+ repopath = self.pathinfo.mavenfile(artifact)
+ relpath = os.path.join(self.pathinfo.mavenbuild(build), repopath)[1:]
+ artifact_relpaths.append(relpath)
+ all_artifacts_with_path.append(repopath)
+ elif win_info:
+ repopath = self.pathinfo.winfile(artifact)
+ relpath = os.path.join(self.pathinfo.winbuild(build), repopath)[1:]
+ artifact_relpaths.append(relpath)
+ all_artifacts_with_path.append(repopath)
+ elif image_info:
+ ipath = self.pathinfo.imagebuild(build)
+ relpath = os.path.join(ipath, artifact_name)[1:]
+ artifact_relpaths.append(relpath)
+ all_artifacts_with_path.append(artifact_name)
+ else:
+ # can't happen
+ assert False
+
+ if not artifacts:
+ raise koji.BuildError, 'no output found for %s' % (task and koji.taskLabel(task) or koji.buildLabel(build))
+
+ values['artifacts'] = artifacts
+ values['all_artifacts'] = all_artifacts
+ values['all_artifacts_with_path'] = all_artifacts_with_path
+
+ if build:
+ self.copy_fields(build, values, 'epoch', 'name', 'version', 'release')
+ if maven_info:
+ values['maven_info'] = maven_info
+ elif win_info:
+ values['win_info'] = win_info
+ elif image_info:
+ values['image_info'] = image_info
+ else:
+ # can't happen
+ assert False
+ else:
+ task_result = self.session.getTaskResult(task['id'])
+ if task['method'] == 'buildMaven':
+ maven_info = task_result['maven_info']
+ maven_nvr = koji.maven_info_to_nvr(maven_info)
+ maven_nvr['release'] = '0.scratch'
+ self.copy_fields(maven_nvr, values, 'epoch', 'name', 'version', 'release')
+ values['maven_info'] = maven_info
+ elif task['method'] == 'vmExec':
+ self.copy_fields(task_result, values, 'epoch', 'name', 'version', 'release')
+ values['win_info'] = {'platform': task_result['platform']}
+ elif task['method'] in ('createLiveCD', 'createAppliance', 'createImage'):
+ self.copy_fields(task_result, values, 'epoch', 'name', 'version', 'release')
+ else:
+ # can't happen
+ assert False
+
+ scm = SCM(spec_url)
+ scm.assert_allowed(self.options.allowed_scms)
+
+ repo_id = opts.get('repo_id')
+ if not repo_id:
+ raise koji.BuildError, "A repo id must be provided"
+
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ event_id = repo_info['create_event']
+ build_tag = self.session.getTag(build_target['build_tag'], strict=True)
+ br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))
+
+ buildroot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, install_group='wrapper-rpm-build', repo_id=repo_id)
+ buildroot.workdir = self.workdir
+ self.logger.debug("Initializing buildroot")
+ buildroot.init()
+
+ logfile = os.path.join(self.workdir, 'checkout.log')
+ scmdir = buildroot.rootdir() + '/tmp/scmroot'
+ koji.ensuredir(scmdir)
+ specdir = scm.checkout(scmdir, self.session, self.getUploadDir(), logfile)
+
+ spec_template = None
+ for path, dirs, files in os.walk(specdir):
+ files.sort()
+ for filename in files:
+ if filename.endswith('.spec.tmpl'):
+ spec_template = os.path.join(path, filename)
+ break
+ if not spec_template:
+ raise koji.BuildError, 'no spec file template found at URL: %s' % spec_url
+
+ # Put the jars into the same directory as the specfile. This directory will be
+ # set to the rpm _sourcedir so other files in the SCM may be referenced in the
+ # specfile as well.
+ specdir = os.path.dirname(spec_template)
+ for relpath in artifact_relpaths:
+ localpath = self.localPath(relpath)
+ # RPM requires all SOURCE files in the srpm to be in the same directory, so
+ # we flatten any directory structure of the output files here.
+ # If multiple files in the build have the same basename, duplicate files will
+ # have their relative path prepended to their name, with / replaced with -.
+ destpath = os.path.join(specdir, os.path.basename(relpath))
+ if os.path.exists(destpath):
+ destpath = os.path.join(specdir, relpath.replace('/', '-'))
+ shutil.copy(localpath, destpath)
+
+ # change directory to the specdir so the template can reference files there
+ os.chdir(specdir)
+ contents = Cheetah.Template.Template(file=spec_template,
+ searchList=[values]).respond()
+ contents = contents.encode('utf-8')
+
+ specfile = spec_template[:-5]
+ specfd = file(specfile, 'w')
+ specfd.write(contents)
+ specfd.close()
+
+ # Run spec file sanity checks. Any failures will throw a BuildError
+ self.spec_sanity_checks(specfile)
+
+ # chown the specdir to the mock user, because srpm creation happens
+ # as an unprivileged user
+ uid = pwd.getpwnam(self.options.mockuser)[2]
+ gid = grp.getgrnam('mock')[2]
+ self.chownTree(specdir, uid, gid)
+
+ #build srpm
+ self.logger.debug("Running srpm build")
+ buildroot.build_srpm(specfile, specdir, None)
+
+ srpms = glob.glob('%s/*.src.rpm' % buildroot.resultdir())
+ if len(srpms) == 0:
+ raise koji.BuildError, 'no srpms found in %s' % buildroot.resultdir()
+ elif len(srpms) > 1:
+ raise koji.BuildError, 'multiple srpms found in %s: %s' % (buildroot.resultdir(), ', '.join(srpms))
+ else:
+ srpm = srpms[0]
+
+ shutil.move(srpm, self.workdir)
+ srpm = os.path.join(self.workdir, os.path.basename(srpm))
+
+ self.new_build_id = None
+ if opts.get('create_build') and not opts.get('scratch'):
+ h = koji.get_rpm_header(srpm)
+ data = koji.get_header_fields(h, ['name', 'version', 'release', 'epoch'])
+ data['task_id'] = self.id
+ self.logger.info("Reading package config for %(name)s" % data)
+ pkg_cfg = self.session.getPackageConfig(build_target['dest_tag'], data['name'])
+ if not opts.get('skip_tag'):
+ # Make sure package is on the list for this tag
+ if pkg_cfg is None:
+ raise koji.BuildError, "package %s not in list for tag %s" \
+ % (data['name'], build_target['dest_tag_name'])
+ elif pkg_cfg['blocked']:
+ raise koji.BuildError, "package %s is blocked for tag %s" \
+ % (data['name'], build_target['dest_tag_name'])
+ self.new_build_id = self.session.host.initBuild(data)
+
+ try:
+ buildroot.build(srpm)
+ except (SystemExit, ServerExit, KeyboardInterrupt):
+ raise
+ except:
+ if self.new_build_id:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise
+
+ resultdir = buildroot.resultdir()
+ srpm = None
+ rpms = []
+ logs = ['checkout.log']
+
+ for filename in os.listdir(resultdir):
+ if filename.endswith('.src.rpm'):
+ if not srpm:
+ srpm = filename
+ else:
+ if self.new_build_id:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise koji.BuildError, 'multiple srpms found in %s: %s, %s' % \
+ (resultdir, srpm, filename)
+ elif filename.endswith('.rpm'):
+ rpms.append(filename)
+ elif filename.endswith('.log'):
+ logs.append(filename)
+ else:
+ if self.new_build_id:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise koji.BuildError, 'unexpected file found in %s: %s' % \
+ (resultdir, filename)
+
+ if not srpm:
+ if self.new_build_id:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise koji.BuildError, 'no srpm found'
+
+ if not rpms:
+ if self.new_build_id:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise koji.BuildError, 'no rpms found'
+
+ try:
+ for fn in [srpm] + rpms:
+ self.uploadFile(os.path.join(resultdir, fn))
+ except (SystemExit, ServerExit, KeyboardInterrupt):
+ raise
+ except:
+ if self.new_build_id:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise
+
+ results = {'buildroot_id': buildroot.id,
+ 'srpm': srpm,
+ 'rpms': rpms,
+ 'logs': logs}
+
+ if not task:
+ # Called as a standalone top-level task, so handle the rpms now.
+ # Otherwise we let the parent task handle it.
+ uploaddir = self.getUploadDir()
+ relsrpm = uploaddir + '/' + srpm
+ relrpms = [uploaddir + '/' + r for r in rpms]
+ rellogs = [uploaddir + '/' + l for l in logs]
+ if opts.get('scratch'):
+ self.session.host.moveBuildToScratch(self.id, relsrpm, relrpms, {'noarch': rellogs})
+ else:
+ if opts.get('create_build'):
+ brmap = dict.fromkeys([relsrpm] + relrpms, buildroot.id)
+ try:
+ self.session.host.completeBuild(self.id, self.new_build_id,
+ relsrpm, relrpms, brmap, {'noarch': rellogs})
+ except (SystemExit, ServerExit, KeyboardInterrupt):
+ raise
+ except:
+ self.session.host.failBuild(self.id, self.new_build_id)
+ raise
+ if not opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[build_target['dest_tag'],
+ self.new_build_id, False, None, True],
+ label='tag', parent=self.id, arch='noarch')
+ self.wait(tag_task_id)
+ else:
+ self.session.host.importWrapperRPMs(self.id, build['id'], results)
+
+ # no need to upload logs, they've already been streamed to the hub
+ # during the build process
+
+ buildroot.expire()
+
+ return results
+
+class ChainMavenTask(MultiPlatformTask):
+
+ Methods = ['chainmaven']
+
+ _taskWeight = 0.2
+
+ def handler(self, builds, target, opts=None):
+ """Run a sequence of Maven builds in dependency order"""
+ if not opts:
+ opts = {}
+ target_info = self.session.getBuildTarget(target)
+ if not target_info:
+ raise koji.BuildError, 'unknown build target: %s' % target
+ dest_tag = self.session.getTag(target_info['dest_tag'], strict=True)
+
+ if not (opts.get('scratch') or opts.get('skip_tag')):
+ for package in builds:
+ dest_cfg = self.session.getPackageConfig(dest_tag['id'], package)
+ # Make sure package is on the list for this tag
+ if dest_cfg is None:
+ raise koji.BuildError, "package %s not in list for tag %s" \
+ % (package, dest_tag['name'])
+ elif dest_cfg['blocked']:
+ raise koji.BuildError, "package %s is blocked for tag %s" \
+ % (package, dest_tag['name'])
+
+ self.depmap = {}
+ for package, params in builds.items():
+ self.depmap[package] = set(params.get('buildrequires', []))
+
+ todo = copy.deepcopy(self.depmap)
+ running = {}
+ self.done = {}
+ self.results = []
+
+ while True:
+ ready = [package for package, deps in todo.items() if not deps]
+ if not ready and not running:
+ break
+ for package in ready:
+ params = builds[package]
+ buildtype = params.get('type', 'maven')
+ task_url = params['scmurl']
+ task_opts = dslice_ex(params, ['scmurl', 'buildrequires', 'type'], strict=False)
+ if buildtype == 'maven':
+ task_deps = list(self.depset(package))
+ if task_deps:
+ task_opts['deps'] = task_deps
+
+ if not opts.get('force'):
+ # check for a duplicate build (a build performed with the
+ # same scmurl and options)
+ dup_build = self.get_duplicate_build(dest_tag['name'], package, params, task_opts)
+ # if we find one, mark the package as built and remove it from todo
+ if dup_build:
+ self.done[package] = dup_build['nvr']
+ for deps in todo.values():
+ deps.discard(package)
+ del todo[package]
+ self.results.append('%s previously built from %s' % (dup_build['nvr'], task_url))
+ continue
+ task_opts.update(dslice(opts, ['skip_tag', 'scratch'], strict=False))
+
+ if buildtype == 'maven':
+ if opts.get('debug'):
+ task_opts.setdefault('maven_options', []).append('--debug')
+ task_id = self.subtask('maven', [task_url, target, task_opts],
+ label=package)
+ elif buildtype == 'wrapper':
+ pkg_to_wrap = params['buildrequires'][0]
+ to_wrap = self.done[pkg_to_wrap]
+
+ if isinstance(to_wrap, (int, long)):
+ task_to_wrap = self.session.getTaskInfo(to_wrap, request=True)
+ build_to_wrap = None
+ else:
+ build_to_wrap = self.session.getBuild(to_wrap, strict=True)
+ task_to_wrap = None
+ target_info = self.session.getBuildTarget(target, strict=True)
+ repo_info = self.getRepo(target_info['build_tag'])
+ task_opts['repo_id'] = repo_info['id']
+ task_id = self.subtask('wrapperRPM', [task_url, target_info,
+ build_to_wrap, task_to_wrap,
+ task_opts],
+ label=package)
+ else:
+ raise koji.BuildError, 'unsupported build type: %s' % buildtype
+
+ running[task_id] = package
+ del todo[package]
+ try:
+ results = self.wait(running.keys())
+ except (xmlrpclib.Fault, koji.GenericError), e:
+ # One task has failed, wait for the rest to complete before the
+ # chainmaven task fails. self.wait(all=True) should throw an exception.
+ self.wait(all=True)
+ raise
+ # if we get here, results is a map whose keys are the ids of tasks
+ # that have completed successfully
+ for task_id in results:
+ package = running.pop(task_id)
+ task_url = builds[package]['scmurl']
+ if opts.get('scratch'):
+ if builds[package].get('type') == 'wrapper':
+ self.done[package] = task_id
+ else:
+ children = self.session.getTaskChildren(task_id)
+ for child in children:
+ # we want the ID of the buildMaven task because the
+ # output dir of that task is where the Maven repo is
+ if child['method'] == 'buildMaven':
+ self.done[package] = child['id']
+ break
+ else:
+ raise koji.BuildError, 'could not find buildMaven subtask of %s' % task_id
+ self.results.append('%s built from %s by task %s' % \
+ (package, task_url, task_id))
+ else:
+ task_builds = self.session.listBuilds(taskID=task_id)
+ if not task_builds:
+ raise koji.BuildError, 'could not find build for task %s' % task_id
+ task_build = task_builds[0]
+ self.done[package] = task_build['nvr']
+ self.results.append('%s built from %s' % (task_build['nvr'], task_url))
+ for deps in todo.values():
+ deps.discard(package)
+
+ if todo:
+ # should never happen, the client should have checked for circular dependencies
+ raise koji.BuildError, 'unable to run chain build, circular dependencies'
+ return self.results
+
+ def depset(self, package):
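+ # collect the done-values (NVRs or task ids) of all transitive
+ # buildrequires, e.g. (hypothetical) with depmap
+ # {'c': set(['b']), 'b': set(['a']), 'a': set()} and both deps done,
+ # depset('c') returns set([self.done['b'], self.done['a']])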
+ deps = set()
+ for dep in self.depmap[package]:
+ deps.add(self.done[dep])
+ deps.update(self.depset(dep))
+ return deps
+
+ def dicts_equal(self, a, b):
+ """Check if two dicts are equal. They are considered equal if they
+ have the same keys and those keys have the same values. If a value is
+ a list, it is considered equal to a list with the same values in a
+ different order."""
+ akeys = a.keys()
+ bkeys = b.keys()
+ if sorted(akeys) != sorted(bkeys):
+ return False
+ for key in akeys:
+ aval = a.get(key)
+ bval = b.get(key)
+ if type(aval) != type(bval):
+ return False
+ if isinstance(aval, dict):
+ if not self.dicts_equal(aval, bval):
+ return False
+ elif isinstance(aval, list):
+ if not sorted(aval) == sorted(bval):
+ return False
+ else:
+ if not aval == bval:
+ return False
+ return True
+
+ def get_duplicate_build(self, tag, package, params, task_opts):
+ """Find the latest build of package in tag and compare it to the
+ scmurl and task_opts. If they're identical, return the build."""
+ builds = self.session.getLatestBuilds(tag, package=package)
+ if not builds:
+ return None
+ build = builds[0]
+ if not build['task_id']:
+ return None
+ build_task = self.session.getTaskInfo(build['task_id'], request=True)
+ request = build_task['request']
+ if request[0] != params['scmurl']:
+ return None
+ if params.get('type') == 'wrapper':
+ wrapped_build = request[2]
+ pkg_to_wrap = params['buildrequires'][0]
+ nvr_to_wrap = self.done[pkg_to_wrap]
+ if wrapped_build['nvr'] != nvr_to_wrap:
+ return None
+ # For a wrapper-rpm build, the only parameters that really matter
+ # are the scmurl and the wrapped NVR. These both match, so
+ # return the existing build.
+ return build
+ if len(request) > 2:
+ build_opts = dslice_ex(request[2], ['skip_tag', 'scratch'], strict=False)
+ else:
+ build_opts = {}
+ task_opts = copy.deepcopy(task_opts)
+ # filter out options that don't affect the build output
+ # to avoid unnecessary rebuilds
+ for opts in [build_opts, task_opts]:
+ if 'maven_options' in opts:
+ maven_options = opts['maven_options']
+ for opt in ['-e', '--errors', '-q', '--quiet',
+ '-V', '--show-version', '-X', '--debug']:
+ if opt in maven_options:
+ maven_options.remove(opt)
+ if not maven_options:
+ del opts['maven_options']
+ if 'jvm_options' in opts:
+ del opts['jvm_options']
+ if not self.dicts_equal(build_opts, task_opts):
+ return None
+ # everything matches
+ return build
+
+class TagBuildTask(BaseTaskHandler):
+
+ Methods = ['tagBuild']
+ #XXX - set weight?
+
+ def handler(self, tag_id, build_id, force=False, fromtag=None, ignore_success=False):
+ task = self.session.getTaskInfo(self.id)
+ user_id = task['owner']
+ try:
+ build = self.session.getBuild(build_id, strict=True)
+ tag = self.session.getTag(tag_id, strict=True)
+
+ #several basic sanity checks have already been run (and will be run
+ #again when we make the final call). Our job is to perform the more
+ #computationally expensive 'post' tests.
+
+ #XXX - add more post tests
+ self.session.host.tagBuild(self.id,tag_id,build_id,force=force,fromtag=fromtag)
+ self.session.host.tagNotification(True, tag_id, fromtag, build_id, user_id, ignore_success)
+ except Exception, e:
+ exctype, value = sys.exc_info()[:2]
+ self.session.host.tagNotification(False, tag_id, fromtag, build_id, user_id, ignore_success, "%s: %s" % (exctype, value))
+ raise e
+
+class BuildImageTask(MultiPlatformTask):
+
+ def initImageBuild(self, name, version, release, target_info, opts):
+ """create a build object for this image build"""
+ pkg_cfg = self.session.getPackageConfig(target_info['dest_tag_name'],
+ name)
+ self.logger.debug("%r" % pkg_cfg)
+ if not opts.get('skip_tag') and not opts.get('scratch'):
+ # Make sure package is on the list for this tag
+ if pkg_cfg is None:
+ raise koji.BuildError, "package (image) %s not in list for tag %s" % (name, target_info['dest_tag_name'])
+ elif pkg_cfg['blocked']:
+ raise koji.BuildError, "package (image) %s is blocked for tag %s" % (name, target_info['dest_tag_name'])
+ return self.session.host.initImageBuild(self.id,
+ dict(name=name, version=version, release=release, epoch=0))
+
+ def getRelease(self, name, ver):
+ """return the next available release number for an N-V"""
+ return self.session.getNextRelease(dict(name=name, version=ver))
+
+class BuildBaseImageTask(BuildImageTask):
+ Methods = ['image']
+
+ def handler(self, name, version, arches, target, inst_tree, opts=None):
+ """Governing task for building an appliance using Oz"""
+ target_info = self.session.getBuildTarget(target, strict=True)
+ build_tag = target_info['build_tag']
+ repo_info = self.getRepo(build_tag)
+ #check requested arches against build tag
+ buildconfig = self.session.getBuildConfig(build_tag)
+ if not buildconfig['arches']:
+ raise koji.BuildError, "No arches for tag %(name)s [%(id)s]" % buildconfig
+ tag_archlist = [koji.canonArch(a) for a in buildconfig['arches'].split()]
+ for arch in arches:
+ if koji.canonArch(arch) not in tag_archlist:
+ raise koji.BuildError, "Invalid arch for build tag: %s" % arch
+
+ if not opts:
+ opts = {}
+
+ if not ozif_enabled:
+ self.logger.error("ImageFactory features require the following dependencies: pykickstart, imagefactory, oz and possibly python-hashlib")
+ raise koji.ApplianceError, 'ImageFactory functions not available'
+
+ # build image(s)
+ bld_info = None
+ try:
+ release = opts.get('release')
+ if not release:
+ release = self.getRelease(name, version)
+ if '-' in version:
+ raise koji.ApplianceError('The Version may not have a hyphen')
+ if '-' in release:
+ raise koji.ApplianceError('The Release may not have a hyphen')
+ if not opts.get('scratch'):
+ bld_info = self.initImageBuild(name, version, release,
+ target_info, opts)
+
+ subtasks = {}
+ self.logger.debug("Spawning jobs for image arches: %r" % (arches))
+ for arch in arches:
+ inst_url = inst_tree.replace('$arch', arch)
+ subtasks[arch] = self.session.host.subtask(
+ method='createImage',
+ arglist=[name, version, release, arch, target_info,
+ build_tag, repo_info, inst_url, opts],
+ label=arch, parent=self.id, arch=arch)
+ self.logger.debug("Got image subtasks: %r" % (subtasks))
+ self.logger.debug("Waiting on image subtasks...")
+ results = self.wait(subtasks.values(), all=True, failany=True)
+
+ # wrap in an RPM if asked
+ rpm_results = None
+ spec_url = opts.get('specfile')
+ for arch in arches:
+ # get around an xmlrpc limitation, use arches for keys instead
+ results[arch] = results[subtasks[arch]]
+ del results[subtasks[arch]]
+ if spec_url:
+ subtask = subtasks[arch]
+ results[arch]['rpmresults'] = self.buildWrapperRPM(
+ spec_url, subtask, target_info, bld_info,
+ repo_info['id'])
+
+ # make sure we only import the user-submitted kickstart file one
+ # time, otherwise we will have collisions. Keep it in exactly one
+ # of the subtask results and remove it from the rest
+ if opts.has_key('kickstart'):
+ saw_ks = False
+ for arch in results.keys():
+ ks = os.path.basename(opts.get('kickstart'))
+ if ks in results[arch]['files']:
+ if saw_ks:
+ results[arch]['files'].remove(ks)
+ saw_ks = True
+
+ self.logger.debug('Image Results for hub: %s' % results)
+ if opts.get('scratch'):
+ self.session.host.moveImageBuildToScratch(self.id, results)
+ else:
+ self.session.host.completeImageBuild(self.id, bld_info['id'],
+ results)
+
+ except (SystemExit,ServerExit,KeyboardInterrupt):
+ #we do not trap these
+ raise
+ except:
+ if not opts.get('scratch'):
+ #scratch builds do not get imported
+ if bld_info:
+ self.session.host.failBuild(self.id, bld_info['id'])
+ # reraise the exception
+ raise
+
+ # tag it
+ if not opts.get('scratch') and not opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],
+ label='tag', parent=self.id, arch='noarch')
+ self.wait(tag_task_id)
+
+ # report results
+ report = ''
+ if opts.get('scratch'):
+ respath = ', '.join(
+ [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in subtasks.values()])
+ report += 'Scratch '
+ else:
+ respath = koji.pathinfo.imagebuild(bld_info)
+ report += 'image build results in: %s' % respath
+ return report
+
+
+class BuildApplianceTask(BuildImageTask):
+ Methods = ['appliance']
+
+ def handler(self, name, version, arch, target, ksfile, opts=None):
+ """Governing task for building an appliance"""
+ target_info = self.session.getBuildTarget(target, strict=True)
+ build_tag = target_info['build_tag']
+ repo_info = self.getRepo(build_tag)
+ #check requested arch against build tag
+ buildconfig = self.session.getBuildConfig(build_tag)
+ if not buildconfig['arches']:
+ raise koji.BuildError, "No arches for tag %(name)s [%(id)s]" % buildconfig
+ tag_archlist = [koji.canonArch(a) for a in buildconfig['arches'].split()]
+ if koji.canonArch(arch) not in tag_archlist:
+ raise koji.BuildError, "Invalid arch for build tag: %s" % arch
+
+
+ if not opts:
+ opts = {}
+
+ if not image_enabled:
+ self.logger.error("Appliance features require the following dependencies: pykickstart, and possibly python-hashlib")
+ raise koji.ApplianceError, 'Appliance functions not available'
+
+ # build image
+ try:
+ release = opts.get('release')
+ if not release:
+ release = self.getRelease(name, version)
+ bld_info = None
+ if not opts.get('scratch'):
+ bld_info = self.initImageBuild(name, version, release,
+ target_info, opts)
+ create_task_id = self.session.host.subtask(method='createAppliance',
+ arglist=[name, version, release, arch, target_info, build_tag,
+ repo_info, ksfile, opts],
+ label='appliance', parent=self.id, arch=arch)
+ results = self.wait(create_task_id)
+ self.logger.info('image build task (%s) completed' % create_task_id)
+ self.logger.info('results: %s' % results)
+
+ # wrap in an RPM if asked
+ rpm_results = None
+ spec_url = opts.get('specfile')
+ if spec_url:
+ results[create_task_id]['rpmresults'] = self.buildWrapperRPM(
+ spec_url, create_task_id,
+ target_info, bld_info, repo_info['id'])
+ results[str(create_task_id)] = results[create_task_id]
+ del results[create_task_id]
+
+ # import the image (move it too)
+ if not opts.get('scratch'):
+ self.session.host.completeImageBuild(self.id, bld_info['id'], results)
+ else:
+ self.session.host.moveImageBuildToScratch(self.id, results)
+
+ except (SystemExit,ServerExit,KeyboardInterrupt):
+ #we do not trap these
+ raise
+ except:
+ if not opts.get('scratch'):
+ #scratch builds do not get imported
+ if bld_info:
+ self.session.host.failBuild(self.id, bld_info['id'])
+ # reraise the exception
+ raise
+
+ # tag it
+ if not opts.get('scratch') and not opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],
+ label='tag', parent=self.id, arch='noarch')
+ self.wait(tag_task_id)
+
+ # report results
+ if opts.get('scratch'):
+ respath = os.path.join(koji.pathinfo.work(),
+ koji.pathinfo.taskrelpath(create_task_id))
+ report = 'Scratch '
+ else:
+ respath = koji.pathinfo.imagebuild(bld_info)
+ report = ''
+ report += 'appliance build results in: %s' % respath
+ return report
+
+class BuildLiveCDTask(BuildImageTask):
+ Methods = ['livecd']
+
+ def handler(self, name, version, arch, target, ksfile, opts=None):
+ """Governing task for building LiveCDs"""
+ target_info = self.session.getBuildTarget(target, strict=True)
+ build_tag = target_info['build_tag']
+ repo_info = self.getRepo(build_tag)
+ #check requested arch against build tag
+ buildconfig = self.session.getBuildConfig(build_tag)
+ if not buildconfig['arches']:
+ raise koji.BuildError, "No arches for tag %(name)s [%(id)s]" % buildconfig
+ tag_archlist = [koji.canonArch(a) for a in buildconfig['arches'].split()]
+ if koji.canonArch(arch) not in tag_archlist:
+ raise koji.BuildError, "Invalid arch for build tag: %s" % arch
+
+ if not opts:
+ opts = {}
+ if not image_enabled:
+ self.logger.error("LiveCD features require the following dependencies: "
+ "pykickstart, pycdio, and possibly python-hashlib")
+ raise koji.LiveCDError, 'LiveCD functions not available'
+
+ # build the image
+ try:
+ release = opts.get('release')
+ if not release:
+ release = self.getRelease(name, version)
+ bld_info = None
+ if not opts.get('scratch'):
+ bld_info = self.initImageBuild(name, version, release,
+ target_info, opts)
+ create_task_id = self.session.host.subtask(method='createLiveCD',
+ arglist=[name, version, release, arch, target_info, build_tag,
+ repo_info, ksfile, opts],
+ label='livecd', parent=self.id, arch=arch)
+ results = self.wait(create_task_id)
+ self.logger.info('image build task (%s) completed' % create_task_id)
+ self.logger.info('results: %s' % results)
+
+ # wrap in an RPM if needed
+ spec_url = opts.get('specfile')
+ rpm_results = None
+ if spec_url:
+ results[create_task_id]['rpmresults'] = self.buildWrapperRPM(
+ spec_url, create_task_id,
+ target_info, bld_info, repo_info['id'])
+ results[str(create_task_id)] = results[create_task_id]
+ del results[create_task_id]
+
+ # import it (and move)
+ if not opts.get('scratch'):
+ self.session.host.completeImageBuild(self.id, bld_info['id'], results)
+ else:
+ self.session.host.moveImageBuildToScratch(self.id, results)
+
+ except (SystemExit,ServerExit,KeyboardInterrupt):
+ #we do not trap these
+ raise
+ except:
+ if not opts.get('scratch'):
+ #scratch builds do not get imported
+ if bld_info:
+ self.session.host.failBuild(self.id, bld_info['id'])
+ # reraise the exception
+ raise
+
+ # tag it if necessary
+ if not opts.get('scratch') and not opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],
+ label='tag', parent=self.id, arch='noarch')
+ self.wait(tag_task_id)
+
+ # report the results
+ if opts.get('scratch'):
+ respath = os.path.join(koji.pathinfo.work(),
+ koji.pathinfo.taskrelpath(create_task_id))
+ report = 'Scratch '
+ else:
+ respath = koji.pathinfo.imagebuild(bld_info)
+ report = ''
+ report += 'livecd build results in: %s' % respath
+ return report
+
+# A generic task for building cd or disk images using chroot-based tools.
+# Other chroot-based image handlers should inherit this.
+class ImageTask(BaseTaskHandler):
+ Methods = []
+
+ def makeImgBuildRoot(self, buildtag, repoinfo, arch, inst_group):
+ """
+ Create and prepare the chroot we're going to build an image in.
+ Binds necessary directories and creates needed device files.
+
+ @args:
+ buildtag: a build tag
+ repoinfo: a session.getRepo() object
+ arch: a canonical architecture name
+ inst_group: a string representing the yum group to install with
+ @returns: a buildroot object
+ """
+
+ # Here we configure mock to bind mount a set of /dev directories
+ bind_opts = {'dirs' : {'/dev' : '/dev',}}
+ if os.path.exists('/selinux'):
+ bind_opts['dirs']['/selinux'] = '/selinux'
+ rootopts = {'install_group': inst_group,
+ 'setup_dns': True,
+ 'repo_id': repoinfo['id'],
+ 'bind_opts' : bind_opts}
+
+ broot = BuildRoot(self.session, self.options, buildtag, arch, self.id, **rootopts)
+ broot.workdir = self.workdir
+
+ # create the mock chroot
+ self.logger.debug("Initializing image buildroot")
+ broot.init()
+ self.logger.debug("Image buildroot ready: " + broot.rootdir())
+ return broot
+
+ def fetchKickstart(self, broot, ksfile):
+ """
+ Retrieve the kickstart file we were given (locally or remotely) and
+ upload it.
+
+ Note that if the KS file existed locally, then "ksfile" is a relative
+ path to it in the /mnt/koji/work directory. If not, then it is still
+ the parameter the user passed in initially, and we assume it is a
+ relative path in a remote scm. The user should have passed in an scm
+ url with --ksurl.
+
+ @args:
+ broot: a buildroot object
+ ksfile: path to a kickstart file
+ @returns: absolute path to the retrieved kickstart file
+ """
+ scmdir = os.path.join(broot.rootdir(), 'tmp')
+ koji.ensuredir(scmdir)
+ self.logger.debug("ksfile = %s" % ksfile)
+ if self.opts.get('ksurl'):
+ scm = SCM(self.opts['ksurl'])
+ scm.assert_allowed(self.options.allowed_scms)
+ logfile = os.path.join(self.workdir, 'checkout.log')
+ scmsrcdir = scm.checkout(scmdir, self.session, self.getUploadDir(), logfile)
+ kspath = os.path.join(scmsrcdir, ksfile)
+ else:
+ kspath = self.localPath("work/%s" % ksfile)
+
+ self.uploadFile(kspath) # upload the original ks file
+ return kspath # full absolute path to the file in the chroot
+
+ def readKickstart(self, kspath, opts):
+ """
+ Read a kickstart file and save the ks object as a task member.
+
+ @args:
+ kspath: path to a kickstart file
+ opts: the task options dict
+ @returns: None
+ """
+ # XXX: If the ks file came from a local path and has %include
+ # macros, *-creator will fail because the included
+ # kickstarts were not copied into the chroot. For now we
+ # require users to flatten their kickstart file if submitting
+ # the task with a local path.
+ #
+ # Note that if an SCM URL was used instead, %include macros
+ # may not be a problem if the included kickstarts are present
+ # in the repository we checked out.
+ if opts.get('ksversion'):
+ version = ksparser.version.makeVersion(ksparser.stringToVersion(opts['ksversion']))
+ else:
+ version = ksparser.version.makeVersion()
+ self.ks = ksparser.KickstartParser(version)
+ try:
+ self.ks.readKickstart(kspath)
+ except IOError, e:
+ raise koji.LiveCDError("Failed to read kickstart file "
+ "'%s' : %s" % (kspath, e))
+ except kserrors.KickstartError, e:
+ raise koji.LiveCDError("Failed to parse kickstart file "
+ "'%s' : %s" % (kspath, e))
+
+ def prepareKickstart(self, repo_info, target_info, arch, broot, opts):
+ """
+ Process the ks file to be used for controlled image generation. This
+ method also uploads the modified kickstart file to the task output
+ area.
+
+ @args:
+ repo_info: a session.getRepo() object
+ target_info: a session.getBuildTarget() object
+ arch: canonical architecture name
+ broot: a buildroot object
+ opts: the task options dict
+ @returns:
+ absolute path to a processed kickstart file within the buildroot
+ """
+ # Now we do some kickstart manipulation. If the user passed in a repo
+ # url with --repo, then we substitute that in for the repo(s) specified
+ # in the kickstart file. If --repo wasn't specified, then we use the
+ # repo associated with the target passed in initially.
+ repo_class = kscontrol.dataMap[self.ks.version]['RepoData']
+ self.ks.handler.repo.repoList = [] # delete whatever the ks file told us
+ if opts.get('repo'):
+ user_repos = opts['repo']
+ if isinstance(user_repos, basestring):
+ user_repos = user_repos.split(',')
+ index = 0
+ for user_repo in user_repos:
+ self.ks.handler.repo.repoList.append(repo_class(baseurl=user_repo, name='koji-override-%i' % index))
+ index += 1
+ else:
+ path_info = koji.PathInfo(topdir=self.options.topurl)
+ repopath = path_info.repo(repo_info['id'],
+ target_info['build_tag_name'])
+ baseurl = '%s/%s' % (repopath, arch)
+ self.logger.debug('BASEURL: %s' % baseurl)
+ self.ks.handler.repo.repoList.append(repo_class(baseurl=baseurl, name='koji-%s-%i' % (target_info['build_tag_name'], repo_info['id'])))
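+ # (illustrative) with --repo http://example.com/r1,http://example.com/r2
+ # the rewritten kickstart gains repo lines roughly like:
+ # repo --name=koji-override-0 --baseurl=http://example.com/r1
+ # repo --name=koji-override-1 --baseurl=http://example.com/r2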
+
+ # Write out the new ks file. Note that things may not be in the same
+ # order and comments in the original ks file may be lost.
+ kskoji = os.path.join('/tmp', 'koji-image-%s-%i.ks' %
+ (target_info['build_tag_name'], self.id))
+ kojikspath = os.path.join(broot.rootdir(), kskoji[1:])
+ outfile = open(kojikspath, 'w')
+ outfile.write(str(self.ks.handler))
+ outfile.close()
+
+ # put the new ksfile in the output directory
+ if not os.path.exists(kojikspath):
+ raise koji.LiveCDError, "KS file missing: %s" % kojikspath
+ self.uploadFile(kojikspath)
+ return kskoji # absolute path within chroot
+
+ def getImagePackages(self, cachepath):
+ """
+ Read RPM header information from the yum cache available in the
+ given path. Returns a list of dictionaries for each RPM included.
+ """
+ found = False
+ hdrlist = []
+ fields = ['name', 'version', 'release', 'epoch', 'arch',
+ 'buildtime', 'sigmd5']
+ for root, dirs, files in os.walk(cachepath):
+ for f in files:
+ if fnmatch(f, '*.rpm'):
+ pkgfile = os.path.join(root, f)
+ hdr = koji.get_header_fields(pkgfile, fields)
+ hdr['size'] = os.path.getsize(pkgfile)
+ hdr['payloadhash'] = koji.hex_string(hdr['sigmd5'])
+ del hdr['sigmd5']
+ hdrlist.append(hdr)
+ found = True
+ if not found:
+ raise koji.LiveCDError, 'No rpms found in yum cache!'
+ return hdrlist
+
+# ApplianceTask begins with a mock chroot, and then installs appliance-tools
+# into it via the appliance-build group. appliance-creator is then executed
+# in the chroot to create the appliance image.
+#
+class ApplianceTask(ImageTask):
+
+ Methods = ['createAppliance']
+ _taskWeight = 1.5
+
+ def getRootDevice(self):
+ """
+ Return the device name for the / partition, as specified in the
+ kickstart file. Appliances should have this defined.
+ """
+ for part in self.ks.handler.partition.partitions:
+ if part.mountpoint == '/':
+ return part.disk
+ raise koji.ApplianceError, 'kickstart lacks a "/" mountpoint'
+
+ def handler(self, name, version, release, arch, target_info, build_tag, repo_info, ksfile, opts=None):
+
+ if opts is None:
+ opts = {}
+ self.opts = opts
+ broot = self.makeImgBuildRoot(build_tag, repo_info, arch,
+ 'appliance-build')
+ kspath = self.fetchKickstart(broot, ksfile)
+ self.readKickstart(kspath, opts)
+ kskoji = self.prepareKickstart(repo_info, target_info, arch, broot, opts)
+ # Figure out appliance-creator arguments, let it fail if something
+ # is wrong.
+ odir = 'app-output'
+ opath = os.path.join(broot.rootdir(), 'tmp', odir)
+ cachedir = '/tmp/koji-appliance' # arbitrary paths in chroot
+ app_log = '/tmp/appliance.log'
+ os.mkdir(opath)
+
+ cmd = ['/usr/bin/appliance-creator', '-c', kskoji, '-d', '-v',
+ '--logfile', app_log, '--cache', cachedir, '-o', odir]
+ for arg_name in ('vmem', 'vcpu', 'format'):
+ arg = opts.get(arg_name)
+ if arg is not None:
+ cmd.extend(['--%s' % arg_name, arg])
+ appname = '%s-%s-%s' % (name, version, release)
+ cmd.extend(['--name', appname])
+ cmd.extend(['--version', version, '--release', release])
+
+ # Run appliance-creator
+ rv = broot.mock(['--cwd', '/tmp', '--chroot', '--'] + cmd)
+ self.uploadFile(os.path.join(broot.rootdir(), app_log[1:]))
+ if rv:
+ raise koji.ApplianceError, \
+ "Could not create appliance: %s" % parseStatus(rv, 'appliance-creator') + "; see root.log or appliance.log for more information"
+
+ # Find the results
+ results = []
+ for directory, subdirs, files in os.walk(opath):
+ for f in files:
+ results.append(os.path.join(broot.rootdir(), 'tmp',
+ directory, f))
+ self.logger.debug('output: %s' % results)
+ if len(results) == 0:
+ raise koji.ApplianceError, "Could not find image build results!"
+ imgdata = {
+ 'arch': arch,
+ 'rootdev': self.getRootDevice(),
+ 'task_id': self.id,
+ 'logs': ['build.log', 'mock_output.log', 'root.log', 'state.log',
+ 'appliance.log', os.path.basename(ksfile),
+ os.path.basename(kskoji)],
+ 'name': name,
+ 'version': version,
+ 'release': release
+ }
+ imgdata['files'] = []
+ for ofile in results:
+ self.uploadFile(ofile)
+ imgdata['files'].append(os.path.basename(ofile))
+
+ # TODO: get file manifest from the appliance
+
+ if not opts.get('scratch'):
+ hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
+ cachedir[1:]))
+ broot.markExternalRPMs(hdrlist)
+ imgdata['rpmlist'] = hdrlist
+
+ broot.expire()
+ return imgdata
+
+# LiveCDTask begins with a mock chroot, and then installs livecd-tools into it
+# via the livecd-build group. livecd-creator is then executed in the chroot
+# to create the LiveCD image.
+#
+class LiveCDTask(ImageTask):
+
+ Methods = ['createLiveCD']
+ _taskWeight = 1.5
+
+ def genISOManifest(self, image, manifile):
+ """
+ Using iso9660 from pycdio, get the file manifest of the given image,
+ and save it to the text file manifile.
+ """
+ try:
+ fd = open(manifile, 'w')
+ except IOError, e:
+ raise koji.GenericError, \
+ 'Unable to open manifest file (%s) for writing: %s' % (manifile, e)
+ iso = iso9660.ISO9660.IFS(source=image)
+ if not iso.is_open():
+ raise koji.GenericError, \
+ 'Could not open %s as an ISO-9660 image!' % image
+
+ # image metadata
+ id = iso.get_application_id()
+ if id is not None: fd.write("Application ID: %s\n" % id)
+ id = iso.get_preparer_id()
+ if id is not None: fd.write("Preparer ID: %s\n" % id)
+ id = iso.get_publisher_id()
+ if id is not None: fd.write("Publisher ID: %s\n" % id)
+ id = iso.get_system_id()
+ if id is not None: fd.write("System ID: %s\n" % id)
+ id = iso.get_volume_id()
+ if id is not None: fd.write("Volume ID: %s\n" % id)
+ id = iso.get_volumeset_id()
+ if id is not None: fd.write("Volumeset ID: %s\n" % id)
+
+ fd.write('\nSize(bytes) File Name\n')
+ manifest = self.listISODir(iso, '/')
+ for a_file in manifest:
+ fd.write(a_file)
+ fd.close()
+ iso.close()
+
+ def listISODir(self, iso, path):
+ """
+ Helper function called recursively by genISOManifest. Returns a
+ listing of files/directories at the given path in an iso image obj.
+ """
+ manifest = []
+ file_stats = iso.readdir(path)
+ for stat in file_stats:
+ filename = stat[0]
+ size = stat[2]
+ is_dir = stat[4] == 2
+
+ if filename == '..':
+ continue
+ elif filename == '.':
+ # path should always end in a trailing /
+ filepath = path
+ else:
+ filepath = path + filename
+ # identify directories with a trailing /
+ if is_dir:
+ filepath += '/'
+
+ if is_dir and filename != '.':
+ # recurse into subdirectories
+ manifest.extend(self.listISODir(iso, filepath))
+ else:
+ # output information for the current directory and files
+ manifest.append("%-10d %s\n" % (size, filepath))
+
+ return manifest
+
+ def handler(self, name, version, release, arch, target_info, build_tag, repo_info, ksfile, opts=None):
+
+ if opts is None:
+ opts = {}
+ self.opts = opts
+
+ broot = self.makeImgBuildRoot(build_tag, repo_info, arch,
+ 'livecd-build')
+ kspath = self.fetchKickstart(broot, ksfile)
+ self.readKickstart(kspath, opts)
+ kskoji = self.prepareKickstart(repo_info, target_info, arch, broot, opts)
+
+ cachedir = '/tmp/koji-livecd' # arbitrary paths in chroot
+ livecd_log = '/tmp/livecd.log'
+ cmd = ['/usr/bin/livecd-creator', '-c', kskoji, '-d', '-v',
+ '--logfile', livecd_log, '--cache', cachedir]
+ # we set the fs label to the same as the isoname if it exists,
+ # taking at most 32 characters
+ isoname = '%s-%s-%s' % (name, version, release)
+ cmd.extend(['-f', isoname[:32]])
+
+ # Run livecd-creator
+ rv = broot.mock(['--cwd', '/tmp', '--chroot', '--'] + cmd)
+ self.uploadFile(os.path.join(broot.rootdir(), livecd_log[1:]))
+ if rv:
+ raise koji.LiveCDError, \
+ 'Could not create LiveCD: %s' % parseStatus(rv, 'livecd-creator') + '; see root.log or livecd.log for more information'
+
+ # Find the resultant iso
+ # The cwd of the livecd-creator process is /tmp in the chroot, so
+ # that is where it writes the .iso
+ files = os.listdir(os.path.join(broot.rootdir(), 'tmp'))
+ isofile = None
+ for afile in files:
+ if afile.endswith('.iso'):
+ if not isofile:
+ isofile = afile
+ else:
+ raise koji.LiveCDError, 'multiple .iso files found: %s and %s' % (isofile, afile)
+ if not isofile:
+ raise koji.LiveCDError, 'could not find iso file in chroot'
+ isosrc = os.path.join(broot.rootdir(), 'tmp', isofile)
+
+ # upload the iso from the chroot. If we were given an isoname,
+ # the remoteName argument to uploadFile is where the renaming happens.
+ self.logger.debug('uploading image: %s' % isosrc)
+ isoname += '.iso'
+
+ # Generate the file manifest of the image, upload the results
+ manifest = os.path.join(broot.resultdir(), 'manifest.log')
+ self.genISOManifest(isosrc, manifest)
+ self.uploadFile(manifest)
+ self.uploadFile(isosrc, remoteName=isoname)
+
+ imgdata = {'arch': arch,
+ 'files': [isoname],
+ 'rootdev': None,
+ 'task_id': self.id,
+ 'logs': ['build.log', 'mock_output.log', 'root.log', 'state.log',
+ 'livecd.log', os.path.basename(ksfile),
+ os.path.basename(kskoji)],
+ 'name': name,
+ 'version': version,
+ 'release': release
+ }
+ if not opts.get('scratch'):
+ hdrlist = self.getImagePackages(os.path.join(broot.rootdir(),
+ cachedir[1:]))
+ imgdata['rpmlist'] = hdrlist
+ broot.markExternalRPMs(hdrlist)
+
+ broot.expire()
+ return imgdata
+
+# A generic task for building disk images using Oz
+# Other Oz-based image handlers should inherit this.
+class OzImageTask(BaseTaskHandler):
+ Methods = []
+
+ def fetchKickstart(self):
+ """
+ Retrieve the kickstart file we were given (locally or remotely) and
+ upload it to the hub.
+
+ Note that if the KS file existed locally, then "ksfile" is a relative
+ path to it in the /mnt/koji/work directory. If not, then it is still
+ the parameter the user passed in initially, and we assume it is a
+ relative path in a remote scm. The user should have passed in an scm
+ url with --ksurl.
+
+ @args: None, use self.opts for options
+ @returns:
+ absolute path to the retrieved kickstart file
+ """
+ ksfile = self.opts.get('kickstart')
+ self.logger.debug("ksfile = %s" % ksfile)
+ if self.opts.get('ksurl'):
+ scm = SCM(self.opts['ksurl'])
+ scm.assert_allowed(self.options.allowed_scms)
+ logfile = os.path.join(self.workdir, 'checkout-%s.log' % self.arch)
+ scmsrcdir = scm.checkout(self.workdir, self.session,
+ self.getUploadDir(), logfile)
+ kspath = os.path.join(scmsrcdir, os.path.basename(ksfile))
+ else:
+ tops = dict([(k, getattr(self.options, k)) for k in 'topurl','topdir'])
+ ks_src = koji.openRemoteFile(ksfile, **tops)
+ kspath = os.path.join(self.workdir, os.path.basename(ksfile))
+ ks_dest = open(kspath, 'w')
+ ks_dest.write(ks_src.read())
+ ks_dest.close()
+ self.logger.debug('uploading kickstart from here: %s' % kspath)
+ self.uploadFile(kspath) # upload the original ks file
+ return kspath # absolute path to the ks file
+
+ def readKickstart(self, kspath):
+ """
+ Read a kickstart file and save the ks object as a task member.
+
+ @args:
+ kspath: path to a kickstart file
+ @returns:
+ a kickstart object returned by pykickstart
+ """
+ # XXX: If the ks file came from a local path and has %include
+ # macros, Oz will fail because it can only handle flat files.
+ # We require users to flatten their kickstart file.
+ if self.opts.get('ksversion'):
+ version = ksparser.version.makeVersion(
+ ksparser.stringToVersion(self.opts['ksversion']))
+ else:
+ version = ksparser.version.makeVersion()
+ ks = ksparser.KickstartParser(version)
+ self.logger.debug('attempting to read kickstart: %s' % kspath)
+ try:
+ ks.readKickstart(kspath)
+ except IOError, e:
+ raise koji.BuildError("Failed to read kickstart file "
+ "'%s' : %s" % (kspath, e))
+ except kserrors.KickstartError, e:
+ raise koji.BuildError("Failed to parse kickstart file "
+ "'%s' : %s" % (kspath, e))
+ return ks
+
+ def prepareKickstart(self, kspath, install_tree):
+ """
+ Process the ks file to be used for controlled image generation. This
+ method also uploads the modified kickstart file to the task output
+ area on the hub.
+
+ @args:
+ kspath: a path to a kickstart file
+ install_tree: a URL to the install tree to inject into the kickstart
+ @returns:
+ a kickstart object with koji-specific modifications
+ """
+ ks = self.readKickstart(kspath)
+ # Now we do some kickstart manipulation. If the user passed in a repo
+ # url with --repo, then we substitute that in for the repo(s) specified
+ # in the kickstart file. If --repo wasn't specified, then we use the
+ # repo associated with the target passed in initially.
+ ks.handler.repo.repoList = [] # delete whatever the ks file told us
+ repo_class = kscontrol.dataMap[ks.version]['RepoData']
+ # TODO: sensibly use "url" and "repo" commands in kickstart
+ if self.opts.get('repo'):
+ # the user used --repo at least once
+ user_repos = self.opts.get('repo')
+ index = 0
+ for user_repo in user_repos:
+ repo_url = user_repo.replace('$arch', self.arch)
+ ks.handler.repo.repoList.append(repo_class(
+ baseurl=repo_url, name='koji-override-%i' % index))
+ index += 1
+ else:
+ # --repo was not given, so we use the target's build repo
+ path_info = koji.PathInfo(topdir=self.options.topurl)
+ repopath = path_info.repo(self.repo_info['id'],
+ self.target_info['build_tag_name'])
+ baseurl = '%s/%s' % (repopath, self.arch)
+ self.logger.debug('BASEURL: %s' % baseurl)
+ ks.handler.repo.repoList.append(repo_class(
+ baseurl=baseurl, name='koji-override-0'))
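+ # (illustrative) in the --repo case above, a user repo of
+ # http://example.com/os/$arch is rewritten to baseurl
+ # http://example.com/os/x86_64 on an x86_64 task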
+ # inject the URL of the install tree into the kickstart
+ ks.handler.url.url = install_tree
+ return ks
+
+ def writeKickstart(self, ksobj, ksname):
+ """
+ Write out the new ks file. Note that things may not be in the same
+ order and comments in the original ks file may be lost.
+
+ @args:
+ ksobj: a pykickstart object of what we want to write
+ ksname: file name for the kickstart
+ @returns:
+ an absolute path to the kickstart file we wrote
+ """
+ kspath = os.path.join(self.workdir, ksname)
+ outfile = open(kspath, 'w')
+ outfile.write(str(ksobj.handler))
+ outfile.close()
+
+ # put the new ksfile in the output directory
+ if not os.path.exists(kspath):
+ raise koji.BuildError, "KS file missing: %s" % kspath
+ self.uploadFile(kspath) # upload the modified ks file
+ return kspath
+
+ def makeConfig(self):
+ """
+ Generate a configuration dict for ImageFactory. This will override
+ anything in the /etc config files. We do this forcibly so that it is
+ impossible for Koji to use any image caches or leftover metadata from
+ other images created by the service.
+
+ @args: none
+ @returns:
+ a dictionary used for configuring ImageFactory to build an image
+ the way we want
+ """
+ return {
+ #Oz specific
+ 'oz_data_dir': os.path.join(self.workdir, 'oz_data'),
+ 'oz_screenshot_dir': os.path.join(self.workdir, 'oz_screenshots'),
+ #IF specific
+ 'imgdir': os.path.join(self.workdir, 'scratch_images'),
+ 'tmpdir': os.path.join(self.workdir, 'oz-tmp'),
+ 'verbose' : True,
+ 'timeout': 7200,
+ 'output': 'log',
+ 'raw': False,
+ 'debug': True,
+ 'image_manager': 'file',
+ 'plugins': '/etc/imagefactory/plugins.d',
+ 'rhevm_image_format': 'qcow2',
+ 'tdl_require_root_pw': False,
+ 'image_manager_args': {
+ 'storage_path': os.path.join(self.workdir, 'output_image')},
+ }
+
+ def makeTemplate(self, name, inst_tree):
+ """
+ Generate a simple "TDL" for ImageFactory to build an image with.
+
+ @args:
+ name: a name for the image
+ inst_tree: a string, a URL to the install tree (a compose)
+ @returns:
+ An XML string that imagefactory can consume
+ """
+ # we have to split this up so the variable substitution works
+ # XXX: using a packages section (which we don't) will have IF boot the
+ # image and attempt to ssh in. This breaks docker image creation.
+ # TODO: intelligently guess the distro based on the install tree URL
+ distname, distver = self.parseDistro(self.opts.get('distro'))
+ template = """<template>
+ <name>%s</name>
+ <os>
+ <name>%s</name>
+ <version>%s</version>
+ <arch>%s</arch>
+ <install type='url'>
+ <url>%s</url>
+ </install>
+ """ % (name, distname, distver, self.arch, inst_tree)
+ template += """<icicle>
+ <extra_command>rpm -qa --qf '%{NAME},%{VERSION},%{RELEASE},%{ARCH},%{EPOCH},%{SIZE},%{SIGMD5},%{BUILDTIME}\n'</extra_command>
+ </icicle>
+ """
+ # TODO: intelligently guess the size based on the kickstart file
+ template += """</os>
+ <description>%s OS</description>
+ <disk>
+ <size>%sG</size>
+ </disk>
+</template>
+""" % (name, self.opts.get('disk_size'))
+ return template
+
+ def parseDistro(self, distro):
+ """
+ Figure out the distribution name and version we are going to build an
+ image on.
+
+ args:
+ a string of the form: RHEL-X.Y, Fedora-NN, CentOS-X.Y, or SL-X.Y
+ returns:
+ a 2-element sequence (name, version); where the split happens depends on the distro
+ """
+ if distro.startswith('RHEL'):
+ major, minor = distro.split('.')
+ if major == 'RHEL-5':
+ minor = 'U' + minor
+ return major, minor
+ elif distro.startswith('Fedora'):
+ return distro.split('-')
+ elif distro.startswith('CentOS'):
+ return distro.split('.')
+ elif distro.startswith('SL'):
+ return distro.split('.')
+ else:
+ raise koji.BuildError('Unknown or unsupported distro given: %s' % distro)
+
+ def fixImageXML(self, format, filename, xmltext):
+ """
+ The XML generated by Oz/ImageFactory knows nothing about the name
+ or image format conversions Koji does. We fix those values in the
+ libvirt XML and write the changes out to a file, the path of which
+ we return.
+
+ @args:
+ format = raw, qcow2, vmdk, etc... a string representation
+ filename = the name of the XML file we will save this to
+ xmltext = the libvirt XML to start with
+ @return:
+ an absolute path to the modified XML
+ """
+ newxml = xml.dom.minidom.parseString(xmltext)
+ ename = newxml.getElementsByTagName('name')[0]
+ ename.firstChild.nodeValue = self.imgname
+ esources = newxml.getElementsByTagName('source')
+ for e in esources:
+ if e.hasAttribute('file'):
+ e.setAttribute('file', '%s.%s' % (self.imgname, format))
+ edriver = newxml.getElementsByTagName('driver')[0]
+ edriver.setAttribute('type', format)
+ xml_path = os.path.join(self.workdir, filename)
+ xmlfd = open(xml_path, 'w')
+ xmlfd.write(newxml.toprettyxml())
+ xmlfd.close()
+ return xml_path
+
+ def getScreenshot(self):
+ """
+ Locate a screenshot taken by libvirt in the case of build failure,
+ if it exists. If it does, return the path, else return None.
+
+ @args: none
+ @returns: a path to a screenshot taken by libvirt, or None
+ """
+ shotdir = os.path.join(self.workdir, 'oz_screenshots')
+ screenshot = None
+ found = glob.glob(os.path.join(shotdir, '*.ppm'))
+ if len(found) > 0:
+ screenshot = found[0]
+ found = glob.glob(os.path.join(shotdir, '*.png'))
+ if len(found) > 0:
+ screenshot = found[0]
+ return screenshot
+
+class BaseImageTask(OzImageTask):
+
+ Methods = ['createImage']
+ _taskWeight = 2.0
+
+ def _format_deps(self, formats):
+ """
+ Return a dictionary where the keys are the image formats we need to
+ build/convert, and the values are booleans that indicate whether the
+ output should be included in the task results.
+
+ Some image formats require others to be processed first, which is why
+ we have to do this. raw files in particular may not be kept.
+ """
+ supported = ('raw', 'raw-xz', 'vmdk', 'qcow', 'qcow2', 'vdi', 'rhevm-ova', 'vsphere-ova', 'docker', 'vagrant-virtualbox', 'vagrant-libvirt', 'vpc')
+ for f in formats:
+ if f not in supported:
+ raise koji.ApplianceError('Invalid format: %s' % f)
+ f_dict = dict((f, True) for f in formats)
+
+ # If a user requests 1 or more image formats (with --format) we do not
+ # by default include the raw disk image in the results, because it is
+ # 10G in size. To override this behavior, the user must specify
+ # "--format raw" in their command. If --format was not used at all,
+ # then we do include the raw disk image by itself.
+ if len(formats) == 0:
+ # we only want a raw disk image (no format option given)
+ f_dict['raw'] = True
+ elif 'raw' not in f_dict.keys():
+ f_dict['raw'] = False
+ self.logger.debug('Image delivery plan: %s' % f_dict)
+ return f_dict
+
+ def do_images(self, ks, template, inst_tree):
+ """
+ Call out to ImageFactory to build the image(s) we want. Returns a dict
+ of details for each image type we had to ask ImageFactory to build
+ """
+ fcalls = {'raw': self._buildBase,
+ 'raw-xz': self._buildXZ,
+ 'vmdk': self._buildConvert,
+ 'vdi': self._buildConvert,
+ 'qcow': self._buildConvert,
+ 'qcow2': self._buildConvert,
+ 'vpc': self._buildConvert,
+ 'rhevm-ova': self._buildOVA,
+ 'vsphere-ova': self._buildOVA,
+ 'vagrant-virtualbox': self._buildOVA,
+ 'vagrant-libvirt': self._buildOVA,
+ 'docker': self._buildDocker
+ }
+ # add a handler to the logger so that we capture ImageFactory's logging
+ self.fhandler = logging.FileHandler(self.ozlog)
+ self.bd = BuildDispatcher()
+ self.tlog = logging.getLogger()
+ self.tlog.setLevel(logging.DEBUG)
+ self.tlog.addHandler(self.fhandler)
+ images = {}
+ random.seed() # necessary to ensure a unique mac address
+ params = {'install_script': str(ks.handler),
+ 'offline_icicle': True}
+ # build the base (raw) image
+ self.base_img = self._buildBase(template, params)
+ images['raw'] = {'image': self.base_img.base_image.data,
+ 'icicle': self.base_img.base_image.icicle}
+ # Do the rest of the image types (everything but raw)
+ for format in self.formats:
+ if format == 'raw':
+ continue
+ self.logger.info('dispatching %s image builder' % format)
+ images[format] = fcalls[format](format)
+ imginfo = self._processXML(images)
+ self.tlog.removeHandler(self.fhandler)
+ self.uploadFile(self.ozlog)
+ return imginfo
+
+ def _processXML(self, images):
+ """
+ Produce XML that libvirt can import to create a domain based on image(s)
+ we produced. We save the location of the XML file in the dictionary
+ it corresponds to here.
+
+ @args:
+ images - a dict where the keys are image formats, and the values
+ are dicts with details about the image (location, icicle, etc)
+ @returns:
+ a dictionary just like "images" but with a new key called "libvirt"
+ that points to the path of the XML file for that image
+ """
+ imginfo = {}
+ for fmt in images.keys():
+ imginfo[fmt] = images[fmt]
+ lxml = self.fixImageXML(fmt, 'libvirt-%s-%s.xml' % (fmt, self.arch),
+ self.base_img.base_image.parameters['libvirt_xml'])
+ imginfo[fmt]['libvirt'] = lxml
+ return imginfo
+
+ def _checkImageState(self, image):
+ """
+ Query ImageFactory for details of a dispatched image build. If it is
+ FAILED we raise an exception.
+
+ @args:
+ image - a Builder object returned by the BuildDispatcher
+ @returns: nothing
+ """
+ if image.target_image:
+ status = image.target_image.status
+ details = image.target_image.status_detail['error']
+ else:
+ status = image.base_image.status
+ details = image.base_image.status_detail['error']
+ self.logger.debug('check image results: %s' % status)
+ if status == 'FAILED':
+ scrnshot = self.getScreenshot()
+ if scrnshot:
+ ext = scrnshot[-3:]
+ self.uploadFile(scrnshot, remoteName='screenshot.%s' % ext)
+ image.os_plugin.abort() # forcibly tear down the VM
+ # TODO abort when a task is CANCELLED
+ if not self.session.checkUpload('', os.path.basename(self.ozlog)):
+ self.tlog.removeHandler(self.fhandler)
+ self.uploadFile(self.ozlog)
+ if 'No disk activity' in details:
+ details = 'Automated install failed or prompted for input. See the screenshot in the task results for more information.'
+ raise koji.ApplianceError('Image status is %s: %s' %
+ (status, details))
+
+ def _buildBase(self, template, params, wait=True):
+ """
+ Build a base image using ImageFactory. This is a "raw" image.
+
+ @args:
+ template - an XML string for the TDL
+ params - a dict that controls some ImageFactory settings
+ wait - call join() on the building thread if True
+ @returns:
+ a Builder object whose base_image member holds the image data
+ (including an icicle)
+ """
+ # TODO: test the failure case where IF itself throws an exception
+ # ungracefully (missing a plugin for example)
+ # may need to still upload ozlog and remove the log handler
+ self.logger.info('dispatching a baseimg builder')
+ self.logger.debug('templates: %s' % template)
+ self.logger.debug('params: %s' % params)
+ base = self.bd.builder_for_base_image(template, parameters=params)
+ if wait:
+ base.base_thread.join()
+ self._checkImageState(base)
+ return base
+
+ def _buildXZ(self, format):
+ """
+ Use xz to compress a raw disk image. This is very straightforward.
+
+ @args:
+ format - a string representing the image format, "raw-xz"
+ @returns:
+ a dict with some metadata about the image
+ """
+ newimg = os.path.join(self.workdir, self.imgname + '.raw.xz')
+ rawimg = os.path.join(self.workdir, self.imgname + '.raw')
+ cmd = ['/bin/cp', self.base_img.base_image.data, rawimg]
+ conlog = os.path.join(self.workdir,
+ 'xz-cp-%s-%s.log' % (format, self.arch))
+ log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),
+ logerror=1)
+ cmd = ['/usr/bin/xz', '-z', rawimg]
+ conlog = os.path.join(self.workdir,
+ 'xz-%s-%s.log' % (format, self.arch))
+ log_output(self.session, cmd[0], cmd, conlog, self.getUploadDir(),
+ logerror=1)
+ return {'image': newimg}
+
+ def _buildOVA(self, format):
+ """
+ Build an OVA target image. This is a format supported by RHEV and
+ vSphere
+
+ @args:
+ format - a string representing the image format, "rhevm-ova"
+ @returns:
+ a dict with some metadata about the image
+ """
+ img_opts = {}
+ if self.opts.get('ova_option'):
+ img_opts = dict([o.split('=', 1) for o in self.opts.get('ova_option')])
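+ # (illustrative) e.g. --ova-option some_key=some_value arrives here
+ # as ['some_key=some_value'] and becomes {'some_key': 'some_value'}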
+ # As far as Image Factory is concerned, vagrant boxes are just another type of OVA
+ # We communicate the desire for vagrant-specific formatting by adding the *_ova_format
+ # options and turning the underlying format option back into one of the two target
+ # image types ('vsphere-ova' or 'rhevm-ova') that are used to generate the intermediate
+ # disk image
+ if format == 'vagrant-virtualbox':
+ format = 'vsphere-ova'
+ img_opts['vsphere_ova_format'] = 'vagrant-virtualbox'
+ if format == 'vagrant-libvirt':
+ format = 'rhevm-ova'
+ img_opts['rhevm_ova_format'] = 'vagrant-libvirt'
+ targ = self._do_target_image(self.base_img.base_image.identifier,
+ format.replace('-ova', ''))
+ targ2 = self._do_target_image(targ.target_image.identifier, 'OVA',
+ img_opts=img_opts)
+ return {'image': targ2.target_image.data}
+
+ def _buildDocker(self, format):
+ """
+ Build a base docker image. This image will be tagged with the NVR.A
+ automatically because we name it that way in the ImageFactory TDL.
+
+ @args:
+ format - the string "docker"
+ @returns:
+ a dict with some metadata about the image
+ """
+ img_opts = {'compress': 'xz'}
+ targ = self._do_target_image(self.base_img.base_image.identifier,
+ 'docker', img_opts=img_opts)
+ return {'image': targ.target_image.data}
+
+ def _do_target_image(self, base_id, image_type, img_opts=None):
+ """
+ A generic method for building what ImageFactory calls "target images".
+ These are images based on a raw disk that was built before using the
+ _buildBase method.
+
+ @args:
+ base_id - a string ID of the image to build off of
+ image_type - a string representing the target type. ImageFactory
+ uses this to figure out what plugin to run
+ img_opts - a dict of additional options that are specific to the
+ target type we pass in via image_type
+ @returns:
+ A Builder() object from ImageFactory that contains information
+ about the image build, including state and progress.
+ """
+ # TODO: test the failure case where IF itself throws an exception
+ # ungracefully (missing a plugin for example)
+ # may need to still upload ozlog and remove the log handler
+ if img_opts is None:
+ img_opts = {}
+ self.logger.debug('img_opts: %s' % img_opts)
+ target = self.bd.builder_for_target_image(image_type,
+ image_id=base_id, template=None, parameters=img_opts)
+ target.target_thread.join()
+ self._checkImageState(target)
+ return target
+
+ def _buildConvert(self, format):
+ """
+ Build an image by converting the format using qemu-img. This method
+ enables a variety of formats like qcow, qcow2, vmdk, and vdi.
+
+ @args:
+ format - a string representing the image format, "qcow2"
+ @returns:
+ a dict with some metadata about the image
+ """
+ self.logger.debug('converting an image to "%s"' % format)
+ ofmt = format
+ if format == 'vpc':
+ ofmt = 'vhd'
+ newimg = os.path.join(self.workdir, self.imgname + '.%s' % ofmt)
+ cmd = ['/usr/bin/qemu-img', 'convert', '-f', 'raw', '-O',
+ format, self.base_img.base_image.data, newimg]
+ if format in ('qcow', 'qcow2'):
+ cmd.insert(2, '-c') # enable compression for qcow images
+ conlog = os.path.join(self.workdir,
+ 'qemu-img-%s-%s.log' % (format, self.arch))
+ log_output(self.session, cmd[0], cmd, conlog,
+ self.getUploadDir(), logerror=1)
+ return {'image': newimg}
+
+ def handler(self, name, version, release, arch, target_info, build_tag, repo_info, inst_tree, opts=None):
+
+ if opts is None:
+ opts = {}
+ self.arch = arch
+ self.target_info = target_info
+ self.repo_info = repo_info
+ self.opts = opts
+ self.formats = self._format_deps(opts.get('format') or [])
+
+ # First, prepare the kickstart to use the repos we tell it
+ kspath = self.fetchKickstart()
+ ks = self.prepareKickstart(kspath, inst_tree)
+ kskoji = self.writeKickstart(ks,
+ os.path.join(self.workdir, 'koji-%s-%i-base.ks' %
+ (self.target_info['build_tag_name'], self.id)))
+
+ # auto-generate a TDL file and config dict for ImageFactory
+ self.imgname = '%s-%s-%s.%s' % (name, version, release, self.arch)
+ template = self.makeTemplate(self.imgname, inst_tree)
+ self.logger.debug('oz template: %s' % template)
+ config = self.makeConfig()
+ self.logger.debug('IF config object: %s' % config)
+ ApplicationConfiguration(configuration=config)
+
+ tdl_path = os.path.join(self.workdir, 'tdl-%s.xml' % self.arch)
+ tdl = open(tdl_path, 'w')
+ tdl.write(template)
+ tdl.close()
+ self.uploadFile(tdl_path)
+
+ # ImageFactory picks a port to the guest VM using a rolling integer.
+ # This is a problem for concurrency, so we override the port it picks
+ # here using the task ID. (not a perfect solution but good enough:
+ # the likelihood of image tasks clashing here is very small)
+ rm = ReservationManager()
+ rm._listen_port = rm.MIN_PORT + self.id % (rm.MAX_PORT - rm.MIN_PORT)
+ ozlogname = 'oz-%s.log' % self.arch
+ self.ozlog = os.path.join(self.workdir, ozlogname)
+
+ # invoke the image builds
+ images = self.do_images(ks, template, inst_tree)
+ images['raw']['tdl'] = os.path.basename(tdl_path)
+
+ # structure the results to pass back to the hub:
+ imgdata = {
+ 'arch': self.arch,
+ 'task_id': self.id,
+ 'logs': [ozlogname],
+ 'name': name,
+ 'version': version,
+ 'release': release,
+ 'rpmlist': [],
+ 'files': [os.path.basename(tdl_path),
+ os.path.basename(kspath),
+ os.path.basename(kskoji)]
+ }
+ # record the RPMs that were installed
+ if not opts.get('scratch'):
+ fields = ('name', 'version', 'release', 'arch', 'epoch', 'size',
+ 'payloadhash', 'buildtime')
+ icicle = xml.dom.minidom.parseString(images['raw']['icicle'])
+ self.logger.debug('ICICLE: %s' % images['raw']['icicle'])
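+ # (illustrative) each <extra> element's text is one CSV line from
+ # the rpm query in the TDL, roughly:
+ # bash,4.3.30,2.fc21,x86_64,(none),1029744,0123abcd,1410968400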
+ for p in icicle.getElementsByTagName('extra'):
+ bits = p.firstChild.nodeValue.split(',')
+ rpm = {
+ 'name': bits[0],
+ 'version': bits[1],
+ 'release': bits[2],
+ 'arch': bits[3],
+ # epoch is a special case, as usual
+ 'size': int(bits[5]),
+ 'payloadhash': bits[6],
+ 'buildtime': int(bits[7])
+ }
+ if rpm['name'] in ['buildsys-build', 'gpg-pubkey']:
+ continue
+ if bits[4] == '(none)':
+ rpm['epoch'] = None
+ else:
+ rpm['epoch'] = int(bits[4])
+ imgdata['rpmlist'].append(rpm)
+ # TODO: hack to make this work for now, need to refactor
+ br = BuildRoot(self.session, self.options, build_tag, self.arch,
+ self.id, repo_id=self.repo_info['id'])
+ br.markExternalRPMs(imgdata['rpmlist'])
+
+ # upload the results
+ for format in (f for f in self.formats.keys() if self.formats[f]):
+ newimg = images[format]['image']
+ if 'ova' in format or format == 'raw-xz':
+ newname = self.imgname + '.' + format.replace('-', '.')
+ elif 'vagrant' in format:
+ # This embeds the vagrant target and the ".box" format in the name
+ # Previously, based on filename, these looked like OVAs
+ # This was confusing to many people
+ newname = self.imgname + '.' + format + '.box'
+ elif format == 'docker':
+ newname = self.imgname + '.' + 'tar.xz'
+ elif format == 'vpc':
+ newname = self.imgname + '.' + 'vhd'
+ else:
+ newname = self.imgname + '.' + format
+ if format != 'docker':
+ lxml = images[format]['libvirt']
+ imgdata['files'].append(os.path.basename(lxml))
+ self.uploadFile(lxml)
+ imgdata['files'].append(os.path.basename(newname))
+ self.uploadFile(newimg, remoteName=newname)
+
+ # no need to delete anything since self.workdir will get scrubbed
+ return imgdata
+
+class BuildIndirectionImageTask(OzImageTask):
+ Methods = ['indirectionimage']
+
+ # So, these are copied directly from the base image class
+ # Realistically, we want to inherit methods from both BuildImageTask
+ # and OzImageTask.
+ # TODO: refactor - my initial suggestion would be to have OzImageTask
+ # be a child of BuildImageTask
+
+ def initImageBuild(self, name, version, release, target_info, opts):
+ """create a build object for this image build"""
+ pkg_cfg = self.session.getPackageConfig(target_info['dest_tag_name'],
+ name)
+ self.logger.debug("%r" % pkg_cfg)
+ if not opts.get('skip_tag') and not opts.get('scratch'):
+ # Make sure package is on the list for this tag
+ if pkg_cfg is None:
+ raise koji.BuildError, "package (image) %s not in list for tag %s" % (name, target_info['dest_tag_name'])
+ elif pkg_cfg['blocked']:
+ raise koji.BuildError, "package (image) %s is blocked for tag %s" % (name, target_info['dest_tag_name'])
+ return self.session.host.initImageBuild(self.id,
+ dict(name=name, version=version, release=release, epoch=0))
+
+ def getRelease(self, name, ver):
+ """return the next available release number for an N-V"""
+ return self.session.getNextRelease(dict(name=name, version=ver))
+
+ # END inefficient base image task method copies
+
+ def fetchHubOrSCM(self, filepath, fileurl):
+ """
+ Retrieve a file either from the hub or a remote scm
+
+ If fileurl is None we assume we are being asked to retrieve from
+ the hub and that filepath is relative to /mnt/koji/work.
+ if fileurl contains a value we assume a remote SCM.
+
+ If retrieving remote we assume that filepath is the file name and
+ fileurl is the path in the remote SCM where that file can be found.
+
+ @returns: absolute path to the retrieved file
+ """
+ # TODO: A small change to the base image build code could allow this method
+ # to be shared between both tasks. I wanted this initial implementation
+ # to be entirely self contained. Revisit if anyone feels like a refactor.
+ self.logger.debug("filepath = %s" % filepath)
+ if fileurl:
+ scm = SCM(fileurl)
+ scm.assert_allowed(self.options.allowed_scms)
+ logfile = os.path.join(self.workdir, 'checkout.log')
+ scmsrcdir = scm.checkout(self.workdir, self.session,
+ self.getUploadDir(), logfile)
+ final_path = os.path.join(scmsrcdir, os.path.basename(filepath))
+ else:
+ tops = dict([(k, getattr(self.options, k)) for k in 'topurl','topdir'])
+ remote_fileobj = koji.openRemoteFile(filepath, **tops)
+ final_path = os.path.join(self.workdir, os.path.basename(filepath))
+ final_fileobj = open(final_path, 'w')
+ final_fileobj.write(remote_fileobj.read())
+ final_fileobj.close()
+ self.logger.debug('uploading retrieved file from here: %s' % final_path)
+ self.uploadFile(final_path) # upload the original ks file
+ return final_path # absolute path to the ks file
+
+ def handler(self, opts):
+ """Governing task for building an image with two other images using Factory Indirection"""
+ # TODO: Add mode of operation where full build details are given for
+ # either base or utility or both, then spawn subtasks to do them first
+ def _task_to_image(task_id):
+ """ Take a task ID and turn it into an Image Factory Base Image object """
+ pim = PersistentImageManager.default_manager()
+ taskinfo = self.session.getTaskInfo(task_id)
+ taskstate = koji.TASK_STATES[taskinfo['state']].lower()
+ if taskstate != 'closed':
+ raise koji.BuildError("Input task (%d) must be in closed state - current state is (%s)" %
+ (task_id, taskstate))
+ taskmethod = taskinfo['method']
+ if taskmethod != "createImage":
+ raise koji.BuildError("Input task method must be 'createImage' - actual method (%s)" %
+ (taskmethod))
+ result = self.session.getTaskResult(task_id)
+ files = self.session.listTaskOutput(task_id)
+
+ # This approach works for both scratch and saved/formal images
+ # The downside is that we depend on the output file naming convention
+ def _match_name(inlist, namere):
+ for filename in inlist:
+ if re.search(namere, filename):
+ return filename
+ task_diskimage = _match_name(result['files'], ".*qcow2$")
+ task_tdl = _match_name(result['files'], "tdl.*xml")
+
+ task_dir = os.path.join(koji.pathinfo.work(),koji.pathinfo.taskrelpath(task_id))
+ diskimage_full = os.path.join(task_dir, task_diskimage)
+ tdl_full = os.path.join(task_dir, task_tdl)
+
+ if not (os.path.isfile(diskimage_full) and os.path.isfile(tdl_full)):
+ raise koji.BuildError("Missing TDL or qcow2 image for task (%d) - possible expired scratch build" % (task_id))
+
+ # The sequence to recreate a valid persistent image is as follows
+ # Create a new BaseImage object
+ factory_base_image = BaseImage()
+ # Add it to the persistence layer
+ pim.add_image(factory_base_image)
+ # Now replace the data and template with the files referenced above
+ # and mark it as a complete image
+ # Factory doesn't attempt to modify a disk image after it is COMPLETE so
+ # this will work safely on read-only NFS mounts
+ factory_base_image.data = diskimage_full
+ factory_base_image.template = open(tdl_full).read()
+ factory_base_image.status = 'COMPLETE'
+ # Now save it
+ pim.save_image(factory_base_image)
+
+ # We can now reference this object directly or via its UUID in persistent storage
+ return factory_base_image
+
+ def _nvr_to_image(nvr, arch):
+ """ Take a build ID or NVR plus arch and turn it into an Image Factory Base Image object """
+ pim = PersistentImageManager.default_manager()
+ build = self.session.getBuild(nvr)
+ if not build:
+ raise koji.BuildError("Could not find build for (%s)" % (nvr))
+
+ buildarchives = self.session.listArchives(build['id'])
+ if not buildarchives:
+ raise koji.BuildError("Could not retrieve archives for build (%s) from NVR (%s)" %
+ (build['id'], nvr))
+
+ buildfiles = [ x['filename'] for x in buildarchives ]
+ builddir = koji.pathinfo.imagebuild(build)
+
+ def _match_name(inlist, namere):
+ for filename in inlist:
+ if re.search(namere, filename):
+ return filename
+
+ build_diskimage = _match_name(buildfiles, ".*%s\.qcow2$" % (arch))
+ build_tdl = _match_name(buildfiles, "tdl.%s\.xml" % (arch))
+
+ diskimage_full = os.path.join(builddir, build_diskimage)
+ tdl_full = os.path.join(builddir, build_tdl)
+
+ if not (os.path.isfile(diskimage_full) and os.path.isfile(tdl_full)):
+ raise koji.BuildError("Missing TDL (%s) or qcow2 (%s) image for image (%s) - this should never happen" %
+ (build_tdl, build_diskimage, nvr))
+
+ # The sequence to recreate a valid persistent image is as follows
+ # Create a new BaseImage object
+ factory_base_image = BaseImage()
+ # Add it to the persistence layer
+ pim.add_image(factory_base_image)
+ # Now replace the data and template with the files referenced above
+ # and mark it as a complete image
+ # Factory doesn't attempt to modify a disk image after it is COMPLETE so
+ # this will work safely on read-only NFS mounts
+ factory_base_image.data = diskimage_full
+ factory_base_image.template = open(tdl_full).read()
+ factory_base_image.status = 'COMPLETE'
+ # Now save it
+ pim.save_image(factory_base_image)
+
+ # We can now reference this object directly or via its UUID in persistent storage
+ return factory_base_image
+
+ if opts is None:
+ opts = {}
+ self.opts = opts
+
+ config = self.makeConfig()
+ self.logger.debug('IF config object: %s' % config)
+ ApplicationConfiguration(configuration=config)
+
+ ozlogname = 'oz-indirection.log'
+ ozlog = os.path.join(self.workdir, ozlogname)
+ # END shared code
+
+ fhandler = logging.FileHandler(ozlog)
+ bd = BuildDispatcher()
+ tlog = logging.getLogger()
+ tlog.setLevel(logging.DEBUG)
+ tlog.addHandler(fhandler)
+
+ # TODO: Copy-paste from BaseImage - refactor
+ target_info = self.session.getBuildTarget(opts['target'], strict=True)
+ build_tag = target_info['build_tag']
+ repo_info = self.getRepo(build_tag)
+
+ name = opts['name']
+ version = opts['version']
+ release = opts['release']
+
+ # TODO: Another mostly copy-paste
+ if not release:
+ release = self.getRelease(name, version)
+ if '-' in version:
+ raise koji.ApplianceError('The Version may not have a hyphen')
+ if '-' in release:
+ raise koji.ApplianceError('The Release may not have a hyphen')
+
+ indirection_template = self.fetchHubOrSCM(opts.get('indirection_template'),
+ opts.get('indirection_template_url'))
+
+ self.logger.debug('Got indirection template %s' % (indirection_template))
+
+ try:
+ if opts['utility_image_build']:
+ utility_factory_image = _nvr_to_image(opts['utility_image_build'], opts['arch'])
+ else:
+ utility_factory_image = _task_to_image(int(opts['utility_image_task']))
+
+ if opts['base_image_build']:
+ base_factory_image = _nvr_to_image(opts['base_image_build'], opts['arch'])
+ else:
+ base_factory_image = _task_to_image(int(opts['base_image_task']))
+ except Exception, e:
+ self.logger.exception(e)
+ raise
+
+ # OK - We have a template and two input images - lets build
+ bld_info = None
+ if not opts['scratch']:
+ bld_info = self.initImageBuild(name, version, release,
+ target_info, opts)
+
+ try:
+ return self._do_indirection(opts, base_factory_image, utility_factory_image,
+ indirection_template, tlog, ozlog, fhandler,
+ bld_info, target_info, bd)
+ except:
+ if not opts.get('scratch'):
+ #scratch builds do not get imported
+ if bld_info:
+ self.session.host.failBuild(self.id, bld_info['id'])
+ # reraise the exception
+ raise
+
+
+ def _do_indirection(self, opts, base_factory_image, utility_factory_image,
+ indirection_template, tlog, ozlog, fhandler, bld_info,
+ target_info, bd):
+ # TODO: The next several lines are shared with the handler for other Factory tasks
+ # refactor in such a way that this can be a helper in OzImageTask
+
+ # ImageFactory picks a port to the guest VM using a rolling integer.
+ # This is a problem for concurrency, so we override the port it picks
+ # here using the task ID. (not a perfect solution but good enough:
+ # the likelihood of image tasks clashing here is very small)
+ rm = ReservationManager()
+ rm._listen_port = rm.MIN_PORT + self.id % (rm.MAX_PORT - rm.MIN_PORT)
+
+ utility_customizations = open(indirection_template).read()
+ results_loc = opts.get('results_loc')
+ if not results_loc.startswith("/"):
+ results_loc = "/" + results_loc
+ params = {'utility_image': str(utility_factory_image.identifier),
+ 'utility_customizations': utility_customizations,
+ 'results_location': results_loc }
+ random.seed() # necessary to ensure a unique mac address
+ try:
+ try:
+ # Embedded deep debug option - if template is just the string MOCK
+ # skip the actual build and create a mock target image instead
+ if utility_customizations.strip() == "MOCK":
+ target = Builder()
+ target_image = TargetImage()
+ pim = PersistentImageManager.default_manager()
+ pim.add_image(target_image)
+ target.target_image = target_image
+ open(target_image.data, "w").write("Mock build from task ID: %s" %
+ (str(self.id)))
+ target_image.status='COMPLETE'
+ else:
+ target = bd.builder_for_target_image('indirection',
+ image_id=base_factory_image.identifier,
+ parameters=params)
+ target.target_thread.join()
+ except Exception, e:
+ self.logger.debug("Exception encountered during target build")
+ self.logger.exception(e)
+ raise
+ finally:
+ # upload log even if we failed to help diagnose an issue
+ tlog.removeHandler(fhandler)
+ self.uploadFile(ozlog)
+ self.logger.debug('Target image results: %s' % target.target_image.status)
+
+ if target.target_image.status == 'FAILED':
+ # TODO abort when a task is CANCELLED
+ if not self.session.checkUpload('', os.path.basename(ozlog)):
+ tlog.removeHandler(fhandler)
+ self.uploadFile(ozlog)
+ raise koji.ApplianceError('Image status is %s: %s' %
+ (target.target_image.status, target.target_image.status_detail))
+
+ self.uploadFile(target.target_image.data, remoteName=os.path.basename(results_loc))
+
+ myresults = { }
+ myresults['task_id'] = self.id
+ myresults['files'] = [ os.path.basename(results_loc) ]
+ myresults['logs'] = [ os.path.basename(ozlog) ]
+ myresults['arch'] = opts['arch']
+ # TODO: This should instead track the two input images: base and utility
+ myresults['rpmlist'] = [ ]
+
+ # This is compatible with some helper methods originally implemented for the base
+ # image build. In the original usage, the dict contains an entry per build arch
+ # TODO: If adding multiarch support, keep this in mind
+ results = { str(self.id): myresults }
+ self.logger.debug('Image Results for hub: %s' % results)
+
+ if opts['scratch']:
+ self.session.host.moveImageBuildToScratch(self.id, results)
+ else:
+ self.session.host.completeImageBuild(self.id, bld_info['id'],
+ results)
+
+ # tag it
+ if not opts.get('scratch') and not opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[target_info['dest_tag'], bld_info['id'], False, None, True],
+ label='tag', parent=self.id, arch='noarch')
+ self.wait(tag_task_id)
+
+ # report results
+ report = ''
+ if opts.get('scratch'):
+ respath = ', '.join(
+ [os.path.join(koji.pathinfo.work(), koji.pathinfo.taskrelpath(tid)) for tid in [self.id] ])
+ report += 'Scratch '
+ else:
+ respath = koji.pathinfo.imagebuild(bld_info)
+ report += 'image build results in: %s' % respath
+ return report
+
+
+class BuildSRPMFromSCMTask(BaseBuildTask):
+
+ Methods = ['buildSRPMFromSCM']
+ _taskWeight = 1.0
+
+ def spec_sanity_checks(self, filename):
+ spec = open(filename).read()
+ for tag in ("Packager", "Distribution", "Vendor"):
+ if re.search("^%s:" % tag, spec, re.M):
+ raise koji.BuildError, "%s is not allowed to be set in spec file" % tag
+ for tag in ("packager", "distribution", "vendor"):
+ if re.search(r"^%%define\s+%s\s+" % tag, spec, re.M):
+ raise koji.BuildError, "%s is not allowed to be defined in spec file" % tag
+
+ def patch_scm_source(self, sourcedir, logfile, opts):
+ # override if desired
+ pass
+
+ def checkHost(self, hostdata):
+ tag = self.params[1]
+ return self.checkHostArch(tag, hostdata)
+
+ def handler(self, url, build_tag, opts=None):
+ # will throw a BuildError if the url is invalid
+ scm = SCM(url)
+ scm.assert_allowed(self.options.allowed_scms)
+
+ if opts is None:
+ opts = {}
+ repo_id = opts.get('repo_id')
+ if not repo_id:
+ raise koji.BuildError, "A repo id must be provided"
+
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ event_id = repo_info['create_event']
+ build_tag = self.session.getTag(build_tag, strict=True, event=event_id)
+
+ # need DNS in the chroot because "make srpm" may need to contact
+ # a SCM or lookaside cache to retrieve the srpm contents
+ rootopts = {'install_group': 'srpm-build',
+ 'setup_dns': True,
+ 'repo_id': repo_id}
+ br_arch = self.find_arch('noarch', self.session.host.getHost(), self.session.getBuildConfig(build_tag['id'], event=event_id))
+ broot = BuildRoot(self.session, self.options, build_tag['id'], br_arch, self.id, **rootopts)
+ broot.workdir = self.workdir
+
+ self.logger.debug("Initializing buildroot")
+ broot.init()
+
+ # Setup files and directories for SRPM creation
+ # We can't put this under the mock homedir because that directory
+ # is completely blown away and recreated on every mock invocation
+ scmdir = broot.rootdir() + '/tmp/scmroot'
+ koji.ensuredir(scmdir)
+ logfile = self.workdir + '/checkout.log'
+ uploadpath = self.getUploadDir()
+
+ # Check out spec file, etc. from SCM
+ sourcedir = scm.checkout(scmdir, self.session, uploadpath, logfile)
+ # chown the sourcedir and everything under it to the mockuser
+ # so we can build the srpm as non-root
+ uid = pwd.getpwnam(self.options.mockuser)[2]
+ # rpmbuild seems to complain if it's running in the "mock" group but
+ # files are in a different group
+ gid = grp.getgrnam('mock')[2]
+ self.chownTree(scmdir, uid, gid)
+
+ # Hook for patching spec file in place
+ self.patch_scm_source(sourcedir, logfile, opts)
+
+ # Find and verify that there is only one spec file.
+ spec_files = glob.glob("%s/*.spec" % sourcedir)
+ if not spec_files and self.options.support_rpm_source_layout:
+ # also check SPECS dir
+ spec_files = glob.glob("%s/SPECS/*.spec" % sourcedir)
+ if len(spec_files) == 0:
+ raise koji.BuildError("No spec file found")
+ elif len(spec_files) > 1:
+ raise koji.BuildError("Multiple spec files found: %s" % spec_files)
+ spec_file = spec_files[0]
+
+ # Run spec file sanity checks. Any failures will throw a BuildError
+ self.spec_sanity_checks(spec_file)
+
+ #build srpm
+ self.logger.debug("Running srpm build")
+ broot.build_srpm(spec_file, sourcedir, scm.source_cmd)
+
+ srpms = glob.glob('%s/*.src.rpm' % broot.resultdir())
+ if len(srpms) == 0:
+ raise koji.BuildError, "No srpms found in %s" % broot.resultdir()
+ elif len(srpms) > 1:
+ raise koji.BuildError, "Multiple srpms found in %s: %s" % (broot.resultdir(), ", ".join(srpms))
+ else:
+ srpm = srpms[0]
+
+ # check srpm name
+ h = koji.get_rpm_header(srpm)
+ name = h[rpm.RPMTAG_NAME]
+ version = h[rpm.RPMTAG_VERSION]
+ release = h[rpm.RPMTAG_RELEASE]
+ srpm_name = "%(name)s-%(version)s-%(release)s.src.rpm" % locals()
+ if srpm_name != os.path.basename(srpm):
+ raise koji.BuildError, 'srpm name mismatch: %s != %s' % (srpm_name, os.path.basename(srpm))
+
+ #upload srpm and return
+ self.uploadFile(srpm)
+
+ brootid = broot.id
+ log_files = glob.glob('%s/*.log' % broot.resultdir())
+
+ broot.expire()
+
+ return {'srpm': "%s/%s" % (uploadpath, srpm_name),
+ 'logs': ["%s/%s" % (uploadpath, os.path.basename(f))
+ for f in log_files],
+ 'brootid': brootid,
+ }
+
+class TagNotificationTask(BaseTaskHandler):
+ Methods = ['tagNotification']
+
+ _taskWeight = 0.1
+
+ message_templ = \
+"""From: %(from_addr)s\r
+Subject: %(nvr)s %(result)s %(operation)s by %(user_name)s\r
+To: %(to_addrs)s\r
+X-Koji-Package: %(pkg_name)s\r
+X-Koji-NVR: %(nvr)s\r
+X-Koji-User: %(user_name)s\r
+X-Koji-Status: %(status)s\r
+%(tag_headers)s\r
+\r
+Package: %(pkg_name)s\r
+NVR: %(nvr)s\r
+User: %(user_name)s\r
+Status: %(status)s\r
+%(operation_details)s\r
+%(nvr)s %(result)s %(operation)s by %(user_name)s\r
+%(failure_info)s\r
+"""
+
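+ # A fully rendered Subject line from the template above would read,
+ # with hypothetical values:
+ #   foo-1.0-1.fc21 successfully tagged into f21-updates by bob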
+ def handler(self, recipients, is_successful, tag_info, from_info, build_info, user_info, ignore_success=None, failure_msg=''):
+ if len(recipients) == 0:
+ self.logger.debug('task %i: no recipients, not sending notifications', self.id)
+ return
+
+ if ignore_success and is_successful:
+ self.logger.debug('task %i: tag operation successful and ignore success is true, not sending notifications', self.id)
+ return
+
+ build = self.session.getBuild(build_info)
+ user = self.session.getUser(user_info)
+ pkg_name = build['package_name']
+ nvr = koji.buildLabel(build)
+ user_name = user['name']
+
+ from_addr = self.options.from_addr
+ to_addrs = ', '.join(recipients)
+
+ operation = '%(action)s'
+ operation_details = 'Tag Operation: %(action)s\r\n'
+ tag_headers = ''
+ if from_info:
+ from_tag = self.session.getTag(from_info)
+ from_tag_name = from_tag['name']
+ operation += ' from %s' % from_tag_name
+ operation_details += 'From Tag: %s\r\n' % from_tag_name
+ tag_headers += 'X-Koji-Tag: %s' % from_tag_name
+ action = 'untagged'
+ if tag_info:
+ tag = self.session.getTag(tag_info)
+ tag_name = tag['name']
+ operation += ' into %s' % tag_name
+ operation_details += 'Into Tag: %s\r\n' % tag_name
+ if tag_headers:
+ tag_headers += '\r\n'
+ tag_headers += 'X-Koji-Tag: %s' % tag_name
+ action = 'tagged'
+ if tag_info and from_info:
+ action = 'moved'
+ operation = operation % locals()
+ operation_details = operation_details % locals()
+
+ if is_successful:
+ result = 'successfully'
+ status = 'complete'
+ failure_info = ''
+ else:
+ result = 'unsuccessfully'
+ status = 'failed'
+ failure_info = "Operation failed with the error:\r\n %s\r\n" % failure_msg
+
+ message = self.message_templ % locals()
+ # ensure message is in UTF-8
+ message = koji.fixEncoding(message)
+
+ server = smtplib.SMTP(self.options.smtphost)
+ #server.set_debuglevel(True)
+ server.sendmail(from_addr, recipients, message)
+ server.quit()
+
+ return 'sent notification of tag operation %i to: %s' % (self.id, to_addrs)
+
+class BuildNotificationTask(BaseTaskHandler):
+ Methods = ['buildNotification']
+
+ _taskWeight = 0.1
+
+ # XXX externalize these templates somewhere
+ subject_templ = """Package: %(build_nvr)s Tag: %(dest_tag)s Status: %(status)s Built by: %(build_owner)s"""
+ message_templ = \
+"""From: %(from_addr)s\r
+Subject: %(subject)s\r
+To: %(to_addrs)s\r
+X-Koji-Tag: %(dest_tag)s\r
+X-Koji-Package: %(build_pkg_name)s\r
+X-Koji-Builder: %(build_owner)s\r
+X-Koji-Status: %(status)s\r
+\r
+Package: %(build_nvr)s\r
+Tag: %(dest_tag)s\r
+Status: %(status)s%(cancel_info)s\r
+Built by: %(build_owner)s\r
+ID: %(build_id)i\r
+Started: %(creation_time)s\r
+Finished: %(completion_time)s\r
+%(changelog)s\r
+%(failure)s\r
+%(output)s\r
+Task Info: %(weburl)s/taskinfo?taskID=%(task_id)i\r
+Build Info: %(weburl)s/buildinfo?buildID=%(build_id)i\r
+"""
+
+ def _getTaskData(self, task_id, data=None):
+ if not data:
+ data = {}
+ taskinfo = self.session.getTaskInfo(task_id)
+
+ if not taskinfo:
+ # invalid task_id
+ return data
+
+ if taskinfo['host_id']:
+ hostinfo = self.session.getHost(taskinfo['host_id'])
+ else:
+ hostinfo = None
+
+ result = None
+ try:
+ result = self.session.getTaskResult(task_id)
+ except:
+ excClass, result = sys.exc_info()[:2]
+ if hasattr(result, 'faultString'):
+ result = result.faultString
+ else:
+ result = '%s: %s' % (excClass.__name__, result)
+ result = result.strip()
+ # clear the exception, since we're just using
+ # it for display purposes
+ sys.exc_clear()
+ if not result:
+ result = 'Unknown'
+
+ files = self.session.listTaskOutput(task_id)
+ logs = [filename for filename in files if filename.endswith('.log')]
+ rpms = [filename for filename in files if filename.endswith('.rpm') and not filename.endswith('.src.rpm')]
+ srpms = [filename for filename in files if filename.endswith('.src.rpm')]
+ misc = [filename for filename in files if filename not in logs + rpms + srpms]
+
+ logs.sort()
+ rpms.sort()
+ misc.sort()
+
+ data[task_id] = {}
+ data[task_id]['id'] = taskinfo['id']
+ data[task_id]['method'] = taskinfo['method']
+ data[task_id]['arch'] = taskinfo['arch']
+ data[task_id]['build_arch'] = taskinfo['label']
+ data[task_id]['host'] = hostinfo and hostinfo['name'] or None
+ data[task_id]['state'] = koji.TASK_STATES[taskinfo['state']].lower()
+ data[task_id]['result'] = result
+ data[task_id]['request'] = self.session.getTaskRequest(task_id)
+ data[task_id]['logs'] = logs
+ data[task_id]['rpms'] = rpms
+ data[task_id]['srpms'] = srpms
+ data[task_id]['misc'] = misc
+
+ children = self.session.getTaskChildren(task_id)
+ for child in children:
+ data = self._getTaskData(child['id'], data)
+ return data
+
+ def handler(self, recipients, build, target, weburl):
+ if len(recipients) == 0:
+ self.logger.debug('task %i: no recipients, not sending notifications', self.id)
+ return
+
+ build_pkg_name = build['package_name']
+ build_pkg_evr = '%s%s-%s' % ((build['epoch'] and str(build['epoch']) + ':' or ''), build['version'], build['release'])
+ build_nvr = koji.buildLabel(build)
+ build_id = build['id']
+ build_owner = build['owner_name']
+ # target comes from session.py:_get_build_target()
+ dest_tag = None
+ if target is not None:
+ dest_tag = target['dest_tag_name']
+ status = koji.BUILD_STATES[build['state']].lower()
+ creation_time = koji.formatTimeLong(build['creation_time'])
+ completion_time = koji.formatTimeLong(build['completion_time'])
+ task_id = build['task_id']
+
+ task_data = self._getTaskData(task_id)
+
+ cancel_info = ''
+ failure_info = ''
+ if build['state'] == koji.BUILD_STATES['CANCELED']:
+ # The owner of the buildNotification task is the one
+ # who canceled the task, it turns out.
+ this_task = self.session.getTaskInfo(self.id)
+ if this_task['owner']:
+ canceler = self.session.getUser(this_task['owner'])
+ cancel_info = "\r\nCanceled by: %s" % canceler['name']
+ elif build['state'] == koji.BUILD_STATES['FAILED']:
+ failure_data = task_data[task_id]['result']
+ failed_hosts = ['%s (%s)' % (task['host'], task['arch']) for task in task_data.values() if task['host'] and task['state'] == 'failed']
+ failure_info = "\r\n%s (%d) failed on %s:\r\n %s" % (build_nvr, build_id,
+ ', '.join(failed_hosts),
+ failure_data)
+
+ failure = failure_info or cancel_info or ''
+
+ tasks = {'failed' : [task for task in task_data.values() if task['state'] == 'failed'],
+ 'canceled' : [task for task in task_data.values() if task['state'] == 'canceled'],
+ 'closed' : [task for task in task_data.values() if task['state'] == 'closed']}
+
+ srpms = []
+ for taskinfo in task_data.values():
+ for srpmfile in taskinfo['srpms']:
+ srpms.append(srpmfile)
+ srpms = self.uniq(srpms)
+ srpms.sort()
+
+ if srpms:
+ output = "SRPMS:\r\n"
+ for srpm in srpms:
+ output += " %s" % srpm
+ output += "\r\n\r\n"
+ else:
+ output = ''
+
+ pathinfo = koji.PathInfo(topdir=self.options.topurl)
+ buildurl = pathinfo.build(build)
+ # list states here to make them go in the correct order
+ for task_state in ['failed', 'canceled', 'closed']:
+ if tasks[task_state]:
+ output += "%s tasks:\r\n" % task_state.capitalize()
+ output += "%s-------\r\n\r\n" % ("-" * len(task_state))
+ for task in tasks[task_state]:
+ output += "Task %s" % task['id']
+ if task['host']:
+ output += " on %s\r\n" % task['host']
+ else:
+ output += "\r\n"
+ output += "Task Type: %s\r\n" % koji.taskLabel(task)
+ for filetype in ['logs', 'rpms', 'misc']:
+ if task[filetype]:
+ output += "%s:\r\n" % filetype
+ for file in task[filetype]:
+ if filetype == 'rpms':
+ output += " %s\r\n" % '/'.join([buildurl, task['build_arch'], file])
+ elif filetype == 'logs':
+ if task_state != 'closed':
+ output += " %s/getfile?taskID=%s&name=%s\r\n" % (weburl, task['id'], file)
+ else:
+ output += " %s\r\n" % '/'.join([buildurl, 'data', 'logs', task['build_arch'], file])
+ elif filetype == 'misc':
+ output += " %s/getfile?taskID=%s&name=%s\r\n" % (weburl, task['id'], file)
+ output += "\r\n"
+ output += "\r\n"
+
+ changelog = koji.util.formatChangelog(self.session.getChangelogEntries(build_id, queryOpts={'limit': 3})).replace("\n","\r\n")
+ if changelog:
+ changelog = "Changelog:\r\n%s" % changelog
+
+ from_addr = self.options.from_addr
+ to_addrs = ', '.join(recipients)
+ subject = self.subject_templ % locals()
+ message = self.message_templ % locals()
+ # ensure message is in UTF-8
+ message = koji.fixEncoding(message)
+
+ server = smtplib.SMTP(self.options.smtphost)
+ # server.set_debuglevel(True)
+ server.sendmail(from_addr, recipients, message)
+ server.quit()
+
+ return 'sent notification of build %i to: %s' % (build_id, to_addrs)
+
+ def uniq(self, items):
+ """Remove duplicates from the list of items, and sort the list."""
+ m = dict(zip(items, [1] * len(items)))
+ l = m.keys()
+ l.sort()
+ return l
+
+
+class NewRepoTask(BaseTaskHandler):
+ Methods = ['newRepo']
+ _taskWeight = 0.1
+
+ def handler(self, tag, event=None, src=False, debuginfo=False):
+ tinfo = self.session.getTag(tag, strict=True, event=event)
+ kwargs = {}
+ if event is not None:
+ kwargs['event'] = event
+ if src:
+ kwargs['with_src'] = True
+ if debuginfo:
+ kwargs['with_debuginfo'] = True
+ repo_id, event_id = self.session.host.repoInit(tinfo['id'], **kwargs)
+ path = koji.pathinfo.repo(repo_id, tinfo['name'])
+ if not os.path.isdir(path):
+ raise koji.GenericError, "Repo directory missing: %s" % path
+ arches = []
+ for fn in os.listdir(path):
+ if fn != 'groups' and os.path.isfile("%s/%s/pkglist" % (path, fn)):
+ arches.append(fn)
+ #see if we can find a previous repo to update from
+ #only shadowbuild tags should start with SHADOWBUILD; their repos are
+ #auto-expired, so use the most recent expired repo for newRepo shadowbuild tasks
+ if tinfo['name'].startswith('SHADOWBUILD'):
+ oldrepo = self.session.getRepo(tinfo['id'], state=koji.REPO_EXPIRED)
+ else:
+ oldrepo = self.session.getRepo(tinfo['id'], state=koji.REPO_READY)
+ subtasks = {}
+ for arch in arches:
+ arglist = [repo_id, arch, oldrepo]
+ subtasks[arch] = self.session.host.subtask(method='createrepo',
+ arglist=arglist,
+ label=arch,
+ parent=self.id,
+ arch='noarch')
+ # wait for subtasks to finish
+ results = self.wait(subtasks.values(), all=True, failany=True)
+ data = {}
+ for (arch, task_id) in subtasks.iteritems():
+ data[arch] = results[task_id]
+ self.logger.debug("DEBUG: %r : %r " % (arch,data[arch],))
+ kwargs = {}
+ if event is not None:
+ kwargs['expire'] = True
+ self.session.host.repoDone(repo_id, data, **kwargs)
+ return repo_id, event_id
+
+class CreaterepoTask(BaseTaskHandler):
+
+ Methods = ['createrepo']
+ _taskWeight = 1.5
+
+ def handler(self, repo_id, arch, oldrepo):
+ #arch is the arch of the repo, not the task
+ rinfo = self.session.repoInfo(repo_id, strict=True)
+ if rinfo['state'] != koji.REPO_INIT:
+ raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo
+ self.repo_id = rinfo['id']
+ self.pathinfo = koji.PathInfo(self.options.topdir)
+ toprepodir = self.pathinfo.repo(repo_id, rinfo['tag_name'])
+ self.repodir = '%s/%s' % (toprepodir, arch)
+ if not os.path.isdir(self.repodir):
+ raise koji.GenericError, "Repo directory missing: %s" % self.repodir
+ groupdata = os.path.join(toprepodir, 'groups', 'comps.xml')
+ #set up our output dir
+ self.outdir = '%s/repo' % self.workdir
+ self.datadir = '%s/repodata' % self.outdir
+ pkglist = os.path.join(self.repodir, 'pkglist')
+ if os.path.getsize(pkglist) == 0:
+ pkglist = None
+ self.create_local_repo(rinfo, arch, pkglist, groupdata, oldrepo)
+
+ external_repos = self.session.getExternalRepoList(rinfo['tag_id'], event=rinfo['create_event'])
+ if external_repos:
+ self.merge_repos(external_repos, arch, groupdata)
+ elif pkglist is None:
+ fo = file(os.path.join(self.datadir, "EMPTY_REPO"), 'w')
+ fo.write("This repo is empty because its tag has no content for this arch\n")
+ fo.close()
+
+ uploadpath = self.getUploadDir()
+ files = []
+ for f in os.listdir(self.datadir):
+ files.append(f)
+ self.session.uploadWrapper('%s/%s' % (self.datadir, f), uploadpath, f)
+
+ return [uploadpath, files]
+
+ def create_local_repo(self, rinfo, arch, pkglist, groupdata, oldrepo):
+ koji.ensuredir(self.outdir)
+ cmd = ['/usr/bin/createrepo', '-vd', '-o', self.outdir]
+ if pkglist is not None:
+ cmd.extend(['-i', pkglist])
+ if os.path.isfile(groupdata):
+ cmd.extend(['-g', groupdata])
+ #attempt to recycle repodata from last repo
+ if pkglist and oldrepo and self.options.createrepo_update:
+ oldpath = self.pathinfo.repo(oldrepo['id'], rinfo['tag_name'])
+ olddatadir = '%s/%s/repodata' % (oldpath, arch)
+ if not os.path.isdir(olddatadir):
+ self.logger.warn("old repodata is missing: %s" % olddatadir)
+ else:
+ shutil.copytree(olddatadir, self.datadir)
+ oldorigins = os.path.join(self.datadir, 'pkgorigins.gz')
+ if os.path.isfile(oldorigins):
+ # remove any previous origins file and rely on mergerepos
+ # to rewrite it (if we have external repos to merge)
+ os.unlink(oldorigins)
+ cmd.append('--update')
+ if self.options.createrepo_skip_stat:
+ cmd.append('--skip-stat')
+ # note: we can't easily use a cachedir because we do not have write
+ # permission. The good news is that with --update we won't need to
+ # be scanning many rpms.
+ if pkglist is None:
+ cmd.append(self.outdir)
+ else:
+ cmd.append(self.repodir)
+
+ logfile = '%s/createrepo.log' % self.workdir
+ status = log_output(self.session, cmd[0], cmd, logfile, self.getUploadDir(), logerror=True)
+ if not isSuccess(status):
+ raise koji.GenericError, 'failed to create repo: %s' \
+ % parseStatus(status, ' '.join(cmd))
+
+ def merge_repos(self, external_repos, arch, groupdata):
+ repos = []
+ localdir = '%s/repo_%s_premerge' % (self.workdir, self.repo_id)
+ os.rename(self.outdir, localdir)
+ koji.ensuredir(self.outdir)
+ repos.append('file://' + localdir + '/')
+
+ for repo in external_repos:
+ ext_url = repo['url']
+ # substitute $arch in the url with the arch of the repo we're generating
+ ext_url = ext_url.replace('$arch', arch)
+ repos.append(ext_url)
+
+ blocklist = self.repodir + '/blocklist'
+ cmd = ['/usr/libexec/kojid/mergerepos', '-a', arch, '-b', blocklist, '-o', self.outdir]
+ if os.path.isfile(groupdata):
+ cmd.extend(['-g', groupdata])
+ for repo in repos:
+ cmd.extend(['-r', repo])
+
+ logfile = '%s/mergerepos.log' % self.workdir
+ status = log_output(self.session, cmd[0], cmd, logfile, self.getUploadDir(), logerror=True)
+ if not isSuccess(status):
+ raise koji.GenericError, 'failed to merge repos: %s' \
+ % parseStatus(status, ' '.join(cmd))
+
+class WaitrepoTask(BaseTaskHandler):
+
+ Methods = ['waitrepo']
+ #mostly just waiting
+ _taskWeight = 0.2
+
+ # seconds to sleep between repo checks
+ PAUSE = 60
+ # time in minutes before we fail this task
+ TIMEOUT = 120
+
+ def handler(self, tag, newer_than=None, nvrs=None):
+ """Wait for a repo for the tag, subject to given conditions
+
+ newer_than: create_event timestamp should be newer than this
+ nvrs: repo should contain these nvrs (which may not exist at first)
+
+ Only one of the options may be specified. If neither is, then
+ the call will wait for the first ready repo.
+
+ Returns the repo info (from getRepo) of the chosen repo
+ """
+
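+ # Hypothetical usage from a parent task, following the subtask pattern
+ # used elsewhere in this file (tag and nvr values are made up):
+ #   self.session.host.subtask(method='waitrepo',
+ #       arglist=['f21-build', None, ['foo-1.0-1.fc21']],
+ #       label='wait', parent=self.id, arch='noarch')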
+ start = time.time()
+
+ taginfo = self.session.getTag(tag, strict=True)
+ targets = self.session.getBuildTargets(buildTagID=taginfo['id'])
+ if not targets:
+ raise koji.GenericError("No build target for tag: %s" % taginfo['name'])
+
+ if isinstance(newer_than, basestring) and newer_than.lower() == "now":
+ newer_than = start
+ if not isinstance(newer_than, (type(None), int, long, float)):
+ raise koji.GenericError, "Invalid value for newer_than: %s" % newer_than
+
+ if newer_than and nvrs:
+ raise koji.GenericError, "only one of (newer_than, nvrs) may be specified"
+
+ if not nvrs:
+ nvrs = []
+ builds = [koji.parse_NVR(nvr) for nvr in nvrs]
+
+ last_repo = None
+
+ while True:
+ repo = self.session.getRepo(taginfo['id'])
+ if repo and repo != last_repo:
+ if builds:
+ if koji.util.checkForBuilds(self.session, taginfo['id'], builds, repo['create_event']):
+ self.logger.debug("Successfully waited %s for %s to appear in the %s repo" % \
+ (koji.util.duration(start), koji.util.printList(nvrs), taginfo['name']))
+ return repo
+ elif newer_than:
+ if repo['create_ts'] > newer_than:
+ self.logger.debug("Successfully waited %s for a new %s repo" % \
+ (koji.util.duration(start), taginfo['name']))
+ return repo
+ else:
+ #no check requested -- return first ready repo
+ return repo
+
+ if (time.time() - start) > (self.TIMEOUT * 60.0):
+ if builds:
+ raise koji.GenericError, "Unsuccessfully waited %s for %s to appear in the %s repo" % \
+ (koji.util.duration(start), koji.util.printList(nvrs), taginfo['name'])
+ else:
+ raise koji.GenericError, "Unsuccessfully waited %s for a new %s repo" % \
+ (koji.util.duration(start), taginfo['name'])
+
+ time.sleep(self.PAUSE)
+ last_repo = repo
+
+
+def get_options():
+ """process options from command line and config file"""
+ # parse command line args
+ logger = logging.getLogger("koji.build")
+ parser = OptionParser()
+ parser.add_option("-c", "--config", dest="configFile",
+ help="use alternate configuration file", metavar="FILE",
+ default="/etc/kojid/kojid.conf")
+ parser.add_option("--user", help="specify user")
+ parser.add_option("--password", help="specify password")
+ parser.add_option("-f", "--fg", dest="daemon",
+ action="store_false", default=True,
+ help="run in foreground")
+ parser.add_option("--force-lock", action="store_true", default=False,
+ help="force lock for exclusive session")
+ parser.add_option("-v", "--verbose", action="store_true", default=False,
+ help="show verbose output")
+ parser.add_option("-d", "--debug", action="store_true", default=False,
+ help="show debug output")
+ parser.add_option("--debug-task", action="store_true", default=False,
+ help="enable debug output for tasks")
+ parser.add_option("--debug-xmlrpc", action="store_true", default=False,
+ help="show xmlrpc debug output")
+ parser.add_option("--debug-mock", action="store_true", default=False,
+ #obsolete option
+ help=SUPPRESS_HELP)
+ parser.add_option("--skip-main", action="store_true", default=False,
+ help="don't actually run main")
+ parser.add_option("--maxjobs", type='int', help="Specify maxjobs")
+ parser.add_option("--minspace", type='int', help="Specify minspace")
+ parser.add_option("--sleeptime", type='int', help="Specify the polling interval")
+ parser.add_option("--admin-emails", help="Address(es) to send error notices to")
+ parser.add_option("--topdir", help="Specify topdir")
+ parser.add_option("--topurl", help="Specify topurl")
+ parser.add_option("--workdir", help="Specify workdir")
+ parser.add_option("--pluginpath", help="Specify plugin search path")
+ parser.add_option("--plugin", action="append", help="Load specified plugin")
+ parser.add_option("--mockdir", help="Specify mockdir")
+ parser.add_option("--mockuser", help="User to run mock as")
+ parser.add_option("-s", "--server", help="url of XMLRPC server")
+ parser.add_option("--pkgurl", help=SUPPRESS_HELP)
+ (options, args) = parser.parse_args()
+
+ if args:
+ parser.error("incorrect number of arguments")
+ #not reached
+ assert False
+
+ # load local config
+ config = ConfigParser()
+ config.read(options.configFile)
+ for x in config.sections():
+ if x != 'kojid':
+ quit('invalid section found in config file: %s' % x)
+ defaults = {'sleeptime': 15,
+ 'maxjobs': 10,
+ 'literal_task_arches': '',
+ 'minspace': 8192,
+ 'admin_emails': None,
+ 'log_level': None,
+ 'topdir': '/mnt/koji',
+ 'topurl': None,
+ 'workdir': '/var/tmp/koji',
+ 'pluginpath': '/usr/lib/koji-builder-plugins',
+ 'mockdir': '/var/lib/mock',
+ 'mockuser': 'kojibuilder',
+ 'packager': 'Koji',
+ 'vendor': 'Koji',
+ 'distribution': 'Koji',
+ 'mockhost': 'koji-linux-gnu',
+ 'smtphost': 'example.com',
+ 'from_addr': 'Koji Build System <buildsys at example.com>',
+ 'krb_principal': None,
+ 'host_principal_format': 'compile/%s at EXAMPLE.COM',
+ 'keytab': '/etc/kojid/kojid.keytab',
+ 'ccache': '/var/tmp/kojid.ccache',
+ 'krbservice': 'host',
+ 'server': None,
+ 'user': None,
+ 'password': None,
+ 'retry_interval': 60,
+ 'max_retries': 120,
+ 'offline_retry': True,
+ 'offline_retry_interval': 120,
+ 'keepalive' : True,
+ 'timeout' : None,
+ 'use_fast_upload': True,
+ 'createrepo_skip_stat': True,
+ 'createrepo_update': True,
+ 'pkgurl': None,
+ 'allowed_scms': '',
+ 'support_rpm_source_layout': True,
+ 'yum_proxy': None,
+ 'maven_repo_ignore': '*.md5 *.sha1 maven-metadata*.xml _maven.repositories '
+ 'resolver-status.properties *.lastUpdated',
+ 'failed_buildroot_lifetime' : 3600 * 4,
+ 'rpmbuild_timeout' : 3600 * 24,
+ 'cert': '/etc/kojid/client.crt',
+ 'ca': '/etc/kojid/clientca.crt',
+ 'serverca': '/etc/kojid/serverca.crt'}
+ if config.has_section('kojid'):
+ for name, value in config.items('kojid'):
+ if name in ['sleeptime', 'maxjobs', 'minspace', 'retry_interval',
+ 'max_retries', 'offline_retry_interval', 'failed_buildroot_lifetime',
+ 'timeout', 'rpmbuild_timeout',]:
+ try:
+ defaults[name] = int(value)
+ except ValueError:
+ quit("value for %s option must be a valid integer" % name)
+ elif name in ['offline_retry', 'createrepo_skip_stat', 'createrepo_update',
+ 'keepalive', 'use_fast_upload', 'support_rpm_source_layout']:
+ defaults[name] = config.getboolean('kojid', name)
+ elif name in ['plugin', 'plugins']:
+ defaults['plugin'] = value.split()
+ elif name in defaults.keys():
+ defaults[name] = value
+ elif name.upper().startswith('RLIMIT_'):
+ defaults[name.upper()] = value
+ else:
+ quit("unknown config option: %s" % name)
+ for name, value in defaults.items():
+ if getattr(options, name, None) is None:
+ setattr(options, name, value)
+
+ #honor topdir
+ if options.topdir:
+ koji.BASEDIR = options.topdir
+ koji.pathinfo.topdir = options.topdir
+
+ #make sure workdir exists
+ if not os.path.exists(options.workdir):
+ koji.ensuredir(options.workdir)
+
+ if not options.server:
+ msg = "the server option is required"
+ logger.error(msg)
+ parser.error(msg)
+
+ if not options.topurl:
+ msg = "the topurl option is required"
+ logger.error(msg)
+ parser.error(msg)
+
+ topurls = options.topurl.split()
+ options.topurls = topurls
+ if len(topurls) > 1:
+ # XXX - fix the rest of the code so this is not necessary
+ options.topurl = topurls[0]
+
+ if options.pkgurl:
+ logger.warning("The pkgurl option is obsolete")
+ if options.debug_mock:
+ logger.warning("The debug-mock option is obsolete")
+
+ return options
+
+def quit(msg=None, code=1):
+ if msg:
+ logging.getLogger("koji.build").error(msg)
+ sys.stderr.write('%s\n' % msg)
+ sys.stderr.flush()
+ sys.exit(code)
+
+if __name__ == "__main__":
+ koji.add_file_logger("koji", "/var/log/kojid.log")
+ #note we're setting logging params for all of koji*
+ options = get_options()
+ if options.log_level:
+ lvl = getattr(logging, options.log_level, None)
+ if lvl is None:
+ quit("Invalid log level: %s" % options.log_level)
+ logging.getLogger("koji").setLevel(lvl)
+ else:
+ logging.getLogger("koji").setLevel(logging.WARN)
+ if options.debug:
+ logging.getLogger("koji").setLevel(logging.DEBUG)
+ elif options.verbose:
+ logging.getLogger("koji").setLevel(logging.INFO)
+ if options.debug_task:
+ logging.getLogger("koji.build.BaseTaskHandler").setLevel(logging.DEBUG)
+ if options.admin_emails:
+ koji.add_mail_logger("koji", options.admin_emails)
+
+ #build session options
+ session_opts = {}
+ for k in ('user', 'password', 'krbservice', 'debug_xmlrpc', 'debug',
+ 'retry_interval', 'max_retries', 'offline_retry', 'offline_retry_interval',
+ 'keepalive', 'timeout', 'use_fast_upload',
+ ):
+ v = getattr(options, k, None)
+ if v is not None:
+ session_opts[k] = v
+ #start a session and login
+ session = koji.ClientSession(options.server, session_opts)
+ if os.path.isfile(options.cert):
+ try:
+ # authenticate using SSL client certificates
+ session.ssl_login(options.cert, options.ca,
+ options.serverca)
+ except koji.AuthError, e:
+ quit("Error: Unable to log in: %s" % e)
+ except xmlrpclib.ProtocolError:
+ quit("Error: Unable to connect to server %s" % (options.server))
+ elif options.user:
+ try:
+ # authenticate using user/password
+ session.login()
+ except koji.AuthError:
+ quit("Error: Unable to log in. Bad credentials?")
+ except xmlrpclib.ProtocolError:
+ quit("Error: Unable to connect to server %s" % (options.server))
+ elif sys.modules.has_key('krbV'):
+ krb_principal = options.krb_principal
+ if krb_principal is None:
+ krb_principal = options.host_principal_format % socket.getfqdn()
+ try:
+ session.krb_login(principal=krb_principal,
+ keytab=options.keytab,
+ ccache=options.ccache)
+ except krbV.Krb5Error, e:
+ quit("Kerberos authentication failed: '%s' (%s)" % (e.args[1], e.args[0]))
+ except socket.error, e:
+ quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])
+ else:
+ quit("No username/password supplied and Kerberos missing or not configured")
+ #make session exclusive
+ try:
+ session.exclusiveSession(force=options.force_lock)
+ except koji.AuthLockError:
+ quit("Error: Unable to get lock. Trying using --force-lock")
+ if not session.logged_in:
+ quit("Error: Unknown login error")
+ #make sure it works
+ try:
+ ret = session.echo("OK")
+ except xmlrpclib.ProtocolError:
+ quit("Error: Unable to connect to server %s" % (options.server))
+ if ret != ["OK"]:
+ quit("Error: incorrect server response: %r" % (ret))
+
+ # run main
+ if options.daemon:
+ #detach
+ koji.daemonize()
+ main(options, session)
+ # not reached
+ assert False
+ elif not options.skip_main:
+ koji.add_stderr_logger("koji")
+ main(options, session)
diff --git a/builder/kojid.conf b/builder/kojid.conf
new file mode 100644
index 0000000..5f0aaec
--- /dev/null
+++ b/builder/kojid.conf
@@ -0,0 +1,88 @@
+[kojid]
+; The number of seconds to sleep between tasks
+; sleeptime=15
+
+; The maximum number of jobs that kojid will handle at a time
+; maxjobs=10
+
+; The minimum amount of free space (in MB) required for each build root
+; minspace=8192
+
+; The directory root where work data can be found from the koji hub
+; topdir=/mnt/koji
+
+; The directory root for temporary storage
+; workdir=/var/tmp/koji
+
+; The directory root for mock
+; mockdir=/var/lib/mock
+
+; The user to run as when doing builds
+; mockuser=kojibuilder
+
+; The vendor to use in rpm headers
+; vendor=Koji
+
+; The packager to use in rpm headers
+; packager=Koji
+
+; The distribution to use in rpm headers
+; distribution=Koji
+
+; The _host string to use in mock
+; mockhost=koji-linux-gnu
+
+; Timeout for build duration (24 hours)
+; rpmbuild_timeout=86400
+
+; The URL for the xmlrpc server
+server=http://hub.example.com/kojihub
+
+; The URL for the file access
+topurl=http://hub.example.com/kojifiles
+
+; A space-separated list of tuples from which kojid is allowed to checkout.
+; The format of those tuples is:
+;
+; host:repository[:use_common[:source_cmd]]
+;
+; Incorrectly-formatted tuples will be ignored.
+;
+; If use_common is not present, kojid will attempt to checkout a common/
+; directory from the repository. If use_common is set to no, off, false, or 0,
+; it will not attempt to checkout a common/ directory.
+;
+; source_cmd is a shell command (args separated with commas instead of spaces)
+; to run before building the srpm. It is generally used to retrieve source
+; files from a remote location. If no source_cmd is specified, "make sources"
+; is run by default.
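+;
+; For example, a hypothetical entry that disables the common/ checkout
+; and runs "fedpkg sources" before building the srpm would be:
+;
+; pkgs.example.com:/repo:no:fedpkg,sources
+;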
+allowed_scms=scm.example.com:/cvs/example git.example.org:/example svn.example.org:/users/*:no
+
+; The mail host to use for sending email notifications
+smtphost=example.com
+
+; The From address used when sending email notifications
+from_addr=Koji Build System <buildsys at example.com>
+
+;configuration for Kerberos authentication
+
+;the format of the principal used by the build hosts
+;%s will be replaced by the FQDN of the host
+;host_principal_format = compile/%s at EXAMPLE.COM
+
+;location of the keytab
+;keytab = /etc/kojid/kojid.keytab
+
+;the service name of the principal being used by the hub
+;krbservice = host
+
+;configuration for SSL authentication
+
+;client certificate
+;cert = /etc/kojid/client.crt
+
+;certificate of the CA that issued the client certificate
+;ca = /etc/kojid/clientca.crt
+
+;certificate of the CA that issued the HTTP server certificate
+;serverca = /etc/kojid/serverca.crt
diff --git a/builder/kojid.init b/builder/kojid.init
new file mode 100755
index 0000000..691b77b
--- /dev/null
+++ b/builder/kojid.init
@@ -0,0 +1,99 @@
+#! /bin/sh
+#
+# kojid Start/Stop kojid
+#
+# chkconfig: 345 99 99
+# description: kojid server
+# processname: kojid
+
+# This is an interactive program, we need the current locale
+
+# Source function library.
+. /etc/init.d/functions
+
+# Check that we're a privileged user
+[ `id -u` = 0 ] || exit 0
+
+[ -f /etc/sysconfig/kojid ] && . /etc/sysconfig/kojid
+
+prog="kojid"
+
+# Check that networking is up.
+if [ "$NETWORKING" = "no" ]
+then
+ exit 0
+fi
+
+[ -f /usr/sbin/kojid ] || exit 0
+
+RETVAL=0
+
+start() {
+ echo -n $"Starting $prog: "
+ cd /
+ ARGS=""
+ [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock"
+ [ "$KOJID_DEBUG" == "Y" ] && ARGS="$ARGS --debug"
+ [ "$KOJID_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose"
+ # XXX Fix for make download-checks in kernel builds
+ # Remove once we're running the buildSRPMFromSCM task
+ # as an unprivileged user with their own environment
+ export HOME="/root"
+ daemon /usr/sbin/kojid $ARGS
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojid
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc kojid
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojid
+ return $RETVAL
+}
+
+restart() {
+ stop
+ start
+}
+
+graceful() {
+ #SIGUSR1 initiates a graceful restart
+ pid=$(pidofproc kojid)
+ if test -z "$pid"
+ then
+ echo $"$prog not running"
+ else
+ kill -10 $pid
+ fi
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status $prog
+ ;;
+ restart|reload|force-reload)
+ restart
+ ;;
+ condrestart|try-restart)
+ [ -f /var/lock/subsys/kojid ] && restart || :
+ ;;
+ graceful)
+ graceful
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|graceful}"
+ exit 1
+esac
+
+exit $?
diff --git a/builder/kojid.service b/builder/kojid.service
new file mode 100644
index 0000000..1886a44
--- /dev/null
+++ b/builder/kojid.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Koji build server
+Documentation=https://fedoraproject.org/wiki/Koji/ServerHowTo
+
+After=network.target
+
+[Service]
+ExecStart=/usr/sbin/kojid \
+ --fg \
+ --force-lock \
+ --verbose
+
+[Install]
+WantedBy=multi-user.target
diff --git a/builder/kojid.sysconfig b/builder/kojid.sysconfig
new file mode 100644
index 0000000..393aee9
--- /dev/null
+++ b/builder/kojid.sysconfig
@@ -0,0 +1,3 @@
+FORCE_LOCK=Y
+KOJID_DEBUG=N
+KOJID_VERBOSE=Y
diff --git a/builder/mergerepos b/builder/mergerepos
new file mode 100755
index 0000000..c5349ef
--- /dev/null
+++ b/builder/mergerepos
@@ -0,0 +1,276 @@
+#!/usr/bin/python
+
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+
+# Copyright 2009-2014 Red Hat, Inc.
+# Written by Mike Bonnet <mikeb at redhat.com>
+
+# Merge repos using rules specific to Koji
+# Largely borrowed from the mergerepo script included in createrepo and
+# written by Seth Vidal
+
+import createrepo
+import os.path
+import rpmUtils.miscutils
+import shutil
+import sys
+import tempfile
+import yum
+import yum.misc
+from optparse import OptionParser
+
+# Expand a canonical arch to the full list of
+# arches that should be included in the repo.
+# Basically the inverse of koji.canonArch().
+# Lists taken from rpmUtils.arch.
+EXPAND_ARCHES = {
+ 'i386': ['i486', 'i586', 'geode', 'i686', 'athlon'],
+ 'x86_64': ['ia32e', 'amd64'],
+ 'ppc64': ['ppc64pseries', 'ppc64iseries'],
+ 'sparc64': ['sparc64v', 'sparc64v2'],
+ 'sparc': ['sparcv8', 'sparcv9', 'sparcv9v', 'sparcv9v2'],
+ 'alpha': ['alphaev4', 'alphaev45', 'alphaev5', 'alphaev56',
+ 'alphapca56', 'alphaev6', 'alphaev67', 'alphaev68', 'alphaev7'],
+ 'armhfp': ['armv7hl', 'armv7hnl'],
+ 'arm': ['armv5tel', 'armv5tejl', 'armv6l','armv7l'],
+ 'sh4': ['sh4a']
+ }
+
+MULTILIB_ARCHES = {
+ 'x86_64': 'i386',
+ 'ppc64': 'ppc',
+ 's390x': 's390'
+ }
+
+def parse_args(args):
+ """Parse our opts/args"""
+ usage = """
+ mergerepos: take 2 or more repositories and merge their metadata into a new repo using Koji semantics
+
+ mergerepos --repo=url --repo=url --outputdir=/some/path"""
+
+ parser = OptionParser(version = "mergerepos 0.1", usage=usage)
+ # query options
+ parser.add_option("-r", "--repo", dest="repos", default=[], action="append",
+ help="repo url")
+ parser.add_option("-g", "--groupfile", default=None,
+ help="path to groupfile to include in metadata")
+ parser.add_option("-a", "--arch", dest="arches", default=[], action="append",
+ help="List of arches to include in the repo")
+ parser.add_option("-b", "--blocked", default=None,
+ help="A file containing a list of srpm names to exclude from the merged repo")
+ parser.add_option("-o", "--outputdir", default=None,
+ help="Location to create the repository")
+ (opts, argsleft) = parser.parse_args(args)
+
+ if len(opts.repos) < 1:
+ parser.print_usage()
+ sys.exit(1)
+
+ # expand arches
+ for arch in opts.arches[:]:
+ if EXPAND_ARCHES.has_key(arch):
+ opts.arches.extend(EXPAND_ARCHES[arch])
+
+ # support multilib repos
+ for arch in opts.arches[:]:
+ multilib_arch = MULTILIB_ARCHES.get(arch)
+ if multilib_arch:
+ opts.arches.append(multilib_arch)
+ if multilib_arch in EXPAND_ARCHES:
+ opts.arches.extend(EXPAND_ARCHES[multilib_arch])
+
+ # always include noarch
+ if not 'noarch' in opts.arches:
+ opts.arches.append('noarch')
+
+ if not opts.outputdir:
+ parser.error('You must specify an outputdir with -o')
+ sys.exit(1)
+
+ return opts
+
+
+def make_const_func(value):
+ """Return a function that returns the given value"""
+ return lambda *a: value
+
+
+class RepoMerge(object):
+ def __init__(self, repolist, arches, groupfile, blocked, outputdir):
+ self.repolist = repolist
+ self.outputdir = outputdir
+ self.mdconf = createrepo.MetaDataConfig()
+ # explicitly request sha1 for backward compatibility with older yum
+ self.mdconf.sumtype = 'sha1'
+ self.mdconf.database = True
+ self.mdconf.verbose = True
+ self.mdconf.changelog_limit = 3
+ self.yumbase = yum.YumBase()
+ if hasattr(self.yumbase, 'preconf'):
+ self.yumbase.preconf.fn = '/dev/null'
+ self.yumbase.preconf.init_plugins = False
+ self.yumbase.preconf.debuglevel = 2
+ else:
+ self.yumbase._getConfig('/dev/null', init_plugins=False, debuglevel=2)
+ self.yumbase.conf.cachedir = tempfile.mkdtemp()
+ self.yumbase.conf.cache = 0
+ self.archlist = arches
+ self.mdconf.groupfile = groupfile
+ self.blocked = blocked
+
+ def close(self):
+ if self.yumbase is not None:
+ cachedir = self.yumbase.conf.cachedir
+ self.yumbase.close()
+ self.yumbase = None
+ self.mdconf = None
+ if os.path.isdir(cachedir):
+ shutil.rmtree(cachedir)
+
+ def __del__(self):
+ self.close()
+
+ def merge_repos(self):
+ self.yumbase.repos.disableRepo('*')
+ # add our repos and give them a merge rank in the order they appear
+ # in the repolist
+ count = 0
+ for r in self.repolist:
+ count +=1
+ rid = 'repo%s' % count
+ print >> sys.stderr, 'Adding repo: ' + r
+ n = self.yumbase.add_enable_repo(rid, baseurls=[r])
+ n._merge_rank = count
+
+ #setup our sacks
+ self.yumbase._getSacks(archlist=self.archlist)
+
+ self.sort_and_filter()
+
+ def sort_and_filter(self):
+ """
+ For each package object, check if the srpm name has ever been seen before.
+ If it has not, keep the package. If it has, check if the srpm name was first seen
+ in the same repo as the current package. If so, keep the package from the srpm with the
+ highest NVR. If not, keep the packages from the first srpm we found, and delete packages from
+ all other srpms.
+
+ Packages with matching NVRs in multiple repos will be taken from the first repo.
+
+ If the srpm name appears in the blocked package list, any packages generated from the srpm
+ will be deleted from the package sack as well.
+
+ This method will also generate a file called "pkgorigins" and add it to the repo metadata. This
+ is a tab-separated map of package E:N-V-R.A to repo URL (as specified on the command-line). This
+ allows a package to be tracked back to its origin, even if the location field in the repodata does
+ not match the original repo location.
+ """
+ # sort the repos by _merge_rank
+ # lowest number is the highest rank (1st place, 2nd place, etc.)
+ repos = self.yumbase.repos.listEnabled()
+ repos.sort(key=lambda o: o._merge_rank)
+
+ include_srpms = {}
+
+ # calculating what "builds" (srpms) we're allowing into the repo
+ for reponum, repo in enumerate(repos):
+ for pkg in repo.sack:
+ if reponum == 0 and not pkg.basepath:
+ # this is the first repo (i.e. the koji repo) and appears
+ # to be using relative urls
+ #XXX - kind of a hack, but yum leaves us little choice
+ #force the pkg object to report a relative location
+ loc = """<location href="%s"/>\n""" % yum.misc.to_xml(pkg.remote_path, attrib=True)
+ pkg._return_remote_location = make_const_func(loc)
+ srpm_name, ver, rel, epoch, arch = rpmUtils.miscutils.splitFilename(pkg.sourcerpm)
+ if include_srpms.has_key(srpm_name):
+ other_srpm, other_repoid = include_srpms[srpm_name]
+ if pkg.repoid != other_repoid:
+ # We found a rpm built from an srpm with the same name in a previous repo.
+ # The previous repo takes precedence, so ignore the srpm found here.
+ continue
+ else:
+ # We're in the same repo, so compare srpm NVRs
+ other_srpm_name, other_ver, other_rel, other_epoch, other_arch = \
+ rpmUtils.miscutils.splitFilename(other_srpm)
+ cmp = rpmUtils.miscutils.compareEVR((epoch, ver, rel),
+ (other_epoch, other_ver, other_rel))
+ if cmp > 0:
+ # The current package we're processing is from a newer srpm than the
+ # existing srpm in the dict, so update the dict
+ include_srpms[srpm_name] = (pkg.sourcerpm, pkg.repoid)
+ elif self.blocked.has_key(srpm_name):
+ continue
+ else:
+ include_srpms[srpm_name] = (pkg.sourcerpm, pkg.repoid)
+
+ pkgorigins = os.path.join(self.yumbase.conf.cachedir, 'pkgorigins')
+ origins = file(pkgorigins, 'w')
+
+ seen_rpms = {}
+ for repo in repos:
+ for pkg in repo.sack:
+ srpm_name, ver, rel, epoch, arch = rpmUtils.miscutils.splitFilename(pkg.sourcerpm)
+ incl_srpm, incl_repoid = include_srpms.get(srpm_name, (None, None))
+ pkg_nvra = str(pkg)
+ if incl_srpm == pkg.sourcerpm and not seen_rpms.has_key(pkg_nvra):
+ origins.write('%s\t%s\n' % (pkg_nvra, repo.urls[0]))
+ seen_rpms[pkg_nvra] = 1
+ else:
+ # Either the srpm is in the block list, it is not built from the srpm we
+ # identified above, or it's a duplicate, so exclude it
+ repo.sack.delPackage(pkg)
+
+ origins.close()
+ self.mdconf.additional_metadata['origin'] = pkgorigins
+
+ def write_metadata(self):
+ self.mdconf.pkglist = self.yumbase.pkgSack
+ self.mdconf.directory = self.outputdir
+ # clean out what was there
+ if os.path.exists(self.mdconf.directory + '/repodata'):
+ shutil.rmtree(self.mdconf.directory + '/repodata')
+
+ if not os.path.exists(self.mdconf.directory):
+ os.makedirs(self.mdconf.directory)
+
+ mdgen = createrepo.MetaDataGenerator(config_obj=self.mdconf)
+ mdgen.doPkgMetadata()
+ mdgen.doRepoMetadata()
+ mdgen.doFinalMove()
+
+def main(args):
+ """main"""
+ opts = parse_args(args)
+
+ if opts.blocked:
+ blocked_fo = file(opts.blocked)
+ blocked_list = blocked_fo.readlines()
+ blocked_fo.close()
+ blocked = dict([(b.strip(), 1) for b in blocked_list])
+ else:
+ blocked = {}
+
+ merge = RepoMerge(opts.repos, opts.arches, opts.groupfile, blocked, opts.outputdir)
+
+ try:
+ merge.merge_repos()
+ merge.write_metadata()
+ finally:
+ merge.close()
+
+if __name__ == "__main__":
+ main(sys.argv[1:])
diff --git a/cli/Makefile b/cli/Makefile
new file mode 100644
index 0000000..3573aef
--- /dev/null
+++ b/cli/Makefile
@@ -0,0 +1,19 @@
+FILES = koji
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/usr/bin
+ install -p -m 755 $(FILES) $(DESTDIR)/usr/bin
+ mkdir -p $(DESTDIR)/etc/koji.conf.d
+ install -p -m 644 koji.conf $(DESTDIR)/etc/koji.conf
diff --git a/cli/koji b/cli/koji
new file mode 100755
index 0000000..f521d6d
--- /dev/null
+++ b/cli/koji
@@ -0,0 +1,6673 @@
+#!/usr/bin/python
+# coding=utf-8
+
+# command line interface for the Koji build system
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Dennis Gregorovic <dgregor at redhat.com>
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+# Cristian Balint <cbalint at redhat.com>
+
+import sys
+try:
+ import krbV
+except ImportError:
+ pass
+try:
+ import ast
+except ImportError:
+ ast = None
+import ConfigParser
+import base64
+import errno
+import koji
+import koji.util
+import fnmatch
+from koji.util import md5_constructor
+import logging
+import os
+import re
+import pprint
+import random
+import socket
+import stat
+import string
+import time
+import traceback
+import urlgrabber.grabber as grabber
+import urlgrabber.progress as progress
+import xmlrpclib
+import yum.comps
+import optparse
+#for import-comps handler (currently disabled)
+#from rhpl.comps import Comps
+
+# fix OptionParser for python 2.3 (optparse version 1.4.1+)
+# code taken from optparse version 1.5a2
+OptionParser = optparse.OptionParser
+if optparse.__version__ == "1.4.1+":
+ def _op_error(self, msg):
+ self.print_usage(sys.stderr)
+ msg = "%s: error: %s\n" % (self._get_prog_name(), msg)
+ if msg:
+ sys.stderr.write(msg)
+ sys.exit(2)
+ OptionParser.error = _op_error
+
+greetings = ('hello', 'hi', 'yo', "what's up", "g'day", 'back to work',
+ 'bonjour',
+ 'hallo',
+ 'ciao',
+ 'hola',
+ u'olá',
+ u'dobrý den',
+ u'zdravstvuite',
+ u'góðan daginn',
+ 'hej',
+ 'tervehdys',
+ u'grüezi',
+ u'céad míle fáilte',
+ u'hylô',
+ u'bună ziua',
+ u'jó napot',
+ 'dobre dan',
+ u'你好',
+ u'こんにちは',
+ u'नमस्कार',
+ u'안녕하세요')
+
+def _(args):
+ """Stub function for translation"""
+ return args
+
+ARGMAP = {'None': None,
+ 'True': True,
+ 'False': False}
+
+def arg_filter(arg):
+ try:
+ return int(arg)
+ except ValueError:
+ pass
+ try:
+ return float(arg)
+ except ValueError:
+ pass
+ if arg in ARGMAP:
+ return ARGMAP[arg]
+ #handle lists/dicts?
+ return arg
+
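+# Examples: arg_filter("42") -> 42, arg_filter("1.5") -> 1.5,
+# arg_filter("None") -> None; any other string is returned unchanged.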
+
+def get_options():
+ """process options from command line and config file"""
+
+ usage = _("%prog [global-options] command [command-options-and-arguments]")
+ parser = OptionParser(usage=usage)
+ parser.disable_interspersed_args()
+ progname = os.path.basename(sys.argv[0]) or 'koji'
+ parser.add_option("-c", "--config", dest="configFile",
+ help=_("use alternate configuration file"), metavar="FILE")
+ parser.add_option("-p", "--profile", default=progname,
+ help=_("specify a configuration profile"))
+ parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"), metavar="FILE")
+ parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
+ parser.add_option("--runas", help=_("run as the specified user (requires special privileges)"))
+ parser.add_option("--user", help=_("specify user"))
+ parser.add_option("--password", help=_("specify password"))
+ parser.add_option("--noauth", action="store_true", default=False,
+ help=_("do not authenticate"))
+ parser.add_option("--force-auth", action="store_true", default=False,
+ help=_("authenticate even for read-only operations"))
+ parser.add_option("--authtype", help=_("force use of a type of authentication, options: noauth, ssl, password, or kerberos"))
+ parser.add_option("-d", "--debug", action="store_true", default=False,
+ help=_("show debug output"))
+ parser.add_option("--debug-xmlrpc", action="store_true", default=False,
+ help=_("show xmlrpc debug output"))
+ parser.add_option("-q", "--quiet", action="store_true", default=False,
+ help=_("run quietly"))
+ parser.add_option("--skip-main", action="store_true", default=False,
+ help=_("don't actually run main"))
+ parser.add_option("-s", "--server", help=_("url of XMLRPC server"))
+ parser.add_option("--topdir", help=_("specify topdir"))
+ parser.add_option("--weburl", help=_("url of the Koji web interface"))
+ parser.add_option("--topurl", help=_("url for Koji file access"))
+ parser.add_option("--pkgurl", help=optparse.SUPPRESS_HELP)
+ parser.add_option("--help-commands", action="store_true", default=False, help=_("list commands"))
+ (options, args) = parser.parse_args()
+
+ if options.help_commands:
+ list_commands()
+ sys.exit(0)
+ if not args:
+ list_commands()
+ sys.exit(0)
+
+ aliases = {
+ 'cancel-task' : 'cancel',
+ 'cxl' : 'cancel',
+ 'list-commands' : 'help',
+ 'move-pkg': 'move-build',
+ 'move': 'move-build',
+ 'latest-pkg': 'latest-build',
+ 'tag-pkg': 'tag-build',
+ 'tag': 'tag-build',
+ 'untag-pkg': 'untag-build',
+ 'untag': 'untag-build',
+ }
+ cmd = args[0]
+ cmd = aliases.get(cmd, cmd)
+ if cmd.lower() in greetings:
+ cmd = "moshimoshi"
+ cmd = cmd.replace('-', '_')
+ if globals().has_key('anon_handle_' + cmd):
+ if not options.force_auth:
+ options.noauth = True
+ cmd = 'anon_handle_' + cmd
+ elif globals().has_key('handle_' + cmd):
+ cmd = 'handle_' + cmd
+ else:
+ list_commands()
+ parser.error('Unknown command: %s' % args[0])
+ assert False
+ # load local config
+ defaults = {
+ 'server' : 'http://localhost/kojihub',
+ 'weburl' : 'http://localhost/koji',
+ 'topurl' : None,
+ 'pkgurl' : None,
+ 'topdir' : '/mnt/koji',
+ 'max_retries' : None,
+ 'retry_interval': None,
+ 'anon_retry' : None,
+ 'offline_retry' : None,
+ 'offline_retry_interval' : None,
+ 'keepalive' : True,
+ 'timeout' : None,
+ 'use_fast_upload': False,
+ 'poll_interval': 5,
+ 'krbservice': 'host',
+ 'cert': '~/.koji/client.crt',
+ 'ca': '~/.koji/clientca.crt',
+ 'serverca': '~/.koji/serverca.crt',
+ 'authtype': None
+ }
+ #note: later config files override earlier ones
+ configs = koji.config_directory_contents('/etc/koji.conf.d')
+ if os.access('/etc/koji.conf', os.F_OK):
+ configs.append('/etc/koji.conf')
+ if options.configFile:
+ fn = os.path.expanduser(options.configFile)
+ if os.path.isdir(fn):
+ contents = koji.config_directory_contents(fn)
+ if not contents:
+ parser.error("No config files found in directory: %s" % fn)
+ configs.extend(contents)
+ else:
+ if not os.access(fn, os.F_OK):
+ parser.error("No such file: %s" % fn)
+ configs.append(fn)
+ else:
+ user_config_dir = os.path.expanduser("~/.koji/config.d")
+ configs.extend(koji.config_directory_contents(user_config_dir))
+ fn = os.path.expanduser("~/.koji/config")
+ if os.access(fn, os.F_OK):
+ configs.append(fn)
+ got_conf = False
+ for configFile in configs:
+ f = open(configFile)
+ config = ConfigParser.ConfigParser()
+ config.readfp(f)
+ f.close()
+ if config.has_section(options.profile):
+ got_conf = True
+ for name, value in config.items(options.profile):
+ #note the defaults dictionary also serves to indicate which
+ #options *can* be set via the config file. Such options should
+ #not have a default value set in the option parser.
+ if defaults.has_key(name):
+ if name in ('anon_retry', 'offline_retry', 'keepalive', 'use_fast_upload'):
+ defaults[name] = config.getboolean(options.profile, name)
+ elif name in ('max_retries', 'retry_interval',
+ 'offline_retry_interval', 'poll_interval', 'timeout'):
+ try:
+ defaults[name] = int(value)
+ except ValueError:
+ parser.error("value for %s config option must be a valid integer" % name)
+ assert False
+ else:
+ defaults[name] = value
+ if configs and not got_conf:
+ warn("Warning: no configuration for profile name: %s" % options.profile)
+ for name, value in defaults.iteritems():
+ if getattr(options, name, None) is None:
+ setattr(options, name, value)
+ dir_opts = ('topdir', 'cert', 'ca', 'serverca')
+ for name in dir_opts:
+ # expand paths here, so we don't have to worry about it later
+ value = os.path.expanduser(getattr(options, name))
+ setattr(options, name, value)
+
+ #honor topdir
+ if options.topdir:
+ koji.BASEDIR = options.topdir
+ koji.pathinfo.topdir = options.topdir
+
+ #pkgurl is obsolete
+ if options.pkgurl:
+ if options.topurl:
+ warn("Warning: the pkgurl option is obsolete")
+ else:
+ suggest = re.sub(r'/packages/?$', '', options.pkgurl)
+ if suggest != options.pkgurl:
+ warn("Warning: the pkgurl option is obsolete, using topurl=%r"
+ % suggest)
+ options.topurl = suggest
+ else:
+ warn("Warning: The pkgurl option is obsolete, please use topurl instead")
+
+ return options, cmd, args[1:]
+
+def ensure_connection(session):
+ try:
+ ret = session.getAPIVersion()
+ except xmlrpclib.ProtocolError:
+ error(_("Error: Unable to connect to server"))
+ if ret != koji.API_VERSION:
+ warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))
+
+def print_task_headers():
+ """Print the column headers"""
+ print "ID Pri Owner State Arch Name"
+
+def print_task(task,depth=0):
+ """Print a task"""
+ task = task.copy()
+ task['state'] = koji.TASK_STATES.get(task['state'],'BADSTATE')
+ fmt = "%(id)-8s %(priority)-4s %(owner_name)-20s %(state)-8s %(arch)-10s "
+ if depth:
+ indent = " "*(depth-1) + " +"
+ else:
+ indent = ''
+ label = koji.taskLabel(task)
+ print ''.join([fmt % task, indent, label])
+
+def print_task_recurse(task,depth=0):
+ """Print a task and its children"""
+ print_task(task,depth)
+ for child in task.get('children',()):
+ print_task_recurse(child,depth+1)
+
+
+class TaskWatcher(object):
+
+ def __init__(self,task_id,session,level=0,quiet=False):
+ self.id = task_id
+ self.session = session
+ self.info = None
+ self.level = level
+ self.quiet = quiet
+
+ #XXX - a bunch of this stuff needs to adapt to different tasks
+
+ def str(self):
+ if self.info:
+ label = koji.taskLabel(self.info)
+ return "%s%d %s" % (' ' * self.level, self.id, label)
+ else:
+ return "%s%d" % (' ' * self.level, self.id)
+
+ def __str__(self):
+ return self.str()
+
+ def get_failure(self):
+ """Print infomation about task completion"""
+ if self.info['state'] != koji.TASK_STATES['FAILED']:
+ return ''
+ error = None
+ try:
+ result = self.session.getTaskResult(self.id)
+ except (xmlrpclib.Fault,koji.GenericError),e:
+ error = e
+ if error is None:
+ # print "%s: complete" % self.str()
+ # We already reported this task as complete in update()
+ return ''
+ else:
+ return '%s: %s' % (error.__class__.__name__, str(error).strip())
+
+ def update(self):
+ """Update info and log if needed. Returns True on state change."""
+ if self.is_done():
+ # Already done, nothing else to report
+ return False
+ last = self.info
+ self.info = self.session.getTaskInfo(self.id, request=True)
+ if self.info is None:
+ if not self.quiet:
+ print "No such task id: %i" % self.id
+ sys.exit(1)
+ state = self.info['state']
+ if last:
+ #compare and note status changes
+ laststate = last['state']
+ if laststate != state:
+ if not self.quiet:
+ print "%s: %s -> %s" % (self.str(), self.display_state(last), self.display_state(self.info))
+ return True
+ return False
+ else:
+ # First time we're seeing this task, so just show the current state
+ if not self.quiet:
+ print "%s: %s" % (self.str(), self.display_state(self.info))
+ return False
+
+ def is_done(self):
+ if self.info is None:
+ return False
+ state = koji.TASK_STATES[self.info['state']]
+ return (state in ['CLOSED','CANCELED','FAILED'])
+
+ def is_success(self):
+ if self.info is None:
+ return False
+ state = koji.TASK_STATES[self.info['state']]
+ return (state == 'CLOSED')
+
+ def display_state(self, info):
+ # We can sometimes be passed a task that is not yet open, but
+ # not finished either; info would be None.
+ if not info:
+ return 'unknown'
+ if info['state'] == koji.TASK_STATES['OPEN']:
+ if info['host_id']:
+ host = self.session.getHost(info['host_id'])
+ return 'open (%s)' % host['name']
+ else:
+ return 'open'
+ elif info['state'] == koji.TASK_STATES['FAILED']:
+ return 'FAILED: %s' % self.get_failure()
+ else:
+ return koji.TASK_STATES[info['state']].lower()
+
+def display_tasklist_status(tasks):
+ free = 0
+ open = 0
+ failed = 0
+ done = 0
+ for task_id in tasks.keys():
+ status = tasks[task_id].info['state']
+ if status == koji.TASK_STATES['FAILED']:
+ failed += 1
+ elif status == koji.TASK_STATES['CLOSED'] or status == koji.TASK_STATES['CANCELED']:
+ done += 1
+ elif status == koji.TASK_STATES['OPEN'] or status == koji.TASK_STATES['ASSIGNED']:
+ open += 1
+ elif status == koji.TASK_STATES['FREE']:
+ free += 1
+ print " %d free %d open %d done %d failed" % (free, open, done, failed)
+
+def display_task_results(tasks):
+ for task in [task for task in tasks.values() if task.level == 0]:
+ state = task.info['state']
+ task_label = task.str()
+
+ if state == koji.TASK_STATES['CLOSED']:
+ print '%s completed successfully' % task_label
+ elif state == koji.TASK_STATES['FAILED']:
+ print '%s failed' % task_label
+ elif state == koji.TASK_STATES['CANCELED']:
+ print '%s was canceled' % task_label
+ else:
+ # shouldn't happen
+ print '%s has not completed' % task_label
+
+def watch_tasks(session,tasklist,quiet=False):
+ global options
+ if not tasklist:
+ return
+ if not quiet:
+ print "Watching tasks (this may be safely interrupted)..."
+ sys.stdout.flush()
+ rv = 0
+ try:
+ tasks = {}
+ for task_id in tasklist:
+ tasks[task_id] = TaskWatcher(task_id,session,quiet=quiet)
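+ # Poll every watched task until all reach a terminal state; child tasks
+ # discovered along the way are added to the watch list.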
+ while True:
+ all_done = True
+ for task_id,task in tasks.items():
+ changed = task.update()
+ if not task.is_done():
+ all_done = False
+ else:
+ if changed:
+ # task is done and state just changed
+ if not quiet:
+ display_tasklist_status(tasks)
+ if not task.is_success():
+ rv = 1
+ for child in session.getTaskChildren(task_id):
+ child_id = child['id']
+ if child_id not in tasks:
+ tasks[child_id] = TaskWatcher(child_id, session, task.level + 1, quiet=quiet)
+ tasks[child_id].update()
+ # If we found new children, go through the list again,
+ # in case they have children also
+ all_done = False
+ if all_done:
+ if not quiet:
+ print
+ display_task_results(tasks)
+ break
+
+ sys.stdout.flush()
+ time.sleep(options.poll_interval)
+ except KeyboardInterrupt:
+ if tasks and not quiet:
+ progname = os.path.basename(sys.argv[0]) or 'koji'
+ tlist = ['%s: %s' % (t.str(), t.display_state(t.info))
+ for t in tasks.values() if not t.is_done()]
+ print \
+"""Tasks still running. You can continue to watch with the '%s watch-task' command.
+Running Tasks:
+%s""" % (progname, '\n'.join(tlist))
+ rv = 1
+ return rv
+
+def watch_logs(session, tasklist, opts):
+ global options
+ print "Watching logs (this may be safely interrupted)..."
+ def _isDone(session, taskId):
+ info = session.getTaskInfo(taskId)
+ if info is None:
+ print "No such task id: %i" % taskId
+ sys.exit(1)
+ state = koji.TASK_STATES[info['state']]
+ return (state in ['CLOSED','CANCELED','FAILED'])
+
+ try:
+ offsets = {}
+ for task_id in tasklist:
+ offsets[task_id] = {}
+
+ lastlog = None
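+ # Tail each task's logs by downloading successive chunks from the last
+ # seen offset, printing a "==> task:log <==" header whenever output
+ # switches to a different task or log file.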
+ while True:
+ for task_id in tasklist[:]:
+ if _isDone(session, task_id):
+ tasklist.remove(task_id)
+
+ output = session.listTaskOutput(task_id)
+
+ if opts.log:
+ logs = [filename for filename in output if filename == opts.log]
+ else:
+ logs = [filename for filename in output if filename.endswith('.log')]
+
+ taskoffsets = offsets[task_id]
+ for log in logs:
+ contents = 'placeholder'
+ while contents:
+ if not taskoffsets.has_key(log):
+ taskoffsets[log] = 0
+
+ contents = session.downloadTaskOutput(task_id, log, taskoffsets[log], 16384)
+ taskoffsets[log] += len(contents)
+ if contents:
+ currlog = "%d:%s:" % (task_id, log)
+ if currlog != lastlog:
+ if lastlog:
+ sys.stdout.write("\n")
+ sys.stdout.write("==> %s <==\n" % currlog)
+ lastlog = currlog
+ sys.stdout.write(contents)
+
+ if not tasklist:
+ break
+
+ time.sleep(options.poll_interval)
+ except KeyboardInterrupt:
+ pass
+
+def handle_add_group(options, session, args):
+ "[admin] Add a group to a tag"
+ usage = _("usage: %prog add-group <tag> <group>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 2:
+ parser.error(_("Please specify a tag name and a group name"))
+ assert False
+ tag = args[0]
+ group = args[1]
+
+ activate_session(session)
+ if not session.hasPerm('admin'):
+ print "This action requires admin privileges"
+ return 1
+
+ dsttag = session.getTag(tag)
+ if not dsttag:
+ print "Unknown tag: %s" % tag
+ return 1
+
+ groups = dict([(p['name'], p['group_id']) for p in session.getTagGroups(tag, inherit=False)])
+ group_id = groups.get(group, None)
+ if group_id is not None:
+ print "Group %s already exists for tag %s" % (group, tag)
+ return 1
+
+ session.groupListAdd(tag, group)
+
+def handle_add_host(options, session, args):
+ "[admin] Add a host"
+ usage = _("usage: %prog add-host [options] hostname arch [arch2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--krb-principal", help=_("set a non-default kerberos principal for the host"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a hostname and at least one arch"))
+ assert False
+ host = args[0]
+ activate_session(session)
+ id = session.getHost(host)
+ if id:
+ print "%s is already in the database" % host
+ return 1
+ else:
+ kwargs = {}
+ if options.krb_principal is not None:
+ kwargs['krb_principal'] = options.krb_principal
+ id = session.addHost(host, args[1:], **kwargs)
+ if id:
+ print "%s added: id %d" % (host, id)
+
+def handle_edit_host(options, session, args):
+ "[admin] Edit a host"
+ usage = _("usage: %prog edit-host hostname ... [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--arches", help=_("Space-separated list of supported architectures"))
+ parser.add_option("--capacity", type="float", help=_("Capacity of this host"))
+ parser.add_option("--description", metavar="DESC", help=_("Description of this host"))
+ parser.add_option("--comment", help=_("A brief comment about this host"))
+ (subopts, args) = parser.parse_args(args)
+ if not args:
+ parser.error(_("Please specify a hostname"))
+
+ activate_session(session)
+
+ vals = {}
+ for key, val in subopts.__dict__.items():
+ if val is not None:
+ vals[key] = val
+
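+ # First pass: batch the getHost lookups in one multicall to validate
+ # every hostname before editing anything.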
+ session.multicall = True
+ for host in args:
+ session.getHost(host)
+ error = False
+ for host, [info] in zip(args, session.multiCall(strict=True)):
+ if not info:
+ print _("Host %s does not exist") % host
+ error = True
+
+ if error:
+ print _("No changes made, please correct the command line")
+ return 1
+
+ session.multicall = True
+ for host in args:
+ session.editHost(host, **vals)
+ for host, [result] in zip(args, session.multiCall(strict=True)):
+ if result:
+ print _("Edited %s") % host
+ else:
+ print _("No changes made to %s") % host
+
+def handle_add_host_to_channel(options, session, args):
+ "[admin] Add a host to a channel"
+ usage = _("usage: %prog add-host-to-channel [options] hostname channel")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--list", action="store_true", help=_("List possible channels"))
+ parser.add_option("--new", action="store_true", help=_("Create channel if needed"))
+ (options, args) = parser.parse_args(args)
+ if not options.list and len(args) != 2:
+ parser.error(_("Please specify a hostname and a channel"))
+ assert False
+ activate_session(session)
+ if options.list:
+ for channel in session.listChannels():
+ print channel['name']
+ return
+ channel = args[1]
+ if not options.new:
+ channelinfo = session.getChannel(channel)
+ if not channelinfo:
+ print "No such channel: %s" % channel
+ return 1
+ host = args[0]
+ hostinfo = session.getHost(host)
+ if not hostinfo:
+ print "No such host: %s" % host
+ return 1
+ kwargs = {}
+ if options.new:
+ kwargs['create'] = True
+ session.addHostToChannel(host, channel, **kwargs)
+
+def handle_remove_host_from_channel(options, session, args):
+ "[admin] Remove a host from a channel"
+ usage = _("usage: %prog remove-host-from-channel [options] hostname channel")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 2:
+ parser.error(_("Please specify a hostname and a channel"))
+ assert False
+ host = args[0]
+ activate_session(session)
+ hostinfo = session.getHost(host)
+ if not hostinfo:
+ print "No such host: %s" % host
+ return 1
+ hostchannels = [c['name'] for c in session.listChannels(hostinfo['id'])]
+
+ channel = args[1]
+ if channel not in hostchannels:
+ print "Host %s is not a member of channel %s" % (host, channel)
+ return 1
+
+ session.removeHostFromChannel(host, channel)
+
+def handle_remove_channel(options, session, args):
+ "[admin] Remove a channel entirely"
+ usage = _("usage: %prog remove-channel [options] channel")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action="store_true", help=_("force removal, if possible"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ activate_session(session)
+ cinfo = session.getChannel(args[0])
+ if not cinfo:
+ print "No such channel: %s" % args[0]
+ return 1
+ session.removeChannel(args[0], force=options.force)
+
+def handle_rename_channel(options, session, args):
+ "[admin] Rename a channel"
+ usage = _("usage: %prog rename-channel [options] old-name new-name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 2:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ activate_session(session)
+ cinfo = session.getChannel(args[0])
+ if not cinfo:
+ print "No such channel: %s" % args[0]
+ return 1
+ session.renameChannel(args[0], args[1])
+
+def handle_add_pkg(options, session, args):
+ "[admin] Add a package to the listing for tag"
+ usage = _("usage: %prog add-pkg [options] tag package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action='store_true', help=_("Override blocks if necessary"))
+ parser.add_option("--owner", help=_("Specify owner"))
+ parser.add_option("--extra-arches", help=_("Specify extra arches"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a tag and at least one package"))
+ assert False
+ if not options.owner:
+ parser.error(_("Please specify an owner for the package(s)"))
+ assert False
+ if not session.getUser(options.owner):
+ print "User %s does not exist" % options.owner
+ return 1
+ activate_session(session)
+ tag = args[0]
+ opts = {}
+ opts['force'] = options.force
+ opts['block'] = False
+ # check if list of packages exists for that tag already
+ dsttag = session.getTag(tag)
+ if not dsttag:
+ print "No such tag: %s" % tag
+ return 1
+ pkglist = dict([(p['package_name'], p['package_id']) for p in session.listPackages(tagID=dsttag['id'])])
+ ret = 0
+ for package in args[1:]:
+ package_id = pkglist.get(package, None)
+ if package_id is not None:
+ print "Package %s already exists in tag %s" % (package, tag)
+ ret = 1
+ if ret:
+ return ret
+ if options.extra_arches:
+ opts['extra_arches'] = ' '.join(options.extra_arches.replace(',',' ').split())
+ for package in args[1:]:
+ #really should implement multicall...
+ session.packageListAdd(tag,package,options.owner,**opts)
+
+def handle_block_pkg(options, session, args):
+ "[admin] Block a package in the listing for tag"
+ usage = _("usage: %prog block-pkg [options] tag package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a tag and at least one package"))
+ assert False
+ activate_session(session)
+ tag = args[0]
+ # check if list of packages exists for that tag already
+ dsttag = session.getTag(tag)
+ if not dsttag:
+ print "No such tag: %s" % tag
+ return 1
+ pkglist = dict([(p['package_name'], p['package_id']) for p in session.listPackages(tagID=dsttag['id'], inherited=True)])
+ ret = 0
+ for package in args[1:]:
+ package_id = pkglist.get(package, None)
+ if package_id is None:
+ print "Package %s doesn't exist in tag %s" % (package, tag)
+ ret = 1
+ if ret:
+ return ret
+ for package in args[1:]:
+ #really should implement multicall...
+ session.packageListBlock(tag,package)
+
+def handle_remove_pkg(options, session, args):
+ "[admin] Remove a package from the listing for tag"
+ usage = _("usage: %prog remove-pkg [options] tag package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action='store_true', help=_("Override blocks if necessary"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a tag and at least one package"))
+ assert False
+ activate_session(session)
+ tag = args[0]
+ opts = {}
+ opts['force'] = options.force
+ # check if list of packages exists for that tag already
+ dsttag = session.getTag(tag)
+ if not dsttag:
+ print "No such tag: %s" % tag
+ return 1
+ pkglist = dict([(p['package_name'], p['package_id']) for p in session.listPackages(tagID=dsttag['id'])])
+ ret = 0
+ for package in args[1:]:
+ package_id = pkglist.get(package, None)
+ if package_id is None:
+ print "Package %s is not in tag %s" % (package, tag)
+ ret = 1
+ if ret:
+ return ret
+ for package in args[1:]:
+ #really should implement multicall...
+ session.packageListRemove(tag, package, **opts)
+
+def _unique_path(prefix):
+ """Create a unique path fragment by appending a path component
+ to prefix. The path component will consist of a string of letters and numbers
+ that is unlikely to be a duplicate, but is not guaranteed to be unique."""
+ # Use time() in the dirname to provide a little more information when
+ # browsing the filesystem.
+ # For some reason repr(time.time()) includes 4 or 5
+ # more digits of precision than str(time.time())
+ return '%s/%r.%s' % (prefix, time.time(),
+ ''.join([random.choice(string.ascii_letters) for i in range(8)]))
+
+def _format_size(size):
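+ # Render a byte count with binary (1024-based) units.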
+ if (size / 1073741824 >= 1):
+ return "%0.2f GiB" % (size / 1073741824.0)
+ if (size / 1048576 >= 1):
+ return "%0.2f MiB" % (size / 1048576.0)
+ if (size / 1024 >= 1):
+ return "%0.2f KiB" % (size / 1024.0)
+ return "%0.2f B" % (size)
+
+def _format_secs(t):
+ h = t / 3600
+ t = t % 3600
+ m = t / 60
+ s = t % 60
+ return "%02d:%02d:%02d" % (h, m, s)
+
+def _progress_callback(uploaded, total, piece, time, total_time):
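+ # Draw a one-line progress display: bar, percent done, elapsed time,
+ # bytes uploaded, and current transfer speed.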
+ percent_done = float(uploaded)/float(total)
+ percent_done_str = "%02d%%" % (percent_done * 100)
+ data_done = _format_size(uploaded)
+ elapsed = _format_secs(total_time)
+
+ speed = "- B/sec"
+ if (time):
+ if (uploaded != total):
+ speed = _format_size(float(piece)/float(time)) + "/sec"
+ else:
+ speed = _format_size(float(total)/float(total_time)) + "/sec"
+
+ # write formatted string and flush
+ sys.stdout.write("[% -36s] % 4s % 8s % 10s % 14s\r" % ('='*(int(percent_done*36)), percent_done_str, elapsed, data_done, speed))
+ sys.stdout.flush()
+
+def _running_in_bg():
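+ # Backgrounded means stdin is not a tty, or our process group is not the
+ # terminal's foreground process group.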
+ try:
+ if (not os.isatty(0)) or (os.getpgrp() != os.tcgetpgrp(0)):
+ return True
+ except OSError, e:
+ return True
+ return False
+
+def handle_build(options, session, args):
+ "Build a package from source"
+ usage = _("usage: %prog build [options] target <srpm path or scm url>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag package"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Perform a scratch build"))
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the build, even if running in the background"))
+ parser.add_option("--nowait", action="store_false", dest="wait",
+ help=_("Don't wait on build"))
+ parser.add_option("--quiet", action="store_true",
+ help=_("Do not print the task information"), default=options.quiet)
+ parser.add_option("--arch-override", help=_("Override build arches"))
+ parser.add_option("--repo-id", type="int", help=_("Use a specific repo"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the build at a lower priority"))
+ (build_opts, args) = parser.parse_args(args)
+ if len(args) != 2:
+ parser.error(_("Exactly two arguments (a build target and a SCM URL or srpm file) are required"))
+ assert False
+ if build_opts.arch_override and not build_opts.scratch:
+ parser.error(_("--arch_override is only allowed for --scratch builds"))
+ activate_session(session)
+ target = args[0]
+ if target.lower() == "none" and build_opts.repo_id:
+ target = None
+ build_opts.skip_tag = True
+ else:
+ build_target = session.getBuildTarget(target)
+ if not build_target:
+ parser.error(_("Unknown build target: %s" % target))
+ dest_tag = session.getTag(build_target['dest_tag'])
+ if not dest_tag:
+ parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
+ if dest_tag['locked'] and not build_opts.scratch:
+ parser.error(_("Destination tag %s is locked" % dest_tag['name']))
+ source = args[1]
+ opts = {}
+ if build_opts.arch_override:
+ opts['arch_override'] = ' '.join(build_opts.arch_override.replace(',',' ').split())
+ for key in ('skip_tag', 'scratch', 'repo_id'):
+ val = getattr(build_opts, key)
+ if val is not None:
+ opts[key] = val
+ priority = None
+ if build_opts.background:
+ #relative to koji.PRIO_DEFAULT
+ priority = 5
+ # try to check that source is an SRPM
+ if '://' not in source:
+ #treat source as an srpm and upload it
+ if not build_opts.quiet:
+ print "Uploading srpm: %s" % source
+ serverdir = _unique_path('cli-build')
+ if _running_in_bg() or build_opts.noprogress or build_opts.quiet:
+ callback = None
+ else:
+ callback = _progress_callback
+ session.uploadWrapper(source, serverdir, callback=callback)
+ print
+ source = "%s/%s" % (serverdir, os.path.basename(source))
+ task_id = session.build(source, target, opts, priority=priority)
+ if not build_opts.quiet:
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if build_opts.wait or (build_opts.wait is None and not _running_in_bg()):
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=build_opts.quiet)
+ else:
+ return
+
+def handle_chain_build(options, session, args):
+ # XXX - replace handle_build with this, once chain-building has gotten testing
+ "Build one or more packages from source"
+ usage = _("usage: %prog chain-build [options] target URL [URL2 [:] URL3 [:] URL4 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--nowait", action="store_true",
+ help=_("Don't wait on build"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the build at a lower priority"))
+ (build_opts, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("At least two arguments (a build target and a SCM URL) are required"))
+ assert False
+ activate_session(session)
+ target = args[0]
+ build_target = session.getBuildTarget(target)
+ if not build_target:
+ parser.error(_("Unknown build target: %s" % target))
+ dest_tag = session.getTag(build_target['dest_tag'], strict=True)
+ if dest_tag['locked']:
+ parser.error(_("Destination tag %s is locked" % dest_tag['name']))
+
+ # check that the destination tag is in the inheritance tree of the build tag
+ # otherwise there is no way that a chain-build can work
+ ancestors = session.getFullInheritance(build_target['build_tag'])
+ if dest_tag['id'] not in [build_target['build_tag']] + [ancestor['parent_id'] for ancestor in ancestors]:
+ print _("Packages in destination tag %(dest_tag_name)s are not inherited by build tag %(build_tag_name)s" % build_target)
+ print _("Target %s is not usable for a chain-build" % build_target['name'])
+ return 1
+
+ sources = args[1:]
+
+ src_list = []
+ build_level = []
+ #src_list is a list of lists of sources to build.
+ # each list is a block of builds ("build level") which must all be completed
+ # before the next block begins. Blocks are separated on the command line with ':'
+ for src in sources:
+ if src == ':':
+ if build_level:
+ src_list.append(build_level)
+ build_level = []
+ elif '://' in src:
+ # quick check that src might be a url
+ build_level.append(src)
+ elif '/' not in src and not src.endswith('.rpm') and len(src.split('-')) >= 3:
+ # quick check that it looks like a N-V-R
+ build_level.append(src)
+ else:
+ print _('"%s" is not a SCM URL or package N-V-R' % src)
+ return 1
+ if build_level:
+ src_list.append(build_level)
+
+ if len(src_list) < 2:
+ parser.error(_('You must specify at least one dependency between builds with : (colon)\nIf there are no dependencies, use the build command instead'))
+
+ priority = None
+ if build_opts.background:
+ #relative to koji.PRIO_DEFAULT
+ priority = 5
+
+ task_id = session.chainBuild(src_list, target, priority=priority)
+
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if _running_in_bg() or build_opts.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session,[task_id],quiet=options.quiet)
+
+def handle_maven_build(options, session, args):
+ "Build a Maven package from source"
+ usage = _("usage: %prog maven-build [options] target URL")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--patches", action="store", metavar="URL",
+ help=_("SCM URL of a directory containing patches to apply to the sources before building"))
+ parser.add_option("-G", "--goal", action="append",
+ dest="goals", metavar="GOAL", default=[],
+ help=_("Additional goal to run before \"deploy\""))
+ parser.add_option("-P", "--profile", action="append",
+ dest="profiles", metavar="PROFILE", default=[],
+ help=_("Enable a profile for the Maven build"))
+ parser.add_option("-D", "--property", action="append",
+ dest="properties", metavar="NAME=VALUE", default=[],
+ help=_("Pass a system property to the Maven build"))
+ parser.add_option("-E", "--env", action="append",
+ dest="envs", metavar="NAME=VALUE", default=[],
+ help=_("Set an environment variable"))
+ parser.add_option("-p", "--package", action="append",
+ dest="packages", metavar="PACKAGE", default=[],
+ help=_("Install an additional package into the buildroot"))
+ parser.add_option("-J", "--jvm-option", action="append",
+ dest="jvm_options", metavar="OPTION", default=[],
+ help=_("Pass a command-line option to the JVM"))
+ parser.add_option("-M", "--maven-option", action="append",
+ dest="maven_options", metavar="OPTION", default=[],
+ help=_("Pass a command-line option to Maven"))
+ parser.add_option("--ini", action="append",
+ dest="inis", metavar="CONFIG", default=[],
+ help=_("Pass build parameters via a .ini file"))
+ parser.add_option("-s", "--section",
+ help=_("Get build parameters from this section of the .ini"))
+ parser.add_option("--debug", action="store_true",
+ help=_("Run Maven build in debug mode"))
+ parser.add_option("--specfile", action="store", metavar="URL",
+ help=_("SCM URL of a spec file fragment to use to generate wrapper RPMs"))
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag package"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Perform a scratch build"))
+ parser.add_option("--nowait", action="store_true",
+ help=_("Don't wait on build"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the build at a lower priority"))
+ (build_opts, args) = parser.parse_args(args)
+ if build_opts.inis:
+ if len(args) != 1:
+ parser.error(_("Exactly one argument (a build target) is required"))
+ else:
+ if len(args) != 2:
+ parser.error(_("Exactly two arguments (a build target and a SCM URL) are required"))
+ activate_session(session)
+ target = args[0]
+ build_target = session.getBuildTarget(target)
+ if not build_target:
+ parser.error(_("Unknown build target: %s" % target))
+ dest_tag = session.getTag(build_target['dest_tag'])
+ if not dest_tag:
+ parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
+ if dest_tag['locked'] and not build_opts.scratch:
+ parser.error(_("Destination tag %s is locked" % dest_tag['name']))
+ if build_opts.inis:
+ try:
+ params = koji.util.parse_maven_param(build_opts.inis, scratch=build_opts.scratch,
+ section=build_opts.section)
+ except ValueError, e:
+ parser.error(e.args[0])
+ opts = params.values()[0]
+ if opts.pop('type', 'maven') != 'maven':
+ parser.error(_("Section %s does not contain a maven-build config") % params.keys()[0])
+ source = opts.pop('scmurl')
+ else:
+ source = args[1]
+ if '://' not in source:
+ parser.error(_("Invalid SCM URL: %s" % source))
+ opts = koji.util.maven_opts(build_opts, scratch=build_opts.scratch)
+ if build_opts.debug:
+ opts.setdefault('maven_options', []).append('--debug')
+ if build_opts.skip_tag:
+ opts['skip_tag'] = True
+ priority = None
+ if build_opts.background:
+ #relative to koji.PRIO_DEFAULT
+ priority = 5
+ task_id = session.mavenBuild(source, target, opts, priority=priority)
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if _running_in_bg() or build_opts.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session,[task_id],quiet=options.quiet)
+
+def handle_wrapper_rpm(options, session, args):
+ """Build wrapper rpms for any archives associated with a build."""
+ usage = _("usage: %prog wrapper-rpm [options] target build-id|n-v-r URL")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--create-build", action="store_true", help=_("Create a new build to contain wrapper rpms"))
+ parser.add_option("--ini", action="append",
+ dest="inis", metavar="CONFIG", default=[],
+ help=_("Pass build parameters via a .ini file"))
+ parser.add_option("-s", "--section",
+ help=_("Get build parameters from this section of the .ini"))
+ parser.add_option("--skip-tag", action="store_true", help=_("If creating a new build, don't tag it"))
+ parser.add_option("--scratch", action="store_true", help=_("Perform a scratch build"))
+ parser.add_option("--nowait", action="store_true", help=_("Don't wait on build"))
+ parser.add_option("--background", action="store_true", help=_("Run the build at a lower priority"))
+
+ (build_opts, args) = parser.parse_args(args)
+ if build_opts.inis:
+ if len(args) != 1:
+ parser.error(_("Exactly one argument (a build target) is required"))
+ else:
+ if len(args) < 3:
+ parser.error(_("You must provide a build target, a build ID or NVR, and a SCM URL to a specfile fragment"))
+ activate_session(session)
+
+ target = args[0]
+ if build_opts.inis:
+ try:
+ params = koji.util.parse_maven_param(build_opts.inis, scratch=build_opts.scratch,
+ section=build_opts.section)
+ except ValueError, e:
+ parser.error(e.args[0])
+ opts = params.values()[0]
+ if opts.get('type') != 'wrapper':
+ parser.error(_("Section %s does not contain a wrapper-rpm config") % params.keys()[0])
+ url = opts['scmurl']
+ package = opts['buildrequires'][0]
+ target_info = session.getBuildTarget(target, strict=True)
+ latest_builds = session.getLatestBuilds(target_info['dest_tag'], package=package)
+ if not latest_builds:
+ parser.error(_("No build of %s in %s") % (package, target_info['dest_tag_name']))
+ build_id = latest_builds[0]['nvr']
+ else:
+ build_id = args[1]
+ if build_id.isdigit():
+ build_id = int(build_id)
+ url = args[2]
+ priority = None
+ if build_opts.background:
+ priority = 5
+ opts = {}
+ if build_opts.create_build:
+ opts['create_build'] = True
+ if build_opts.skip_tag:
+ opts['skip_tag'] = True
+ if build_opts.scratch:
+ opts['scratch'] = True
+ task_id = session.wrapperRPM(build_id, url, target, priority, opts=opts)
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if _running_in_bg() or build_opts.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session,[task_id],quiet=options.quiet)
+
+def handle_maven_chain(options, session, args):
+ "Run a set of Maven builds in dependency order"
+ usage = _("usage: %prog maven-chain [options] target config...")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag builds"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Perform scratch builds"))
+ parser.add_option("--debug", action="store_true",
+ help=_("Run Maven build in debug mode"))
+ parser.add_option("--force", action="store_true",
+ help=_("Force rebuilds of all packages"))
+ parser.add_option("--nowait", action="store_true",
+ help=_("Don't wait on build"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the build at a lower priority"))
+ (build_opts, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Two arguments (a build target and a config file) are required"))
+ assert False
+ activate_session(session)
+ target = args[0]
+ build_target = session.getBuildTarget(target)
+ if not build_target:
+ parser.error(_("Unknown build target: %s") % target)
+ dest_tag = session.getTag(build_target['dest_tag'])
+ if not dest_tag:
+ parser.error(_("Unknown destination tag: %s") % build_target['dest_tag_name'])
+ if dest_tag['locked'] and not build_opts.scratch:
+ parser.error(_("Destination tag %s is locked") % dest_tag['name'])
+ opts = {}
+ for key in ('skip_tag', 'scratch', 'debug', 'force'):
+ val = getattr(build_opts, key)
+ if val:
+ opts[key] = val
+ try:
+ builds = koji.util.parse_maven_chain(args[1:], scratch=opts.get('scratch'))
+ except ValueError, e:
+ parser.error(e.args[0])
+ priority = None
+ if build_opts.background:
+ priority = 5
+ task_id = session.chainMaven(builds, target, opts, priority=priority)
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if _running_in_bg() or build_opts.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=options.quiet)
+
+def handle_resubmit(options, session, args):
+ """Retry a canceled or failed task, using the same parameter as the original task."""
+ usage = _("usage: %prog resubmit [options] taskID")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--nowait", action="store_true", help=_("Don't wait on task"))
+ parser.add_option("--nowatch", action="store_true", dest="nowait",
+ help=_("An alias for --nowait"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not print the task information"), default=options.quiet)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Please specify a single task ID"))
+ assert False
+ activate_session(session)
+ taskID = int(args[0])
+ if not options.quiet:
+ print "Resubmitting the following task:"
+ _printTaskInfo(session, taskID, 0, False, True)
+ newID = session.resubmitTask(taskID)
+ if not options.quiet:
+ print "Resubmitted task %s as new task %s" % (taskID, newID)
+ if _running_in_bg() or options.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session, [newID], quiet=options.quiet)
+
+def handle_call(options, session, args):
+ "[admin] Execute an arbitrary XML-RPC call"
+ usage = _("usage: %prog call [options] name [arg...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--python", action="store_true", help=_("Use python syntax for values"))
+ parser.add_option("--kwargs", help=_("Specify keyword arguments as a dictionary (implies --python)"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify the name of the XML-RPC method"))
+ assert False
+ if options.kwargs:
+ options.python = True
+ if options.python and ast is None:
+ parser.error(_("The ast module is required to read python syntax"))
+ activate_session(session)
+ name = args[0]
+ non_kw = []
+ kw = {}
+ if options.python:
+ non_kw = [ast.literal_eval(a) for a in args[1:]]
+ if options.kwargs:
+ kw = ast.literal_eval(options.kwargs)
+ else:
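+ # arguments of the form key=value become keyword arguments;
+ # anything else is passed positionally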
+ for arg in args[1:]:
+ if arg.find('=') != -1:
+ key, value = arg.split('=', 1)
+ kw[key] = arg_filter(value)
+ else:
+ non_kw.append(arg_filter(arg))
+ pprint.pprint(getattr(session, name).__call__(*non_kw, **kw))
+
+def anon_handle_mock_config(options, session, args):
+ "Create a mock config"
+ usage = _("usage: %prog mock-config [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-a", "--arch", help=_("Specify the arch"))
+ parser.add_option("-n", "--name", help=_("Specify the name for the buildroot"))
+ parser.add_option("--tag", help=_("Create a mock config for a tag"))
+ parser.add_option("--target", help=_("Create a mock config for a build target"))
+ parser.add_option("--task", help=_("Duplicate the mock config of a previous task"))
+ parser.add_option("--latest", action="store_true", help=_("use the latest redirect url"))
+ parser.add_option("--buildroot", help=_("Duplicate the mock config for the specified buildroot id"))
+ parser.add_option("--mockdir", default="/var/lib/mock", metavar="DIR",
+ help=_("Specify mockdir"))
+ parser.add_option("--topdir", metavar="DIR",
+ help=_("Specify topdir"))
+ parser.add_option("--topurl", metavar="URL", default=options.topurl,
+ help=_("URL under which Koji files are accessible"))
+ parser.add_option("--distribution", default="Koji Testing",
+ help=_("Change the distribution macro"))
+ parser.add_option("--yum-proxy", help=_("Specify a yum proxy"))
+ parser.add_option("-o", metavar="FILE", dest="ofile", help=_("Output to a file"))
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+ if args:
+ #for historical reasons, we also accept buildroot name as first arg
+ if not options.name:
+ options.name = args[0]
+ else:
+ parser.error(_("Name already specified via option"))
+ arch = None
+ opts = {}
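+ # start with the options common to every mode; the branches below
+ # fill in repoid and tag_name (and arch, where it can be inferred)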
+ for k in ('topdir', 'topurl', 'distribution', 'mockdir', 'yum_proxy'):
+ if hasattr(options, k):
+ opts[k] = getattr(options, k)
+ if options.buildroot:
+ try:
+ br_id = int(options.buildroot)
+ except ValueError:
+ parser.error(_("Buildroot id must be an integer"))
+ brootinfo = session.getBuildroot(br_id)
+ if options.latest:
+ opts['repoid'] = 'latest'
+ else:
+ opts['repoid'] = brootinfo['repo_id']
+ opts['tag_name'] = brootinfo['tag_name']
+ arch = brootinfo['arch']
+ elif options.task:
+ try:
+ task_id = int(options.task)
+ except ValueError:
+ parser.error(_("Task id must be an integer"))
+ broots = session.listBuildroots(taskID=task_id)
+ if not broots:
+ print _("No buildroots for task %s (or no such task)") % options.task
+ return 1
+ if len(broots) > 1:
+ print _("Multiple buildroots found: %s" % [br['id'] for br in broots])
+ brootinfo = broots[-1]
+ if options.latest:
+ opts['repoid'] = 'latest'
+ else:
+ opts['repoid'] = brootinfo['repo_id']
+ opts['tag_name'] = brootinfo['tag_name']
+ arch = brootinfo['arch']
+ def_name = "%s-task_%i" % (opts['tag_name'], task_id)
+ elif options.tag:
+ if not options.arch:
+ print _("Please specify an arch")
+ return 1
+ tag = session.getTag(options.tag)
+ if not tag:
+ parser.error(_("Invalid tag: %s" % options.tag))
+ arch = options.arch
+ config = session.getBuildConfig(tag['id'])
+ if not config:
+ print _("Could not get config info for tag: %(name)s") % tag
+ return 1
+ opts['tag_name'] = tag['name']
+ if options.latest:
+ opts['repoid'] = 'latest'
+ else:
+ repo = session.getRepo(config['id'])
+ if not repo:
+ print _("Could not get a repo for tag: %(name)s") % tag
+ return 1
+ opts['repoid'] = repo['id']
+ def_name = "%(tag_name)s-repo_%(repoid)s" % opts
+ elif options.target:
+ if not options.arch:
+ print _("Please specify an arch")
+ return 1
+ target = session.getBuildTarget(options.target)
+ if not target:
+ parser.error(_("Invalid target: %s" % options.target))
+ opts['tag_name'] = target['build_tag_name']
+ if options.latest:
+ opts['repoid'] = 'latest'
+ else:
+ repo = session.getRepo(target['build_tag'])
+ if not repo:
+ print _("Could not get a repo for tag: %(name)s") % opts['tag_name']
+ return 1
+ opts['repoid'] = repo['id']
+ else:
+ parser.error(_("Please specify one of: --tag, --target, --task, --buildroot"))
+ assert False
+ if options.name:
+ name = options.name
+ else:
+ name = "%(tag_name)s-repo_%(repoid)s" % opts
+ output = koji.genMockConfig(name, arch, **opts)
+ if options.ofile:
+ fo = file(options.ofile, 'w')
+ fo.write(output)
+ fo.close()
+ else:
+ print output
+
+def handle_disable_host(options, session, args):
+ "[admin] Mark one or more hosts as disabled"
+ usage = _("usage: %prog disable-host [options] hostname ...")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--comment", help=_("Comment indicating why the host(s) are being disabled"))
+ (options, args) = parser.parse_args(args)
+
+ activate_session(session)
+ session.multicall = True
+ for host in args:
+ session.getHost(host)
+ error = False
+ for host, [id] in zip(args, session.multiCall(strict=True)):
+ if not id:
+ print "Host %s does not exist" % host
+ error = True
+ if error:
+ print "No changes made. Please correct the command line."
+ return 1
+ session.multicall = True
+ for host in args:
+ session.disableHost(host)
+ if options.comment is not None:
+ session.editHost(host, comment=options.comment)
+ session.multiCall(strict=True)
+
+def handle_enable_host(options, session, args):
+ "[admin] Mark one or more hosts as enabled"
+ usage = _("usage: %prog enable-host [options] hostname ...")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--comment", help=_("Comment indicating why the host(s) are being enabled"))
+ (options, args) = parser.parse_args(args)
+
+ activate_session(session)
+ session.multicall = True
+ for host in args:
+ session.getHost(host)
+ error = False
+ for host, [id] in zip(args, session.multiCall(strict=True)):
+ if not id:
+ print "Host %s does not exist" % host
+ error = True
+ if error:
+ print "No changes made. Please correct the command line."
+ return 1
+ session.multicall = True
+ for host in args:
+ session.enableHost(host)
+ if options.comment is not None:
+ session.editHost(host, comment=options.comment)
+ session.multiCall(strict=True)
+
+
+def handle_restart_hosts(options, session, args):
+ "[admin] Restart enabled hosts"
+ usage = _("usage: %prog restart-hosts [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the task, even if running in the background"))
+ parser.add_option("--nowait", action="store_false", dest="wait",
+ help=_("Don't wait on task"))
+ parser.add_option("--quiet", action="store_true",
+ help=_("Do not print the task information"), default=options.quiet)
+ (my_opts, args) = parser.parse_args(args)
+
+ activate_session(session)
+ task_id = session.restartHosts()
+ if my_opts.wait or (my_opts.wait is None and not _running_in_bg()):
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=my_opts.quiet)
+ else:
+ return
+
+
+def linked_upload(localfile, path, name=None):
+ """Link a file into the (locally writable) workdir, bypassing upload"""
+ old_umask = os.umask(002)
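+ # relax the umask so the directories created below stay group-writable
+ # (together with the uid/gid fix below, this keeps httpd happy)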
+ try:
+ if name is None:
+ name = os.path.basename(localfile)
+ dest_dir = os.path.join(koji.pathinfo.work(), path)
+ dst = os.path.join(dest_dir, name)
+ koji.ensuredir(dest_dir)
+ # fix uid/gid to keep httpd happy
+ st = os.stat(koji.pathinfo.work())
+ os.chown(dest_dir, st.st_uid, st.st_gid)
+ print "Linking rpm to: %s" % dst
+ os.link(localfile, dst)
+ finally:
+ os.umask(old_umask)
+
+
+def handle_import(options, session, args):
+ "[admin] Import externally built RPMs into the database"
+ usage = _("usage: %prog import [options] package [package...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--link", action="store_true", help=_("Attempt to hardlink instead of uploading"))
+ parser.add_option("--test", action="store_true", help=_("Don't actually import"))
+ parser.add_option("--create-build", action="store_true", help=_("Auto-create builds as needed"))
+ parser.add_option("--src-epoch", help=_("When auto-creating builds, use this epoch"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("At least one package must be specified"))
+ assert False
+ if options.src_epoch in ('None', 'none', '(none)'):
+ options.src_epoch = None
+ elif options.src_epoch:
+ try:
+ options.src_epoch = int(options.src_epoch)
+ except (ValueError, TypeError):
+ parser.error(_("Invalid value for epoch: %s") % options.src_epoch)
+ assert False
+ activate_session(session)
+ to_import = {}
+ for path in args:
+ data = koji.get_header_fields(path, ('name','version','release','epoch',
+ 'arch','sigmd5','sourcepackage','sourcerpm'))
+ if data['sourcepackage']:
+ data['arch'] = 'src'
+ nvr = "%(name)s-%(version)s-%(release)s" % data
+ else:
+ nvr = "%(name)s-%(version)s-%(release)s" % koji.parse_NVRA(data['sourcerpm'])
+ to_import.setdefault(nvr,[]).append((path,data))
+ builds_missing = False
+ nvrs = to_import.keys()
+ nvrs.sort()
+ for nvr in nvrs:
+ to_import[nvr].sort()
+ for path, data in to_import[nvr]:
+ if data['sourcepackage']:
+ break
+ else:
+ #no srpm included, check for build
+ binfo = session.getBuild(nvr)
+ if not binfo:
+ print _("Missing build or srpm: %s") % nvr
+ builds_missing = True
+ if builds_missing and not options.create_build:
+ print _("Aborting import")
+ return
+
+ #local function to help us out below
+ def do_import(path, data):
+ rinfo = dict([(k,data[k]) for k in ('name','version','release','arch')])
+ prev = session.getRPM(rinfo)
+ if prev and not prev.get('external_repo_id', 0):
+ if prev['payloadhash'] == koji.hex_string(data['sigmd5']):
+ print _("RPM already imported: %s") % path
+ else:
+ print _("WARNING: md5sum mismatch for %s") % path
+ print _("Skipping import")
+ return
+ if options.test:
+ print _("Test mode -- skipping import for %s") % path
+ return
+ serverdir = _unique_path('cli-import')
+ if options.link:
+ linked_upload(path, serverdir)
+ else:
+ print _("uploading %s...") % path,
+ sys.stdout.flush()
+ session.uploadWrapper(path, serverdir)
+ print _("done")
+ sys.stdout.flush()
+ print _("importing %s...") % path,
+ sys.stdout.flush()
+ try:
+ session.importRPM(serverdir, os.path.basename(path))
+ except koji.GenericError, e:
+ print _("\nError importing: %s" % str(e).splitlines()[-1])
+ sys.stdout.flush()
+ else:
+ print _("done")
+ sys.stdout.flush()
+
+ for nvr in nvrs:
+ got_build = False
+ #srpms first, if any
+ for path, data in to_import[nvr]:
+ if data['sourcepackage']:
+ do_import(path, data)
+ got_build = True
+ for path, data in to_import[nvr]:
+ if data['sourcepackage']:
+ continue
+ if not got_build:
+ binfo = session.getBuild(nvr)
+ if binfo:
+ got_build = True
+ elif options.create_build:
+ binfo = koji.parse_NVR(nvr)
+ if options.src_epoch:
+ binfo['epoch'] = options.src_epoch
+ else:
+ binfo['epoch'] = data['epoch']
+ if options.test:
+ print _("Test mode -- would have created empty build: %s") % nvr
+ got_build = True #avoid duplicate notices
+ else:
+ print _("Creating empty build: %s") % nvr
+ session.createEmptyBuild(**binfo)
+ else:
+ #shouldn't happen
+ print _("Build missing: %s") % nvr
+ break
+ do_import(path, data)
+
+
+def handle_import_comps(options, session, args):
+ "Import group/package information from a comps file"
+ usage = _("usage: %prog import-comps [options] <file> <tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action="store_true", help=_("force import"))
+ (local_options, args) = parser.parse_args(args)
+ if len(args) != 2:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ comps = yum.comps.Comps()
+ comps.add(args[0])
+ tag = args[1]
+ force = local_options.force
+ activate_session(session)
+ for group in comps.groups:
+ print "Group: %(groupid)s (%(name)s)" % vars(group)
+ session.groupListAdd(tag, group.groupid, force=force, display_name=group.name,
+ is_default=bool(group.default),
+ uservisible=bool(group.user_visible),
+ description=group.description,
+ langonly=group.langonly)
+ #yum.comps does not support the biarchonly field
+ for ptype, pdata in [('mandatory', group.mandatory_packages),
+ ('default', group.default_packages),
+ ('optional', group.optional_packages),
+ ('conditional', group.conditional_packages)]:
+ for pkg in pdata:
+ pkgopts = {'type' : ptype}
+ if ptype == 'conditional':
+ pkgopts['requires'] = pdata[pkg]
+ #yum.comps does not support basearchonly
+ print " Package: %s: %r" % (pkg, pkgopts)
+ session.groupPackageListAdd(tag, group.groupid, pkg, force=force, **pkgopts)
+ #yum.comps does not support group dependencies
+ #yum.comps does not support metapkgs
+
+def handle_import_sig(options, session, args):
+ "[admin] Import signatures into the database"
+ usage = _("usage: %prog import-sig [options] package [package...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--with-unsigned", action="store_true",
+ help=_("Also import unsigned sig headers"))
+ parser.add_option("--test", action="store_true",
+ help=_("Test mode -- don't actually import"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("At least one package must be specified"))
+ assert False
+ for path in args:
+ if not os.path.exists(path):
+ parser.error(_("No such file: %s") % path)
+ activate_session(session)
+ for path in args:
+ data = koji.get_header_fields(path, ('name','version','release','arch','siggpg','sigpgp','sourcepackage'))
+ if data['sourcepackage']:
+ data['arch'] = 'src'
+ sigkey = data['siggpg']
+ if not sigkey:
+ sigkey = data['sigpgp']
+ if not sigkey:
+ sigkey = ""
+ if not options.with_unsigned:
+ print _("Skipping unsigned package: %s" % path)
+ continue
+ else:
+ sigkey = koji.get_sigpacket_key_id(sigkey)
+ del data['siggpg']
+ del data['sigpgp']
+ rinfo = session.getRPM(data)
+ if not rinfo:
+ print "No such rpm in system: %(name)s-%(version)s-%(release)s.%(arch)s" % data
+ continue
+ if rinfo.get('external_repo_id'):
+ print "Skipping external rpm: %(name)s-%(version)s-%(release)s.%(arch)s@%(external_repo_name)s" % rinfo
+ continue
+ sighdr = koji.rip_rpm_sighdr(path)
+ previous = session.queryRPMSigs(rpm_id=rinfo['id'], sigkey=sigkey)
+ assert len(previous) <= 1
+ if previous:
+ sighash = md5_constructor(sighdr).hexdigest()
+ if previous[0]['sighash'] == sighash:
+ print _("Signature already imported: %s") % path
+ continue
+ else:
+ print _("Warning: signature mismatch: %s") % path
+ continue
+ print _("Importing signature [key %s] from %s...") % (sigkey, path)
+ if not options.test:
+ session.addRPMSig(rinfo['id'], base64.encodestring(sighdr))
+
+def handle_write_signed_rpm(options, session, args):
+ "[admin] Write signed RPMs to disk"
+ usage = _("usage: %prog write-signed-rpm [options] <signature-key> n-v-r [n-v-r...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--all", action="store_true", help=_("Write out all RPMs signed with this key"))
+ parser.add_option("--buildid", help=_("Specify a build id rather than an n-v-r"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("A signature key must be specified"))
+ assert False
+ if len(args) < 2 and not (options.all or options.buildid):
+ parser.error(_("At least one RPM must be specified"))
+ assert False
+ key = args.pop(0)
+ activate_session(session)
+ if options.all:
+ rpms = session.queryRPMSigs(sigkey=key)
+ count = 1
+ for rpm in rpms:
+ print "%d/%d" % (count, len(rpms))
+ count += 1
+ session.writeSignedRPM(rpm['rpm_id'], key)
+ elif options.buildid:
+ rpms = session.listRPMs(int(options.buildid))
+ for rpm in rpms:
+ session.writeSignedRPM(rpm['id'], key)
+ else:
+ for nvr in args:
+ build = session.getBuild(nvr)
+ rpms = session.listRPMs(buildID=build['id'])
+ for rpm in rpms:
+ session.writeSignedRPM(rpm['id'], key)
+
+def handle_prune_signed_copies(options, session, args):
+ "[admin] Prune signed copies"
+ usage = _("usage: %prog prune-sigs [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-n", "--test", action="store_true", help=_("Test mode"))
+ parser.add_option("-v", "--verbose", action="store_true", help=_("Be more verbose"))
+ parser.add_option("--days", type="int", default=5, help=_("Timeout before clearing"))
+ parser.add_option("-p", "--package", "--pkg", help=_("Limit to a single package"))
+ parser.add_option("-b", "--build", help=_("Limit to a single build"))
+ parser.add_option("-i", "--ignore-tag", action="append", default=[],
+ help=_("Ignore these tags when considering whether a build is/was latest"))
+ parser.add_option("--ignore-tag-file",
+ help=_("File to read tag ignore patterns from"))
+ parser.add_option("-r", "--protect-tag", action="append", default=[],
+ help=_("Do not prune signed copies from matching tags"))
+ parser.add_option("--protect-tag-file",
+ help=_("File to read tag protect patterns from"))
+ parser.add_option("--trashcan-tag", default="trashcan", help=_("Specify trashcan tag"))
+ parser.add_option("--debug", action="store_true", help=_("Show debugging output"))
+ (options, args) = parser.parse_args(args)
+ # different ideas/modes
+ # 1) remove all signed copies of builds that are not latest for some tag
+ # 2) remove signed copies when a 'better' signature is available
+ # 3) for a specified tag, remove all signed copies that are not latest (w/ inheritance)
+ # 4) for a specified tag, remove all signed copies (no inheritance)
+ # (but skip builds that are multiply tagged)
+
+ #for now, we're just implementing mode #1
+ #(with the modification that we check to see if the build was latest within
+ #the last N days)
+ if options.ignore_tag_file:
+ fo = file(options.ignore_tag_file)
+ options.ignore_tag.extend([line.strip() for line in fo.readlines()])
+ fo.close()
+ if options.protect_tag_file:
+ fo = file(options.protect_tag_file)
+ options.protect_tag.extend([line.strip() for line in fo.readlines()])
+ fo.close()
+ if options.debug:
+ options.verbose = True
+ cutoff_ts = time.time() - options.days * 24 * 3600
+ if options.debug:
+ print "Cutoff date: %s" % time.asctime(time.localtime(cutoff_ts))
+ if not options.build:
+ if options.verbose:
+ print "Getting builds..."
+ qopts = {'state' : koji.BUILD_STATES['COMPLETE']}
+ if options.package:
+ pkginfo = session.getPackage(options.package)
+ qopts['packageID'] = pkginfo['id']
+ builds = [(b['nvr'], b) for b in session.listBuilds(**qopts)]
+ if options.verbose:
+ print "...got %i builds" % len(builds)
+ builds.sort()
+ else:
+ #single build
+ binfo = session.getBuild(options.build)
+ if not binfo:
+ parser.error('No such build: %s' % options.build)
+ assert False
+ builds = [("%(name)s-%(version)s-%(release)s" % binfo, binfo)]
+ total_files = 0
+ total_space = 0
+ def _histline(event_id, x):
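+ # Format one tagging-history entry (a tag or untag event) as a readable line.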
+ if event_id == x['revoke_event']:
+ ts = x['revoke_ts']
+ fmt = "Untagged %(name)s-%(version)s-%(release)s from %(tag_name)s"
+ elif event_id == x['create_event']:
+ ts = x['create_ts']
+ fmt = "Tagged %(name)s-%(version)s-%(release)s with %(tag_name)s"
+ if x['active']:
+ fmt += " [still active]"
+ else:
+ raise koji.GenericError, "unknown event: (%r, %r)" % (event_id, x)
+ time_str = time.asctime(time.localtime(ts))
+ return "%s: %s" % (time_str, fmt % x)
+ for nvr, binfo in builds:
+ #listBuilds returns slightly different data than normal
+ if not binfo.has_key('id'):
+ binfo['id'] = binfo['build_id']
+ if not binfo.has_key('name'):
+ binfo['name'] = binfo['package_name']
+ if options.debug:
+ print "DEBUG: %s" % nvr
+ #see how recently this build was latest for a tag
+ is_latest = False
+ is_protected = False
+ last_latest = None
+ tags = {}
+ for entry in session.tagHistory(build=binfo['id']):
+ #we used tagHistory rather than listTags so we can consider tags
+ #that the build was recently untagged from
+ tags.setdefault(entry['tag_name'], 1)
+ if options.debug:
+ print "Tags: %s" % tags.keys()
+ for tag_name in tags:
+ if tag_name == options.trashcan_tag:
+ if options.debug:
+ print "Ignoring trashcan tag for build %s" % nvr
+ continue
+ ignore_tag = False
+ for pattern in options.ignore_tag:
+ if fnmatch.fnmatch(tag_name, pattern):
+ if options.debug:
+ print "Ignoring tag %s for build %s" % (tag_name, nvr)
+ ignore_tag = True
+ break
+ if ignore_tag:
+ continue
+ #in order to determine how recently this build was latest, we have
+ #to look at the tagging history.
+ hist = session.tagHistory(tag=tag_name, package=binfo['name'])
+ if not hist:
+ #really shouldn't happen
+ raise koji.GenericError, "No history found for %s in %s" % (nvr, tag_name)
+ timeline = []
+ for x in hist:
+ #note that for revoked entries, we're effectively splitting them into
+ #two parts: creation and revocation.
+ timeline.append((x['create_event'], 1, x))
+ #at the same event, revokes happen first
+ if x['revoke_event'] is not None:
+ timeline.append((x['revoke_event'], 0, x))
+ timeline.sort()
+ #find most recent creation entry for our build and crop there
+ latest_ts = None
+ for i in xrange(len(timeline)-1, -1, -1):
+ #searching in reverse chronological order
+ event_id, is_create, entry = timeline[i]
+ if entry['build_id'] == binfo['id'] and is_create:
+ latest_ts = event_id
+ break
+ if not latest_ts:
+ #really shouldn't happen
+ raise koji.GenericError, "No creation event found for %s in %s" % (nvr, tag_name)
+ our_entry = entry
+ if options.debug:
+ print _histline(event_id, our_entry)
+ #now go through the events since most recent creation entry
+ timeline = timeline[i+1:]
+ if not timeline:
+ is_latest = True
+ if options.debug:
+ print "%s is latest in tag %s" % (nvr, tag_name)
+ break
+ #before we go any further, is this a protected tag?
+ protect_tag = False
+ for pattern in options.protect_tag:
+ if fnmatch.fnmatch(tag_name, pattern):
+ protect_tag = True
+ break
+ if protect_tag:
+ # we use the same time limit as for the latest calculation
+ # if this build was in this tag within that limit, then we will
+ # not prune its signed copies
+ if our_entry['revoke_event'] is None:
+ #we're still tagged with a protected tag
+ if options.debug:
+ print "Build %s has protected tag %s" % (nvr, tag_name)
+ is_protected = True
+ break
+ elif our_entry['revoke_ts'] > cutoff_ts:
+ #we were still tagged here sometime before the cutoff
+ if options.debug:
+ print "Build %s had protected tag %s until %s" \
+ % (nvr, tag_name, time.asctime(time.localtime(our_entry['revoke_ts'])))
+ is_protected = True
+ break
+ replaced_ts = None
+ revoke_ts = None
+ others = {}
+ for event_id, is_create, entry in timeline:
+ #So two things can knock this build from the title of latest:
+ # - it could be untagged (entry revoked)
+ # - another build could become latest (replaced)
+ #Note however that if the superseding entry is itself revoked, then
+ #our build could become latest again
+ if options.debug:
+ print _histline(event_id, entry)
+ if entry['build_id'] == binfo['id']:
+ if is_create:
+ #shouldn't happen
+ raise koji.GenericError, "Duplicate creation event found for %s in %s" \
+ % (nvr, tag_name)
+ else:
+ #we've been revoked
+ revoke_ts = entry['revoke_ts']
+ break
+ else:
+ if is_create:
+ #this build has become latest
+ replaced_ts = entry['create_ts']
+ if entry['active']:
+ #this entry not revoked yet, so we're done for this tag
+ break
+ #since this entry is revoked later, our build might eventually be
+ #uncovered, so we have to keep looking
+ others[entry['build_id']] = 1
+ else:
+ #other build revoked
+ #see if our build has resurfaced
+ if others.has_key(entry['build_id']):
+ del others[entry['build_id']]
+ if replaced_ts is not None and not others:
+ #we've become latest again
+ #(note: we're not revoked yet because that triggers a break above)
+ replaced_ts = None
+ latest_ts = entry['revoke_ts']
+ if last_latest is None:
+ timestamps = []
+ else:
+ timestamps = [last_latest]
+ if revoke_ts is None:
+ if replaced_ts is None:
+ #turns out we are still latest
+ is_latest = True
+ if options.debug:
+ print "%s is latest (again) in tag %s" % (nvr, tag_name)
+ break
+ else:
+ #replaced (but not revoked)
+ timestamps.append(replaced_ts)
+ if options.debug:
+ print "tag %s: %s not latest (replaced %s)" \
+ % (tag_name, nvr, time.asctime(time.localtime(replaced_ts)))
+ elif replaced_ts is None:
+ #revoked but not replaced
+ timestamps.append(revoke_ts)
+ if options.debug:
+ print "tag %s: %s not latest (revoked %s)" \
+ % (tag_name, nvr, time.asctime(time.localtime(revoke_ts)))
+ else:
+ #revoked AND replaced
+ timestamps.append(min(revoke_ts, replaced_ts))
+ if options.debug:
+ print "tag %s: %s not latest (revoked %s, replaced %s)" \
+ % (tag_name, nvr, time.asctime(time.localtime(revoke_ts)),
+ time.asctime(time.localtime(replaced_ts)))
+ last_latest = max(timestamps)
+ if last_latest > cutoff_ts:
+ if options.debug:
+ print "%s was latest past the cutoff" % nvr
+ is_latest = True
+ break
+ if is_latest:
+ continue
+ if is_protected:
+ continue
+ #not latest anywhere since cutoff, so we can remove all signed copies
+ rpms = session.listRPMs(buildID=binfo['id'])
+ session.multicall = True
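+ #queue one queryRPMSigs call per rpm and send them all in a single
+ #round trip via multiCall below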
+ for rpminfo in rpms:
+ session.queryRPMSigs(rpm_id=rpminfo['id'])
+ by_sig = {}
+ #index by sig
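+ #note: multiCall returns each successful result wrapped in a
+ #single-element list, hence the [sigs] unpacking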
+ for rpminfo, [sigs] in zip(rpms, session.multiCall()):
+ for sig in sigs:
+ sigkey = sig['sigkey']
+ by_sig.setdefault(sigkey, []).append(rpminfo)
+ builddir = koji.pathinfo.build(binfo)
+ build_files = 0
+ build_space = 0
+ if not by_sig and options.debug:
+ print "(build has no signatures)"
+ for sigkey, rpms in by_sig.iteritems():
+ mycount = 0
+ archdirs = {}
+ sigdirs = {}
+ for rpminfo in rpms:
+ signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rpminfo, sigkey))
+ try:
+ st = os.lstat(signedpath)
+ except OSError:
+ continue
+ if not stat.S_ISREG(st.st_mode):
+ #warn about this
+ print "Skipping %s. Not a regular file" % signedpath
+ continue
+ if st.st_mtime > cutoff_ts:
+ print "Skipping %s. File newer than cutoff" % signedpath
+ continue
+ if options.test:
+ print "Would have unlinked: %s" % signedpath
+ else:
+ if options.verbose:
+ print "Unlinking: %s" % signedpath
+ try:
+ os.unlink(signedpath)
+ except OSError, e:
+ print "Error removing %s: %s" % (signedpath, e)
+ print "This script needs write access to %s" % koji.BASEDIR
+ continue
+ mycount += 1
+ build_files += 1
+ build_space += st.st_size
+ #XXX - this makes some layout assumptions, but
+ # pathinfo doesn't report what we need
+ mydir = os.path.dirname(signedpath)
+ archdirs[mydir] = 1
+ sigdirs[os.path.dirname(mydir)] = 1
+ for dir in archdirs:
+ if options.test:
+ print "Would have removed dir: %s" % dir
+ else:
+ if options.verbose:
+ print "Removing dir: %s" % dir
+ try:
+ os.rmdir(dir)
+ except OSError, e:
+ print "Error removing %s: %s" % (signedpath, e)
+ if len(sigdirs) == 1:
+ dir = sigdirs.keys()[0]
+ if options.test:
+ print "Would have removed dir: %s" % dir
+ else:
+ if options.verbose:
+ print "Removing dir: %s" % dir
+ try:
+ os.rmdir(dir)
+ except OSError, e:
+ print "Error removing %s: %s" % (signedpath, e)
+ elif len(sigdirs) > 1:
+ print "Warning: more than one signature dir for %s: %r" % (sigkey, sigdirs)
+ if build_files:
+ total_files += build_files
+ total_space += build_space
+ if options.verbose:
+ print "Build: %s, Removed %i signed copies (%i bytes). Total: %i/%i" \
+ % (nvr, build_files, build_space, total_files, total_space)
+ elif options.debug and by_sig:
+ print "(build has no signed copies)"
+ print "--- Grand Totals ---"
+ print "Files: %i" % total_files
+ print "Bytes: %i" % total_space
+
+def handle_set_build_volume(options, session, args):
+ "[admin] Move a build to a different volume"
+ usage = _("usage: %prog set-build-volume volume n-v-r [n-v-r ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-v", "--verbose", action="store_true", help=_("Be verbose"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("You must provide a volume and at least one build"))
+ volinfo = session.getVolume(args[0])
+ if not volinfo:
+ print "No such volume: %s" % args[0]
+ return 1
+ activate_session(session)
+ builds = []
+ for nvr in args[1:]:
+ binfo = session.getBuild(nvr)
+ if not binfo:
+ print "No such build: %s" % nvr
+ elif binfo['volume_id'] == volinfo['id']:
+ print "Build %s already on volume %s" %(nvr, volinfo['name'])
+ else:
+ builds.append(binfo)
+ if not builds:
+ print "No builds to move"
+ return 1
+ for binfo in builds:
+ session.changeBuildVolume(binfo['id'], volinfo['id'])
+ if options.verbose:
+ print "%s: %s -> %s" % (binfo['nvr'], binfo['volume_name'], volinfo['name'])
+
+def handle_add_volume(options, session, args):
+ "[admin] Add a new storage volume"
+ usage = _("usage: %prog add-volume volume-name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("You must provide exactly one volume name"))
+ name = args[0]
+ volinfo = session.getVolume(name)
+ if volinfo:
+ print "Volume %s already exists" % name
+ return 1
+ activate_session(session)
+ volinfo = session.addVolume(name)
+ print "Added volume %(name)s with id %(id)i" % volinfo
+
+def handle_list_volumes(options, session, args):
+ "List storage volumes"
+ usage = _("usage: %prog list-volumes")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ for volinfo in session.listVolumes():
+ print volinfo['name']
+
+def handle_list_permissions(options, session, args):
+ "[admin] List user permissions"
+ usage = _("usage: %prog list-permissions [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--user", help=_("List permissions for the given user"))
+ parser.add_option("--mine", action="store_true", help=_("List your permissions"))
+ (options, args) = parser.parse_args(args)
+ if len(args) > 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+ if options.user:
+ user = session.getUser(options.user)
+ if not user:
+ print "User %s does not exist" % options.user
+ return 1
+ perms = session.getUserPerms(user['id'])
+ elif options.mine:
+ perms = session.getPerms()
+ else:
+ perms = [p['name'] for p in session.getAllPerms()]
+ for perm in perms:
+ print perm
+
+def handle_add_user(options, session, args):
+ "[admin] Add a user"
+ usage = _("usage: %prog add-user username [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--principal", help=_("The Kerberos principal for this user"))
+ parser.add_option("--disable", help=_("Prohibit logins by this user"), action="store_true")
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("You must specify the username of the user to add"))
+ elif len(args) > 1:
+ parser.error(_("This command only accepts one argument (username)"))
+ username = args[0]
+ if options.disable:
+ status = koji.USER_STATUS['BLOCKED']
+ else:
+ status = koji.USER_STATUS['NORMAL']
+ activate_session(session)
+ user_id = session.createUser(username, status=status, krb_principal=options.principal)
+ print "Added user %s (%i)" % (username, user_id)
+
+def handle_enable_user(options, session, args):
+ "[admin] Enable logins by a user"
+ usage = _("usage: %prog enable-user username")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("You must specify the username of the user to enable"))
+ elif len(args) > 1:
+ parser.error(_("This command only accepts one argument (username)"))
+ username = args[0]
+ activate_session(session)
+ session.enableUser(username)
+
+def handle_disable_user(options, session, args):
+ "[admin] Disable logins by a user"
+ usage = _("usage: %prog disable-user username")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("You must specify the username of the user to disable"))
+ elif len(args) > 1:
+ parser.error(_("This command only accepts one argument (username)"))
+ username = args[0]
+ activate_session(session)
+ session.disableUser(username)
+
+def handle_list_signed(options, session, args):
+ "[admin] List signed copies of rpms"
+ usage = _("usage: %prog list-signed [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--debug", action="store_true")
+ parser.add_option("--key", help=_("Only list RPMs signed with this key"))
+ parser.add_option("--build", help=_("Only list RPMs from this build"))
+ parser.add_option("--rpm", help=_("Only list signed copies for this RPM"))
+ parser.add_option("--tag", help=_("Only list RPMs within this tag"))
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+ qopts = {}
+ build_idx = {}
+ rpm_idx = {}
+ if options.key:
+ qopts['sigkey'] = options.key
+ if options.rpm:
+ rinfo = session.getRPM(options.rpm)
+ if rinfo is None:
+ parser.error(_("No such RPM: %s") % options.rpm)
+ rpm_idx[rinfo['id']] = rinfo
+ if rinfo.get('external_repo_id'):
+ print "External rpm: %(name)s-%(version)s-%(release)s.%(arch)s@%(external_repo_name)s" % rinfo
+ return 1
+ qopts['rpm_id'] = rinfo['id']
+ if options.build:
+ binfo = session.getBuild(options.build)
+ if binfo is None:
+ parser.error(_("No such build: %s") % options.build)
+ build_idx[binfo['id']] = binfo
+ sigs = []
+ rpms = session.listRPMs(buildID=binfo['id'])
+ for rinfo in rpms:
+ rpm_idx[rinfo['id']] = rinfo
+ sigs += session.queryRPMSigs(rpm_id=rinfo['id'], **qopts)
+ else:
+ sigs = session.queryRPMSigs(**qopts)
+ if options.tag:
+ print "getting tag listing"
+ rpms, builds = session.listTaggedRPMS(options.tag, inherit=False, latest=False)
+ print "got tag listing"
+ tagged = {}
+ for binfo in builds:
+ build_idx.setdefault(binfo['id'], binfo)
+ for rinfo in rpms:
+ rpm_idx.setdefault(rinfo['id'], rinfo)
+ tagged[rinfo['id']] = 1
+ #Now figure out which sig entries actually have live copies
+ for sig in sigs:
+ rpm_id = sig['rpm_id']
+ sigkey = sig['sigkey']
+ if options.tag:
+ if tagged.get(rpm_id) is None:
+ continue
+ rinfo = rpm_idx.get(rpm_id)
+ if not rinfo:
+ rinfo = session.getRPM(rpm_id)
+ rpm_idx[rinfo['id']] = rinfo
+ binfo = build_idx.get(rinfo['build_id'])
+ if not binfo:
+ binfo = session.getBuild(rinfo['build_id'])
+ build_idx[binfo['id']] = binfo
+ binfo['name'] = binfo['package_name']
+ builddir = koji.pathinfo.build(binfo)
+ signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rinfo, sigkey))
+ if not os.path.exists(signedpath):
+ if options.debug:
+ print "No copy: %s" % signedpath
+ continue
+ print signedpath
+
+def handle_import_in_place(options, session, args):
+ "[admin] Import RPMs that are already in place"
+ usage = _("usage: %prog import-in-place [options] package [package...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("At least one package must be specified"))
+ assert False
+ activate_session(session)
+ for nvr in args:
+ data = koji.parse_NVR(nvr)
+ print _("importing %s...") % nvr,
+ try:
+ session.importBuildInPlace(data)
+ except koji.GenericError, e:
+ print _("\nError importing: %s" % str(e).splitlines()[-1])
+ sys.stdout.flush()
+ else:
+ print _("done")
+ sys.stdout.flush()
+
+def handle_import_archive(options, session, args):
+ "[admin] Import an archive file and associate it with a build"
+ usage = _("usage: %prog import-archive build-id|n-v-r /path/to/archive...")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--create-build", action="store_true", help=_("Auto-create builds as needed"))
+ parser.add_option("--link", action="store_true", help=_("Attempt to hardlink instead of uploading"))
+ parser.add_option("--type", help=_("The type of archive being imported. Currently supported types: maven, win, image"))
+ parser.add_option("--type-info", help=_("Type-specific information to associate with the archives. "
+ "For Maven archives this should be a local path to a .pom file. "
+ "For Windows archives this should be relpath:platforms[:flags])) "
+ "Images need an arch"))
+ (suboptions, args) = parser.parse_args(args)
+
+ if len(args) < 2:
+ parser.error(_("You must specify a build ID or N-V-R and an archive to import"))
+ assert False
+
+ activate_session(session)
+
+ if not suboptions.type:
+ parser.error(_("You must specify an archive type"))
+ assert False
+ if suboptions.type == 'maven':
+ if not (session.hasPerm('maven-import') or session.hasPerm('admin')):
+ parser.error(_("This action requires the maven-import privilege"))
+ assert False
+ if not suboptions.type_info:
+ parser.error(_("--type-info must point to a .pom file when importing Maven archives"))
+ assert False
+ pom_info = koji.parse_pom(suboptions.type_info)
+ maven_info = koji.pom_to_maven_info(pom_info)
+ suboptions.type_info = maven_info
+ elif suboptions.type == 'win':
+ if not (session.hasPerm('win-import') or session.hasPerm('admin')):
+ parser.error(_("This action requires the win-import privilege"))
+ assert False
+ if not suboptions.type_info:
+ parser.error(_("--type-info must be specified"))
+ assert False
+ type_info = suboptions.type_info.split(':', 2)
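+ #split into at most three fields (relpath, platforms, optional flags)
+ #so any ':' inside the flags field survives intact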
+ if len(type_info) < 2:
+ parser.error(_("--type-info must be in relpath:platforms[:flags] format"))
+ win_info = {'relpath': type_info[0], 'platforms': type_info[1].split()}
+ if len(type_info) > 2:
+ win_info['flags'] = type_info[2].split()
+ else:
+ win_info['flags'] = []
+ suboptions.type_info = win_info
+ elif suboptions.type == 'image':
+ if not (session.hasPerm('image-import') or session.hasPerm('admin')):
+ parser.error(_("This action requires the image-import privilege"))
+ assert False
+ if not suboptions.type_info:
+ parser.error(_("--type-info must be specified"))
+ assert False
+ image_info = {'arch': suboptions.type_info}
+ suboptions.type_info = image_info
+ else:
+ parser.error(_("Unsupported archive type: %s" % suboptions.type))
+ assert False
+
+ buildinfo = session.getBuild(arg_filter(args[0]))
+ if not buildinfo:
+ if not suboptions.create_build:
+ parser.error(_("No such build: %s") % args[0])
+ buildinfo = koji.parse_NVR(args[0])
+ if buildinfo['epoch'] == '':
+ buildinfo['epoch'] = None
+ else:
+ buildinfo['epoch'] = int(buildinfo['epoch'])
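+ #koji.parse_NVR leaves epoch as a string, so the block above
+ #normalizes it to None or int before passing the build info on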
+ if suboptions.type == 'maven':
+ # --type-info should point to a local .pom file
+ session.createMavenBuild(buildinfo, suboptions.type_info)
+ elif suboptions.type == 'win':
+ # We're importing, so we don't know what platform the build
+ # was run on. Use "import" as a placeholder.
+ session.createWinBuild(buildinfo, {'platform': 'import'})
+ elif suboptions.type == 'image':
+ # --type-info should have an arch of the image
+ session.createImageBuild(buildinfo)
+ else:
+ # should get caught above
+ assert False
+
+ for filepath in args[1:]:
+ filename = os.path.basename(filepath)
+ print "Uploading archive: %s" % filename
+ serverdir = _unique_path('cli-import')
+ if _running_in_bg() or suboptions.noprogress:
+ callback = None
+ else:
+ callback = _progress_callback
+ if suboptions.link:
+ linked_upload(filepath, serverdir)
+ else:
+ session.uploadWrapper(filepath, serverdir, callback=callback)
+ print
+ serverpath = "%s/%s" % (serverdir, filename)
+ session.importArchive(serverpath, buildinfo, suboptions.type, suboptions.type_info)
+ print "Imported: %s" % filename
+
+def handle_grant_permission(options, session, args):
+ "[admin] Grant a permission to a user"
+ usage = _("usage: %prog grant-permission <permission> <user> [<user> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--new", action="store_true", help=_("Create a new permission"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a permission and at least one user"))
+ assert False
+ activate_session(session)
+ perm = args[0]
+ names = args[1:]
+ users = []
+ for n in names:
+ user = session.getUser(n)
+ if user is None:
+ parser.error(_("No such user: %s" % n))
+ assert False
+ users.append(user)
+ kwargs = {}
+ if options.new:
+ kwargs['create'] = True
+ for user in users:
+ session.grantPermission(user['name'], perm, **kwargs)
+
+def handle_revoke_permission(options, session, args):
+ "[admin] Revoke a permission from a user"
+ usage = _("usage: %prog revoke-permission <permission> <user> [<user> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a permission and at least one user"))
+ assert False
+ activate_session(session)
+ perm = args[0]
+ names = args[1:]
+ users = []
+ for n in names:
+ user = session.getUser(n)
+ if user is None:
+ parser.error(_("No such user: %s" % n))
+ assert False
+ users.append(user)
+ for user in users:
+ session.revokePermission(user['name'], perm)
+
+def anon_handle_latest_build(options, session, args):
+ "Print the latest builds for a tag"
+ usage = _("usage: %prog latest-build [options] tag package [package...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--arch", help=_("List all of the latest packages for this arch"))
+ parser.add_option("--all", action="store_true", help=_("List all of the latest packages for this tag"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not print the header information"), default=options.quiet)
+ parser.add_option("--paths", action="store_true", help=_("Show the file paths"))
+ parser.add_option("--type", help=_("Show builds of the given type only. Currently supported types: maven"))
+ (options, args) = parser.parse_args(args)
+ if len(args) == 0:
+ parser.error(_("A tag name must be specified"))
+ assert False
+ activate_session(session)
+ if options.all:
+ if len(args) > 1:
+ parser.error(_("A package name may not be combined with --all"))
+ assert False
+ # Set None as the package argument
+ args.append(None)
+ else:
+ if len(args) < 2:
+ parser.error(_("A tag name and package name must be specified"))
+ assert False
+ pathinfo = koji.PathInfo()
+
+ for pkg in args[1:]:
+ if options.arch:
+ rpms, builds = session.getLatestRPMS(args[0], package=pkg, arch=options.arch)
+ builds_hash = dict([(x['build_id'], x) for x in builds])
+ data = rpms
+ if options.paths:
+ for x in data:
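+ #pathinfo.build() wants the package name in 'name', while
+ #pathinfo.rpm() wants the rpm's own name, so keep an untouched
+ #copy of the rpm info for building the rpm path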
+ z = x.copy()
+ x['name'] = builds_hash[x['build_id']]['package_name']
+ x['path'] = os.path.join(pathinfo.build(x), pathinfo.rpm(z))
+ fmt = "%(path)s"
+ else:
+ fmt = "%(name)s-%(version)s-%(release)s.%(arch)s"
+ else:
+ kwargs = {'package': pkg}
+ if options.type:
+ kwargs['type'] = options.type
+ data = session.getLatestBuilds(args[0], **kwargs)
+ if options.paths:
+ if options.type == 'maven':
+ for x in data:
+ x['path'] = pathinfo.mavenbuild(x)
+ fmt = "%(path)-40s %(tag_name)-20s %(maven_group_id)-20s %(maven_artifact_id)-20s %(owner_name)s"
+ else:
+ for x in data:
+ x['path'] = pathinfo.build(x)
+ fmt = "%(path)-40s %(tag_name)-20s %(owner_name)s"
+ else:
+ if options.type == 'maven':
+ fmt = "%(nvr)-40s %(tag_name)-20s %(maven_group_id)-20s %(maven_artifact_id)-20s %(owner_name)s"
+ else:
+ fmt = "%(nvr)-40s %(tag_name)-20s %(owner_name)s"
+ if not options.quiet:
+ if options.type == 'maven':
+ print "%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by")
+ print "%s %s %s %s %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16)
+ else:
+ print "%-40s %-20s %s" % ("Build","Tag","Built by")
+ print "%s %s %s" % ("-"*40, "-"*20, "-"*16)
+ options.quiet = True
+
+ output = [ fmt % x for x in data]
+ output.sort()
+ for line in output:
+ print line
+
+
+def anon_handle_list_api(options, session, args):
+ "Print the list of XML-RPC APIs"
+ usage = _("usage: %prog list-api [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+ tmplist = [(x['name'], x) for x in session._listapi()]
+ tmplist.sort()
+ funcs = [x[1] for x in tmplist]
+ for x in funcs:
+ if 'argdesc' in x:
+ args = x['argdesc']
+ elif x['args']:
+ # older servers may not provide argdesc
+ expanded = []
+ for arg in x['args']:
+ if type(arg) is str:
+ expanded.append(arg)
+ else:
+ expanded.append('%s=%s' % (arg[0], arg[1]))
+ args = "(%s)" % ", ".join(expanded)
+ else:
+ args = "()"
+ print '%s%s' % (x['name'], args)
+ if x['doc']:
+ print " description: %s" % x['doc']
+
+def anon_handle_list_tagged(options, session, args):
+ "List the builds or rpms in a tag"
+ usage = _("usage: %prog list-tagged [options] tag [package]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--arch", help=_("List rpms for this arch"))
+ parser.add_option("--rpms", action="store_true", help=_("Show rpms instead of builds"))
+ parser.add_option("--inherit", action="store_true", help=_("Follow inheritance"))
+ parser.add_option("--latest", action="store_true", help=_("Only show the latest builds/rpms"))
+ parser.add_option("--latest-n", type='int', metavar="N", help=_("Only show the latest N builds/rpms"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not print the header information"), default=options.quiet)
+ parser.add_option("--paths", action="store_true", help=_("Show the file paths"))
+ parser.add_option("--sigs", action="store_true", help=_("Show signatures"))
+ parser.add_option("--type", help=_("Show builds of the given type only. Currently supported types: maven, win, image"))
+ parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))
+ parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at timestamp"))
+ parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))
+ (options, args) = parser.parse_args(args)
+ if len(args) == 0:
+ parser.error(_("A tag name must be specified"))
+ assert False
+ elif len(args) > 2:
+ parser.error(_("Only one package name may be specified"))
+ assert False
+ activate_session(session)
+ pathinfo = koji.PathInfo()
+ package = None
+ if len(args) > 1:
+ package = args[1]
+ tag = args[0]
+ opts = {}
+ for key in ('latest','inherit'):
+ opts[key] = getattr(options, key)
+ if options.latest_n is not None:
+ opts['latest'] = options.latest_n
+ if package:
+ opts['package'] = package
+ if options.arch:
+ options.rpms = True
+ opts['arch'] = options.arch
+ if options.sigs:
+ opts['rpmsigs'] = True
+ options.rpms = True
+ if options.type:
+ opts['type'] = options.type
+ event = koji.util.eventFromOpts(session, options)
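+ #eventFromOpts resolves the --event, --ts, or --repo option (if any
+ #were given) into a single event dict with 'id' and 'ts'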
+ if event:
+ opts['event'] = event['id']
+ event['timestr'] = time.asctime(time.localtime(event['ts']))
+ print "Querying at event %(id)i (%(timestr)s)" % event
+
+ if options.rpms:
+ rpms, builds = session.listTaggedRPMS(tag, **opts)
+ data = rpms
+ if options.paths:
+ build_idx = dict([(b['id'],b) for b in builds])
+ for rinfo in data:
+ build = build_idx[rinfo['build_id']]
+ builddir = pathinfo.build(build)
+ if options.sigs:
+ sigkey = rinfo['sigkey']
+ signedpath = os.path.join(builddir, pathinfo.signed(rinfo, sigkey))
+ if os.path.exists(signedpath):
+ rinfo['path'] = signedpath
+ else:
+ rinfo['path'] = os.path.join(builddir, pathinfo.rpm(rinfo))
+ fmt = "%(path)s"
+ data = [x for x in data if x.has_key('path')]
+ else:
+ fmt = "%(name)s-%(version)s-%(release)s.%(arch)s"
+ if options.sigs:
+ fmt = "%(sigkey)s " + fmt
+ else:
+ data = session.listTagged(tag, **opts)
+ if options.paths:
+ if options.type == 'maven':
+ for x in data:
+ x['path'] = pathinfo.mavenbuild(x)
+ fmt = "%(path)-40s %(tag_name)-20s %(maven_group_id)-20s %(maven_artifact_id)-20s %(owner_name)s"
+ else:
+ for x in data:
+ x['path'] = pathinfo.build(x)
+ fmt = "%(path)-40s %(tag_name)-20s %(owner_name)s"
+ else:
+ if options.type == 'maven':
+ fmt = "%(nvr)-40s %(tag_name)-20s %(maven_group_id)-20s %(maven_artifact_id)-20s %(owner_name)s"
+ else:
+ fmt = "%(nvr)-40s %(tag_name)-20s %(owner_name)s"
+ if not options.quiet:
+ if options.type == 'maven':
+ print "%-40s %-20s %-20s %-20s %s" % ("Build", "Tag", "Group Id", "Artifact Id", "Built by")
+ print "%s %s %s %s %s" % ("-"*40, "-"*20, "-"*20, "-"*20, "-"*16)
+ else:
+ print "%-40s %-20s %s" % ("Build","Tag","Built by")
+ print "%s %s %s" % ("-"*40, "-"*20, "-"*16)
+
+ output = [ fmt % x for x in data]
+ output.sort()
+ for line in output:
+ print line
+
+def anon_handle_list_buildroot(options, session, args):
+ "List the rpms used in or built in a buildroot"
+ usage = _("usage: %prog list-buildroot [options] buildroot-id")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--paths", action="store_true", help=_("Show the file paths"))
+ parser.add_option("--built", action="store_true", help=_("Show the built rpms"))
+ parser.add_option("--verbose", "-v", action="store_true", help=_("Show more information"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ activate_session(session)
+ buildrootID = int(args[0])
+ opts = {}
+ if options.built:
+ opts['buildrootID'] = buildrootID
+ else:
+ opts['componentBuildrootID'] = buildrootID
+ data = session.listRPMs(**opts)
+
+ fmt = "%(nvr)s.%(arch)s"
+ order = [(fmt % x, x) for x in data]
+ order.sort()
+ for nvra, rinfo in order:
+ if options.verbose and rinfo.get('is_update'):
+ print nvra, "[update]"
+ else:
+ print nvra
+
+def anon_handle_list_untagged(options, session, args):
+ "List untagged builds"
+ usage = _("usage: %prog list-untagged [options] [package]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--paths", action="store_true", help=_("Show the file paths"))
+ parser.add_option("--show-references", action="store_true", help=_("Show build references"))
+ (options, args) = parser.parse_args(args)
+ if len(args) > 1:
+ parser.error(_("Only one package name may be specified"))
+ assert False
+ activate_session(session)
+ package = None
+ if len(args) > 0:
+ package = args[0]
+ opts = {}
+ if package:
+ opts['name'] = package
+ pathinfo = koji.PathInfo()
+
+ data = session.untaggedBuilds(**opts)
+ if options.show_references:
+ print "(Showing build references)"
+ refs = {}
+ refs2 = {} #reverse map
+ for x in session.buildMap():
+ refs.setdefault(x['used'], {}).setdefault(x['built'], 1)
+ refs2.setdefault(x['built'], {}).setdefault(x['used'], 1)
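+ #refs appears to map a build to the builds built using it;
+ #refs2 is the reverse map (a build to the builds it was built from)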
+ #XXX - need to ignore refs to unreferenced builds
+ for x in data:
+ builds = refs.get(x['id'])
+ if builds:
+ x['refs'] = "%s" % builds
+ else:
+ x['refs'] = ''
+ #data = [x for x in data if not refs.has_key(x['id'])]
+ if options.paths:
+ for x in data:
+ x['path'] = pathinfo.build(x)
+ fmt = "%(path)s"
+ else:
+ fmt = "%(name)s-%(version)s-%(release)s"
+ if options.show_references:
+ fmt = fmt + " %(refs)s"
+
+ output = [ fmt % x for x in data]
+ output.sort()
+ for line in output:
+ print line
+
+def print_group_list_req_group(group):
+ print " @%(name)s [%(tag_name)s]" % group
+
+def print_group_list_req_package(pkg):
+ print " %(package)s: %(basearchonly)s, %(type)s [%(tag_name)s]" % pkg
+
+def anon_handle_list_groups(options, session, args):
+ "Print the group listings"
+ usage = _("usage: %prog list-groups [options] <tag> [group]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))
+ parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at timestamp"))
+ parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1 or len(args) > 2:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ opts = {}
+ activate_session(session)
+ event = koji.util.eventFromOpts(session, options)
+ if event:
+ opts['event'] = event['id']
+ event['timestr'] = time.asctime(time.localtime(event['ts']))
+ print "Querying at event %(id)i (%(timestr)s)" % event
+ tags = dict([(x['id'], x['name']) for x in session.listTags()])
+ tmp_list = [(x['name'], x) for x in session.getTagGroups(args[0], **opts)]
+ tmp_list.sort()
+ groups = [x[1] for x in tmp_list]
+ for group in groups:
+ if len(args) > 1 and group['name'] != args[1]:
+ continue
+ print "%s [%s]" % (group['name'], tags.get(group['tag_id'], group['tag_id']))
+ groups = [(x['name'], x) for x in group['grouplist']]
+ groups.sort()
+ for x in [x[1] for x in groups]:
+ x['tag_name'] = tags.get(x['tag_id'], x['tag_id'])
+ print_group_list_req_group(x)
+ pkgs = [(x['package'], x) for x in group['packagelist']]
+ pkgs.sort()
+ for x in [x[1] for x in pkgs]:
+ x['tag_name'] = tags.get(x['tag_id'], x['tag_id'])
+ print_group_list_req_package(x)
+
+def handle_add_group_pkg(options, session, args):
+ "[admin] Add a package to a group's package listing"
+ usage = _("usage: %prog add-group-pkg [options] <tag> <group> <pkg> [<pkg>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 3:
+ parser.error(_("You must specify a tag name, group name, and one or more package names"))
+ assert False
+ tag = args[0]
+ group = args[1]
+ activate_session(session)
+ for pkg in args[2:]:
+ session.groupPackageListAdd(tag, group, pkg)
+
+def handle_block_group_pkg(options, session, args):
+ "[admin] Block a package from a group's package listing"
+ usage = _("usage: %prog block-group-pkg [options] <tag> <group> <pkg> [<pkg>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 3:
+ parser.error(_("You must specify a tag name, group name, and one or more package names"))
+ assert False
+ tag = args[0]
+ group = args[1]
+ activate_session(session)
+ for pkg in args[2:]:
+ session.groupPackageListBlock(tag, group, pkg)
+
+def handle_unblock_group_pkg(options, session, args):
+ "[admin] Unblock a package from a group's package listing"
+ usage = _("usage: %prog unblock-group-pkg [options] <tag> <group> <pkg> [<pkg>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 3:
+ parser.error(_("You must specify a tag name, group name, and one or more package names"))
+ assert False
+ tag = args[0]
+ group = args[1]
+ activate_session(session)
+ for pkg in args[2:]:
+ session.groupPackageListUnblock(tag, group, pkg)
+
+def handle_add_group_req(options, session, args):
+ "[admin] Add a group to a group's required list"
+ usage = _("usage: %prog add-group-req [options] <tag> <target group> <required group>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 3:
+ parser.error(_("You must specify a tag name and two group names"))
+ assert False
+ tag = args[0]
+ group = args[1]
+ req = args[2]
+ activate_session(session)
+ session.groupReqListAdd(tag, group, req)
+
+def handle_block_group_req(options, session, args):
+ "[admin] Block a group's requirement listing"
+ usage = _("usage: %prog block-group-req [options] <tag> <group> <blocked req>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 3:
+ parser.error(_("You must specify a tag name and two group names"))
+ assert False
+ tag = args[0]
+ group = args[1]
+ req = args[2]
+ activate_session(session)
+ session.groupReqListBlock(tag, group, req)
+
+def handle_unblock_group_req(options, session, args):
+ "[admin] Unblock a group's requirement listing"
+ usage = _("usage: %prog unblock-group-req [options] <tag> <group> <requirement>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 3:
+ parser.error(_("You must specify a tag name and two group names"))
+ assert False
+ tag = args[0]
+ group = args[1]
+ req = args[2]
+ activate_session(session)
+ session.groupReqListUnblock(tag, group, req)
+
+def anon_handle_list_hosts(options, session, args):
+ "Print the host listing"
+ usage = _("usage: %prog list-hosts [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--arch", action="append", default=[], help=_("Specify an architecture"))
+ parser.add_option("--channel", help=_("Specify a channel"))
+ parser.add_option("--ready", action="store_true", help=_("Limit to ready hosts"))
+ parser.add_option("--not-ready", action="store_false", dest="ready", help=_("Limit to not ready hosts"))
+ parser.add_option("--enabled", action="store_true", help=_("Limit to enabled hosts"))
+ parser.add_option("--not-enabled", action="store_false", dest="enabled", help=_("Limit to not enabled hosts"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not print header information"), default=options.quiet)
+ (options, args) = parser.parse_args(args)
+ opts = {}
+ activate_session(session)
+ if options.arch:
+ opts['arches'] = options.arch
+ if options.channel:
+ channel = session.getChannel(options.channel)
+ if not channel:
+ parser.error(_('Unknown channel: %s' % options.channel))
+ assert False
+ opts['channelID'] = channel['id']
+ if options.ready is not None:
+ opts['ready'] = options.ready
+ if options.enabled is not None:
+ opts['enabled'] = options.enabled
+ tmp_list = [(x['name'], x) for x in session.listHosts(**opts)]
+ tmp_list.sort()
+ hosts = [x[1] for x in tmp_list]
+
+ def yesno(x):
+ if x: return 'Y'
+ else: return 'N'
+
+ # pull in the last update using multicall to speed it up a bit
+ session.multicall = True
+ for host in hosts:
+ session.getLastHostUpdate(host['id'])
+ updateList = session.multiCall()
+
+ for host, [update] in zip(hosts, updateList):
+ if update is None:
+ host['update'] = '-'
+ else:
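+ #the last-update value is a timestamp string; trim any
+ #fractional seconds for display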
+ host['update'] = update.split('.')[0]
+ host['enabled'] = yesno(host['enabled'])
+ host['ready'] = yesno(host['ready'])
+ host['arches'] = ','.join(host['arches'].split())
+
+ if not options.quiet:
+ print "Hostname Enb Rdy Load/Cap Arches Last Update"
+ for host in hosts:
+ print "%(name)-28s %(enabled)-3s %(ready)-3s %(task_load)4.1f/%(capacity)-3.1f %(arches)-16s %(update)s" % host
+
+def anon_handle_list_pkgs(options, session, args):
+ "Print the package listing for tag or for owner"
+ usage = _("usage: %prog list-pkgs [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--owner", help=_("Specify owner"))
+ parser.add_option("--tag", help=_("Specify tag"))
+ parser.add_option("--package", help=_("Specify package"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not print header information"), default=options.quiet)
+ parser.add_option("--noinherit", action="store_true", help=_("Don't follow inheritance"))
+ parser.add_option("--show-blocked", action="store_true", help=_("Show blocked packages"))
+ parser.add_option("--show-dups", action="store_true", help=_("Show superseded owners"))
+ parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))
+ parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at timestamp"))
+ parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+ opts = {}
+ if options.owner:
+ user = session.getUser(options.owner)
+ if user is None:
+ parser.error(_("Invalid user"))
+ assert False
+ opts['userID'] = user['id']
+ if options.tag:
+ tag = session.getTag(options.tag)
+ if tag is None:
+ parser.error(_("Invalid tag"))
+ assert False
+ opts['tagID'] = tag['id']
+ if options.package:
+ opts['pkgID'] = options.package
+ allpkgs = False
+ if not opts:
+ # no limiting clauses were specified
+ allpkgs = True
+ opts['inherited'] = not options.noinherit
+ #hiding dups only makes sense if we're querying a tag
+ if options.tag:
+ opts['with_dups'] = options.show_dups
+ else:
+ opts['with_dups'] = True
+ event = koji.util.eventFromOpts(session, options)
+ if event:
+ opts['event'] = event['id']
+ event['timestr'] = time.asctime(time.localtime(event['ts']))
+ print "Querying at event %(id)i (%(timestr)s)" % event
+ data = session.listPackages(**opts)
+ if not data:
+ print "(no matching packages)"
+ return 1
+ if not options.quiet:
+ if allpkgs:
+ print "Package"
+ print '-'*23
+ else:
+ print "%-23s %-23s %-16s %-15s" % ('Package','Tag','Extra Arches','Owner')
+ print "%s %s %s %s" % ('-'*23,'-'*23,'-'*16,'-'*15)
+ for pkg in data:
+ if allpkgs:
+ print pkg['package_name']
+ else:
+ if not options.show_blocked and pkg.get('blocked',False):
+ continue
+ if pkg.has_key('tag_id'):
+ if pkg['extra_arches'] is None:
+ pkg['extra_arches'] = ""
+ fmt = "%(package_name)-23s %(tag_name)-23s %(extra_arches)-16s %(owner_name)-15s"
+ if pkg.get('blocked',False):
+ fmt += " [BLOCKED]"
+ else:
+ fmt = "%(package_name)s"
+ print fmt % pkg
+
+def anon_handle_rpminfo(options, session, args):
+ "Print basic information about an RPM"
+ usage = _("usage: %prog rpminfo [options] <n-v-r.a> [<n-v-r.a> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--buildroots", action="store_true", help=_("show buildroots the rpm was used in"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify an RPM"))
+ assert False
+ activate_session(session)
+ for rpm in args:
+ info = session.getRPM(rpm)
+ if info is None:
+ print "No such rpm: %s\n" % rpm
+ continue
+ if info['epoch'] is None:
+ info['epoch'] = ""
+ else:
+ info['epoch'] = str(info['epoch']) + ":"
+ if not info.get('external_repo_id', 0):
+ buildinfo = session.getBuild(info['build_id'])
+ buildinfo['name'] = buildinfo['package_name']
+ buildinfo['arch'] = 'src'
+ if buildinfo['epoch'] is None:
+ buildinfo['epoch'] = ""
+ else:
+ buildinfo['epoch'] = str(buildinfo['epoch']) + ":"
+ print "RPM: %(epoch)s%(name)s-%(version)s-%(release)s.%(arch)s [%(id)d]" %info
+ if info.get('external_repo_id'):
+ repo = session.getExternalRepo(info['external_repo_id'])
+ print "External Repository: %(name)s [%(id)i]" % repo
+ print "External Repository url: %(url)s" % repo
+ else:
+ print "RPM Path: %s" % os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(info))
+ print "SRPM: %(epoch)s%(name)s-%(version)s-%(release)s [%(id)d]" % buildinfo
+ print "SRPM Path: %s" % os.path.join(koji.pathinfo.build(buildinfo), koji.pathinfo.rpm(buildinfo))
+ print "Built: %s" % time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime(info['buildtime']))
+ print "Payload: %(payloadhash)s" %info
+ print "Size: %(size)s" %info
+ if not info.get('external_repo_id', 0):
+ print "Build ID: %(build_id)s" %info
+ if info['buildroot_id'] is None:
+ print "No buildroot data available"
+ else:
+ br_info = session.getBuildroot(info['buildroot_id'])
+ print "Buildroot: %(id)i (tag %(tag_name)s, arch %(arch)s, repo %(repo_id)i)" % br_info
+ print "Build Host: %(host_name)s" % br_info
+ print "Build Task: %(task_id)i" % br_info
+ if options.buildroots:
+ br_list = session.listBuildroots(rpmID=info['id'], queryOpts={'order':'buildroot.id'})
+ print "Used in %i buildroots:" % len(br_list)
+ if len(br_list):
+ print " %8s %-28s %-8s %-29s" % ('id','build tag','arch','build host')
+ print " %s %s %s %s" % ('-'*8, '-'*28, '-'*8, '-'*29)
+ for br_info in br_list:
+ print " %(id)8i %(tag_name)-28s %(arch)-8s %(host_name)-29s" % br_info
+
+
+def anon_handle_buildinfo(options, session, args):
+ "Print basic information about a build"
+ usage = _("usage: %prog buildinfo [options] <n-v-r> [<n-v-r> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--changelog", action="store_true", help=_("Show the changelog for the build"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify a build"))
+ assert False
+ activate_session(session)
+ for build in args:
+ if build.isdigit():
+ build = int(build)
+ info = session.getBuild(build)
+ if info is None:
+ print "No such build: %s\n" % build
+ continue
+ task = None
+ if info['task_id']:
+ task = session.getTaskInfo(info['task_id'], request=True)
+ taglist = []
+ for tag in session.listTags(build):
+ taglist.append(tag['name'])
+ info['arch'] = 'src'
+ info['state'] = koji.BUILD_STATES[info['state']]
+ print "BUILD: %(name)s-%(version)s-%(release)s [%(id)d]" % info
+ print "State: %(state)s" % info
+ print "Built by: %(owner_name)s" % info
+ if 'volume_name' in info:
+ print "Volume: %(volume_name)s" % info
+ if task:
+ print "Task: %s %s" % (task['id'], koji.taskLabel(task))
+ else:
+ print "Task: none"
+ print "Finished: %s" % koji.formatTimeLong(info['completion_time'])
+ maven_info = session.getMavenBuild(info['id'])
+ if maven_info:
+ print "Maven groupId: %s" % maven_info['group_id']
+ print "Maven artifactId: %s" % maven_info['artifact_id']
+ print "Maven version: %s" % maven_info['version']
+ win_info = session.getWinBuild(info['id'])
+ if win_info:
+ print "Windows build platform: %s" % win_info['platform']
+ print "Tags: %s" % ' '.join(taglist)
+ maven_archives = session.listArchives(buildID=info['id'], type='maven')
+ if maven_archives:
+ print "Maven archives:"
+ for archive in maven_archives:
+ print os.path.join(koji.pathinfo.mavenbuild(info), koji.pathinfo.mavenfile(archive))
+ win_archives = session.listArchives(buildID=info['id'], type='win')
+ if win_archives:
+ print "Windows archives:"
+ for archive in win_archives:
+ print os.path.join(koji.pathinfo.winbuild(info), koji.pathinfo.winfile(archive))
+ rpms = session.listRPMs(buildID=info['id'])
+ image_info = session.getImageBuild(info['id'])
+ img_archives = session.listArchives(buildID=info['id'], type='image')
+ if img_archives:
+ print 'Image archives:'
+ for archive in img_archives:
+ print os.path.join(koji.pathinfo.imagebuild(info), archive['filename'])
+ if rpms:
+ print "RPMs:"
+ for rpm in rpms:
+ print os.path.join(koji.pathinfo.build(info), koji.pathinfo.rpm(rpm))
+ if options.changelog:
+ changelog = session.getChangelogEntries(info['id'])
+ if changelog:
+ print "Changelog:"
+ print koji.util.formatChangelog(changelog)
+
+def handle_clone_tag(options, session, args):
+ "[admin] Duplicate the contents of one tag onto another tag"
+ usage = _("usage: %prog clone-tag [options] <src-tag> <dst-tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-v","--verbose", action="store_true", help=_("show changes"),)
+ parser.add_option("-f","--force", action="store_true", help=_("override tag locks if necessary"),)
+ parser.add_option("-n","--test", action="store_true", help=_("test mode"))
+ (options, args) = parser.parse_args(args)
+
+ if len(args) != 2:
+ parser.error(_("This command takes two argument: <src-tag> <dst-tag>"))
+ assert False
+ activate_session(session)
+
+ if not session.hasPerm('admin') and not options.test:
+ print "This action requires admin privileges"
+ return
+
+ if args[0] == args[1]:
+ sys.stdout.write('Source and destination tags must be different.\n')
+ return
+ # store tags.
+ srctag = session.getTag(args[0])
+ dsttag = session.getTag(args[1])
+ if not srctag:
+ sys.stdout.write("Unknown src-tag: %s\n" % args[0])
+ return
+ if (srctag['locked'] and not options.force) or (dsttag and dsttag['locked'] and not options.force):
+ print _("Error: You are attempting to clone from or to a tag which is locked.")
+ print _("Please use --force if this is what you really want to do.")
+ return
+
+ # init debug lists.
+ chgpkglist=[]
+ chgbldlist=[]
+ chggrplist=[]
+ # case of brand new dst-tag.
+ if not dsttag:
+ # create a new tag, copy srctag header.
+ if not options.test:
+ session.createTag(args[1], parent=None, arches=srctag['arches'], perm=srctag['perm_id'], locked=srctag['locked'])
+ newtag = session.getTag(args[1]) # store the new tag, need its assigned id.
+ # get pkglist of src-tag, including inherited packages.
+ srcpkgs = session.listPackages(tagID=srctag['id'],inherited=True)
+ srcpkgs.sort(lambda x, y: cmp(x['package_name'],y['package_name']))
+ for pkgs in srcpkgs:
+ # for each package add one entry in the new tag.
+ chgpkglist.append(('[new]',pkgs['package_name'],pkgs['blocked'],pkgs['owner_name'],pkgs['tag_name']))
+ if not options.test:
+ # add packages.
+ session.packageListAdd(newtag['name'],pkgs['package_name'],
+ owner=pkgs['owner_name'],block=pkgs['blocked'],
+ extra_arches=pkgs['extra_arches'])
+ # get --all latest builds from src tag
+ builds=session.getLatestBuilds(srctag['name'])
+ for build in builds:
+ build['name']=build['package_name'] # add missing 'name' field.
+ chgbldlist.append(('[new]',build['package_name'],
+ build['nvr'],koji.BUILD_STATES[build['state']],
+ build['owner_name'],build['tag_name']))
+ # copy latest builds into new tag
+ if not options.test:
+ session.tagBuildBypass(newtag['name'], build, force=options.force)
+ # Copy the group data
+ srcgroups = session.getTagGroups(srctag['name'])
+ for group in srcgroups:
+ if not options.test:
+ session.groupListAdd(newtag['name'], group['name'])
+ for pkg in group['packagelist']:
+ if not options.test:
+ session.groupPackageListAdd(newtag['name'], group['name'], pkg['package'], block=pkg['blocked'])
+ chggrplist.append(('[new]', pkg['package'], group['name']))
+ # case of existing dst-tag.
+ if dsttag:
+ # get fresh list of packages & builds into maps.
+ srcpkgs = {}
+ for pkg in session.listPackages(tagID=srctag['id'],inherited=True):
+ srcpkgs[pkg['package_name']] = pkg
+ dstpkgs = {}
+ for pkg in session.listPackages(tagID=dsttag['id'],inherited=True):
+ dstpkgs[pkg['package_name']] = pkg
+ srclblds = {}
+ for build in session.getLatestBuilds(srctag['name']):
+ srclblds[build['nvr']] = build
+ dstlblds = {}
+ for build in session.getLatestBuilds(dsttag['name']):
+ dstlblds[build['nvr']] = build
+ srcgroups = {}
+ for group in session.getTagGroups(srctag['name']):
+ srcgroups[group['name']] = group
+ dstgroups = {}
+ for group in session.getTagGroups(dsttag['name']):
+ dstgroups[group['name']] = group
+ #construct to-do lists.
+ paddlist=[] # list containing new packages to be added from src tag
+ for (package_name,pkg) in srcpkgs.iteritems():
+ if not dstpkgs.has_key(package_name):
+ paddlist.append(pkg)
+ paddlist.sort(lambda x, y: cmp(x['package_name'],y['package_name']))
+ pdellist=[] # list containing packages in dst tag that are no longer in src tag
+ for (package_name,pkg) in dstpkgs.iteritems():
+ if not srcpkgs.has_key(package_name):
+ pdellist.append(pkg)
+ pdellist.sort(lambda x, y: cmp(x['package_name'],y['package_name']))
+ baddlist=[] # list containing new builds to be added from src tag
+ for (nvr,lbld) in srclblds.iteritems():
+ if not dstlblds.has_key(nvr):
+ baddlist.append(lbld)
+ baddlist.sort(lambda x, y: cmp(x['package_name'],y['package_name']))
+ bdellist=[] # list containing builds to be removed from dst tag
+ for (nvr,lbld) in dstlblds.iteritems():
+ if not srclblds.has_key(nvr):
+ bdellist.append(lbld)
+ bdellist.sort(lambda x, y: cmp(x['package_name'],y['package_name']))
+ gaddlist=[] # list containing new groups to be added from src tag
+ for (grpname, group) in srcgroups.iteritems():
+ if not dstgroups.has_key(grpname):
+ gaddlist.append(group)
+ gdellist=[] # list containing groups to be removed from dst tag
+ for (grpname, group) in dstgroups.iteritems():
+ if not srcgroups.has_key(grpname):
+ gdellist.append(group)
+ grpchanges={} # dict of changes to make in shared groups
+ for (grpname, group) in srcgroups.iteritems():
+ if dstgroups.has_key(grpname):
+ grpchanges[grpname] = {'adds':[], 'dels':[]}
+ # Store whether group is inherited or not
+ grpchanges[grpname]['inherited'] = False
+ if group['tag_id'] != dsttag['id']:
+ grpchanges[grpname]['inherited'] = True
+ srcgrppkglist=[]
+ dstgrppkglist=[]
+ for pkg in group['packagelist']:
+ srcgrppkglist.append(pkg['package'])
+ for pkg in dstgroups[grpname]['packagelist']:
+ dstgrppkglist.append(pkg['package'])
+ for pkg in srcgrppkglist:
+ if not pkg in dstgrppkglist:
+ grpchanges[grpname]['adds'].append(pkg)
+ for pkg in dstgrppkglist:
+ if not pkg in srcgrppkglist:
+ grpchanges[grpname]['dels'].append(pkg)
+ # ADD new packages.
+ for pkg in paddlist:
+ chgpkglist.append(('[add]',pkg['package_name'],
+ pkg['blocked'],pkg['owner_name'],
+ pkg['tag_name']))
+ if not options.test:
+ session.packageListAdd(dsttag['name'],pkg['package_name'],
+ owner=pkg['owner_name'],
+ block=pkg['blocked'],
+ extra_arches=pkg['extra_arches'])
+ # ADD builds.
+ for build in baddlist:
+ build['name']=build['package_name'] # add missing 'name' field.
+ chgbldlist.append(('[add]',build['package_name'],build['nvr'],
+ koji.BUILD_STATES[build['state']],
+ build['owner_name'],build['tag_name']))
+ # copy latest builds into new tag.
+ if not options.test:
+ session.tagBuildBypass(dsttag['name'], build, force=options.force)
+ # ADD groups.
+ for group in gaddlist:
+ if not options.test:
+ session.groupListAdd(dsttag['name'], group['name'], force=options.force)
+ for pkg in group['packagelist']:
+ if not options.test:
+ session.groupPackageListAdd(dsttag['name'], group['name'], pkg['package'], force=options.force)
+ chggrplist.append(('[new]', pkg['package'], group['name']))
+ # ADD group pkgs.
+ for group in grpchanges:
+ for pkg in grpchanges[group]['adds']:
+ chggrplist.append(('[new]', pkg, group))
+ if not options.test:
+ session.groupPackageListAdd(dsttag['name'], group, pkg, force=options.force)
+ # DEL builds.
+ for build in bdellist:
+ # don't delete an inherited build.
+ if build['tag_name'] == dsttag['name']:
+ build['name']=build['package_name'] # add missing 'name' field.
+ chgbldlist.append(('[del]',build['package_name'],build['nvr'],
+ koji.BUILD_STATES[build['state']],
+ build['owner_name'],build['tag_name']))
+ # go ahead and delete the builds from the dst tag.
+ if not options.test:
+ session.untagBuildBypass(dsttag['name'], build, force=options.force)
+ # DEL packages.
+ for pkg in pdellist:
+ # delete only non-inherited packages.
+ if pkg['tag_name'] == dsttag['name']:
+ # check if the package has owned builds inside.
+ builds=session.listTagged(dsttag['name'],package=pkg['package_name'],inherit=False)
+ #remove all its builds first if there are any.
+ for build in builds:
+ build['name']=build['package_name'] #add missing 'name' field.
+ chgbldlist.append(('[del]',build['package_name'],build['nvr'],
+ koji.BUILD_STATES[build['state']],
+ build['owner_name'],build['tag_name']))
+ # so delete latest build(s) from new tag.
+ if not options.test:
+ session.untagBuildBypass(dsttag['name'], build, force=options.force)
+ # now safe to remove the package itself since we resolved its builds.
+ chgpkglist.append(('[del]',pkg['package_name'],pkg['blocked'],
+ pkg['owner_name'],pkg['tag_name']))
+ if not options.test:
+ session.packageListRemove(dsttag['name'],pkg['package_name'],force=False)
+ # mark inherited packages as blocked.
+ if pkg['tag_name'] != dsttag['name']:
+ chgpkglist.append(('[blk]',pkg['package_name'],pkg['blocked'],pkg['owner_name'],pkg['tag_name']))
+ if not options.test:
+ session.packageListBlock(dsttag['name'],pkg['package_name'])
+ # DEL groups.
+ for group in gdellist:
+ # Only delete a group that isn't inherited
+ if group['tag_id'] == dsttag['id']:
+ if not options.test:
+ session.groupListRemove(dsttag['name'], group['name'], force=options.force)
+ for pkg in group['packagelist']:
+ chggrplist.append(('[del]', pkg['package'], group['name']))
+ # mark inherited groups as blocked.
+ else:
+ if not options.test:
+ session.groupListBlock(dsttag['name'], group['name'])
+ for pkg in group['packagelist']:
+ chggrplist.append(('[blk]', pkg['package'], group['name']))
+ # DEL group pkgs.
+ for group in grpchanges:
+ for pkg in grpchanges[group]['dels']:
+ # Only delete a group that isn't inherited
+ if not grpchanges[group]['inherited']:
+ chggrplist.append(('[del]', pkg, group))
+ if not options.test:
+ session.groupPackageListRemove(dsttag['name'], group, pkg, force=options.force)
+ else:
+ chggrplist.append(('[blk]', pkg, group))
+ if not options.test:
+ session.groupPackageListBlock(dsttag['name'], group, pkg)
+ # print final list of actions.
+ if options.verbose:
+ pfmt=' %-7s %-28s %-10s %-10s %-10s\n'
+ bfmt=' %-7s %-28s %-40s %-10s %-10s %-10s\n'
+ gfmt=' %-7s %-28s %-28s\n'
+ sys.stdout.write('\nList of changes:\n\n')
+ sys.stdout.write(pfmt % ('Action','Package','Blocked','Owner','From Tag'))
+ sys.stdout.write(pfmt % ('-'*7,'-'*28,'-'*10,'-'*10,'-'*10))
+ for changes in chgpkglist:
+ sys.stdout.write(pfmt % changes)
+ sys.stdout.write('\n')
+ sys.stdout.write(bfmt % ('Action','From/To Package','Latest Build(s)','State','Owner','From Tag'))
+ sys.stdout.write(bfmt % ('-'*7,'-'*28,'-'*40,'-'*10,'-'*10,'-'*10))
+ for changes in chgbldlist:
+ sys.stdout.write(bfmt % changes)
+ sys.stdout.write('\n')
+ sys.stdout.write(gfmt % ('Action','Package','Group'))
+ sys.stdout.write(gfmt % ('-'*7,'-'*28,'-'*28))
+ for changes in chggrplist:
+ sys.stdout.write(gfmt % changes)
+
+
+def handle_add_target(options, session, args):
+ "[admin] Create a new build target"
+ usage = _("usage: %prog add-target name build-tag <dest-tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a target name, a build tag, and destination tag"))
+ assert False
+ elif len(args) > 3:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ name = args[0]
+ build_tag = args[1]
+ if len(args) > 2:
+ dest_tag = args[2]
+ else:
+ #most targets have the same name as their destination
+ dest_tag = name
+ activate_session(session)
+ if not session.hasPerm('admin'):
+ print "This action requires admin privileges"
+ return 1
+
+ chkbuildtag = session.getTag(build_tag)
+ chkdesttag = session.getTag(dest_tag)
+ if not chkbuildtag:
+ print "Build tag does not exist: %s" % build_tag
+ return 1
+ if not chkbuildtag.get("arches", None):
+ print "Build tag has no arches: %s" % build_tag
+ return 1
+ if not chkdesttag:
+ print "Destination tag does not exist: %s" % dest_tag
+ return 1
+
+ session.createBuildTarget(name, build_tag, dest_tag)
+
+def handle_edit_target(options, session, args):
+ "[admin] Set the name, build_tag, and/or dest_tag of an existing build target to new values"
+ usage = _("usage: %prog edit-target [options] name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--rename", help=_("Specify new name for target"))
+ parser.add_option("--build-tag", help=_("Specify a different build tag"))
+ parser.add_option("--dest-tag", help=_("Specify a different destination tag"))
+
+ (options, args) = parser.parse_args(args)
+
+ if len(args) != 1:
+ parser.error(_("Please specify a build target"))
+ assert False
+ activate_session(session)
+
+ if not session.hasPerm('admin'):
+ print "This action requires admin privileges"
+ return
+
+ targetInfo = session.getBuildTarget(args[0])
+ if targetInfo == None:
+ raise koji.GenericError("No build target with the name or id '%s'" % args[0])
+
+ targetInfo['orig_name'] = targetInfo['name']
+
+ if options.rename:
+ targetInfo['name'] = options.rename
+ if options.build_tag:
+ targetInfo['build_tag_name'] = options.build_tag
+ chkbuildtag = session.getTag(options.build_tag)
+ if not chkbuildtag:
+ print "Build tag does not exist: %s" % options.build_tag
+ return 1
+ if not chkbuildtag.get("arches", None):
+ print "Build tag has no arches: %s" % options.build_tag
+ return 1
+ if options.dest_tag:
+ chkdesttag = session.getTag(options.dest_tag)
+ if not chkdesttag:
+ print "Destination tag does not exist: %s" % options.dest_tag
+ return 1
+ targetInfo['dest_tag_name'] = options.dest_tag
+
+ session.editBuildTarget(targetInfo['orig_name'], targetInfo['name'], targetInfo['build_tag_name'], targetInfo['dest_tag_name'])
+
+def handle_remove_target(options, session, args):
+ "[admin] Remove a build target"
+ usage = _("usage: %prog remove-target [options] name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+
+ if len(args) != 1:
+ parser.error(_("Please specify a build target to remove"))
+ assert False
+ activate_session(session)
+
+ if not session.hasPerm('admin'):
+ print "This action requires admin privileges"
+ return
+
+ target = args[0]
+ target_info = session.getBuildTarget(target)
+ if not target_info:
+ print "Build target %s does not exist" % target
+ return 1
+
+ session.deleteBuildTarget(target_info['id'])
+
+def handle_remove_tag(options, session, args):
+ "[admin] Remove a tag"
+ usage = _("usage: %prog remove-tag [options] name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+
+ if len(args) != 1:
+ parser.error(_("Please specify a tag to remove"))
+ assert False
+ activate_session(session)
+
+ if not session.hasPerm('admin'):
+ print "This action requires admin privileges"
+ return
+
+ tag = args[0]
+ tag_info = session.getTag(tag)
+ if not tag_info:
+ print "Tag %s does not exist" % tag
+ return 1
+
+ session.deleteTag(tag_info['id'])
+
+def anon_handle_list_targets(options, session, args):
+ "List the build targets"
+ usage = _("usage: %prog list-targets [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--name", help=_("Specify the build target name"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not print the header information"), default=options.quiet)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+
+ fmt = "%(name)-30s %(build_tag_name)-30s %(dest_tag_name)-30s"
+ if not options.quiet:
+ print "%-30s %-30s %-30s" % ('Name','Buildroot','Destination')
+ print "-" * 93
+ tmp_list = [(x['name'], x) for x in session.getBuildTargets(options.name)]
+ tmp_list.sort()
+ targets = [x[1] for x in tmp_list]
+ for target in targets:
+ print fmt % target
+ #pprint.pprint(session.getBuildTargets())
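+ # Sample output sketch (hypothetical target):
+ # Name                           Buildroot                      Destination
+ # f24                            f24-build                      f24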
+
+def _printInheritance(tags, sibdepths=None, reverse=False):
+ if len(tags) == 0:
+ return
+ if sibdepths is None:
+ sibdepths = []
+ currtag = tags[0]
+ tags = tags[1:]
+ if reverse:
+ siblings = len([tag for tag in tags if tag['parent_id'] == currtag['parent_id']])
+ else:
+ siblings = len([tag for tag in tags if tag['child_id'] == currtag['child_id']])
+
+ outdepth = 0
+ for depth in sibdepths:
+ if depth < currtag['currdepth']:
+ outspacing = depth - outdepth
+ sys.stdout.write(' ' * (outspacing * 3 - 1))
+ sys.stdout.write(u'\u2502'.encode('UTF-8'))
+ outdepth = depth
+
+ sys.stdout.write(' ' * ((currtag['currdepth'] - outdepth) * 3 - 1))
+ if siblings:
+ sys.stdout.write(u'\u251c'.encode('UTF-8'))
+ else:
+ sys.stdout.write(u'\u2514'.encode('UTF-8'))
+ sys.stdout.write(u'\u2500'.encode('UTF-8'))
+ if reverse:
+ sys.stdout.write('%(name)s (%(tag_id)i)\n' % currtag)
+ else:
+ sys.stdout.write('%(name)s (%(parent_id)i)\n' % currtag)
+
+ if siblings:
+ if len(sibdepths) == 0 or sibdepths[-1] != currtag['currdepth']:
+ sibdepths.append(currtag['currdepth'])
+ else:
+ if len(sibdepths) > 0 and sibdepths[-1] == currtag['currdepth']:
+ sibdepths.pop()
+
+ _printInheritance(tags, sibdepths, reverse)
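+ # Rough output sketch (hypothetical tags): each inherited tag is rendered as
+ # a tree node, e.g. "├─f24-build (123)", using the connectors drawn above.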
+
+def anon_handle_list_tag_inheritance(options, session, args):
+ "Print the inheritance information for a tag"
+ usage = _("usage: %prog list-tag-inheritance [options] <tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--reverse", action="store_true", help=_("Process tag's children instead of its parents"))
+ parser.add_option("--stop", help=_("Stop processing inheritance at this tag"))
+ parser.add_option("--jump", help=_("Jump from one tag to another when processing inheritance"))
+ parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))
+ parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at timestamp"))
+ parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("This command takes exctly one argument: a tag name or ID"))
+ assert False
+ activate_session(session)
+ event = koji.util.eventFromOpts(session, options)
+ if event:
+ event['timestr'] = time.asctime(time.localtime(event['ts']))
+ print "Querying at event %(id)i (%(timestr)s)" % event
+ if event:
+ tag = session.getTag(args[0], event=event['id'])
+ else:
+ tag = session.getTag(args[0])
+ if not tag:
+ parser.error(_("Unknown tag: %s" % args[0]))
+
+ opts = {}
+ opts['reverse'] = options.reverse or False
+ opts['stops'] = {}
+ opts['jumps'] = {}
+ if event:
+ opts['event'] = event['id']
+
+ if options.jump:
+ match = re.match(r'^(.*)/(.*)$', options.jump)
+ if match:
+ tag1 = session.getTagID(match.group(1))
+ if not tag1:
+ parser.error(_("Unknown tag: %s" % match.group(1)))
+ tag2 = session.getTagID(match.group(2))
+ if not tag2:
+ parser.error(_("Unknown tag: %s" % match.group(2)))
+ opts['jumps'][str(tag1)] = tag2
+
+ if options.stop:
+ tag1 = session.getTagID(options.stop)
+ if not tag1:
+ parser.error(_("Unknown tag: %s" % options.stop))
+ opts['stops'] = {str(tag1): 1}
+
+ sys.stdout.write('%s (%i)\n' % (tag['name'], tag['id']))
+ data = session.getFullInheritance(tag['id'], **opts)
+ _printInheritance(data, None, opts['reverse'])
+
+def anon_handle_list_tags(options, session, args):
+ "Print the list of tags"
+ usage = _("usage: %prog list-tags [options] [pattern]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--show-id", action="store_true", help=_("Show tag ids"))
+ parser.add_option("--verbose", action="store_true", help=_("Show more information"))
+ parser.add_option("--unlocked", action="store_true", help=_("Only show unlocked tags"))
+ parser.add_option("--build", help=_("Show tags associated with a build"))
+ parser.add_option("--package", help=_("Show tags associated with a package"))
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+
+ pkginfo = {}
+ buildinfo = {}
+
+ if options.package:
+ pkginfo = session.getPackage(options.package)
+ if not pkginfo:
+ parser.error(_("Invalid package %s" % options.package))
+ assert False
+
+ if options.build:
+ buildinfo = session.getBuild(options.build)
+ if not buildinfo:
+ parser.error(_("Invalid build %s" % options.build))
+ assert False
+
+ tags = session.listTags(buildinfo.get('id',None), pkginfo.get('id',None))
+ tags.sort(lambda a,b: cmp(a['name'],b['name']))
+ #if options.verbose:
+ # fmt = "%(name)s [%(id)i] %(perm)s %(locked)s %(arches)s"
+ if options.show_id:
+ fmt = "%(name)s [%(id)i]"
+ else:
+ fmt = "%(name)s"
+ for tag in tags:
+ if args:
+ for pattern in args:
+ if fnmatch.fnmatch(tag['name'], pattern):
+ break
+ else:
+ continue
+ if options.unlocked:
+ if tag['locked'] or tag['perm']:
+ continue
+ if not options.verbose:
+ print fmt % tag
+ else:
+ print fmt % tag,
+ if tag['locked']:
+ print ' [LOCKED]',
+ if tag['perm']:
+ print ' [%(perm)s perm required]' % tag,
+ print ''
+
+def anon_handle_list_tag_history(options, session, args):
+ "Print a history of tag operations"
+ usage = _("usage: %prog list-tag-history [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--debug", action="store_true")
+ parser.add_option("--build", help=_("Only show data for a specific build"))
+ parser.add_option("--package", help=_("Only show data for a specific package"))
+ parser.add_option("--tag", help=_("Only show data for a specific tag"))
+ parser.add_option("--all", action="store_true", help=_("Allows listing the entire global history"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ kwargs = {}
+ limited = False
+ if options.package:
+ kwargs['package'] = options.package
+ limited = True
+ if options.tag:
+ kwargs['tag'] = options.tag
+ limited = True
+ if options.build:
+ kwargs['build'] = options.build
+ limited = True
+ if not limited and not options.all:
+ parser.error(_("Please specify an option to limit the query"))
+
+ activate_session(session)
+
+ hist = session.tagHistory(**kwargs)
+ timeline = []
+ for x in hist:
+ event_id = x['revoke_event']
+ if event_id is not None:
+ timeline.append((event_id, x))
+ event_id = x['create_event']
+ timeline.append((event_id, x))
+ timeline.sort()
+ def _histline(event_id, x):
+ if event_id == x['revoke_event']:
+ ts = x['revoke_ts']
+ fmt = "%(name)s-%(version)s-%(release)s untagged from %(tag_name)s"
+ if x.has_key('revoker_name'):
+ fmt += " by %(revoker_name)s"
+ elif event_id == x['create_event']:
+ ts = x['create_ts']
+ fmt = "%(name)s-%(version)s-%(release)s tagged into %(tag_name)s"
+ if x.has_key('creator_name'):
+ fmt += " by %(creator_name)s"
+ if x['active']:
+ fmt += " [still active]"
+ else:
+ raise koji.GenericError, "unknown event: (%r, %r)" % (event_id, x)
+ time_str = time.asctime(time.localtime(ts))
+ return "%s: %s" % (time_str, fmt % x)
+ for event_id, x in timeline:
+ if options.debug:
+ print "%r" % x
+ print _histline(event_id, x)
+
+def _print_histline(entry, **kwargs):
+ options = kwargs['options']
+ event_id, table, create, x = entry
+ who = None
+ edit = x.get('.related')
+ if edit:
+ del x['.related']
+ bad_edit = None
+ if len(edit) != 1:
+ bad_edit = "%i elements" % len(edit)+1
+ other = edit[0]
+ #check edit for sanity
+ if create or not other[2]:
+ bad_edit = "out of order"
+ if event_id != other[0]:
+ bad_edit = "non-matching"
+ if bad_edit:
+ print "Warning: unusual edit at event %i in table %s (%s)" % (event_id, table, bad_edit)
+ #we'll simply treat them as separate events
+ pprint.pprint(entry)
+ pprint.pprint(edit)
+ _print_histline(entry, **kwargs)
+ for data in edit:
+ _print_histline(data, **kwargs)
+ return
+ if create:
+ ts = x['create_ts']
+ if x.has_key('creator_name'):
+ who = "by %(creator_name)s"
+ else:
+ ts = x['revoke_ts']
+ if x.has_key('revoker_name'):
+ who = "by %(revoker_name)s"
+ if table == 'tag_listing':
+ if edit:
+ fmt = "%(name)s-%(version)s-%(release)s re-tagged into %(tag.name)s"
+ elif create:
+ fmt = "%(name)s-%(version)s-%(release)s tagged into %(tag.name)s"
+ else:
+ fmt = "%(name)s-%(version)s-%(release)s untagged from %(tag.name)s"
+ elif table == 'user_perms':
+ if edit:
+ fmt = "permission %(permission.name)s re-granted to %(user.name)s"
+ elif create:
+ fmt = "permission %(permission.name)s granted to %(user.name)s"
+ else:
+ fmt = "permission %(permission.name)s revoked for %(user.name)s"
+ elif table == 'user_groups':
+ if edit:
+ fmt = "user %(user.name)s re-added to group %(group.name)s"
+ elif create:
+ fmt = "user %(user.name)s added to group %(group.name)s"
+ else:
+ fmt = "user %(user.name)s removed from group %(group.name)s"
+ elif table == 'tag_packages':
+ if edit:
+ fmt = "package list entry for %(package.name)s in %(tag.name)s updated"
+ elif create:
+ fmt = "package list entry created: %(package.name)s in %(tag.name)s"
+ else:
+ fmt = "package list entry revoked: %(package.name)s in %(tag.name)s"
+ elif table == 'tag_inheritance':
+ if edit:
+ fmt = "inheritance line %(tag.name)s->%(parent.name)s updated"
+ elif create:
+ fmt = "inheritance line %(tag.name)s->%(parent.name)s added"
+ else:
+ fmt = "inheritance line %(tag.name)s->%(parent.name)s removed"
+ elif table == 'tag_config':
+ if edit:
+ fmt = "tag configuration for %(tag.name)s altered"
+ elif create:
+ fmt = "new tag: %(tag.name)s"
+ else:
+ fmt = "tag deleted: %(tag.name)s"
+ elif table == 'tag_extra':
+ if edit:
+ fmt = "tag option %(key)s for tag %(tag.name)s altered"
+ elif create:
+ fmt = "added tag option %(key)s for tag %(tag.name)s"
+ else:
+ fmt = "tag option %(key)s removed for %(tag.name)s"
+ elif table == 'build_target_config':
+ if edit:
+ fmt = "build target configuration for %(build_target.name)s updated"
+ elif create:
+ fmt = "new build target: %(build_target.name)s"
+ else:
+ fmt = "build target deleted: %(build_target.name)s"
+ elif table == 'external_repo_config':
+ if edit:
+ fmt = "external repo configuration for %(external_repo.name)s altered"
+ elif create:
+ fmt = "new external repo: %(external_repo.name)s"
+ else:
+ fmt = "external repo deleted: %(external_repo.name)s"
+ elif table == 'tag_external_repos':
+ if edit:
+ fmt = "external repo entry for %(external_repo.name)s in tag %(tag.name)s updated"
+ elif create:
+ fmt = "external repo entry for %(external_repo.name)s added to tag %(tag.name)s"
+ else:
+ fmt = "external repo entry for %(external_repo.name)s removed from tag %(tag.name)s"
+ elif table == 'group_config':
+ if edit:
+ fmt = "group %(group.name)s configuration for tag %(tag.name)s updated"
+ elif create:
+ fmt = "group %(group.name)s added to tag %(tag.name)s"
+ else:
+ fmt = "group %(group.name)s removed from tag %(tag.name)s"
+ elif table == 'group_req_listing':
+ if edit:
+ fmt = "group dependency %(group.name)s->%(req.name)s updated in tag %(tag.name)s"
+ elif create:
+ fmt = "group dependency %(group.name)s->%(req.name)s added in tag %(tag.name)s"
+ else:
+ fmt = "group dependency %(group.name)s->%(req.name)s dropped from tag %(tag.name)s"
+ elif table == 'group_package_listing':
+ if edit:
+ fmt = "package entry %(package)s in group %(group.name)s, tag %(tag.name)s updated"
+ elif create:
+ fmt = "package %(package)s added to group %(group.name)s in tag %(tag.name)s"
+ else:
+ fmt = "package %(package)s removed from group %(group.name)s in tag %(tag.name)s"
+ else:
+ if edit:
+ fmt = "%s entry updated" % table
+ elif create:
+ fmt = "%s entry created" % table
+ else:
+ fmt = "%s entry revoked" % table
+ time_str = time.asctime(time.localtime(ts))
+ parts = [time_str, fmt % x]
+ if options.events or options.verbose:
+ parts.insert(1, "(eid %i)" % event_id)
+ if who:
+ parts.append(who % x)
+ if create and x['active']:
+ parts.append("[still active]")
+ print ' '.join(parts)
+ hidden_fields = ['active', 'create_event', 'revoke_event', 'creator_id', 'revoker_id',
+ 'creator_name', 'revoker_name', 'create_ts', 'revoke_ts']
+ def get_nkey(key):
+ if key == 'perm_id':
+ return 'permission.name'
+ elif key.endswith('_id'):
+ return '%s.name' % key[:-3]
+ else:
+ return '%s.name' % key
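+ # e.g. get_nkey('perm_id') -> 'permission.name', get_nkey('tag_id') -> 'tag.name'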
+ if edit:
+ keys = x.keys()
+ keys.sort()
+ y = other[-1]
+ for key in keys:
+ if key in hidden_fields:
+ continue
+ if x[key] == y[key]:
+ continue
+ if key[0] == '_':
+ continue
+ nkey = get_nkey(key)
+ if nkey in x and nkey in y:
+ continue
+ print " %s: %s -> %s" % (key, x[key], y[key])
+ elif create and options.verbose and table != 'tag_listing':
+ keys = x.keys()
+ keys.sort()
+ # the table keys have already been represented in the base format string
+ also_hidden = list(_table_keys[table])
+ also_hidden.extend([get_nkey(k) for k in also_hidden])
+ for key in keys:
+ if key in hidden_fields or key in also_hidden:
+ continue
+ nkey = get_nkey(key)
+ if nkey in x:
+ continue
+ if key[0] == '_':
+ continue
+ if x.get('blocked') and key != 'blocked':
+ continue
+ if key.endswith('.name'):
+ dkey = key[:-5]
+ else:
+ dkey = key
+ print " %s: %s" % (dkey, x[key])
+
+_table_keys = {
+ 'user_perms' : ['user_id', 'perm_id'],
+ 'user_groups' : ['user_id', 'group_id'],
+ 'tag_inheritance' : ['tag_id', 'parent_id'],
+ 'tag_config' : ['tag_id'],
+ 'tag_extra' : ['tag_id', 'key'],
+ 'build_target_config' : ['build_target_id'],
+ 'external_repo_config' : ['external_repo_id'],
+ 'tag_external_repos' : ['tag_id', 'external_repo_id'],
+ 'tag_listing' : ['build_id', 'tag_id'],
+ 'tag_packages' : ['package_id', 'tag_id'],
+ 'group_config' : ['group_id', 'tag_id'],
+ 'group_req_listing' : ['group_id', 'tag_id', 'req_id'],
+ 'group_package_listing' : ['group_id', 'tag_id', 'package'],
+ }
+
+def anon_handle_list_history(options, session, args):
+ "Display historical data"
+ usage = _("usage: %prog list-history [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--debug", action="store_true")
+ parser.add_option("--build", help=_("Only show data for a specific build"))
+ parser.add_option("--package", help=_("Only show data for a specific package"))
+ parser.add_option("--tag", help=_("Only show data for a specific tag"))
+ parser.add_option("--editor", "--by", metavar="USER", help=_("Only show entries modified by user"))
+ parser.add_option("--user", help=_("Only show entries affecting a user"))
+ parser.add_option("--permission", help=_("Only show entries relating to a given permission"))
+ parser.add_option("--external-repo", "--erepo", help=_("Only show entries relating to a given external repo"))
+ parser.add_option("--build-target", "--target", help=_("Only show entries relating to a given build target"))
+ parser.add_option("--group", help=_("Only show entries relating to a given group"))
+ parser.add_option("--before", metavar="TIMESTAMP", help=_("Only show entries before timestamp"))
+ parser.add_option("--after", metavar="TIMESTAMP", help=_("Only show entries after timestamp"))
+ parser.add_option("--before-event", metavar="EVENT_ID", type='int', help=_("Only show entries before event"))
+ parser.add_option("--after-event", metavar="EVENT_ID", type='int', help=_("Only show entries after event"))
+ parser.add_option("--watch", action="store_true", help=_("Monitor history data"))
+ parser.add_option("--active", action='store_true', help=_("Only show entries that are currently active"))
+ parser.add_option("--revoked", action='store_false', dest='active',
+ help=_("Only show entries that are currently revoked"))
+ parser.add_option("--context", action="store_true", help=_("Show related entries"))
+ parser.add_option("-s", "--show", action="append", help=_("Show data from selected tables"))
+ parser.add_option("-v", "--verbose", action="store_true", help=_("Show more detail"))
+ parser.add_option("-e", "--events", action="store_true", help=_("Show event ids"))
+ parser.add_option("--all", action="store_true", help=_("Allows listing the entire global history"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ kwargs = {}
+ limited = False
+ for opt in 'package', 'tag', 'build', 'editor', 'user', 'permission', 'external_repo', \
+ 'build_target', 'group', 'before', 'after':
+ val = getattr(options, opt)
+ if val:
+ kwargs[opt] = val
+ limited = True
+ if options.before_event:
+ kwargs['beforeEvent'] = options.before_event
+ if options.after_event:
+ kwargs['afterEvent'] = options.after_event
+ if options.active is not None:
+ kwargs['active'] = options.active
+ tables = None
+ if options.show:
+ tables = []
+ for arg in options.show:
+ tables.extend(arg.split(','))
+ if not limited and not options.all:
+ parser.error(_("Please specify an option to limit the query"))
+
+ activate_session(session)
+
+ if options.watch:
+ if not kwargs.get('afterEvent') and not kwargs.get('after'):
+ kwargs['afterEvent'] = session.getLastEvent()['id']
+
+ while True:
+ histdata = session.queryHistory(tables=tables, **kwargs)
+ timeline = []
+ def distinguish_match(x, name):
+ """determine if create or revoke event matched"""
+ if options.context:
+ return True
+ name = "_" + name
+ ret = True
+ for key in x:
+ if key.startswith(name):
+ ret = ret and x[key]
+ return ret
+ for table in histdata:
+ hist = histdata[table]
+ for x in hist:
+ if x['revoke_event'] is not None:
+ if distinguish_match(x, 'revoked'):
+ timeline.append((x['revoke_event'], table, 0, x.copy()))
+ #pprint.pprint(timeline[-1])
+ if distinguish_match(x, 'created'):
+ timeline.append((x['create_event'], table, 1, x))
+ timeline.sort()
+ #group edits together
+ new_timeline = []
+ last_event = None
+ edit_index = {}
+ for entry in timeline:
+ event_id, table, create, x = entry
+ if event_id != last_event:
+ edit_index = {}
+ last_event = event_id
+ key = tuple([x[k] for k in _table_keys[table]])
+ prev = edit_index.get((table, event_id), {}).get(key)
+ if prev:
+ prev[-1].setdefault('.related', []).append(entry)
+ else:
+ edit_index.setdefault((table, event_id), {})[key] = entry
+ new_timeline.append(entry)
+ for entry in new_timeline:
+ if options.debug:
+ print "%r" % list(entry)
+ _print_histline(entry, options=options)
+ if not options.watch:
+ break
+ else:
+ time.sleep(5)
+ # repeat query for later events
+ if last_event:
+ kwargs['afterEvent'] = last_event
+
+def _handleMap(lines, data, prefix=''):
+ for key, val in data.items():
+ if key != '__starstar':
+ lines.append(' %s%s: %s' % (prefix, key, val))
+
+def _handleOpts(lines, opts, prefix=''):
+ if opts:
+ lines.append("%sOptions:" % prefix)
+ _handleMap(lines, opts, prefix)
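+ # e.g. (hypothetical): _handleOpts(lines, {'skip_tag': True}) appends
+ # "Options:" and "  skip_tag: True" to lines.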
+
+def _parseTaskParams(session, method, task_id):
+ """Parse the return of getTaskRequest()"""
+ params = session.getTaskRequest(task_id)
+
+ lines = []
+
+ if method == 'buildSRPMFromCVS':
+ lines.append("CVS URL: %s" % params[0])
+ elif method == 'buildSRPMFromSCM':
+ lines.append("SCM URL: %s" % params[0])
+ elif method == 'buildArch':
+ lines.append("SRPM: %s/work/%s" % (options.topdir, params[0]))
+ lines.append("Build Tag: %s" % session.getTag(params[1])['name'])
+ lines.append("Build Arch: %s" % params[2])
+ lines.append("SRPM Kept: %r" % params[3])
+ if len(params) > 4:
+ _handleOpts(lines, params[4])
+ elif method == 'tagBuild':
+ build = session.getBuild(params[1])
+ lines.append("Destination Tag: %s" % session.getTag(params[0])['name'])
+ lines.append("Build: %s" % koji.buildLabel(build))
+ elif method == 'buildNotification':
+ build = params[1]
+ buildTarget = params[2]
+ lines.append("Recipients: %s" % (", ".join(params[0])))
+ lines.append("Build: %s" % koji.buildLabel(build))
+ lines.append("Build Target: %s" % buildTarget['name'])
+ lines.append("Web URL: %s" % params[3])
+ elif method == 'build':
+ lines.append("Source: %s" % params[0])
+ lines.append("Build Target: %s" % params[1])
+ if len(params) > 2:
+ _handleOpts(lines, params[2])
+ elif method == 'maven':
+ lines.append("SCM URL: %s" % params[0])
+ lines.append("Build Target: %s" % params[1])
+ if len(params) > 2:
+ _handleOpts(lines, params[2])
+ elif method == 'buildMaven':
+ lines.append("SCM URL: %s" % params[0])
+ lines.append("Build Tag: %s" % params[1]['name'])
+ if len(params) > 2:
+ _handleOpts(lines, params[2])
+ elif method == 'wrapperRPM':
+ lines.append("Spec File URL: %s" % params[0])
+ lines.append("Build Tag: %s" % params[1]['name'])
+ if params[2]:
+ lines.append("Build: %s" % koji.buildLabel(params[2]))
+ if params[3]:
+ lines.append("Task: %s %s" % (params[3]['id'], koji.taskLabel(params[3])))
+ if len(params) > 4:
+ _handleOpts(lines, params[4])
+ elif method == 'chainmaven':
+ lines.append("Builds:")
+ for package, opts in params[0].items():
+ lines.append(" " + package)
+ _handleMap(lines, opts, prefix=" ")
+ lines.append("Build Target: %s" % params[1])
+ if len(params) > 2:
+ _handleOpts(lines, params[2])
+ elif method == 'winbuild':
+ lines.append("VM: %s" % params[0])
+ lines.append("SCM URL: %s" % params[1])
+ lines.append("Build Target: %s" % params[2])
+ if len(params) > 3:
+ _handleOpts(lines, params[3])
+ elif method == 'vmExec':
+ lines.append("VM: %s" % params[0])
+ lines.append("Exec Params:")
+ for info in params[1]:
+ if isinstance(info, dict):
+ _handleMap(lines, info, prefix=' ')
+ else:
+ lines.append(" %s" % info)
+ if len(params) > 2:
+ _handleOpts(lines, params[2])
+ elif method in ('createLiveCD', 'createAppliance'):
+ lines.append("Arch: %s" % params[0])
+ lines.append("Build Target: %s" % params[1])
+ lines.append("Kickstart File: %s" % params[2])
+ if len(params) > 3:
+ _handleOpts(lines, params[3])
+ elif method == 'newRepo':
+ tag = session.getTag(params[0])
+ lines.append("Tag: %s" % tag['name'])
+ elif method == 'prepRepo':
+ lines.append("Tag: %s" % params[0]['name'])
+ elif method == 'createrepo':
+ lines.append("Repo ID: %i" % params[0])
+ lines.append("Arch: %s" % params[1])
+ oldrepo = params[2]
+ if oldrepo:
+ lines.append("Old Repo ID: %i" % oldrepo['id'])
+ lines.append("Old Repo Creation: %s" % koji.formatTimeLong(oldrepo['creation_time']))
+ if len(params) > 3:
+ lines.append("External Repos: %s" % ', '.join([ext['external_repo_name'] for ext in params[3]]))
+ elif method == 'tagNotification':
+ destTag = session.getTag(params[2])
+ srcTag = None
+ if params[3]:
+ srcTag = session.getTag(params[3])
+ build = session.getBuild(params[4])
+ user = session.getUser(params[5])
+
+ lines.append("Recipients: %s" % ", ".join(params[0]))
+ lines.append("Successful?: %s" % (params[1] and 'yes' or 'no'))
+ lines.append("Tagged Into: %s" % destTag['name'])
+ if srcTag:
+ lines.append("Moved From: %s" % srcTag['name'])
+ lines.append("Build: %s" % koji.buildLabel(build))
+ lines.append("Tagged By: %s" % user['name'])
+ lines.append("Ignore Success?: %s" % (params[6] and 'yes' or 'no'))
+ if params[7]:
+ lines.append("Failure Message: %s" % params[7])
+ elif method == 'dependantTask':
+ lines.append("Dependant Tasks: %s" % ", ".join([str(depID) for depID in params[0]]))
+ lines.append("Subtasks:")
+ for subtask in params[1]:
+ lines.append(" Method: %s" % subtask[0])
+ lines.append(" Parameters: %s" % ", ".join([str(subparam) for subparam in subtask[1]]))
+ if len(subtask) > 2 and subtask[2]:
+ subopts = subtask[2]
+ _handleOpts(lines, subopts, prefix=' ')
+ lines.append("")
+ elif method == 'chainbuild':
+ lines.append("Build Groups:")
+ group_num = 0
+ for group_list in params[0]:
+ group_num += 1
+ lines.append(" %i: %s" % (group_num, ', '.join(group_list)))
+ lines.append("Build Target: %s" % params[1])
+ if len(params) > 2:
+ _handleOpts(lines, params[2])
+ elif method == 'waitrepo':
+ lines.append("Build Target: %s" % params[0])
+ if params[1]:
+ lines.append("Newer Than: %s" % params[1])
+ if params[2]:
+ lines.append("NVRs: %s" % ', '.join(params[2]))
+
+ return lines
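+ # e.g. for a 'build' task (hypothetical request): returns lines like
+ # ["Source: foo-1.0-1.src.rpm", "Build Target: f24"].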
+
+def _printTaskInfo(session, task_id, level=0, recurse=True, verbose=True):
+ """Recursive function to print information about a task
+ and its children."""
+
+ BUILDDIR = '/var/lib/mock'
+ indent = " "*2*level
+
+ info = session.getTaskInfo(task_id)
+ if info['host_id']:
+ host_info = session.getHost(info['host_id'])
+ else:
+ host_info = None
+ buildroot_infos = session.listBuildroots(taskID=task_id)
+ build_info = session.listBuilds(taskID=task_id)
+
+ files = session.listTaskOutput(task_id)
+ logs = [filename for filename in files if filename.endswith('.log')]
+ output = [filename for filename in files if not filename.endswith('.log')]
+ files_dir = '%s/%s' % (koji.pathinfo.work(), koji.pathinfo.taskrelpath(task_id))
+
+ owner = session.getUser(info['owner'])['name']
+
+ print "%sTask: %d" % (indent, task_id)
+ print "%sType: %s" % (indent, info['method'])
+ if verbose:
+ print "%sRequest Parameters:" % indent
+ for line in _parseTaskParams(session, info['method'], task_id):
+ print "%s %s" % (indent, line)
+ print "%sOwner: %s" % (indent, owner)
+ print "%sState: %s" % (indent, koji.TASK_STATES[info['state']].lower())
+ print "%sCreated: %s" % (indent, time.asctime(time.localtime(info['create_ts'])))
+ if info.get('start_ts'):
+ print "%sStarted: %s" % (indent, time.asctime(time.localtime(info['start_ts'])))
+ if info.get('completion_ts'):
+ print "%sFinished: %s" % (indent, time.asctime(time.localtime(info['completion_ts'])))
+ if host_info:
+ print "%sHost: %s" % (indent, host_info['name'])
+ if build_info:
+ print "%sBuild: %s (%d)" % (indent, build_info[0]['nvr'], build_info[0]['build_id'])
+ if buildroot_infos:
+ print "%sBuildroots:" % indent
+ for root in buildroot_infos:
+ print "%s %s/%s-%d-%d/" % (indent, BUILDDIR, root['tag_name'], root['id'], root['repo_id'])
+ if logs:
+ print "%sLog Files:" % indent
+ for log in logs:
+ print "%s %s/%s" % (indent, files_dir, log)
+ if output:
+ print "%sOutput:" % indent
+ for filename in output:
+ print "%s %s/%s" % (indent, files_dir, filename)
+
+ # white space
+ print
+
+ if recurse:
+ level += 1
+ children = session.getTaskChildren(task_id, request=True)
+ children.sort(cmp=lambda a, b: cmp(a['id'], b['id']))
+ for child in children:
+ _printTaskInfo(session, child['id'], level, verbose=verbose)
+
+def anon_handle_taskinfo(options, session, args):
+ """Show information about a task"""
+ usage = _("usage: %prog taskinfo [options] taskID [taskID...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-r", "--recurse", action="store_true", help=_("Show children of this task as well"))
+ parser.add_option("-v", "--verbose", action="store_true", help=_("Be verbose"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("You must specify at least one task ID"))
+ assert False
+
+ activate_session(session)
+
+ for arg in args:
+ task_id = int(arg)
+ _printTaskInfo(session, task_id, 0, options.recurse, options.verbose)
+
+def anon_handle_taginfo(options, session, args):
+ "Print basic information about a tag"
+ usage = _("usage: %prog taginfo [options] <tag> [<tag> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--event", type='int', metavar="EVENT#", help=_("query at event"))
+ parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("query at timestamp"))
+ parser.add_option("--repo", type='int', metavar="REPO#", help=_("query at event for a repo"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify a tag"))
+ assert False
+ activate_session(session)
+ event = koji.util.eventFromOpts(session, options)
+ event_opts = {}
+ if event:
+ event['timestr'] = time.asctime(time.localtime(event['ts']))
+ print "Querying at event %(id)i (%(timestr)s)" % event
+ event_opts['event'] = event['id']
+ perms = dict([(p['id'], p['name']) for p in session.getAllPerms()])
+ for tag in args:
+ info = session.getTag(tag, **event_opts)
+ if info is None:
+ print "No such tag: %s\n" % tag
+ continue
+ print "Tag: %(name)s [%(id)d]" %info
+ print "Arches: %(arches)s" %info
+ group_list = [x['name'] for x in session.getTagGroups(info['id'], **event_opts)]
+ group_list.sort()
+ print "Groups: " + ', '.join(group_list)
+ if info.get('locked'):
+ print 'LOCKED'
+ if info.get('perm_id') is not None:
+ perm_id = info['perm_id']
+ print "Required permission: %r" % perms.get(perm_id, perm_id)
+ if session.mavenEnabled():
+ print "Maven support?: %s" % (info['maven_support'] and 'yes' or 'no')
+ print "Include all Maven archives?: %s" % (info['maven_include_all'] and 'yes' or 'no')
+ if 'extra' in info:
+ print "Tag options:"
+ keys = info['extra'].keys()
+ keys.sort()
+ for key in keys:
+ print " %s : %s" % (key, pprint.pformat(info['extra'][key]))
+ dest_targets = session.getBuildTargets(destTagID=info['id'], **event_opts)
+ build_targets = session.getBuildTargets(buildTagID=info['id'], **event_opts)
+ repos = {}
+ if not event:
+ for target in dest_targets + build_targets:
+ if not repos.has_key(target['build_tag']):
+ repo = session.getRepo(target['build_tag'])
+ if repo is None:
+ repos[target['build_tag']] = "no active repo"
+ else:
+ repos[target['build_tag']] = "repo#%(id)i: %(creation_time)s" % repo
+ if dest_targets:
+ print "Targets that build into this tag:"
+ for target in dest_targets:
+ if event:
+ print " %s (%s)" % (target['name'], target['build_tag_name'])
+ else:
+ print " %s (%s, %s)" % (target['name'], target['build_tag_name'], repos[target['build_tag']])
+ if build_targets:
+ print "This tag is a buildroot for one or more targets"
+ if not event:
+ print "Current repo: %s" % repos[info['id']]
+ print "Targets that build from this tag:"
+ for target in build_targets:
+ print " %s" % target['name']
+ external_repos = session.getTagExternalRepos(tag_info=info['id'], **event_opts)
+ if external_repos:
+ print "External repos:"
+ for rinfo in external_repos:
+ print " %(priority)3i %(external_repo_name)s (%(url)s)" % rinfo
+ print "Inheritance:"
+ for parent in session.getInheritanceData(tag, **event_opts):
+ flags = ''
+ for code,expr in (
+ ('M',parent['maxdepth'] is not None),
+ ('F',parent['pkg_filter']),
+ ('I',parent['intransitive']),
+ ('N',parent['noconfig']),):
+ if expr:
+ flags += code
+ else:
+ flags += '.'
+ parent['flags'] = flags
+ print " %(priority)-4d %(flags)s %(name)s [%(parent_id)s]" % parent
+ if parent['maxdepth'] is not None:
+ print " maxdepth: %(maxdepth)s" % parent
+ if parent['pkg_filter']:
+ print " package filter: %(pkg_filter)s" % parent
+ print
+
+def handle_add_tag(options, session, args):
+ "[admin] Add a new tag to the database"
+ usage = _("usage: %prog add-tag [options] name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--parent", help=_("Specify parent"))
+ parser.add_option("--arches", help=_("Specify arches"))
+ parser.add_option("--maven-support", action="store_true", help=_("Enable creation of Maven repos for this tag"))
+ parser.add_option("--include-all", action="store_true", help=_("Include all packages in this tag when generating Maven repos"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Please specify a name for the tag"))
+ assert False
+ activate_session(session)
+ if not session.hasPerm('admin'):
+ print "This action requires admin privileges"
+ return
+ opts = {}
+ if options.parent:
+ opts['parent'] = options.parent
+ if options.arches:
+ opts['arches'] = ' '.join(options.arches.replace(',',' ').split())
+ if options.maven_support:
+ opts['maven_support'] = True
+ if options.include_all:
+ opts['maven_include_all'] = True
+ session.createTag(args[0],**opts)
+
+def handle_edit_tag(options, session, args):
+ "[admin] Alter tag information"
+ usage = _("usage: %prog edit-tag [options] name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--arches", help=_("Specify arches"))
+ parser.add_option("--perm", help=_("Specify permission requirement"))
+ parser.add_option("--no-perm", action="store_true", help=_("Remove permission requirement"))
+ parser.add_option("--lock", action="store_true", help=_("Lock the tag"))
+ parser.add_option("--unlock", action="store_true", help=_("Unlock the tag"))
+ parser.add_option("--rename", help=_("Rename the tag"))
+ parser.add_option("--maven-support", action="store_true", help=_("Enable creation of Maven repos for this tag"))
+ parser.add_option("--no-maven-support", action="store_true", help=_("Disable creation of Maven repos for this tag"))
+ parser.add_option("--include-all", action="store_true", help=_("Include all packages in this tag when generating Maven repos"))
+ parser.add_option("--no-include-all", action="store_true", help=_("Do not include all packages in this tag when generating Maven repos"))
+ parser.add_option("-x", "--extra", action="append", default=[], metavar="key=value",
+ help=_("Set tag extra option"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Please specify a name for the tag"))
+ assert False
+ activate_session(session)
+ tag = args[0]
+ opts = {}
+ if options.arches:
+ opts['arches'] = ' '.join(options.arches.replace(',',' ').split())
+ if options.no_perm:
+ opts['perm_id'] = None
+ elif options.perm:
+ opts['perm'] = options.perm
+ if options.unlock:
+ opts['locked'] = False
+ if options.lock:
+ opts['locked'] = True
+ if options.rename:
+ opts['name'] = options.rename
+ if options.maven_support:
+ opts['maven_support'] = True
+ if options.no_maven_support:
+ opts['maven_support'] = False
+ if options.include_all:
+ opts['maven_include_all'] = True
+ if options.no_include_all:
+ opts['maven_include_all'] = False
+ if options.extra:
+ extra = {}
+ for xopt in options.extra:
+ key, value = xopt.split('=')
+ value = arg_filter(value)
+ extra[key] = value
+ opts['extra'] = extra
+ #XXX change callname
+ session.editTag2(tag,**opts)
+
+def handle_lock_tag(options, session, args):
+ "[admin] Lock a tag"
+ usage = _("usage: %prog lock-tag [options] <tag> [<tag> ...] ")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--perm", help=_("Specify permission requirement"))
+ parser.add_option("--glob", action="store_true", help=_("Treat args as glob patterns"))
+ parser.add_option("--master", action="store_true", help=_("Lock the master lock"))
+ parser.add_option("-n", "--test", action="store_true", help=_("Test mode"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify a tag"))
+ assert False
+ activate_session(session)
+ pdata = session.getAllPerms()
+ perm_ids = dict([(p['name'], p['id']) for p in pdata])
+ perm = options.perm
+ if perm is None:
+ perm = 'admin'
+ perm_id = perm_ids[perm]
+ if options.glob:
+ selected = []
+ for tag in session.listTags():
+ for pattern in args:
+ if fnmatch.fnmatch(tag['name'], pattern):
+ selected.append(tag)
+ break
+ if not selected:
+ print _("No tags matched")
+ else:
+ selected = [session.getTag(name) for name in args]
+ for tag in selected:
+ if options.master:
+ #set the master lock
+ if tag['locked']:
+ print _("Tag %s: master lock already set") % tag['name']
+ continue
+ elif options.test:
+ print _("Would have set master lock for: %s") % tag['name']
+ continue
+ session.editTag2(tag['id'], locked=True)
+ else:
+ if tag['perm_id'] == perm_id:
+ print _("Tag %s: %s permission already required") % (tag['name'], perm)
+ continue
+ elif options.test:
+ print _("Would have set permission requirement %s for tag %s") % (perm, tag['name'])
+ continue
+ session.editTag2(tag['id'], perm=perm_id)
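+ # e.g. (hypothetical): "koji lock-tag --glob 'f24*' --test" reports which
+ # matching tags would get the default 'admin' permission requirement.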
+
+def handle_unlock_tag(options, session, args):
+ "[admin] Unlock a tag"
+ usage = _("usage: %prog unlock-tag [options] <tag> [<tag> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--glob", action="store_true", help=_("Treat args as glob patterns"))
+ parser.add_option("-n", "--test", action="store_true", help=_("Test mode"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify a tag"))
+ assert False
+ activate_session(session)
+ if options.glob:
+ selected = []
+ for tag in session.listTags():
+ for pattern in args:
+ if fnmatch.fnmatch(tag['name'], pattern):
+ selected.append(tag)
+ break
+ if not selected:
+ print _("No tags matched")
+ else:
+ selected = []
+ for name in args:
+ tag = session.getTag(name)
+ if tag is None:
+ parser.error(_("No such tag: %s") % name)
+ assert False
+ selected.append(tag)
+ for tag in selected:
+ opts = {}
+ if tag['locked']:
+ opts['locked'] = False
+ if tag['perm_id']:
+ opts['perm'] = None
+ if not opts:
+ print "Tag %(name)s: not locked" % tag
+ continue
+ if options.test:
+ print "Tag %s: skipping changes: %r" % (tag['name'], opts)
+ else:
+ session.editTag2(tag['id'], **opts)
+
+def handle_add_tag_inheritance(options, session, args):
+ """[admin] Add to a tag's inheritance"""
+ usage = _("usage: %prog add-tag-inheritance [options] tag parent-tag")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--priority", help=_("Specify priority"))
+ parser.add_option("--maxdepth", help=_("Specify max depth"))
+ parser.add_option("--intransitive", action="store_true", help=_("Set intransitive"))
+ parser.add_option("--noconfig", action="store_true", help=_("Set to packages only"))
+ parser.add_option("--pkg-filter", help=_("Specify the package filter"))
+ parser.add_option("--force", help=_("Force adding a parent to a tag that already has that parent tag"))
+ (options, args) = parser.parse_args(args)
+
+ if len(args) != 2:
+ parser.error(_("This command takes exctly two argument: a tag name or ID and that tag's new parent name or ID"))
+ assert False
+
+ activate_session(session)
+
+ tag = session.getTag(args[0])
+ if not tag:
+ parser.error(_("Invalid tag: %s" % args[0]))
+
+ parent = session.getTag(args[1])
+ if not parent:
+ parser.error(_("Invalid tag: %s" % args[1]))
+
+ inheritanceData = session.getInheritanceData(tag['id'])
+ priority = options.priority and int(options.priority) or 0
+ sameParents = [datum for datum in inheritanceData if datum['parent_id'] == parent['id']]
+ samePriority = [datum for datum in inheritanceData if datum['priority'] == priority]
+
+ if sameParents and not options.force:
+ print _("Error: You are attempting to add %s as %s's parent even though it already is %s's parent."
+ % (parent['name'], tag['name'], tag['name']))
+ print _("Please use --force if this is what you really want to do.")
+ return
+ if samePriority:
+ print _("Error: There is already an active inheritance with that priority on %s, please specify a different priority with --priority." % tag['name'])
+ return
+
+ new_data = {}
+ new_data['parent_id'] = parent['id']
+ new_data['priority'] = priority
+ if options.maxdepth and options.maxdepth.isdigit():
+ new_data['maxdepth'] = int(options.maxdepth)
+ else:
+ new_data['maxdepth'] = None
+ new_data['intransitive'] = options.intransitive or False
+ new_data['noconfig'] = options.noconfig or False
+ new_data['pkg_filter'] = options.pkg_filter or ''
+
+ inheritanceData.append(new_data)
+ session.setInheritanceData(tag['id'], inheritanceData)
+
+
+def handle_edit_tag_inheritance(options, session, args):
+ """[admin] Edit tag inheritance"""
+ usage = _("usage: %prog edit-tag-inheritance [options] tag <parent> <priority>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--priority", help=_("Specify a new priority"))
+ parser.add_option("--maxdepth", help=_("Specify max depth"))
+ parser.add_option("--intransitive", action="store_true", help=_("Set intransitive"))
+ parser.add_option("--noconfig", action="store_true", help=_("Set to packages only"))
+ parser.add_option("--pkg-filter", help=_("Specify the package filter"))
+ (options, args) = parser.parse_args(args)
+
+ if len(args) < 1:
+ parser.error(_("This command takes at lease one argument: a tag name or ID"))
+ assert False
+
+ if len(args) > 3:
+ parser.error(_("This command takes at most three argument: a tag name or ID, a parent tag name or ID, and a priority"))
+ assert False
+
+ activate_session(session)
+
+ tag = session.getTag(args[0])
+ if not tag:
+ parser.error(_("Invalid tag: %s" % args[0]))
+
+ parent = None
+ priority = None
+ if len(args) > 1:
+ parent = session.getTag(args[1])
+ if not parent:
+ parser.error(_("Invalid tag: %s" % args[1]))
+ if len(args) > 2:
+ priority = int(args[2])
+
+ data = session.getInheritanceData(tag['id'])
+ if parent and data:
+ data = [datum for datum in data if datum['parent_id'] == parent['id']]
+ if priority and data:
+ data = [datum for datum in data if datum['priority'] == priority]
+
+ if len(data) == 0:
+ print _("No inheritance link found to remove. Please check your arguments")
+ return 1
+ elif len(data) > 1:
+ print _("Multiple matches for tag.")
+ if not parent:
+ print _("Please specify a parent on the command line.")
+ return 1
+ if not priority:
+ print _("Please specify a priority on the command line.")
+ return 1
+ print _("Error: Key constraints may be broken. Exiting.")
+ return 1
+
+ # len(data) == 1
+ data = data[0]
+
+ inheritanceData = session.getInheritanceData(tag['id'])
+ samePriority = [datum for datum in inheritanceData if str(datum['priority']) == options.priority]
+ if samePriority:
+ print _("Error: There is already an active inheritance with that priority on %s, please specify a different priority with --priority.") % tag['name']
+ return 1
+
+ new_data = data.copy()
+ if options.priority is not None and options.priority.isdigit():
+ new_data['priority'] = int(options.priority)
+ if options.maxdepth is not None:
+ if options.maxdepth.isdigit():
+ new_data['maxdepth'] = int(options.maxdepth)
+ elif options.maxdepth.lower() == "none":
+ new_data['maxdepth'] = None
+ else:
+ print _("Invalid maxdepth: %s") % options.maxdepth
+ return 1
+ if options.intransitive:
+ new_data['intransitive'] = options.intransitive
+ if options.noconfig:
+ new_data['noconfig'] = options.noconfig
+ if options.pkg_filter:
+ new_data['pkg_filter'] = options.pkg_filter
+
+ # find the data we want to edit and replace it
+ index = inheritanceData.index(data)
+ inheritanceData[index] = new_data
+ session.setInheritanceData(tag['id'], inheritanceData)
+
+def handle_remove_tag_inheritance(options, session, args):
+ """[admin] Remove a tag inheritance link"""
+ usage = _("usage: %prog remove-tag-inheritance tag <parent> <priority>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+
+ if len(args) < 1:
+ parser.error(_("This command takes at lease one argument: a tag name or ID"))
+ assert False
+
+ if len(args) > 3:
+ parser.error(_("This command takes at most three argument: a tag name or ID, a parent tag name or ID, and a priority"))
+ assert False
+
+ activate_session(session)
+
+ tag = session.getTag(args[0])
+ if not tag:
+ parser.error(_("Invalid tag: %s" % args[0]))
+
+ parent = None
+ priority = None
+ if len(args) > 1:
+ parent = session.getTag(args[1])
+ if not parent:
+ parser.error(_("Invalid tag: %s" % args[1]))
+ if len(args) > 2:
+ priority = int(args[2])
+
+ data = session.getInheritanceData(tag['id'])
+ if parent and data:
+ data = [datum for datum in data if datum['parent_id'] == parent['id']]
+ if priority and data:
+ data = [datum for datum in data if datum['priority'] == priority]
+
+ if len(data) == 0:
+ print _("No inheritance link found to remove. Please check your arguments")
+ return
+ elif len(data) > 1:
+ print _("Multiple matches for tag.")
+ if not parent:
+ print _("Please specify a parent on the command line.")
+ return
+ if not priority:
+ print _("Please specify a priority on the command line.")
+ return
+ print _("Error: Key constrainsts may be broken. Exiting.")
+ return
+
+ # len(data) == 1
+ data = data[0]
+
+ inheritanceData = session.getInheritanceData(tag['id'])
+
+ new_data = data.copy()
+ new_data['delete link'] = True
+
+ # find the data we want to edit and replace it
+ index = inheritanceData.index(data)
+ inheritanceData[index] = new_data
+ session.setInheritanceData(tag['id'], inheritanceData)
+
+def anon_handle_show_groups(options, session, args):
+ "Show groups data for a tag"
+ usage = _("usage: %prog show-groups [options] <tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--comps", action="store_true", help=_("Print in comps format"))
+ parser.add_option("-x", "--expand", action="store_true", default=False,
+ help=_("Expand groups in comps format"))
+ parser.add_option("--spec", action="store_true", help=_("Print build spec"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ activate_session(session)
+ tag = args[0]
+ groups = session.getTagGroups(tag)
+ if options.comps:
+ print koji.generate_comps(groups, expand_groups=options.expand)
+ elif options.spec:
+ print koji.make_groups_spec(groups,name='buildgroups',buildgroup='build')
+ else:
+ pprint.pprint(groups)
+
+def anon_handle_list_external_repos(options, session, args):
+ "List external repos"
+ usage = _("usage: %prog list-external-repos [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--url", help=_("Select by url"))
+ parser.add_option("--name", help=_("Select by name"))
+ parser.add_option("--id", type="int", help=_("Select by id"))
+ parser.add_option("--tag", help=_("Select by tag"))
+ parser.add_option("--used", action='store_true', help=_("List which tags use the repo(s)"))
+ parser.add_option("--inherit", action='store_true', help=_("Follow tag inheritance when selecting by tag"))
+ parser.add_option("--event", type='int', metavar="EVENT#", help=_("Query at event"))
+ parser.add_option("--ts", type='int', metavar="TIMESTAMP", help=_("Query at timestamp"))
+ parser.add_option("--repo", type='int', metavar="REPO#",
+ help=_("Query at event corresponding to (nonexternal) repo"))
+ parser.add_option("--quiet", action="store_true", default=options.quiet,
+ help=_("Do not display the column headers"))
+ (options, args) = parser.parse_args(args)
+ if len(args) > 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+ opts = {}
+ event = koji.util.eventFromOpts(session, options)
+ if event:
+ opts['event'] = event['id']
+ event['timestr'] = time.asctime(time.localtime(event['ts']))
+ print "Querying at event %(id)i (%(timestr)s)" % event
+ if options.tag:
+ format = "tag"
+ opts['tag_info'] = options.tag
+ opts['repo_info'] = options.id or options.name or None
+ if opts['repo_info']:
+ if options.inherit:
+ parser.error(_("Can't select by repo when using --inherit"))
+ assert False
+ if options.inherit:
+ del opts['repo_info']
+ data = session.getExternalRepoList(**opts)
+ format = "multitag"
+ else:
+ data = session.getTagExternalRepos(**opts)
+ elif options.used:
+ format = "multitag"
+ opts['repo_info'] = options.id or options.name or None
+ data = session.getTagExternalRepos(**opts)
+ else:
+ format = "basic"
+ opts['info'] = options.id or options.name or None
+ opts['url'] = options.url or None
+ data = session.listExternalRepos(**opts)
+
+ # There are three different output formats
+ # 1) Listing just repo data (name, url)
+ # 2) Listing repo data for a tag (priority, name, url)
+ # 3) Listing repo data for multiple tags (tag, priority, name, url)
+ if format == "basic":
+ format = "%(name)-25s %(url)s"
+ header1 = "%-25s %s" % ("External repo name", "URL")
+ header2 = "%s %s" % ("-"*25, "-"*40)
+ elif format == "tag":
+ format = "%(priority)-3i %(external_repo_name)-25s %(url)s"
+ header1 = "%-3s %-25s %s" % ("Pri", "External repo name", "URL")
+ header2 = "%s %s %s" % ("-"*3, "-"*25, "-"*40)
+ elif format == "multitag":
+ format = "%(tag_name)-20s %(priority)-3i %(external_repo_name)s"
+ header1 = "%-20s %-3s %s" % ("Tag", "Pri", "External repo name")
+ header2 = "%s %s %s" % ("-"*20, "-"*3, "-"*25)
+ if not options.quiet:
+ print header1
+ print header2
+ for rinfo in data:
+ print format % rinfo
+
+def _pick_external_repo_priority(session, tag):
+ """pick priority after current ones, leaving space for later insertions"""
+ repolist = session.getTagExternalRepos(tag_info=tag)
+ #ordered by priority
+ if not repolist:
+ priority = 5
+ else:
+ priority = (repolist[-1]['priority'] + 7) / 5 * 5
+ #at least 3 higher than current max and a multiple of 5
+ return priority
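+ # e.g. (hypothetical): with a current max priority of 10, (10 + 7) / 5 * 5
+ # gives 15 under integer division: at least 3 above the max, on a multiple of 5.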
+
+def _parse_tagpri(tagpri):
+ parts = tagpri.rsplit('::', 1)
+ tag = parts[0]
+ if len(parts) == 1:
+ return tag, None
+ elif parts[1] in ('auto', '-1'):
+ return tag, None
+ else:
+ try:
+ pri = int(parts[1])
+ except ValueError:
+ raise koji.GenericError, "Invalid priority: %s" % parts[1]
+ return tag, pri
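+ # e.g. (hypothetical): _parse_tagpri('f24-build::10') -> ('f24-build', 10);
+ # 'f24-build' and 'f24-build::auto' both yield ('f24-build', None).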
+
+def handle_add_external_repo(options, session, args):
+ "[admin] Create an external repo and/or add one to a tag"
+ usage = _("usage: %prog add-external-repo [options] name [url]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-t", "--tag", action="append", metavar="TAG",
+ help=_("Also add repo to tag. Use tag::N to set priority"))
+ parser.add_option("-p", "--priority", type='int',
+ help=_("Set priority (when adding to tag)"))
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+ if len(args) == 1:
+ name = args[0]
+ rinfo = session.getExternalRepo(name, strict=True)
+ if not options.tag:
+ parser.error(_("A url is required to create an external repo entry"))
+ elif len(args) == 2:
+ name, url = args
+ rinfo = session.createExternalRepo(name, url)
+ print "Created external repo %(id)i" % rinfo
+ else:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ if options.tag:
+ for tagpri in options.tag:
+ tag, priority = _parse_tagpri(tagpri)
+ if priority is None:
+ if options.priority is not None:
+ priority = options.priority
+ else:
+ priority = _pick_external_repo_priority(session, tag)
+ session.addExternalRepoToTag(tag, rinfo['name'], priority)
+ print "Added external repo %s to tag %s (priority %i)" \
+ % (rinfo['name'], tag, priority)
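+ # e.g. (hypothetical): "koji add-external-repo -t f24-build::10 epel
+ # http://example.com/epel/" creates repo "epel" and attaches it to tag
+ # "f24-build" at priority 10.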
+
+def handle_edit_external_repo(options, session, args):
+ "[admin] Edit data for an external repo"
+ usage = _("usage: %prog edit-external-repo name")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--url", help=_("Change the url"))
+ parser.add_option("--name", help=_("Change the name"))
+ (options, args) = parser.parse_args(args)
+ if len(args) != 1:
+ parser.error(_("Incorrect number of arguments"))
+ parser.error(_("This command takes no arguments"))
+ assert False
+ opts = {}
+ if options.url:
+ opts['url'] = options.url
+ if options.name:
+ opts['name'] = options.name
+ if not opts:
+ parser.error(_("No changes specified"))
+ activate_session(session)
+ session.editExternalRepo(args[0], **opts)
+
+def handle_remove_external_repo(options, session, args):
+ "[admin] Remove an external repo from a tag or tags, or remove entirely"
+ usage = _("usage: %prog remove-external-repo repo [tag ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--alltags", action="store_true", help=_("Remove from all tags"))
+ parser.add_option("--force", action='store_true', help=_("Force action"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Incorrect number of arguments"))
+ assert False
+ activate_session(session)
+ repo = args[0]
+ tags = args[1:]
+ delete = not bool(tags)
+ data = session.getTagExternalRepos(repo_info=repo)
+ current_tags = [d['tag_name'] for d in data]
+ if options.alltags:
+ delete = False
+ if tags:
+ parser.error(_("Do not specify tags when using --alltags"))
+ assert False
+ if not current_tags:
+ print _("External repo %s not associated with any tags") % repo
+ return 0
+ tags = current_tags
+ if delete:
+ #removing entirely
+ if current_tags and not options.force:
+ print _("Error: external repo %s used by tag(s): %s") % (repo, ', '.join(current_tags))
+ print _("Use --force to remove anyway")
+ return 1
+ session.deleteExternalRepo(args[0])
+ else:
+ for tag in tags:
+ if tag not in current_tags:
+ print _("External repo %s not associated with tag %s") % (repo, tag)
+ continue
+ session.removeExternalRepoFromTag(tag, repo)
+
+# This handler is for spinning livecd images
+#
+def handle_spin_livecd(options, session, args):
+ """[admin] Create a live CD image given a kickstart file"""
+
+ # Usage & option parsing.
+ usage = _("usage: %prog spin-livecd [options] <name> <version> <target>" +
+ " <arch> <kickstart-file>")
+ usage += _("\n(Specify the --help global option for a list of other " +
+ "help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the livecd creation, even if running in the background"))
+ parser.add_option("--nowait", action="store_false", dest="wait",
+ help=_("Don't wait on livecd creation"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the livecd creation task at a lower priority"))
+ parser.add_option("--ksurl", metavar="SCMURL",
+ help=_("The URL to the SCM containing the kickstart file"))
+ parser.add_option("--ksversion", metavar="VERSION",
+ help=_("The syntax version used in the kickstart file"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Create a scratch LiveCD image"))
+ parser.add_option("--repo", action="append",
+ help=_("Specify a repo that will override the repo used to install " +
+ "RPMs in the LiveCD. May be used multiple times. The " +
+ "build tag repo associated with the target is the default."))
+ parser.add_option("--release", help=_("Forcibly set the release field"))
+ parser.add_option("--specfile", metavar="URL",
+ help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag package"))
+ (task_options, args) = parser.parse_args(args)
+
+ # Make sure the target and kickstart are specified.
+ if len(args) != 5:
+ parser.error(_("Five arguments are required: a name, a version, an" +
+ " architecture, a build target, and a relative path to" +
+ " a kickstart file."))
+ assert False
+ _build_image(options, task_options, session, args, 'livecd')
+
+# This handler is for spinning appliance images
+#
+def handle_spin_appliance(options, session, args):
+ """[admin] Create an appliance given a kickstart file"""
+
+ # Usage & option parsing
+ usage = _("usage: %prog spin-appliance [options] <name> <version> " +
+ "<target> <arch> <kickstart-file>")
+ usage += _("\n(Specify the --help global option for a list of other " +
+ "help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the appliance creation, even if running in the background"))
+ parser.add_option("--nowait", action="store_false", dest="wait",
+ help=_("Don't wait on appliance creation"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the appliance creation task at a lower priority"))
+ parser.add_option("--ksurl", metavar="SCMURL",
+ help=_("The URL to the SCM containing the kickstart file"))
+ parser.add_option("--ksversion", metavar="VERSION",
+ help=_("The syntax version used in the kickstart file"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Create a scratch appliance"))
+ parser.add_option("--repo", action="append",
+ help=_("Specify a repo that will override the repo used to install " +
+ "RPMs in the appliance. May be used multiple times. The " +
+ "build tag repo associated with the target is the default."))
+ parser.add_option("--release", help=_("Forcibly set the release field"))
+ parser.add_option("--specfile", metavar="URL",
+ help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag package"))
+ parser.add_option("--vmem", metavar="VMEM", default=None,
+ help=_("Set the amount of virtual memory in the appliance in MB, " +
+ "default is 512"))
+ parser.add_option("--vcpu", metavar="VCPU", default=None,
+ help=_("Set the number of virtual cpus in the appliance, " +
+ "default is 1"))
+ parser.add_option("--format", metavar="DISK_FORMAT", default='raw',
+ help=_("Disk format, default is raw. Other options are qcow, " +
+ "qcow2, and vmx."))
+
+ (task_options, args) = parser.parse_args(args)
+
+ print 'spin-appliance is deprecated and will be replaced with image-build'
+ # Make sure the target and kickstart are specified.
+ if len(args) != 5:
+ parser.error(_("Five arguments are required: a name, a version, " +
+ "an architecture, a build target, and a relative path" +
+ " to a kickstart file."))
+ assert False
+ _build_image(options, task_options, session, args, 'appliance')
+
+def handle_image_build_indirection(options, session, args):
+ """Create a disk image using other disk images via the Indirection plugin"""
+ usage = _("usage: %prog image-build-indirection [base_image] " +
+ "[utility_image] [indirection_build_template]")
+ usage += _("\n %prog image-build --config FILE")
+ usage += _("\n\n(Specify the --help global option for a list of other " +
+ "help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--config",
+ help=_("Use a configuration file to define image-build options " +
+ "instead of command line options (they will be ignored)."))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the image creation task at a lower priority"))
+ parser.add_option("--name",
+ help=_("Name of the output image"))
+ parser.add_option("--version",
+ help=_("Version of the output image"))
+ parser.add_option("--release",
+ help=_("Release of the output image"))
+ parser.add_option("--arch",
+ help=_("Architecture of the output image and input images"))
+ parser.add_option("--target",
+ help=_("Build target to use for the indirection build"))
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not tag the resulting build"))
+ parser.add_option("--base-image-task",
+ help=_("ID of the createImage task of the base image to be used"))
+ parser.add_option("--base-image-build",
+ help=_("NVR or build ID of the base image to be used"))
+ parser.add_option("--utility-image-task",
+ help=_("ID of the createImage task of the utility image to be used"))
+ parser.add_option("--utility-image-build",
+ help=_("NVR or build ID of the utility image to be used"))
+ parser.add_option("--indirection-template",
+ help=_("Name of the local file, or SCM file containing the template used to drive the indirection plugin"))
+ parser.add_option("--indirection-template-url",
+ help=_("SCM URL containing the template used to drive the indirection plugin"))
+ parser.add_option("--results-loc",
+ help=_("Relative path inside the working space image where the results should be extracted from"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Create a scratch image"))
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the image creation, even if running in the background"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+
+
+ (task_options, args) = parser.parse_args(args)
+ _build_image_indirection(options, task_options, session, args)
+
+
+def _build_image_indirection(options, task_opts, session, args):
+ """
+ A private helper function for builds using the indirection plugin of ImageFactory
+ """
+
+ # Do some sanity checks before even attempting to create the session
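+ # Each pair below is an exclusive-or check: exactly one of the task and
+ # build variants must be supplied.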
+ if not (bool(task_opts.utility_image_task) !=
+ bool(task_opts.utility_image_build)):
+ raise koji.GenericError, _("You must specify either a utility-image task or build ID/NVR")
+
+ if not (bool(task_opts.base_image_task) !=
+ bool(task_opts.base_image_build)):
+ raise koji.GenericError, _("You must specify either a base-image task or build ID/NVR")
+
+ required_opts = [ 'name', 'version', 'arch', 'target', 'indirection_template', 'results_loc' ]
+ optional_opts = [ 'indirection_template_url', 'scratch', 'utility_image_task', 'utility_image_build',
+ 'base_image_task', 'base_image_build', 'release', 'skip_tag' ]
+
+ missing = [ ]
+ for opt in required_opts:
+ if not getattr(task_opts, opt, None):
+ missing.append(opt)
+
+ if len(missing) > 0:
+ print "Missing the following required options:" ,
+ for opt in missing:
+ print "--" + opt.replace('_','-') ,
+ print
+ raise koji.GenericError, _("Missing required options specified above")
+
+ activate_session(session)
+
+ # Set the task's priority. Users can only lower it with --background.
+ priority = None
+ if task_opts.background:
+ # relative to koji.PRIO_DEFAULT; higher means a "lower" priority.
+ priority = 5
+ if _running_in_bg() or task_opts.noprogress:
+ callback = None
+ else:
+ callback = _progress_callback
+
+ # We do some early sanity checking of the given target.
+ # Kojid gets these values again later on, but we check now as a convenience
+ # for the user.
+
+ tmp_target = session.getBuildTarget(task_opts.target)
+ if not tmp_target:
+ raise koji.GenericError, _("Unknown build target: %s" % tmp_target)
+ dest_tag = session.getTag(tmp_target['dest_tag'])
+ if not dest_tag:
+ raise koji.GenericError, _("Unknown destination tag: %s" %
+ tmp_target['dest_tag_name'])
+
+ # Set the architecture
+ task_opts.arch = koji.canonArch(task_opts.arch)
+
+
+ # Upload the indirection template file to the staging area.
+ # If it's a URL, it's kojid's job to go get it when it does the checkout.
+ if not task_opts.indirection_template_url:
+ if not task_opts.scratch:
+ # only scratch builds can omit indirection_template_url
+ raise koji.GenericError, _("Non-scratch builds must provide a URL for the indirection template")
+ templatefile = task_opts.indirection_template
+ serverdir = _unique_path('cli-image-indirection')
+ session.uploadWrapper(templatefile, serverdir, callback=callback)
+ task_opts.indirection_template = os.path.join('work', serverdir,
+ os.path.basename(templatefile))
+ print
+
+ hub_opts = { }
+ # Just pass everything in as opts. No positional arguments at all. Why not?
+ for opt in required_opts + optional_opts:
+ val = getattr(task_opts, opt, None)
+ # We pass these through even if they are None
+ # The builder code can then check if they are set without using getattr
+ hub_opts[opt] = val
+
+ # finally, create the task.
+ task_id = session.buildImageIndirection(opts=hub_opts,
+ priority=priority)
+
+ if not options.quiet:
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ #if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):
+ # session.logout()
+ # return watch_tasks(session, [task_id], quiet=options.quiet)
+ #else:
+ # return
+
+
+def handle_image_build(options, session, args):
+ """Create a disk image given an install tree"""
+ formats = ('vmdk', 'qcow', 'qcow2', 'vdi', 'vpc', 'rhevm-ova',
+ 'vsphere-ova', 'vagrant-virtualbox', 'vagrant-libvirt',
+ 'docker', 'raw-xz')
+ usage = _("usage: %prog image-build [options] <name> <version> " +
+ "<target> <install-tree-url> <arch> [<arch>...]")
+ usage += _("\n %prog image-build --config FILE")
+ usage += _("\n\n(Specify the --help global option for a list of other " +
+ "help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--background", action="store_true",
+ help=_("Run the image creation task at a lower priority"))
+ parser.add_option("--config",
+ help=_("Use a configuration file to define image-build options " +
+ "instead of command line options (they will be ignored)."))
+ parser.add_option("--disk-size", default=10,
+ help=_("Set the disk device size in gigabytes"))
+ parser.add_option("--distro",
+ help=_("specify the RPM based distribution the image will be based " +
+ "on with the format RHEL-X.Y, CentOS-X.Y, SL-X.Y, or Fedora-NN. " +
+ "The packages for the Distro you choose must have been built " +
+ "in this system."))
+ parser.add_option("--format", default=[], action="append",
+ help=_("Convert results to one or more formats " +
+ "(%s), this option may be used " % ', '.join(formats) +
+ "multiple times. By default, specifying this option will " +
+ "omit the raw disk image (which is 10G in size) from the " +
+ "build results. If you really want it included with converted " +
+ "images, pass in 'raw' as an option."))
+ parser.add_option("--kickstart", help=_("Path to a local kickstart file"))
+ parser.add_option("--ksurl", metavar="SCMURL",
+ help=_("The URL to the SCM containing the kickstart file"))
+ parser.add_option("--ksversion", metavar="VERSION",
+ help=_("The syntax version used in the kickstart file"))
+ parser.add_option("--noprogress", action="store_true",
+ help=_("Do not display progress of the upload"))
+ parser.add_option("--nowait", action="store_false", dest="wait",
+ help=_("Don't wait on image creation"))
+ parser.add_option("--ova-option", action="append",
+ help=_("Override a value in the OVA description XML. Provide a value " +
+ "in a name=value format, such as 'ovf_memory_mb=6144'"))
+ parser.add_option("--release", help=_("Forcibly set the release field"))
+ parser.add_option("--repo", action="append",
+ help=_("Specify a repo that will override the repo used to install " +
+ "RPMs in the image. May be used multiple times. The " +
+ "build tag repo associated with the target is the default."))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Create a scratch image"))
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag package"))
+ parser.add_option("--specfile", metavar="URL",
+ help=_("SCM URL to spec file fragment to use to generate wrapper RPMs"))
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the image creation, even if running in the background"))
+
+ (task_options, args) = parser.parse_args(args)
+
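+ # The --config file is plain ConfigParser INI. A minimal hypothetical
+ # example (all values are placeholders):
+ #   [image-build]
+ #   name = myimage
+ #   version = 1.0
+ #   target = some-target
+ #   install_tree = http://example.com/install/tree/
+ #   arches = x86_64,i386
+ #   format = qcow2,vmdk
+ #
+ #   [ova-options]
+ #   ovf_memory_mb = 6144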
+ if task_options.config:
+ if not os.path.exists(task_options.config):
+ parser.error(_("%s not found!" % task_options.config))
+ section = 'image-build'
+ config = ConfigParser.ConfigParser()
+ conf_fd = open(task_options.config)
+ config.readfp(conf_fd)
+ conf_fd.close()
+ if not config.has_section(section):
+ parser.error(_("single section called [%s] is required" % section))
+ # pluck out the positional arguments first
+ args = []
+ for arg in ('name', 'version', 'target', 'install_tree'):
+ args.append(config.get(section, arg))
+ config.remove_option(section, arg)
+ args.extend(config.get(section, 'arches').split(','))
+ config.remove_option(section, 'arches')
+ # turn comma-separated options into lists
+ for arg in ('repo', 'format'):
+ if config.has_option(section, arg):
+ setattr(task_options, arg, config.get(section, arg).split(','))
+ config.remove_option(section, arg)
+ # handle everything else
+ for k, v in config.items(section):
+ setattr(task_options, k, v)
+
+ # ova-options belong in their own section
+ section = 'ova-options'
+ if config.has_section(section):
+ task_options.ova_option = []
+ for k, v in config.items(section):
+ task_options.ova_option.append('%s=%s' % (k, v))
+
+ else:
+ if len(args) < 5:
+ parser.error(_("At least five arguments are required: a name, " +
+ "a version, a build target, a URL to an " +
+ "install tree, and 1 or more architectures."))
+ if not task_options.ksurl and not task_options.kickstart:
+ parser.error(_('You must specify --kickstart or --ksurl'))
+ if not task_options.distro:
+ parser.error(
+ _("You must specify --distro. Examples: Fedora-16, RHEL-6.4, " +
+ "SL-6.4 or CentOS-6.4"))
+ _build_image_oz(options, task_options, session, args)
+
+def _build_image(options, task_opts, session, args, img_type):
+ """
+ A private helper function that houses common CLI code for building
+ images with chroot-based tools.
+ """
+
+ if img_type not in ('livecd', 'appliance'):
+ raise koji.GenericError, 'Unrecognized image type: %s' % img_type
+ activate_session(session)
+
+ # Set the task's priority. Users can only lower it with --background.
+ priority = None
+ if task_opts.background:
+ # relative to koji.PRIO_DEFAULT; higher means a "lower" priority.
+ priority = 5
+ if _running_in_bg() or task_opts.noprogress:
+ callback = None
+ else:
+ callback = _progress_callback
+
+ # We do some early sanity checking of the given target.
+ # Kojid gets these values again later on, but we check now as a convenience
+ # for the user.
+ target = args[2]
+ tmp_target = session.getBuildTarget(target)
+ if not tmp_target:
+ raise koji.GenericError, _("Unknown build target: %s" % target)
+ dest_tag = session.getTag(tmp_target['dest_tag'])
+ if not dest_tag:
+ raise koji.GenericError, _("Unknown destination tag: %s" %
+ tmp_target['dest_tag_name'])
+
+ # Set the architecture
+ arch = koji.canonArch(args[3])
+
+ # Upload the KS file to the staging area.
+ # If it's a URL, it's kojid's job to go get it when it does the checkout.
+ ksfile = args[4]
+
+ if not task_opts.ksurl:
+ serverdir = _unique_path('cli-' + img_type)
+ session.uploadWrapper(ksfile, serverdir, callback=callback)
+ ksfile = os.path.join(serverdir, os.path.basename(ksfile))
+ print
+
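+ # Forward only the options the user actually set; anything left as None
+ # keeps the hub's default.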
+ hub_opts = {}
+ for opt in ('isoname', 'ksurl', 'ksversion', 'scratch', 'repo',
+ 'release', 'skip_tag', 'vmem', 'vcpu', 'format', 'specfile'):
+ val = getattr(task_opts, opt, None)
+ if val is not None:
+ hub_opts[opt] = val
+
+ # finally, create the task.
+ task_id = session.buildImage(args[0], args[1], arch, target, ksfile,
+ img_type, opts=hub_opts, priority=priority)
+
+ if not options.quiet:
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=options.quiet)
+ else:
+ return
+
+def _build_image_oz(options, task_opts, session, args):
+ """
+ A private helper function that houses common CLI code for building
+ images with Oz and ImageFactory
+ """
+ activate_session(session)
+
+ # Set the task's priority. Users can only lower it with --background.
+ priority = None
+ if task_opts.background:
+ # relative to koji.PRIO_DEFAULT; higher means a "lower" priority.
+ priority = 5
+ if _running_in_bg() or task_opts.noprogress:
+ callback = None
+ else:
+ callback = _progress_callback
+
+ # We do some early sanity checking of the given target.
+ # Kojid gets these values again later on, but we check now as a convenience
+ # for the user.
+ target = args[2]
+ tmp_target = session.getBuildTarget(target)
+ if not tmp_target:
+ raise koji.GenericError, _("Unknown build target: %s" % target)
+ dest_tag = session.getTag(tmp_target['dest_tag'])
+ if not dest_tag:
+ raise koji.GenericError, _("Unknown destination tag: %s" %
+ tmp_target['dest_tag_name'])
+
+ # Set the architectures
+ arches = []
+ for arch in args[4:]:
+ arches.append(koji.canonArch(arch))
+
+ # Upload the KS file to the staging area.
+ # If it's a URL, it's kojid's job to go get it when it does the checkout.
+ if not task_opts.ksurl:
+ if not task_opts.scratch:
+ # only scratch builds can omit ksurl
+ raise koji.GenericError, _("Non-scratch builds must provide ksurl")
+ ksfile = task_opts.kickstart
+ serverdir = _unique_path('cli-image')
+ session.uploadWrapper(ksfile, serverdir, callback=callback)
+ task_opts.kickstart = os.path.join('work', serverdir,
+ os.path.basename(ksfile))
+ print
+
+ hub_opts = {}
+ for opt in ('ksurl', 'ksversion', 'kickstart', 'scratch', 'repo',
+ 'release', 'skip_tag', 'specfile', 'distro', 'format',
+ 'disk_size', 'ova_option'):
+ val = getattr(task_opts, opt, None)
+ if val is not None:
+ hub_opts[opt] = val
+
+ # finally, create the task.
+ task_id = session.buildImageOz(args[0], args[1], arches, target, args[3],
+ opts=hub_opts, priority=priority)
+
+ if not options.quiet:
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if task_opts.wait or (task_opts.wait is None and not _running_in_bg()):
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=options.quiet)
+ else:
+ return
+
+def handle_win_build(options, session, args):
+ """Build a Windows package from source"""
+ # Usage & option parsing
+ usage = _("usage: %prog win-build [options] target URL VM")
+ usage += _("\n(Specify the --help global option for a list of other " +
+ "help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--winspec", metavar="URL",
+ help=_("SCM URL to retrieve the build descriptor from. " + \
+ "If not specified, the winspec must be in the root directory " + \
+ "of the source repository."))
+ parser.add_option("--patches", metavar="URL",
+ help=_("SCM URL of a directory containing patches to apply " + \
+ "to the sources before building"))
+ parser.add_option("--cpus", type="int",
+ help=_("Number of cpus to allocate to the build VM " + \
+ "(requires admin access)"))
+ parser.add_option("--mem", type="int",
+ help=_("Amount of memory (in megabytes) to allocate to the build VM " + \
+ "(requires admin access)"))
+ parser.add_option("--static-mac", action="store_true",
+ help=_("Retain the original MAC address when cloning the VM"))
+ parser.add_option("--specfile", metavar="URL",
+ help=_("SCM URL of a spec file fragment to use to generate wrapper RPMs"))
+ parser.add_option("--scratch", action="store_true",
+ help=_("Perform a scratch build"))
+ parser.add_option("--repo-id", type="int", help=_("Use a specific repo"))
+ parser.add_option("--skip-tag", action="store_true",
+ help=_("Do not attempt to tag package"))
+ parser.add_option("--background", action="store_true",
+ help=_("Run the build at a lower priority"))
+ parser.add_option("--wait", action="store_true",
+ help=_("Wait on the build, even if running in the background"))
+ parser.add_option("--nowait", action="store_false", dest="wait",
+ help=_("Don't wait on build"))
+ parser.add_option("--quiet", action="store_true",
+ help=_("Do not print the task information"), default=options.quiet)
+ (build_opts, args) = parser.parse_args(args)
+ if len(args) != 3:
+ parser.error(_("Exactly three arguments (a build target, a SCM URL, and a VM name) are required"))
+ assert False
+ activate_session(session)
+ target = args[0]
+ if target.lower() == "none" and build_opts.repo_id:
+ target = None
+ build_opts.skip_tag = True
+ else:
+ build_target = session.getBuildTarget(target)
+ if not build_target:
+ parser.error(_("Unknown build target: %s" % target))
+ dest_tag = session.getTag(build_target['dest_tag'])
+ if not dest_tag:
+ parser.error(_("Unknown destination tag: %s" % build_target['dest_tag_name']))
+ if dest_tag['locked'] and not build_opts.scratch:
+ parser.error(_("Destination tag %s is locked" % dest_tag['name']))
+ scmurl = args[1]
+ vm_name = args[2]
+ opts = {}
+ for key in ('winspec', 'patches', 'cpus', 'mem', 'static_mac',
+ 'specfile', 'scratch', 'repo_id', 'skip_tag'):
+ val = getattr(build_opts, key)
+ if val is not None:
+ opts[key] = val
+ priority = None
+ if build_opts.background:
+ #relative to koji.PRIO_DEFAULT
+ priority = 5
+ task_id = session.winBuild(vm_name, scmurl, target, opts, priority=priority)
+ if not build_opts.quiet:
+ print "Created task:", task_id
+ print "Task info: %s/taskinfo?taskID=%s" % (options.weburl, task_id)
+ if build_opts.wait or (build_opts.wait is None and not _running_in_bg()):
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=build_opts.quiet)
+ else:
+ return
+
+def handle_free_task(options, session, args):
+ "[admin] Free a task"
+ usage = _("usage: %prog free-task [options] <task-id> [<task-id> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+ tlist = []
+ for task_id in args:
+ try:
+ tlist.append(int(task_id))
+ except ValueError:
+ parser.error(_("task-id must be an integer"))
+ assert False
+ for task_id in tlist:
+ session.freeTask(task_id)
+
+def handle_cancel(options, session, args):
+ "Cancel tasks and/or builds"
+ usage = _("usage: %prog cancel [options] <task-id|build> [<task-id|build> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--justone", action="store_true", help=_("Do not cancel subtasks"))
+ parser.add_option("--full", action="store_true", help=_("Full cancellation (admin only)"))
+ parser.add_option("--force", action="store_true", help=_("Allow subtasks with --full"))
+ (options, args) = parser.parse_args(args)
+ if len(args) == 0:
+ parser.error(_("You must specify at least one task id or build"))
+ assert False
+ activate_session(session)
+ tlist = []
+ blist = []
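+ # Anything that parses as an integer is a task id; anything that parses
+ # as an n-v-r is a build.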
+ for arg in args:
+ try:
+ tlist.append(int(arg))
+ except ValueError:
+ try:
+ koji.parse_NVR(arg)
+ blist.append(arg)
+ except koji.GenericError:
+ parser.error(_("please specify only task ids (integer) or builds (n-v-r)"))
+ assert False
+ if tlist:
+ opts = {}
+ remote_fn = session.cancelTask
+ if options.justone:
+ opts['recurse'] = False
+ elif options.full:
+ remote_fn = session.cancelTaskFull
+ if options.force:
+ opts['strict'] = False
+ for task_id in tlist:
+ remote_fn(task_id, **opts)
+ for build in blist:
+ session.cancelBuild(build)
+
+def handle_set_task_priority(options, session, args):
+ "[admin] Set task priority"
+ usage = _("usage: %prog set-task-priority [options] --priority=<priority> <task-id> [task-id]...")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--priority", type="int", help=_("New priority"))
+ parser.add_option("--recurse", action="store_true", default=False, help=_("Change priority of child tasks as well"))
+ (options, args) = parser.parse_args(args)
+ if len(args) == 0:
+ parser.error(_("You must specify at least one task id"))
+ assert False
+
+ if options.priority is None:
+ parser.error(_("You must specify --priority"))
+ assert False
+ try:
+ tasks = [int(a) for a in args]
+ except ValueError:
+ parser.error(_("Task numbers must be integers"))
+
+ activate_session(session)
+
+ for task_id in tasks:
+ session.setTaskPriority(task_id, options.priority, options.recurse)
+
+def handle_list_tasks(options, session, args):
+ "Print the list of tasks"
+ usage = _("usage: %prog list-tasks [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--mine", action="store_true", help=_("Just print your tasks"))
+ parser.add_option("--user", help=_("Only tasks for this user"))
+ parser.add_option("--arch", help=_("Only tasks for this architecture"))
+ parser.add_option("--method", help=_("Only tasks of this method"))
+ parser.add_option("--channel", help=_("Only tasks in this channel"))
+ parser.add_option("--host", help=_("Only tasks for this host"))
+ parser.add_option("--quiet", action="store_true", help=_("Do not display the column headers"), default=options.quiet)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+ callopts = {
+ 'state' : [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')],
+ 'decode' : True,
+ }
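+ # Only active tasks (free, open, or assigned) are listed; finished and
+ # canceled tasks are skipped.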
+ if options.mine:
+ user = session.getLoggedInUser()
+ if not user:
+ print "Unable to determine user"
+ sys.exit(1)
+ callopts['owner'] = user['id']
+ if options.user:
+ user = session.getUser(options.user)
+ if not user:
+ print "No such user: %s" % options.user
+ sys.exit(1)
+ callopts['owner'] = user['id']
+ if options.arch:
+ arches = options.arch.replace(',',' ').split()
+ callopts['arch'] = arches
+ if options.method:
+ callopts['method'] = options.method
+ if options.channel:
+ chan = session.getChannel(options.channel)
+ if not chan:
+ print "No such channel: %s" % options.channel
+ sys.exit(1)
+ callopts['channel_id'] = chan['id']
+ if options.host:
+ host = session.getHost(options.host)
+ if not host:
+ print "No such host: %s" % options.host
+ sys.exit(1)
+ callopts['host_id'] = host['id']
+ qopts = {'order' : 'priority,create_time'}
+ tasklist = session.listTasks(callopts, qopts)
+ tasks = dict([(x['id'], x) for x in tasklist])
+ if not tasklist:
+ print "(no tasks)"
+ return
+ # thread the tasks: attach each subtask to its parent for nested display
+ for t in tasklist:
+ if t['parent'] is not None:
+ parent = tasks.get(t['parent'])
+ if parent:
+ parent.setdefault('children',[])
+ parent['children'].append(t)
+ t['sub'] = True
+ if not options.quiet:
+ print_task_headers()
+ for t in tasklist:
+ if t.get('sub'):
+ # this subtask will appear under another task
+ continue
+ print_task_recurse(t)
+
+def handle_set_pkg_arches(options, session, args):
+ "[admin] Set the list of extra arches for a package"
+ usage = _("usage: %prog set-pkg-arches [options] arches tag package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action='store_true', help=_("Force operation"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 3:
+ parser.error(_("Please specify an archlist, a tag, and at least one package"))
+ assert False
+ activate_session(session)
+ arches = ' '.join(args[0].replace(',',' ').split())
+ tag = args[1]
+ for package in args[2:]:
+ #really should implement multicall...
+ session.packageListSetArches(tag,package,arches,force=options.force)
+
+def handle_set_pkg_owner(options, session, args):
+ "[admin] Set the owner for a package"
+ usage = _("usage: %prog set-pkg-owner [options] owner tag package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action='store_true', help=_("Force operation"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 3:
+ parser.error(_("Please specify an owner, a tag, and at least one package"))
+ assert False
+ activate_session(session)
+ owner = args[0]
+ tag = args[1]
+ for package in args[2:]:
+ #really should implement multicall...
+ session.packageListSetOwner(tag,package,owner,force=options.force)
+
+def handle_set_pkg_owner_global(options, session, args):
+ "[admin] Set the owner for a package globally"
+ usage = _("usage: %prog set-pkg-owner-global [options] owner package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--verbose", action='store_true', help=_("List changes"))
+ parser.add_option("--test", action='store_true', help=_("Test mode"))
+ parser.add_option("--old-user", "--from", action="store", help=_("Only change ownership for packages belonging to this user"))
+ (options, args) = parser.parse_args(args)
+ if options.old_user:
+ if len(args) < 1:
+ parser.error(_("Please specify an owner"))
+ assert False
+ elif len(args) < 2:
+ parser.error(_("Please specify an owner and at least one package"))
+ assert False
+ activate_session(session)
+ owner = args[0]
+ packages = args[1:]
+ user = session.getUser(owner)
+ if not user:
+ print "No such user: %s" % owner
+ return 1
+ opts = {'with_dups' : True}
+ old_user = None
+ if options.old_user:
+ old_user = session.getUser(options.old_user)
+ if not old_user:
+ print "No such user: %s" % options.old_user
+ return 1
+ opts['userID'] = old_user['id']
+ to_change = []
+ for package in packages:
+ entries = session.listPackages(pkgID=package, **opts)
+ if not entries:
+ print "No data for package %s" % package
+ continue
+ to_change.extend(entries)
+ if not packages and options.old_user:
+ entries = session.listPackages(**opts)
+ if not entries:
+ print "No data for user %s" % old_user['name']
+ return 1
+ to_change.extend(entries)
+ for entry in to_change:
+ if user['id'] == entry['owner_id']:
+ if options.verbose:
+ print "Preserving owner=%s for package %s in tag %s" \
+ % (user['name'], package, entry['tag_name'] )
+ else:
+ if options.test:
+ print "Would have changed owner for %s in tag %s: %s -> %s" \
+ % (entry['package_name'], entry['tag_name'], entry['owner_name'], user['name'])
+ continue
+ if options.verbose:
+ print "Changing owner for %s in tag %s: %s -> %s" \
+ % (entry['package_name'], entry['tag_name'], entry['owner_name'], user['name'])
+ session.packageListSetOwner(entry['tag_id'], entry['package_name'], user['id'])
+
+def anon_handle_watch_task(options, session, args):
+ "Track progress of particular tasks"
+ usage = _("usage: %prog watch-task [options] <task id> [<task id>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--quiet", action="store_true",
+ help=_("Do not print the task information"), default=options.quiet)
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+ tasks = []
+ for task in args:
+ try:
+ tasks.append(int(task))
+ except ValueError:
+ parser.error(_("task id must be an integer"))
+ if not tasks:
+ parser.error(_("at least one task id must be specified"))
+
+ return watch_tasks(session, tasks, quiet=options.quiet)
+
+def anon_handle_watch_logs(options, session, args):
+ "Watch logs in realtime"
+ usage = _("usage: %prog watch-logs [options] <task id> [<task id>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--log", help=_("Watch only a specific log"))
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+
+ tasks = []
+ for task in args:
+ try:
+ tasks.append(int(task))
+ except ValueError:
+ parser.error(_("task id must be an integer"))
+ if not tasks:
+ parser.error(_("at least one task id must be specified"))
+
+ watch_logs(session, tasks, options)
+
+def handle_make_task(opts, session, args):
+ "[admin] Create an arbitrary task"
+ usage = _("usage: %prog make-task [options] <arg1> [<arg2>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--channel", help=_("set channel"))
+ parser.add_option("--priority", help=_("set priority"))
+ parser.add_option("--watch", action="store_true", help=_("watch the task"))
+ parser.add_option("--arch", help=_("set arch"))
+ (options, args) = parser.parse_args(args)
+ activate_session(session)
+
+ taskopts = {}
+ for key in ('channel','priority','arch'):
+ value = getattr(options,key,None)
+ if value is not None:
+ taskopts[key] = value
+ task_id = session.makeTask(method=args[0],
+ arglist=map(arg_filter,args[1:]),
+ **taskopts)
+ print "Created task id %d" % task_id
+ if _running_in_bg() or not options.watch:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=opts.quiet)
+
+def handle_tag_build(opts, session, args):
+ "Apply a tag to one or more builds"
+ usage = _("usage: %prog tag-build [options] <tag> <pkg> [<pkg>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action="store_true", help=_("force operation"))
+ parser.add_option("--nowait", action="store_true", help=_("Do not wait on task"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("This command takes at least two arguments: a tag name/ID and one or more package n-v-r's"))
+ assert False
+ activate_session(session)
+ tasks = []
+ for pkg in args[1:]:
+ task_id = session.tagBuild(args[0], pkg, force=options.force)
+ #XXX - wait on task
+ tasks.append(task_id)
+ print "Created task %s" % task_id
+ if _running_in_bg() or options.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session,tasks,quiet=opts.quiet)
+
+def handle_move_build(opts, session, args):
+ "'Move' one or more builds between tags"
+ usage = _("usage: %prog move-build [options] <tag1> <tag2> <pkg> [<pkg>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--force", action="store_true", help=_("force operation"))
+ parser.add_option("--nowait", action="store_true", help=_("do not wait on tasks"))
+ parser.add_option("--all", action="store_true", help=_("move all instances of a package, <pkg>'s are package names"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 3:
+ if options.all:
+ parser.error(_("This command, with --all, takes at least three arguments: two tags and one or more package names"))
+ else:
+ parser.error(_("This command takes at least three arguments: two tags and one or more package n-v-r's"))
+ assert False
+ activate_session(session)
+ tasks = []
+ builds = []
+
+ if options.all:
+ for arg in args[2:]:
+ pkg = session.getPackage(arg)
+ if not pkg:
+ print _("Invalid package name %s, skipping." % arg)
+ continue
+ tasklist = session.moveAllBuilds(args[0], args[1], arg, options.force)
+ tasks.extend(tasklist)
+ else:
+ for arg in args[2:]:
+ build = session.getBuild(arg)
+ if not build:
+ print _("Invalid build %s, skipping." % arg)
+ continue
+ if build not in builds:
+ builds.append(build)
+
+ for build in builds:
+ task_id = session.moveBuild(args[0], args[1], build['id'], options.force)
+ tasks.append(task_id)
+ print "Created task %s, moving %s" % (task_id, koji.buildLabel(build))
+ if _running_in_bg() or options.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session, tasks, quiet=opts.quiet)
+
+def handle_untag_build(options, session, args):
+ "Remove a tag from one or more builds"
+ usage = _("usage: %prog untag-build [options] <tag> <pkg> [<pkg>...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--all", action="store_true", help=_("untag all versions of the package in this tag"))
+ parser.add_option("--non-latest", action="store_true", help=_("untag all versions of the package in this tag except the latest"))
+ parser.add_option("-n", "--test", action="store_true", help=_("test mode"))
+ parser.add_option("-v", "--verbose", action="store_true", help=_("print details"))
+ parser.add_option("--force", action="store_true", help=_("force operation"))
+ (options, args) = parser.parse_args(args)
+ if options.non_latest and options.force:
+ if len(args) < 1:
+ parser.error(_("Please specify a tag"))
+ assert False
+ elif len(args) < 2:
+ parser.error(_("This command takes at least two arguments: a tag name/ID and one or more package n-v-r's"))
+ assert False
+ activate_session(session)
+ tag = session.getTag(args[0])
+ if not tag:
+ parser.error(_("Invalid tag: %s" % args[0]))
+ if options.all:
+ builds = []
+ for pkg in args[1:]:
+ builds.extend(session.listTagged(args[0], package=pkg))
+ elif options.non_latest:
+ if options.force and len(args) == 1:
+ tagged = session.listTagged(args[0])
+ else:
+ tagged = []
+ for pkg in args[1:]:
+ tagged.extend(session.listTagged(args[0], package=pkg))
+ # listTagged orders entries latest first
+ seen_pkg = {}
+ builds = []
+ for binfo in tagged:
+ if not seen_pkg.has_key(binfo['name']):
+ #latest for this package
+ if options.verbose:
+ print _("Leaving latest build for package %(name)s: %(nvr)s") % binfo
+ else:
+ builds.append(binfo)
+ seen_pkg[binfo['name']] = 1
+ else:
+ tagged = session.listTagged(args[0])
+ idx = dict([(b['nvr'], b) for b in tagged])
+ builds = []
+ for nvr in args[1:]:
+ binfo = idx.get(nvr)
+ if binfo:
+ builds.append(binfo)
+ else:
+ # not in tag, see if it even exists
+ binfo = session.getBuild(nvr)
+ if not binfo:
+ print _("No such build: %s") % nvr
+ else:
+ print _("Build %s not in tag %s") % (nvr, tag['name'])
+ if not options.force:
+ return 1
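+ # untag in oldest-first order (listTagged returned latest-first)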
+ builds.reverse()
+ for binfo in builds:
+ if options.test:
+ print _("would have untagged %(nvr)s") % binfo
+ else:
+ if options.verbose:
+ print _("untagging %(nvr)s") % binfo
+ session.untagBuild(tag['name'], binfo['nvr'], force=options.force)
+
+def handle_unblock_pkg(options, session, args):
+ "[admin] Unblock a package in the listing for tag"
+ usage = _("usage: %prog unblock-pkg [options] tag package [package2 ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.error(_("Please specify a tag and at least one package"))
+ assert False
+ activate_session(session)
+ tag = args[0]
+ for package in args[1:]:
+ #really should implement multicall...
+ session.packageListUnblock(tag,package)
+
+def anon_handle_download_build(options, session, args):
+ "Download a built package"
+ usage = _("usage: %prog download-build [options] <n-v-r | build_id | package>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--arch", dest="arches", metavar="ARCH", action="append", default=[],
+ help=_("Only download packages for this arch (may be used multiple times)"))
+ parser.add_option("--type", help=_("Download archives of the given type, rather than rpms (maven, win, or image)"))
+ parser.add_option("--latestfrom", dest="latestfrom", help=_("Download the latest build from this tag"))
+ parser.add_option("--debuginfo", action="store_true", help=_("Also download -debuginfo rpms"))
+ parser.add_option("--task-id", action="store_true", help=_("Interperet id as a task id"))
+ parser.add_option("--key", help=_("Download rpms signed with the given key"))
+ parser.add_option("--topurl", metavar="URL", default=options.topurl,
+ help=_("URL under which Koji files are accessible"))
+ parser.add_option("-q", "--quiet", action="store_true", help=_("Do not display progress meter"),
+ default=options.quiet)
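+ # Hypothetical examples (URL and n-v-r are placeholders):
+ #   koji download-build --topurl http://example.com/kojifiles foo-1.0-1
+ #   koji download-build --latestfrom sometag foo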
+ (suboptions, args) = parser.parse_args(args)
+ if len(args) < 1:
+ parser.error(_("Please specify a package N-V-R or build ID"))
+ assert False
+ elif len(args) > 1:
+ parser.error(_("Only a single package N-V-R or build ID may be specified"))
+ assert False
+
+ activate_session(session)
+ build = args[0]
+
+ if build.isdigit():
+ if suboptions.latestfrom:
+ print "--latestfrom not compatible with build IDs, specify a package name."
+ return 1
+ build = int(build)
+ if suboptions.task_id:
+ builds = session.listBuilds(taskID=build)
+ if not builds:
+ print "No associated builds for task %s" % build
+ return 1
+ build = builds[0]['build_id']
+
+ if suboptions.latestfrom:
+ # We want the latest build, not a specific build
+ try:
+ builds = session.listTagged(suboptions.latestfrom, latest=True, package=build, type=suboptions.type)
+ except koji.GenericError, data:
+ print "Error finding latest build: %s" % data
+ return 1
+ if not builds:
+ print "%s has no builds of %s" % (suboptions.latestfrom, build)
+ return 1
+ info = builds[0]
+ else:
+ info = session.getBuild(build)
+
+ if info is None:
+ print "No such build: %s" % build
+ return 1
+
+ if not suboptions.topurl:
+ print "You must specify --topurl to download files"
+ return 1
+ pathinfo = koji.PathInfo(topdir=suboptions.topurl)
+
+ urls = []
+ if suboptions.type:
+ archives = session.listArchives(buildID=info['id'], type=suboptions.type)
+ if not archives:
+ print "No %s archives available for %s" % (suboptions.type, koji.buildLabel(info))
+ return 1
+ if suboptions.type == 'maven':
+ for archive in archives:
+ url = pathinfo.mavenbuild(info) + '/' + pathinfo.mavenfile(archive)
+ urls.append((url, pathinfo.mavenfile(archive)))
+ elif suboptions.type == 'win':
+ for archive in archives:
+ url = pathinfo.winbuild(info) + '/' + pathinfo.winfile(archive)
+ urls.append((url, pathinfo.winfile(archive)))
+ elif suboptions.type == 'image':
+ # topurl was already validated above, so reuse the same PathInfo
+ for archive in archives:
+ url = '%s/%s' % (pathinfo.imagebuild(info), archive['filename'])
+ urls.append((url, archive['filename']))
+ else:
+ # can't happen
+ assert False
+ else:
+ arches = suboptions.arches
+ if len(arches) == 0:
+ arches = None
+ rpms = session.listRPMs(buildID=info['id'], arches=arches)
+ if not rpms:
+ if arches:
+ print "No %s packages available for %s" % (" or ".join(arches), koji.buildLabel(info))
+ else:
+ print "No packages available for %s" % koji.buildLabel(info)
+ return 1
+ for rpm in rpms:
+ if not suboptions.debuginfo and koji.is_debuginfo(rpm['name']):
+ continue
+ if suboptions.key:
+ fname = pathinfo.signed(rpm, suboptions.key)
+ else:
+ fname = pathinfo.rpm(rpm)
+ url = pathinfo.build(info) + '/' + fname
+ urls.append((url, os.path.basename(fname)))
+
+ if suboptions.quiet:
+ pg = None
+ else:
+ pg = progress.TextMeter()
+
+ for url, relpath in urls:
+ # avoid shadowing the file() builtin
+ fsrc = grabber.urlopen(url, progress_obj=pg, text=relpath)
+
+ if '/' in relpath:
+ koji.ensuredir(os.path.dirname(relpath))
+ out = os.open(relpath, os.O_WRONLY|os.O_CREAT|os.O_TRUNC, 0666)
+ try:
+ while 1:
+ buf = fsrc.read(4096)
+ if not buf:
+ break
+ os.write(out, buf)
+ finally:
+ os.close(out)
+ fsrc.close()
+
+
+def anon_handle_download_logs(options, session, args):
+ "Download a logs for package"
+
+ FAIL_LOG = "task_failed.log"
+ usage = _("usage: %prog download-logs [options] <task-id> [<task-id> ...]")
+ usage += _("\n %prog download-logs [options] --nvr <n-v-r> [<n-v-r> ...]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ usage += _("\nCreates special log with name %s if task failed." % FAIL_LOG)
+ parser = OptionParser(usage=usage)
+ parser.add_option("-r", "--recurse", action="store_true",
+ help=_("Process children of this task as well"))
+ parser.add_option("--nvr", action="store_true",
+ help=_("Get logs from n-v-r"))
+ parser.add_option("-m", "--match", action="append", metavar="PATTERN",
+ help=_("Get only log matching PATTERN. May be used multiple times."))
+ parser.add_option("-c", "--continue", action="store_true", dest="cont",
+ help=_("Continue previous download"))
+ parser.add_option("-d", "--dir", metavar="DIRECTORY", default='kojilogs',
+ help=_("Write logs to DIRECTORY"))
+ (suboptions, args) = parser.parse_args(args)
+
+ if len(args) < 1:
+ parser.error(_("Please specify at least one task id or n-v-r"))
+
+ def write_fail_log(task_log_dir, task_id):
+ """Gets output only from failed tasks"""
+ try:
+ result = session.getTaskResult(task_id)
+ # with current code, failed task results should always be faults,
+ # but that could change in the future
+ content = pprint.pformat(result)
+ except koji.GenericError:
+ etype, e = sys.exc_info()[:2]
+ content = ''.join(traceback.format_exception_only(etype, e))
+ full_filename = os.path.normpath(os.path.join(task_log_dir, FAIL_LOG))
+ koji.ensuredir(os.path.dirname(full_filename))
+ sys.stdout.write("Writing: %s\n" % full_filename)
+ file(full_filename, 'w').write(content)
+
+ def download_log(task_log_dir, task_id, filename, blocksize=102400):
+ #Create directories only if there is any log file to write to
+ full_filename = os.path.normpath(os.path.join(task_log_dir, filename))
+ koji.ensuredir(os.path.dirname(full_filename))
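+ # non-empty sentinel so the download loop below runs at least once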
+ contents = 'IGNORE ME!'
+ if suboptions.cont and os.path.exists(full_filename):
+ sys.stdout.write("Continuing: %s\n" % full_filename)
+ fd = file(full_filename, 'ab')
+ offset = fd.tell()
+ else:
+ sys.stdout.write("Downloading: %s\n" % full_filename)
+ fd = file(full_filename, 'wb')
+ offset = 0
+ try:
+ while contents:
+ contents = session.downloadTaskOutput(task_id, filename, offset, blocksize)
+ offset += len(contents)
+ if contents:
+ fd.write(contents)
+ finally:
+ fd.close()
+
+ def save_logs(task_id, match, parent_dir='.', recurse=True):
+ assert task_id == int(task_id), "Task id must be number: %r" % task_id
+ task_info = session.getTaskInfo(task_id)
+ if task_info is None:
+ error(_("No such task id: %i" % task_id))
+ files = session.listTaskOutput(task_id)
+ logs = []
+ for filename in files:
+ if not filename.endswith(".log"):
+ continue
+ if match and not koji.util.multi_fnmatch(filename, match):
+ continue
+ logs.append(filename)
+
+ task_log_dir = os.path.join(parent_dir,
+ "%s-%s" % (task_info["arch"], task_id))
+
+ count = 0
+ state = koji.TASK_STATES[task_info['state']]
+ if state == 'FAILED':
+ if not match or koji.util.multi_fnmatch(FAIL_LOG, match):
+ write_fail_log(task_log_dir, task_id)
+ count += 1
+ elif state not in ['CLOSED', 'CANCELED']:
+ sys.stderr.write(_("Warning: task %s is %s\n") % (task_id, state))
+
+ for log_filename in logs:
+ download_log(task_log_dir, task_id, log_filename)
+ count += 1
+
+ if count == 0 and not recurse:
+ sys.stderr.write(_("No logs found for task %i. Perhaps try --recurse?\n") % task_id)
+
+ if recurse:
+ child_tasks = session.getTaskChildren(task_id)
+ for child_task in child_tasks:
+ save_logs(child_task['id'], match, task_log_dir, recurse)
+
+ for arg in args:
+ if suboptions.nvr:
+ suboptions.recurse = True
+ binfo = session.getBuild(arg)
+ if binfo is None:
+ error(_("There is no build with n-v-r: %s" % arg))
+ assert binfo['task_id'], binfo
+ arg = binfo['task_id']
+ sys.stdout.write("Using task ID: %s\n" % arg)
+ # no else here: by this point arg holds a task id in both branches
+ try:
+ task_id = int(arg)
+ except ValueError:
+ error(_("Task id must be number: %r") % arg)
+ continue
+ save_logs(task_id, suboptions.match, suboptions.dir, suboptions.recurse)
+
+
+def anon_handle_wait_repo(options, session, args):
+ "Wait for a repo to be regenerated"
+ usage = _("usage: %prog wait-repo [options] <tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--build", metavar="NVR", dest="builds", action="append", default=[],
+ help=_("Check that the given build is in the newly-generated repo (may be used multiple times)"))
+ parser.add_option("--target", action="store_true", help=_("Interpret the argument as a build target name"))
+ parser.add_option("--timeout", type="int", help=_("Amount of time to wait (in minutes) before giving up (default: 120)"), default=120)
+ parser.add_option("--quiet", action="store_true", help=_("Suppress output, success or failure will be indicated by the return value only"), default=options.quiet)
+ (suboptions, args) = parser.parse_args(args)
+
+ start = time.time()
+
+ builds = [koji.parse_NVR(build) for build in suboptions.builds]
+ if len(args) < 1:
+ parser.error(_("Please specify a tag name"))
+ elif len(args) > 1:
+ parser.error(_("Only one tag may be specified"))
+
+ tag = args[0]
+
+ if suboptions.target:
+ target_info = session.getBuildTarget(tag)
+ if not target_info:
+ parser.error("Invalid build target: %s" % tag)
+ tag = target_info['build_tag_name']
+ tag_id = target_info['build_tag']
+ else:
+ tag_info = session.getTag(tag)
+ if not tag_info:
+ parser.error("Invalid tag: %s" % tag)
+ targets = session.getBuildTargets(buildTagID=tag_info['id'])
+ if not targets:
+ print "%(name)s is not a build tag for any target" % tag_info
+ targets = session.getBuildTargets(destTagID=tag_info['id'])
+ if targets:
+ maybe = {}.fromkeys([t['build_tag_name'] for t in targets])
+ maybe = maybe.keys()
+ maybe.sort()
+ print "Suggested tags: %s" % ', '.join(maybe)
+ return 1
+ tag_id = tag_info['id']
+
+
+ for nvr in builds:
+ data = session.getLatestBuilds(tag_id, package=nvr["name"])
+ if len(data) == 0:
+ print "Warning: package %s is not in tag %s" % (nvr["name"], tag)
+ else:
+ present_nvr = [x["nvr"] for x in data][0]
+ if present_nvr != "%s-%s-%s" % (nvr["name"], nvr["version"], nvr["release"]):
+ print "Warning: nvr %s-%s-%s is not current in tag %s\n latest build in %s is %s" % (nvr["name"], nvr["version"], nvr["release"], tag, tag, present_nvr)
+
+ last_repo = None
+ repo = session.getRepo(tag_id)
+
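+ # Poll once a minute; a repo object different from the last one seen
+ # means a regeneration has completed since the previous check.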
+ while True:
+ if builds and repo and repo != last_repo:
+ if koji.util.checkForBuilds(session, tag_id, builds, repo['create_event'], latest=True):
+ if not suboptions.quiet:
+ print "Successfully waited %s for %s to appear in the %s repo" % (koji.util.duration(start), koji.util.printList(suboptions.builds), tag)
+ return
+
+ time.sleep(60)
+ last_repo = repo
+ repo = session.getRepo(tag_id)
+
+ if not builds:
+ if repo != last_repo:
+ if not suboptions.quiet:
+ print "Successfully waited %s for a new %s repo" % (koji.util.duration(start), tag)
+ return
+
+ if (time.time() - start) > (suboptions.timeout * 60.0):
+ if not suboptions.quiet:
+ if builds:
+ print "Unsuccessfully waited %s for %s to appear in the %s repo" % (koji.util.duration(start), koji.util.printList(suboptions.builds), tag)
+ else:
+ print "Unsuccessfully waited %s for a new %s repo" % (koji.util.duration(start), tag)
+ return 1
+
+_search_types = ('package', 'build', 'tag', 'target', 'user', 'host', 'rpm', 'maven', 'win')
+
+def handle_regen_repo(options, session, args):
+ "[admin] Force a repo to be regenerated"
+ usage = _("usage: %prog regen-repo [options] <tag>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--target", action="store_true", help=_("Interpret the argument as a build target name"))
+ parser.add_option("--nowait", action="store_true", help=_("Don't wait on for regen to finish"))
+ parser.add_option("--debuginfo", action="store_true", help=_("Include debuginfo rpms in repo"))
+ parser.add_option("--source", "--src", action="store_true", help=_("Include source rpms in the repo"))
+ (suboptions, args) = parser.parse_args(args)
+ if len(args) == 0:
+ parser.error(_("A tag name must be specified"))
+ assert False
+ elif len(args) > 1:
+ if suboptions.target:
+ parser.error(_("Only a single target may be specified"))
+ else:
+ parser.error(_("Only a single tag name may be specified"))
+ assert False
+ activate_session(session)
+ tag = args[0]
+ repo_opts = {}
+ if suboptions.target:
+ info = session.getBuildTarget(tag)
+ if not info:
+ parser.error(_("No matching build target: " + tag))
+ assert False
+ tag = info['build_tag_name']
+ info = session.getTag(tag)
+ else:
+ info = session.getTag(tag)
+ if not info:
+ parser.error(_("No matching tag: " + tag))
+ assert False
+ tag = info['name']
+ targets = session.getBuildTargets(buildTagID=info['id'])
+ if not targets:
+ print "Warning: %s is not a build tag" % tag
+ if not info['arches']:
+ print "Warning: tag %s has an empty arch list" % info['name']
+ if suboptions.debuginfo:
+ repo_opts['debuginfo'] = True
+ if suboptions.source:
+ repo_opts['src'] = True
+ task_id = session.newRepo(tag, **repo_opts)
+ print "Regenerating repo for tag " + tag
+ if _running_in_bg() or suboptions.nowait:
+ return
+ else:
+ session.logout()
+ return watch_tasks(session, [task_id], quiet=options.quiet)
+
+def anon_handle_search(options, session, args):
+ "Search the system"
+ usage = _("usage: %prog search [options] search_type pattern")
+ usage += _('\nAvailable search types: %s') % ', '.join(_search_types)
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-r", "--regex", action="store_true", help=_("treat pattern as regex"))
+ parser.add_option("--exact", action="store_true", help=_("exact matches only"))
+ (options, args) = parser.parse_args(args)
+ if len(args) < 2:
+ parser.print_help()
+ return
+ type = args[0]
+ if type not in _search_types:
+ parser.error(_("Unknown search type: %s") % type)
+ pattern = args[1]
+ matchType = 'glob'
+ if options.regex:
+ matchType = 'regexp'
+ elif options.exact:
+ matchType = 'exact'
+ data = session.search(pattern, type, matchType)
+ for row in data:
+ print row['name']
+
+def handle_moshimoshi(options, session, args):
+ "Introduce yourself"
+ usage = _("usage: %prog moshimoshi [options]")
+ parser = OptionParser(usage=usage)
+ (options, args) = parser.parse_args(args)
+ if len(args) != 0:
+ parser.error(_("This command takes no arguments"))
+ assert False
+ activate_session(session)
+ u = session.getLoggedInUser()
+ if not u:
+ print "Not authenticated"
+ u = {'name' : 'anonymous user'}
+ print "%s, %s!" % (random.choice(greetings), u["name"],)
+ print ""
+ print "You are using the hub at %s" % (session.baseurl,)
+ if u.get("krb_principal", None) is not None:
+ print "Authenticated via Kerberos principal %s" % (u["krb_principal"])
+
+
+def handle_runroot(options, session, args):
+ "[admin] Run a command in a buildroot"
+ usage = _("usage: %prog runroot [options] <tag> <arch> <command>")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.disable_interspersed_args()
+ parser.add_option("-p", "--package", action="append", default=[], help=_("make sure this package is in the chroot"))
+ parser.add_option("-m", "--mount", action="append", default=[], help=_("mount this directory read-write in the chroot"))
+ parser.add_option("--skip-setarch", action="store_true", default=False,
+ help=_("bypass normal setarch in the chroot"))
+ parser.add_option("-w", "--weight", type='int', help=_("set task weight"))
+ parser.add_option("--channel-override", help=_("use a non-standard channel"))
+ parser.add_option("--task-id", action="store_true", default=False,
+ help=_("Print the ID of the runroot task"))
+ parser.add_option("--use-shell", action="store_true", default=False,
+ help=_("Run command through a shell, otherwise uses exec"))
+ parser.add_option("--repo-id", type="int", help=_("ID of the repo to use"))
+
+ (opts, args) = parser.parse_args(args)
+
+ if len(args) < 3:
+ parser.error(_("Incorrect number of arguments"))
+ activate_session(session)
+ tag = args[0]
+ arch = args[1]
+ if opts.use_shell:
+ # everything must be correctly quoted
+ command = ' '.join(args[2:])
+ else:
+ command = args[2:]
+ try:
+ task_id = session.runroot(tag, arch, command,
+ channel=opts.channel_override,
+ packages=opts.package, mounts=opts.mount,
+ repo_id=opts.repo_id,
+ skip_setarch=opts.skip_setarch,
+ weight=opts.weight)
+ except koji.GenericError as e:
+ if 'Invalid method' in str(e):
+ print "* The runroot plugin appears to not be installed on the",
+ print "koji hub. Please contact the administrator."
+ raise
+ if opts.task_id:
+ print task_id
+
+ try:
+ while True:
+ # wait for the task to finish
+ if session.taskFinished(task_id):
+ break
+ time.sleep(options.poll_interval)
+ except KeyboardInterrupt:
+ # this is probably the right thing to do here
+ print "User interrupt: canceling runroot task"
+ session.cancelTask(task_id)
+ return
+ output = None
+ if "runroot.log" in session.listTaskOutput(task_id):
+ output = session.downloadTaskOutput(task_id, "runroot.log")
+ if output:
+ sys.stdout.write(output)
+ info = session.getTaskInfo(task_id)
+ if info is None:
+ sys.exit(1)
+ state = koji.TASK_STATES[info['state']]
+ if state in ('FAILED', 'CANCELED'):
+ sys.exit(1)
+ return
+
+
+def handle_help(options, session, args):
+ "List available commands"
+ usage = _("usage: %prog help [options]")
+ usage += _("\n(Specify the --help global option for a list of other help options)")
+ parser = OptionParser(usage=usage)
+ parser.add_option("--admin", action="store_true", help=_("show admin commands"))
+ (options, args) = parser.parse_args(args)
+ list_commands(show_admin=options.admin)
+
+
+def list_commands(show_admin=False):
+ handlers = []
+ for name,value in globals().items():
+ if name.startswith('handle_'):
+ alias = name.replace('handle_','')
+ alias = alias.replace('_','-')
+ handlers.append((alias,value))
+ elif name.startswith('anon_handle_'):
+ alias = name.replace('anon_handle_','')
+ alias = alias.replace('_','-')
+ handlers.append((alias,value))
+ handlers.sort()
+ print _("Available commands:")
+ for alias,handler in handlers:
+ desc = handler.__doc__
+ if desc.startswith('[admin] '):
+ if not show_admin:
+ continue
+ desc = desc[8:]
+ print " %-25s %s" % (alias, desc)
+ progname = os.path.basename(sys.argv[0]) or 'koji'
+ print _('(Type "%s --help" for help about global options') % progname
+ print _(' or "%s <command> --help" for help about a particular command\'s options') % progname
+ print _(' or "%s help --admin" for help about privileged administrative commands.)') % progname
+
+def error(msg=None, code=1):
+ if msg:
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+ sys.exit(code)
+
+def warn(msg):
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+
+def has_krb_creds():
+ if not sys.modules.has_key('krbV'):
+ return False
+ try:
+ ctx = krbV.default_context()
+ ccache = ctx.default_ccache()
+ princ = ccache.principal()
+ return True
+ except krbV.Krb5Error:
+ return False
+
+def activate_session(session):
+ """Test and login the session is applicable"""
+ global options
+ if options.authtype == "noauth" or options.noauth:
+ #skip authentication
+ pass
+ elif options.authtype == "ssl" or os.path.isfile(options.cert) and options.authtype is None:
+ # authenticate using SSL client cert
+ session.ssl_login(options.cert, options.ca, options.serverca, proxyuser=options.runas)
+ elif options.authtype == "password" or options.user and options.authtype is None:
+ # authenticate using user/password
+ session.login()
+ elif options.authtype == "kerberos" or has_krb_creds() and options.authtype is None:
+ try:
+ if options.keytab and options.principal:
+ session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)
+ else:
+ session.krb_login(proxyuser=options.runas)
+ except krbV.Krb5Error, e:
+ error(_("Kerberos authentication failed: %s (%s)") % (e.args[1], e.args[0]))
+ except socket.error, e:
+ warn(_("Could not connect to Kerberos authentication service: %s") % e.args[1])
+ if not options.noauth and options.authtype != "noauth" and not session.logged_in:
+ error(_("Unable to log in, no authentication methods available"))
+ ensure_connection(session)
+ if options.debug:
+ print "successfully connected to hub"
+
+if __name__ == "__main__":
+ global options
+ options, command, args = get_options()
+
+ logger = logging.getLogger("koji")
+ handler = logging.StreamHandler(sys.stderr)
+ handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
+ handler.setLevel(logging.DEBUG)
+ logger.addHandler(handler)
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
+ elif options.quiet:
+ logger.setLevel(logging.ERROR)
+ else:
+ logger.setLevel(logging.WARN)
+
+ session_opts = {}
+ for k in ('user', 'password', 'krbservice', 'debug_xmlrpc', 'debug', 'max_retries',
+ 'retry_interval', 'offline_retry', 'offline_retry_interval',
+ 'anon_retry', 'keepalive', 'timeout', 'use_fast_upload'):
+ value = getattr(options,k)
+ if value is not None:
+ session_opts[k] = value
+ session = koji.ClientSession(options.server,session_opts)
+ rv = 0
+ try:
+ rv = locals()[command].__call__(options, session, args)
+ if not rv:
+ rv = 0
+ except KeyboardInterrupt:
+ pass
+ except SystemExit:
+ rv = 1
+ except:
+ if options.debug:
+ raise
+ else:
+ exctype, value = sys.exc_info()[:2]
+ rv = 1
+ print "%s: %s" % (exctype.__name__, value)
+ try:
+ session.logout()
+ except:
+ pass
+ sys.exit(rv)
diff --git a/cli/koji.conf b/cli/koji.conf
new file mode 100644
index 0000000..01ac7ee
--- /dev/null
+++ b/cli/koji.conf
@@ -0,0 +1,31 @@
+[koji]
+
+;configuration for koji cli tool
+
+;url of XMLRPC server
+;server = http://hub.example.com/kojihub
+
+;url of web interface
+;weburl = http://www.example.com/koji
+
+;url of package download site
+;pkgurl = http://www.example.com/packages
+
+;path to the koji top directory
+;topdir = /mnt/koji
+
+;configuration for Kerberos authentication
+
+;the service name of the principal being used by the hub
+;krbservice = host
+
+;configuration for SSL authentication
+
+;client certificate
+;cert = ~/.koji/client.crt
+
+;certificate of the CA that issued the client certificate
+;ca = ~/.koji/clientca.crt
+
+;certificate of the CA that issued the HTTP server certificate
+;serverca = ~/.koji/serverca.crt
diff --git a/docs/HOWTO.html b/docs/HOWTO.html
new file mode 100644
index 0000000..2abb3e0
--- /dev/null
+++ b/docs/HOWTO.html
@@ -0,0 +1,321 @@
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <title>Koji HOWTO</title>
+ </head>
+ <body>
+ <h1>Introduction</h1>
+
+ Koji is a system for building and tracking RPMs. It was designed with the following
+ features in mind:
+
+ <p>
+ <b>Security</b>
+ <ul>
+ <li>New buildroot for each build</li>
+ <li>NFS is used (mostly) read-only</li>
+ </ul>
+
+ <b>Leverage other software</b>
+ <ul>
+ <li>Uses Yum and Mock open-source components</li>
+ <li>XML-RPC APIs for easy integration with other tools</li>
+ </ul>
+
+ <b>Flexibility</b>
+ <ul>
+ <li>rich data model</li>
+ <li>active code base</li>
+ </ul>
+
+ <b>Usability</b>
+ <ul>
+ <li>Web interface with Kerberos authentication</li>
+ <li>Thin, portable client</li>
+ <li>Users can create local buildroots</li>
+ </ul>
+
+ <b>Reproducibility</b>
+ <ul>
+ <li>Buildroot contents are tracked in the database</li>
+ <li>Versioned data</li>
+ </ul>
+
+
+ <p> This HOWTO document covers the basic tasks that a developer needs to be
+ able to accomplish with Koji.
+ </p>
+
+ <h1>Getting started</h1>
+
+ <h2>The web interface</h2>
+ <p>The primary interface for viewing Koji data is a web application. Most of the interface
+ is read-only, but if you are logged in (see below) and have sufficient privileges there
+ are some actions that can be performed through the web. For example:
+ <ul>
+ <li>Cancel a build</li>
+ <li>Resubmit a failed task</li>
+ </ul>
+ Those with admin privileges will find additional actions, such as:
+ <ul>
+ <li>Create/Edit/Delete a tag</li>
+ <li>Create/Edit/Delete a target</li>
+ <li>Enable/Disable a build host</li>
+ </ul>
+
+ <p>The web site utilizes Kerberos authentication. In order to log in you will
+ need a valid Kerberos ticket and your web browser will need to be configured to send the
+ Kerberos information to the server.
+
+ <p>In Firefox or Mozilla, you will need to use the about:config page to set a few parameters.
+ Use the search term 'negotiate' to filter the list. Change
+ network.negotiate-auth.trusted-uris to the domain you want to authenticate against,
+ e.g. .example.com. You can leave network.negotiate-auth.delegation-uris blank, as it
+ enables Kerberos ticket passing, which is not required. If you do not see those two
+ config options listed, your version of Firefox or Mozilla may be too old to support
+ Negotiate authentication, and you should consider upgrading.
+
+ <p>In order to obtain a Kerberos ticket, use the kinit command.
+
+
+ <h2>Installing the Koji cli</h2>
+ <p>There is a single point of entry for most operations. The command is
+ called 'koji' and is included in the main koji package.
+
+ <p>Repos/webpage TBD
+
+ <p>
+ The koji tool authenticates to the central server using Kerberos, so you will need
+ to have a valid Kerberos ticket to use many features. However, many of the read-only
+ commands will work without authentication.
+
+ <h2>Building a package</h2>
+ <p>Builds are initiated with the command line tool.
+ To build a package, the syntax is:</p>
+ <pre>$ koji build <build target> <cvs URL></pre>
+
+ <p>For example:</p>
+ <pre>$ koji build dist-fc7-scratch 'cvs://cvs.example.com/cvs/dist?rpms/kernel/FC-7#kernel-2_6_20-1_2925_fc7'</pre>
+ <p>
+ The <code>koji build</code> command creates a build task in Koji. By default
+ the tool will wait
+ and print status updates until the build completes. You can override this with
+ the <code>--nowait</code> option. To view other options to the build command use the
+ <code>--help</code> option.
+ </p>
+
+ <pre>$ koji build --help
+</pre>
+
+ <h2>Build Options</h2>
+ <p>
+ There are a few options to the build command. Here are some more detailed explanations
+ of them:
+ </p>
+
+ <dl>
+ <dt>--skip-tag</dt>
+ <dd>Normally the package is tagged after the build completes. This option causes
+ the tagging step to be skipped. The package will be in the system, but untagged
+ (you can later tag it with the tag-pkg command)</dd>
+ <dt>--scratch</dt>
+ <dd>This makes the build into a scratch build. The build will not be
+ imported into the db, it will just be built. The rpms will land under
+ <topdir>/scratch. Scratch builds are not tracked and can never
+ be tagged, but can be convenient for testing. Scratch builds are
+ typically removed from the filesystem after one week.
+ </dd>
+ <dt>--nowait</dt>
+ <dd>As stated above, this prevents the cli from waiting on the build task.</dd>
+ <dt>--arch-override</dt>
+ <dd>This option allows you to override the base set of arches to build for.
+ This option is really only for testing during the beta period, but it may
+ be retained for scratch builds in the future.</dd>
+ </dl>
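+
+ <p>For example, a scratch build might be submitted like this (the target and
+ URL are illustrative):</p>
+ <pre>$ koji build --scratch dist-fc7 'cvs://cvs.example.com/cvs/dist?rpms/kernel/FC-7#kernel-2_6_20-1_2925_fc7'</pre>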
+
+ <h2>Build Failures</h2>
+ <p>If your package fails to build, you will see something like this.</p>
+ <pre>
+ 420066 buildArch (kernel-2.6.18-1.2739.10.9.el5.jjf.215394.2.src.rpm,
+ ia64): open (build-1.example.com) -> FAILED: BuildrootError:
+ error building package (arch ia64), mock exited with status 10
+ </pre>
+
+ <p>You can figure out why the build failed by looking at the log files. If
+ there is a build.log, start there. Otherwise, look at init.log</p>
+
+ <pre>
+ $ ls -1 <topdir>/work/tasks/420066/*
+ <topdir>/work/tasks/420066/build.log
+ <topdir>/work/tasks/420066/init.log
+ <topdir>/work/tasks/420066/mockconfig.log
+ <topdir>/work/tasks/420066/root.log
+ </pre>
+
+ <h2>Filing Bugs</h2>
+
+ <p>bug tracking TBD
+
+ <h1>Koji Architecture</h1>
+
+ <h2>Terminology</h2>
+
+ In Koji, it is sometimes necessary to distinguish between a package in general,
+ a specific build of a package, and the various rpm files created by a build. When
+ precision is needed, these terms should be interpreted as follows:
+
+ <dl>
+ <dt>Package</dt>
+ <dd>The name of a source rpm. This refers to the package in general and not
+ any particular build or subpackage. For example: kernel, glibc, etc.</dd>
+ <dt>Build</dt>
+ <dd>A particular build of a package. This refers to the entire build: all arches
+ and subpackages. For example: kernel-2.6.9-34.EL, glibc-2.3.4-2.19.</dd>
+ <dt>RPM</dt>
+ <dd>A particular rpm. A specific arch and subpackage of a build.
+ For example: kernel-2.6.9-34.EL.x86_64, kernel-devel-2.6.9-34.EL.s390,
+ glibc-2.3.4-2.19.i686, glibc-common-2.3.4-2.19.ia64</dd>
+ </dl>
+
+
+ <h2>Koji Components</h2>
+
+ Koji is composed of several components:
+
+ <ul>
+ <li><em>koji-hub</em> is the center of all Koji operations. It is an XML-RPC server
+ running under mod_python in Apache. koji-hub is passive in that it only
+ receives XML-RPC calls and relies upon the build daemons and other
+ components to initiate communication. koji-hub is the only component that
+ has direct access to the database and is one of the two components that have
+ write access to the file system.</li>
+
+ <li><em>kojid</em> is the build daemon that runs on each of the build machines. Its
+ primary responsibility is polling for incoming build requests and handling
+ them accordingly. Koji also has support for tasks other than building.
+ Creating install images is one example. kojid is responsible for handling
+ these tasks as well.
+
+ <p>kojid uses mock for building. It also creates a fresh buildroot for
+ every build. kojid is written in Python and communicates with koji-hub via
+ XML-RPC.</p></li>
+
+ <li><em>koji-web</em> is a set of scripts that run in mod_python and use the Cheetah
+ templating engine to provide a web interface to Koji. koji-web exposes a
+ lot of information and also provides a means for certain operations, such as
+ cancelling builds.</li>
+
+ <li><em>koji</em> is a CLI written in Python that provides many hooks into
+ Koji. It allows the user to query much of the data as well as perform
+ actions such as build initiation.</li>
+
+ <li><em>kojira</em> is a daemon that keeps the build root repodata
+ updated.</li>
+
+ </ul>
+
+ <h2>Package Organization</h2>
+ <p><i>Tags and Targets</i></p>
+ <p>Koji organizes packages using tags. In Koji a tag is roughly analogous to
+ a beehive collection instance, but differs in a number of ways:</p>
+ <ul>
+ <li>Tags are tracked in the database but not on disk</li>
+ <li>Tags support multiple inheritance</li>
+ <li>Each tag has its own list of valid packages (inheritable)</li>
+ <li>Package ownership can be set per-tag (inheritable)</li>
+ <li>Tag inheritance is more configurable</li>
+ <li>When you build you specify a <i>target</i> rather than a tag</li>
+ </ul>
+ <p>
+ A build target specifies where a package should be built and how it
+ should be tagged afterwards. This allows target names to remain fixed
+ as tags change through releases. You can get a full list of build targets
+ with the following command:</p>
+ <pre>$ koji list-targets
+</pre>
+ You can see just a single target with the <code>--name</code> option:
+ <pre>$ koji list-targets --name dist-fc7
+Name Buildroot Destination
+---------------------------------------------------------------------------------------------
+dist-fc7 dist-fc7-build dist-fc7
+</pre>
+ This tells you a build for target dist-fc7 will use a buildroot with packages
+ from the tag dist-fc7-build and tag the resulting packages as dist-fc7.
+ <p>
+ You can get a list of tags with the following command:</p>
+ <pre>$ koji list-tags
+</pre>
+ <p><i>Package lists</i></p>
+ <p>
+ As mentioned above, each tag has its own list of packages that may be placed
+ in the tag. To see that list for a tag, use the <code>list-pkgs</code> command:</p>
+ <pre>$ koji list-pkgs --tag dist-fc7
+Package Tag Extra Arches Owner
+----------------------- ----------------------- ---------------- ----------------
+ElectricFence dist-fc6 pmachata
+GConf2 dist-fc6 rstrode
+lucene dist-fc6 dbhole
+lvm2 dist-fc6 lvm-team
+ImageMagick dist-fc6 nmurray
+m17n-db dist-fc6 majain
+m17n-lib dist-fc6 majain
+MAKEDEV dist-fc6 clumens
+...
+</pre>
+ The first column is the name of the package, the second tells you which tag
+ the package entry has been inherited from, and the last tells you the owner
+ of the package.
+ <p><i>Latest Builds</i></p>
+ <p>
+ To see the latest builds for a tag, use the <code>latest-pkg</code> command:</p>
+ <pre>$ koji latest-pkg --all dist-fc7
+Build Tag Built by
+---------------------------------------- -------------------- ----------------
+ConsoleKit-0.1.0-5.fc7 dist-fc7 davidz
+ElectricFence-2.2.2-20.2.2 dist-fc6 jkeating
+GConf2-2.16.0-6.fc7 dist-fc7 mclasen
+ImageMagick-6.2.8.0-3.fc6.1 dist-fc6-updates nmurray
+MAKEDEV-3.23-1.2 dist-fc6 nalin
+MySQL-python-1.2.1_p2-2 dist-fc7 katzj
+NetworkManager-0.6.5-0.3.cvs20061025.fc7 dist-fc7 caillon
+ORBit2-2.14.6-1.fc7 dist-fc7 mclasen
+</pre>
+ The output gives you not only the latest builds, but which tag they have
+ been inherited from and who built them (note: for builds imported from beehive
+ the "built by" field may be misleading)
+
+
+ <h2>Exploring Koji</h2>
+
+ <p>We've tried to make Koji self-documenting wherever possible. The command
+ line tool will print a list of valid commands and each command supports
+ <code>--help</code>. For example:</p>
+
+ <pre>
+$ koji help
+Koji commands are:
+ build Build a package from source
+ cancel-task Cancel a task
+ help List available commands
+ latest-build Print the latest rpms for a tag
+ latest-pkg Print the latest builds for a tag
+...
+$ koji build --help
+usage: koji build [options] tag URL
+(Specify the --help global option for a list of other help options)
+
+options:
+ -h, --help show this help message and exit
+ --skip-tag Do not attempt to tag package
+ --scratch Perform a scratch build
+ --nowait Don't wait on build
+...
+</pre>
+
+ <h1>Getting Involved</h1>
+
+ If you would like to be more involved with the Koji project...
+
+ <p>Project data TBD
+
+ </body>
+</html>
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000..43d8d68
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,4 @@
+install:
+
+clean:
+ rm -f *.o *.so *.pyc *~
diff --git a/docs/Migrating_to_1.10.txt b/docs/Migrating_to_1.10.txt
new file mode 100644
index 0000000..e934706
--- /dev/null
+++ b/docs/Migrating_to_1.10.txt
@@ -0,0 +1,76 @@
+Migrating to Koji 1.10
+======================
+
+// asciidoc formatted
+
+The 1.10 release of Koji includes a few changes that you should consider when
+migrating.
+
+DB Updates
+----------
+
+The new +tag_extra+ table tracks extra data for tags.
+
+There is a new entry in the +channels+ table and some additions and updates to
+the +archivetypes+ table.
+
+As in previous releases, we provide a migration script that updates the
+database.
+
+ # psql koji koji </usr/share/doc/koji-1.10.0/docs/schema-upgrade-1.9-1.10.sql
+
+
+Command line changes
+--------------------
+
+A few commands support new arguments:
+
+* maven-build
+** --ini : Pass build parameters via a .ini file
+** --section : Get build parameters from this section of the .ini
+* wrapper-rpm
+** --ini : Pass build parameters via a .ini file
+** --section : Get build parameters from this section of the .ini
+* import
+** --link : Attempt to hardlink instead of uploading
+* list-tagged
+** --latest-n : Only show the latest N builds/rpms
+* list-history
+** --watch : Monitor history data
+* edit-tag
+** --extra : Set tag extra option
+* list-tasks
+** --user : Only tasks for this user
+** --arch : Only tasks for this architecture
+** --method : Only tasks of this method
+** --channel : Only tasks in this channel
+** --host : Only tasks for this host
+* download-build
+** --task-id : Interpret id as a task id
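+
+For example, the new +--ini+ option might be used like this (a sketch; the file,
+section, and target names are illustrative):
+
+ $ koji maven-build --ini build-params.ini --section mybuild mytarget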
+
+And there are three new commands:
+
+* image-build-indirection
+* maven-chain
+* runroot
+
+
+Other Configuration changes
+---------------------------
+
+The Koji web interface can now treat +extra-footer.html+ as a Cheetah template.
+This behavior can be enabled by setting the +LiteralFooter+ option to +False+ in
+the kojiweb config.
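+
+In web.conf that would look like this (a sketch):
+
+ [web]
+ LiteralFooter = False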
+
+
+RPC API Changes
+---------------
+
+The +readTaggedBuilds+ and +readTaggedRPMS+ calls now treat an integer value for the
+optional +latest+ argument differently. Before, it was simply treated as a boolean flag, which
+if true caused the call to return only the latest build for each package. Now, if
+the value is a positive integer N, it will return the N latest builds for each
+package. The behavior is unchanged for other values.
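+
+For example, a client could ask for the two latest builds of each package in a
+tag (a minimal sketch; the hub URL and tag name are illustrative):
+
+ import koji
+ # an anonymous session is enough for this read-only call
+ session = koji.ClientSession('http://hub.example.com/kojihub')
+ builds = session.readTaggedBuilds('dist-fc7', latest=2)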
+
+New rpc calls: +chainMaven+, +buildImageIndirection+, and +mergeScratch+.
+
diff --git a/docs/Migrating_to_1.7.txt b/docs/Migrating_to_1.7.txt
new file mode 100644
index 0000000..a667e52
--- /dev/null
+++ b/docs/Migrating_to_1.7.txt
@@ -0,0 +1,141 @@
+Migrating to Koji 1.7
+=====================
+
+// asciidoc formatted
+
+The 1.7 release of Koji contains changes that will require a little extra
+work when updating. These changes are:
+
+- DB schema updates to support storage volumes
+- The change from mod_python to mod_wsgi
+- The introduction of a separate configuration file for koji-web
+- Changes to url options
+
+DB Schema Updates
+-----------------
+
+The 1.7 release adds two new tables to the database. The +volume+ table tracks
+the names of available storage volumes, and the +tag_updates+ table tracks
+changes to tags that are not easily calculated from other tables. There is
+also a new field in the +build+ table, +volume_id+, which indicates which
+volume a build is stored on.
+
+As in previous releases, we provide a migration script that updates the
+database.
+
+ # psql koji koji </usr/share/doc/koji-1.7.0/docs/schema-upgrade-1.5-1.7.sql
+
+
+mod_python and mod_wsgi
+-----------------------
+
+Koji now defaults to using mod_wsgi to interface with httpd. Support for
+mod_python is _deprecated_ and will disappear in a future version of Koji.
+Koji administrators can opt to stay on mod_python for now, but some minor
+configuration changes will be required.
+
+Migrating to mod_wsgi
+~~~~~~~~~~~~~~~~~~~~~
+
+The mod_wsgi package is now required for both koji-hub and koji-web. Folks
+running RHEL5 can find mod_wsgi in EPEL.
+
+You will need to adjust your http config for both koji-hub and koji-web. Our
+example config files default to mod_wsgi. To adapt your existing config, you
+will need to:
+
+- For both the koji-hub and koji-web/scripts directories:
+ * add +Options ExecCGI+
+ * change +SetHandler+ from mod_python to wsgi-script
+- Ensure that the koji-web Alias points to wsgi_publisher.py
+- If you have not already, migrate all koji-hub PythonOptions to hub.conf
+- Migrate all koji-web PythonOptions to web.conf (see later section)
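+
+For the hub, the resulting httpd directives might look like this (a sketch
+assuming the default install path; adjust to your layout):
+
+ <Directory "/usr/share/koji-hub">
+ Options ExecCGI
+ SetHandler wsgi-script
+ </Directory>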
+
+Staying on mod_python
+~~~~~~~~~~~~~~~~~~~~~
+
+Support for mod_python is _deprecated_ and will disappear in a future version
+of Koji.
+
+While we have made efforts to maintain mod_python compatibility, there are
+a few configuration changes you will need to make.
+
+The koji-hub http config should continue to function without modification.
+
+The koji-web http config will, at minimum, require the following changes:
+
+- Ensure that the koji-web +Alias+ points to wsgi_publisher.py
+- Change koji-web's +PythonHandler+ setting to wsgi_publisher
+
+Our example http configurations contain commented examples of mod_python
+configuration.
+
+Even if you stay on mod_python, we recommend that you migrate away from using
+PythonOptions and place your configuration in web.conf and hub.conf.
+
+
+Web Configuration
+-----------------
+
+Starting with version 1.7, koji-web uses a separate configuration file, rather
+than PythonOptions embedded in the httpd config. The location of the new file
+is:
+
+ /etc/kojiweb/web.conf
+
+The web.conf file is an ini-style configuration file. Options should be placed
+in the [web] section. All previous options accepted via PythonOptions are
+accepted in web.conf. Please see the example web.conf file.
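+
+A minimal sketch (the values are illustrative):
+
+ [web]
+ SiteName = koji
+ KojiHubURL = http://hub.example.com/kojihub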
+
+
+Custom Config File Location
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The location of web.conf can be specified in the httpd configuration. To
+specify the location under mod_wsgi, use:
+
+ SetEnv koji.web.ConfigFile /path/to/web.conf
+
+Under mod_python, use:
+
+ PythonOption koji.web.ConfigFile /path/to/web.conf
+
+If you opt to stay on mod_python, the server will continue to process the old
+PythonOptions. To ease migration, it does so by default unless the
+koji.web.ConfigFile PythonOption is specified. In order to use web.conf under
+mod_python, you _must_ specify koji.web.ConfigFile in your http config.
+
+We strongly recommend moving to web.conf. The server will issue a warning at
+startup if web.conf is not in use.
+
+
+Changes to url options
+----------------------
+
+The pkgurl option has been removed from the koji command line tool and from
+the build daemon (kojid). The url for packages is deduced from the topurl
+option, which should point to the top of the /mnt/koji tree.
+
+Any config files that specify pkgurl (e.g. ~/.koji/config, /etc/koji.conf, or
+/etc/kojid/kojid.conf) will need to be adjusted.
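+
+For example, replacing a pkgurl setting in ~/.koji/config might look like this
+(a sketch; the URL is illustrative):
+
+ ;pkgurl = http://koji.example.com/packages
+ topurl = http://koji.example.com/kojifiles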
+
+Similarly, the kojiweb config options KojiPackagesURL, KojiMavenURL, and
+KojiImagesURL have been dropped in favor of the new option KojiFilesURL.
+
+
+Additional Notes
+----------------
+
+Split Storage
+~~~~~~~~~~~~~
+
+Apart from the schema changes, no other migration steps are required for the
+split storage feature. By default, builds are stored in the normal location.
+
+Web Themes
+~~~~~~~~~~
+
+Using the old method (httpd aliases for koji static content) should continue
+to work. For (brief) instructions on the new method, see the README file under
+koji-web/static/themes.
+
diff --git a/docs/Migrating_to_1.8.txt b/docs/Migrating_to_1.8.txt
new file mode 100644
index 0000000..8223580
--- /dev/null
+++ b/docs/Migrating_to_1.8.txt
@@ -0,0 +1,112 @@
+Migrating to Koji 1.8
+=====================
+
+// asciidoc formatted
+
+The 1.8 release of Koji refactors how images (livecd and appliance) are stored
+in the database and on disc. These changes will require a little extra work
+when updating.
+
+There have also been some changes to the command line.
+
+Finally, kojira accepts some new options.
+
+DB Schema Updates
+-----------------
+
+Prior to 1.8, images were stored separately from other builds, both in
+the database and on disc. The new schema adds new tables: +image_builds+,
++image_listing+, and +image_archives+.
+
+The following tables are now obsolete: +imageinfo+ and +imageinfo_listing+.
+However you should not drop these tables until you have migrated your image
+data.
+
+As in previous releases, we provide a migration script that updates the
+database.
+
+ # psql koji koji </usr/share/doc/koji-1.8.0/docs/schema-upgrade-1.7-1.8.sql
+
+Note that the SQL script does not (and cannot) automatically migrate your old
+image data to the new tables. After applying the schema changes, you can
+migrate old images using the +migrateImage+ hub call. This method is necessary
+because the new schema requires each image to have a name, version, and release
+value. The values for name and version cannot be automatically guessed.
+
+
+Migrating your old images
+-------------------------
+
+If you have old images, you can migrate them to the new system using the
++migrateImage+ hub call. This call requires admin privilege and must also be
+enabled with the +EnableImageMigration+ configuration option in +hub.conf+.
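+
+In hub.conf that would be (a sketch):
+
+ EnableImageMigration = True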
+
+The signature of the call is:
+
+ migrateImage(old_image_id, name, version)
+
+This call can be made from the command line:
+
+ # koji call migrateImage 45 my_livecd 1.1
+
+
+Cleaning up
+-----------
+
+After you have migrated any necessary images to the new system, you may want to
+remove the old database tables and filesystem directories. This step is
+*optional*. If you want to leave the old data around, it will not affect Koji.
+
+Before you take any of the following actions, please *make sure* that you have
+migrated any desired images.
+
+Removing the old data is simply a matter of dropping tables and deleting files.
+
+ koji=> DROP TABLE imageinfo_listing;
+ koji=> DROP TABLE imageinfo;
+ # rm -rf /mnt/koji/images
+
+
+Command line changes
+--------------------
+
+For clarity and consistency, all of the +-pkg+ commands have been renamed to
++-build+ commands.
+
+ latest-pkg -> latest-build
+ move-pkg -> move-build
+ tag-pkg -> tag-build
+ untag-pkg -> untag-build
+
+For backwards compatibility, the old command names are also recognized.
+
+A new command has been added, +remove-pkg+.
+
+Several commands have been modified to support images.
+
+The +spin-livecd+ and +spin-appliance+ commands now require additional
+arguments. These arguments specify the name and version to use for the image.
+
+
+New kojira options
+------------------
+
+The following options are new to kojira:
+
+ max_delete_processes
+ max_repo_tasks_maven
+
+Previously, kojira ran as a single process and repo deletions could potentially
+slow things down (particularly for Maven-enabled repos). Now kojira spawns
+a separate process to handle these deletions. The +max_delete_processes+ option
+determines how many such processes it will launch at one time.
+
+When Maven-enabled repos are in use, they can potentially take a very long time
+to regenerate. If a number of these pile up it can severely slow down
+regeneration of non-Maven repos. The +max_repo_tasks_maven+ option limits how many
+Maven repos kojira will attempt to regenerate at once.
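+
+In kojira.conf these might be set like this (a sketch; the values are
+illustrative):
+
+ max_delete_processes = 4
+ max_repo_tasks_maven = 2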
+
+Also the following kojira option has been removed:
+
+ prune_batch_size
+
diff --git a/docs/Migrating_to_1.9.txt b/docs/Migrating_to_1.9.txt
new file mode 100644
index 0000000..dc734b8
--- /dev/null
+++ b/docs/Migrating_to_1.9.txt
@@ -0,0 +1,79 @@
+Migrating to Koji 1.9
+=====================
+
+// asciidoc formatted
+
+The 1.9 release of Koji includes a few changes that you should consider when
+migrating.
+
+DB Updates
+----------
+
+ImageFactory support introduced some new archive types. These have been added to
+the +archivetypes+ table. The inaccurate +vmx+ entry has been removed.
+
+As in previous releases, we provide a migration script that updates the
+database.
+
+ # psql koji koji </usr/share/doc/koji-1.9.0/docs/schema-upgrade-1.8-1.9.sql
+
+
+Command line changes
+--------------------
+
+The command line interface handles configuration files a little differently. Old
+configs should work just fine, but now there are new options and enhancements.
+
+In addition to the main configuration files, the koji cli now checks for
++/etc/koji.conf.d+ and +~/.koji/config.d+ directories and loads any +*.conf+ files
+contained within. Also if the user specifies a directory with the +-c/--config+
+option, then that directory will be processed similarly.
+
+The command line supports a new +-p/--profile+ option to select alternate configuration
+profiles without having to link or rename the koji executable.
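+
+For example, a hypothetical +~/.koji/config.d/stage.conf+ could define a "stage"
+profile that is then selected with +-p+ (the values are illustrative):
+
+ [stage]
+ server = http://stage-hub.example.com/kojihub
+
+ $ koji -p stage list-targets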
+
+The new +image-build+ command is used to generate images using ImageFactory. The older
+spin-appliance command is now deprecated.
+
+The +mock-config+ command no longer requires a name argument. You can still specify one
+if you want to override the default choice. It also supports new options. The
++--latest+ option causes the resulting mock config to reference the ``latest'' repo (a
+varying symlink). The +--target+ option allows generating the config from a target name.
+
+Other command line changes include:
+
+* a new +download-logs+ command
+* the +list-groups+ command now accepts event args
+* the +taginfo+ command now reports the list of comps groups for the tag
+* the fast upload feature is now used automatically if the server supports it
+
+Other Configuration changes
+---------------------------
+
+There are also some minor configuration changes in other parts of Koji.
+
+In +kojid+ the time limit for rpm builds is now configurable via the +rpmbuild_timeout+
+setting in kojid.conf. The default is 24 hours.
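+
+For example, raising the limit to 48 hours (a sketch, assuming the value is
+given in seconds):
+
+ rpmbuild_timeout = 172800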
+
+The +koji-gc+ tool supports two new configuration options. The +krbservice+ option allows
+you to specify the kerberos service for authentication, and the +email_domain+ option
+allows you to specify the email domain for sending gc notices.
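+
+In koji-gc.conf these might look like this (a sketch; the values are
+illustrative):
+
+ krbservice = host
+ email_domain = example.com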
+
+The messagebus hub plugin now supports +timeout+ and +heartbeat+ options for the message
+bus connection.
+
+
+RPC API Changes
+---------------
+
+Most of these changes are extensions, though some of the host-only call changes are
+incompatible.
+
+The +tagHistory+ call accepts a new named boolean option (+active+) to select only
+active/inactive entries. It also now reports the additional fields maven_build_id and
+win_build_id for Maven and Windows builds, respectively.
+
+New rpc calls: +buildImageOz+, +host.completeImageBuild+, and +host.evalPolicy+.
+
+The host-only calls +host.moveImageBuildToScratch+ and +host.importImage+ no longer
+accept the +rpm_results+ argument. The rpm results can be embedded in the regular
++results+ argument.
diff --git a/docs/schema-upgrade-1.2-1.3.sql b/docs/schema-upgrade-1.2-1.3.sql
new file mode 100644
index 0000000..c970115
--- /dev/null
+++ b/docs/schema-upgrade-1.2-1.3.sql
@@ -0,0 +1,62 @@
+-- upgrade script to migrate the Koji database schema
+-- from version 1.2 to 1.3
+
+BEGIN;
+
+-- external yum repos
+create table external_repo (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT UNIQUE NOT NULL
+);
+-- fake repo id for internal stuff (needed for unique index)
+INSERT INTO external_repo (id, name) VALUES (0, 'INTERNAL');
+
+create table external_repo_config (
+ external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
+ url TEXT NOT NULL,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL )
+ OR (active IS NOT NULL AND revoke_event IS NULL )),
+ PRIMARY KEY (create_event, external_repo_id),
+ UNIQUE (external_repo_id, active)
+) WITHOUT OIDS;
+
+create table tag_external_repos (
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
+ priority INTEGER NOT NULL,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL )
+ OR (active IS NOT NULL AND revoke_event IS NULL )),
+ PRIMARY KEY (create_event, tag_id, priority),
+ UNIQUE (tag_id, priority, active),
+ UNIQUE (tag_id, external_repo_id, active)
+);
+
+-- add the new column, then set the existing packages to have the INTERNAL external repo id
+-- then add the not null constraint
+-- then drop rpminfo_unique_nvra CONSTRAINT and add the new version
+ALTER TABLE rpminfo ADD COLUMN external_repo_id INTEGER REFERENCES external_repo(id);
+UPDATE rpminfo SET external_repo_id = 0;
+ALTER TABLE rpminfo ALTER COLUMN external_repo_id SET NOT NULL;
+ALTER TABLE rpminfo DROP CONSTRAINT rpminfo_unique_nvra;
+ALTER TABLE rpminfo ADD CONSTRAINT rpminfo_unique_nvra UNIQUE (name,version,release,arch,external_repo_id);
+
+GRANT SELECT ON external_repo, external_repo_config, tag_external_repos TO PUBLIC;
+
+-- these tables are no longer included with newer koji
+-- feel free to drop them
+-- DROP TABLE rpmfiles;
+-- DROP TABLE rpmdeps;
+-- DROP TABLE changelogs;
+-- DROP TABLE archivefiles;
+
+COMMIT;
diff --git a/docs/schema-upgrade-1.3-1.4.sql b/docs/schema-upgrade-1.3-1.4.sql
new file mode 100644
index 0000000..3754ba2
--- /dev/null
+++ b/docs/schema-upgrade-1.3-1.4.sql
@@ -0,0 +1,267 @@
+-- upgrade script to migrate the Koji database schema
+-- from version 1.3 to 1.4
+
+BEGIN;
+
+-- First the simple stuff. A pair of new host fields.
+ALTER TABLE host ADD COLUMN description TEXT;
+ALTER TABLE host ADD COLUMN comment TEXT;
+-- ...and a new field for tasks
+ALTER TABLE task ADD COLUMN start_time TIMESTAMP;
+
+
+-- new standard permissions and channels
+INSERT INTO permissions (name) VALUES ('maven-import');
+INSERT INTO permissions (name) VALUES ('appliance');
+
+INSERT INTO channels (name) VALUES ('maven');
+INSERT INTO channels (name) VALUES ('appliance');
+
+
+-- extensions for maven support
+ALTER TABLE tag_config ADD COLUMN maven_support BOOLEAN NOT NULL DEFAULT FALSE;
+ALTER TABLE tag_config ADD COLUMN maven_include_all BOOLEAN NOT NULL DEFAULT FALSE;
+
+CREATE TABLE maven_builds (
+ build_id INTEGER NOT NULL PRIMARY KEY REFERENCES build(id),
+ group_id TEXT NOT NULL,
+ artifact_id TEXT NOT NULL,
+ version TEXT NOT NULL
+) WITHOUT OIDS;
+
+CREATE TABLE archivetypes (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT NOT NULL UNIQUE,
+ description TEXT NOT NULL,
+ extensions TEXT NOT NULL
+) WITHOUT OIDS;
+
+insert into archivetypes (name, description, extensions) values ('jar', 'Jar file', 'jar war rar ear');
+insert into archivetypes (name, description, extensions) values ('zip', 'Zip archive', 'zip');
+insert into archivetypes (name, description, extensions) values ('pom', 'Maven Project Object Management file', 'pom');
+insert into archivetypes (name, description, extensions) values ('tar', 'Tar file', 'tar tar.gz tar.bz2');
+insert into archivetypes (name, description, extensions) values ('xml', 'XML file', 'xml');
+
+CREATE TABLE archiveinfo (
+ id SERIAL NOT NULL PRIMARY KEY,
+ type_id INTEGER NOT NULL REFERENCES archivetypes (id),
+ build_id INTEGER NOT NULL REFERENCES build (id),
+ buildroot_id INTEGER REFERENCES buildroot (id),
+ filename TEXT NOT NULL,
+ size INTEGER NOT NULL,
+ md5sum TEXT NOT NULL
+) WITHOUT OIDS;
+CREATE INDEX archiveinfo_build_idx ON archiveinfo (build_id);
+CREATE INDEX archiveinfo_buildroot_idx on archiveinfo (buildroot_id);
+CREATE INDEX archiveinfo_type_idx on archiveinfo (type_id);
+CREATE INDEX archiveinfo_filename_idx on archiveinfo(filename);
+
+CREATE TABLE maven_archives (
+ archive_id INTEGER NOT NULL PRIMARY KEY REFERENCES archiveinfo(id),
+ group_id TEXT NOT NULL,
+ artifact_id TEXT NOT NULL,
+ version TEXT NOT NULL
+) WITHOUT OIDS;
+
+CREATE TABLE buildroot_archives (
+ buildroot_id INTEGER NOT NULL REFERENCES buildroot (id),
+ archive_id INTEGER NOT NULL REFERENCES archiveinfo (id),
+ project_dep BOOLEAN NOT NULL,
+ PRIMARY KEY (buildroot_id, archive_id)
+) WITHOUT OIDS;
+CREATE INDEX buildroot_archives_archive_idx ON buildroot_archives (archive_id);
+
+
+
+-- The rest updates all the versioned tables to track who did what
+
+-- One issue with this is that we need to provide creator/revoker data
+-- for existing rows. Our approach is to create a disabled user to use
+-- for this named 'nobody'. The temporary function is merely a convenient
+-- way to reference the user we create.
+INSERT INTO users (name, status, usertype) VALUES ('nobody', 1, 0);
+CREATE FUNCTION pg_temp.user() returns INTEGER as $$ select id from users where name='nobody' $$ language SQL;
+-- If you would like to use an existing user instead, then:
+-- 1. comment out the users insert
+-- 2. edit the temporary function to look for the alternate user name
+
+SELECT 'Updating table user_perms';
+
+ALTER TABLE user_perms ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE user_perms ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE user_perms SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE user_perms SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE user_perms ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE user_perms DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE user_perms ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table user_groups';
+
+ALTER TABLE user_groups ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE user_groups ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE user_groups SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE user_groups SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE user_groups ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE user_groups DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE user_groups ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table tag_inheritance';
+
+ALTER TABLE tag_inheritance ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE tag_inheritance ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE tag_inheritance SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE tag_inheritance SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE tag_inheritance ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE tag_inheritance DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE tag_inheritance ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table tag_config';
+
+ALTER TABLE tag_config ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE tag_config ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE tag_config SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE tag_config SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE tag_config ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE tag_config DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE tag_config ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table build_target_config';
+
+ALTER TABLE build_target_config ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE build_target_config ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE build_target_config SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE build_target_config SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE build_target_config ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE build_target_config DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE build_target_config ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table external_repo_config';
+
+ALTER TABLE external_repo_config ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE external_repo_config ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE external_repo_config SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE external_repo_config SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE external_repo_config ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE external_repo_config DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE external_repo_config ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table tag_external_repos';
+
+ALTER TABLE tag_external_repos ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE tag_external_repos ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE tag_external_repos SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE tag_external_repos SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE tag_external_repos ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE tag_external_repos DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE tag_external_repos ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table tag_listing';
+
+ALTER TABLE tag_listing ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE tag_listing ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE tag_listing SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE tag_listing SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE tag_listing ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE tag_listing DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE tag_listing ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table tag_packages';
+
+ALTER TABLE tag_packages ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE tag_packages ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE tag_packages SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE tag_packages SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE tag_packages ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE tag_packages DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE tag_packages ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table group_config';
+
+ALTER TABLE group_config ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE group_config ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE group_config SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE group_config SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE group_config ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE group_config DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE group_config ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table group_req_listing';
+
+ALTER TABLE group_req_listing ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE group_req_listing ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE group_req_listing SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE group_req_listing SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE group_req_listing ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE group_req_listing DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE group_req_listing ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+
+SELECT 'Updating table group_package_listing';
+
+ALTER TABLE group_package_listing ADD COLUMN creator_id INTEGER REFERENCES users(id);
+ALTER TABLE group_package_listing ADD COLUMN revoker_id INTEGER REFERENCES users(id);
+
+UPDATE group_package_listing SET creator_id=pg_temp.user() WHERE creator_id IS NULL;
+UPDATE group_package_listing SET revoker_id=pg_temp.user() WHERE revoker_id IS NULL AND revoke_event IS NOT NULL;
+
+ALTER TABLE group_package_listing ALTER COLUMN creator_id SET NOT NULL;
+ALTER TABLE group_package_listing DROP CONSTRAINT active_revoke_sane;
+ALTER TABLE group_package_listing ADD CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL));
+
+COMMIT;
diff --git a/docs/schema-upgrade-1.4-1.5.sql b/docs/schema-upgrade-1.4-1.5.sql
new file mode 100644
index 0000000..ccc4432
--- /dev/null
+++ b/docs/schema-upgrade-1.4-1.5.sql
@@ -0,0 +1,36 @@
+-- upgrade script to migrate the Koji database schema
+-- from version 1.4 to 1.5
+
+BEGIN;
+
+INSERT INTO permissions (name) VALUES ('win-import');
+INSERT INTO permissions (name) VALUES ('win-admin');
+
+INSERT INTO channels (name) VALUES ('vm');
+
+insert into archivetypes (name, description, extensions) values ('spec', 'RPM spec file', 'spec');
+insert into archivetypes (name, description, extensions) values ('exe', 'Windows executable', 'exe');
+insert into archivetypes (name, description, extensions) values ('dll', 'Windows dynamic link library', 'dll');
+insert into archivetypes (name, description, extensions) values ('lib', 'Windows import library', 'lib');
+insert into archivetypes (name, description, extensions) values ('sys', 'Windows device driver', 'sys');
+insert into archivetypes (name, description, extensions) values ('inf', 'Windows driver information file', 'inf');
+insert into archivetypes (name, description, extensions) values ('cat', 'Windows catalog file', 'cat');
+insert into archivetypes (name, description, extensions) values ('msi', 'Windows Installer package', 'msi');
+insert into archivetypes (name, description, extensions) values ('pdb', 'Windows debug information', 'pdb');
+insert into archivetypes (name, description, extensions) values ('oem', 'Windows driver oem file', 'oem');
+
+-- flag to indicate that a build is a Windows build
+CREATE TABLE win_builds (
+ build_id INTEGER NOT NULL PRIMARY KEY REFERENCES build(id),
+ platform TEXT NOT NULL
+) WITHOUT OIDS;
+
+-- Extended information about files built in Windows VMs
+CREATE TABLE win_archives (
+ archive_id INTEGER NOT NULL PRIMARY KEY REFERENCES archiveinfo(id),
+ relpath TEXT NOT NULL,
+ platforms TEXT NOT NULL,
+ flags TEXT
+) WITHOUT OIDS;
+
+COMMIT WORK;
diff --git a/docs/schema-upgrade-1.6-1.7.sql b/docs/schema-upgrade-1.6-1.7.sql
new file mode 100644
index 0000000..614eb74
--- /dev/null
+++ b/docs/schema-upgrade-1.6-1.7.sql
@@ -0,0 +1,25 @@
+BEGIN;
+
+CREATE TABLE volume (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+INSERT INTO volume (id, name) VALUES (0, 'DEFAULT');
+
+ALTER TABLE build ADD COLUMN volume_id INTEGER REFERENCES volume (id);
+UPDATE build SET volume_id = 0;
+ALTER TABLE build ALTER COLUMN volume_id SET NOT NULL;
+
+CREATE TABLE tag_updates (
+ id SERIAL NOT NULL PRIMARY KEY,
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ update_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ updater_id INTEGER NOT NULL REFERENCES users(id),
+ update_type INTEGER NOT NULL
+) WITHOUT OIDS;
+
+CREATE INDEX tag_updates_by_tag ON tag_updates (tag_id);
+CREATE INDEX tag_updates_by_event ON tag_updates (update_event);
+
+COMMIT;
diff --git a/docs/schema-upgrade-1.7-1.8.sql b/docs/schema-upgrade-1.7-1.8.sql
new file mode 100644
index 0000000..60f5c19
--- /dev/null
+++ b/docs/schema-upgrade-1.7-1.8.sql
@@ -0,0 +1,47 @@
+-- schema migration from version 1.7 to 1.8
+-- note: this update will require additional steps, please see the migration doc
+
+BEGIN;
+
+
+-- The following tables are now obsolete:
+-- imageinfo
+-- imageinfo_listing
+-- However, we cannot drop them until after we migrate the data
+
+-- create new image tables
+CREATE TABLE image_builds (
+ build_id INTEGER NOT NULL PRIMARY KEY REFERENCES build(id)
+) WITHOUT OIDS;
+
+CREATE TABLE image_archives (
+ archive_id INTEGER NOT NULL PRIMARY KEY REFERENCES archiveinfo(id),
+ arch VARCHAR(16) NOT NULL
+) WITHOUT OIDS;
+
+CREATE TABLE image_listing (
+ image_id INTEGER NOT NULL REFERENCES image_archives(archive_id),
+ rpm_id INTEGER NOT NULL REFERENCES rpminfo(id),
+ UNIQUE (image_id, rpm_id)
+) WITHOUT OIDS;
+CREATE INDEX image_listing_rpms on image_listing(rpm_id);
+
+-- alter archiveinfo
+ALTER TABLE archiveinfo ALTER COLUMN size TYPE BIGINT;
+ALTER TABLE archiveinfo RENAME COLUMN md5sum TO checksum;
+ALTER TABLE archiveinfo ADD COLUMN checksum_type INTEGER NOT NULL DEFAULT 0;
+ALTER TABLE archiveinfo ALTER COLUMN checksum_type DROP DEFAULT;
+-- the main schema has no default for checksum_type
+-- this is just an easy way to populate the fields for the old entries
+
+
+
+-- new archive types
+insert into archivetypes (name, description, extensions) values ('iso', 'CD/DVD Image', 'iso');
+insert into archivetypes (name, description, extensions) values ('raw', 'Raw disk image', 'raw');
+insert into archivetypes (name, description, extensions) values ('qcow', 'QCOW image', 'qcow');
+insert into archivetypes (name, description, extensions) values ('qcow2', 'QCOW2 image', 'qcow2');
+insert into archivetypes (name, description, extensions) values ('vmx', 'VMX image', 'vmx');
+insert into archivetypes (name, description, extensions) values ('xsd', 'XML Schema Definition', 'xsd');
+
+COMMIT;
diff --git a/docs/schema-upgrade-1.8-1.9.sql b/docs/schema-upgrade-1.8-1.9.sql
new file mode 100644
index 0000000..00e39c7
--- /dev/null
+++ b/docs/schema-upgrade-1.8-1.9.sql
@@ -0,0 +1,16 @@
+
+BEGIN;
+
+-- new archive types
+insert into archivetypes (name, description, extensions) values ('vmdk', 'vSphere image', 'vmdk');
+insert into archivetypes (name, description, extensions) values ('ova', 'OVA image', 'ova');
+insert into archivetypes (name, description, extensions) values ('ks', 'Kickstart', 'ks');
+insert into archivetypes (name, description, extensions) values ('cfg', 'Configuration file', 'cfg');
+
+COMMIT;
+
+BEGIN;
+-- it's harmless if this part fails.
+-- there shouldn't be any references to this, but keep it in a separate transaction just in case
+delete from archivetypes where name = 'vmx';
+COMMIT;
diff --git a/docs/schema-upgrade-1.9-1.10.sql b/docs/schema-upgrade-1.9-1.10.sql
new file mode 100644
index 0000000..b70698d
--- /dev/null
+++ b/docs/schema-upgrade-1.9-1.10.sql
@@ -0,0 +1,50 @@
+
+BEGIN;
+
+INSERT INTO channels (name) VALUES ('image');
+
+
+CREATE TABLE tag_extra (
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ key TEXT NOT NULL,
+ value TEXT NOT NULL, -- TODO - move this to jsonb when we can
+-- versioned - see desc above
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, tag_id, key),
+ UNIQUE (tag_id, key, active)
+) WITHOUT OIDS;
+
+
+update archivetypes set extensions='jar war rar ear sar jdocbook jdocbook-style' where name='jar';
+update archivetypes set description='Zip file' where name='zip';
+update archivetypes set extensions='tar tar.gz tar.bz2 tar.xz' where name='tar';
+update archivetypes set description='Open Virtualization Archive' where name='ova';
+
+insert into archivetypes (name, description, extensions) values ('vdi', 'VirtualBox Virtual Disk Image', 'vdi');
+insert into archivetypes (name, description, extensions) values ('aar', 'Binary distribution of an Android Library project', 'aar');
+insert into archivetypes (name, description, extensions) values ('apklib', 'Source distribution of an Android Library project', 'apklib');
+insert into archivetypes (name, description, extensions) values ('cab', 'Windows cabinet file', 'cab');
+insert into archivetypes (name, description, extensions) values ('dylib', 'OS X dynamic library', 'dylib');
+insert into archivetypes (name, description, extensions) values ('gem', 'Ruby gem', 'gem');
+insert into archivetypes (name, description, extensions) values ('ini', 'INI config file', 'ini');
+insert into archivetypes (name, description, extensions) values ('js', 'Javascript file', 'js');
+insert into archivetypes (name, description, extensions) values ('ldif', 'LDAP Data Interchange Format file', 'ldif');
+insert into archivetypes (name, description, extensions) values ('manifest', 'Runtime environment for .NET applications', 'manifest');
+insert into archivetypes (name, description, extensions) values ('msm', 'Windows merge module', 'msm');
+insert into archivetypes (name, description, extensions) values ('properties', 'Properties file', 'properties');
+insert into archivetypes (name, description, extensions) values ('sig', 'Signature file', 'sig signature');
+insert into archivetypes (name, description, extensions) values ('so', 'Shared library', 'so');
+insert into archivetypes (name, description, extensions) values ('txt', 'Text file', 'txt');
+insert into archivetypes (name, description, extensions) values ('vhd', 'Hyper-V image', 'vhd');
+insert into archivetypes (name, description, extensions) values ('wsf', 'Windows script file', 'wsf');
+insert into archivetypes (name, description, extensions) values ('box', 'Vagrant Box Image', 'box');
+insert into archivetypes (name, description, extensions) values ('raw-xz', 'xz compressed raw disk image', 'raw.xz');
+
+COMMIT;
diff --git a/docs/schema.sql b/docs/schema.sql
new file mode 100644
index 0000000..d5ffa87
--- /dev/null
+++ b/docs/schema.sql
@@ -0,0 +1,813 @@
+
+-- vim:noet:sw=8
+-- still needs work
+DROP TABLE build_notifications;
+
+DROP TABLE log_messages;
+
+DROP TABLE buildroot_listing;
+DROP TABLE image_listing;
+
+DROP TABLE rpminfo;
+DROP TABLE image_builds;
+DROP TABLE image_archives;
+
+DROP TABLE group_package_listing;
+DROP TABLE group_req_listing;
+DROP TABLE group_config;
+DROP TABLE groups;
+
+DROP TABLE tag_listing;
+DROP TABLE tag_packages;
+
+DROP TABLE buildroot;
+DROP TABLE repo;
+
+DROP TABLE build_target_config;
+DROP TABLE build_target;
+
+DROP TABLE tag_config;
+DROP TABLE tag_inheritance;
+DROP TABLE tag;
+
+DROP TABLE build;
+
+DROP TABLE task;
+
+DROP TABLE host_channels;
+DROP TABLE host;
+
+DROP TABLE channels;
+DROP TABLE package;
+
+DROP TABLE user_groups;
+DROP TABLE user_perms;
+DROP TABLE permissions;
+
+DROP TABLE sessions;
+DROP TABLE users;
+
+DROP TABLE event_labels;
+DROP TABLE events;
+DROP FUNCTION get_event();
+DROP FUNCTION get_event_time(INTEGER);
+
+BEGIN WORK;
+
+-- We use the events table to sequence time
+-- in the event that the system clock rolls back, event_ids will retain proper sequencing
+CREATE TABLE events (
+ id SERIAL NOT NULL PRIMARY KEY,
+ time TIMESTAMP NOT NULL DEFAULT NOW()
+) WITHOUT OIDS;
+
+-- A function that creates an event and returns the id, used as DEFAULT value for versioned tables
+CREATE FUNCTION get_event() RETURNS INTEGER AS '
+ INSERT INTO events (time) VALUES (''now'');
+ SELECT currval(''events_id_seq'')::INTEGER;
+' LANGUAGE SQL;
+
+-- A convenience function for converting events to timestamps, useful for
+-- quick queries where you want to avoid JOINs.
+CREATE FUNCTION get_event_time(INTEGER) RETURNS TIMESTAMP AS '
+ SELECT time FROM events WHERE id=$1;
+' LANGUAGE SQL;
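+
+-- A minimal illustration (not part of the upstream schema): calling
+-- get_event() stamps a new event and returns its id (which is why it
+-- works as the DEFAULT for versioned tables), and get_event_time()
+-- maps an id back to a timestamp:
+--   SELECT get_event_time(get_event());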
+
+-- this table is used to label events
+-- most events will be unlabeled, so keeping this separate saves space
+CREATE TABLE event_labels (
+ event_id INTEGER NOT NULL REFERENCES events(id),
+ label VARCHAR(255) UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+
+-- User and session data
+CREATE TABLE users (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name VARCHAR(255) UNIQUE NOT NULL,
+ password VARCHAR(255),
+ status INTEGER NOT NULL,
+ usertype INTEGER NOT NULL,
+ krb_principal VARCHAR(255) UNIQUE
+) WITHOUT OIDS;
+
+CREATE TABLE permissions (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name VARCHAR(50) UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+-- Some basic perms
+INSERT INTO permissions (name) VALUES ('admin');
+INSERT INTO permissions (name) VALUES ('build');
+INSERT INTO permissions (name) VALUES ('repo');
+INSERT INTO permissions (name) VALUES ('livecd');
+INSERT INTO permissions (name) VALUES ('maven-import');
+INSERT INTO permissions (name) VALUES ('win-import');
+INSERT INTO permissions (name) VALUES ('win-admin');
+INSERT INTO permissions (name) VALUES ('appliance');
+
+CREATE TABLE user_perms (
+ user_id INTEGER NOT NULL REFERENCES users(id),
+ perm_id INTEGER NOT NULL REFERENCES permissions(id),
+-- versioned - see VERSIONING
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, user_id, perm_id),
+ UNIQUE (user_id,perm_id,active)
+) WITHOUT OIDS;
+
+-- groups are represented as users w/ usertype=2
+CREATE TABLE user_groups (
+ user_id INTEGER NOT NULL REFERENCES users(id),
+ group_id INTEGER NOT NULL REFERENCES users(id),
+-- versioned - see VERSIONING
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, user_id, group_id),
+ UNIQUE (user_id,group_id,active)
+) WITHOUT OIDS;
+
+-- a session can create subsessions, which are just new sessions whose
+-- 'master' field points back to the session. This field should
+-- always point to the top session. If the master session is expired,
+-- then all its subsessions should be expired as well.
+-- If a session is exclusive, it is the only session allowed for its
+-- user. The 'exclusive' field is either NULL or TRUE, never FALSE. This
+-- is so exclusivity can be enforced with a unique condition.
+CREATE TABLE sessions (
+ id SERIAL NOT NULL PRIMARY KEY,
+ user_id INTEGER NOT NULL REFERENCES users(id),
+ expired BOOLEAN NOT NULL DEFAULT FALSE,
+ master INTEGER REFERENCES sessions(id),
+ key VARCHAR(255),
+ authtype INTEGER,
+ hostip VARCHAR(255),
+ callnum INTEGER,
+ start_time TIMESTAMP NOT NULL DEFAULT NOW(),
+ update_time TIMESTAMP NOT NULL DEFAULT NOW(),
+ exclusive BOOLEAN CHECK (exclusive),
+ CONSTRAINT no_exclusive_subsessions CHECK (
+ master IS NULL OR "exclusive" IS NULL),
+ CONSTRAINT exclusive_expired_sane CHECK (
+ expired IS FALSE OR "exclusive" IS NULL),
+ UNIQUE (user_id,exclusive)
+) WITHOUT OIDS;
+CREATE INDEX sessions_master ON sessions(master);
+CREATE INDEX sessions_active_and_recent ON sessions(expired, master, update_time) WHERE (expired IS NOT TRUE AND master IS NULL);
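+
+-- An illustration (not upstream; assumes a user with id 1 exists).
+-- Because NULLs never collide in a UNIQUE constraint, UNIQUE
+-- (user_id, exclusive) only restricts rows where exclusive is TRUE:
+--   INSERT INTO sessions (user_id, exclusive) VALUES (1, TRUE);
+--   INSERT INTO sessions (user_id, exclusive) VALUES (1, TRUE); -- rejected
+--   INSERT INTO sessions (user_id) VALUES (1);                  -- fine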
+
+-- Channels are used to limit which tasks are run on which machines.
+-- Each task is assigned to a channel and each host 'listens' on one
+-- or more channels. A host will only accept tasks for channels it is
+-- listening to.
+CREATE TABLE channels (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name VARCHAR(128) UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+-- create default channel
+INSERT INTO channels (name) VALUES ('default');
+INSERT INTO channels (name) VALUES ('createrepo');
+INSERT INTO channels (name) VALUES ('maven');
+INSERT INTO channels (name) VALUES ('livecd');
+INSERT INTO channels (name) VALUES ('appliance');
+INSERT INTO channels (name) VALUES ('vm');
+INSERT INTO channels (name) VALUES ('image');
+
+-- Here we track the build machines
+-- each host has an entry in the users table also
+-- capacity: the host's weighted task capacity
+CREATE TABLE host (
+ id SERIAL NOT NULL PRIMARY KEY,
+ user_id INTEGER NOT NULL REFERENCES users (id),
+ name VARCHAR(128) UNIQUE NOT NULL,
+ arches TEXT,
+ task_load FLOAT CHECK (NOT task_load < 0) NOT NULL DEFAULT 0.0,
+ capacity FLOAT CHECK (capacity > 1) NOT NULL DEFAULT 2.0,
+ description TEXT,
+ comment TEXT,
+ ready BOOLEAN NOT NULL DEFAULT 'false',
+ enabled BOOLEAN NOT NULL DEFAULT 'true'
+) WITHOUT OIDS;
+CREATE INDEX HOST_IS_READY_AND_ENABLED ON host(enabled, ready) WHERE (enabled IS TRUE AND ready IS TRUE);
+
+CREATE TABLE host_channels (
+ host_id INTEGER NOT NULL REFERENCES host(id),
+ channel_id INTEGER NOT NULL REFERENCES channels(id),
+ UNIQUE (host_id,channel_id)
+) WITHOUT OIDS;
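+
+-- An illustrative query (not upstream): list the enabled, ready hosts
+-- that would accept tasks on a given channel:
+--   SELECT host.name FROM host
+--     JOIN host_channels ON host.id = host_channels.host_id
+--     JOIN channels ON channels.id = host_channels.channel_id
+--     WHERE channels.name = 'createrepo'
+--       AND host.enabled AND host.ready;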
+
+
+-- tasks are pretty general and may refer to all sorts of jobs, not
+-- just package builds.
+-- tasks may spawn subtasks (hence the parent field)
+-- top-level tasks have NULL parent
+-- the request and result fields are xmlrpc data.
+-- this means each task is effectively an xmlrpc call, using this table as
+-- the medium.
+-- the host_id field indicates which host is running the task. This field
+-- is used to lock the task.
+-- weight: the weight of the task (vs. host capacity)
+-- label: this field is used to label subtasks. top-level tasks will not
+-- have a label. some subtasks may be unlabeled. labels are used in task
+-- failover to prevent duplication of work.
+CREATE TABLE task (
+ id SERIAL NOT NULL PRIMARY KEY,
+ state INTEGER,
+ create_time TIMESTAMP NOT NULL DEFAULT NOW(),
+ start_time TIMESTAMP,
+ completion_time TIMESTAMP,
+ channel_id INTEGER NOT NULL REFERENCES channels(id),
+ host_id INTEGER REFERENCES host (id),
+ parent INTEGER REFERENCES task (id),
+ label VARCHAR(255),
+ waiting BOOLEAN,
+ awaited BOOLEAN,
+ owner INTEGER REFERENCES users(id) NOT NULL,
+ method TEXT,
+ request TEXT,
+ result TEXT,
+ eta INTEGER,
+ arch VARCHAR(16) NOT NULL,
+ priority INTEGER,
+ weight FLOAT CHECK (NOT weight < 0) NOT NULL DEFAULT 1.0,
+ CONSTRAINT parent_label_sane CHECK (
+ parent IS NOT NULL OR label IS NULL),
+ UNIQUE (parent,label)
+) WITHOUT OIDS;
+
+CREATE INDEX task_by_state ON task (state);
+-- CREATE INDEX task_by_parent ON task (parent); (unique condition creates similar index)
+CREATE INDEX task_by_host ON task (host_id);
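+
+-- An illustrative query (not upstream): the sort of lookup a scheduler
+-- could use to find claimable work, i.e. free tasks on a channel, best
+-- priority (lowest value) first:
+--   SELECT id FROM task
+--     WHERE state = 0            -- koji.TASK_STATES['FREE']
+--       AND channel_id = 1       -- hypothetical channel id
+--     ORDER BY priority, create_time;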
+
+
+-- by package, we mean srpm
+-- we mean the package in general, not an individual build
+CREATE TABLE package (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+-- CREATE INDEX package_by_name ON package (name);
+-- (implicitly created by unique constraint)
+
+
+CREATE TABLE volume (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+INSERT INTO volume (id, name) VALUES (0, 'DEFAULT');
+
+-- here we track the built packages
+-- this is at the srpm level, since builds are by srpm
+-- see rpminfo for isolated packages
+-- even though we track epoch, we demand that N-V-R be unique
+-- task_id: a reference to the task creating the build, may be
+-- null, or may point to a deleted task.
+CREATE TABLE build (
+ id SERIAL NOT NULL PRIMARY KEY,
+ volume_id INTEGER NOT NULL REFERENCES volume (id),
+ pkg_id INTEGER NOT NULL REFERENCES package (id) DEFERRABLE,
+ version TEXT NOT NULL,
+ release TEXT NOT NULL,
+ epoch INTEGER,
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ completion_time TIMESTAMP,
+ state INTEGER NOT NULL,
+ task_id INTEGER REFERENCES task (id),
+ owner INTEGER NOT NULL REFERENCES users (id),
+ CONSTRAINT build_pkg_ver_rel UNIQUE (pkg_id, version, release),
+ CONSTRAINT completion_sane CHECK ((state = 0 AND completion_time IS NULL) OR
+ (state != 0 AND completion_time IS NOT NULL))
+) WITHOUT OIDS;
+
+CREATE INDEX build_by_pkg_id ON build (pkg_id);
+CREATE INDEX build_completion ON build(completion_time);
+
+-- Note: some of these CREATEs may seem a little out of order. This is done to keep
+-- the references sane.
+
+CREATE TABLE tag (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name VARCHAR(50) UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+-- CREATE INDEX tag_by_name ON tag (name);
+-- (implicitly created by unique constraint)
+
+
+-- VERSIONING
+-- Several tables are versioned with the following scheme. Since this
+-- is the first, here is the explanation of how it works.
+-- The versioning fields are: create_event, revoke_event, and active
+-- The active field is either True or NULL; it is never False!
+-- The create_event and revoke_event fields refer to the event table
+-- A version is active if active is not NULL
+-- (an active version also has NULL revoke_event.)
+-- A UNIQUE condition can incorporate the 'active' field, making it
+-- apply only to the active versions.
+-- When a version is made inactive (revoked):
+-- revoke_event is set
+-- active is set to NULL
+-- Query for current data with WHERE active is not NULL
+-- (should be same as WHERE revoke_event is NULL)
+-- Query for data at event e with WHERE create_event <= e AND (revoke_event IS NULL OR e < revoke_event)
+CREATE TABLE tag_inheritance (
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ parent_id INTEGER NOT NULL REFERENCES tag(id),
+ priority INTEGER NOT NULL,
+ maxdepth INTEGER,
+ intransitive BOOLEAN NOT NULL DEFAULT 'false',
+ noconfig BOOLEAN NOT NULL DEFAULT 'false',
+ pkg_filter TEXT,
+-- versioned - see desc above
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, tag_id, priority),
+ UNIQUE (tag_id,priority,active),
+ UNIQUE (tag_id,parent_id,active)
+) WITHOUT OIDS;
+
+CREATE INDEX tag_inheritance_by_parent ON tag_inheritance (parent_id);
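+
+-- Illustrative queries for the versioning scheme above (not upstream;
+-- tag id and event id are hypothetical). Current rules, then the rules
+-- as of event 1000:
+--   SELECT parent_id, priority FROM tag_inheritance
+--     WHERE tag_id = 42 AND active IS NOT NULL;
+--   SELECT parent_id, priority FROM tag_inheritance
+--     WHERE tag_id = 42 AND create_event <= 1000
+--       AND (revoke_event IS NULL OR 1000 < revoke_event);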
+
+-- XXX - need more config options listed here
+-- perm_id: the permission that is required to apply the tag. can be NULL
+--
+CREATE TABLE tag_config (
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ arches TEXT,
+ perm_id INTEGER REFERENCES permissions(id),
+ locked BOOLEAN NOT NULL DEFAULT 'false',
+ maven_support BOOLEAN NOT NULL DEFAULT FALSE,
+ maven_include_all BOOLEAN NOT NULL DEFAULT FALSE,
+-- versioned - see desc above
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, tag_id),
+ UNIQUE (tag_id,active)
+) WITHOUT OIDS;
+
+CREATE TABLE tag_extra (
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ key TEXT NOT NULL,
+ value TEXT NOT NULL, -- TODO - move this to jsonb when we can
+-- versioned - see desc above
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, tag_id, key),
+ UNIQUE (tag_id, key, active)
+) WITHOUT OIDS;
+
+-- the tag_updates table provides a mechanism to indicate changes relevant to tag
+-- that are not reflected in a versioned table. For example: builds changing volumes,
+-- changes to external repo content, additional rpms imported to an existing build
+CREATE TABLE tag_updates (
+ id SERIAL NOT NULL PRIMARY KEY,
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ update_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ updater_id INTEGER NOT NULL REFERENCES users(id),
+ update_type INTEGER NOT NULL
+) WITHOUT OIDS;
+
+CREATE INDEX tag_updates_by_tag ON tag_updates (tag_id);
+CREATE INDEX tag_updates_by_event ON tag_updates (update_event);
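+
+-- An illustrative query (not upstream; event id hypothetical): find
+-- tags whose unversioned content changed after a given event, e.g. so
+-- cached repo data can be refreshed:
+--   SELECT DISTINCT tag_id FROM tag_updates WHERE update_event > 1000;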
+
+-- a build target tells the system where to build the package
+-- and how to tag it afterwards.
+CREATE TABLE build_target (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name VARCHAR(50) UNIQUE NOT NULL
+) WITHOUT OIDS;
+
+
+CREATE TABLE build_target_config (
+ build_target_id INTEGER NOT NULL REFERENCES build_target(id),
+ build_tag INTEGER NOT NULL REFERENCES tag(id),
+ dest_tag INTEGER NOT NULL REFERENCES tag(id),
+-- versioned - see desc above
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, build_target_id),
+ UNIQUE (build_target_id,active)
+) WITHOUT OIDS;
+
+
+-- track repos
+CREATE TABLE repo (
+ id SERIAL NOT NULL PRIMARY KEY,
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ state INTEGER
+) WITHOUT OIDS;
+
+-- external yum repos
+create table external_repo (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT UNIQUE NOT NULL
+);
+-- fake repo id for internal stuff (needed for unique index)
+INSERT INTO external_repo (id, name) VALUES (0, 'INTERNAL');
+
+create table external_repo_config (
+ external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
+ url TEXT NOT NULL,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, external_repo_id),
+ UNIQUE (external_repo_id, active)
+) WITHOUT OIDS;
+
+create table tag_external_repos (
+ tag_id INTEGER NOT NULL REFERENCES tag(id),
+ external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
+ priority INTEGER NOT NULL,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, tag_id, priority),
+ UNIQUE (tag_id, priority, active),
+ UNIQUE (tag_id, external_repo_id, active)
+);
+
+-- here we track the buildroots on the machines
+CREATE TABLE buildroot (
+ id SERIAL NOT NULL PRIMARY KEY,
+ host_id INTEGER NOT NULL REFERENCES host(id),
+ repo_id INTEGER NOT NULL REFERENCES repo (id),
+ arch VARCHAR(16) NOT NULL,
+ task_id INTEGER NOT NULL REFERENCES task (id),
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ retire_event INTEGER,
+ state INTEGER,
+ dirtyness INTEGER
+) WITHOUT OIDS;
+
+-- track spun images (livecds, installation images, VMs, ...)
+CREATE TABLE image_builds (
+ build_id INTEGER NOT NULL PRIMARY KEY REFERENCES build(id)
+) WITHOUT OIDS;
+
+-- this table associates tags with builds. an entry here tags a package
+CREATE TABLE tag_listing (
+ build_id INTEGER NOT NULL REFERENCES build (id),
+ tag_id INTEGER NOT NULL REFERENCES tag (id),
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, build_id, tag_id),
+ UNIQUE (build_id,tag_id,active)
+) WITHOUT OIDS;
+CREATE INDEX tag_listing_tag_id_key ON tag_listing(tag_id);
+
+-- this is a per-tag list of packages, with some extra info
+-- so this allows you to explicitly state which packages belong where
+-- (as opposed to beehive where this can only be done at the collection level)
+-- these are packages in general, not specific builds.
+-- this list limits which builds can be tagged with which tags
+-- if blocked is true, then the package is specifically not included. this
+-- prevents the package from being included via inheritance
+CREATE TABLE tag_packages (
+ package_id INTEGER NOT NULL REFERENCES package (id),
+ tag_id INTEGER NOT NULL REFERENCES tag (id),
+ owner INTEGER NOT NULL REFERENCES users(id),
+ blocked BOOLEAN NOT NULL DEFAULT FALSE,
+ extra_arches TEXT,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, package_id, tag_id),
+ UNIQUE (package_id,tag_id,active)
+) WITHOUT OIDS;
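+
+-- An illustrative query (not upstream; tag id hypothetical): the
+-- active, unblocked package list for one tag. Note that inheritance
+-- is resolved by the hub code, not in SQL:
+--   SELECT package.name FROM tag_packages
+--     JOIN package ON package.id = tag_packages.package_id
+--     WHERE tag_packages.tag_id = 42
+--       AND tag_packages.active IS NOT NULL
+--       AND NOT tag_packages.blocked;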
+
+-- package groups (per tag). used for generating comps for the tag repos
+CREATE TABLE groups (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name VARCHAR(50) UNIQUE NOT NULL
+ -- corresponds to the id field in a comps group
+) WITHOUT OIDS;
+
+-- if blocked is true, then the group is specifically not included. this
+-- prevents the group from being included via inheritance
+CREATE TABLE group_config (
+ group_id INTEGER NOT NULL REFERENCES groups (id),
+ tag_id INTEGER NOT NULL REFERENCES tag (id),
+ blocked BOOLEAN NOT NULL DEFAULT FALSE,
+ exported BOOLEAN DEFAULT TRUE,
+ display_name TEXT NOT NULL,
+ is_default BOOLEAN,
+ uservisible BOOLEAN,
+ description TEXT,
+ langonly TEXT,
+ biarchonly BOOLEAN,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, group_id, tag_id),
+ UNIQUE (group_id,tag_id,active)
+) WITHOUT OIDS;
+
+CREATE TABLE group_req_listing (
+ group_id INTEGER NOT NULL REFERENCES groups (id),
+ tag_id INTEGER NOT NULL REFERENCES tag (id),
+ req_id INTEGER NOT NULL REFERENCES groups (id),
+ blocked BOOLEAN NOT NULL DEFAULT FALSE,
+ type VARCHAR(25),
+ is_metapkg BOOLEAN NOT NULL DEFAULT FALSE,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, group_id, tag_id, req_id),
+ UNIQUE (group_id,tag_id,req_id,active)
+) WITHOUT OIDS;
+
+-- if blocked is true, then the package is specifically not included. this
+-- prevents the package from being included in the group via inheritance
+-- package refers to an rpm name, not necessarily an srpm name (so it does
+-- not reference the package table).
+CREATE TABLE group_package_listing (
+ group_id INTEGER NOT NULL REFERENCES groups (id),
+ tag_id INTEGER NOT NULL REFERENCES tag (id),
+ package TEXT,
+ blocked BOOLEAN NOT NULL DEFAULT FALSE,
+ type VARCHAR(25) NOT NULL,
+ basearchonly BOOLEAN,
+ requires TEXT,
+-- versioned - see earlier description of versioning
+ create_event INTEGER NOT NULL REFERENCES events(id) DEFAULT get_event(),
+ revoke_event INTEGER REFERENCES events(id),
+ creator_id INTEGER NOT NULL REFERENCES users(id),
+ revoker_id INTEGER REFERENCES users(id),
+ active BOOLEAN DEFAULT 'true' CHECK (active),
+ CONSTRAINT active_revoke_sane CHECK (
+ (active IS NULL AND revoke_event IS NOT NULL AND revoker_id IS NOT NULL)
+ OR (active IS NOT NULL AND revoke_event IS NULL AND revoker_id IS NULL)),
+ PRIMARY KEY (create_event, group_id, tag_id, package),
+ UNIQUE (group_id,tag_id,package,active)
+) WITHOUT OIDS;
+
+-- rpminfo tracks individual rpms (incl srpms)
+-- buildroot_id can be NULL (for externally built packages)
+-- even though we track epoch, we demand that N-V-R.A be unique
+-- we don't store the filename because it should always be N-V-R.A.rpm
+CREATE TABLE rpminfo (
+ id SERIAL NOT NULL PRIMARY KEY,
+ build_id INTEGER REFERENCES build (id),
+ buildroot_id INTEGER REFERENCES buildroot (id),
+ name TEXT NOT NULL,
+ version TEXT NOT NULL,
+ release TEXT NOT NULL,
+ epoch INTEGER,
+ arch VARCHAR(16) NOT NULL,
+ external_repo_id INTEGER NOT NULL REFERENCES external_repo(id),
+ payloadhash TEXT NOT NULL,
+ size BIGINT NOT NULL,
+ buildtime BIGINT NOT NULL,
+ CONSTRAINT rpminfo_unique_nvra UNIQUE (name,version,release,arch,external_repo_id)
+) WITHOUT OIDS;
+CREATE INDEX rpminfo_build ON rpminfo(build_id);
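+
+-- An illustrative query (not upstream): since the unique constraint is
+-- N-V-R.A plus external_repo_id, the same N-V-R.A can legitimately
+-- appear once per repo; this lists such duplicates:
+--   SELECT name, version, release, arch, count(*) FROM rpminfo
+--     GROUP BY name, version, release, arch HAVING count(*) > 1;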
+
+-- sighash is the checksum of the signature header
+CREATE TABLE rpmsigs (
+ rpm_id INTEGER NOT NULL REFERENCES rpminfo (id),
+ sigkey TEXT NOT NULL,
+ sighash TEXT NOT NULL,
+ CONSTRAINT rpmsigs_no_resign UNIQUE (rpm_id, sigkey)
+) WITHOUT OIDS;
+
+-- buildroot_listing needs to be created after rpminfo so it can reference it
+CREATE TABLE buildroot_listing (
+ buildroot_id INTEGER NOT NULL REFERENCES buildroot(id),
+ rpm_id INTEGER NOT NULL REFERENCES rpminfo(id),
+ is_update BOOLEAN NOT NULL DEFAULT FALSE,
+ UNIQUE (buildroot_id,rpm_id)
+) WITHOUT OIDS;
+CREATE INDEX buildroot_listing_rpms ON buildroot_listing(rpm_id);
+
+CREATE TABLE log_messages (
+ id SERIAL NOT NULL PRIMARY KEY,
+ message TEXT NOT NULL,
+ message_time TIMESTAMP NOT NULL DEFAULT NOW(),
+ logger_name VARCHAR(200) NOT NULL,
+ level VARCHAR(10) NOT NULL,
+ location VARCHAR(200),
+ host VARCHAR(200)
+) WITHOUT OIDS;
+
+CREATE TABLE build_notifications (
+ id SERIAL NOT NULL PRIMARY KEY,
+ user_id INTEGER NOT NULL REFERENCES users (id),
+ package_id INTEGER REFERENCES package (id),
+ tag_id INTEGER REFERENCES tag (id),
+ success_only BOOLEAN NOT NULL DEFAULT FALSE,
+ email TEXT NOT NULL
+) WITHOUT OIDS;
+
+GRANT SELECT ON build, package, task, tag,
+tag_listing, tag_config, tag_inheritance, tag_packages,
+rpminfo TO PUBLIC;
+
+-- example code to add initial admins
+-- insert into users (name, usertype, status, krb_principal) values ('admin', 0, 0, 'admin at EXAMPLE.COM');
+-- insert into user_perms (user_id, perm_id)
+-- select users.id, permissions.id from users, permissions
+-- where users.name in ('admin')
+-- and permissions.name = 'admin';
+
+-- Schema additions for multiplatform support
+
+-- we need to track some additional metadata about Maven builds
+CREATE TABLE maven_builds (
+ build_id INTEGER NOT NULL PRIMARY KEY REFERENCES build(id),
+ group_id TEXT NOT NULL,
+ artifact_id TEXT NOT NULL,
+ version TEXT NOT NULL
+) WITHOUT OIDS;
+
+-- Windows-specific build information
+CREATE TABLE win_builds (
+ build_id INTEGER NOT NULL PRIMARY KEY REFERENCES build(id),
+ platform TEXT NOT NULL
+) WITHOUT OIDS;
+
+-- Even though we call this archiveinfo, we can probably use it for
+-- any filetype output by a build process. In general they will be
+-- archives (.zip, .jar, .tar.gz) but could also be installer executables (.exe)
+CREATE TABLE archivetypes (
+ id SERIAL NOT NULL PRIMARY KEY,
+ name TEXT NOT NULL UNIQUE,
+ description TEXT NOT NULL,
+ extensions TEXT NOT NULL
+) WITHOUT OIDS;
+
+insert into archivetypes (name, description, extensions) values ('jar', 'Jar file', 'jar war rar ear sar jdocbook jdocbook-style');
+insert into archivetypes (name, description, extensions) values ('zip', 'Zip file', 'zip');
+insert into archivetypes (name, description, extensions) values ('pom', 'Maven Project Object Management file', 'pom');
+insert into archivetypes (name, description, extensions) values ('tar', 'Tar file', 'tar tar.gz tar.bz2 tar.xz');
+insert into archivetypes (name, description, extensions) values ('xml', 'XML file', 'xml');
+insert into archivetypes (name, description, extensions) values ('xsd', 'XML Schema Definition', 'xsd');
+insert into archivetypes (name, description, extensions) values ('spec', 'RPM spec file', 'spec');
+insert into archivetypes (name, description, extensions) values ('exe', 'Windows executable', 'exe');
+insert into archivetypes (name, description, extensions) values ('dll', 'Windows dynamic link library', 'dll');
+insert into archivetypes (name, description, extensions) values ('lib', 'Windows import library', 'lib');
+insert into archivetypes (name, description, extensions) values ('sys', 'Windows device driver', 'sys');
+insert into archivetypes (name, description, extensions) values ('inf', 'Windows driver information file', 'inf');
+insert into archivetypes (name, description, extensions) values ('cat', 'Windows catalog file', 'cat');
+insert into archivetypes (name, description, extensions) values ('msi', 'Windows Installer package', 'msi');
+insert into archivetypes (name, description, extensions) values ('pdb', 'Windows debug information', 'pdb');
+insert into archivetypes (name, description, extensions) values ('oem', 'Windows driver oem file', 'oem');
+insert into archivetypes (name, description, extensions) values ('iso', 'CD/DVD Image', 'iso');
+insert into archivetypes (name, description, extensions) values ('raw', 'Raw disk image', 'raw');
+insert into archivetypes (name, description, extensions) values ('qcow', 'QCOW image', 'qcow');
+insert into archivetypes (name, description, extensions) values ('qcow2', 'QCOW2 image', 'qcow2');
+insert into archivetypes (name, description, extensions) values ('vmdk', 'vSphere image', 'vmdk');
+insert into archivetypes (name, description, extensions) values ('ova', 'Open Virtualization Archive', 'ova');
+insert into archivetypes (name, description, extensions) values ('ks', 'Kickstart', 'ks');
+insert into archivetypes (name, description, extensions) values ('cfg', 'Configuration file', 'cfg');
+insert into archivetypes (name, description, extensions) values ('vdi', 'VirtualBox Virtual Disk Image', 'vdi');
+insert into archivetypes (name, description, extensions) values ('aar', 'Binary distribution of an Android Library project', 'aar');
+insert into archivetypes (name, description, extensions) values ('apklib', 'Source distribution of an Android Library project', 'apklib');
+insert into archivetypes (name, description, extensions) values ('cab', 'Windows cabinet file', 'cab');
+insert into archivetypes (name, description, extensions) values ('dylib', 'OS X dynamic library', 'dylib');
+insert into archivetypes (name, description, extensions) values ('gem', 'Ruby gem', 'gem');
+insert into archivetypes (name, description, extensions) values ('ini', 'INI config file', 'ini');
+insert into archivetypes (name, description, extensions) values ('js', 'Javascript file', 'js');
+insert into archivetypes (name, description, extensions) values ('ldif', 'LDAP Data Interchange Format file', 'ldif');
+insert into archivetypes (name, description, extensions) values ('manifest', 'Runtime environment for .NET applications', 'manifest');
+insert into archivetypes (name, description, extensions) values ('msm', 'Windows merge module', 'msm');
+insert into archivetypes (name, description, extensions) values ('properties', 'Properties file', 'properties');
+insert into archivetypes (name, description, extensions) values ('sig', 'Signature file', 'sig signature');
+insert into archivetypes (name, description, extensions) values ('so', 'Shared library', 'so');
+insert into archivetypes (name, description, extensions) values ('txt', 'Text file', 'txt');
+insert into archivetypes (name, description, extensions) values ('vhd', 'Hyper-V image', 'vhd');
+insert into archivetypes (name, description, extensions) values ('wsf', 'Windows script file', 'wsf');
+insert into archivetypes (name, description, extensions) values ('box', 'Vagrant Box Image', 'box');
+insert into archivetypes (name, description, extensions) values ('raw-xz', 'xz compressed raw disk image', 'raw.xz');
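+
+-- An illustrative query (not upstream): extensions is a space-separated
+-- list, so mapping a file suffix to its type needs a word match:
+--   SELECT name FROM archivetypes
+--     WHERE ' ' || extensions || ' ' LIKE '% tar.gz %';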
+
+
+-- Do we want to enforce a constraint that a build can only generate one
+-- archive with a given name?
+CREATE TABLE archiveinfo (
+ id SERIAL NOT NULL PRIMARY KEY,
+ type_id INTEGER NOT NULL REFERENCES archivetypes (id),
+ build_id INTEGER NOT NULL REFERENCES build (id),
+ buildroot_id INTEGER REFERENCES buildroot (id),
+ filename TEXT NOT NULL,
+ size BIGINT NOT NULL,
+ checksum TEXT NOT NULL,
+ checksum_type INTEGER NOT NULL
+) WITHOUT OIDS;
+CREATE INDEX archiveinfo_build_idx ON archiveinfo (build_id);
+CREATE INDEX archiveinfo_buildroot_idx on archiveinfo (buildroot_id);
+CREATE INDEX archiveinfo_type_idx on archiveinfo (type_id);
+CREATE INDEX archiveinfo_filename_idx on archiveinfo(filename);
+
+CREATE TABLE maven_archives (
+ archive_id INTEGER NOT NULL PRIMARY KEY REFERENCES archiveinfo(id),
+ group_id TEXT NOT NULL,
+ artifact_id TEXT NOT NULL,
+ version TEXT NOT NULL
+) WITHOUT OIDS;
+
+CREATE TABLE image_archives (
+ archive_id INTEGER NOT NULL PRIMARY KEY REFERENCES archiveinfo(id),
+ arch VARCHAR(16) NOT NULL
+) WITHOUT OIDS;
+
+-- tracks the contents of an image
+CREATE TABLE image_listing (
+ image_id INTEGER NOT NULL REFERENCES image_archives(archive_id),
+ rpm_id INTEGER NOT NULL REFERENCES rpminfo(id),
+ UNIQUE (image_id, rpm_id)
+) WITHOUT OIDS;
+CREATE INDEX image_listing_rpms on image_listing(rpm_id);
+
+CREATE TABLE buildroot_archives (
+ buildroot_id INTEGER NOT NULL REFERENCES buildroot (id),
+ archive_id INTEGER NOT NULL REFERENCES archiveinfo (id),
+ project_dep BOOLEAN NOT NULL,
+ PRIMARY KEY (buildroot_id, archive_id)
+) WITHOUT OIDS;
+CREATE INDEX buildroot_archives_archive_idx ON buildroot_archives (archive_id);
+
+-- Extended information about files built in Windows VMs
+CREATE TABLE win_archives (
+ archive_id INTEGER NOT NULL PRIMARY KEY REFERENCES archiveinfo(id),
+ relpath TEXT NOT NULL,
+ platforms TEXT NOT NULL,
+ flags TEXT
+) WITHOUT OIDS;
+
+COMMIT WORK;
diff --git a/hub/Makefile b/hub/Makefile
new file mode 100644
index 0000000..b6ef1c7
--- /dev/null
+++ b/hub/Makefile
@@ -0,0 +1,41 @@
+PYTHON=python
+PACKAGE = $(shell basename `pwd`)
+LIBEXECFILES = rpmdiff
+PYFILES = $(wildcard *.py)
+PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
+PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
+PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
+PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE)
+
+SERVERDIR = /usr/share/koji-hub
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/usr/libexec/koji-hub
+ install -p -m 755 $(LIBEXECFILES) $(DESTDIR)/usr/libexec/koji-hub
+
+ mkdir -p $(DESTDIR)/etc/httpd/conf.d
+ install -p -m 644 httpd.conf $(DESTDIR)/etc/httpd/conf.d/kojihub.conf
+
+ mkdir -p $(DESTDIR)/etc/koji-hub
+ install -p -m 644 hub.conf $(DESTDIR)/etc/koji-hub/hub.conf
+ mkdir -p $(DESTDIR)/etc/koji-hub/hub.conf.d
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ for p in $(PYFILES) ; do \
+ install -p -m 644 $$p $(DESTDIR)/$(SERVERDIR)/$$p; \
+ done
+	$(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(SERVERDIR)', 1, '$(SERVERDIR)', 1)"
+
diff --git a/hub/httpd.conf b/hub/httpd.conf
new file mode 100644
index 0000000..de3f19f
--- /dev/null
+++ b/hub/httpd.conf
@@ -0,0 +1,57 @@
+#
+# koji-hub is an xmlrpc interface to the Koji database
+#
+
+Alias /kojihub /usr/share/koji-hub/kojixmlrpc.py
+
+<Directory "/usr/share/koji-hub">
+ Options ExecCGI
+ SetHandler wsgi-script
+ Require all granted
+ #If you have httpd <= 2.2, you'll want the following two lines instead
+ #of the one above:
+ #Order allow,deny
+ #Allow from all
+</Directory>
+
+# Support for mod_python is DEPRECATED. If you still need mod_python support,
+# then use the following directory settings instead:
+#
+# <Directory "/usr/share/koji-hub">
+# SetHandler mod_python
+# PythonHandler kojixmlrpc
+# PythonOption ConfigFile /etc/koji-hub/hub.conf
+# PythonDebug Off
+# PythonAutoReload Off
+# </Directory>
+
+# Also serve /mnt/koji
+Alias /kojifiles "/mnt/koji/"
+
+<Directory "/mnt/koji">
+ Options Indexes SymLinksIfOwnerMatch
+ #If your top /mnt/koji directory is not owned by the httpd user, then
+ #you will need to follow all symlinks instead, e.g.
+ #Options Indexes FollowSymLinks
+ AllowOverride None
+ Require all granted
+ #If you have httpd <= 2.2, you'll want the following two lines instead
+ #of the one above:
+ #Order allow,deny
+ #Allow from all
+</Directory>
+
+# uncomment this to enable authentication via SSL client certificates
+# <Location /kojihub/ssllogin>
+# SSLVerifyClient require
+# SSLVerifyDepth 10
+# SSLOptions +StdEnvVars
+# </Location>
+
+# If you need to support koji < 1.4.0 clients using SSL authentication, then use the following instead:
+# <Location /kojihub>
+# SSLOptions +StdEnvVars
+# </Location>
+# In this case, you will need to enable these options globally (in ssl.conf):
+# SSLVerifyClient require
+# SSLVerifyDepth 10
diff --git a/hub/hub.conf b/hub/hub.conf
new file mode 100644
index 0000000..f1e40c1
--- /dev/null
+++ b/hub/hub.conf
@@ -0,0 +1,85 @@
+[hub]
+
+## ConfigParser style config file, similar to ini files
+## http://docs.python.org/library/configparser.html
+##
+## Note that multiline values can be set by indenting subsequent lines
+## (which means you should not indent regular lines)
+
+## Basic options ##
+DBName = koji
+DBUser = koji
+#DBHost = db.example.com
+#DBPass = example_password
+KojiDir = /mnt/koji
+
+
+## Kerberos authentication options ##
+
+# AuthPrincipal = host/kojihub at EXAMPLE.COM
+# AuthKeytab = /etc/koji.keytab
+# ProxyPrincipals = koji/kojiweb at EXAMPLE.COM
+## format string for host principals (%s = hostname)
+# HostPrincipalFormat = compile/%s at EXAMPLE.COM
+
+## end Kerberos auth configuration
+
+
+
+## SSL client certificate auth configuration ##
+#note: ssl auth may also require editing the httpd config (conf.d/kojihub.conf)
+
+## the client username is the common name of the subject of their client certificate
+# DNUsernameComponent = CN
+## separate multiple DNs with |
+# ProxyDNs = /C=US/ST=Massachusetts/O=Example Org/OU=Example User/CN=example/emailAddress=example at example.com
+
+## end SSL client certificate auth configuration
+
+
+
+## Other options ##
+LoginCreatesUser = On
+KojiWebURL = http://kojiweb.example.com/koji
+# The domain name that will be appended to Koji usernames
+# when creating email notifications
+#EmailDomain = example.com
+# whether to email the task owner and package owner on success; watchers are notified either way
+NotifyOnSuccess = True
+## Disables all notifications
+# DisableNotifications = False
+
+## Extended features
+## Support Maven builds
+# EnableMaven = False
+## Support Windows builds
+# EnableWin = False
+
+## Koji hub plugins
+## The path where plugins are found
+# PluginPath = /usr/lib/koji-hub-plugins
+## A space-separated list of plugins to load
+# Plugins = echo
+
+## If KojiDebug is on, the hub will be /very/ verbose and will report exception
+## details to clients for anticipated errors (i.e. koji's own exceptions --
+## subclasses of koji.GenericError).
+# KojiDebug = On
+
+## Determines how much detail about exceptions is reported to the client (via faults)
+## Meaningful values:
+## normal - a basic traceback (format_exception)
+## extended - an extended traceback (format_exc_plus)
+## anything else - no traceback, just the error message
+## The extended traceback is intended for debugging only and should NOT be
+## used in production, since it may contain sensitive information.
+# KojiTraceback = normal
+
+## These options are intended for planned outages
+# ServerOffline = False
+# OfflineMessage = temporary outage
+# LockOut = False
+## If ServerOffline is True, the server will always report a ServerOffline fault (with
+## OfflineMessage as the fault string).
+## If LockOut is True, the server will report a ServerOffline fault for all non-admin
+## requests.
diff --git a/hub/kojihub.py b/hub/kojihub.py
new file mode 100644
index 0000000..ca6985a
--- /dev/null
+++ b/hub/kojihub.py
@@ -0,0 +1,10968 @@
+# Python library
+
+# kojihub - library for koji's XMLRPC interface
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+# Cristian Balint <cbalint at redhat.com>
+
+import base64
+import calendar
+import cgi
+import copy
+import koji
+import koji.auth
+import koji.db
+import koji.plugin
+import koji.policy
+import datetime
+import errno
+import logging
+import fcntl
+import fnmatch
+import hashlib
+from koji.util import md5_constructor
+from koji.util import sha1_constructor
+from koji.util import dslice
+import os
+import re
+import rpm
+import shutil
+import simplejson as json
+import stat
+import subprocess
+import sys
+import tarfile
+import tempfile
+import time
+import types
+import xmlrpclib
+import zipfile
+from koji.context import context
+
+logger = logging.getLogger('koji.hub')
+
+def log_error(msg):
+ logger.error(msg)
+
+
+class Task(object):
+ """A task for the build hosts"""
+
+ fields = (
+ ('task.id', 'id'),
+ ('task.state', 'state'),
+ ('task.create_time', 'create_time'),
+ ('EXTRACT(EPOCH FROM create_time)','create_ts'),
+ ('task.start_time', 'start_time'),
+ ('EXTRACT(EPOCH FROM task.start_time)', 'start_ts'),
+ ('task.completion_time', 'completion_time'),
+ ('EXTRACT(EPOCH FROM completion_time)','completion_ts'),
+ ('task.channel_id', 'channel_id'),
+ ('task.host_id', 'host_id'),
+ ('task.parent', 'parent'),
+ ('task.label', 'label'),
+ ('task.waiting', 'waiting'),
+ ('task.awaited', 'awaited'),
+ ('task.owner', 'owner'),
+ ('task.method', 'method'),
+ ('task.arch', 'arch'),
+ ('task.priority', 'priority'),
+ ('task.weight', 'weight'))
+
+ def __init__(self,id):
+ self.id = id
+ self.logger = logging.getLogger("koji.hub.Task")
+
+ def verifyHost(self,host_id=None):
+ """Verify that host owns task"""
+ if host_id is None:
+ host_id = context.session.host_id
+ if host_id is None:
+ return False
+ task_id = self.id
+ #getting a row lock on this task to ensure task assignment sanity
+ #no other concurrent transaction should be altering this row
+ q = """SELECT state,host_id FROM task WHERE id=%(task_id)s FOR UPDATE"""
+ r = _fetchSingle(q, locals())
+ if not r:
+ raise koji.GenericError, "No such task: %i" % task_id
+ state, otherhost = r
+ return (state == koji.TASK_STATES['OPEN'] and otherhost == host_id)
+
+ def assertHost(self,host_id):
+ if not self.verifyHost(host_id):
+ raise koji.ActionNotAllowed, "host %d does not own task %d" % (host_id,self.id)
+
+ def getOwner(self):
+ """Return the owner (user_id) for this task"""
+ q = """SELECT owner FROM task WHERE id=%(id)i"""
+ return _singleValue(q, vars(self))
+
+ def verifyOwner(self,user_id=None):
+ """Verify that user owns task"""
+ if user_id is None:
+ user_id = context.session.user_id
+ if user_id is None:
+ return False
+ task_id = self.id
+ #getting a row lock on this task to ensure task state sanity
+ q = """SELECT owner FROM task WHERE id=%(task_id)s FOR UPDATE"""
+ r = _fetchSingle(q, locals())
+ if not r:
+ raise koji.GenericError, "No such task: %i" % task_id
+ (owner,) = r
+ return (owner == user_id)
+
+ def assertOwner(self,user_id=None):
+ if not self.verifyOwner(user_id):
+ raise koji.ActionNotAllowed, "user %d does not own task %d" % (user_id,self.id)
+
+ def lock(self,host_id,newstate='OPEN',force=False):
+ """Attempt to associate the task for host, either to assign or open
+
+ returns True if successful, False otherwise"""
+ info = self.getInfo(request=True)
+ self.runCallbacks('preTaskStateChange', info, 'state', koji.TASK_STATES[newstate])
+ self.runCallbacks('preTaskStateChange', info, 'host_id', host_id)
+ #we use row-level locks to keep things sane
+ #note the SELECT...FOR UPDATE
+ task_id = self.id
+ if not force:
+ q = """SELECT state,host_id FROM task WHERE id=%(task_id)i FOR UPDATE"""
+ r = _fetchSingle(q,locals())
+ if not r:
+ raise koji.GenericError, "No such task: %i" % task_id
+ state, otherhost = r
+ if state == koji.TASK_STATES['FREE']:
+ if otherhost is not None:
+ log_error("Error: task %i is both free and locked (host %i)"
+ % (task_id,otherhost))
+ return False
+ elif state == koji.TASK_STATES['ASSIGNED']:
+ if otherhost is None:
+ log_error("Error: task %i is assigned, but has no assignee"
+ % (task_id))
+ return False
+ elif otherhost != host_id:
+ #task is assigned to someone else
+ return False
+ #otherwise the task is assigned to host_id, so keep going
+ else:
+ if otherhost is None:
+ log_error("Error: task %i is non-free but unlocked (state %i)"
+ % (task_id,state))
+ return False
+ #if we reach here, task is either
+ # - free and unlocked
+ # - assigned to host_id
+ # - force option is enabled
+ state = koji.TASK_STATES[newstate]
+ update = UpdateProcessor('task', clauses=['id=%(task_id)i'], values=locals())
+ update.set(state=state, host_id=host_id)
+ if state == koji.TASK_STATES['OPEN']:
+ update.rawset(start_time='NOW()')
+ update.execute()
+ self.runCallbacks('postTaskStateChange', info, 'state', koji.TASK_STATES[newstate])
+ self.runCallbacks('postTaskStateChange', info, 'host_id', host_id)
+ return True
+
+ def assign(self,host_id,force=False):
+ """Attempt to assign the task to host.
+
+ returns True if successful, False otherwise"""
+ return self.lock(host_id,'ASSIGNED',force)
+
+ def open(self,host_id):
+ """Attempt to open the task for host.
+
+ returns task data if successful, None otherwise"""
+ if self.lock(host_id,'OPEN'):
+ # get more complete data to return
+ fields = self.fields + (('task.request', 'request'),)
+ query = QueryProcessor(tables=['task'], clauses=['id=%(id)i'], values=vars(self),
+ columns=[f[0] for f in fields], aliases=[f[1] for f in fields])
+ ret = query.executeOne()
+ if ret['request'].find('<?xml', 0, 10) == -1:
+ #handle older base64 encoded data
+ ret['request'] = base64.decodestring(ret['request'])
+ return ret
+ else:
+ return None
+
+ def free(self):
+ """Free a task"""
+ info = self.getInfo(request=True)
+ self.runCallbacks('preTaskStateChange', info, 'state', koji.TASK_STATES['FREE'])
+ self.runCallbacks('preTaskStateChange', info, 'host_id', None)
+ task_id = self.id
+ # access checks should be performed by calling function
+ query = """SELECT state FROM task WHERE id = %(id)i FOR UPDATE"""
+ row = _fetchSingle(query,vars(self))
+ if not row:
+ raise koji.GenericError, "No such task: %i" % self.id
+ oldstate = row[0]
+ if koji.TASK_STATES[oldstate] in ['CLOSED','CANCELED','FAILED']:
+ raise koji.GenericError, "Cannot free task %i, state is %s" % \
+ (self.id,koji.TASK_STATES[oldstate])
+ newstate = koji.TASK_STATES['FREE']
+ newhost = None
+ q = """UPDATE task SET state=%(newstate)s,host_id=%(newhost)s
+ WHERE id=%(task_id)s"""
+ _dml(q,locals())
+ self.runCallbacks('postTaskStateChange', info, 'state', koji.TASK_STATES['FREE'])
+ self.runCallbacks('postTaskStateChange', info, 'host_id', None)
+ return True
+
+ def setWeight(self,weight):
+ """Set weight for task"""
+ task_id = self.id
+ weight = float(weight)
+ info = self.getInfo(request=True)
+ self.runCallbacks('preTaskStateChange', info, 'weight', weight)
+ # access checks should be performed by calling function
+ q = """UPDATE task SET weight=%(weight)s WHERE id = %(task_id)s"""
+ _dml(q,locals())
+ self.runCallbacks('postTaskStateChange', info, 'weight', weight)
+
+ def setPriority(self, priority, recurse=False):
+ """Set priority for task"""
+ task_id = self.id
+ priority = int(priority)
+ info = self.getInfo(request=True)
+ self.runCallbacks('preTaskStateChange', info, 'priority', priority)
+ # access checks should be performed by calling function
+ q = """UPDATE task SET priority=%(priority)s WHERE id = %(task_id)s"""
+ _dml(q,locals())
+ self.runCallbacks('postTaskStateChange', info, 'priority', priority)
+
+ if recurse:
+ # Change priority of child tasks
+ q = """SELECT id FROM task WHERE parent = %(task_id)s"""
+ for (child_id,) in _fetchMulti(q, locals()):
+ Task(child_id).setPriority(priority, recurse=True)
+
+ def _close(self,result,state):
+ """Mark task closed and set response
+
+        Does not return a value; database errors raise exceptions"""
+ task_id = self.id
+ # access checks should be performed by calling function
+ # this is an approximation, and will be different than what is in the database
+ # the actual value should be retrieved from the 'new' value of the post callback
+ now = time.time()
+ info = self.getInfo(request=True)
+ info['result'] = result
+ self.runCallbacks('preTaskStateChange', info, 'state', state)
+ self.runCallbacks('preTaskStateChange', info, 'completion_ts', now)
+ update = """UPDATE task SET result = %(result)s, state = %(state)s, completion_time = NOW()
+ WHERE id = %(task_id)d
+ """
+ # get the result from the info dict, so callbacks have a chance to modify it
+ _dml(update, {'result': info['result'], 'state': state, 'task_id': task_id})
+ self.runCallbacks('postTaskStateChange', info, 'state', state)
+ self.runCallbacks('postTaskStateChange', info, 'completion_ts', now)
+
+ def close(self,result):
+ # access checks should be performed by calling function
+ self._close(result,koji.TASK_STATES['CLOSED'])
+
+ def fail(self,result):
+ # access checks should be performed by calling function
+ self._close(result,koji.TASK_STATES['FAILED'])
+
+ def getState(self):
+ query = """SELECT state FROM task WHERE id = %(id)i"""
+ return _singleValue(query, vars(self))
+
+ def isFinished(self):
+ return (koji.TASK_STATES[self.getState()] in ['CLOSED','CANCELED','FAILED'])
+
+ def isCanceled(self):
+ return (self.getState() == koji.TASK_STATES['CANCELED'])
+
+ def isFailed(self):
+ return (self.getState() == koji.TASK_STATES['FAILED'])
+
+ def cancel(self,recurse=True):
+ """Cancel this task.
+
+        A task can only be canceled if it has not already finished
+        (CLOSED or FAILED). Return True if the task is successfully
+        canceled, or if it was already canceled; return False if it is
+        closed or failed."""
+ # access checks should be performed by calling function
+ now = time.time()
+ info = self.getInfo(request=True)
+ self.runCallbacks('preTaskStateChange', info, 'state', koji.TASK_STATES['CANCELED'])
+ self.runCallbacks('preTaskStateChange', info, 'completion_ts', now)
+ task_id = self.id
+ q = """SELECT state FROM task WHERE id = %(task_id)s FOR UPDATE"""
+ state = _singleValue(q,locals())
+ st_canceled = koji.TASK_STATES['CANCELED']
+ st_closed = koji.TASK_STATES['CLOSED']
+ st_failed = koji.TASK_STATES['FAILED']
+ if state == st_canceled:
+ return True
+ elif state in [st_closed,st_failed]:
+ return False
+ update = """UPDATE task SET state = %(st_canceled)i, completion_time = NOW()
+ WHERE id = %(task_id)i"""
+ _dml(update, locals())
+ self.runCallbacks('postTaskStateChange', info, 'state', koji.TASK_STATES['CANCELED'])
+ self.runCallbacks('postTaskStateChange', info, 'completion_ts', now)
+ #cancel associated builds (only if state is 'BUILDING')
+ #since we check build state, we avoid loops with cancel_build on our end
+ b_building = koji.BUILD_STATES['BUILDING']
+ q = """SELECT id FROM build WHERE task_id = %(task_id)i
+ AND state = %(b_building)i
+ FOR UPDATE"""
+ for (build_id,) in _fetchMulti(q, locals()):
+ cancel_build(build_id, cancel_task=False)
+ if recurse:
+ #also cancel child tasks
+ self.cancelChildren()
+ return True
+
+ def cancelChildren(self):
+ """Cancel child tasks"""
+ task_id = self.id
+ q = """SELECT id FROM task WHERE parent = %(task_id)i"""
+ for (id,) in _fetchMulti(q,locals()):
+ Task(id).cancel(recurse=True)
+
+ def cancelFull(self,strict=True):
+ """Cancel this task and every other task in its group
+
+ If strict is true, then this must be a top-level task
+ Otherwise we will follow up the chain to find the top-level task
+ """
+ task_id = self.id
+ q = """SELECT parent FROM task WHERE id = %(task_id)i FOR UPDATE"""
+ parent = _singleValue(q,locals())
+ if parent is not None:
+ if strict:
+ raise koji.GenericError, "Task %d is not top-level (parent=%d)" % (task_id,parent)
+ #otherwise, find the top-level task and go from there
+ seen = {task_id:1}
+ while parent is not None:
+ if seen.has_key(parent):
+ raise koji.GenericError, "Task LOOP at task %i" % task_id
+ task_id = parent
+ seen[task_id] = 1
+ parent = _singleValue(q,locals())
+ return Task(task_id).cancelFull(strict=True)
+ #We handle the recursion ourselves, since self.cancel will stop at
+ #canceled or closed tasks.
+ tasklist = [task_id]
+ seen = {}
+ #query for use in loop
+ q_children = """SELECT id FROM task WHERE parent = %(task_id)i"""
+ for task_id in tasklist:
+ if seen.has_key(task_id):
+ #shouldn't happen
+ raise koji.GenericError, "Task LOOP at task %i" % task_id
+ seen[task_id] = 1
+ Task(task_id).cancel(recurse=False)
+ for (child_id,) in _fetchMulti(q_children,locals()):
+ tasklist.append(child_id)
+
+ def getRequest(self):
+ id = self.id
+ query = """SELECT request FROM task WHERE id = %(id)i"""
+ xml_request = _singleValue(query, locals())
+ if xml_request.find('<?xml', 0, 10) == -1:
+ #handle older base64 encoded data
+ xml_request = base64.decodestring(xml_request)
+ params, method = xmlrpclib.loads(xml_request)
+ return params
+
+ def getResult(self):
+ query = """SELECT state,result FROM task WHERE id = %(id)i"""
+ r = _fetchSingle(query, vars(self))
+ if not r:
+ raise koji.GenericError, "No such task"
+ state, xml_result = r
+ if koji.TASK_STATES[state] == 'CANCELED':
+ raise koji.GenericError, "Task %i is canceled" % self.id
+ elif koji.TASK_STATES[state] not in ['CLOSED','FAILED']:
+ raise koji.GenericError, "Task %i is not finished" % self.id
+ # If the result is a Fault, then loads will raise it
+ # This is probably what we want to happen.
+ # Note that you can't really 'return' a fault over xmlrpc, you
+ # can only 'raise' them.
+ # If you try to return a fault as a value, it gets reduced to
+ # a mere struct.
+ # f = Fault(1,"hello"); print dumps((f,))
+ if xml_result.find('<?xml', 0, 10) == -1:
+ #handle older base64 encoded data
+ xml_result = base64.decodestring(xml_result)
+ result, method = xmlrpclib.loads(xml_result)
+ return result[0]
+
+ def getInfo(self, strict=True, request=False):
+ """Return information about the task in a dictionary. If "request" is True,
+ the request will be decoded and included in the dictionary."""
+ q = """SELECT %s FROM task WHERE id = %%(id)i""" % ','.join([f[0] for f in self.fields])
+ result = _singleRow(q, vars(self), [f[1] for f in self.fields], strict)
+ if request:
+ result['request'] = self.getRequest()
+ return result
+
+ def getChildren(self, request=False):
+ """Return information about tasks with this task as their
+ parent. If there are no such Tasks, return an empty list."""
+ fields = self.fields
+ if request:
+ fields = fields + (('request', 'request'),)
+ query = """SELECT %s FROM task WHERE parent = %%(id)i""" % ', '.join([f[0] for f in fields])
+ results = _multiRow(query, vars(self), [f[1] for f in fields])
+ if request:
+ for task in results:
+ if task['request'].find('<?xml', 0, 10) == -1:
+ #handle older base64 encoded data
+ task['request'] = base64.decodestring(task['request'])
+ task['request'] = xmlrpclib.loads(task['request'])[0]
+ return results
+
+ def runCallbacks(self, cbtype, old_info, attr, new_val):
+ if cbtype.startswith('pre'):
+ info = old_info
+ elif cbtype.startswith('post'):
+ info = self.getInfo(request=True)
+ if info['state'] == koji.TASK_STATES['CLOSED']:
+ # if task is closed, include the result as well
+ info['result'] = self.getResult()
+ new_val = info[attr]
+ else:
+ raise koji.GenericError, 'unknown callback type: %s' % cbtype
+ old_val = old_info[attr]
+ if attr == 'state':
+ # state is passed in as an integer, but we want to use the string
+ old_val = koji.TASK_STATES[old_val]
+ new_val = koji.TASK_STATES[new_val]
+ koji.plugin.run_callbacks(cbtype, attribute=attr, old=old_val, new=new_val,
+ info=info)
+
+def make_task(method,arglist,**opts):
+ """Create a task
+
+ This call should not be directly exposed via xmlrpc
+ Optional args:
+ parent: the id of the parent task (creates a subtask)
+ label: (subtasks only) the label of the subtask
+ owner: the user_id that should own the task
+ channel: the channel to place the task in
+ arch: the arch for the task
+ priority: the priority of the task
+ assign: a host_id to assign the task to
+ """
+ if opts.has_key('parent'):
+ # for subtasks, we use some of the parent's options as defaults
+ fields = ('state','owner','channel_id','priority','arch')
+ q = """SELECT %s FROM task WHERE id = %%(parent)i""" % ','.join(fields)
+ r = _fetchSingle(q,opts)
+ if not r:
+ raise koji.GenericError, "Invalid parent task: %(parent)s" % opts
+ pdata = dict(zip(fields,r))
+ if pdata['state'] != koji.TASK_STATES['OPEN']:
+ raise koji.GenericError, "Parent task (id %(parent)s) is not open" % opts
+ #default to a higher priority than parent
+ opts.setdefault('priority', pdata['priority'] - 1)
+ for f in ('owner', 'arch'):
+ opts.setdefault(f,pdata[f])
+ opts.setdefault('label',None)
+ else:
+ opts.setdefault('priority',koji.PRIO_DEFAULT)
+ #calling function should enforce priority limitations, if applicable
+ opts.setdefault('arch','noarch')
+ if not context.session.logged_in:
+ raise koji.GenericError, 'task must have an owner'
+ else:
+ opts['owner'] = context.session.user_id
+ opts['label'] = None
+ opts['parent'] = None
+ #determine channel from policy
+ policy_data = {}
+ policy_data['method'] = method
+ for key in 'arch', 'parent', 'label', 'owner':
+ policy_data[key] = opts[key]
+ policy_data['user_id'] = opts['owner']
+ if 'channel' in opts:
+ policy_data['req_channel'] = opts['channel']
+ req_channel_id = get_channel_id(opts['channel'], strict=True)
+ if method == 'build':
+ # arglist = source, target, [opts]
+ args = koji.decode_args2(arglist, ('source', 'target', 'opts'))
+ policy_data['source'] = args['source']
+ if args['target'] is None:
+ #koji-shadow makes null-target builds
+ policy_data['target'] = None
+ else:
+ target = get_build_target(args['target'], strict=True)
+ policy_data['target'] = target['name']
+ t_opts = args.get('opts', {})
+ policy_data['scratch'] = t_opts.get('scratch', False)
+ ruleset = context.policy.get('channel')
+ result = ruleset.apply(policy_data)
+ if result is None:
+ logger.warning('Channel policy returned no result, using default')
+ opts['channel_id'] = get_channel_id('default', strict=True)
+ else:
+ try:
+ parts = result.split()
+ if parts[0] == "use":
+ opts['channel_id'] = get_channel_id(parts[1], strict=True)
+ elif parts[0] == "parent":
+ if not opts.get('parent'):
+ logger.error("Invalid channel policy result (no parent task): %s",
+ ruleset.last_rule())
+ raise koji.GenericError, "invalid channel policy"
+ opts['channel_id'] = pdata['channel_id']
+ elif parts[0] == "req":
+ if 'channel' not in opts:
+ logger.error('Invalid channel policy result (no channel requested): %s',
+ ruleset.last_rule())
+ raise koji.GenericError, "invalid channel policy"
+ opts['channel_id'] = req_channel_id
+ else:
+ logger.error("Invalid result from channel policy: %s", ruleset.last_rule())
+ raise koji.GenericError, "invalid channel policy"
+ except IndexError:
+ logger.error("Invalid result from channel policy: %s", ruleset.last_rule())
+ raise koji.GenericError, "invalid channel policy"
+
+ # encode xmlrpc request
+ opts['request'] = xmlrpclib.dumps(tuple(arglist), methodname=method,
+ allow_none=1)
+ opts['state'] = koji.TASK_STATES['FREE']
+ opts['method'] = method
+ koji.plugin.run_callbacks('preTaskStateChange', attribute='state', old=None, new='FREE', info=opts)
+ # stick it in the database
+
+ idata = dslice(opts, ['state', 'owner', 'method', 'request', 'priority', 'parent', 'label', 'channel_id', 'arch'])
+ if opts.get('assign'):
+ idata['state'] = koji.TASK_STATES['ASSIGNED']
+ idata['host_id'] = opts['assign']
+ insert = InsertProcessor('task', data=idata)
+ insert.execute()
+ task_id = _singleValue("SELECT currval('task_id_seq')", strict=True)
+ opts['id'] = task_id
+ koji.plugin.run_callbacks('postTaskStateChange', attribute='state', old=None, new='FREE', info=opts)
+ return task_id
+
+def mktask(__taskopts,__method,*args,**opts):
+ """A wrapper around make_task with alternate signature
+
+ Parameters:
+ __taskopts: a dictionary of task options (e.g. priority, ...)
+ __method: the method to be invoked
+
+ All remaining args (incl. optional ones) are passed on to the task.
+ """
+ return make_task(__method,koji.encode_args(*args,**opts),**__taskopts)
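+
+# Illustrative usage (the method name and argument here are hypothetical):
+# mktask({'priority': koji.PRIO_DEFAULT, 'channel': 'default'}, 'someMethod', 'arg1')
+# is equivalent to:
+# make_task('someMethod', koji.encode_args('arg1'),
+# priority=koji.PRIO_DEFAULT, channel='default')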
+
+def eventCondition(event, table=None):
+ """return the proper WHERE condition to select data at the time specified by event. """
+ if not table:
+ table = ''
+ else:
+ table += '.'
+ if event is None:
+ return """(%(table)sactive = TRUE)""" % locals()
+ elif isinstance(event, int) or isinstance(event, long):
+ return """(%(table)screate_event <= %(event)d AND ( %(table)srevoke_event IS NULL OR %(event)d < %(table)srevoke_event ))""" \
+ % locals()
+ else:
+ raise koji.GenericError, "Invalid event: %r" % event
+
+def readGlobalInheritance(event=None):
+ c=context.cnx.cursor()
+ fields = ('tag_id','parent_id','name','priority','maxdepth','intransitive',
+ 'noconfig','pkg_filter')
+ q="""SELECT %s FROM tag_inheritance JOIN tag ON parent_id = id
+ WHERE %s
+ ORDER BY priority
+ """ % (",".join(fields), eventCondition(event))
+ c.execute(q,locals())
+ #convert list of lists into a list of dictionaries
+ return [ dict(zip(fields,x)) for x in c.fetchall() ]
+
+def readInheritanceData(tag_id,event=None):
+ c=context.cnx.cursor()
+ fields = ('parent_id','name','priority','maxdepth','intransitive','noconfig','pkg_filter')
+ q="""SELECT %s FROM tag_inheritance JOIN tag ON parent_id = id
+ WHERE %s AND tag_id = %%(tag_id)i
+ ORDER BY priority
+ """ % (",".join(fields), eventCondition(event))
+ c.execute(q,locals())
+ #convert list of lists into a list of dictionaries
+ data = [ dict(zip(fields,x)) for x in c.fetchall() ]
+ # include the current tag_id as child_id, so we can retrace the inheritance chain later
+ for datum in data:
+ datum['child_id'] = tag_id
+ return data
+
+def readDescendantsData(tag_id,event=None):
+ c=context.cnx.cursor()
+ fields = ('tag_id','parent_id','name','priority','maxdepth','intransitive','noconfig','pkg_filter')
+ q="""SELECT %s FROM tag_inheritance JOIN tag ON tag_id = id
+ WHERE %s AND parent_id = %%(tag_id)i
+ ORDER BY priority
+ """ % (",".join(fields), eventCondition(event))
+ c.execute(q,locals())
+ #convert list of lists into a list of dictionaries
+ data = [ dict(zip(fields,x)) for x in c.fetchall() ]
+ return data
+
+def writeInheritanceData(tag_id, changes, clear=False):
+ """Add or change inheritance data for a tag"""
+ context.session.assertPerm('admin')
+ fields = ('parent_id','priority','maxdepth','intransitive','noconfig','pkg_filter')
+ if isinstance(changes,dict):
+ changes = [changes]
+ for link in changes:
+ check_fields = fields
+ if link.get('delete link'):
+ check_fields = ('parent_id',)
+ for f in check_fields:
+ if not link.has_key(f):
+ raise koji.GenericError, "No value for %s" % f
+ # read current data and index
+ data = dict([[link['parent_id'],link] for link in readInheritanceData(tag_id)])
+ for link in changes:
+ link['is_update'] = True
+ parent_id = link['parent_id']
+ orig = data.get(parent_id)
+ if link.get('delete link'):
+ if orig:
+ data[parent_id] = link
+ elif not orig or clear:
+ data[parent_id] = link
+ else:
+ #not a delete request and we have a previous link to parent
+ for f in fields:
+ if orig[f] != link[f]:
+ data[parent_id] = link
+ break
+ if clear:
+ for link in data.itervalues():
+ if not link.get('is_update'):
+ link['delete link'] = True
+ link['is_update'] = True
+ changed = False
+ for link in data.itervalues():
+ if link.get('is_update'):
+ changed = True
+ break
+ if not changed:
+ # nothing to do
+ log_error("No inheritance changes")
+ return
+ #check for duplicate priorities
+ pri_index = {}
+ for link in data.itervalues():
+ if link.get('delete link'):
+ continue
+ pri_index.setdefault(link['priority'], []).append(link)
+ for pri, dups in pri_index.iteritems():
+ if len(dups) <= 1:
+ continue
+ #oops, duplicate entries for a single priority
+ dup_ids = [ link['parent_id'] for link in dups]
+ raise koji.GenericError, "Inheritance priorities must be unique (pri %s: %r )" % (pri, dup_ids)
+ for parent_id, link in data.iteritems():
+ if not link.get('is_update'):
+ continue
+ # revoke old values
+ update = UpdateProcessor('tag_inheritance', values=locals(),
+ clauses=['tag_id=%(tag_id)s', 'parent_id = %(parent_id)s'])
+ update.make_revoke()
+ update.execute()
+ for parent_id, link in data.iteritems():
+ if not link.get('is_update'):
+ continue
+ # skip rest if we are just deleting
+ if link.get('delete link'):
+ continue
+ # insert new value
+ newlink = dslice(link, fields)
+ newlink['tag_id'] = tag_id
+ # defaults ok for the rest
+ insert = InsertProcessor('tag_inheritance', data=newlink)
+ insert.make_create()
+ insert.execute()
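+
+# An example entry for the changes argument (values are hypothetical); every
+# field listed in 'fields' above must be present, except that a 'delete link'
+# entry only needs parent_id:
+# {'parent_id': 123, 'priority': 10, 'maxdepth': None,
+# 'intransitive': False, 'noconfig': False, 'pkg_filter': ''}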
+
+def readFullInheritance(tag_id,event=None,reverse=False,stops=None,jumps=None):
+ """Returns a list representing the full, ordered inheritance from tag"""
+ if stops is None:
+ stops = {}
+ if jumps is None:
+ jumps = {}
+ order = []
+ readFullInheritanceRecurse(tag_id,event,order,stops,{},{},0,None,False,[],reverse,jumps)
+ return order
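+
+# Each entry in the returned order is a dict with the readInheritanceData
+# fields (parent_id, name, priority, maxdepth, intransitive, noconfig,
+# pkg_filter, child_id) plus the values filled in during recursion:
+# currdepth, nextdepth, and filter (the accumulated pkg_filter patterns).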
+
+def readFullInheritanceRecurse(tag_id,event,order,prunes,top,hist,currdepth,maxdepth,noconfig,pfilter,reverse,jumps):
+ if maxdepth is not None and maxdepth < 1:
+ return
+ #note: maxdepth is relative to where we are, but currdepth is absolute from
+ #the top.
+ currdepth += 1
+ top = top.copy()
+ top[tag_id] = 1
+ if reverse:
+ node = readDescendantsData(tag_id,event)
+ else:
+ node = readInheritanceData(tag_id,event)
+ for link in node:
+ if reverse:
+ id = link['tag_id']
+ else:
+ id = link['parent_id']
+ if jumps.has_key(id):
+ id = jumps[id]
+ if top.has_key(id):
+ #LOOP!
+ if event is None:
+ # only log if the issue is current
+ log_error("Warning: INHERITANCE LOOP detected at %s -> %s, pruning" % (tag_id,id))
+ #auto prune
+ continue
+ if prunes.has_key(id):
+ # ignore pruned tags
+ continue
+ if link['intransitive'] and len(top) > 1 and not reverse:
+ # ignore intransitive inheritance links, except at root
+ continue
+ if link['priority'] < 0:
+ #negative priority indicates pruning, rather than inheritance
+ prunes[id] = 1
+ continue
+ if reverse:
+ #maxdepth logic is different in this case. no propagation
+ if link['maxdepth'] is not None and link['maxdepth'] < currdepth - 1:
+ continue
+ nextdepth = None
+ else:
+ #propagate maxdepth
+ nextdepth = link['maxdepth']
+ if nextdepth is None:
+ if maxdepth is not None:
+ nextdepth = maxdepth - 1
+ elif maxdepth is not None:
+ nextdepth = min(nextdepth,maxdepth) - 1
+ link['nextdepth'] = nextdepth
+ link['currdepth'] = currdepth
+ #propagate noconfig and pkg_filter controls
+ if link['noconfig']:
+ noconfig = True
+ filter = list(pfilter) # copy
+ pattern = link['pkg_filter']
+ if pattern:
+ filter.append(pattern)
+ link['filter'] = filter
+ # check history to avoid redundant entries
+ if hist.has_key(id):
+ #already been there
+ #BUT, options may have been different
+ rescan = True
+ #since rescans are possible, we might have to consider more than one previous hit
+ for previous in hist[id]:
+ sufficient = True # is previous sufficient?
+ # if last depth was less than current, then previous insufficient
+ lastdepth = previous['nextdepth']
+ if nextdepth is None:
+ if lastdepth is not None:
+ sufficient = False
+ elif lastdepth is not None and lastdepth < nextdepth:
+ sufficient = False
+ # if noconfig was on before, but not now, then insufficient
+ if previous['noconfig'] and not noconfig:
+ sufficient = False
+ # if we had a filter before, then insufficient
+ if len(previous['filter']) > 0:
+ # FIXME - we could probably be a little more precise here
+ sufficient = False
+ if sufficient:
+ rescan = False
+ if not rescan:
+ continue
+ else:
+ hist[id] = []
+ hist[id].append(link) #record history
+ order.append(link)
+ if link['intransitive'] and reverse:
+ # add link, but don't follow it
+ continue
+ readFullInheritanceRecurse(id,event,order,prunes,top,hist,currdepth,nextdepth,noconfig,filter,reverse,jumps)
+
+# tag-package operations
+# add
+# remove
+# block
+# unblock
+# change owner
+# list
+
+
+def _pkglist_remove(tag_id, pkg_id):
+ clauses = ('package_id=%(pkg_id)i', 'tag_id=%(tag_id)i')
+ update = UpdateProcessor('tag_packages', values=locals(), clauses=clauses)
+ update.make_revoke() #XXX user_id?
+ update.execute()
+
+def _pkglist_add(tag_id, pkg_id, owner, block, extra_arches):
+ #revoke old entry (if present)
+ _pkglist_remove(tag_id, pkg_id)
+ data = dslice(locals(), ('tag_id', 'owner', 'extra_arches'))
+ data['package_id'] = pkg_id
+ data['blocked'] = block
+ insert = InsertProcessor('tag_packages', data=data)
+ insert.make_create() #XXX user_id?
+ insert.execute()
+
+def pkglist_add(taginfo,pkginfo,owner=None,block=None,extra_arches=None,force=False,update=False):
+ """Add to (or update) package list for tag"""
+ #access control comes a little later (via an assert_policy)
+ #should not make any changes until after policy is checked
+ tag = get_tag(taginfo, strict=True)
+ tag_id = tag['id']
+ pkg = lookup_package(pkginfo, strict=False)
+ if not pkg:
+ if not isinstance(pkginfo, basestring):
+ raise koji.GenericError, "Invalid package: %s" % pkginfo
+ if owner is not None:
+ owner = get_user(owner,strict=True)['id']
+ action = 'add'
+ if update:
+ action = 'update'
+ elif bool(block):
+ action = 'block'
+ context.session.assertLogin()
+ policy_data = {'tag' : tag_id, 'action' : action, 'package' : pkginfo, 'force' : force}
+ #don't check policy for admins using force
+ if not (force and context.session.hasPerm('admin')):
+ assert_policy('package_list', policy_data)
+ if not pkg:
+ pkg = lookup_package(pkginfo, create=True)
+ koji.plugin.run_callbacks('prePackageListChange', action=action, tag=tag, package=pkg, owner=owner,
+ block=block, extra_arches=extra_arches, force=force, update=update)
+ # first check to see if package is:
+ # already present (via inheritance)
+ # blocked
+ pkglist = readPackageList(tag_id, pkgID=pkg['id'], inherit=True)
+ previous = pkglist.get(pkg['id'],None)
+ if previous is None:
+ if block is None:
+ block = False
+ else:
+ block = bool(block)
+ if update and not force:
+ #if update flag is true, require that there be a previous entry
+ raise koji.GenericError, "cannot update: tag %s has no data for package %s" \
+ % (tag['name'],pkg['name'])
+ else:
+ #already there (possibly via inheritance)
+ if owner is None:
+ owner = previous['owner_id']
+ if block is None:
+ block = previous['blocked']
+ else:
+ block = bool(block)
+ if extra_arches is None:
+ extra_arches = previous['extra_arches']
+ #see if the data is the same
+ changed = False
+ for key,value in (('owner_id',owner),
+ ('blocked',block),
+ ('extra_arches',extra_arches)):
+ if previous[key] != value:
+ changed = True
+ break
+ if not changed and not force:
+ #no point in adding it again with the same data
+ return
+ if previous['blocked'] and not block and not force:
+ raise koji.GenericError, "package %s is blocked in tag %s" % (pkg['name'],tag['name'])
+ if owner is None:
+ if force:
+ owner = context.session.user_id
+ else:
+ raise koji.GenericError, "owner not specified"
+ _pkglist_add(tag_id, pkg['id'], owner, block, extra_arches)
+ koji.plugin.run_callbacks('postPackageListChange', action=action, tag=tag, package=pkg, owner=owner,
+ block=block, extra_arches=extra_arches, force=force, update=update)
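+
+# Example calls (tag/package/user names are hypothetical):
+# pkglist_add('f21-build', 'bash', owner='jdoe') # add or replace an entry
+# pkglist_add('f21-build', 'bash', block=True) # what pkglist_block does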
+
+def pkglist_remove(taginfo,pkginfo,force=False):
+ """Remove package from the list for tag
+
+ Most of the time you really want to use the block or unblock functions
+
+ The main reason to remove an entry like this is to remove an override so
+ that the package data can be inherited from elsewhere.
+ """
+ tag = get_tag(taginfo, strict=True)
+ pkg = lookup_package(pkginfo, strict=True)
+ context.session.assertLogin()
+ policy_data = {'tag' : tag['id'], 'action' : 'remove', 'package' : pkg['id'], 'force' : force}
+ #don't check policy for admins using force
+ if not (force and context.session.hasPerm('admin')):
+ assert_policy('package_list', policy_data)
+ koji.plugin.run_callbacks('prePackageListChange', action='remove', tag=tag, package=pkg)
+ _pkglist_remove(tag['id'],pkg['id'])
+ koji.plugin.run_callbacks('postPackageListChange', action='remove', tag=tag, package=pkg)
+
+def pkglist_block(taginfo,pkginfo):
+ """Block the package in tag"""
+ pkglist_add(taginfo,pkginfo,block=True)
+
+def pkglist_unblock(taginfo, pkginfo, force=False):
+ """Unblock the package in tag
+
+ Generally this just adds an unblocked duplicate of the blocked entry.
+ However, if the block is actually in the tag directly (not through inheritance),
+ the blocking entry is simply removed"""
+ tag = get_tag(taginfo, strict=True)
+ pkg = lookup_package(pkginfo, strict=True)
+ context.session.assertLogin()
+ policy_data = {'tag' : tag['id'], 'action' : 'unblock', 'package' : pkg['id'], 'force' : force}
+ #don't check policy for admins using force
+ if not (force and context.session.hasPerm('admin')):
+ assert_policy('package_list', policy_data)
+ koji.plugin.run_callbacks('prePackageListChange', action='unblock', tag=tag, package=pkg)
+ tag_id = tag['id']
+ pkg_id = pkg['id']
+ pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True)
+ previous = pkglist.get(pkg_id,None)
+ if previous is None:
+ raise koji.GenericError, "no data (blocked or otherwise) for package %s in tag %s" \
+ % (pkg['name'],tag['name'])
+ if not previous['blocked']:
+ raise koji.GenericError, "package %s NOT blocked in tag %s" % (pkg['name'],tag['name'])
+ if previous['tag_id'] != tag_id:
+ _pkglist_add(tag_id,pkg_id,previous['owner_id'],False,previous['extra_arches'])
+ else:
+ #just remove the blocking entry
+ _pkglist_remove(tag_id, pkg_id)
+ #it's possible this was the only entry in the inheritance or that the next entry
+ #back is also a blocked entry. if so, we need to add it back as unblocked
+ pkglist = readPackageList(tag_id, pkgID=pkg_id, inherit=True)
+ if not pkglist.has_key(pkg_id) or pkglist[pkg_id]['blocked']:
+ _pkglist_add(tag_id, pkg_id, previous['owner_id'], False, previous['extra_arches'])
+ koji.plugin.run_callbacks('postPackageListChange', action='unblock', tag=tag, package=pkg)
+
+def pkglist_setowner(taginfo,pkginfo,owner,force=False):
+ """Set the owner for package in tag"""
+ pkglist_add(taginfo,pkginfo,owner=owner,force=force,update=True)
+
+def pkglist_setarches(taginfo,pkginfo,arches,force=False):
+ """Set extra_arches for package in tag"""
+ pkglist_add(taginfo,pkginfo,extra_arches=arches,force=force,update=True)
+
+def readPackageList(tagID=None, userID=None, pkgID=None, event=None, inherit=False, with_dups=False):
+ """Returns the package list for the specified tag or user.
+
+ One of (tagID,userID,pkgID) must be specified
+
+ Note that the returned data includes blocked entries
+ """
+ if tagID is None and userID is None and pkgID is None:
+ raise koji.GenericError, 'tag, user, and/or pkg must be specified'
+
+ packages = {}
+ fields = (('package.id', 'package_id'), ('package.name', 'package_name'),
+ ('tag.id', 'tag_id'), ('tag.name', 'tag_name'),
+ ('users.id', 'owner_id'), ('users.name', 'owner_name'),
+ ('extra_arches','extra_arches'),
+ ('tag_packages.blocked', 'blocked'))
+ flist = ', '.join([pair[0] for pair in fields])
+ cond = eventCondition(event)
+ q = """
+ SELECT %(flist)s
+ FROM tag_packages
+ JOIN tag on tag.id = tag_packages.tag_id
+ JOIN package ON package.id = tag_packages.package_id
+ JOIN users ON users.id = tag_packages.owner
+ WHERE %(cond)s"""
+ if tagID != None:
+ q += """
+ AND tag.id = %%(tagID)i"""
+ if userID != None:
+ q += """
+ AND users.id = %%(userID)i"""
+ if pkgID != None:
+ if isinstance(pkgID, int) or isinstance(pkgID, long):
+ q += """
+ AND package.id = %%(pkgID)i"""
+ else:
+ q += """
+ AND package.name = %%(pkgID)s"""
+
+ q = q % locals()
+ for p in _multiRow(q, locals(), [pair[1] for pair in fields]):
+ # things are simpler for the first tag
+ pkgid = p['package_id']
+ if with_dups:
+ packages.setdefault(pkgid,[]).append(p)
+ else:
+ packages[pkgid] = p
+
+ if tagID is None or (not inherit):
+ return packages
+
+ order = readFullInheritance(tagID, event)
+
+ re_cache = {}
+ for link in order:
+ tagID = link['parent_id']
+ filter = link['filter']
+ # precompile filter patterns
+ re_list = []
+ for pat in filter:
+ prog = re_cache.get(pat,None)
+ if prog is None:
+ prog = re.compile(pat)
+ re_cache[pat] = prog
+ re_list.append(prog)
+ # same query as before, with different params
+ for p in _multiRow(q, locals(), [pair[1] for pair in fields]):
+ pkgid = p['package_id']
+ if not with_dups and packages.has_key(pkgid):
+ #previous data supersedes
+ continue
+ # apply package filters
+ skip = False
+ for prog in re_list:
+ # the list of filters is cumulative, i.e.
+ # the package name must match all of them
+ if prog.match(p['package_name']) is None:
+ skip = True
+ break
+ if skip:
+ continue
+ if with_dups:
+ packages.setdefault(pkgid,[]).append(p)
+ else:
+ packages[pkgid] = p
+ return packages
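+
+# The returned mapping is keyed by package id; each value is a row dict (or a
+# list of row dicts when with_dups is true) with the aliases above, roughly:
+# {1234: {'package_id': 1234, 'package_name': ..., 'tag_id': ..., 'tag_name': ...,
+# 'owner_id': ..., 'owner_name': ..., 'extra_arches': ..., 'blocked': ...}}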
+
+def list_tags(build=None, package=None, queryOpts=None):
+ """List tags. If build is specified, only return tags associated with the
+ given build. If package is specified, only return tags associated with the
+ specified package. If neither is specified, return all tags. Build can be
+ either an integer ID or a string N-V-R. Package can be either an integer ID
+ or a string name. Only one of build and package may be specified. Returns
+ a list of maps. Each map contains keys:
+ - id
+ - name
+ - perm_id
+ - perm
+ - arches
+ - locked
+
+ If package is specified, each map will also contain:
+ - owner_id
+ - owner_name
+ - blocked
+ - extra_arches
+ """
+ if build is not None and package is not None:
+ raise koji.GenericError, 'only one of build and package may be specified'
+
+ tables = ['tag_config']
+ joins = ['tag ON tag.id = tag_config.tag_id',
+ 'LEFT OUTER JOIN permissions ON tag_config.perm_id = permissions.id']
+ fields = ['tag.id', 'tag.name', 'tag_config.perm_id', 'permissions.name',
+ 'tag_config.arches', 'tag_config.locked', 'tag_config.maven_support',
+ 'tag_config.maven_include_all']
+ aliases = ['id', 'name', 'perm_id', 'perm',
+ 'arches', 'locked', 'maven_support',
+ 'maven_include_all']
+ clauses = ['tag_config.active = true']
+
+ if build is not None:
+ # lookup build id
+ buildinfo = get_build(build)
+ if not buildinfo:
+ raise koji.GenericError, 'invalid build: %s' % build
+ joins.append('tag_listing ON tag.id = tag_listing.tag_id')
+ clauses.append('tag_listing.active = true')
+ clauses.append('tag_listing.build_id = %(buildID)i')
+ buildID = buildinfo['id']
+ elif package is not None:
+ packageinfo = lookup_package(package)
+ if not packageinfo:
+ raise koji.GenericError, 'invalid package: %s' % package
+ fields.extend(['users.id', 'users.name', 'tag_packages.blocked', 'tag_packages.extra_arches'])
+ aliases.extend(['owner_id', 'owner_name', 'blocked', 'extra_arches'])
+ joins.append('tag_packages ON tag.id = tag_packages.tag_id')
+ clauses.append('tag_packages.active = true')
+ clauses.append('tag_packages.package_id = %(packageID)i')
+ joins.append('users ON tag_packages.owner = users.id')
+ packageID = packageinfo['id']
+
+ query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,
+ joins=joins, clauses=clauses, values=locals(),
+ opts=queryOpts)
+ return query.iterate()
+
+def readTaggedBuilds(tag,event=None,inherit=False,latest=False,package=None,owner=None,type=None):
+ """Returns a list of builds for specified tag
+
+ set inherit=True to follow inheritance
+ set event to query at a time in the past
+ set latest=True to get only the latest build per package
+ set latest=N to get only the N latest tagged builds per package
+
+ If type is not None, restrict the list to builds of the given type. Currently the supported
+ types are 'maven', 'win', and 'image'.
+ """
+ # build - id pkg_id version release epoch
+ # tag_listing - id build_id tag_id
+
+ if not isinstance(latest, (int, long, float)):
+ latest = bool(latest)
+
+ taglist = [tag]
+ if inherit:
+ taglist += [link['parent_id'] for link in readFullInheritance(tag, event)]
+
+ #regardless of inherit setting, we need to use inheritance to read the
+ #package list
+ packages = readPackageList(tagID=tag, event=event, inherit=True, pkgID=package)
+
+ #these values are used for each iteration
+ fields = [('tag.id', 'tag_id'), ('tag.name', 'tag_name'), ('build.id', 'id'),
+ ('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'),
+ ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'),
+ ('build.task_id','task_id'),
+ ('events.id', 'creation_event_id'), ('events.time', 'creation_time'),
+ ('volume.id', 'volume_id'), ('volume.name', 'volume_name'),
+ ('package.id', 'package_id'), ('package.name', 'package_name'),
+ ('package.name', 'name'),
+ ("package.name || '-' || build.version || '-' || build.release", 'nvr'),
+ ('users.id', 'owner_id'), ('users.name', 'owner_name')]
+ st_complete = koji.BUILD_STATES['COMPLETE']
+
+ type_join = ''
+ if type is None:
+ pass
+ elif type == 'maven':
+ type_join = 'JOIN maven_builds ON maven_builds.build_id = tag_listing.build_id'
+ fields.extend([('maven_builds.group_id', 'maven_group_id'),
+ ('maven_builds.artifact_id', 'maven_artifact_id'),
+ ('maven_builds.version', 'maven_version')])
+ elif type == 'win':
+ type_join = 'JOIN win_builds ON win_builds.build_id = tag_listing.build_id'
+ fields.append(('win_builds.platform', 'platform'))
+ elif type == 'image':
+ type_join = 'JOIN image_builds ON image_builds.build_id = tag_listing.build_id'
+ fields.append(('image_builds.build_id', 'build_id'))
+ else:
+ raise koji.GenericError, 'unsupported build type: %s' % type
+
+ q="""SELECT %s
+ FROM tag_listing
+ JOIN tag ON tag.id = tag_listing.tag_id
+ JOIN build ON build.id = tag_listing.build_id
+ %s
+ JOIN users ON users.id = build.owner
+ JOIN events ON events.id = build.create_event
+ JOIN package ON package.id = build.pkg_id
+ JOIN volume ON volume.id = build.volume_id
+ WHERE %s AND tag_id=%%(tagid)s
+ AND build.state=%%(st_complete)i
+ """ % (', '.join([pair[0] for pair in fields]), type_join, eventCondition(event, 'tag_listing'))
+ if package:
+ q += """AND package.name = %(package)s
+ """
+ if owner:
+ q += """AND users.name = %(owner)s
+ """
+ q += """ORDER BY tag_listing.create_event DESC
+ """
+ # i.e. latest first
+
+ builds = []
+ seen = {} # used to enforce the 'latest' option
+ for tagid in taglist:
+ #log_error(koji.db._quoteparams(q,locals()))
+ for build in _multiRow(q, locals(), [pair[1] for pair in fields]):
+ pkgid = build['package_id']
+ pinfo = packages.get(pkgid,None)
+ if pinfo is None or pinfo['blocked']:
+ # note:
+ # tools should endeavor to keep tag_listing sane w.r.t.
+ # the package list, but if there is disagreement the package
+ # list should take priority
+ continue
+ if latest:
+ if (latest is True and seen.has_key(pkgid)) or seen.get(pkgid, 0) >= latest:
+ # only take the first N entries
+ # (note ordering in query above)
+ continue
+ seen[pkgid] = seen.get(pkgid, 0) + 1
+ builds.append(build)
+
+ return builds
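+
+# For example, with latest=2 and three tagged builds of the same package, only
+# the two most recently tagged ones are returned (note the DESC ordering in the
+# query above); latest=True keeps just the single most recent build per package.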
+
+def readTaggedRPMS(tag, package=None, arch=None, event=None,inherit=False,latest=True,rpmsigs=False,owner=None,type=None):
+ """Returns a list of rpms for specified tag
+
+ set inherit=True to follow inheritance
+ set event to query at a time in the past
+ set latest=False to get all tagged RPMS (not just from the latest builds)
+ set latest=N to get rpms from only the N latest tagged builds per package
+
+ If type is not None, restrict the list to rpms from builds of the given type. Currently the
+ supported types are 'maven' and 'win'.
+ """
+ taglist = [tag]
+ if inherit:
+ #XXX really should cache this - it gets called several places
+ # (however, it is fairly quick)
+ taglist += [link['parent_id'] for link in readFullInheritance(tag, event)]
+
+ builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package, owner=owner, type=type)
+ #index builds
+ build_idx = dict([(b['build_id'],b) for b in builds])
+
+ #the following query is run for each tag in the inheritance
+ fields = [('rpminfo.name', 'name'),
+ ('rpminfo.version', 'version'),
+ ('rpminfo.release', 'release'),
+ ('rpminfo.arch', 'arch'),
+ ('rpminfo.id', 'id'),
+ ('rpminfo.epoch', 'epoch'),
+ ('rpminfo.payloadhash', 'payloadhash'),
+ ('rpminfo.size', 'size'),
+ ('rpminfo.buildtime', 'buildtime'),
+ ('rpminfo.buildroot_id', 'buildroot_id'),
+ ('rpminfo.build_id', 'build_id')]
+ tables = ['rpminfo']
+ joins = ['tag_listing ON rpminfo.build_id = tag_listing.build_id']
+ clauses = [eventCondition(event), 'tag_id=%(tagid)s']
+ data = {} #tagid added later
+ if package:
+ joins.append('build ON rpminfo.build_id = build.id')
+ joins.append('package ON package.id = build.pkg_id')
+ clauses.append('package.name = %(package)s')
+ data['package'] = package
+ if rpmsigs:
+ fields.append(('rpmsigs.sigkey', 'sigkey'))
+ joins.append('LEFT OUTER JOIN rpmsigs on rpminfo.id = rpmsigs.rpm_id')
+ if arch:
+ data['arch'] = arch
+ if isinstance(arch, basestring):
+ clauses.append('rpminfo.arch = %(arch)s')
+ elif isinstance(arch, (list, tuple)):
+ clauses.append('rpminfo.arch IN %(arch)s')
+ else:
+ raise koji.GenericError, 'invalid arch option: %s' % arch
+
+ fields, aliases = zip(*fields)
+ query = QueryProcessor(tables=tables, joins=joins, clauses=clauses,
+ columns=fields, aliases=aliases, values=data)
+
+ # unique constraints ensure that each of these queries will not report
+ # duplicate rpminfo entries, BUT since we make the query multiple times,
+ # we can get duplicates if a package is multiply tagged.
+ tags_seen = {}
+ def _iter_rpms():
+ for tagid in taglist:
+ if tags_seen.has_key(tagid):
+ #certain inheritance trees can (legitimately) have the same tag
+ #appear more than once (perhaps once with a package filter and once
+ #without). The hard part of that was already done by readTaggedBuilds.
+ #We only need consider each tag once. Note how we use build_idx below.
+ #(Without this, we could report the same rpm twice)
+ continue
+ else:
+ tags_seen[tagid] = 1
+ query.values['tagid'] = tagid
+ for rpminfo in query.iterate():
+ #note: we're checking against the build list because
+ # it has been filtered by the package list. The tag
+ # tools should endeavor to keep tag_listing sane w.r.t.
+ # the package list, but if there is disagreement the package
+ # list should take priority
+ build = build_idx.get(rpminfo['build_id'],None)
+ if build is None:
+ continue
+ elif build['tag_id'] != tagid:
+ #wrong tag
+ continue
+ yield rpminfo
+ return [_iter_rpms(), builds]
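+
+# Note that the first element of the returned pair is a generator; callers that
+# need a concrete list can wrap it, e.g.:
+# rpms, builds = readTaggedRPMS(tag)
+# rpms = list(rpms)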
+
+def readTaggedArchives(tag, package=None, event=None, inherit=False, latest=True, type=None):
+ """Returns a list of archives for specified tag
+
+ set inherit=True to follow inheritance
+ set event to query at a time in the past
+ set latest=False to get all tagged archives (not just from the latest builds)
+ set latest=N to get archives from only the N latest tagged builds per package
+
+ If type is not None, restrict the listing to archives of the given type. Currently
+ the supported types are 'maven' and 'win'.
+ """
+ taglist = [tag]
+ if inherit:
+ #XXX really should cache this - it gets called several places
+ # (however, it is fairly quick)
+ taglist += [link['parent_id'] for link in readFullInheritance(tag, event)]
+
+ # If type == 'maven', we require that both the build *and* the archive have Maven metadata
+ builds = readTaggedBuilds(tag, event=event, inherit=inherit, latest=latest, package=package, type=type)
+ #index builds
+ build_idx = dict([(b['build_id'],b) for b in builds])
+
+ #the following query is run for each tag in the inheritance
+ fields = [('archiveinfo.id', 'id'),
+ ('archiveinfo.type_id', 'type_id'),
+ ('archiveinfo.build_id', 'build_id'),
+ ('archiveinfo.buildroot_id', 'buildroot_id'),
+ ('archiveinfo.filename', 'filename'),
+ ('archiveinfo.size', 'size'),
+ ('archiveinfo.checksum', 'checksum'),
+ ('archiveinfo.checksum_type', 'checksum_type'),
+ ]
+ tables = ['archiveinfo']
+ joins = ['tag_listing ON archiveinfo.build_id = tag_listing.build_id']
+ clauses = [eventCondition(event), 'tag_listing.tag_id = %(tagid)i']
+ if package:
+ joins.append('build ON archiveinfo.build_id = build.id')
+ joins.append('package ON build.pkg_id = package.id')
+ clauses.append('package.name = %(package)s')
+ if type is None:
+ pass
+ elif type == 'maven':
+ joins.append('maven_archives ON archiveinfo.id = maven_archives.archive_id')
+ fields.extend([('maven_archives.group_id', 'maven_group_id'),
+ ('maven_archives.artifact_id', 'maven_artifact_id'),
+ ('maven_archives.version', 'maven_version')])
+ elif type == 'win':
+ joins.append('win_archives ON archiveinfo.id = win_archives.archive_id')
+ fields.extend([('win_archives.relpath', 'relpath'),
+ ('win_archives.platforms', 'platforms'),
+ ('win_archives.flags', 'flags')])
+ else:
+ raise koji.GenericError, 'unsupported archive type: %s' % type
+
+ query = QueryProcessor(tables=tables, joins=joins, clauses=clauses,
+ columns=[pair[0] for pair in fields],
+ aliases=[pair[1] for pair in fields])
+
+ # unique constraints ensure that each of these queries will not report
+ # duplicate archiveinfo entries, BUT since we make the query multiple times,
+ # we can get duplicates if a package is multiply tagged.
+ archives = []
+ tags_seen = {}
+ for tagid in taglist:
+ if tags_seen.has_key(tagid):
+ #certain inheritance trees can (legitimately) have the same tag
+ #appear more than once (perhaps once with a package filter and once
+ #without). The hard part of that was already done by readTaggedBuilds.
+ #We only need consider each tag once. Note how we use build_idx below.
+ #(Without this, we could report the same rpm twice)
+ continue
+ else:
+ tags_seen[tagid] = 1
+ query.values = {'tagid': tagid, 'package': package}
+ for archiveinfo in query.execute():
+ #note: we're checking against the build list because
+ # it has been filtered by the package list. The tag
+ # tools should endeavor to keep tag_listing sane w.r.t.
+ # the package list, but if there is disagreement the package
+ # list should take priority
+ build = build_idx.get(archiveinfo['build_id'],None)
+ if build is None:
+ continue
+ elif build['tag_id'] != tagid:
+ #wrong tag
+ continue
+ archives.append(archiveinfo)
+ return [archives, builds]
+
+def check_tag_access(tag_id,user_id=None):
+ """Determine if user has access to tag package with tag.
+
+ Returns a tuple (access, override, reason)
+ access: a boolean indicating whether access is allowed
+ override: a boolean indicating whether access may be forced
+ reason: the reason access is blocked
+ """
+ if user_id is None:
+ user_id = context.session.user_id
+ if user_id is None:
+ raise koji.GenericError, "a user_id is required"
+ perms = koji.auth.get_user_perms(user_id)
+ override = False
+ if 'admin' in perms:
+ override = True
+ tag = get_tag(tag_id)
+ if tag['locked']:
+ return (False, override, "tag is locked")
+ if tag['perm_id']:
+ needed_perm = lookup_perm(tag['perm_id'],strict=True)['name']
+ if needed_perm not in perms:
+ return (False, override, "tag requires %s permission" % needed_perm)
+ return (True,override,"")
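+
+# Usage sketch:
+# access, override, reason = check_tag_access(tag_id)
+# 'override' only matters when access is denied; assert_tag_access below lets
+# an admin combine override with force=True to proceed anyway.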
+
+def assert_tag_access(tag_id,user_id=None,force=False):
+ access, override, reason = check_tag_access(tag_id,user_id)
+ if not access and not (override and force):
+ raise koji.ActionNotAllowed, reason
+
+def _tag_build(tag,build,user_id=None,force=False):
+ """Tag a build
+
+ This function makes access checks based on user_id, which defaults to the
+ user_id of the session.
+
+ Tagging with a locked tag is not allowed unless force is true (and even
+ then admin permission is required).
+
+ Retagging is not allowed unless force is true. (retagging changes the order
+ of entries, which affects which build is the latest)
+ """
+ tag = get_tag(tag, strict=True)
+ build = get_build(build, strict=True)
+ if user_id:
+ user = get_user(user_id, strict=True)
+ else:
+ # use the user associated with the current session
+ user = get_user(context.session.user_id, strict=True)
+ koji.plugin.run_callbacks('preTag', tag=tag, build=build, user=user, force=force)
+ tag_id = tag['id']
+ build_id = build['id']
+ nvr = "%(name)s-%(version)s-%(release)s" % build
+ if build['state'] != koji.BUILD_STATES['COMPLETE']:
+ # incomplete builds may not be tagged, not even when forced
+ state = koji.BUILD_STATES[build['state']]
+ raise koji.TagError, "build %s not complete: state %s" % (nvr,state)
+ #access check
+ assert_tag_access(tag['id'],user_id=user_id,force=force)
+ # see if it's already tagged
+ retag = False
+ table = 'tag_listing'
+ clauses = ('tag_id=%(tag_id)i', 'build_id=%(build_id)i')
+ query = QueryProcessor(columns=['build_id'], tables=[table],
+ clauses=('active = TRUE',)+clauses,
+ values=locals(), opts={'rowlock':True})
+ #note: tag_listing is unique on (build_id, tag_id, active)
+ if query.executeOne():
+ #already tagged
+ if not force:
+ raise koji.TagError, "build %s already tagged (%s)" % (nvr,tag['name'])
+ #otherwise we retag
+ retag = True
+ if retag:
+ #revoke the old tag first
+ update = UpdateProcessor(table, values=locals(), clauses=clauses)
+ update.make_revoke(user_id=user_id)
+ update.execute()
+ #tag the package
+ insert = InsertProcessor(table)
+ insert.set(tag_id=tag_id, build_id=build_id)
+ insert.make_create(user_id=user_id)
+ insert.execute()
+ koji.plugin.run_callbacks('postTag', tag=tag, build=build, user=user, force=force)
+
+def _untag_build(tag,build,user_id=None,strict=True,force=False):
+ """Untag a build
+
+ If strict is true, assert that build is actually tagged
+ The force option overrides a lock (if the user is an admin)
+
+ This function makes access checks based on user_id, which defaults to the
+ user_id of the session.
+ """
+ tag = get_tag(tag, strict=True)
+ build = get_build(build, strict=True)
+ if user_id:
+ user = get_user(user_id, strict=True)
+ else:
+ # use the user associated with the current session
+ user = get_user(context.session.user_id, strict=True)
+ koji.plugin.run_callbacks('preUntag', tag=tag, build=build, user=user, force=force, strict=strict)
+ tag_id = tag['id']
+ build_id = build['id']
+ assert_tag_access(tag_id,user_id=user_id,force=force)
+ update = UpdateProcessor('tag_listing', values=locals(),
+ clauses=['tag_id=%(tag_id)i', 'build_id=%(build_id)i'])
+ update.make_revoke(user_id=user_id)
+ count = update.execute()
+ if count == 0 and strict:
+ nvr = "%(name)s-%(version)s-%(release)s" % build
+ raise koji.TagError, "build %s not in tag %s" % (nvr,tag['name'])
+ koji.plugin.run_callbacks('postUntag', tag=tag, build=build, user=user, force=force, strict=strict)
+
+# tag-group operations
+# add
+# remove
+# block
+# unblock
+# list (readTagGroups)
+
+def grplist_add(taginfo,grpinfo,block=False,force=False,**opts):
+ """Add to (or update) group list for tag"""
+ #only admins....
+ context.session.assertPerm('admin')
+ tag = get_tag(taginfo)
+ group = lookup_group(grpinfo,create=True)
+ block = bool(block)
+ # check current group status (incl inheritance)
+ groups = get_tag_groups(tag['id'], inherit=True, incl_pkgs=False,incl_reqs=False)
+ previous = groups.get(group['id'],None)
+ cfg_fields = ('exported','display_name','is_default','uservisible',
+ 'description','langonly','biarchonly',)
+ #prevent user-provided opts from doing anything strange
+ opts = dslice(opts, cfg_fields, strict=False)
+ if previous is not None:
+ #already there (possibly via inheritance)
+ if previous['blocked'] and not force:
+ raise koji.GenericError, "group %s is blocked in tag %s" % (group['name'],tag['name'])
+ #check for duplication and grab old data for defaults
+ changed = False
+ for field in cfg_fields:
+ old = previous[field]
+ if opts.has_key(field):
+ if opts[field] != old:
+ changed = True
+ else:
+ opts[field] = old
+ if not changed:
+ #no point in adding it again with the same data
+ return
+ #provide available defaults and sanity check data
+ opts.setdefault('display_name',group['name'])
+ opts.setdefault('biarchonly',False)
+ opts.setdefault('exported',True)
+ opts.setdefault('uservisible',True)
+ # XXX ^^^
+ opts['tag_id'] = tag['id']
+ opts['group_id'] = group['id']
+ opts['blocked'] = block
+ #revoke old entry (if present)
+ update = UpdateProcessor('group_config', values=opts,
+ clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s'])
+ update.make_revoke()
+ update.execute()
+ #add new entry
+ insert = InsertProcessor('group_config', data=opts)
+ insert.make_create()
+ insert.execute()
+
+def grplist_remove(taginfo,grpinfo,force=False):
+ """Remove group from the list for tag
+
+ Really this shouldn't be used except in special cases
+ Most of the time you really want to use the block or unblock functions
+ """
+ #only admins....
+ context.session.assertPerm('admin')
+ tag = get_tag(taginfo)
+ group = lookup_group(grpinfo, strict=True)
+ tag_id = tag['id']
+ grp_id = group['id']
+ clauses = ['group_id=%(grp_id)s', 'tag_id=%(tag_id)s']
+ update = UpdateProcessor('group_config', values=locals(), clauses=clauses)
+ update.make_revoke()
+ update.execute()
+
+def grplist_block(taginfo,grpinfo):
+ """Block the group in tag"""
+ grplist_add(taginfo,grpinfo,block=True)
+
+def grplist_unblock(taginfo,grpinfo):
+ """Unblock the group in tag
+
+ If the group is blocked in this tag, then simply remove the block.
+ Otherwise, raise an error
+ """
+ # only admins...
+ context.session.assertPerm('admin')
+ tag = lookup_tag(taginfo,strict=True)
+ group = lookup_group(grpinfo,strict=True)
+ tag_id = tag['id']
+ grp_id = group['id']
+ table = 'group_config'
+ clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s')
+ query = QueryProcessor(columns=['blocked'], tables=[table],
+ clauses=('active = TRUE',)+clauses,
+ values=locals(), opts={'rowlock':True})
+ blocked = query.singleValue(strict=False)
+ if not blocked:
+ raise koji.GenericError, "group %s is NOT blocked in tag %s" % (group['name'],tag['name'])
+ update = UpdateProcessor(table, values=locals(), clauses=clauses)
+ update.make_revoke()
+ update.execute()
+
+
+# tag-group-pkg operations
+# add
+# remove
+# block
+# unblock
+# list (readTagGroups)
+
+def grp_pkg_add(taginfo,grpinfo,pkg_name,block=False,force=False,**opts):
+ """Add package to group for tag"""
+ #only admins....
+ context.session.assertPerm('admin')
+ tag = lookup_tag(taginfo, strict=True)
+ group = lookup_group(grpinfo,strict=True)
+ block = bool(block)
+ # check current group status (incl inheritance)
+ groups = get_tag_groups(tag['id'], inherit=True, incl_pkgs=True, incl_reqs=False)
+ grp_cfg = groups.get(group['id'],None)
+ if grp_cfg is None:
+ raise koji.GenericError, "group %s not present in tag %s" % (group['name'],tag['name'])
+ elif grp_cfg['blocked']:
+ raise koji.GenericError, "group %s is blocked in tag %s" % (group['name'],tag['name'])
+ previous = grp_cfg['packagelist'].get(pkg_name,None)
+ cfg_fields = ('type','basearchonly','requires')
+ #prevent user-provided opts from doing anything strange
+ opts = dslice(opts, cfg_fields, strict=False)
+ if previous is not None:
+ #already there (possibly via inheritance)
+ if previous['blocked'] and not force:
+ raise koji.GenericError, "package %s blocked in group %s, tag %s" \
+ % (pkg_name,group['name'],tag['name'])
+ #check for duplication and grab old data for defaults
+ changed = False
+ for field in cfg_fields:
+ old = previous[field]
+ if opts.has_key(field):
+ if opts[field] != old:
+ changed = True
+ else:
+ opts[field] = old
+ if block:
+ #from condition above, either previous is not blocked or force is on,
+ #either way, we should add the entry
+ changed = True
+ if not changed and not force:
+ #no point in adding it again with the same data (unless force is on)
+ return
+ opts.setdefault('type','default')
+ opts['group_id'] = group['id']
+ opts['tag_id'] = tag['id']
+ opts['package'] = pkg_name
+ opts['blocked'] = block
+ #revoke old entry (if present)
+ update = UpdateProcessor('group_package_listing', values=opts,
+ clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s', 'package=%(package)s'])
+ update.make_revoke()
+ update.execute()
+ #add new entry
+ insert = InsertProcessor('group_package_listing', data=opts)
+ insert.make_create()
+ insert.execute()
+
+def grp_pkg_remove(taginfo,grpinfo,pkg_name,force=False):
+ """Remove package from the list for group-tag
+
+ Really this shouldn't be used except in special cases
+ Most of the time you really want to use the block or unblock functions
+ """
+ #only admins....
+ context.session.assertPerm('admin')
+ tag_id = get_tag_id(taginfo,strict=True)
+ grp_id = get_group_id(grpinfo,strict=True)
+ update = UpdateProcessor('group_package_listing', values=locals(),
+ clauses=['package=%(pkg_name)s', 'tag_id=%(tag_id)s', 'group_id = %(grp_id)s'])
+ update.make_revoke()
+ update.execute()
+
+def grp_pkg_block(taginfo,grpinfo, pkg_name):
+ """Block the package in group-tag"""
+ grp_pkg_add(taginfo,grpinfo,pkg_name,block=True)
+
+def grp_pkg_unblock(taginfo,grpinfo,pkg_name):
+ """Unblock the package in group-tag
+
+ If blocked (directly) in this tag, then simply remove the block.
+ Otherwise, raise an error
+ """
+ # only admins...
+ context.session.assertPerm('admin')
+ table = 'group_package_listing'
+ tag_id = get_tag_id(taginfo,strict=True)
+ grp_id = get_group_id(grpinfo,strict=True)
+ clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'package = %(pkg_name)s')
+ query = QueryProcessor(columns=['blocked'], tables=[table],
+ clauses=('active = TRUE',)+clauses,
+ values=locals(), opts={'rowlock':True})
+ blocked = query.singleValue(strict=False)
+ if not blocked:
+ raise koji.GenericError, "package %s is NOT blocked in group %s, tag %s" \
+ % (pkg_name,grp_id,tag_id)
+ update = UpdateProcessor('group_package_listing', values=locals(), clauses=clauses)
+ update.make_revoke()
+ update.execute()
+
+# tag-group-req operations
+# add
+# remove
+# block
+# unblock
+# list (readTagGroups)
+
+def grp_req_add(taginfo,grpinfo,reqinfo,block=False,force=False,**opts):
+ """Add group requirement to group for tag"""
+ #only admins....
+ context.session.assertPerm('admin')
+ tag = lookup_tag(taginfo, strict=True)
+ group = lookup_group(grpinfo, strict=True, create=False)
+ req = lookup_group(reqinfo, strict=True, create=False)
+ block = bool(block)
+ # check current group status (incl inheritance)
+ groups = get_tag_groups(tag['id'], inherit=True, incl_pkgs=False, incl_reqs=True)
+ grp_cfg = groups.get(group['id'],None)
+ if grp_cfg is None:
+ raise koji.GenericError, "group %s not present in tag %s" % (group['name'],tag['name'])
+ elif grp_cfg['blocked']:
+ raise koji.GenericError, "group %s is blocked in tag %s" % (group['name'],tag['name'])
+ previous = grp_cfg['grouplist'].get(req['id'],None)
+ cfg_fields = ('type','is_metapkg')
+ #prevent user-provided opts from doing anything strange
+ opts = dslice(opts, cfg_fields, strict=False)
+ if previous is not None:
+ #already there (possibly via inheritance)
+ if previous['blocked'] and not force:
+ raise koji.GenericError, "requirement on group %s blocked in group %s, tag %s" \
+ % (req['name'],group['name'],tag['name'])
+ #check for duplication and grab old data for defaults
+ changed = False
+ for field in cfg_fields:
+ old = previous[field]
+ if opts.has_key(field):
+ if opts[field] != old:
+ changed = True
+ else:
+ opts[field] = old
+ if block:
+ #from condition above, either previous is not blocked or force is on,
+ #either way, we should add the entry
+ changed = True
+ if not changed:
+ #no point in adding it again with the same data
+ return
+ opts.setdefault('type','mandatory')
+ opts['group_id'] = group['id']
+ opts['tag_id'] = tag['id']
+ opts['req_id'] = req['id']
+ opts['blocked'] = block
+ #revoke old entry (if present)
+ update = UpdateProcessor('group_req_listing', values=opts,
+ clauses=['group_id=%(group_id)s', 'tag_id=%(tag_id)s', 'req_id=%(req_id)s'])
+ update.make_revoke()
+ update.execute()
+ #add new entry
+ insert = InsertProcessor('group_req_listing', data=opts)
+ insert.make_create()
+ insert.execute()
+
+def grp_req_remove(taginfo,grpinfo,reqinfo,force=False):
+ """Remove group requirement from the list for group-tag
+
+ Really this shouldn't be used except in special cases
+ Most of the time you really want to use the block or unblock functions
+ """
+ #only admins....
+ context.session.assertPerm('admin')
+ tag_id = get_tag_id(taginfo,strict=True)
+ grp_id = get_group_id(grpinfo,strict=True)
+ req_id = get_group_id(reqinfo,strict=True)
+ update = UpdateProcessor('group_req_listing', values=locals(),
+ clauses=['req_id=%(req_id)s', 'tag_id=%(tag_id)s', 'group_id = %(grp_id)s'])
+ update.make_revoke()
+ update.execute()
+
+def grp_req_block(taginfo,grpinfo,reqinfo):
+ """Block the group requirement in group-tag"""
+ grp_req_add(taginfo,grpinfo,reqinfo,block=True)
+
+def grp_req_unblock(taginfo,grpinfo,reqinfo):
+ """Unblock the group requirement in group-tag
+
+ If blocked (directly) in this tag, then simply remove the block.
+ Otherwise, raise an error
+ """
+ # only admins...
+ context.session.assertPerm('admin')
+ tag_id = get_tag_id(taginfo,strict=True)
+ grp_id = get_group_id(grpinfo,strict=True)
+ req_id = get_group_id(reqinfo,strict=True)
+ table = 'group_req_listing'
+
+ clauses = ('group_id=%(grp_id)s', 'tag_id=%(tag_id)s', 'req_id = %(req_id)s')
+ query = QueryProcessor(columns=['blocked'], tables=[table],
+ clauses=('active = TRUE',)+clauses,
+ values=locals(), opts={'rowlock':True})
+ blocked = query.singleValue(strict=False)
+ if not blocked:
+ raise koji.GenericError, "group req %s is NOT blocked in group %s, tag %s" \
+ % (req_id,grp_id,tag_id)
+ update = UpdateProcessor('group_req_listing', values=locals(), clauses=clauses)
+ update.make_revoke()
+ update.execute()
+
+def get_tag_groups(tag,event=None,inherit=True,incl_pkgs=True,incl_reqs=True):
+ """Return group data for the tag
+
+ If inherit is true, follow inheritance
+ If event is specified, query at event
+ If incl_pkgs is true (the default), include packagelist data
+ If incl_reqs is true (the default), include groupreq data
+
+ Note: the data returned includes some blocked entries that may need to be
+ filtered out.
+ """
+ order = None
+ tag = get_tag_id(tag,strict=True)
+ taglist = [tag]
+ if inherit:
+ order = readFullInheritance(tag,event)
+ taglist += [link['parent_id'] for link in order]
+ evcondition = eventCondition(event)
+
+ # First get the list of groups
+ fields = ('name','group_id','tag_id','blocked','exported','display_name',
+ 'is_default','uservisible','description','langonly','biarchonly',)
+ q="""
+ SELECT %s FROM group_config JOIN groups ON group_id = id
+ WHERE %s AND tag_id = %%(tagid)s
+ """ % (",".join(fields),evcondition)
+ groups = {}
+ for tagid in taglist:
+ for group in _multiRow(q,locals(),fields):
+ grp_id = group['group_id']
+ # we only take the first entry for group as we go through inheritance
+ groups.setdefault(grp_id,group)
+
+ if incl_pkgs:
+ for group in groups.itervalues():
+ group['packagelist'] = {}
+ fields = ('group_id','tag_id','package','blocked','type','basearchonly','requires')
+ q = """
+ SELECT %s FROM group_package_listing
+ WHERE %s AND tag_id = %%(tagid)s
+ """ % (",".join(fields),evcondition)
+ for tagid in taglist:
+ for grp_pkg in _multiRow(q,locals(),fields):
+ grp_id = grp_pkg['group_id']
+ if not groups.has_key(grp_id):
+ #tag does not have this group
+ continue
+ group = groups[grp_id]
+ if group['blocked']:
+ #ignore blocked groups
+ continue
+ pkg_name = grp_pkg['package']
+ group['packagelist'].setdefault(pkg_name,grp_pkg)
+
+ if incl_reqs:
+ # and now the group reqs
+ for group in groups.itervalues():
+ group['grouplist'] = {}
+ fields = ('group_id','tag_id','req_id','blocked','type','is_metapkg','name')
+ q = """SELECT %s FROM group_req_listing JOIN groups on req_id = id
+ WHERE %s AND tag_id = %%(tagid)s
+ """ % (",".join(fields),evcondition)
+ for tagid in taglist:
+ for grp_req in _multiRow(q,locals(),fields):
+ grp_id = grp_req['group_id']
+ if not groups.has_key(grp_id):
+ #tag does not have this group
+ continue
+ group = groups[grp_id]
+ if group['blocked']:
+ #ignore blocked groups
+ continue
+ req_id = grp_req['req_id']
+ if not groups.has_key(req_id):
+ #tag does not have this group
+ continue
+ elif groups[req_id]['blocked']:
+ #ignore blocked groups
+ continue
+ group['grouplist'].setdefault(req_id,grp_req)
+
+ return groups
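+
+# Rough shape of the returned data (keys are integer group ids):
+# {group_id: {'name': ..., 'group_id': ..., 'blocked': ..., ...,
+# 'packagelist': {pkg_name: {...}}, # when incl_pkgs
+# 'grouplist': {req_id: {...}}}} # when incl_reqs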
+
+def readTagGroups(tag,event=None,inherit=True,incl_pkgs=True,incl_reqs=True):
+ """Return group data for the tag with blocked entries removed
+
+ Also scrubs data into an xmlrpc-safe format (no integer keys)
+ """
+ groups = get_tag_groups(tag,event,inherit,incl_pkgs,incl_reqs)
+ for group in groups.values():
+ #filter blocked entries and collapse to a list
+ if 'packagelist' in group:
+ group['packagelist'] = filter(lambda x: not x['blocked'],
+ group['packagelist'].values())
+ if 'grouplist' in group:
+ group['grouplist'] = filter(lambda x: not x['blocked'],
+ group['grouplist'].values())
+ #filter blocked entries and collapse to a list
+ return filter(lambda x: not x['blocked'],groups.values())
+
+def set_host_enabled(hostname, enabled=True):
+ context.session.assertPerm('admin')
+ if not get_host(hostname):
+ raise koji.GenericError, 'host does not exist: %s' % hostname
+ c = context.cnx.cursor()
+ c.execute("""UPDATE host SET enabled = %(enabled)s WHERE name = %(hostname)s""", locals())
+ context.commit_pending = True
+
+def add_host_to_channel(hostname, channel_name, create=False):
+ """Add the host to the specified channel
+
+ Channel must already exist unless create option is specified
+ """
+ context.session.assertPerm('admin')
+ host = get_host(hostname)
+ if host == None:
+ raise koji.GenericError, 'host does not exist: %s' % hostname
+ host_id = host['id']
+ channel_id = get_channel_id(channel_name, create=create)
+ if channel_id == None:
+ raise koji.GenericError, 'channel does not exist: %s' % channel_name
+ channels = list_channels(host_id)
+ for channel in channels:
+ if channel['id'] == channel_id:
+ raise koji.GenericError, 'host %s is already subscribed to the %s channel' % (hostname, channel_name)
+ insert = InsertProcessor('host_channels')
+ insert.set(host_id=host_id, channel_id=channel_id)
+ insert.execute()
+
+def remove_host_from_channel(hostname, channel_name):
+ context.session.assertPerm('admin')
+ host = get_host(hostname)
+ if host == None:
+ raise koji.GenericError, 'host does not exist: %s' % hostname
+ host_id = host['id']
+ channel_id = get_channel_id(channel_name)
+ if channel_id == None:
+ raise koji.GenericError, 'channel does not exist: %s' % channel_name
+ found = False
+ channels = list_channels(host_id)
+ for channel in channels:
+ if channel['id'] == channel_id:
+ found = True
+ break
+ if not found:
+ raise koji.GenericError, 'host %s is not subscribed to the %s channel' % (hostname, channel_name)
+ c = context.cnx.cursor()
+ c.execute("""DELETE FROM host_channels WHERE host_id = %(host_id)d and channel_id = %(channel_id)d""", locals())
+ context.commit_pending = True
+
+def rename_channel(old, new):
+ """Rename a channel"""
+ context.session.assertPerm('admin')
+ if not isinstance(new, basestring):
+ raise koji.GenericError, "new channel name must be a string"
+ cinfo = get_channel(old, strict=True)
+ dup_check = get_channel(new, strict=False)
+ if dup_check:
+ raise koji.GenericError, "channel %(name)s already exists (id=%(id)i)" % dup_check
+ update = UpdateProcessor('channels', clauses=['id=%(id)i'], values=cinfo)
+ update.set(name=new)
+ update.execute()
+
+def remove_channel(channel_name, force=False):
+ """Remove a channel
+
+ Channel must have no hosts, unless force is set to True
+ If a channel has associated tasks, it cannot be removed
+ """
+ context.session.assertPerm('admin')
+ channel_id = get_channel_id(channel_name, strict=True)
+ # check for task references
+ query = QueryProcessor(tables=['task'], clauses=['channel_id=%(channel_id)i'],
+ values=locals(), columns=['id'], opts={'limit':1})
+ #XXX slow query
+ if query.execute():
+ raise koji.GenericError, 'channel %s has task references' % channel_name
+ query = QueryProcessor(tables=['host_channels'], clauses=['channel_id=%(channel_id)i'],
+ values=locals(), columns=['host_id'], opts={'limit':1})
+ if query.execute():
+ if not force:
+ raise koji.GenericError, 'channel %s has host references' % channel_name
+ delete = """DELETE FROM host_channels WHERE channel_id=%(channel_id)i"""
+ _dml(delete, locals())
+ delete = """DELETE FROM channels WHERE id=%(channel_id)i"""
+ _dml(delete, locals())
+
+def get_ready_hosts():
+ """Return information about hosts that are ready to build.
+
+ Hosts set the ready flag themselves
+ Note: We ignore hosts that are late checking in (even if a host
+ is busy with tasks, it should be checking in quite often).
+ """
+ c = context.cnx.cursor()
+ fields = ('host.id','name','arches','task_load', 'capacity')
+ aliases = ('id','name','arches','task_load', 'capacity')
+ q = """
+ SELECT %s FROM host
+ JOIN sessions USING (user_id)
+ WHERE enabled = TRUE AND ready = TRUE
+ AND expired = FALSE
+ AND master IS NULL
+ AND update_time > NOW() - '5 minutes'::interval
+ """ % ','.join(fields)
+ # XXX - magic number in query
+ c.execute(q)
+ hosts = [dict(zip(aliases,row)) for row in c.fetchall()]
+ for host in hosts:
+ q = """SELECT channel_id FROM host_channels WHERE host_id=%(id)s"""
+ c.execute(q,host)
+ host['channels'] = [row[0] for row in c.fetchall()]
+ return hosts
+
+def get_all_arches():
+ """Return a list of all (canonical) arches available from hosts"""
+ ret = {}
+ for (arches,) in _fetchMulti('SELECT arches FROM host', {}):
+ if arches is None:
+ continue
+ for arch in arches.split():
+ #in a perfect world, this list would only include canonical
+ #arches, but not all admins will understand that.
+ ret[koji.canonArch(arch)] = 1
+ return ret.keys()
+
+def get_active_tasks(host=None):
+ """Return data on tasks that are yet to be run"""
+ fields = ['id', 'state', 'channel_id', 'host_id', 'arch', 'method', 'priority', 'create_time']
+ values = dslice(koji.TASK_STATES, ('FREE','ASSIGNED'))
+ if host:
+ values['arches'] = host['arches'].split() + ['noarch']
+ values['channels'] = host['channels']
+ values['host_id'] = host['id']
+ clause = '(state = %(ASSIGNED)i AND host_id = %(host_id)i)'
+ if values['channels']:
+ clause += ''' OR (state = %(FREE)i AND arch IN %(arches)s \
+AND channel_id IN %(channels)s)'''
+ clauses = [clause]
+ else:
+ clauses = ['state IN (%(FREE)i,%(ASSIGNED)i)']
+ queryOpts = {'limit' : 100, 'order' : 'priority,create_time'}
+ query = QueryProcessor(columns=fields, tables=['task'], clauses=clauses,
+ values=values, opts=queryOpts)
+ return query.execute()
+
+def get_task_descendents(task, childMap=None, request=False):
+ if childMap == None:
+ childMap = {}
+ children = task.getChildren(request=request)
+ children.sort(lambda a, b: cmp(a['id'], b['id']))
+ # xmlrpclib requires dict keys to be strings
+ childMap[str(task.id)] = children
+ for child in children:
+ get_task_descendents(Task(child['id']), childMap, request)
+ return childMap
+
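+ # Shape sketch (task id hypothetical): the result maps stringified task ids
+ # to lists of child task info dicts, suitable for xmlrpclib marshalling:
+ #
+ # descendents = get_task_descendents(Task(1234))
+ # for task_id, children in descendents.iteritems():
+ #     print task_id, len(children)
+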
+def maven_tag_archives(tag_id, event_id=None, inherit=True):
+ """
+ Get Maven artifacts associated with the given tag, following inheritance.
+ For any parent tags where 'maven_include_all' is true, include all versions
+ of a given groupId:artifactId, not just the most-recently-tagged.
+ """
+ packages = readPackageList(tagID=tag_id, event=event_id, inherit=True)
+ taglist = [tag_id]
+ if inherit:
+ taglist.extend([link['parent_id'] for link in readFullInheritance(tag_id, event_id)])
+ fields = [('tag.id', 'tag_id'), ('tag.name', 'tag_name'),
+ ('build.pkg_id', 'pkg_id'), ('build.id', 'build_id'),
+ ('package.name', 'build_name'), ('build.version', 'build_version'),
+ ('build.release', 'build_release'), ('build.epoch', 'build_epoch'),
+ ('build.state', 'state'), ('build.task_id', 'task_id'),
+ ('build.owner', 'owner'),
+ ('volume.id', 'volume_id'), ('volume.name', 'volume_name'),
+ ('archiveinfo.id', 'id'), ('archiveinfo.type_id', 'type_id'),
+ ('archiveinfo.buildroot_id', 'buildroot_id'),
+ ('archiveinfo.filename', 'filename'), ('archiveinfo.size', 'size'),
+ ('archiveinfo.checksum', 'checksum'),
+ ('archiveinfo.checksum_type', 'checksum_type'),
+ ('maven_archives.group_id', 'group_id'),
+ ('maven_archives.artifact_id', 'artifact_id'),
+ ('maven_archives.version', 'version'),
+ ('tag_listing.create_event', 'tag_event')]
+ tables = ['tag_listing']
+ joins = ['tag ON tag_listing.tag_id = tag.id',
+ 'build ON tag_listing.build_id = build.id',
+ 'volume ON build.volume_id = volume.id',
+ 'package ON build.pkg_id = package.id',
+ 'archiveinfo ON build.id = archiveinfo.build_id',
+ 'maven_archives ON archiveinfo.id = maven_archives.archive_id']
+ clauses = [eventCondition(event_id, 'tag_listing'), 'tag_listing.tag_id = %(tag_id)i']
+ order = '-tag_event'
+ query = QueryProcessor(tables=tables, joins=joins,
+ clauses=clauses, opts={'order': order},
+ columns=[f[0] for f in fields],
+ aliases=[f[1] for f in fields])
+ included = {}
+ included_archives = set()
+ # these indexes eat into the memory savings of the generator, but it's only
+ # group_id/artifact_id/version/build_id/archive_id, which is much smaller than
+ # the full query
+ # ballpark estimate: 20-25% of total, less with heavy duplication of indexed values
+ def _iter_archives():
+ for tag_id in taglist:
+ taginfo = get_tag(tag_id, strict=True, event=event_id)
+ query.values['tag_id'] = tag_id
+ archives = query.iterate()
+ for archive in archives:
+ pkg = packages.get(archive['pkg_id'])
+ if not pkg or pkg['blocked']:
+ continue
+ # 4 possibilities:
+ # 1: we have never seen this group_id:artifact_id before
+ # - yield it, and add to the included dict
+ # 2: we have seen the group_id:artifact_id before, but a different version
+ # - if the taginfo['maven_include_all'] is true, yield it and
+ # append it to the included_versions dict, otherwise skip it
+ # 3: we have seen the group_id:artifact_id before, with the same version, from
+ # a different build
+ # - this is a different revision of the same GAV, ignore it because a more
+ # recently-tagged build has already been included
+ # 4: we have seen the group_id:artifact_id before, with the same version, from
+ # the same build
+ # - it is another artifact from a build we're already including, so include it
+ # as well
+ ga = '%(group_id)s:%(artifact_id)s' % archive
+ included_versions = included.get(ga)
+ if not included_versions:
+ included[ga] = {archive['version']: archive['build_id']}
+ included_archives.add(archive['id'])
+ yield archive
+ continue
+ included_build = included_versions.get(archive['version'])
+ if not included_build:
+ if taginfo['maven_include_all']:
+ included_versions[archive['version']] = archive['build_id']
+ included_archives.add(archive['id'])
+ yield archive
+ continue
+ if included_build != archive['build_id']:
+ continue
+ # make sure we haven't already seen this archive somewhere else in the
+ # tag hierarchy
+ if archive['id'] not in included_archives:
+ included_archives.add(archive['id'])
+ yield archive
+ return _iter_archives()
+
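+ # Usage sketch (tag id hypothetical): the return value is a generator, so
+ # large artifact sets can be streamed instead of materialized at once:
+ #
+ # for archive in maven_tag_archives(42):
+ #     print '%(group_id)s:%(artifact_id)s:%(version)s' % archive
+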
+def repo_init(tag, with_src=False, with_debuginfo=False, event=None):
+ """Create a new repo entry in the INIT state, return full repo data
+
+ Returns a dictionary containing
+ repo_id, event_id
+ """
+ logger = logging.getLogger("koji.hub.repo_init")
+ state = koji.REPO_INIT
+ tinfo = get_tag(tag, strict=True, event=event)
+ koji.plugin.run_callbacks('preRepoInit', tag=tinfo, with_src=with_src, with_debuginfo=with_debuginfo,
+ event=event, repo_id=None)
+ tag_id = tinfo['id']
+ repo_arches = {}
+ if tinfo['arches']:
+ for arch in tinfo['arches'].split():
+ arch = koji.canonArch(arch)
+ if arch in ['src','noarch']:
+ continue
+ repo_arches[arch] = 1
+ repo_id = _singleValue("SELECT nextval('repo_id_seq')")
+ if event is None:
+ event_id = _singleValue("SELECT get_event()")
+ else:
+ #make sure event is valid
+ q = "SELECT time FROM events WHERE id=%(event)s"
+ event_time = _singleValue(q, locals(), strict=True)
+ event_id = event
+ insert = InsertProcessor('repo')
+ insert.set(id=repo_id, create_event=event_id, tag_id=tag_id, state=state)
+ insert.execute()
+ # Need to pass event_id because even though this is a single transaction,
+ # it is possible to see the results of other committed transactions
+ rpms, builds = readTaggedRPMS(tag_id, event=event_id, inherit=True, latest=True)
+ groups = readTagGroups(tag_id, event=event_id, inherit=True)
+ blocks = [pkg for pkg in readPackageList(tag_id, event=event_id, inherit=True).values() \
+ if pkg['blocked']]
+ repodir = koji.pathinfo.repo(repo_id, tinfo['name'])
+ os.makedirs(repodir) #should not already exist
+
+ #generate comps and groups.spec
+ groupsdir = "%s/groups" % (repodir)
+ koji.ensuredir(groupsdir)
+ comps = koji.generate_comps(groups, expand_groups=True)
+ fo = file("%s/comps.xml" % groupsdir,'w')
+ fo.write(comps)
+ fo.close()
+
+ #get build dirs
+ relpathinfo = koji.PathInfo(topdir='toplink')
+ builddirs = {}
+ for build in builds:
+ relpath = relpathinfo.build(build)
+ builddirs[build['id']] = relpath.lstrip('/')
+ #generate pkglist files
+ pkglist = {}
+ for repoarch in repo_arches:
+ archdir = os.path.join(repodir, repoarch)
+ koji.ensuredir(archdir)
+ # Make a symlink to our topdir
+ top_relpath = koji.util.relpath(koji.pathinfo.topdir, archdir)
+ top_link = os.path.join(archdir, 'toplink')
+ os.symlink(top_relpath, top_link)
+ pkglist[repoarch] = file(os.path.join(archdir, 'pkglist'), 'w')
+ #NOTE - rpms is now an iterator
+ for rpminfo in rpms:
+ if not with_debuginfo and koji.is_debuginfo(rpminfo['name']):
+ continue
+ relpath = "%s/%s\n" % (builddirs[rpminfo['build_id']], relpathinfo.rpm(rpminfo))
+ arch = rpminfo['arch']
+ if arch == 'src':
+ if with_src:
+ for repoarch in repo_arches:
+ pkglist[repoarch].write(relpath)
+ elif arch == 'noarch':
+ for repoarch in repo_arches:
+ pkglist[repoarch].write(relpath)
+ else:
+ repoarch = koji.canonArch(arch)
+ if repoarch not in repo_arches:
+ # Do not create a repo for arches not in the arch list for this tag
+ continue
+ pkglist[repoarch].write(relpath)
+ for repoarch in repo_arches:
+ pkglist[repoarch].close()
+
+ #write blocked package lists
+ for repoarch in repo_arches:
+ blocklist = file(os.path.join(repodir, repoarch, 'blocklist'), 'w')
+ for pkg in blocks:
+ blocklist.write(pkg['package_name'])
+ blocklist.write('\n')
+ blocklist.close()
+
+ if context.opts.get('EnableMaven') and tinfo['maven_support']:
+ artifact_dirs = {}
+ dir_links = set()
+ for archive in maven_tag_archives(tinfo['id'], event_id):
+ buildinfo = {'name': archive['build_name'],
+ 'version': archive['build_version'],
+ 'release': archive['build_release'],
+ 'epoch': archive['build_epoch'],
+ 'volume_name': archive['volume_name'],
+ }
+ srcdir = os.path.join(koji.pathinfo.mavenbuild(buildinfo),
+ koji.pathinfo.mavenrepo(archive))
+ destlink = os.path.join(repodir, 'maven',
+ koji.pathinfo.mavenrepo(archive))
+ dir_links.add((srcdir, destlink))
+ dest_parent = os.path.dirname(destlink)
+ artifact_dirs.setdefault(dest_parent, set()).add((archive['group_id'],
+ archive['artifact_id'],
+ archive['version']))
+ created_dirs = set()
+ for srcdir, destlink in dir_links:
+ dest_parent = os.path.dirname(destlink)
+ if not dest_parent in created_dirs:
+ koji.ensuredir(dest_parent)
+ created_dirs.add(dest_parent)
+ relpath = koji.util.relpath(srcdir, dest_parent)
+ try:
+ os.symlink(relpath, destlink)
+ except OSError:
+ log_error('Error linking %s to %s' % (destlink, relpath))
+ for artifact_dir, artifacts in artifact_dirs.iteritems():
+ _write_maven_repo_metadata(artifact_dir, artifacts)
+
+ koji.plugin.run_callbacks('postRepoInit', tag=tinfo, with_src=with_src, with_debuginfo=with_debuginfo,
+ event=event, repo_id=repo_id)
+ return [repo_id, event_id]
+
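+ # Usage sketch (tag name hypothetical): repo_init only creates the INIT-state
+ # repo entry and the on-disk skeleton (pkglist, comps, blocklist); moving the
+ # repo to READY is left to the caller once repodata is generated:
+ #
+ # repo_id, event_id = repo_init('f21-build', with_src=False)
+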
+def _write_maven_repo_metadata(destdir, artifacts):
+ # Sort the list so that the highest version number comes last.
+ # group_id and artifact_id should be the same for all entries,
+ # so we're really only comparing versions.
+ artifacts = sorted(artifacts, cmp=lambda a, b: rpm.labelCompare(a, b))
+ artifactinfo = dict(zip(['group_id', 'artifact_id', 'version'], artifacts[-1]))
+ artifactinfo['timestamp'] = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ contents = """<?xml version="1.0"?>
+<metadata>
+ <groupId>%(group_id)s</groupId>
+ <artifactId>%(artifact_id)s</artifactId>
+ <versioning>
+ <latest>%(version)s</latest>
+ <release>%(version)s</release>
+ <versions>
+""" % artifactinfo
+ for artifact in artifacts:
+ contents += """ <version>%s</version>
+""" % artifact[2]
+ contents += """ </versions>
+ <lastUpdated>%s</lastUpdated>
+ </versioning>
+</metadata>
+""" % datetime.datetime.now().strftime('%Y%m%d%H%M%S')
+ mdfile = file(os.path.join(destdir, 'maven-metadata.xml'), 'w')
+ mdfile.write(contents)
+ mdfile.close()
+ _generate_maven_metadata(destdir)
+
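+ # Input sketch (values hypothetical): artifacts is a set of
+ # (group_id, artifact_id, version) tuples sharing one groupId:artifactId,
+ # as collected by repo_init above:
+ #
+ # _write_maven_repo_metadata(destdir,
+ #                            set([('log4j', 'log4j', '1.2.16'),
+ #                                 ('log4j', 'log4j', '1.2.17')]))
+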
+def repo_set_state(repo_id, state, check=True):
+ """Set repo state"""
+ if check:
+ # The repo states are sequential, going backwards makes no sense
+ q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE"""
+ oldstate = _singleValue(q,locals())
+ if oldstate > state:
+ raise koji.GenericError, "Invalid repo state transition %s->%s" \
+ % (oldstate,state)
+ q = """UPDATE repo SET state=%(state)s WHERE id = %(repo_id)s"""
+ _dml(q,locals())
+
+def repo_info(repo_id, strict=False):
+ fields = (
+ ('repo.id', 'id'),
+ ('repo.state', 'state'),
+ ('repo.create_event', 'create_event'),
+ ('events.time','creation_time'), #for compatibility with getRepo
+ ('EXTRACT(EPOCH FROM events.time)','create_ts'),
+ ('repo.tag_id', 'tag_id'),
+ ('tag.name', 'tag_name'),
+ )
+ q = """SELECT %s FROM repo
+ JOIN tag ON tag_id=tag.id
+ JOIN events ON repo.create_event = events.id
+ WHERE repo.id = %%(repo_id)s""" % ','.join([f[0] for f in fields])
+ return _singleRow(q, locals(), [f[1] for f in fields], strict=strict)
+
+def repo_ready(repo_id):
+ """Set repo state to ready"""
+ repo_set_state(repo_id,koji.REPO_READY)
+
+def repo_expire(repo_id):
+ """Set repo state to expired"""
+ repo_set_state(repo_id,koji.REPO_EXPIRED)
+
+def repo_problem(repo_id):
+ """Set repo state to problem"""
+ repo_set_state(repo_id,koji.REPO_PROBLEM)
+
+def repo_delete(repo_id):
+ """Attempt to mark repo deleted, return number of references
+
+ If the number of references is nonzero, no change is made"""
+ #get a row lock on the repo
+ q = """SELECT state FROM repo WHERE id = %(repo_id)s FOR UPDATE"""
+ _singleValue(q,locals())
+ references = repo_references(repo_id)
+ if not references:
+ repo_set_state(repo_id,koji.REPO_DELETED)
+ return len(references)
+
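+ # Lifecycle sketch (repo id hypothetical): states only move forward, so
+ # repo_set_state rejects e.g. READY -> INIT, and repo_delete is a no-op
+ # while buildroots still reference the repo:
+ #
+ # repo_ready(1234)             # INIT -> READY
+ # repo_expire(1234)            # READY -> EXPIRED
+ # if repo_delete(1234) == 0:
+ #     pass                     # nothing references it; now DELETED
+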
+def repo_expire_older(tag_id, event_id):
+ """Expire repos for tag older than event"""
+ st_ready = koji.REPO_READY
+ st_expired = koji.REPO_EXPIRED
+ q = """UPDATE repo SET state=%(st_expired)i
+ WHERE tag_id = %(tag_id)i
+ AND create_event < %(event_id)i
+ AND state = %(st_ready)i"""
+ _dml(q, locals())
+
+def repo_references(repo_id):
+ """Return a list of buildroots that reference the repo"""
+ fields = ('id', 'host_id', 'create_event', 'state')
+ q = """SELECT %s FROM buildroot WHERE repo_id=%%(repo_id)s
+ AND retire_event IS NULL""" % ','.join(fields)
+ #check results for bad states
+ ret = []
+ for data in _multiRow(q, locals(), fields):
+ if data['state'] == koji.BR_STATES['EXPIRED']:
+ log_error("Error: buildroot %(id)s expired, but has no retire_event" % data)
+ continue
+ ret.append(data)
+ return ret
+
+def get_active_repos():
+ """Get data on all active repos
+
+ This is a list of all the repos that the repo daemon needs to worry about.
+ """
+ fields = (
+ ('repo.id', 'id'),
+ ('repo.state', 'state'),
+ ('repo.create_event', 'create_event'),
+ ('EXTRACT(EPOCH FROM events.time)','create_ts'),
+ ('repo.tag_id', 'tag_id'),
+ ('tag.name', 'tag_name'),
+ )
+ st_deleted = koji.REPO_DELETED
+ q = """SELECT %s FROM repo
+ JOIN tag ON tag_id=tag.id
+ JOIN events ON repo.create_event = events.id
+ WHERE repo.state != %%(st_deleted)s""" % ','.join([f[0] for f in fields])
+ return _multiRow(q, locals(), [f[1] for f in fields])
+
+def tag_changed_since_event(event,taglist):
+ """Report whether any changes since event affect any of the tags in list
+
+ The function is used by the repo daemon to determine which of its repos
+ are up to date.
+
+ This function does not consider inheritance; the calling function should
+ expand the taglist to include any desired inheritance.
+
+ Returns: True or False
+ """
+ data = locals().copy()
+ #first check the tag_updates table
+ clauses = ['update_event > %(event)i', 'tag_id IN %(taglist)s']
+ query = QueryProcessor(tables=['tag_updates'], columns=['id'],
+ clauses=clauses, values=data,
+ opts={'limit': 1})
+ if query.execute():
+ return True
+ #also check these versioned tables
+ tables = (
+ 'tag_listing',
+ 'tag_inheritance',
+ 'tag_config',
+ 'tag_packages',
+ 'tag_external_repos',
+ 'group_package_listing',
+ 'group_req_listing',
+ 'group_config',
+ )
+ clauses = ['create_event > %(event)i OR revoke_event > %(event)i',
+ 'tag_id IN %(taglist)s']
+ for table in tables:
+ query = QueryProcessor(tables=[table], columns=['tag_id'], clauses=clauses,
+ values=data, opts={'limit': 1})
+ if query.execute():
+ return True
+ return False
+
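+ # Usage sketch (values hypothetical): a repo daemon would expand inheritance
+ # itself before asking whether its repo is stale:
+ #
+ # taglist = [tag_id] + [t['parent_id'] for t in readFullInheritance(tag_id)]
+ # if tag_changed_since_event(repo['create_event'], taglist):
+ #     repo_expire(repo['id'])
+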
+def set_tag_update(tag_id, utype, event_id=None, user_id=None):
+ """Record a non-versioned tag update"""
+ utype_id = koji.TAG_UPDATE_TYPES.getnum(utype)
+ if utype_id is None:
+ raise koji.GenericError, "Invalid update type: %s" % utype
+ if event_id is None:
+ event_id = get_event()
+ if user_id is None:
+ context.session.assertLogin()
+ user_id = context.session.user_id
+ data = {'tag_id': tag_id, 'update_type': utype_id, 'update_event': event_id,
+ 'updater_id': user_id}
+ insert = InsertProcessor('tag_updates', data=data)
+ insert.execute()
+
+def create_build_target(name, build_tag, dest_tag):
+ """Create a new build target"""
+
+ context.session.assertPerm('admin')
+
+ # Does a target with this name already exist?
+ if get_build_targets(info=name):
+ raise koji.GenericError("A build target with the name '%s' already exists" % name)
+
+ # Does the build tag exist?
+ build_tag_object = get_tag(build_tag)
+ if not build_tag_object:
+ raise koji.GenericError("build tag '%s' does not exist" % build_tag)
+ build_tag = build_tag_object['id']
+
+ # Does the dest tag exist?
+ dest_tag_object = get_tag(dest_tag)
+ if not dest_tag_object:
+ raise koji.GenericError("destination tag '%s' does not exist" % dest_tag)
+ dest_tag = dest_tag_object['id']
+
+ #build targets are versioned, so if the target has previously been deleted, it
+ #is possible the name is in the system
+ id = get_build_target_id(name,create=True)
+
+ insert = InsertProcessor('build_target_config')
+ insert.set(build_target_id=id, build_tag=build_tag, dest_tag=dest_tag)
+ insert.make_create()
+ insert.execute()
+
+def edit_build_target(buildTargetInfo, name, build_tag, dest_tag):
+ """Set the build_tag and dest_tag of an existing build_target to new values"""
+ context.session.assertPerm('admin')
+
+ target = lookup_build_target(buildTargetInfo)
+ if not target:
+ raise koji.GenericError, 'invalid build target: %s' % buildTargetInfo
+
+ buildTargetID = target['id']
+
+ build_tag_object = get_tag(build_tag)
+ if not build_tag_object:
+ raise koji.GenericError, "build tag '%s' does not exist" % build_tag
+ buildTagID = build_tag_object['id']
+
+ dest_tag_object = get_tag(dest_tag)
+ if not dest_tag_object:
+ raise koji.GenericError, "destination tag '%s' does not exist" % dest_tag
+ destTagID = dest_tag_object['id']
+
+ if target['name'] != name:
+ # Allow renaming, for parity with tags
+ id = _singleValue("""SELECT id from build_target where name = %(name)s""",
+ locals(), strict=False)
+ if id is not None:
+ raise koji.GenericError, 'name "%s" is already taken by build target %i' % (name, id)
+
+ rename = """UPDATE build_target
+ SET name = %(name)s
+ WHERE id = %(buildTargetID)i"""
+
+ _dml(rename, locals())
+
+ update = UpdateProcessor('build_target_config', values=locals(),
+ clauses=["build_target_id = %(buildTargetID)i"])
+ update.make_revoke()
+
+ insert = InsertProcessor('build_target_config')
+ insert.set(build_target_id=buildTargetID, build_tag=buildTagID, dest_tag=destTagID)
+ insert.make_create()
+
+ update.execute()
+ insert.execute()
+
+def delete_build_target(buildTargetInfo):
+ """Delete the build target with the given name. If no build target
+ exists, raise a GenericError."""
+ context.session.assertPerm('admin')
+
+ target = lookup_build_target(buildTargetInfo)
+ if not target:
+ raise koji.GenericError, 'invalid build target: %s' % buildTargetInfo
+
+ targetID = target['id']
+
+ #build targets are versioned, so we do not delete them from the db
+ #instead we revoke the config entry
+ update = UpdateProcessor('build_target_config', values=locals(),
+ clauses=["build_target_id = %(targetID)i"])
+ update.make_revoke()
+ update.execute()
+
+def get_build_targets(info=None, event=None, buildTagID=None, destTagID=None, queryOpts=None):
+ """Return data on all the build targets
+
+ provide event to query at a different time"""
+ fields = (
+ ('build_target.id', 'id'),
+ ('build_tag', 'build_tag'),
+ ('dest_tag', 'dest_tag'),
+ ('build_target.name', 'name'),
+ ('tag1.name', 'build_tag_name'),
+ ('tag2.name', 'dest_tag_name'),
+ )
+ joins = ['build_target ON build_target_config.build_target_id = build_target.id',
+ 'tag AS tag1 ON build_target_config.build_tag = tag1.id',
+ 'tag AS tag2 ON build_target_config.dest_tag = tag2.id']
+ clauses = [eventCondition(event)]
+
+ if info:
+ if isinstance(info, str):
+ clauses.append('build_target.name = %(info)s')
+ elif isinstance(info, int) or isinstance(info, long):
+ clauses.append('build_target.id = %(info)i')
+ else:
+ raise koji.GenericError, 'invalid type for lookup: %s' % type(info)
+ if buildTagID != None:
+ clauses.append('build_tag = %(buildTagID)i')
+ if destTagID != None:
+ clauses.append('dest_tag = %(destTagID)i')
+
+ query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
+ tables=['build_target_config'], joins=joins, clauses=clauses,
+ values=locals(), opts=queryOpts)
+ return query.execute()
+
+def get_build_target(info, event=None, strict=False):
+ """Return the build target with the given name or ID.
+ If there is no matching build target, return None."""
+ targets = get_build_targets(info=info, event=event)
+ if len(targets) == 1:
+ return targets[0]
+ elif strict:
+ raise koji.GenericError, 'No matching build target found: %s' % info
+ else:
+ return None
+
+def lookup_name(table,info,strict=False,create=False):
+ """Find the id and name in the table associated with info.
+
+ Info can be the name or the id to look up; the create option
+ only applies when info is a name.
+
+ Return value is a dict with keys id and name, or None
+ If there is no match, then the behavior depends on the options. If strict,
+ then an error is raised. If create, then the required entry is created and
+ returned.
+
+ table should be the name of a table with (unique) fields
+ id INTEGER
+ name TEXT
+ Any other fields should have default values, otherwise the
+ create option will fail.
+ """
+ fields = ('id','name')
+ if isinstance(info, int) or isinstance(info, long):
+ q="""SELECT id,name FROM %s WHERE id=%%(info)d""" % table
+ elif isinstance(info, str):
+ q="""SELECT id,name FROM %s WHERE name=%%(info)s""" % table
+ else:
+ raise koji.GenericError, 'invalid type for id lookup: %s' % type(info)
+ ret = _singleRow(q,locals(),fields,strict=False)
+ if ret is None:
+ if strict:
+ raise koji.GenericError, 'No such entry in table %s: %s' % (table, info)
+ elif create:
+ if not isinstance(info, str):
+ raise koji.GenericError, 'Name must be a string'
+ id = _singleValue("SELECT nextval('%s_id_seq')" % table, strict=True)
+ q = """INSERT INTO %s(id,name) VALUES (%%(id)i,%%(info)s)""" % table
+ _dml(q,locals())
+ return {'id': id, 'name': info}
+ else:
+ return ret
+ return ret
+
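+ # Behavior sketch (table and names hypothetical):
+ #
+ # lookup_name('tag', 'f21-build')            # -> {'id': ..., 'name': ...} or None
+ # lookup_name('tag', 1234)                   # lookup by id works the same way
+ # lookup_name('tag', 'f22-build', create=True)   # inserts and returns new row
+ # lookup_name('tag', 'no-such-tag', strict=True) # raises GenericError
+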
+def get_id(table,info,strict=False,create=False):
+ """Find the id in the table associated with info."""
+ data = lookup_name(table,info,strict,create)
+ if data is None:
+ return data
+ else:
+ return data['id']
+
+def get_tag_id(info,strict=False,create=False):
+ """Get the id for tag"""
+ return get_id('tag',info,strict,create)
+
+def lookup_tag(info,strict=False,create=False):
+ """Get the id,name for tag"""
+ return lookup_name('tag',info,strict,create)
+
+def get_perm_id(info,strict=False,create=False):
+ """Get the id for a permission"""
+ return get_id('permissions',info,strict,create)
+
+def lookup_perm(info,strict=False,create=False):
+ """Get the id,name for perm"""
+ return lookup_name('permissions',info,strict,create)
+
+def get_package_id(info,strict=False,create=False):
+ """Get the id for a package"""
+ return get_id('package',info,strict,create)
+
+def lookup_package(info,strict=False,create=False):
+ """Get the id,name for package"""
+ return lookup_name('package',info,strict,create)
+
+def get_channel_id(info,strict=False,create=False):
+ """Get the id for a channel"""
+ return get_id('channels',info,strict,create)
+
+def lookup_channel(info,strict=False,create=False):
+ """Get the id,name for channel"""
+ return lookup_name('channels',info,strict,create)
+
+def get_group_id(info,strict=False,create=False):
+ """Get the id for a group"""
+ return get_id('groups',info,strict,create)
+
+def lookup_group(info,strict=False,create=False):
+ """Get the id,name for group"""
+ return lookup_name('groups',info,strict,create)
+
+def get_build_target_id(info,strict=False,create=False):
+ """Get the id for a build target"""
+ return get_id('build_target',info,strict,create)
+
+def lookup_build_target(info,strict=False,create=False):
+ """Get the id,name for build target"""
+ return lookup_name('build_target',info,strict,create)
+
+def create_tag(name, parent=None, arches=None, perm=None, locked=False, maven_support=False, maven_include_all=False):
+ """Create a new tag"""
+
+ context.session.assertPerm('admin')
+ if not context.opts.get('EnableMaven') and (maven_support or maven_include_all):
+ raise koji.GenericError, "Maven support not enabled"
+
+ #see if there is already a tag by this name (active)
+ if get_tag(name):
+ raise koji.GenericError("A tag with the name '%s' already exists" % name)
+
+ # Does the parent exist?
+ if parent:
+ parent_tag = get_tag(parent)
+ if not parent_tag:
+ raise koji.GenericError("Parent tag '%s' could not be found" % parent)
+ parent_id = parent_tag['id']
+ else:
+ parent_id = None
+
+ #there may already be an id for a deleted tag, this will reuse it
+ tag_id = get_tag_id(name,create=True)
+
+ insert = InsertProcessor('tag_config')
+ insert.set(tag_id=tag_id, arches=arches, perm_id=perm, locked=locked)
+ insert.set(maven_support=maven_support, maven_include_all=maven_include_all)
+ insert.make_create()
+ insert.execute()
+
+ if parent_id:
+ data = {'parent_id': parent_id,
+ 'priority': 0,
+ 'maxdepth': None,
+ 'intransitive': False,
+ 'noconfig': False,
+ 'pkg_filter': ''}
+ writeInheritanceData(tag_id, data)
+
+ return tag_id
+
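+ # Usage sketch (tag names hypothetical): create a tag inheriting from an
+ # existing parent at priority 0:
+ #
+ # tag_id = create_tag('my-product-candidate', parent='my-product-build',
+ #                     arches='x86_64 i686')
+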
+def get_tag(tagInfo, strict=False, event=None):
+ """Get tag information based on the tagInfo. tagInfo may be either
+ a string (the tag name) or an int (the tag ID).
+ Returns a map containing the following keys:
+
+ - id : unique id for the tag
+ - name : name of the tag
+ - perm_id : permission id (may be null)
+ - perm : permission name (may be null)
+ - arches : tag arches (string, may be null)
+ - locked : lock setting (boolean)
+ - maven_support : maven support flag (boolean)
+ - maven_include_all : maven include all flag (boolean)
+ - extra : extra tag parameters (dictionary)
+
+ If there is no tag matching the given tagInfo, and strict is False,
+ return None. If strict is True, raise a GenericError.
+
+ Note that in order for a tag to 'exist', it must have an active entry
+ in tag_config. A tag whose name appears in the tag table but has no
+ active tag_config entry is considered deleted.
+ """
+
+ tables = ['tag_config']
+ joins = ['tag ON tag.id = tag_config.tag_id',
+ 'LEFT OUTER JOIN permissions ON tag_config.perm_id = permissions.id']
+ fields = {'tag.id': 'id',
+ 'tag.name': 'name',
+ 'tag_config.perm_id': 'perm_id',
+ 'permissions.name': 'perm',
+ 'tag_config.arches': 'arches',
+ 'tag_config.locked': 'locked',
+ 'tag_config.maven_support': 'maven_support',
+ 'tag_config.maven_include_all': 'maven_include_all'
+ }
+ clauses = [eventCondition(event, table='tag_config')]
+ if isinstance(tagInfo, int):
+ clauses.append("tag.id = %(tagInfo)i")
+ elif isinstance(tagInfo, basestring):
+ clauses.append("tag.name = %(tagInfo)s")
+ else:
+ raise koji.GenericError, 'invalid type for tagInfo: %s' % type(tagInfo)
+
+ data = {'tagInfo': tagInfo}
+ fields, aliases = zip(*fields.items())
+ query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,
+ joins=joins, clauses=clauses, values=data)
+ result = query.executeOne()
+ if not result:
+ if strict:
+ raise koji.GenericError, "Invalid tagInfo: %r" % tagInfo
+ return None
+ result['extra'] = get_tag_extra(result)
+ return result
+
+
+def get_tag_extra(tagInfo, event=None):
+ """ Get tag extra info (no inheritance) """
+ tables = ['tag_extra']
+ fields = ['key', 'value']
+ clauses = [eventCondition(event, table='tag_extra'), "tag_id = %(id)i"]
+ query = QueryProcessor(columns=fields, tables=tables, clauses=clauses, values=tagInfo,
+ opts={'asList': True})
+ result = {}
+ for key, value in query.execute():
+ try:
+ value = json.loads(value)
+ except Exception:
+ # this should not happen
+ raise koji.GenericError("Invalid tag extra data: %s : %r", key, value)
+ result[key] = value
+ return result
+
+
+def edit_tag(tagInfo, **kwargs):
+ """Edit information for an existing tag.
+
+ tagInfo specifies the tag to edit
+ field changes are provided as keyword arguments:
+ name: rename the tag
+ arches: change the arch list
+ locked: lock or unlock the tag
+ perm: change the permission requirement
+ maven_support: whether Maven repos should be generated for the tag
+ maven_include_all: include every build in this tag (including multiple
+ versions of the same package) in the Maven repo
+ extra: add or update extra tag parameters (dictionary)
+ """
+
+ context.session.assertPerm('admin')
+ if not context.opts.get('EnableMaven') \
+ and dslice(kwargs, ['maven_support','maven_include_all'], strict=False):
+ raise koji.GenericError, "Maven support not enabled"
+
+ tag = get_tag(tagInfo, strict=True)
+ if kwargs.has_key('perm'):
+ if kwargs['perm'] is None:
+ kwargs['perm_id'] = None
+ else:
+ kwargs['perm_id'] = get_perm_id(kwargs['perm'],strict=True)
+
+ name = kwargs.get('name')
+ if name and tag['name'] != name:
+ #attempt to update tag name
+ #XXX - I'm not sure we should allow this sort of renaming anyway.
+ # while I can see the convenience, it is an untracked change (granted
+ # a cosmetic one). The more versioning-friendly way would be to create
+ # a new tag with duplicate data and revoke the old tag. This is more
+ # of a pain of course :-/ -mikem
+ values = {
+ 'name': name,
+ 'tagID': tag['id']
+ }
+ q = """SELECT id FROM tag WHERE name=%(name)s"""
+ id = _singleValue(q,values,strict=False)
+ if id is not None:
+ #new name is taken
+ raise koji.GenericError, "Name %s already taken by tag %s" % (name,id)
+ update = """UPDATE tag
+ SET name = %(name)s
+ WHERE id = %(tagID)i"""
+ _dml(update, values)
+
+ #check for changes
+ data = tag.copy()
+ changed = False
+ for key in ('perm_id','arches','locked','maven_support','maven_include_all'):
+ if kwargs.has_key(key) and data[key] != kwargs[key]:
+ changed = True
+ data[key] = kwargs[key]
+ if changed:
+ update = UpdateProcessor('tag_config', values=data, clauses=['tag_id = %(id)i'])
+ update.make_revoke()
+ update.execute()
+
+ insert = InsertProcessor('tag_config', data=dslice(data, ('arches', 'perm_id', 'locked')))
+ insert.set(tag_id=data['id'])
+ insert.set(**dslice(data, ('maven_support', 'maven_include_all')))
+ insert.make_create()
+ insert.execute()
+
+ # handle extra data
+ if 'extra' in kwargs:
+ for key in kwargs['extra']:
+ value = kwargs['extra'][key]
+ if key not in tag['extra'] or tag['extra'][key] != value:
+ data = {
+ 'tag_id' : tag['id'],
+ 'key' : key,
+ 'value' : json.dumps(kwargs['extra'][key]),
+ }
+ # revoke old entry, if any
+ update = UpdateProcessor('tag_extra', values=data, clauses=['tag_id = %(tag_id)i', 'key=%(key)s'])
+ update.make_revoke()
+ update.execute()
+ # add new entry
+ insert = InsertProcessor('tag_extra', data=data)
+ insert.make_create()
+ insert.execute()
+
+
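+ # Usage sketch (values hypothetical): omitted keys are left unchanged, and
+ # the versioned config rows are revoked and re-created rather than updated
+ # in place:
+ #
+ # edit_tag('my-product-build', arches='x86_64 armv7hl', locked=True,
+ #          extra={'mock.package_manager': 'dnf'})
+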
+def old_edit_tag(tagInfo, name, arches, locked, permissionID):
+ """Edit information for an existing tag."""
+ return edit_tag(tagInfo, name=name, arches=arches, locked=locked,
+ perm_id=permissionID)
+
+
+def delete_tag(tagInfo):
+ """Delete the specified tag."""
+
+ context.session.assertPerm('admin')
+
+ #We do not ever DELETE tag data. It is versioned -- we revoke it instead.
+
+ def _tagDelete(tableName, value, columnName='tag_id'):
+ update = UpdateProcessor(tableName, clauses=["%s = %%(value)i" % columnName],
+ values={'value':value})
+ update.make_revoke()
+ update.execute()
+
+ tag = get_tag(tagInfo)
+ tagID = tag['id']
+
+ _tagDelete('tag_config', tagID)
+ #technically, to 'delete' the tag we only have to revoke the tag_config entry
+ #these remaining revocations are more for cleanup.
+ _tagDelete('tag_inheritance', tagID)
+ _tagDelete('tag_inheritance', tagID, 'parent_id')
+ _tagDelete('build_target_config', tagID, 'build_tag')
+ _tagDelete('build_target_config', tagID, 'dest_tag')
+ _tagDelete('tag_listing', tagID)
+ _tagDelete('tag_packages', tagID)
+ _tagDelete('tag_external_repos', tagID)
+ _tagDelete('group_config', tagID)
+ _tagDelete('group_req_listing', tagID)
+ _tagDelete('group_package_listing', tagID)
+ # note: we do not delete the entry in the tag table (we can't actually, it
+ # is still referenced by the revoked rows).
+ # note: there is no need to do anything with the repo entries that reference tagID
+
+def get_external_repo_id(info, strict=False, create=False):
+ """Get the id for a build target"""
+ return get_id('external_repo', info, strict, create)
+
+def create_external_repo(name, url):
+ """Create a new external repo with the given name and url.
+ Return a map containing the id, name, and url
+ of the new repo."""
+
+ context.session.assertPerm('admin')
+
+ if get_external_repos(info=name):
+ raise koji.GenericError, 'An external repo named "%s" already exists' % name
+
+ id = get_external_repo_id(name, create=True)
+ if not url.endswith('/'):
+ # Ensure the url always ends with /
+ url += '/'
+ values = {'id': id, 'name': name, 'url': url}
+ insert = InsertProcessor('external_repo_config')
+ insert.set(external_repo_id = id, url=url)
+ insert.make_create()
+ insert.execute()
+ return values
+
+def get_external_repos(info=None, url=None, event=None, queryOpts=None):
+ """Get a list of external repos. If info is not None it may be a
+ string (name) or an integer (id).
+ If url is not None, filter the list of repos to those matching the
+ given url."""
+ fields = ['id', 'name', 'url']
+ tables = ['external_repo']
+ joins = ['external_repo_config ON external_repo_id = id']
+ clauses = [eventCondition(event)]
+ if info is not None:
+ if isinstance(info, str):
+ clauses.append('name = %(info)s')
+ elif isinstance(info, (int, long)):
+ clauses.append('id = %(info)i')
+ else:
+ raise koji.GenericError, 'invalid type for lookup: %s' % type(info)
+ if url:
+ clauses.append('url = %(url)s')
+
+ query = QueryProcessor(columns=fields, tables=tables,
+ joins=joins, clauses=clauses,
+ values=locals(), opts=queryOpts)
+ return query.execute()
+
+def get_external_repo(info, strict=False, event=None):
+ """Get information about a single external repo.
+ info can either be a string (name) or an integer (id).
+ Returns a map containing the id, name, and url of the
+ repo. If strict is True and no external repo has the
+ given name or id, raise an error."""
+ repos = get_external_repos(info, event=event)
+ if repos:
+ return repos[0]
+ else:
+ if strict:
+ raise koji.GenericError, 'invalid repo info: %s' % info
+ else:
+ return None
+
+def edit_external_repo(info, name=None, url=None):
+ """Edit an existing external repo"""
+
+ context.session.assertPerm('admin')
+
+ repo = get_external_repo(info, strict=True)
+ repo_id = repo['id']
+
+ if name and name != repo['name']:
+ existing_id = _singleValue("""SELECT id FROM external_repo WHERE name = %(name)s""",
+ locals(), strict=False)
+ if existing_id is not None:
+ raise koji.GenericError, 'name "%s" is already taken by external repo %i' % (name, existing_id)
+
+ rename = """UPDATE external_repo SET name = %(name)s WHERE id = %(repo_id)i"""
+ _dml(rename, locals())
+
+ if url and url != repo['url']:
+ if not url.endswith('/'):
+ # Ensure the url always ends with /
+ url += '/'
+
+ update = UpdateProcessor('external_repo_config', values=locals(),
+ clauses=['external_repo_id = %(repo_id)i'])
+ update.make_revoke()
+
+ insert = InsertProcessor('external_repo_config')
+ insert.set(external_repo_id=repo_id, url=url)
+ insert.make_create()
+
+ update.execute()
+ insert.execute()
+
+def delete_external_repo(info):
+ """Delete an external repo"""
+
+ context.session.assertPerm('admin')
+
+ repo = get_external_repo(info, strict=True)
+ repo_id = repo['id']
+
+ for tag_repo in get_tag_external_repos(repo_info=repo['id']):
+ remove_external_repo_from_tag(tag_info=tag_repo['tag_id'],
+ repo_info=repo_id)
+
+ update = UpdateProcessor('external_repo_config', values=locals(),
+ clauses=['external_repo_id = %(repo_id)i'])
+ update.make_revoke()
+ update.execute()
+
+def add_external_repo_to_tag(tag_info, repo_info, priority):
+ """Add an external repo to a tag"""
+
+ context.session.assertPerm('admin')
+
+ tag = get_tag(tag_info, strict=True)
+ tag_id = tag['id']
+ repo = get_external_repo(repo_info, strict=True)
+ repo_id = repo['id']
+
+ tag_repos = get_tag_external_repos(tag_info=tag_id)
+ if [tr for tr in tag_repos if tr['external_repo_id'] == repo_id]:
+ raise koji.GenericError, 'tag %s already associated with external repo %s' % \
+ (tag['name'], repo['name'])
+ if [tr for tr in tag_repos if tr['priority'] == priority]:
+ raise koji.GenericError, 'tag %s already associated with an external repo at priority %i' % \
+ (tag['name'], priority)
+
+ insert = InsertProcessor('tag_external_repos')
+ insert.set(tag_id=tag_id, external_repo_id=repo_id, priority=priority)
+ insert.make_create()
+ insert.execute()
+
+def remove_external_repo_from_tag(tag_info, repo_info):
+ """Remove an external repo from a tag"""
+
+ context.session.assertPerm('admin')
+
+ tag = get_tag(tag_info, strict=True)
+ tag_id = tag['id']
+ repo = get_external_repo(repo_info, strict=True)
+ repo_id = repo['id']
+
+ if not get_tag_external_repos(tag_info=tag_id, repo_info=repo_id):
+ raise koji.GenericError, 'external repo %s not associated with tag %s' % \
+ (repo['name'], tag['name'])
+
+ update = UpdateProcessor('tag_external_repos', values=locals(),
+ clauses=["tag_id = %(tag_id)i", "external_repo_id = %(repo_id)i"])
+ update.make_revoke()
+ update.execute()
+
+def edit_tag_external_repo(tag_info, repo_info, priority):
+ """Edit a tag<->external repo association
+ This allows you to update the priority without removing/adding the repo."""
+
+ context.session.assertPerm('admin')
+
+ tag = get_tag(tag_info, strict=True)
+ tag_id = tag['id']
+ repo = get_external_repo(repo_info, strict=True)
+ repo_id = repo['id']
+
+ tag_repos = get_tag_external_repos(tag_info=tag_id, repo_info=repo_id)
+ if not tag_repos:
+ raise koji.GenericError, 'external repo %s not associated with tag %s' % \
+ (repo['name'], tag['name'])
+ tag_repo = tag_repos[0]
+
+ if priority != tag_repo['priority']:
+ remove_external_repo_from_tag(tag_id, repo_id)
+ add_external_repo_to_tag(tag_id, repo_id, priority)
+
+def get_tag_external_repos(tag_info=None, repo_info=None, event=None):
+ """
+ Get a list of tag<->external repo associations.
+
+ Returns a map containing the following fields:
+ tag_id
+ tag_name
+ external_repo_id
+ external_repo_name
+ url
+ priority
+ """
+ tables = ['tag_external_repos']
+ joins = ['tag ON tag_external_repos.tag_id = tag.id',
+ 'external_repo ON tag_external_repos.external_repo_id = external_repo.id',
+ 'external_repo_config ON external_repo.id = external_repo_config.external_repo_id']
+ columns = ['tag.id', 'tag.name', 'external_repo.id', 'external_repo.name', 'url', 'priority']
+ aliases = ['tag_id', 'tag_name', 'external_repo_id', 'external_repo_name', 'url', 'priority']
+
+ clauses = [eventCondition(event, table='tag_external_repos'), eventCondition(event, table='external_repo_config')]
+ if tag_info:
+ tag = get_tag(tag_info, strict=True, event=event)
+ tag_id = tag['id']
+ clauses.append('tag.id = %(tag_id)i')
+ if repo_info:
+ repo = get_external_repo(repo_info, strict=True, event=event)
+ repo_id = repo['id']
+ clauses.append('external_repo.id = %(repo_id)i')
+
+ opts = {'order': 'priority'}
+
+ query = QueryProcessor(tables=tables, joins=joins,
+ columns=columns, aliases=aliases,
+ clauses=clauses, values=locals(),
+ opts=opts)
+ return query.execute()
+
+def get_external_repo_list(tag_info, event=None):
+ """
+ Get an ordered list of all external repos associated with the tags in the
+ hierarchy rooted at the specified tag. External repos will be returned
+ depth-first, and ordered by priority for each tag. Duplicates will be
+ removed. Returns a list of maps containing the following fields:
+
+ tag_id
+ tag_name
+ external_repo_id
+ external_repo_name
+ url
+ priority
+ """
+ tag = get_tag(tag_info, strict=True, event=event)
+ tag_list = [tag['id']]
+ for parent in readFullInheritance(tag['id'], event):
+ tag_list.append(parent['parent_id'])
+ seen_repos = {}
+ repos = []
+ for tag_id in tag_list:
+ for tag_repo in get_tag_external_repos(tag_info=tag_id, event=event):
+ if not seen_repos.has_key(tag_repo['external_repo_id']):
+ repos.append(tag_repo)
+ seen_repos[tag_repo['external_repo_id']] = 1
+ return repos
+
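+ # Ordering sketch (tag name hypothetical): repos are returned depth-first
+ # through the inheritance chain, priority-ordered within each tag, and the
+ # first occurrence of a repo wins:
+ #
+ # for repo in get_external_repo_list('my-product-build'):
+ #     print repo['priority'], repo['tag_name'], repo['url']
+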
+def get_user(userInfo=None,strict=False):
+ """Return information about a user. userInfo may be either a str
+ (Kerberos principal or user name) or an int (user id). A map will be returned with the
+ following keys:
+ id: user id
+ name: user name
+ status: user status (int), may be null
+ usertype: user type (int), 0 for person, 1 for host, may be null
+ krb_principal: the user's Kerberos principal"""
+ if userInfo is None:
+ userInfo = context.session.user_id
+ #will still be None if not logged in
+ fields = ('id', 'name', 'status', 'usertype', 'krb_principal')
+ q = """SELECT %s FROM users WHERE""" % ', '.join(fields)
+ if isinstance(userInfo, int) or isinstance(userInfo, long):
+ q += """ id = %(userInfo)i"""
+ elif isinstance(userInfo, str):
+ q += """ (krb_principal = %(userInfo)s or name = %(userInfo)s)"""
+ else:
+ raise koji.GenericError, 'invalid type for userInfo: %s' % type(userInfo)
+ return _singleRow(q,locals(),fields,strict=strict)
+
+def find_build_id(X, strict=False):
+ if isinstance(X,int) or isinstance(X,long):
+ return X
+ elif isinstance(X,str):
+ data = koji.parse_NVR(X)
+ elif isinstance(X,dict):
+ data = X
+ else:
+ raise koji.GenericError, "Invalid argument: %r" % X
+
+ if not (data.has_key('name') and data.has_key('version') and
+ data.has_key('release')):
+ raise koji.GenericError, 'did not provide name, version, and release'
+
+ c=context.cnx.cursor()
+ q="""SELECT build.id FROM build JOIN package ON build.pkg_id=package.id
+ WHERE package.name=%(name)s AND build.version=%(version)s
+ AND build.release=%(release)s
+ """
+ # constraints should ensure this is unique
+ #log_error(koji.db._quoteparams(q,data))
+ c.execute(q,data)
+ r=c.fetchone()
+ #log_error("%r" % r )
+ if not r:
+ if strict:
+ raise koji.GenericError, 'No matching build found: %r' % X
+ else:
+ return None
+ return r[0]
+
+def get_build(buildInfo, strict=False):
+ """Return information about a build. buildID may be either
+ a int ID, a string NVR, or a map containing 'name', 'version'
+ and 'release. A map will be returned containing the following
+ keys:
+ id: build ID
+ package_id: ID of the package built
+ package_name: name of the package built
+ version
+ release
+ epoch
+ nvr
+ state
+ task_id: ID of the task that kicked off the build
+ owner_id: ID of the user who kicked off the build
+ owner_name: name of the user who kicked off the build
+ volume_id: ID of the storage volume
+ volume_name: name of the storage volume
+ creation_event_id: id of the create_event
+ creation_time: time the build was created (text)
+ creation_ts: time the build was created (epoch)
+ completion_time: time the build was completed (may be null)
+ completion_ts: time the build was completed (epoch, may be null)
+
+ If there is no build matching the buildInfo given, and strict is specified,
+ raise an error. Otherwise return None.
+ """
+ buildID = find_build_id(buildInfo, strict=strict)
+ if buildID == None:
+ return None
+
+ fields = (('build.id', 'id'), ('build.version', 'version'), ('build.release', 'release'),
+ ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'),
+ ('build.task_id', 'task_id'), ('events.id', 'creation_event_id'), ('events.time', 'creation_time'),
+ ('package.id', 'package_id'), ('package.name', 'package_name'), ('package.name', 'name'),
+ ('volume.id', 'volume_id'), ('volume.name', 'volume_name'),
+ ("package.name || '-' || build.version || '-' || build.release", 'nvr'),
+ ('EXTRACT(EPOCH FROM events.time)','creation_ts'),
+ ('EXTRACT(EPOCH FROM build.completion_time)','completion_ts'),
+ ('users.id', 'owner_id'), ('users.name', 'owner_name'))
+ query = """SELECT %s
+ FROM build
+ JOIN events ON build.create_event = events.id
+ JOIN package on build.pkg_id = package.id
+ JOIN volume on build.volume_id = volume.id
+ JOIN users on build.owner = users.id
+ WHERE build.id = %%(buildID)i""" % ', '.join([pair[0] for pair in fields])
+
+ c = context.cnx.cursor()
+ c.execute(query, locals())
+ result = c.fetchone()
+
+ if not result:
+ if strict:
+ raise koji.GenericError, 'No matching build found: %s' % buildInfo
+ else:
+ return None
+ else:
+ ret = dict(zip([pair[1] for pair in fields], result))
+ return ret
+
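+ # Lookup sketch: all three forms below resolve the same build (values are
+ # hypothetical):
+ #
+ # get_build(1001)
+ # get_build('bash-4.3.33-1.fc21')
+ # get_build({'name': 'bash', 'version': '4.3.33', 'release': '1.fc21'})
+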
+def get_next_release(build_info):
+ """find the last successful or deleted build of this N-V"""
+ values = {'name': build_info['name'],
+ 'version': build_info['version'],
+ 'states': (koji.BUILD_STATES['COMPLETE'], koji.BUILD_STATES['DELETED'])}
+ query = QueryProcessor(tables=['build'], joins=['package ON build.pkg_id = package.id'],
+ columns=['build.id', 'release'],
+ clauses=['name = %(name)s', 'version = %(version)s',
+ 'state in %(states)s'],
+ values=values,
+ opts={'order': '-build.id', 'limit': 1})
+ result = query.executeOne()
+ release = None
+ if result:
+ release = result['release']
+
+ if not release:
+ release = '1'
+ elif release.isdigit():
+ release = str(int(release) + 1)
+ else:
+ raise koji.BuildError, 'Unable to increment release value: %s' % release
+ return release
+
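+ # Behavior sketch (N-V hypothetical): no prior COMPLETE/DELETED build of the
+ # N-V yields '1', a last release of '2' yields '3', and a non-numeric release
+ # such as '1.fc21' raises BuildError:
+ #
+ # release = get_next_release({'name': 'bash', 'version': '4.3.33'})
+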
+def get_rpm(rpminfo, strict=False, multi=False):
+ """Get information about the specified RPM
+
+ rpminfo may be any one of the following:
+ - an int ID
+ - a string N-V-R.A
+ - a string N-V-R.A@location
+ - a map containing 'name', 'version', 'release', and 'arch'
+ (and optionally 'location')
+
+ If specified, location should match the name of an external repo
+
+ A map will be returned, with the following keys:
+ - id
+ - name
+ - version
+ - release
+ - arch
+ - epoch
+ - payloadhash
+ - size
+ - buildtime
+ - build_id
+ - buildroot_id
+ - external_repo_id
+ - external_repo_name
+
+ If there is no RPM with the given ID, None is returned, unless strict
+ is True in which case an exception is raised
+
+ If more than one RPM matches, and multi is True, then a list of results is
+ returned. If multi is False, a single match is returned (an internal one if
+ possible).
+ """
+ fields = (
+ ('rpminfo.id', 'id'),
+ ('build_id', 'build_id'),
+ ('buildroot_id', 'buildroot_id'),
+ ('rpminfo.name', 'name'),
+ ('version', 'version'),
+ ('release', 'release'),
+ ('epoch', 'epoch'),
+ ('arch', 'arch'),
+ ('external_repo_id', 'external_repo_id'),
+ ('external_repo.name', 'external_repo_name'),
+ ('payloadhash', 'payloadhash'),
+ ('size', 'size'),
+ ('buildtime', 'buildtime'),
+ )
+ # we can look up by id or NVRA
+ data = None
+ if isinstance(rpminfo,(int,long)):
+ data = {'id': rpminfo}
+ elif isinstance(rpminfo,str):
+ data = koji.parse_NVRA(rpminfo)
+ elif isinstance(rpminfo,dict):
+ data = rpminfo.copy()
+ else:
+ raise koji.GenericError, "Invalid argument: %r" % rpminfo
+ clauses = []
+ if data.has_key('id'):
+ clauses.append("rpminfo.id=%(id)s")
+ else:
+ clauses.append("""rpminfo.name=%(name)s AND version=%(version)s
+ AND release=%(release)s AND arch=%(arch)s""")
+ retry = False
+ if data.has_key('location'):
+ data['external_repo_id'] = get_external_repo_id(data['location'], strict=True)
+ clauses.append("""external_repo_id = %(external_repo_id)i""")
+ elif not multi:
+ #try to match internal first, otherwise first matching external
+ retry = True #if no internal match
+ orig_clauses = list(clauses) #copy
+ clauses.append("""external_repo_id = 0""")
+
+ joins = ['external_repo ON rpminfo.external_repo_id = external_repo.id']
+
+ query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
+ tables=['rpminfo'], joins=joins, clauses=clauses,
+ values=data)
+ if multi:
+ return query.execute()
+ ret = query.executeOne()
+ if ret:
+ return ret
+ if retry:
+ #at this point we have just an NVRA with no internal match. Open it up to externals
+ query.clauses = orig_clauses
+ ret = query.executeOne()
+ if not ret:
+ if strict:
+ raise koji.GenericError, "No such rpm: %r" % data
+ return None
+ return ret
+
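+ # Lookup sketch (NVRA and repo name hypothetical): with no location given,
+ # an internal match (external_repo_id = 0) is preferred over external ones:
+ #
+ # get_rpm('bash-4.3.33-1.fc21.x86_64')
+ # get_rpm('bash-4.3.33-1.fc21.x86_64@fedora-updates')
+ # get_rpm('bash-4.3.33-1.fc21.x86_64', multi=True)  # list of all matches
+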
+def list_rpms(buildID=None, buildrootID=None, imageID=None, componentBuildrootID=None, hostID=None, arches=None, queryOpts=None):
+ """List RPMS. If buildID, imageID and/or buildrootID are specified,
+ restrict the list of RPMs to only those RPMs that are part of that
+ build, or were built in that buildroot. If componentBuildrootID is specified,
+ restrict the list to only those RPMs that will get pulled into that buildroot
+ when it is used to build another package. A list of maps is returned, each map
+ containing the following keys:
+
+ - id
+ - name
+ - version
+ - release
+ - nvr (synthesized for sorting purposes)
+ - arch
+ - epoch
+ - payloadhash
+ - size
+ - buildtime
+ - build_id
+ - buildroot_id
+ - external_repo_id
+ - external_repo_name
+
+ If componentBuildrootID is specified, two additional keys will be included:
+ - component_buildroot_id
+ - is_update
+
+ If no build has the given ID, or the build generated no RPMs,
+ an empty list is returned."""
+ fields = [('rpminfo.id', 'id'), ('rpminfo.name', 'name'), ('rpminfo.version', 'version'),
+ ('rpminfo.release', 'release'),
+ ("rpminfo.name || '-' || rpminfo.version || '-' || rpminfo.release", 'nvr'),
+ ('rpminfo.arch', 'arch'),
+ ('rpminfo.epoch', 'epoch'), ('rpminfo.payloadhash', 'payloadhash'),
+ ('rpminfo.size', 'size'), ('rpminfo.buildtime', 'buildtime'),
+ ('rpminfo.build_id', 'build_id'), ('rpminfo.buildroot_id', 'buildroot_id'),
+ ('rpminfo.external_repo_id', 'external_repo_id'),
+ ('external_repo.name', 'external_repo_name'),
+ ]
+ joins = ['external_repo ON rpminfo.external_repo_id = external_repo.id']
+ clauses = []
+
+ if buildID != None:
+ clauses.append('rpminfo.build_id = %(buildID)i')
+ if buildrootID != None:
+ clauses.append('rpminfo.buildroot_id = %(buildrootID)i')
+ if componentBuildrootID != None:
+ fields.append(('buildroot_listing.buildroot_id as component_buildroot_id',
+ 'component_buildroot_id'))
+ fields.append(('buildroot_listing.is_update', 'is_update'))
+ joins.append('buildroot_listing ON rpminfo.id = buildroot_listing.rpm_id')
+ clauses.append('buildroot_listing.buildroot_id = %(componentBuildrootID)i')
+
+ # image specific constraints
+ if imageID != None:
+ clauses.append('image_listing.image_id = %(imageID)i')
+ joins.append('image_listing ON rpminfo.id = image_listing.rpm_id')
+
+ if hostID != None:
+ joins.append('buildroot ON rpminfo.buildroot_id = buildroot.id')
+ clauses.append('buildroot.host_id = %(hostID)i')
+ if arches != None:
+ if isinstance(arches, list) or isinstance(arches, tuple):
+ clauses.append('rpminfo.arch IN %(arches)s')
+ elif isinstance(arches, str):
+ clauses.append('rpminfo.arch = %(arches)s')
+ else:
+ raise koji.GenericError, 'invalid type for "arches" parameter: %s' % type(arches)
+
+ query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
+ tables=['rpminfo'], joins=joins, clauses=clauses,
+ values=locals(), opts=queryOpts)
+ return query.execute()
+
+def get_maven_build(buildInfo, strict=False):
+ """
+ Retrieve Maven-specific information about a build.
+ buildInfo can be either a string (n-v-r) or an integer
+ (build ID).
+ Returns a map containing the following keys:
+
+ build_id: id of the build (integer)
+ group_id: Maven groupId (string)
+ artifact_id: Maven artifactId (string)
+ version: Maven version (string)
+ """
+ fields = ('build_id', 'group_id', 'artifact_id', 'version')
+
+ build_id = find_build_id(buildInfo, strict=strict)
+ if not build_id:
+ return None
+ query = """SELECT %s
+ FROM maven_builds
+ WHERE build_id = %%(build_id)i""" % ', '.join(fields)
+ return _singleRow(query, locals(), fields, strict)
+
+def get_win_build(buildInfo, strict=False):
+ """
+ Retrieve Windows-specific information about a build.
+ buildInfo can be either a string (n-v-r) or an integer
+ (build ID).
+ Returns a map containing the following keys:
+
+ build_id: id of the build (integer)
+ platform: the platform the build was performed on (string)
+ """
+ fields = ('build_id', 'platform')
+
+ build_id = find_build_id(buildInfo, strict=strict)
+ if not build_id:
+ return None
+ query = QueryProcessor(tables=('win_builds',), columns=fields,
+ clauses=('build_id = %(build_id)i',),
+ values={'build_id': build_id})
+ result = query.executeOne()
+ if strict and not result:
+ raise koji.GenericError, 'no such Windows build: %s' % buildInfo
+ return result
+
+def get_image_build(buildInfo, strict=False):
+ """
+ Retrieve image-specific information about a build.
+ buildInfo can be either a string (n-v-r) or an integer
+ (build ID). This function really only exists to verify a build
+ is an image build; there is no additional data.
+
+ Returns a map containing the following keys:
+ build_id: id of the build
+ """
+ build_id = find_build_id(buildInfo, strict=strict)
+ if not build_id:
+ return None
+ query = QueryProcessor(tables=('image_builds',), columns=('build_id',),
+ clauses=('build_id = %(build_id)i',),
+ values={'build_id': build_id})
+ result = query.executeOne()
+ if strict and not result:
+ raise koji.GenericError, 'no such image build: %s' % buildInfo
+ return result
+
+def list_archives(buildID=None, buildrootID=None, componentBuildrootID=None, hostID=None, type=None,
+ filename=None, size=None, checksum=None, typeInfo=None, queryOpts=None):
+ """
+ Retrieve information about archives.
+ If buildID is not null it will restrict the list to archives built by the build with that ID.
+ If buildrootID is not null it will restrict the list to archives built in the buildroot with that ID.
+ If componentBuildrootID is not null it will restrict the list to archives that were present in the
+ buildroot with that ID.
+ If hostID is not null it will restrict the list to archives built on the host with that ID.
+ If filename, size, and/or checksum are not null it will filter the results to entries matching the provided values.
+
+ Returns a list of maps containing the following keys:
+
+ id: unique id of the archive file (integer)
+ type_id: id of the archive type (Java jar, Solaris pkg, Windows exe, etc.) (integer)
+ type_name: name of the archive type
+ type_description: description of the archive type
+ type_extensions: valid extensions for the type
+ build_id: id of the build that generated this archive (integer)
+ buildroot_id: id of the buildroot where this archive was built (integer)
+ filename: name of the archive (string)
+ size: size of the archive (integer)
+ checksum: checksum of the archive (string)
+ checksum_type: the checksum type (integer)
+
+ If componentBuildrootID is specified, then the map will also contain the following key:
+ project: whether the archive was pulled in as a project dependency, or as part of the
+ build environment setup (boolean)
+
+ If 'type' is specified, then the archives listed will be limited to
+ those associated with additional metadata of the given type.
+ Currently supported types are:
+
+ maven, win, image
+
+ If 'maven' is specified as a type, each returned map will contain
+ these additional keys:
+
+ group_id: Maven groupId (string)
+ artifact_id: Maven artifactId (string)
+ version: Maven version (string)
+
+ If 'win' is specified as a type, each returned map will contain
+ these additional keys:
+
+ relpath: the relative path where the file is located (string)
+ platforms: space-separated list of platforms the file is suitable for use on (string)
+ flags: space-separated list of flags used when building the file (fre, chk) (string)
+
+ If 'image' is specified as a type, each returned map will contain an
+ additional key:
+
+ arch: the architecture of the image itself, which may differ from the
+ arch of the task that generated it (string)
+
+ typeInfo is a dict that can be used to filter the output by type-specific info.
+ For the 'maven' type, this dict may contain one or more of group_id, artifact_id, or version,
+ and the output will be restricted to archives with matching attributes.
+
+ If there are no archives matching the selection criteria,
+ an empty list is returned.
+ """
+ values = {}
+
+ tables = ['archiveinfo']
+ joins = ['archivetypes on archiveinfo.type_id = archivetypes.id']
+ fields = [('archiveinfo.id', 'id'),
+ ('archiveinfo.type_id', 'type_id'),
+ ('archiveinfo.build_id', 'build_id'),
+ ('archiveinfo.buildroot_id', 'buildroot_id'),
+ ('archiveinfo.filename', 'filename'),
+ ('archiveinfo.size', 'size'),
+ ('archiveinfo.checksum', 'checksum'),
+ ('archiveinfo.checksum_type', 'checksum_type'),
+ ('archivetypes.name', 'type_name'),
+ ('archivetypes.description', 'type_description'),
+ ('archivetypes.extensions', 'type_extensions'),
+ ]
+ clauses = []
+
+ if buildID is not None:
+ clauses.append('build_id = %(build_id)i')
+ values['build_id'] = buildID
+ if buildrootID is not None:
+ clauses.append('buildroot_id = %(buildroot_id)i')
+ values['buildroot_id'] = buildrootID
+ if componentBuildrootID is not None:
+ joins.append('buildroot_archives on archiveinfo.id = buildroot_archives.archive_id')
+ clauses.append('buildroot_archives.buildroot_id = %(component_buildroot_id)i')
+ values['component_buildroot_id'] = componentBuildrootID
+ fields.append(['buildroot_archives.buildroot_id', 'component_buildroot_id'])
+ fields.append(['buildroot_archives.project_dep', 'project'])
+ if hostID is not None:
+ joins.append('buildroot on archiveinfo.buildroot_id = buildroot.id')
+ clauses.append('buildroot.host_id = %(host_id)i')
+ values['host_id'] = hostID
+ fields.append(['buildroot.host_id', 'host_id'])
+ if filename is not None:
+ clauses.append('filename = %(filename)s')
+ values['filename'] = filename
+ if size is not None:
+ clauses.append('size = %(size)i')
+ values['size'] = size
+ if checksum is not None:
+ clauses.append('checksum = %(checksum)s')
+ values['checksum'] = checksum
+
+ if type is None:
+ pass
+ elif type == 'maven':
+ joins.append('maven_archives ON archiveinfo.id = maven_archives.archive_id')
+ fields.extend([
+ ('maven_archives.group_id', 'group_id'),
+ ('maven_archives.artifact_id', 'artifact_id'),
+ ('maven_archives.version', 'version'),
+ ])
+
+ if typeInfo:
+ for key in ('group_id', 'artifact_id', 'version'):
+ if typeInfo.has_key(key):
+ clauses.append('maven_archives.%s = %%(%s)s' % (key, key))
+ values[key] = typeInfo[key]
+ elif type == 'win':
+ joins.append('win_archives ON archiveinfo.id = win_archives.archive_id')
+ fields.extend([
+ ('win_archives.relpath', 'relpath'),
+ ('win_archives.platforms', 'platforms'),
+ ('win_archives.flags', 'flags'),
+ ])
+
+ if typeInfo:
+ if 'relpath' in typeInfo:
+ clauses.append('win_archives.relpath = %(relpath)s')
+ values['relpath'] = typeInfo['relpath']
+ for key in ('platforms', 'flags'):
+ if key in typeInfo:
+ val = typeInfo[key]
+ if not isinstance(val, (list, tuple)):
+ val = [val]
+ for v in val:
+ clauses.append(r"""%s ~ E'\\m%s\\M'""" % (key, v))
+ elif type == 'image':
+ joins.append('image_archives ON archiveinfo.id = image_archives.archive_id')
+ fields.append(['image_archives.arch', 'arch'])
+ if typeInfo and typeInfo.get('arch'):
+ # filter directly on the image arch ('key' from the maven branch
+ # above is not in scope here)
+ clauses.append('image_archives.arch = %(arch)s')
+ values['arch'] = typeInfo['arch']
+ else:
+ raise koji.GenericError, 'unsupported archive type: %s' % type
+
+ columns, aliases = zip(*fields)
+ ret = QueryProcessor(tables=tables, columns=columns, aliases=aliases, joins=joins,
+ clauses=clauses, values=values, opts=queryOpts).execute()
+ if not (queryOpts and queryOpts.get('countOnly')):
+ if queryOpts and queryOpts.get('asList'):
+ key = aliases.index('size')
+ else:
+ key = 'size'
+ for row in ret:
+ row[key] = koji.encode_int(row[key])
+ return ret
+
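+# A minimal usage sketch for list_archives (illustrative; 'org.example' is a
+# placeholder groupId): restrict the listing to Maven jars of one group.
+def _example_maven_jars(build_id):
+ archives = list_archives(buildID=build_id, type='maven',
+ typeInfo={'group_id': 'org.example'})
+ return [a['filename'] for a in archives if a['filename'].endswith('.jar')]
+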
+def get_archive(archive_id, strict=False):
+ """
+ Get information about the archive with the given ID. Returns a map
+ containing the following keys:
+
+ id: unique id of the archive file (integer)
+ type_id: id of the archive type (Java jar, Solaris pkg, Windows exe, etc.) (integer)
+ build_id: id of the build that generated this archive (integer)
+ buildroot_id: id of the buildroot where this archive was built (integer)
+ filename: name of the archive (string)
+ size: size of the archive (integer)
+ checksum: checksum of the archive (string)
+ checksum_type: type of the checksum (integer)
+
+ If the archive is part of a Maven build, the following keys will be included:
+ group_id
+ artifact_id
+ version
+ If the archive is part of a Windows build, the following keys will be included:
+ relpath
+ platforms
+ flags
+
+ If the archive is part of an image build, and it is the image file that
+ contains the root partition ('/'), there will be additional fields:
+
+ rootid
+ arch
+ """
+ fields = ('id', 'type_id', 'build_id', 'buildroot_id', 'filename', 'size', 'checksum', 'checksum_type')
+ select = """SELECT %s FROM archiveinfo
+ WHERE id = %%(archive_id)i""" % ', '.join(fields)
+ archive = _singleRow(select, locals(), fields, strict=strict)
+ if not archive:
+ # strict is taken care of by _singleRow()
+ return None
+ maven_info = get_maven_archive(archive_id)
+ if maven_info:
+ del maven_info['archive_id']
+ archive.update(maven_info)
+ win_info = get_win_archive(archive_id)
+ if win_info:
+ del win_info['archive_id']
+ archive.update(win_info)
+ image_info = get_image_archive(archive_id)
+ if image_info:
+ del image_info['archive_id']
+ archive.update(image_info)
+ archive['size'] = koji.encode_int(archive['size'])
+ return archive
+
+def get_maven_archive(archive_id, strict=False):
+ """
+ Retrieve Maven-specific information about an archive.
+ Returns a map containing the following keys:
+
+ archive_id: id of the archive (integer)
+ group_id: Maven groupId (string)
+ artifact_id: Maven artifactId (string)
+ version: Maven version (string)
+ """
+ fields = ('archive_id', 'group_id', 'artifact_id', 'version')
+ select = """SELECT %s FROM maven_archives
+ WHERE archive_id = %%(archive_id)i""" % ', '.join(fields)
+ return _singleRow(select, locals(), fields, strict=strict)
+
+def get_win_archive(archive_id, strict=False):
+ """
+ Retrieve Windows-specific information about an archive.
+ Returns a map containing the following keys:
+
+ archive_id: id of the archive (integer)
+ relpath: the relative path where the file is located (string)
+ platforms: space-separated list of platforms the file is suitable for use on (string)
+ flags: space-separated list of flags used when building the file (fre, chk) (string)
+ """
+ fields = ('archive_id', 'relpath', 'platforms', 'flags')
+ select = """SELECT %s FROM win_archives
+ WHERE archive_id = %%(archive_id)i""" % ', '.join(fields)
+ return _singleRow(select, locals(), fields, strict=strict)
+
+def get_image_archive(archive_id, strict=False):
+ """
+ Retrieve image-specific information about an archive.
+ Returns a map containing the following keys:
+
+ archive_id: id of the archive (integer)
+ arch: the architecture of the image
+ rootid: True if this image has the root '/' partition
+ """
+ fields = ('archive_id', 'arch')
+ select = """SELECT %s FROM image_archives
+ WHERE archive_id = %%(archive_id)i""" % ', '.join(fields)
+ results = _singleRow(select, locals(), fields, strict=strict)
+ if not results:
+ return None
+ results['rootid'] = False
+ fields = ('image_id', 'rpm_id')
+ select = """SELECT %s FROM image_listing
+ WHERE image_id = %%(archive_id)i""" % ', '.join(fields)
+ # any listing row means this archive is the root image; a strict query
+ # would wrongly raise when the image contains more than one rpm
+ rpms = _singleRow(select, locals(), fields, strict=False)
+ if rpms:
+ results['rootid'] = True
+ return results
+
+def _get_zipfile_list(archive_id, zippath):
+ """
+ Get a list of the entries in the zipfile located at zippath.
+ Return a list of dicts, one per entry in the zipfile. Each dict contains:
+ - archive_id
+ - name
+ - size
+ - mtime
+ If the file does not exist, return an empty list.
+ """
+ result = []
+ if not os.path.exists(zippath):
+ return result
+ archive = zipfile.ZipFile(zippath, 'r')
+ for entry in archive.infolist():
+ filename = koji.fixEncoding(entry.filename)
+ result.append({'archive_id': archive_id,
+ 'name': filename,
+ 'size': entry.file_size,
+ 'mtime': int(time.mktime(entry.date_time + (0, 0, -1)))})
+ archive.close()
+ return result
+
+def _get_tarball_list(archive_id, tarpath):
+ """
+ Get a list of the entries in the tarball located at tarpath.
+ Return a list of dicts, one per entry in the tarball. Each dict contains:
+ - archive_id
+ - name
+ - size
+ - mtime
+ - mode
+ - user
+ - group
+ If the file does not exist, return an empty list.
+ """
+ result = []
+ if not os.path.exists(tarpath):
+ return result
+ archive = tarfile.open(tarpath, 'r')
+ for entry in archive:
+ filename = koji.fixEncoding(entry.name)
+ result.append({'archive_id': archive_id,
+ 'name': filename,
+ 'size': entry.size,
+ 'mtime': entry.mtime,
+ 'mode': entry.mode,
+ 'user': entry.uname,
+ 'group': entry.gname})
+ archive.close()
+ return result
+
+def list_archive_files(archive_id, queryOpts=None):
+ """
+ Get information about the files contained in the archive with the given ID.
+ Returns a list of maps with the following keys:
+
+ archive_id: id of the archive the file is contained in (integer)
+ name: name of the file (string)
+ size: uncompressed size of the file (integer)
+ """
+ archive_info = get_archive(archive_id, strict=True)
+
+ archive_type = get_archive_type(type_id=archive_info['type_id'], strict=True)
+ build_info = get_build(archive_info['build_id'], strict=True)
+ maven_info = get_maven_build(build_info['id'])
+ win_info = get_win_build(build_info['id'])
+
+ if maven_info:
+ maven_archive = get_maven_archive(archive_info['id'], strict=True)
+ archive_info.update(maven_archive)
+ file_path = os.path.join(koji.pathinfo.mavenbuild(build_info),
+ koji.pathinfo.mavenfile(archive_info))
+ elif win_info:
+ win_archive = get_win_archive(archive_info['id'], strict=True)
+ archive_info.update(win_archive)
+ file_path = os.path.join(koji.pathinfo.winbuild(build_info),
+ koji.pathinfo.winfile(archive_info))
+ else:
+ return _applyQueryOpts([], queryOpts)
+
+ if archive_type['name'] in ('zip', 'jar'):
+ return _applyQueryOpts(_get_zipfile_list(archive_id, file_path), queryOpts)
+ elif archive_type['name'] == 'tar':
+ return _applyQueryOpts(_get_tarball_list(archive_id, file_path), queryOpts)
+ else:
+ # XXX support other archive types
+ return _applyQueryOpts([], queryOpts)
+
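+# Illustrative sketch: list_archive_files drives simple content checks, such
+# as flagging unusually large entries in a jar/zip/tar archive.
+def _example_large_entries(archive_id, threshold=10*1024*1024):
+ return [f['name'] for f in list_archive_files(archive_id)
+ if f['size'] > threshold]
+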
+def get_archive_file(archive_id, filename):
+ """
+ Get information about a file with the given filename
+ contained in the archive with the given ID.
+ Returns a map with the following keys:
+
+ archive_id: id of the archive the file is contained in (integer)
+ name: name of the file (string)
+ size: uncompressed size of the file (integer)
+ """
+ files = list_archive_files(archive_id)
+ for file_info in files:
+ if file_info['name'] == filename:
+ return file_info
+ #otherwise
+ return None
+
+def list_task_output(taskID, stat=False):
+ """List the files generated by the task with the given ID. This
+ will usually include one or more RPMs, and one or more log files.
+ If the task did not generate any files, or the output directory
+ for the task no longer exists, return an empty list.
+
+ If stat is True, return a map of filename -> stat_info where stat_info
+ is a map containing the values of the st_* attributes returned by
+ os.stat()."""
+ taskDir = '%s/%s' % (koji.pathinfo.work(), koji.pathinfo.taskrelpath(taskID))
+ if stat:
+ result = {}
+ else:
+ result = []
+ if not os.path.isdir(taskDir):
+ return result
+ for path, dirs, files in os.walk(taskDir):
+ relpath = path[len(taskDir) + 1:]
+ for filename in files:
+ relfilename = os.path.join(relpath, filename)
+ if stat:
+ stat_info = os.stat(os.path.join(path, filename))
+ stat_map = {}
+ for attr in dir(stat_info):
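+ # st_size is passed back as a string so very large sizes are not
+ # mangled by XML-RPC's 32-bit integer limit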
+ if attr == 'st_size':
+ stat_map[attr] = str(getattr(stat_info, attr))
+ elif attr in ('st_atime', 'st_mtime', 'st_ctime'):
+ stat_map[attr] = getattr(stat_info, attr)
+ result[relfilename] = stat_map
+ else:
+ result.append(relfilename)
+ return result
+
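+# Illustrative sketch: totalling a task's output size from the stat map
+# (st_size comes back as a string; see the comment in list_task_output).
+def _example_task_output_size(task_id):
+ total = 0
+ for filename, stat_info in list_task_output(task_id, stat=True).items():
+ total += int(stat_info['st_size'])
+ return total
+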
+def _fetchMulti(query, values):
+ """Run the query and return all rows"""
+ c = context.cnx.cursor()
+ c.execute(query, values)
+ results = c.fetchall()
+ c.close()
+ return results
+
+def _fetchSingle(query, values, strict=False):
+ """Run the query and return a single row
+
+ If strict is true, raise an error if the query returns more or less than
+ one row."""
+ results = _fetchMulti(query, values)
+ numRows = len(results)
+ if numRows == 0:
+ if strict:
+ raise koji.GenericError, 'query returned no rows'
+ else:
+ return None
+ elif strict and numRows > 1:
+ raise koji.GenericError, 'multiple rows returned for a single row query'
+ else:
+ return results[0]
+
+def _multiRow(query, values, fields):
+ """Return all rows from "query". Named query parameters
+ can be specified using the "values" map. Results will be returned
+ as a list of maps. Each map in the list will have a key for each
+ element in the "fields" list. If there are no results, an empty
+ list will be returned."""
+ return [dict(zip(fields, row)) for row in _fetchMulti(query, values)]
+
+def _singleRow(query, values, fields, strict=False):
+ """Return a single row from "query". Named parameters can be
+ specified using the "values" map. The result will be returned as
+ as map. The map will have a key for each element in the "fields"
+ list. If more than one row is returned and "strict" is true, a
+ GenericError will be raised. If no rows are returned, and "strict"
+ is True, a GenericError will be raised. Otherwise None will be
+ returned."""
+ row = _fetchSingle(query, values, strict)
+ if row:
+ return dict(zip(fields, row))
+ else:
+ #strict enforced by _fetchSingle
+ return None
+
+def _singleValue(query, values=None, strict=True):
+ """Perform a query that returns a single value.
+
+ Note that unless strict is True a return value of None could mean either
+ a single NULL value or zero rows returned."""
+ if values is None:
+ values = {}
+ row = _fetchSingle(query, values, strict)
+ if row:
+ if strict and len(row) > 1:
+ raise koji.GenericError, 'multiple fields returned for a single value query'
+ return row[0]
+ else:
+ # don't need to check strict here, since that was already handled by _singleRow()
+ return None
+
+def _dml(operation, values):
+ """Run an insert, update, or delete. Return number of rows affected"""
+ c = context.cnx.cursor()
+ c.execute(operation, values)
+ ret = c.rowcount
+ logger.debug("Operation affected %s row(s)", ret)
+ c.close()
+ context.commit_pending = True
+ return ret
+
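+# Sketch of the calling convention for the helpers above (illustrative): all
+# take pyformat placeholders plus a values map, as used throughout this file.
+def _example_count_enabled_hosts():
+ q = """SELECT COUNT(*) FROM host WHERE enabled = %(enabled)s"""
+ return _singleValue(q, {'enabled': True})
+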
+def get_host(hostInfo, strict=False):
+ """Get information about the given host. hostInfo may be
+ either a string (hostname) or int (host id). A map will be returned
+ containing the following data:
+
+ - id
+ - user_id
+ - name
+ - arches
+ - task_load
+ - capacity
+ - description
+ - comment
+ - ready
+ - enabled
+ """
+ fields = ('id', 'user_id', 'name', 'arches', 'task_load',
+ 'capacity', 'description', 'comment', 'ready', 'enabled')
+ query = """SELECT %s FROM host
+ WHERE """ % ', '.join(fields)
+ if isinstance(hostInfo, int) or isinstance(hostInfo, long):
+ query += """id = %(hostInfo)i"""
+ elif isinstance(hostInfo, str):
+ query += """name = %(hostInfo)s"""
+ else:
+ raise koji.GenericError, 'invalid type for hostInfo: %s' % type(hostInfo)
+
+ return _singleRow(query, locals(), fields, strict)
+
+def edit_host(hostInfo, **kw):
+ """Edit information for an existing host.
+ hostInfo specifies the host to edit, either as an integer (id)
+ or a string (name).
+ fields to be changed are specified as keyword parameters:
+ - arches
+ - capacity
+ - description
+ - comment
+
+ Returns True if changes are made to the database, False otherwise.
+ """
+ context.session.assertPerm('admin')
+
+ host = get_host(hostInfo, strict=True)
+
+ fields = ('arches', 'capacity', 'description', 'comment')
+ changes = []
+ for field in fields:
+ if field in kw and kw[field] != host[field]:
+ if field == 'capacity':
+ # capacity is a float, so set the substitution format appropriately
+ changes.append('%s = %%(%s)f' % (field, field))
+ else:
+ changes.append('%s = %%(%s)s' % (field, field))
+
+ if not changes:
+ return False
+
+ update = 'UPDATE host set ' + ', '.join(changes) + ' where id = %(id)i'
+ data = kw.copy()
+ data['id'] = host['id']
+ _dml(update, data)
+ return True
+
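+# Usage sketch (illustrative): edit_host only issues an UPDATE when a field
+# actually differs, and returns whether anything changed.
+def _example_annotate_host(hostname, note):
+ # requires an admin session, as enforced by edit_host itself
+ return edit_host(hostname, comment=note)
+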
+def get_channel(channelInfo, strict=False):
+ """Return information about a channel."""
+ fields = ('id', 'name')
+ query = """SELECT %s FROM channels
+ WHERE """ % ', '.join(fields)
+ if isinstance(channelInfo, int) or isinstance(channelInfo, long):
+ query += """id = %(channelInfo)i"""
+ elif isinstance(channelInfo, str):
+ query += """name = %(channelInfo)s"""
+ else:
+ raise koji.GenericError, 'invalid type for channelInfo: %s' % type(channelInfo)
+
+ return _singleRow(query, locals(), fields, strict)
+
+
+def query_buildroots(hostID=None, tagID=None, state=None, rpmID=None, archiveID=None, taskID=None, buildrootID=None, queryOpts=None):
+ """Return a list of matching buildroots
+
+ Optional args:
+ hostID - only buildroots on host.
+ tagID - only buildroots for tag.
+ state - only buildroots in state (may be a list)
+ rpmID - only buildroots the specified rpm was used in
+ archiveID - only buildroots the specified archive was used in
+ taskID - only buildroots associated with task.
+ buildrootID - only the specified buildroot
+ queryOpts - query options
+ """
+ fields = [('buildroot.id', 'id'), ('buildroot.arch', 'arch'), ('buildroot.state', 'state'),
+ ('buildroot.dirtyness', 'dirtyness'), ('buildroot.task_id', 'task_id'),
+ ('host.id', 'host_id'), ('host.name', 'host_name'),
+ ('repo.id', 'repo_id'), ('repo.state', 'repo_state'),
+ ('tag.id', 'tag_id'), ('tag.name', 'tag_name'),
+ ('create_events.id', 'create_event_id'), ('create_events.time', 'create_event_time'),
+ ('EXTRACT(EPOCH FROM create_events.time)','create_ts'),
+ ('retire_events.id', 'retire_event_id'), ('retire_events.time', 'retire_event_time'),
+ ('EXTRACT(EPOCH FROM retire_events.time)','retire_ts'),
+ ('repo_create.id', 'repo_create_event_id'), ('repo_create.time', 'repo_create_event_time')]
+
+ tables = ['buildroot']
+ joins=['host ON host.id = buildroot.host_id',
+ 'repo ON repo.id = buildroot.repo_id',
+ 'tag ON tag.id = repo.tag_id',
+ 'events AS create_events ON create_events.id = buildroot.create_event',
+ 'LEFT OUTER JOIN events AS retire_events ON buildroot.retire_event = retire_events.id',
+ 'events AS repo_create ON repo_create.id = repo.create_event']
+
+ clauses = []
+ if buildrootID != None:
+ if isinstance(buildrootID, list) or isinstance(buildrootID, tuple):
+ clauses.append('buildroot.id IN %(buildrootID)s')
+ else:
+ clauses.append('buildroot.id = %(buildrootID)i')
+ if hostID != None:
+ clauses.append('host.id = %(hostID)i')
+ if tagID != None:
+ clauses.append('tag.id = %(tagID)i')
+ if state != None:
+ if isinstance(state, list) or isinstance(state, tuple):
+ clauses.append('buildroot.state IN %(state)s')
+ else:
+ clauses.append('buildroot.state = %(state)i')
+ if rpmID != None:
+ joins.insert(0, 'buildroot_listing ON buildroot.id = buildroot_listing.buildroot_id')
+ fields.append(('buildroot_listing.is_update', 'is_update'))
+ clauses.append('buildroot_listing.rpm_id = %(rpmID)i')
+ if archiveID != None:
+ joins.append('buildroot_archives ON buildroot.id = buildroot_archives.buildroot_id')
+ clauses.append('buildroot_archives.archive_id = %(archiveID)i')
+ if taskID != None:
+ clauses.append('buildroot.task_id = %(taskID)i')
+
+ query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
+ tables=tables, joins=joins, clauses=clauses, values=locals(),
+ opts=queryOpts)
+ return query.execute()
+
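+# Illustrative sketch: the newest buildroot for a task, mirroring how
+# merge_scratch uses the queryOpts ordering below.
+def _example_latest_buildroot(task_id):
+ buildroots = query_buildroots(taskID=task_id,
+ queryOpts={'order': '-id', 'limit': 1})
+ if buildroots:
+ return buildroots[0]
+ return None
+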
+def get_buildroot(buildrootID, strict=False):
+ """Return information about a buildroot. buildrootID must be an int ID."""
+
+ result = query_buildroots(buildrootID=buildrootID)
+ if len(result) == 0:
+ if strict:
+ raise koji.GenericError, "No such buildroot: %r" % buildrootID
+ else:
+ return None
+ if len(result) > 1:
+ #this should be impossible
+ raise koji.GenericError, "More that one buildroot with id: %i" % buildrootID
+ return result[0]
+
+def list_channels(hostID=None):
+ """List channels. If hostID is specified, only list
+ channels associated with the host with that ID."""
+ fields = ('id', 'name')
+ query = """SELECT %s FROM channels
+ """ % ', '.join(fields)
+ if hostID != None:
+ query += """JOIN host_channels ON channels.id = host_channels.channel_id
+ WHERE host_channels.host_id = %(hostID)i"""
+ return _multiRow(query, locals(), fields)
+
+def new_package(name,strict=True):
+ c = context.cnx.cursor()
+ # TODO - table lock?
+ # check for existing
+ q = """SELECT id FROM package WHERE name=%(name)s"""
+ c.execute(q,locals())
+ row = c.fetchone()
+ if row:
+ (pkg_id,) = row
+ if strict:
+ raise koji.GenericError, "Package already exists [id %d]" % pkg_id
+ else:
+ q = """SELECT nextval('package_id_seq')"""
+ c.execute(q)
+ (pkg_id,) = c.fetchone()
+ q = """INSERT INTO package (id,name) VALUES (%(pkg_id)s,%(name)s)"""
+ context.commit_pending = True
+ c.execute(q,locals())
+ return pkg_id
+
+
+def add_volume(name, strict=True):
+ """Add a new storage volume in the database"""
+ context.session.assertPerm('admin')
+ voldir = koji.pathinfo.volumedir(name)
+ if not os.path.isdir(voldir):
+ raise koji.GenericError, 'please create the volume directory first'
+ if strict:
+ volinfo = lookup_name('volume', name, strict=False)
+ if volinfo:
+ raise koji.GenericError, 'volume %s already exists' % name
+ volinfo = lookup_name('volume', name, strict=False, create=True)
+ return volinfo
+
+def remove_volume(volume):
+ """Remove unused storage volume from the database"""
+ context.session.assertPerm('admin')
+ volinfo = lookup_name('volume', volume, strict=True)
+ query = QueryProcessor(tables=['build'], clauses=['volume_id=%(id)i'],
+ values=volinfo, columns=['id'], opts={'limit':1})
+ if query.execute():
+ raise koji.GenericError, 'volume %(name)s has build references' % volinfo
+ delete = """DELETE FROM volume WHERE id=%(id)i"""
+ _dml(delete, volinfo)
+
+def list_volumes():
+ """List storage volumes"""
+ return QueryProcessor(tables=['volume'], columns=['id', 'name']).execute()
+
+def change_build_volume(build, volume, strict=True):
+ """Move a build to a different storage volume"""
+ context.session.assertPerm('admin')
+ volinfo = lookup_name('volume', volume, strict=True)
+ binfo = get_build(build, strict=True)
+ if binfo['volume_id'] == volinfo['id']:
+ if strict:
+ raise koji.GenericError, "Build %(nvr)s already on volume %(volume_name)s" % binfo
+ else:
+ #nothing to do
+ return
+ state = koji.BUILD_STATES[binfo['state']]
+ if state not in ['COMPLETE', 'DELETED']:
+ raise koji.GenericError, "Build %s is %s" % (binfo['nvr'], state)
+ voldir = koji.pathinfo.volumedir(volinfo['name'])
+ if not os.path.isdir(voldir):
+ raise koji.GenericError, "Directory entry missing for volume %(name)s" % volinfo
+
+ #more sanity checks
+ for check_vol in list_volumes():
+ check_binfo = binfo.copy()
+ check_binfo['volume_id'] = check_vol['id']
+ check_binfo['volume_name'] = check_vol['name']
+ checkdir = koji.pathinfo.build(check_binfo)
+ if check_vol['id'] == binfo['volume_id']:
+ # the volume we are moving from
+ pass
+ elif check_vol['name'] == 'DEFAULT' and os.path.islink(checkdir):
+ # old convenience symlink
+ pass
+ elif check_vol['id'] == volinfo['id']:
+ # the volume we are moving to
+ if os.path.lexists(checkdir):
+ raise koji.GenericError, "Destination directory exists: %s" % checkdir
+ elif os.path.lexists(checkdir):
+ raise koji.GenericError, "Unexpected cross-volume content: %s" % checkdir
+
+ # First copy the build dir(s)
+ dir_moves = []
+ old_binfo = binfo.copy()
+ binfo['volume_id'] = volinfo['id']
+ binfo['volume_name'] = volinfo['name']
+ olddir = koji.pathinfo.build(old_binfo)
+ if os.path.exists(olddir):
+ newdir = koji.pathinfo.build(binfo)
+ dir_moves.append([olddir, newdir])
+ for olddir, newdir in dir_moves:
+ # Remove old symlink if copying to base volume
+ if volinfo['name'] == 'DEFAULT' or volinfo['name'] is None:
+ if os.path.islink(newdir):
+ os.unlink(newdir)
+ koji.ensuredir(os.path.dirname(newdir))
+ shutil.copytree(olddir, newdir, symlinks=True)
+
+ # Second, update the db
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='volume_id', old=old_binfo['volume_id'], new=volinfo['id'], info=binfo)
+ update = UpdateProcessor('build', clauses=['id=%(id)i'], values=binfo)
+ update.set(volume_id=volinfo['id'])
+ update.execute()
+ for tag in list_tags(build=binfo['id']):
+ set_tag_update(tag['id'], 'VOLUME_CHANGE')
+
+ # Third, delete the old content
+ for olddir, newdir in dir_moves:
+ koji.util.rmtree(olddir)
+
+ #Fourth, maintain a symlink if appropriate
+ if dir_moves and volinfo['name'] and volinfo['name'] != 'DEFAULT':
+ base_vol = lookup_name('volume', 'DEFAULT', strict=True)
+ base_binfo = binfo.copy()
+ base_binfo['volume_id'] = base_vol['id']
+ base_binfo['volume_name'] = base_vol['name']
+ basedir = koji.pathinfo.build(base_binfo)
+ if os.path.islink(basedir):
+ os.unlink(basedir)
+ relpath = koji.util.relpath(newdir, os.path.dirname(basedir))
+ os.symlink(relpath, basedir)
+
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='volume_id', old=old_binfo['volume_id'], new=volinfo['id'], info=binfo)
+
+
+def new_build(data):
+ """insert a new build entry"""
+ data = data.copy()
+ if 'pkg_id' in data:
+ data['name'] = lookup_package(data['pkg_id'], strict=True)['name']
+ else:
+ #see if there's a package name
+ name = data.get('name')
+ if not name:
+ raise koji.GenericError, "No name or package id provided for build"
+ data['pkg_id'] = new_package(name,strict=False)
+ for f in ('version','release','epoch'):
+ if not data.has_key(f):
+ raise koji.GenericError, "No %s value for build" % f
+ #provide a few default values
+ data.setdefault('state',koji.BUILD_STATES['COMPLETE'])
+ data.setdefault('completion_time', 'NOW')
+ data.setdefault('owner',context.session.user_id)
+ data.setdefault('task_id',None)
+ data.setdefault('volume_id', 0)
+ #check for existing build
+ # TODO - table lock?
+ q="""SELECT id,state,task_id FROM build
+ WHERE pkg_id=%(pkg_id)d AND version=%(version)s AND release=%(release)s
+ FOR UPDATE"""
+ row = _fetchSingle(q, data)
+ if row:
+ id, state, task_id = row
+ data['id'] = id
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=state, new=data['state'], info=data)
+ st_desc = koji.BUILD_STATES[state]
+ if st_desc == 'BUILDING':
+ # check to see if this is the controlling task
+ if data['state'] == state and data.get('task_id','') == task_id:
+ #the controlling task must have restarted (and called initBuild again)
+ return id
+ raise koji.GenericError, "Build already in progress (task %d)" % task_id
+ # TODO? - reclaim 'stale' builds (state=BUILDING and task_id inactive)
+ if st_desc in ('FAILED','CANCELED'):
+ #should be ok to replace
+ update = """UPDATE build SET state=%(state)i,task_id=%(task_id)s,
+ owner=%(owner)s,completion_time=%(completion_time)s,create_event=get_event()
+ WHERE id = %(id)i"""
+ _dml(update, data)
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=state, new=data['state'], info=data)
+ return id
+ raise koji.GenericError, "Build already exists (id=%d, state=%s): %r" \
+ % (id, st_desc, data)
+ else:
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=None, new=data['state'], info=data)
+ #insert the new data
+ insert_data = dslice(data, ['pkg_id', 'version', 'release', 'epoch', 'state', 'volume_id',
+ 'task_id', 'owner', 'completion_time'])
+ data['id'] = insert_data['id'] = _singleValue("SELECT nextval('build_id_seq')")
+ insert = InsertProcessor('build', data=insert_data)
+ insert.execute()
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=None, new=data['state'], info=data)
+ #return build_id
+ return data['id']
+
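+# Illustrative sketch: the minimal data map new_build accepts when no pkg_id
+# is pre-resolved (values here are placeholders). epoch must be present,
+# though it may be None; state and owner get the defaults set above.
+def _example_new_build_entry():
+ data = {'name': 'examplepkg', 'version': '1.0', 'release': '1',
+ 'epoch': None}
+ return new_build(data)
+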
+def check_noarch_rpms(basepath, rpms):
+ """
+ If rpms contains any noarch rpms with identical names,
+ run rpmdiff against the duplicate rpms.
+ Return the list of rpms with any duplicate entries removed (only
+ the first entry will be retained).
+ """
+ result = []
+ noarch_rpms = {}
+ for relpath in rpms:
+ if relpath.endswith('.noarch.rpm'):
+ filename = os.path.basename(relpath)
+ if noarch_rpms.has_key(filename):
+ # duplicate found, add it to the duplicate list
+ # but not the result list
+ noarch_rpms[filename].append(relpath)
+ else:
+ noarch_rpms[filename] = [relpath]
+ result.append(relpath)
+ else:
+ result.append(relpath)
+
+ for noarch_list in noarch_rpms.values():
+ rpmdiff(basepath, noarch_list)
+
+ return result
+
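+# Illustrative sketch (placeholder paths): identical noarch rpms produced by
+# several arch-specific tasks collapse to a single import candidate, after
+# rpmdiff verifies the duplicates really match.
+def _example_dedup_noarch(workdir):
+ rpms = ['tasks/1/i686/foo-doc-1.0-1.noarch.rpm',
+ 'tasks/2/x86_64/foo-doc-1.0-1.noarch.rpm']
+ return check_noarch_rpms(workdir, rpms)
+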
+def import_build(srpm, rpms, brmap=None, task_id=None, build_id=None, logs=None):
+ """Import a build into the database (single transaction)
+
+ Files must be uploaded and specified with path relative to the workdir
+ Args:
+ srpm - relative path of srpm
+ rpms - list of rpms (relative paths)
+ brmap - dictionary mapping [s]rpms to buildroot ids
+ task_id - associate the build with a task
+ build_id - build is a finalization of existing entry
+ """
+ if brmap is None:
+ brmap = {}
+ koji.plugin.run_callbacks('preImport', type='build', srpm=srpm, rpms=rpms, brmap=brmap,
+ task_id=task_id, build_id=build_id, build=None, logs=logs)
+ uploadpath = koji.pathinfo.work()
+ #verify files exist
+ for relpath in [srpm] + rpms:
+ fn = "%s/%s" % (uploadpath,relpath)
+ if not os.path.exists(fn):
+ raise koji.GenericError, "no such file: %s" % fn
+
+ rpms = check_noarch_rpms(uploadpath, rpms)
+
+ #verify buildroot ids from brmap
+ found = {}
+ for br_id in brmap.values():
+ if found.has_key(br_id):
+ continue
+ found[br_id] = 1
+ #this will raise an exception if the buildroot id is invalid
+ BuildRoot(br_id)
+
+ #read srpm info
+ fn = "%s/%s" % (uploadpath,srpm)
+ build = koji.get_header_fields(fn,('name','version','release','epoch',
+ 'sourcepackage'))
+ if build['sourcepackage'] != 1:
+ raise koji.GenericError, "not a source package: %s" % fn
+ build['task_id'] = task_id
+ if build_id is None:
+ build_id = new_build(build)
+ binfo = get_build(build_id, strict=True)
+ else:
+ #build_id was passed in - sanity check
+ binfo = get_build(build_id, strict=True)
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=binfo['state'], new=st_complete, info=binfo)
+ for key in ('name','version','release','epoch','task_id'):
+ if build[key] != binfo[key]:
+ raise koji.GenericError, "Unable to complete build: %s mismatch (build: %s, rpm: %s)" % (key, binfo[key], build[key])
+ if binfo['state'] != koji.BUILD_STATES['BUILDING']:
+ raise koji.GenericError, "Unable to complete build: state is %s" \
+ % koji.BUILD_STATES[binfo['state']]
+ #update build state
+ update = """UPDATE build SET state=%(st_complete)i,completion_time=NOW()
+ WHERE id=%(build_id)i"""
+ _dml(update,locals())
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=binfo['state'], new=st_complete, info=binfo)
+ # now to handle the individual rpms
+ for relpath in [srpm] + rpms:
+ fn = "%s/%s" % (uploadpath,relpath)
+ rpminfo = import_rpm(fn, binfo, brmap.get(relpath))
+ import_rpm_file(fn, binfo, rpminfo)
+ add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn))
+ if logs:
+ for key, files in logs.iteritems():
+ if not key:
+ key = None
+ for relpath in files:
+ fn = "%s/%s" % (uploadpath,relpath)
+ import_build_log(fn, binfo, subdir=key)
+ koji.plugin.run_callbacks('postImport', type='build', srpm=srpm, rpms=rpms, brmap=brmap,
+ task_id=task_id, build_id=build_id, build=binfo, logs=logs)
+ return binfo
+
+def import_rpm(fn,buildinfo=None,brootid=None,wrapper=False):
+ """Import a single rpm into the database
+
+ Designed to be called from import_build.
+ """
+ if not os.path.exists(fn):
+ raise koji.GenericError, "no such file: %s" % fn
+
+ #read rpm info
+ hdr = koji.get_rpm_header(fn)
+ rpminfo = koji.get_header_fields(hdr,['name','version','release','epoch',
+ 'sourcepackage','arch','buildtime','sourcerpm'])
+ if rpminfo['sourcepackage'] == 1:
+ rpminfo['arch'] = "src"
+
+ #sanity check basename
+ basename = os.path.basename(fn)
+ expected = "%(name)s-%(version)s-%(release)s.%(arch)s.rpm" % rpminfo
+ if basename != expected:
+ raise koji.GenericError, "bad filename: %s (expected %s)" % (basename,expected)
+
+ if buildinfo is None:
+ #figure it out for ourselves
+ if rpminfo['sourcepackage'] == 1:
+ buildinfo = get_build(rpminfo, strict=False)
+ if not buildinfo:
+ # create a new build
+ build_id = new_build(rpminfo)
+ buildinfo = get_build(build_id, strict=True)
+ else:
+ #figure it out from sourcerpm string
+ buildinfo = get_build(koji.parse_NVRA(rpminfo['sourcerpm']))
+ if buildinfo is None:
+ #XXX - handle case where package is not a source rpm
+ # and we still need to create a new build
+ raise koji.GenericError, 'No matching build'
+ state = koji.BUILD_STATES[buildinfo['state']]
+ if state in ('FAILED', 'CANCELED', 'DELETED'):
+ nvr = "%(name)s-%(version)s-%(release)s" % buildinfo
+ raise koji.GenericError, "Build is %s: %s" % (state, nvr)
+ elif not wrapper:
+ # only enforce the srpm name matching the build for non-wrapper rpms
+ srpmname = "%(name)s-%(version)s-%(release)s.src.rpm" % buildinfo
+ #either the sourcerpm field should match the build, or the filename
+ #itself (for the srpm)
+ if rpminfo['sourcepackage'] != 1:
+ if rpminfo['sourcerpm'] != srpmname:
+ raise koji.GenericError, "srpm mismatch for %s: %s (expected %s)" \
+ % (fn,rpminfo['sourcerpm'],srpmname)
+ elif basename != srpmname:
+ raise koji.GenericError, "srpm mismatch for %s: %s (expected %s)" \
+ % (fn,basename,srpmname)
+
+ #add rpminfo entry
+ rpminfo['id'] = _singleValue("""SELECT nextval('rpminfo_id_seq')""")
+ rpminfo['build_id'] = buildinfo['id']
+ rpminfo['size'] = os.path.getsize(fn)
+ rpminfo['payloadhash'] = koji.hex_string(hdr[rpm.RPMTAG_SIGMD5])
+ rpminfo['buildroot_id'] = brootid
+ rpminfo['external_repo_id'] = 0
+
+ koji.plugin.run_callbacks('preImport', type='rpm', rpm=rpminfo, build=buildinfo,
+ filepath=fn)
+
+ data = rpminfo.copy()
+ del data['sourcepackage']
+ del data['sourcerpm']
+ insert = InsertProcessor('rpminfo', data=data)
+ insert.execute()
+
+ koji.plugin.run_callbacks('postImport', type='rpm', rpm=rpminfo, build=buildinfo,
+ filepath=fn)
+
+ #extra fields for return
+ rpminfo['build'] = buildinfo
+ rpminfo['brootid'] = brootid
+ return rpminfo
+
+def add_external_rpm(rpminfo, external_repo, strict=True):
+ """Add an external rpm entry to the rpminfo table
+
+ Differences from import_rpm:
+ - entry will have non-zero external_repo_id
+ - entry will not reference a build
+ - rpm not available to us -- the necessary data is passed in
+
+ The rpminfo arg should contain the following fields:
+ - name, version, release, epoch, arch, payloadhash, size, buildtime
+
+ Returns info as get_rpm
+ """
+
+ # [!] Calling function should perform access checks
+
+ #sanity check rpminfo
+ dtypes = (
+ ('name', basestring),
+ ('version', basestring),
+ ('release', basestring),
+ ('epoch', (int, types.NoneType)),
+ ('arch', basestring),
+ ('payloadhash', str),
+ ('size', int),
+ ('buildtime', (int, long)))
+ for field, allowed in dtypes:
+ if not rpminfo.has_key(field):
+ raise koji.GenericError, "%s field missing: %r" % (field, rpminfo)
+ if not isinstance(rpminfo[field], allowed):
+ #this will catch unwanted NULLs
+ raise koji.GenericError, "Invalid value for %s: %r" % (field, rpminfo[field])
+ #TODO: more sanity checks for payloadhash
+
+ #Check to see if we have it
+ data = rpminfo.copy()
+ data['location'] = external_repo
+ previous = get_rpm(data, strict=False)
+ if previous:
+ disp = "%(name)s-%(version)s-%(release)s.%(arch)s@%(external_repo_name)s" % previous
+ if strict:
+ raise koji.GenericError, "external rpm already exists: %s" % disp
+ elif data['payloadhash'] != previous['payloadhash']:
+ raise koji.GenericError, "hash changed for external rpm: %s (%s -> %s)" \
+ % (disp, previous['payloadhash'], data['payloadhash'])
+ else:
+ return previous
+
+ #add rpminfo entry
+ rpminfo['external_repo_id'] = get_external_repo_id(external_repo, strict=True)
+ rpminfo['id'] = _singleValue("""SELECT nextval('rpminfo_id_seq')""")
+ q = """INSERT INTO rpminfo (id, build_id, buildroot_id,
+ name, version, release, epoch, arch,
+ external_repo_id,
+ payloadhash, size, buildtime)
+ VALUES (%(id)i, NULL, NULL,
+ %(name)s, %(version)s, %(release)s, %(epoch)s, %(arch)s,
+ %(external_repo_id)i,
+ %(payloadhash)s, %(size)i, %(buildtime)i)
+ """
+ _dml(q, rpminfo)
+
+ return get_rpm(rpminfo['id'])
+
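+# Illustrative sketch: the minimal rpminfo map that add_external_rpm's type
+# checks accept (all values are placeholders).
+def _example_external_rpm(repo):
+ rpminfo = {'name': 'bash', 'version': '4.2', 'release': '1',
+ 'epoch': None, 'arch': 'x86_64',
+ 'payloadhash': '0' * 32, 'size': 1, 'buildtime': 0}
+ return add_external_rpm(rpminfo, repo, strict=False)
+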
+def import_build_log(fn, buildinfo, subdir=None):
+ """Move a logfile related to a build to the right place"""
+ logdir = koji.pathinfo.build_logs(buildinfo)
+ if subdir:
+ logdir = "%s/%s" % (logdir, subdir)
+ koji.ensuredir(logdir)
+ final_path = "%s/%s" % (logdir, os.path.basename(fn))
+ if os.path.exists(final_path):
+ raise koji.GenericError("Error importing build log. %s already exists." % final_path)
+ if os.path.islink(fn) or not os.path.isfile(fn):
+ raise koji.GenericError("Error importing build log. %s is not a regular file." % fn)
+ os.rename(fn,final_path)
+ os.symlink(final_path,fn)
+
+def import_rpm_file(fn,buildinfo,rpminfo):
+ """Move the rpm file into the proper place
+
+ Generally this is done after the db import
+ """
+ final_path = "%s/%s" % (koji.pathinfo.build(buildinfo),koji.pathinfo.rpm(rpminfo))
+ _import_archive_file(fn, os.path.dirname(final_path))
+
+def import_build_in_place(build):
+ """Import a package already in the packages directory
+
+ This is used for bootstrapping the database
+ Parameters:
+ build: a dictionary with fields: name, version, release
+ """
+ # Only an admin may do this
+ context.session.assertPerm('admin')
+ prev = get_build(build)
+ if prev is not None:
+ state = koji.BUILD_STATES[prev['state']]
+ if state == 'COMPLETE':
+ log_error("Skipping build %r, already in db" % build)
+ # TODO - check contents against db
+ return prev['id']
+ elif state not in ('FAILED', 'CANCELED'):
+ raise koji.GenericError, "build already exists (%s): %r" % (state, build)
+ #otherwise try to reimport
+ bdir = koji.pathinfo.build(build)
+ srpm = None
+ rpms = []
+ srpmname = "%(name)s-%(version)s-%(release)s.src.rpm" % build
+ # look for srpm first
+ srcdir = bdir + "/src"
+ if os.path.isdir(srcdir):
+ for basename in os.listdir(srcdir):
+ if basename != srpmname:
+ raise koji.GenericError, "unexpected file: %s" % basename
+ srpm = "%s/%s" % (srcdir,basename)
+ for arch in os.listdir(bdir):
+ if arch == 'src':
+ #already done that
+ continue
+ if arch == "data":
+ continue
+ adir = "%s/%s" % (bdir,arch)
+ if not os.path.isdir(adir):
+ raise koji.GenericError, "out of place file: %s" % adir
+ for basename in os.listdir(adir):
+ fn = "%s/%s" % (adir,basename)
+ if not os.path.isfile(fn):
+ raise koji.GenericError, "unexpected non-regular file: %s" % fn
+ if fn[-4:] != '.rpm':
+ raise koji.GenericError, "out of place file: %s" % adir
+ #check sourcerpm field
+ hdr = koji.get_rpm_header(fn)
+ sourcerpm = hdr[rpm.RPMTAG_SOURCERPM]
+ if sourcerpm != srpmname:
+ raise koji.GenericError, "srpm mismatch for %s: %s (expected %s)" \
+ % (fn,sourcerpm,srpmname)
+ rpms.append(fn)
+ koji.plugin.run_callbacks('preImport', type='build', in_place=True, srpm=srpm, rpms=rpms)
+ # actually import
+ buildinfo = None
+ if srpm is not None:
+ rpminfo = import_rpm(srpm)
+ add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(srpm))
+ buildinfo = rpminfo['build']
+ # file already in place
+ for fn in rpms:
+ rpminfo = import_rpm(fn,buildinfo)
+ add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn))
+ #update build state
+ build_id = buildinfo['id']
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=buildinfo['state'], new=st_complete, info=buildinfo)
+ update = """UPDATE build SET state=%(st_complete)i,completion_time=NOW()
+ WHERE id=%(build_id)i"""
+ _dml(update,locals())
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=buildinfo['state'], new=st_complete, info=buildinfo)
+ koji.plugin.run_callbacks('postImport', type='build', in_place=True, srpm=srpm, rpms=rpms)
+ return build_id
+
+def _import_wrapper(task_id, build_info, rpm_results):
+ """Helper function to import wrapper rpms for a Maven build"""
+ rpm_buildroot_id = rpm_results['buildroot_id']
+ rpm_task_dir = koji.pathinfo.task(task_id)
+
+ for rpm_path in [rpm_results['srpm']] + rpm_results['rpms']:
+ rpm_path = os.path.join(rpm_task_dir, rpm_path)
+ rpm_info = import_rpm(rpm_path, build_info, rpm_buildroot_id, wrapper=True)
+ import_rpm_file(rpm_path, build_info, rpm_info)
+ add_rpm_sig(rpm_info['id'], koji.rip_rpm_sighdr(rpm_path))
+
+ for log in rpm_results['logs']:
+ # assume we're only importing noarch packages
+ import_build_log(os.path.join(rpm_task_dir, log),
+ build_info, subdir='noarch')
+
+def merge_scratch(task_id):
+ """Import rpms from a scratch build into an existing build, retaining
+ buildroot metadata and build logs."""
+ task = Task(task_id)
+ try:
+ task_info = task.getInfo(request=True)
+ except koji.GenericError:
+ raise koji.ImportError, 'invalid task: %s' % task_id
+ if task_info['state'] != koji.TASK_STATES['CLOSED']:
+ raise koji.ImportError, 'task %s did not complete successfully' % task_id
+ if task_info['method'] != 'build':
+ raise koji.ImportError, 'task %s is not a build task' % task_id
+ if len(task_info['request']) < 3 or not task_info['request'][2].get('scratch'):
+ raise koji.ImportError, 'task %s is not a scratch build' % task_id
+
+ # sanity check the task, and extract data required for import
+ srpm = None
+ tasks = {}
+ for child in task.getChildren():
+ if child['method'] != 'buildArch':
+ continue
+ info = {'rpms': [],
+ 'logs': []}
+ for output in list_task_output(child['id']):
+ if output.endswith('.src.rpm'):
+ srpm_name = os.path.basename(output)
+ if not srpm:
+ srpm = srpm_name
+ else:
+ if srpm != srpm_name:
+ raise koji.ImportError, 'task srpm names do not match: %s, %s' % \
+ (srpm, srpm_name)
+ elif output.endswith('.noarch.rpm'):
+ continue
+ elif output.endswith('.rpm'):
+ rpminfo = koji.parse_NVRA(os.path.basename(output))
+ if 'arch' not in info:
+ info['arch'] = rpminfo['arch']
+ elif info['arch'] != rpminfo['arch']:
+ raise koji.ImportError, 'multiple arches generated by task %s: %s, %s' % \
+ (child['id'], info['arch'], rpminfo['arch'])
+ info['rpms'].append(output)
+ elif output.endswith('.log'):
+ info['logs'].append(output)
+ if not info['rpms']:
+ continue
+ if not info['logs']:
+ raise koji.ImportError, 'task %s is missing logs' % child['id']
+ buildroots = query_buildroots(taskID=child['id'],
+ queryOpts={'order': '-id', 'limit': 1})
+ if not buildroots:
+ raise koji.ImportError, 'no buildroot associated with task %s' % child['id']
+ info['buildroot_id'] = buildroots[0]['id']
+ tasks[child['id']] = info
+ if not tasks:
+ raise koji.ImportError, 'nothing to do for task %s' % task_id
+
+ # sanity check the build
+ build_nvr = koji.parse_NVRA(srpm)
+ build = get_build(build_nvr)
+ if not build:
+ raise koji.ImportError, 'no such build: %(name)s-%(version)s-%(release)s' % \
+ build_nvr
+ if build['state'] != koji.BUILD_STATES['COMPLETE']:
+ raise koji.ImportError, '%s did not complete successfully' % build['nvr']
+ if not build['task_id']:
+ raise koji.ImportError, 'no task for %s' % build['nvr']
+ build_task_info = Task(build['task_id']).getInfo(request=True)
+ # Intentionally skip checking the build task state.
+ # There are cases where the build can be valid even though the task has failed,
+ # e.g. tagging failures.
+
+ # compare the task and build and make sure they are compatible with importing
+ if task_info['request'][0] != build_task_info['request'][0]:
+ raise koji.ImportError, 'SCM URLs for the task and build do not match: %s, %s' % \
+ (task_info['request'][0], build_task_info['request'][0])
+ build_arches = set()
+ for rpm in list_rpms(buildID=build['id']):
+ if rpm['arch'] == 'src':
+ build_srpm = '%s.src.rpm' % rpm['nvr']
+ if srpm != build_srpm:
+ raise koji.ImportError, 'task and build srpm names do not match: %s, %s' % \
+ (srpm, build_srpm)
+ elif rpm['arch'] == 'noarch':
+ continue
+ else:
+ build_arches.add(rpm['arch'])
+ if not build_arches:
+ raise koji.ImportError, 'no arch-specific rpms found for %s' % build['nvr']
+ task_arches = set([t['arch'] for t in tasks.values()])
+ overlapping_arches = task_arches.intersection(build_arches)
+ if overlapping_arches:
+ raise koji.ImportError, 'task %s and %s produce rpms with the same arches: %s' % \
+ (task_info['id'], build['nvr'], ', '.join(overlapping_arches))
+
+ # everything looks good, do the import
+ for task_id, info in tasks.items():
+ taskpath = koji.pathinfo.task(task_id)
+ for filename in info['rpms']:
+ filepath = os.path.realpath(os.path.join(taskpath, filename))
+ rpminfo = import_rpm(filepath, build, info['buildroot_id'])
+ import_rpm_file(filepath, build, rpminfo)
+ add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(filepath))
+ for logname in info['logs']:
+ logpath = os.path.realpath(os.path.join(taskpath, logname))
+ import_build_log(logpath, build, subdir=info['arch'])
+
+ # flag tags whose content has changed, so relevant repos can be regen'ed
+ for tag in list_tags(build=build['id']):
+ set_tag_update(tag['id'], 'IMPORT')
+
+ return build['id']
+
+def get_archive_types():
+ """Return a list of all supported archivetypes"""
+ select = """SELECT id, name, description, extensions FROM archivetypes
+ ORDER BY id"""
+ return _multiRow(select, {}, ('id', 'name', 'description', 'extensions'))
+
+def _get_archive_type_by_name(name, strict=True):
+ select = """SELECT id, name, description, extensions FROM archivetypes
+ WHERE name = %(name)s"""
+ return _singleRow(select, locals(), ('id', 'name', 'description', 'extensions'), strict)
+
+def _get_archive_type_by_id(type_id, strict=False):
+ select = """SELECT id, name, description, extensions FROM archivetypes
+ WHERE id = %(type_id)i"""
+ return _singleRow(select, locals(), ('id', 'name', 'description', 'extensions'), strict)
+
+def get_archive_type(filename=None, type_name=None, type_id=None, strict=False):
+ """
+ Get the archive type for the given filename, type_name, or type_id.
+ """
+ if type_id:
+ return _get_archive_type_by_id(type_id, strict)
+ elif type_name:
+ return _get_archive_type_by_name(type_name, strict)
+ elif filename:
+ # we handle that below
+ pass
+ else:
+ raise koji.GenericError, 'one of filename, type_name, or type_id must be specified'
+
+ parts = filename.split('.')
+
+ for start in range(len(parts)-1, -1, -1):
+ ext = '.'.join(parts[start:])
+
+ select = r"""SELECT id, name, description, extensions FROM archivetypes
+ WHERE extensions ~ E'\\m%s\\M'""" % ext
+ results = _multiRow(select, locals(), ('id', 'name', 'description', 'extensions'))
+
+ if len(results) == 1:
+ return results[0]
+ elif len(results) > 1:
+ # this should never happen, and is a misconfiguration in the database
+ raise koji.GenericError, 'multiple matches for file extension: %s' % ext
+ #otherwise
+ if strict:
+ raise koji.GenericError, 'unsupported file extension: %s' % ext
+ else:
+ return None
+
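+# Usage sketch (illustrative): candidate extensions are matched from the last
+# dotted component outward ('gz', then 'tar.gz', ...), returning the first
+# unique match from archivetypes.
+def _example_type_name(filename):
+ atype = get_archive_type(filename=filename)
+ if atype:
+ return atype['name']
+ return None
+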
+def new_maven_build(build, maven_info):
+ """
+ Add Maven metadata to an existing build.
+ maven_info must contain the 'group_id',
+ 'artifact_id', and 'version' keys.
+ """
+ maven_info = maven_info.copy()
+
+ current_maven_info = get_maven_build(build)
+ if current_maven_info:
+ # already exists, verify that it matches
+ for field in ('group_id', 'artifact_id', 'version'):
+ if current_maven_info[field] != maven_info[field]:
+ raise koji.BuildError, '%s mismatch (current: %s, new: %s)' % \
+ (field, current_maven_info[field], maven_info[field])
+ else:
+ maven_info['build_id'] = build['id']
+ insert = """INSERT INTO maven_builds (build_id, group_id, artifact_id, version)
+ VALUES (%(build_id)i, %(group_id)s, %(artifact_id)s, %(version)s)"""
+ _dml(insert, maven_info)
+
+def new_win_build(build_info, win_info):
+ """
+ Add Windows metadata to an existing build.
+ win_info must contain a 'platform' key.
+ """
+ build_id = build_info['id']
+ current = get_win_build(build_id, strict=False)
+ if current:
+ if current['platform'] != win_info['platform']:
+ update = UpdateProcessor('win_builds', clauses=['build_id=%(build_id)i'],
+ values={'build_id': build_id})
+ update.set(platform=win_info['platform'])
+ update.execute()
+ else:
+ insert = InsertProcessor('win_builds')
+ insert.set(build_id=build_id)
+ insert.set(platform=win_info['platform'])
+ insert.execute()
+
+def new_image_build(build_info):
+ """
+ Add image metadata to an existing build. This is just the build id, so
+ that we can distinguish image builds from other types.
+ """
+ # We don't have to worry about updating an image build because the id is
+ # the only thing we care about, and that should never change if a build
+ # fails first and succeeds later on a resubmission.
+ query = QueryProcessor(tables=('image_builds',), columns=('build_id',),
+ clauses=('build_id = %(build_id)i',),
+ values={'build_id': build_info['id']})
+ result = query.executeOne()
+ if not result:
+ insert = InsertProcessor('image_builds')
+ insert.set(build_id=build_info['id'])
+ insert.execute()
+
+def old_image_data(old_image_id):
+ """Return old image data for given id"""
+
+ values = dict(img_id = old_image_id)
+ tables = ['imageinfo']
+ fields = ['id', 'task_id', 'filename', 'filesize', 'arch', 'hash', 'mediatype']
+ clauses = ['imageinfo.id = %(img_id)i']
+
+ query = QueryProcessor(columns=fields, tables=tables, clauses=clauses,
+ values=values)
+ ret = query.executeOne()
+
+ if not ret:
+ raise koji.GenericError, 'no old image with ID: %i' % old_image_id
+ return ret
+
+def check_old_image_files(old):
+ """Check for existence of files for old image data"""
+
+ parts = [koji.pathinfo.topdir, 'images']
+ if old['mediatype'] == 'LiveCD ISO':
+ parts.append('livecd')
+ else:
+ parts.append('appliance')
+ parts.extend([str(old['id'] % 10000), str(old['id'])])
+ img_dir = os.path.join(*parts)
+ img_path = os.path.join(img_dir, old['filename'])
+ if not os.path.exists(img_path):
+ raise koji.GenericError, "Image file is missing: %s" % img_path
+ if os.path.islink(img_path):
+ raise koji.GenericError, "Image file is a symlink: %s" % img_path
+ if not os.path.isfile(img_path):
+ raise koji.GenericError, "Not a regular file: %s" % img_path
+ img_size = os.path.getsize(img_path)
+ if img_size != old['filesize']:
+ raise koji.GenericError, "Size mismatch for %s (%i != %i)" % \
+ (img_path, img_size, old['filesize'])
+ # old images always used sha256 hashes
+ sha256sum = hashlib.sha256()
+ image_fo = file(img_path, 'r')
+ while True:
+ data = image_fo.read(1048576)
+ sha256sum.update(data)
+ if not len(data):
+ break
+ img_hash = sha256sum.hexdigest()
+ if img_hash != old['hash']:
+ raise koji.GenericError, "Hash mismatch for %s (%i != %i)" % \
+ (img_path, img_hash, old['hash'])
+ # file looks ok
+ old['path'] = img_path
+ old['dir'] = img_dir
+
+ # check for extra files, noting accompanying xml file
+ expected = [old['filename'], 'data']
+ extra = []
+ for out_file in os.listdir(img_dir):
+ if out_file in expected:
+ pass
+ elif out_file.endswith('.xml') and old['mediatype'] != 'LiveCD ISO':
+ if 'xmlfile' in old:
+ extra.append(out_file)
+ else:
+ old['xmlfile'] = out_file
+ else:
+ extra.append(out_file)
+ if extra:
+ raise koji.GenericError, "Unexpected files under %s: %r" % (img_dir, extra)
+
+
+def import_old_image(old, name, version):
+ """Import old image data into the new data model"""
+
+ # note: since this is a one-time migration tool, we are not triggering callbacks
+ # ^ XXX: except that some functions we call do
+
+ # build entry
+ task = Task(old['task_id'])
+ binfo = dict(name=name, version=version)
+ binfo['release'] = get_next_release(binfo)
+ binfo['epoch'] = 0
+ binfo['task_id'] = old['task_id']
+ binfo['owner'] = task.getOwner()
+ binfo['state'] = koji.BUILD_STATES['COMPLETE']
+ build_id = new_build(binfo)
+ binfo['id'] = build_id
+ new_image_build(binfo)
+
+ # figure out buildroot id
+ # the old schema did not track buildroot directly, so we have to infer
+ # by task id.
+ # If the task had multiple buildroots, we chose the latest
+ query = QueryProcessor(columns=['id'], tables=['buildroot'],
+ clauses=['task_id=%(task_id)i'], values=old,
+ opts={'order': '-id', 'limit': 1})
+ br_id = query.singleValue(strict=False)
+
+ # archives
+ archives = []
+ for fn in [old['filename'], old.get('xmlfile')]:
+ if not fn:
+ continue
+ fullpath = os.path.join(old['dir'], fn)
+ archivetype = get_archive_type(filename=fn)
+ logger.debug('image type we are migrating is: %s' % archivetype)
+ if not archivetype:
+ raise koji.BuildError, 'Unsupported image type'
+ imgdata = dict(arch=old['arch'])
+ archives.append(import_archive(fullpath, binfo, 'image', imgdata, buildroot_id=br_id))
+
+ # deal with contents listing
+ archive_id = archives[0]['id']
+ logger.debug('root archive id is %s' % archive_id)
+ query = QueryProcessor(columns=['rpm_id'], tables=['imageinfo_listing'],
+ clauses=['image_id=%(id)i'], values=old,
+ opts={'asList': True})
+ rpm_ids = [r[0] for r in query.execute()]
+ insert = InsertProcessor('image_listing')
+ insert.set(image_id=archive_id)
+ for rpm_id in rpm_ids:
+ insert.set(rpm_id=rpm_id)
+ insert.execute()
+ logger.info('updated image_listing')
+
+ # grab old logs
+ old_log_dir = os.path.join(old['dir'], 'data', 'logs', old['arch'])
+ logdir = os.path.join(koji.pathinfo.build(binfo), 'data/logs/image')
+ for logfile in os.listdir(old_log_dir):
+ logsrc = os.path.join(old_log_dir, logfile)
+ koji.ensuredir(logdir)
+ final_path = os.path.join(logdir, logfile)
+ if os.path.exists(final_path):
+ raise koji.GenericError("Error importing build log. %s already exists." % final_path)
+ if os.path.islink(logsrc) or not os.path.isfile(logsrc):
+ raise koji.GenericError("Error importing build log. %s is not a regular file." % logsrc)
+ os.rename(logsrc, final_path)
+ os.symlink(final_path, logsrc)
+
+ return binfo
+
+def import_archive(filepath, buildinfo, type, typeInfo, buildroot_id=None):
+ """
+ Import an archive file and associate it with a build. The archive can
+ be any non-rpm filetype supported by Koji.
+
+ filepath: full path to the archive file
+ buildinfo: dict of information about the build to associate the archive with (as returned by getBuild())
+ type: type of the archive being imported. Currently supported archive types: maven, win, image
+ typeInfo: dict of type-specific information
+ buildroot_id: the id of the buildroot the archive was built in (may be null)
+ """
+ if not os.path.exists(filepath):
+ raise koji.GenericError, 'no such file: %s' % filepath
+
+ archiveinfo = {'buildroot_id': buildroot_id}
+ filename = koji.fixEncoding(os.path.basename(filepath))
+ archiveinfo['filename'] = filename
+ archivetype = get_archive_type(filename, strict=True)
+ archiveinfo['type_id'] = archivetype['id']
+ archiveinfo['build_id'] = buildinfo['id']
+ archiveinfo['size'] = os.path.getsize(filepath)
+ archivefp = file(filepath)
+ m = md5_constructor()
+ while True:
+ contents = archivefp.read(8192)
+ if not contents:
+ break
+ m.update(contents)
+ archivefp.close()
+ archiveinfo['checksum'] = m.hexdigest()
+ archiveinfo['checksum_type'] = koji.CHECKSUM_TYPES['md5']
+
+ koji.plugin.run_callbacks('preImport', type='archive', archive=archiveinfo, build=buildinfo,
+ build_type=type, filepath=filepath)
+
+ # XXX verify that the buildroot is associated with a task that's associated with the build
+ archive_id = _singleValue("SELECT nextval('archiveinfo_id_seq')", strict=True)
+ archiveinfo['id'] = archive_id
+ insert = InsertProcessor('archiveinfo', data=archiveinfo)
+ insert.execute()
+
+ if type == 'maven':
+ maveninfo = get_maven_build(buildinfo, strict=True)
+
+ if archivetype['name'] == 'pom':
+ pom_info = koji.parse_pom(filepath)
+ pom_maveninfo = koji.pom_to_maven_info(pom_info)
+ # sanity check: Maven info from pom must match the user-supplied typeInfo
+ if koji.mavenLabel(pom_maveninfo) != koji.mavenLabel(typeInfo):
+ raise koji.BuildError, 'Maven info from .pom file (%s) does not match user-supplied typeInfo (%s)' % \
+ (koji.mavenLabel(pom_maveninfo), koji.mavenLabel(typeInfo))
+ # sanity check: the filename of the pom file must match <artifactId>-<version>.pom
+ if filename != '%(artifact_id)s-%(version)s.pom' % typeInfo:
+ raise koji.BuildError, 'Maven info (%s) is not consistent with pom filename (%s)' % \
+ (koji.mavenLabel(typeInfo), filename)
+
+ insert = InsertProcessor('maven_archives', data=dslice(typeInfo, ('group_id', 'artifact_id', 'version')))
+ insert.set(archive_id=archive_id)
+ insert.execute()
+
+ # move the file to its final destination
+ mavendir = os.path.join(koji.pathinfo.mavenbuild(buildinfo),
+ koji.pathinfo.mavenrepo(typeInfo))
+ _import_archive_file(filepath, mavendir)
+ _generate_maven_metadata(mavendir)
+ elif type == 'win':
+ wininfo = get_win_build(buildinfo, strict=True)
+
+ insert = InsertProcessor('win_archives')
+ insert.set(archive_id=archive_id)
+ relpath = typeInfo['relpath'].strip('/')
+ insert.set(relpath=relpath)
+ if not typeInfo['platforms']:
+ raise koji.BuildError, 'no value for platforms'
+ insert.set(platforms=' '.join(typeInfo['platforms']))
+ if typeInfo['flags']:
+ insert.set(flags=' '.join(typeInfo['flags']))
+ insert.execute()
+
+ destdir = koji.pathinfo.winbuild(buildinfo)
+ if relpath:
+ destdir = os.path.join(destdir, relpath)
+ _import_archive_file(filepath, destdir)
+ elif type == 'image':
+ insert = InsertProcessor('image_archives')
+ insert.set(archive_id=archive_id)
+ insert.set(arch=typeInfo['arch'])
+ insert.execute()
+ imgdir = koji.pathinfo.imagebuild(buildinfo)
+ _import_archive_file(filepath, imgdir)
+ # import log files?
+ else:
+ raise koji.BuildError, 'unsupported archive type: %s' % type
+
+ archiveinfo = get_archive(archive_id, strict=True)
+ koji.plugin.run_callbacks('postImport', type='archive', archive=archiveinfo, build=buildinfo,
+ build_type=type, filepath=filepath)
+ return archiveinfo
+
+def _import_archive_file(filepath, destdir):
+ """
+ Move the file to its final location on the filesystem.
+ filepath must exist; destdir will be created if it does not exist.
+ A symlink pointing from the old location to the new location will
+ be created.
+ """
+ final_path = "%s/%s" % (destdir,
+ koji.fixEncoding(os.path.basename(filepath)))
+ if os.path.exists(final_path):
+ raise koji.GenericError("Error importing archive file, %s already exists" % final_path)
+ if os.path.islink(filepath) or not os.path.isfile(filepath):
+ raise koji.GenericError("Error importing archive file, %s is not a regular file" % filepath)
+ koji.ensuredir(destdir)
+ os.rename(filepath, final_path)
+ os.symlink(final_path, filepath)
+
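+# Minimal usage sketch for _import_archive_file() above (paths hypothetical):
+#   _import_archive_file('/tmp/koji-work/foo.jar',
+#                        '/mnt/koji/packages/foo/1.0/1/maven')
+# afterwards the original path is left behind as a symlink to the moved file.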
+def _generate_maven_metadata(mavendir):
+ """
+ Generate md5 and sha1 sums for every file in mavendir, if they don't already exist.
+ Checksum files will be named <filename>.md5 and <filename>.sha1.
+ """
+ mavenfiles = os.listdir(mavendir)
+ for mavenfile in mavenfiles:
+ if os.path.splitext(mavenfile)[1] in ('.md5', '.sha1'):
+ continue
+ if not os.path.isfile('%s/%s' % (mavendir, mavenfile)):
+ continue
+ for ext, sum_constr in (('.md5', md5_constructor), ('.sha1', sha1_constructor)):
+ sumfile = mavenfile + ext
+ if sumfile not in mavenfiles:
+ sum = sum_constr()
+ fobj = file('%s/%s' % (mavendir, mavenfile))
+ while True:
+ content = fobj.read(8192)
+ if not content:
+ break
+ sum.update(content)
+ fobj.close()
+ sumobj = file('%s/%s' % (mavendir, sumfile), 'w')
+ sumobj.write(sum.hexdigest())
+ sumobj.close()
+
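+# Note on _generate_maven_metadata() above: for a directory containing
+# foo-1.0.jar it leaves foo-1.0.jar.md5 and foo-1.0.jar.sha1 alongside it
+# (hex digest of the file contents, no trailing newline); existing checksum
+# files are left untouched.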
+def add_rpm_sig(an_rpm, sighdr):
+ """Store a signature header for an rpm"""
+ #calling function should perform permission checks, if applicable
+ rinfo = get_rpm(an_rpm, strict=True)
+ if rinfo['external_repo_id']:
+ raise koji.GenericError, "Not an internal rpm: %s (from %s)" \
+ % (an_rpm, rinfo['external_repo_name'])
+ binfo = get_build(rinfo['build_id'])
+ builddir = koji.pathinfo.build(binfo)
+ if not os.path.isdir(builddir):
+ raise koji.GenericError, "No such directory: %s" % builddir
+ rawhdr = koji.RawHeader(sighdr)
+ sigmd5 = koji.hex_string(rawhdr.get(koji.RPM_SIGTAG_MD5))
+ if sigmd5 == rinfo['payloadhash']:
+ # note: payloadhash is a misnomer, that field is populated with sigmd5.
+ sigkey = rawhdr.get(koji.RPM_SIGTAG_GPG)
+ if not sigkey:
+ sigkey = rawhdr.get(koji.RPM_SIGTAG_PGP)
+ else:
+ # In older rpms, this field in the signature header does not actually match
+ # sigmd5 (I think rpmlib pulls it from SIGTAG_GPG). Anyway, this
+ # sanity check fails incorrectly for those rpms, so we fall back to
+ # a somewhat more expensive check.
+ # ALSO, for these older rpms, the layout of SIGTAG_GPG is different too, so
+ # we need to pull that differently as well
+ rpm_path = "%s/%s" % (builddir, koji.pathinfo.rpm(rinfo))
+ sigmd5, sigkey = _scan_sighdr(sighdr, rpm_path)
+ sigmd5 = koji.hex_string(sigmd5)
+ if sigmd5 != rinfo['payloadhash']:
+ nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
+ raise koji.GenericError, "wrong md5 for %s: %s" % (nvra, sigmd5)
+ if not sigkey:
+ sigkey = ''
+ #we use the sigkey='' to represent unsigned in the db (so that uniqueness works)
+ else:
+ sigkey = koji.get_sigpacket_key_id(sigkey)
+ sighash = md5_constructor(sighdr).hexdigest()
+ rpm_id = rinfo['id']
+ # - db entry
+ q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s"""
+ rows = _fetchMulti(q, locals())
+ if rows:
+ #TODO[?] - if sighash is the same, handle more gracefully
+ nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
+ raise koji.GenericError, "Signature already exists for package %s, key %s" % (nvra, sigkey)
+ callback_info = copy.copy(rinfo)
+ callback_info['sigkey'] = sigkey
+ callback_info['sighash'] = sighash
+ koji.plugin.run_callbacks('preRPMSign', attribute='sighash', old=None, new=sighash, info=callback_info)
+ insert = """INSERT INTO rpmsigs(rpm_id, sigkey, sighash)
+ VALUES (%(rpm_id)s, %(sigkey)s, %(sighash)s)"""
+ _dml(insert, locals())
+ # - write to fs
+ sigpath = "%s/%s" % (builddir, koji.pathinfo.sighdr(rinfo, sigkey))
+ koji.ensuredir(os.path.dirname(sigpath))
+ fo = file(sigpath, 'wb')
+ fo.write(sighdr)
+ fo.close()
+ koji.plugin.run_callbacks('postRPMSign', attribute='sighash', old=None, new=sighash, info=callback_info)
+
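+# Hedged usage sketch (rpm reference and key hypothetical): store a detached
+# signature header with add_rpm_sig() above, then materialize the signed copy
+# with write_signed_rpm() below:
+#   add_rpm_sig('foo-1.0-1.x86_64', sighdr)   # sighdr: raw signature header
+#   write_signed_rpm('foo-1.0-1.x86_64', sigkey)
+# where sigkey is the short key id as returned by koji.get_sigpacket_key_id().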
+def _scan_sighdr(sighdr, fn):
+ """Splices sighdr with other headers from fn and queries (no payload)"""
+ # This is hackish, but it works
+ if not os.path.exists(fn):
+ raise koji.GenericError, "No such path: %s" % fn
+ if not os.path.isfile(fn):
+ raise koji.GenericError, "Not a regular file: %s" % fn
+ #XXX should probably add an option to splice_rpm_sighdr to handle this instead
+ sig_start, sigsize = koji.find_rpm_sighdr(fn)
+ hdr_start = sig_start + sigsize
+ hdrsize = koji.rpm_hdr_size(fn, hdr_start)
+ inp = file(fn, 'rb')
+ outp = tempfile.TemporaryFile(mode='w+b')
+ #before signature
+ outp.write(inp.read(sig_start))
+ #signature
+ outp.write(sighdr)
+ inp.seek(sigsize, 1)
+ #main header
+ outp.write(inp.read(hdrsize))
+ inp.close()
+ outp.seek(0,0)
+ ts = rpm.TransactionSet()
+ ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
+ #(we have no payload, so verifies would fail otherwise)
+ hdr = ts.hdrFromFdno(outp.fileno())
+ outp.close()
+ sig = hdr[rpm.RPMTAG_SIGGPG]
+ if not sig:
+ sig = hdr[rpm.RPMTAG_SIGPGP]
+ return hdr[rpm.RPMTAG_SIGMD5], sig
+
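+# _scan_sighdr() returns (sigmd5, sig): sigmd5 is the raw RPMTAG_SIGMD5 value
+# (binary; callers pass it through koji.hex_string for comparison) and sig is
+# the raw GPG/PGP signature packet, or None when the rpm is unsigned.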
+def check_rpm_sig(an_rpm, sigkey, sighdr):
+ #verify that the provided signature header matches the key and rpm
+ rinfo = get_rpm(an_rpm, strict=True)
+ binfo = get_build(rinfo['build_id'])
+ builddir = koji.pathinfo.build(binfo)
+ rpm_path = "%s/%s" % (builddir, koji.pathinfo.rpm(rinfo))
+ if not os.path.exists(rpm_path):
+ raise koji.GenericError, "No such path: %s" % rpm_path
+ if not os.path.isfile(rpm_path):
+ raise koji.GenericError, "Not a regular file: %s" % rpm_path
+ fd, temp = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ koji.splice_rpm_sighdr(sighdr, rpm_path, temp)
+ ts = rpm.TransactionSet()
+ ts.setVSFlags(0) #full verify
+ fo = file(temp, 'rb')
+ hdr = ts.hdrFromFdno(fo.fileno())
+ fo.close()
+ except:
+ try:
+ os.unlink(temp)
+ except:
+ pass
+ raise
+ raw_key = hdr[rpm.RPMTAG_SIGGPG]
+ if not raw_key:
+ raw_key = hdr[rpm.RPMTAG_SIGPGP]
+ if not raw_key:
+ found_key = None
+ else:
+ found_key = koji.get_sigpacket_key_id(raw_key)
+ if sigkey != found_key:
+ # clean up the spliced temp copy before raising
+ os.unlink(temp)
+ raise koji.GenericError, "Signature key mismatch: got %s, expected %s" \
+ % (found_key, sigkey)
+ os.unlink(temp)
+
+
+
+def query_rpm_sigs(rpm_id=None, sigkey=None, queryOpts=None):
+ fields = ('rpm_id', 'sigkey', 'sighash')
+ clauses = []
+ if rpm_id is not None:
+ clauses.append("rpm_id=%(rpm_id)s")
+ if sigkey is not None:
+ clauses.append("sigkey=%(sigkey)s")
+ query = QueryProcessor(columns=fields, tables=('rpmsigs',), clauses=clauses,
+ values=locals(), opts=queryOpts)
+ return query.execute()
+
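+# Example calls (values illustrative); each returns rows of
+# {'rpm_id': ..., 'sigkey': ..., 'sighash': ...}:
+#   query_rpm_sigs(rpm_id=1234)        # all cached signatures for one rpm
+#   query_rpm_sigs(sigkey='a1b2c3d4')  # all rpms signed with a given key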
+def write_signed_rpm(an_rpm, sigkey, force=False):
+ """Write a signed copy of the rpm"""
+ rinfo = get_rpm(an_rpm, strict=True)
+ if rinfo['external_repo_id']:
+ raise koji.GenericError, "Not an internal rpm: %s (from %s)" \
+ % (an_rpm, rinfo['external_repo_name'])
+ binfo = get_build(rinfo['build_id'])
+ nvra = "%(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
+ builddir = koji.pathinfo.build(binfo)
+ rpm_path = "%s/%s" % (builddir, koji.pathinfo.rpm(rinfo))
+ if not os.path.exists(rpm_path):
+ raise koji.GenericError, "No such path: %s" % rpm_path
+ if not os.path.isfile(rpm_path):
+ raise koji.GenericError, "Not a regular file: %s" % rpm_path
+ #make sure we have it in the db
+ rpm_id = rinfo['id']
+ q = """SELECT sighash FROM rpmsigs WHERE rpm_id=%(rpm_id)i AND sigkey=%(sigkey)s"""
+ row = _fetchSingle(q, locals())
+ if not row:
+ raise koji.GenericError, "No cached signature for package %s, key %s" % (nvra, sigkey)
+ (sighash,) = row
+ signedpath = "%s/%s" % (builddir, koji.pathinfo.signed(rinfo, sigkey))
+ if os.path.exists(signedpath):
+ if not force:
+ #already present
+ return
+ else:
+ os.unlink(signedpath)
+ sigpath = "%s/%s" % (builddir, koji.pathinfo.sighdr(rinfo, sigkey))
+ fo = file(sigpath, 'rb')
+ sighdr = fo.read()
+ fo.close()
+ koji.ensuredir(os.path.dirname(signedpath))
+ koji.splice_rpm_sighdr(sighdr, rpm_path, signedpath)
+
+
+def query_history(tables=None, **kwargs):
+ """Returns history data from various tables that support it
+
+ tables: list of versioned tables to search, no value implies all tables
+ valid entries: user_perms, user_groups, tag_inheritance, tag_config,
+ build_target_config, external_repo_config, tag_external_repos,
+ tag_listing, tag_packages, group_config, group_req_listing,
+ group_package_listing
+
+ - Time options -
+ times are specified as an integer event or a string timestamp
+ time options are valid for all record types
+ before: either created or revoked before timestamp
+ after: either created or revoked after timestamp
+ beforeEvent: either created or revoked before event id
+ afterEvent: either created or revoked after event id
+
+ - other versioning options -
+ active: select by active status
+ editor: record created or revoked by user
+
+ - table-specific search options -
+ use of these options will implicitly limit the search to applicable tables
+ package: only for given package
+ build: only for given build
+ tag: only for given tag
+ user: only affecting a given user
+ permission: only relating to a given permission
+ external_repo: only relating to an external repo
+ build_target: only relating to a build target
+ group: only relating to a (comps) group
+ """
+ common_fields = {
+ #fields:aliases common to all versioned tables
+ 'active' : 'active',
+ 'create_event' : 'create_event',
+ 'revoke_event' : 'revoke_event',
+ 'creator_id' : 'creator_id',
+ 'revoker_id' : 'revoker_id',
+ }
+ common_joins = [
+ "events AS ev1 ON ev1.id = create_event",
+ "LEFT OUTER JOIN events AS ev2 ON ev2.id = revoke_event",
+ "users AS creator ON creator.id = creator_id",
+ "LEFT OUTER JOIN users AS revoker ON revoker.id = revoker_id",
+ ]
+ common_joined_fields = {
+ 'creator.name' : 'creator_name',
+ 'revoker.name' : 'revoker_name',
+ 'EXTRACT(EPOCH FROM ev1.time) AS create_ts' : 'create_ts',
+ 'EXTRACT(EPOCH FROM ev2.time) AS revoke_ts' : 'revoke_ts',
+ }
+ table_fields = {
+ 'user_perms' : ['user_id', 'perm_id'],
+ 'user_groups' : ['user_id', 'group_id'],
+ 'tag_inheritance' : ['tag_id', 'parent_id', 'priority', 'maxdepth', 'intransitive', 'noconfig', 'pkg_filter'],
+ 'tag_config' : ['tag_id', 'arches', 'perm_id', 'locked', 'maven_support', 'maven_include_all'],
+ 'tag_extra' : ['tag_id', 'key', 'value'],
+ 'build_target_config' : ['build_target_id', 'build_tag', 'dest_tag'],
+ 'external_repo_config' : ['external_repo_id', 'url'],
+ 'tag_external_repos' : ['tag_id', 'external_repo_id', 'priority'],
+ 'tag_listing' : ['build_id', 'tag_id'],
+ 'tag_packages' : ['package_id', 'tag_id', 'owner', 'blocked', 'extra_arches'],
+ 'group_config' : ['group_id', 'tag_id', 'blocked', 'exported', 'display_name', 'is_default', 'uservisible',
+ 'description', 'langonly', 'biarchonly'],
+ 'group_req_listing' : ['group_id', 'tag_id', 'req_id', 'blocked', 'type', 'is_metapkg'],
+ 'group_package_listing' : ['group_id', 'tag_id', 'package', 'blocked', 'type', 'basearchonly', 'requires'],
+ }
+ name_joins = {
+ #joins triggered by table fields for name lookup
+ #field : [table, join-alias, alias]
+ 'user_id' : ['users', 'users', 'user'],
+ 'perm_id' : ['permissions', 'permission'],
+ #group_id is overloaded (special case below)
+ 'tag_id' : ['tag'],
+ 'parent_id' : ['tag', 'parent'],
+ 'build_target_id' : ['build_target'],
+ 'build_tag' : ['tag', 'build_tag'],
+ 'dest_tag' : ['tag', 'dest_tag'],
+ 'external_repo_id' : ['external_repo'],
+ # build_id is special cased
+ 'package_id' : ['package'],
+ 'owner' : ['users', 'owner'],
+ 'req_id' : ['groups', 'req'],
+ }
+ if tables is None:
+ tables = table_fields.keys()
+ tables.sort()
+ else:
+ for table in tables:
+ if table not in table_fields:
+ raise koji.GenericError, "Unknown history table: %s" % table
+ ret = {}
+ for table in tables:
+ fields = {}
+ for field in common_fields:
+ fullname = "%s.%s" % (table, field)
+ fields[fullname] = common_fields[field]
+ joins = list(common_joins)
+ fields.update(common_joined_fields)
+ joined = {}
+ for field in table_fields[table]:
+ fullname = "%s.%s" % (table,field)
+ fields[fullname] = field
+ name_join = name_joins.get(field)
+ if name_join:
+ tbl = join_as = name_join[0]
+ if len(name_join) > 1:
+ join_as = name_join[1]
+ joined[tbl] = join_as
+ fullname = "%s.name" % join_as
+ if len(name_join) > 2:
+ #apply alias
+ fields[fullname] = "%s.name" % name_join[2]
+ else:
+ fields[fullname] = fullname
+ if join_as == tbl:
+ joins.append('LEFT OUTER JOIN %s ON %s = %s.id' % (tbl, field, tbl))
+ else:
+ joins.append('LEFT OUTER JOIN %s AS %s ON %s = %s.id' % (tbl, join_as, field, join_as))
+ elif field == 'build_id':
+ #special case
+ fields.update({
+ 'package.name' : 'name', #XXX?
+ 'build.version' : 'version',
+ 'build.release' : 'release',
+ 'build.epoch' : 'epoch',
+ 'build.state' : 'build.state',
+ })
+ joins.extend([
+ 'build ON build_id = build.id',
+ 'package ON build.pkg_id = package.id',
+ ])
+ joined['build'] = 'build'
+ joined['package'] = 'package'
+ elif field == 'group_id':
+ if table.startswith('group_'):
+ fields['groups.name'] = 'group.name'
+ joins.append('groups ON group_id = groups.id')
+ joined['groups'] = 'groups'
+ elif table == 'user_groups':
+ fields['usergroup.name'] = 'group.name'
+ joins.append('users AS usergroup ON group_id = usergroup.id')
+ joined['users'] = 'usergroup'
+ clauses = []
+ skip = False
+ data = {}
+ for arg in kwargs:
+ value = kwargs[arg]
+ if arg == 'tag':
+ if 'tag' not in joined:
+ skip = True
+ break
+ data['tag_id'] = get_tag_id(value, strict=True)
+ if table == 'tag_inheritance':
+ #special cased because there are two tag columns
+ clauses.append("tag_id = %(tag_id)i OR parent_id = %(tag_id)i")
+ else:
+ clauses.append("%s.id = %%(tag_id)i" % joined['tag'])
+ elif arg == 'build':
+ if 'build' not in joined:
+ skip = True
+ break
+ data['build_id'] = get_build(value, strict=True)['id']
+ clauses.append("build.id = %(build_id)i")
+ elif arg == 'package':
+ pkg_field_name = "%s.package" % table
+ if 'package' in joined:
+ data['pkg_id'] = get_package_id(value, strict=True)
+ clauses.append("package.id = %(pkg_id)i")
+ elif pkg_field_name in fields:
+ # e.g. group_package_listing
+ data['group_package'] = str(value)
+ clauses.append("%s = %%(group_package)s" % pkg_field_name)
+ else:
+ skip = True
+ break
+ elif arg == 'user':
+ if 'users' not in joined:
+ skip = True
+ break
+ data['affected_user_id'] = get_user(value, strict=True)['id']
+ clauses.append("%s.id = %%(affected_user_id)i" % joined['users'])
+ elif arg == 'permission':
+ if 'permissions' not in joined:
+ skip = True
+ break
+ data['perm_id'] = get_perm_id(value, strict=True)
+ clauses.append("%s.id = %%(perm_id)i" % joined['permissions'])
+ elif arg == 'external_repo':
+ if 'external_repo' not in joined:
+ skip = True
+ break
+ data['external_repo_id'] = get_external_repo_id(value, strict=True)
+ clauses.append("%s.id = %%(external_repo_id)i" % joined['external_repo'])
+ elif arg == 'build_target':
+ if 'build_target' not in joined:
+ skip = True
+ break
+ data['build_target_id'] = get_build_target_id(value, strict=True)
+ clauses.append("%s.id = %%(build_target_id)i" % joined['build_target'])
+ elif arg == 'group':
+ if 'groups' not in joined:
+ skip = True
+ break
+ data['group_id'] = get_group_id(value, strict=True)
+ clauses.append("%s.id = %%(group_id)i" % joined['groups'])
+ elif arg == 'active':
+ if value:
+ clauses.append('active = TRUE')
+ elif value is not None:
+ clauses.append('active IS NULL')
+ elif arg == 'editor':
+ data['editor'] = get_user(value, strict=True)['id']
+ clauses.append('creator.id = %(editor)i OR revoker.id = %(editor)i')
+ fields['creator.id = %(editor)i'] = '_created_by'
+ fields['revoker.id = %(editor)i'] = '_revoked_by'
+ elif arg == 'after':
+ if not isinstance(value, basestring):
+ value = datetime.datetime.fromtimestamp(value).isoformat(' ')
+ data['after'] = value
+ clauses.append('ev1.time > %(after)s OR ev2.time > %(after)s')
+ fields['ev1.time > %(after)s'] = '_created_after'
+ fields['ev2.time > %(after)s'] = '_revoked_after'
+ #clauses.append('EXTRACT(EPOCH FROM ev1.time) > %(after)s OR EXTRACT(EPOCH FROM ev2.time) > %(after)s')
+ elif arg == 'afterEvent':
+ data['afterEvent'] = value
+ c_test = '%s.create_event > %%(afterEvent)i' % table
+ r_test = '%s.revoke_event > %%(afterEvent)i' % table
+ clauses.append(' OR '.join([c_test, r_test]))
+ fields[c_test] = '_created_after_event'
+ fields[r_test] = '_revoked_after_event'
+ elif arg == 'before':
+ if not isinstance(value, basestring):
+ value = datetime.datetime.fromtimestamp(value).isoformat(' ')
+ data['before'] = value
+ clauses.append('ev1.time < %(before)s OR ev2.time < %(before)s')
+ #clauses.append('EXTRACT(EPOCH FROM ev1.time) < %(before)s OR EXTRACT(EPOCH FROM ev2.time) < %(before)s')
+ fields['ev1.time < %(before)s'] = '_created_before'
+ fields['ev2.time < %(before)s'] = '_revoked_before'
+ elif arg == 'beforeEvent':
+ data['beforeEvent'] = value
+ c_test = '%s.create_event < %%(beforeEvent)i' % table
+ r_test = '%s.revoke_event < %%(beforeEvent)i' % table
+ clauses.append(' OR '.join([c_test, r_test]))
+ fields[c_test] = '_created_before_event'
+ fields[r_test] = '_revoked_before_event'
+ if skip:
+ continue
+ fields, aliases = zip(*fields.items())
+ query = QueryProcessor(columns=fields, aliases=aliases, tables=[table],
+ joins=joins, clauses=clauses, values=data)
+ ret[table] = query.iterate()
+ return ret
+
+
+def tag_history(build=None, tag=None, package=None, active=None, queryOpts=None):
+ """Returns historical tag data
+
+ package: only for given package
+ build: only for given build
+ tag: only for given tag
+ """
+ fields = ('build.id', 'package.name', 'build.version', 'build.release',
+ 'tag.id', 'tag.name', 'tag_listing.active',
+ 'tag_listing.create_event', 'tag_listing.revoke_event',
+ 'tag_listing.creator_id', 'tag_listing.revoker_id',
+ 'creator.name', 'revoker.name',
+ 'EXTRACT(EPOCH FROM ev1.time)', 'EXTRACT(EPOCH FROM ev2.time)',
+ 'maven_builds.build_id', 'win_builds.build_id')
+ aliases = ('build_id', 'name', 'version', 'release',
+ 'tag_id', 'tag_name', 'active',
+ 'create_event', 'revoke_event',
+ 'creator_id', 'revoker_id',
+ 'creator_name', 'revoker_name',
+ 'create_ts', 'revoke_ts',
+ 'maven_build_id', 'win_build_id')
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ tables = ['tag_listing']
+ joins = ["tag ON tag.id = tag_listing.tag_id",
+ "build ON build.id = tag_listing.build_id",
+ "package ON package.id = build.pkg_id",
+ "events AS ev1 ON ev1.id = tag_listing.create_event",
+ "LEFT OUTER JOIN events AS ev2 ON ev2.id = tag_listing.revoke_event",
+ "users AS creator ON creator.id = tag_listing.creator_id",
+ "LEFT OUTER JOIN users AS revoker ON revoker.id = tag_listing.revoker_id",
+ "LEFT OUTER JOIN maven_builds ON maven_builds.build_id = build.id",
+ "LEFT OUTER JOIN win_builds ON win_builds.build_id = build.id"]
+ clauses = []
+ if tag is not None:
+ tag_id = get_tag_id(tag, strict=True)
+ clauses.append("tag.id = %(tag_id)i")
+ if build is not None:
+ build_id = get_build(build, strict=True)['id']
+ clauses.append("build.id = %(build_id)i")
+ if package is not None:
+ pkg_id = get_package_id(package, strict=True)
+ clauses.append("package.id = %(pkg_id)i")
+ if active is True:
+ clauses.append("tag_listing.active is true")
+ elif active is False:
+ clauses.append("tag_listing.active is not true")
+ query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,
+ joins=joins, clauses=clauses, values=locals(),
+ opts=queryOpts)
+ return query.iterate()
+
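+# Illustrative tag_history() call: iterate over the active tag entries for a
+# package (package name hypothetical):
+#   for entry in tag_history(package='bash', active=True):
+#       pass  # entry carries tag_name, NVR fields, create/revoke events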
+def untagged_builds(name=None, queryOpts=None):
+ """Returns the list of untagged builds"""
+ fields = ('build.id', 'package.name', 'build.version', 'build.release')
+ aliases = ('id', 'name', 'version', 'release')
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ tables = ('build',)
+ joins = []
+ if name is None:
+ joins.append("""package ON package.id = build.pkg_id""")
+ else:
+ joins.append("""package ON package.name=%(name)s AND package.id = build.pkg_id""")
+ joins.append("""LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id
+ AND tag_listing.active = TRUE""")
+ clauses = ["tag_listing.tag_id IS NULL", "build.state = %(st_complete)i"]
+ #q = """SELECT build.id, package.name, build.version, build.release
+ #FROM build
+ # JOIN package on package.id = build.pkg_id
+ # LEFT OUTER JOIN tag_listing ON tag_listing.build_id = build.id
+ # AND tag_listing.active IS TRUE
+ #WHERE tag_listing.tag_id IS NULL AND build.state = %(st_complete)i"""
+ #return _multiRow(q, locals(), aliases)
+ query = QueryProcessor(columns=fields, aliases=aliases, tables=tables,
+ joins=joins, clauses=clauses, values=locals(),
+ opts=queryOpts)
+ return query.iterate()
+
+def build_map():
+ """Map which builds were used in the buildroots of other builds
+
+ To be used for garbage collection
+ """
+ # find rpms whose buildroots we were in
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ fields = ('used', 'built')
+ q = """SELECT DISTINCT used.id, built.id
+ FROM buildroot_listing
+ JOIN rpminfo AS r_used ON r_used.id = buildroot_listing.rpm_id
+ JOIN rpminfo AS r_built ON r_built.buildroot_id = buildroot_listing.buildroot_id
+ JOIN build AS used ON used.id = r_used.build_id
+ JOIN build AS built ON built.id = r_built.build_id
+ WHERE built.state = %(st_complete)i AND used.state = %(st_complete)i"""
+ return _multiRow(q, locals(), fields)
+
+def build_references(build_id, limit=None):
+ """Returns references to a build
+
+ This call is used to determine whether a build can be deleted
+ The optional limit arg is used to limit the size of the buildroot
+ references.
+ """
+ #references (that matter):
+ # tag_listing
+ # buildroot_listing (via rpminfo)
+ # buildroot_archives (via archiveinfo)
+ # ?? rpmsigs (via rpminfo)
+ ret = {}
+
+ # find tags
+ q = """SELECT tag_id, tag.name FROM tag_listing JOIN tag on tag_id = tag.id
+ WHERE build_id = %(build_id)i AND active = TRUE"""
+ ret['tags'] = _multiRow(q, locals(), ('id', 'name'))
+
+ #we'll need the component rpm ids for the rest
+ q = """SELECT id FROM rpminfo WHERE build_id=%(build_id)i"""
+ rpm_ids = _fetchMulti(q, locals())
+
+ # find rpms whose buildroots we were in
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ fields = ('id', 'name', 'version', 'release', 'arch', 'build_id')
+ idx = {}
+ q = """SELECT rpminfo.id, rpminfo.name, rpminfo.version, rpminfo.release, rpminfo.arch, rpminfo.build_id
+ FROM buildroot_listing
+ JOIN rpminfo ON rpminfo.buildroot_id = buildroot_listing.buildroot_id
+ JOIN build on rpminfo.build_id = build.id
+ WHERE buildroot_listing.rpm_id = %(rpm_id)s
+ AND build.state = %(st_complete)i"""
+ if limit is not None:
+ q += "\nLIMIT %(limit)i"
+ for (rpm_id,) in rpm_ids:
+ for row in _multiRow(q, locals(), fields):
+ idx.setdefault(row['id'], row)
+ if limit is not None and len(idx) > limit:
+ break
+ ret['rpms'] = idx.values()
+
+ # find archives whose buildroots we were in
+ q = """SELECT id FROM archiveinfo WHERE build_id = %(build_id)i"""
+ archive_ids = _fetchMulti(q, locals())
+ fields = ('id', 'type_id', 'type_name', 'build_id', 'filename')
+ idx = {}
+ q = """SELECT archiveinfo.id, archiveinfo.type_id, archivetypes.name, archiveinfo.build_id, archiveinfo.filename
+ FROM buildroot_archives
+ JOIN archiveinfo ON archiveinfo.buildroot_id = buildroot_archives.buildroot_id
+ JOIN build ON archiveinfo.build_id = build.id
+ JOIN archivetypes ON archivetypes.id = archiveinfo.type_id
+ WHERE buildroot_archives.archive_id = %(archive_id)i
+ AND build.state = %(st_complete)i"""
+ if limit is not None:
+ q += "\nLIMIT %(limit)i"
+ for (archive_id,) in archive_ids:
+ for row in _multiRow(q, locals(), fields):
+ idx.setdefault(row['id'], row)
+ if limit is not None and len(idx) > limit:
+ break
+ ret['archives'] = idx.values()
+
+ # find timestamp of most recent use in a buildroot
+ q = """SELECT buildroot.create_event
+ FROM buildroot_listing
+ JOIN buildroot ON buildroot_listing.buildroot_id = buildroot.id
+ WHERE buildroot_listing.rpm_id = %(rpm_id)s
+ ORDER BY buildroot.create_event DESC
+ LIMIT 1"""
+ event_id = -1
+ for (rpm_id,) in rpm_ids:
+ tmp_id = _singleValue(q, locals(), strict=False)
+ if tmp_id is not None and tmp_id > event_id:
+ event_id = tmp_id
+ if event_id == -1:
+ ret['last_used'] = None
+ else:
+ q = """SELECT EXTRACT(EPOCH FROM get_event_time(%(event_id)i))"""
+ ret['last_used'] = _singleValue(q, locals())
+
+ q = """SELECT buildroot.create_event
+ FROM buildroot_archives
+ JOIN buildroot ON buildroot_archives.buildroot_id = buildroot.id
+ WHERE buildroot_archives.archive_id = %(archive_id)i
+ ORDER BY buildroot.create_event DESC
+ LIMIT 1"""
+ event_id = -1
+ for (archive_id,) in archive_ids:
+ tmp_id = _singleValue(q, locals(), strict=False)
+ if tmp_id is not None and tmp_id > event_id:
+ event_id = tmp_id
+ if event_id == -1:
+ pass
+ else:
+ q = """SELECT EXTRACT(EPOCH FROM get_event_time(%(event_id)i))"""
+ last_archive_use = _singleValue(q, locals())
+ if ret['last_used'] is None or last_archive_use > ret['last_used']:
+ ret['last_used'] = last_archive_use
+
+ return ret
+
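+# Illustrative result shape for build_references() above (values hypothetical):
+#   {'tags': [{'id': 5, 'name': 'f21'}],
+#    'rpms': [...], 'archives': [...],
+#    'last_used': 1417554000.0}   # epoch seconds, or None if never used
+# delete_build() below refuses deletion while any of these are non-empty/recent.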
+def delete_build(build, strict=True, min_ref_age=604800):
+ """delete a build, if possible
+
+ Attempts to delete a build. A build can only be deleted if it is
+ unreferenced.
+
+ If strict is true (default), an exception is raised if the build cannot
+ be deleted.
+
+ Note that a deleted build is not completely gone. It is marked deleted and most
+ data remains in the database; mainly, the rpm files are removed from disk.
+
+ Note in particular that deleting a build DOES NOT free any NVRs (or NVRAs) for
+ reuse.
+
+ Returns True if successful, False otherwise
+ """
+ context.session.assertPerm('admin')
+ binfo = get_build(build, strict=True)
+ refs = build_references(binfo['id'], limit=10)
+ if refs['tags']:
+ if strict:
+ raise koji.GenericError, "Cannot delete build, tagged: %s" % refs['tags']
+ return False
+ if refs['rpms']:
+ if strict:
+ raise koji.GenericError, "Cannot delete build, used in buildroots: %s" % refs['rpms']
+ return False
+ if refs['archives']:
+ if strict:
+ raise koji.GenericError, "Cannot delete build, used in archive buildroots: %s" % refs['archives']
+ return False
+ if refs['last_used']:
+ age = time.time() - refs['last_used']
+ if age < min_ref_age:
+ if strict:
+ raise koji.GenericError, "Cannot delete build, used in recent buildroot"
+ return False
+ #otherwise we can delete it
+ _delete_build(binfo)
+ return True
+
+def _delete_build(binfo):
+ """Delete a build (no reference checks)
+
+ Please consider calling delete_build instead
+ """
+ # build-related data:
+ # build KEEP (marked deleted)
+ # maven_builds KEEP
+ # task ??
+ # tag_listing REVOKE (versioned) (but should ideally be empty anyway)
+ # rpminfo KEEP
+ # buildroot_listing KEEP (but should ideally be empty anyway)
+ # rpmsigs DELETE
+ # archiveinfo KEEP
+ # buildroot_archives KEEP (but should ideally be empty anyway)
+ # files on disk: DELETE
+ st_deleted = koji.BUILD_STATES['DELETED']
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=binfo['state'], new=st_deleted, info=binfo)
+ build_id = binfo['id']
+ q = """SELECT id FROM rpminfo WHERE build_id=%(build_id)i"""
+ rpm_ids = _fetchMulti(q, locals())
+ for (rpm_id,) in rpm_ids:
+ delete = """DELETE FROM rpmsigs WHERE rpm_id=%(rpm_id)i"""
+ _dml(delete, locals())
+ update = UpdateProcessor('tag_listing', clauses=["build_id=%(build_id)i"], values=locals())
+ update.make_revoke()
+ update.execute()
+ update = """UPDATE build SET state=%(st_deleted)i WHERE id=%(build_id)i"""
+ _dml(update, locals())
+ #now clear the build dirs
+ dirs_to_clear = []
+ builddir = koji.pathinfo.build(binfo)
+ if os.path.exists(builddir):
+ dirs_to_clear.append(builddir)
+ for filedir in dirs_to_clear:
+ rv = os.system(r"find '%s' -xdev \! -type d -print0 |xargs -0 rm -f" % filedir)
+ if rv != 0:
+ raise koji.GenericError, 'file removal failed (code %r) for %s' % (rv, filedir)
+ #and clear out the emptied dirs
+ rv = os.system(r"find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % filedir)
+ if rv != 0:
+ raise koji.GenericError, 'directory removal failed (code %r) for %s' % (rv, filedir)
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=binfo['state'], new=st_deleted, info=binfo)
+
+def reset_build(build):
+ """Reset a build so that it can be reimported
+
+ WARNING: this function is potentially destructive. use with care.
+ nulls task_id
+ sets state to CANCELED
+ clears data in rpminfo
+ removes rpminfo entries from any buildroot_listings [!]
+ clears data in archiveinfo, maven_info
+ removes archiveinfo entries from buildroot_archives
+ remove files related to the build
+
+ note, we don't actually delete the build data, so tags
+ remain intact
+ """
+ # Only an admin may do this
+ context.session.assertPerm('admin')
+ binfo = get_build(build)
+ if not binfo:
+ #nothing to do
+ return
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=binfo['state'], new=koji.BUILD_STATES['CANCELED'], info=binfo)
+ q = """SELECT id FROM rpminfo WHERE build_id=%(id)i"""
+ ids = _fetchMulti(q, binfo)
+ for (rpm_id,) in ids:
+ delete = """DELETE FROM rpmsigs WHERE rpm_id=%(rpm_id)i"""
+ _dml(delete, locals())
+ delete = """DELETE FROM buildroot_listing WHERE rpm_id=%(rpm_id)i"""
+ _dml(delete, locals())
+ delete = """DELETE FROM rpminfo WHERE build_id=%(id)i"""
+ _dml(delete, binfo)
+ q = """SELECT id FROM archiveinfo WHERE build_id=%(id)i"""
+ ids = _fetchMulti(q, binfo)
+ for (archive_id,) in ids:
+ delete = """DELETE FROM maven_archives WHERE archive_id=%(archive_id)i"""
+ _dml(delete, locals())
+ delete = """DELETE FROM win_archives WHERE archive_id=%(archive_id)i"""
+ _dml(delete, locals())
+ delete = """DELETE FROM buildroot_archives WHERE archive_id=%(archive_id)i"""
+ _dml(delete, locals())
+ delete = """DELETE FROM archiveinfo WHERE build_id=%(id)i"""
+ _dml(delete, binfo)
+ delete = """DELETE FROM maven_builds WHERE build_id = %(id)i"""
+ _dml(delete, binfo)
+ delete = """DELETE FROM win_builds WHERE build_id = %(id)i"""
+ _dml(delete, binfo)
+ binfo['state'] = koji.BUILD_STATES['CANCELED']
+ update = """UPDATE build SET state=%(state)i, task_id=NULL WHERE id=%(id)i"""
+ _dml(update, binfo)
+ #now clear the build dirs
+ dirs_to_clear = []
+ builddir = koji.pathinfo.build(binfo)
+ if os.path.exists(builddir):
+ dirs_to_clear.append(builddir)
+ for filedir in dirs_to_clear:
+ rv = os.system(r"find '%s' -xdev \! -type d -print0 |xargs -0 rm -f" % filedir)
+ if rv != 0:
+ raise koji.GenericError, 'file removal failed (code %r) for %s' % (rv, filedir)
+ #and clear out the emptied dirs
+ rv = os.system(r"find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % filedir)
+ if rv != 0:
+ raise koji.GenericError, 'directory removal failed (code %r) for %s' % (rv, filedir)
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=binfo['state'], new=koji.BUILD_STATES['CANCELED'], info=binfo)
+
+def cancel_build(build_id, cancel_task=True):
+ """Cancel a build
+
+ Calling function should perform permission checks.
+
+ If the build is associated with a task, cancel the task as well (unless
+ cancel_task is False).
+ Return True if the build was successfully canceled, False if not.
+
+ The cancel_task option is used to prevent loops between task- and build-
+ cancellation.
+ """
+ st_canceled = koji.BUILD_STATES['CANCELED']
+ st_building = koji.BUILD_STATES['BUILDING']
+ build = get_build(build_id, strict=True)
+ if build['state'] != st_building:
+ return False
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=build['state'], new=st_canceled, info=build)
+ update = """UPDATE build
+ SET state = %(st_canceled)i, completion_time = NOW()
+ WHERE id = %(build_id)i AND state = %(st_building)i"""
+ _dml(update, locals())
+ build = get_build(build_id)
+ if build['state'] != st_canceled:
+ return False
+ task_id = build['task_id']
+ if task_id is not None:
+ build_notification(task_id, build_id)
+ if cancel_task:
+ Task(task_id).cancelFull(strict=False)
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=build['state'], new=st_canceled, info=build)
+ return True
+
+def _get_build_target(task_id):
+ # XXX Should we be storing a reference to the build target
+ # in the build table for reproducibility?
+ task = Task(task_id)
+ info = task.getInfo(request=True)
+ request = info['request']
+ if info['method'] in ('build', 'maven'):
+ # request is (source-url, build-target, map-of-other-options)
+ if request[1]:
+ return get_build_target(request[1])
+ elif info['method'] == 'winbuild':
+ # request is (vm-name, source-url, build-target, map-of-other-options)
+ if request[2]:
+ return get_build_target(request[2])
+ return None
+
+def get_notification_recipients(build, tag_id, state):
+ """
+ Return the list of email addresses that should be notified about events
+ involving the given build and tag. This could be the build into that tag
+ succeeding or failing, or the build being manually tagged into or untagged
+ from that tag.
+
+ The list will contain email addresses for all users who have registered for
+ notifications on the package or tag (or both), as well as the package owner
+ for this tag and the user who submitted the build. The list will not contain
+ duplicates.
+ """
+
+ clauses = []
+
+ if build:
+ package_id = build['package_id']
+ clauses.append('package_id = %(package_id)i OR package_id IS NULL')
+ else:
+ clauses.append('package_id IS NULL')
+ if tag_id:
+ clauses.append('tag_id = %(tag_id)i OR tag_id IS NULL')
+ else:
+ clauses.append('tag_id IS NULL')
+ if state != koji.BUILD_STATES['COMPLETE']:
+ clauses.append('success_only = FALSE')
+
+ query = QueryProcessor(columns=('email',), tables=['build_notifications'],
+ clauses=clauses, values=locals(),
+ opts={'asList':True})
+ emails = [result[0] for result in query.execute()]
+
+ email_domain = context.opts['EmailDomain']
+ notify_on_success = context.opts['NotifyOnSuccess']
+
+ if notify_on_success is True or state != koji.BUILD_STATES['COMPLETE']:
+ # user who submitted the build
+ emails.append('%s@%s' % (build['owner_name'], email_domain))
+
+ if tag_id:
+ packages = readPackageList(pkgID=package_id, tagID=tag_id, inherit=True)
+ # owner of the package in this tag, following inheritance
+ pkgdata = packages.get(package_id)
+ # If the package list has changed very recently it is possible we
+ # will get no result.
+ if pkgdata and not pkgdata['blocked']:
+ emails.append('%s@%s' % (pkgdata['owner_name'], email_domain))
+ #FIXME - if tag_id is None, we don't have a good way to get the package owner.
+ # using all package owners from all tags would be way overkill.
+
+ emails_uniq = dict([(x,1) for x in emails]).keys()
+ return emails_uniq
+
+def tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):
+ if context.opts.get('DisableNotifications'):
+ return
+ if is_successful:
+ state = koji.BUILD_STATES['COMPLETE']
+ else:
+ state = koji.BUILD_STATES['FAILED']
+ recipients = {}
+ build = get_build(build_id)
+ if not build:
+ # the build doesn't exist, so there's nothing to send a notification about
+ return None
+ if tag_id:
+ tag = get_tag(tag_id)
+ for email in get_notification_recipients(build, tag['id'], state):
+ recipients[email] = 1
+ if from_id:
+ from_tag = get_tag(from_id)
+ for email in get_notification_recipients(build, from_tag['id'], state):
+ recipients[email] = 1
+ recipients_uniq = recipients.keys()
+ if len(recipients_uniq) > 0 and not (is_successful and ignore_success):
+ task_id = make_task('tagNotification', [recipients_uniq, is_successful, tag_id, from_id, build_id, user_id, ignore_success, failure_msg])
+ return task_id
+ return None
+
+def build_notification(task_id, build_id):
+ if context.opts.get('DisableNotifications'):
+ return
+ build = get_build(build_id)
+ target = _get_build_target(task_id)
+
+ dest_tag = None
+ if target:
+ dest_tag = target['dest_tag']
+
+ if build['state'] == koji.BUILD_STATES['BUILDING']:
+ raise koji.GenericError, 'never send notifications for incomplete builds'
+
+ web_url = context.opts.get('KojiWebURL', 'http://localhost/koji')
+
+ recipients = get_notification_recipients(build, dest_tag, build['state'])
+ if len(recipients) > 0:
+ make_task('buildNotification', [recipients, build, target, web_url])
+
+def get_build_notifications(user_id):
+ fields = ('id', 'user_id', 'package_id', 'tag_id', 'success_only', 'email')
+ query = """SELECT %s
+ FROM build_notifications
+ WHERE user_id = %%(user_id)i
+ """ % ', '.join(fields)
+ return _multiRow(query, locals(), fields)
+
+def new_group(name):
+ """Add a user group to the database"""
+ context.session.assertPerm('admin')
+ if get_user(name):
+ raise koji.GenericError, 'user/group already exists: %s' % name
+ return context.session.createUser(name, usertype=koji.USERTYPES['GROUP'])
+
+def add_group_member(group, user, strict=True):
+ """Add user to group"""
+ context.session.assertPerm('admin')
+ group = get_user(group)
+ user = get_user(user)
+ if group['usertype'] != koji.USERTYPES['GROUP']:
+ raise koji.GenericError, "Not a group: %(name)s" % group
+ if user['usertype'] == koji.USERTYPES['GROUP']:
+ raise koji.GenericError, "Groups cannot be members of other groups"
+ #check to see if user is already a member
+ data = {'user_id' : user['id'], 'group_id' : group['id']}
+ table = 'user_groups'
+ clauses = ('user_id = %(user_id)i', 'group_id = %(group_id)s')
+ query = QueryProcessor(columns=['user_id'], tables=[table],
+ clauses=('active = TRUE',)+clauses,
+ values=data, opts={'rowlock':True})
+ row = query.executeOne()
+ if row:
+ if not strict:
+ return
+ raise koji.GenericError, "User already in group"
+ insert = InsertProcessor(table, data)
+ insert.make_create()
+ insert.execute()
+
+def drop_group_member(group, user):
+ """Drop user from group"""
+ context.session.assertPerm('admin')
+ user = get_user(user, strict=True)
+ ginfo = get_user(group)
+ if not ginfo or ginfo['usertype'] != koji.USERTYPES['GROUP']:
+ raise koji.GenericError, "No such group: %s" % group
+ data = {'user_id' : user['id'], 'group_id' : ginfo['id']}
+ clauses=["user_id = %(user_id)i", "group_id = %(group_id)i"]
+ update = UpdateProcessor('user_groups', values=data, clauses=clauses)
+ update.make_revoke()
+ update.execute()
+
+def get_group_members(group):
+ """Get the members of a group"""
+ context.session.assertPerm('admin')
+ group = get_user(group)
+ if group['usertype'] != koji.USERTYPES['GROUP']:
+ raise koji.GenericError, "Not a group: %(name)s" % group
+ group_id = group['id']
+ fields = ('id','name','usertype','krb_principal')
+ q = """SELECT %s FROM user_groups
+ JOIN users ON user_id = users.id
+ WHERE active = TRUE AND group_id = %%(group_id)i""" % ','.join(fields)
+ return _multiRow(q, locals(), fields)
+
+def set_user_status(user, status):
+ context.session.assertPerm('admin')
+ if not koji.USER_STATUS.get(status):
+ raise koji.GenericError, 'invalid status: %s' % status
+ if user['status'] == status:
+ # nothing to do
+ return
+ update = """UPDATE users SET status = %(status)i WHERE id = %(user_id)i"""
+ user_id = user['id']
+ rows = _dml(update, locals())
+ # sanity check
+ if rows == 0:
+ raise koji.GenericError, 'invalid user ID: %i' % user_id
+
+
+def get_event():
+ """Get an event id for this transaction
+
+ We cache the result in context, so subsequent calls in the same transaction will
+ get the same event.
+ Note that this will persist across calls in a multiCall, which is fine because
+ it is all one transaction.
+ """
+ if hasattr(context, 'event_id'):
+ return context.event_id
+ event_id = _singleValue("SELECT get_event()")
+ context.event_id = event_id
+ return event_id
+
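+# Because the result is cached on the request context, repeated calls within
+# one transaction agree, so versioned inserts and revokes line up on the same
+# event (illustrative):
+#   assert get_event() == get_event()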
+
+class InsertProcessor(object):
+ """Build an insert statement
+
+ table - the table to insert into
+ data - a dictionary of data to insert (keys = column names)
+ rawdata - data to insert specified as sql expressions rather than python values
+
+ does not support query-based inserts or "DEFAULT VALUES"
+ """
+
+ def __init__(self, table, data=None, rawdata=None):
+ self.table = table
+ self.data = {}
+ if data:
+ self.data.update(data)
+ self.rawdata = {}
+ if rawdata:
+ self.rawdata.update(rawdata)
+
+ def __str__(self):
+ if not self.data and not self.rawdata:
+ return "-- incomplete update: no assigns"
+ parts = ['INSERT INTO %s ' % self.table]
+ columns = self.data.keys()
+ columns.extend(self.rawdata.keys())
+ parts.append("(%s) " % ', '.join(columns))
+ values = []
+ for key in columns:
+ if self.data.has_key(key):
+ values.append("%%(%s)s" % key)
+ else:
+ values.append("(%s)" % self.rawdata[key])
+ parts.append("VALUES (%s)" % ', '.join(values))
+ return ''.join(parts)
+
+ def __repr__(self):
+ return "<InsertProcessor: %r>" % vars(self)
+
+ def set(self, **kwargs):
+ """Set data via keyword args"""
+ self.data.update(kwargs)
+
+ def rawset(self, **kwargs):
+ """Set rawdata via keyword args"""
+ self.rawdata.update(kwargs)
+
+ def make_create(self, event_id=None, user_id=None):
+ if event_id is None:
+ event_id = get_event()
+ if user_id is None:
+ context.session.assertLogin()
+ user_id = context.session.user_id
+ self.data['create_event'] = event_id
+ self.data['creator_id'] = user_id
+
+ def execute(self):
+ return _dml(str(self), self.data)
+
+
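+# Minimal InsertProcessor sketch, mirroring the rpmsigs insert above
+# (values hypothetical):
+#   insert = InsertProcessor('rpmsigs',
+#                            data={'rpm_id': 1, 'sigkey': '', 'sighash': 'abc'})
+#   insert.execute()
+# which renders roughly as:
+#   INSERT INTO rpmsigs (rpm_id, sigkey, sighash)
+#   VALUES (%(rpm_id)s, %(sigkey)s, %(sighash)s)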
+class UpdateProcessor(object):
+ """Build an update statement
+
+ table - the table to update
+ data - a dictionary of data to update (keys = column names)
+ rawdata - data to update specified as sql expressions rather than python values
+ clauses - a list of where clauses which will be ANDed together
+ values - dict of values used in clauses
+
+ does not support the FROM clause
+ """
+
+ def __init__(self, table, data=None, rawdata=None, clauses=None, values=None):
+ self.table = table
+ self.data = {}
+ if data:
+ self.data.update(data)
+ self.rawdata = {}
+ if rawdata:
+ self.rawdata.update(rawdata)
+ self.clauses = []
+ if clauses:
+ self.clauses.extend(clauses)
+ self.values = {}
+ if values:
+ self.values.update(values)
+
+ def __str__(self):
+ if not self.data and not self.rawdata:
+ return "-- incomplete update: no assigns"
+ parts = ['UPDATE %s SET ' % self.table]
+ assigns = ["%s = %%(data.%s)s" % (key, key) for key in self.data]
+ assigns.extend(["%s = (%s)" % (key, self.rawdata[key]) for key in self.rawdata])
+ parts.append(', '.join(assigns))
+ if self.clauses:
+ parts.append('\nWHERE ')
+ parts.append(' AND '.join(["( %s )" % c for c in self.clauses]))
+ return ''.join(parts)
+
+ def __repr__(self):
+ return "<UpdateProcessor: %r>" % vars(self)
+
+ def get_values(self):
+ """Returns unified values dict, including data"""
+ ret = {}
+ ret.update(self.values)
+ for key in self.data:
+ ret["data."+key] = self.data[key]
+ return ret
+
+ def set(self, **kwargs):
+ """Set data via keyword args"""
+ self.data.update(kwargs)
+
+ def rawset(self, **kwargs):
+ """Set rawdata via keyword args"""
+ self.rawdata.update(kwargs)
+
+ def make_revoke(self, event_id=None, user_id=None):
+ """Add standard revoke options to the update"""
+ if event_id is None:
+ event_id = get_event()
+ if user_id is None:
+ context.session.assertLogin()
+ user_id = context.session.user_id
+ self.data['revoke_event'] = event_id
+ self.data['revoker_id'] = user_id
+ self.rawdata['active'] = 'NULL'
+ self.clauses.append('active = TRUE')
+
+ def execute(self):
+ return _dml(str(self), self.get_values())
+
+
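+# Minimal UpdateProcessor sketch for revoking a versioned row, mirroring the
+# tag_listing revoke in _delete_build() above (values hypothetical):
+#   update = UpdateProcessor('tag_listing',
+#                            clauses=['build_id=%(build_id)i'],
+#                            values={'build_id': 1234})
+#   update.make_revoke()  # stamps revoke_event/revoker_id, sets active = NULL
+#   update.execute()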
+class QueryProcessor(object):
+ """
+ Build a query from its components.
+ - columns, aliases, tables: lists of the column names to retrieve,
+ the tables to retrieve them from, and the key names to use when
+ returning values as a map, respectively
+ - joins: a list of joins in the form 'table1 ON table1.col1 = table2.col2', 'JOIN' will be
+ prepended automatically; if extended join syntax (LEFT, OUTER, etc.) is required,
+ it can be specified, and 'JOIN' will not be prepended
+ - clauses: a list of where clauses in the form 'table1.col1 OPER table2.col2-or-variable';
+ each clause will be surrounded by parentheses and all will be AND'ed together
+ - values: the map that will be used to replace any substitution expressions in the query
+ - opts: a map of query options; currently supported options are:
+ countOnly: if True, return an integer indicating how many results would have been
+ returned, rather than the actual query results
+ order: a column or alias name to use in the 'ORDER BY' clause
+ offset: an integer to use in the 'OFFSET' clause
+ limit: an integer to use in the 'LIMIT' clause
+ asList: if True, return results as a list of lists, where each list contains the
+ column values in query order, rather than the usual list of maps
+ rowlock: if True, use "FOR UPDATE" to lock the queried rows
+ """
+
+ iterchunksize = 1000
+
+ def __init__(self, columns=None, aliases=None, tables=None,
+ joins=None, clauses=None, values=None, opts=None):
+ self.columns = columns
+ self.aliases = aliases
+ if columns and aliases:
+ if len(columns) != len(aliases):
+ raise StandardError, 'column and alias lists must be the same length'
+ self.colsByAlias = dict(zip(aliases, columns))
+ else:
+ self.colsByAlias = {}
+ self.tables = tables
+ self.joins = joins
+ self.clauses = clauses
+ self.cursors = 0
+ if values:
+ self.values = values
+ else:
+ self.values = {}
+ if opts:
+ self.opts = opts
+ else:
+ self.opts = {}
+
+ def countOnly(self, count):
+ self.opts['countOnly'] = count
+
+ def __str__(self):
+ query = \
+"""
+SELECT %(col_str)s
+ FROM %(table_str)s
+%(join_str)s
+%(clause_str)s
+ %(order_str)s
+%(offset_str)s
+ %(limit_str)s
+"""
+ if self.opts.get('countOnly'):
+ if self.opts.get('offset') or self.opts.get('limit'):
+ # If we're counting with an offset and/or limit, we need
+ # to wrap the offset/limited query and then count the results,
+ # rather than trying to offset/limit the single row returned
+ # by count(*). Because we're wrapping the query, we don't care
+ # about the column values.
+ col_str = '1'
+ else:
+ col_str = 'count(*)'
+ else:
+ col_str = self._seqtostr(self.columns)
+ table_str = self._seqtostr(self.tables)
+ join_str = self._joinstr()
+ clause_str = self._seqtostr(self.clauses, sep=')\n AND (')
+ if clause_str:
+ clause_str = ' WHERE (' + clause_str + ')'
+ order_str = self._order()
+ offset_str = self._optstr('offset')
+ limit_str = self._optstr('limit')
+
+ query = query % locals()
+ if self.opts.get('countOnly') and \
+ (self.opts.get('offset') or self.opts.get('limit')):
+ query = 'SELECT count(*)\nFROM (' + query + ') numrows'
+ if self.opts.get('rowlock'):
+ query += '\n FOR UPDATE'
+ return query
+
+ def __repr__(self):
+ return '<QueryProcessor: columns=%r, aliases=%r, tables=%r, joins=%r, clauses=%r, values=%r, opts=%r>' % \
+ (self.columns, self.aliases, self.tables, self.joins, self.clauses, self.values, self.opts)
+
+ def _seqtostr(self, seq, sep=', '):
+ if seq:
+ return sep.join(seq)
+ else:
+ return ''
+
+ def _joinstr(self):
+ if not self.joins:
+ return ''
+ result = ''
+ for join in self.joins:
+ if result:
+ result += '\n'
+ if re.search(r'\bjoin\b', join, re.IGNORECASE):
+ # The join clause already contains the word 'join',
+ # so don't prepend 'JOIN' to it
+ result += ' ' + join
+ else:
+ result += ' JOIN ' + join
+ return result
+
+ def _order(self):
+ # Don't bother sorting if we're just counting
+ if self.opts.get('countOnly'):
+ return ''
+ order_opt = self.opts.get('order')
+ if order_opt:
+ order_exprs = []
+ for order in order_opt.split(','):
+ if order.startswith('-'):
+ order = order[1:]
+ direction = ' DESC'
+ else:
+ direction = ''
+ # Check if we're ordering by alias first
+ orderCol = self.colsByAlias.get(order)
+ if orderCol:
+ pass
+ elif order in self.columns:
+ orderCol = order
+ else:
+ raise StandardError, 'invalid order: ' + order
+ order_exprs.append(orderCol + direction)
+ return 'ORDER BY ' + ', '.join(order_exprs)
+ else:
+ return ''
+
+ def _optstr(self, optname):
+ optval = self.opts.get(optname)
+ if optval:
+ return '%s %i' % (optname.upper(), optval)
+ else:
+ return ''
+
+ def singleValue(self, strict=True):
+ return _singleValue(str(self), self.values, strict=strict)
+
+ def execute(self):
+ query = str(self)
+ if self.opts.get('countOnly'):
+ return _singleValue(query, self.values, strict=True)
+ elif self.opts.get('asList'):
+ return _fetchMulti(query, self.values)
+ else:
+ return _multiRow(query, self.values, (self.aliases or self.columns))
+
+
+ def iterate(self):
+ if self.opts.get('countOnly'):
+ return self.execute()
+ elif self.opts.get('limit') and self.opts['limit'] < self.iterchunksize:
+ return self.execute()
+ else:
+ fields = self.aliases or self.columns
+ fields = list(fields)
+ cname = "qp_cursor_%s_%i_%i" % (id(self), os.getpid(), self.cursors)
+ self.cursors += 1
+ logger.debug('Setting up query iterator. cname=%r', cname)
+ return self._iterate(cname, str(self), self.values.copy(), fields,
+ self.iterchunksize, self.opts.get('asList'))
+
+ def _iterate(self, cname, query, values, fields, chunksize, as_list=False):
+ # We pass all this data into the generator so that the iterator works
+ # from the snapshot when it was generated. Otherwise reuse of the processor
+ # for similar queries could have unpredictable results.
+ query = "DECLARE %s NO SCROLL CURSOR FOR %s" % (cname, query)
+ c = context.cnx.cursor()
+ c.execute(query, values)
+ c.close()
+ query = "FETCH %i FROM %s" % (chunksize, cname)
+ while True:
+ if as_list:
+ buf = _fetchMulti(query, {})
+ else:
+ buf = _multiRow(query, {}, fields)
+ if not buf:
+ break
+ for row in buf:
+ yield row
+ c = context.cnx.cursor()
+ c.execute("CLOSE %s" % cname)
+ c.close()
+
+ def executeOne(self):
+ results = self.execute()
+ if isinstance(results, list):
+ if len(results) > 0:
+ return results[0]
+ else:
+ return None
+ return results
+
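+# Illustrative QueryProcessor usage (values hypothetical):
+#   query = QueryProcessor(columns=['id'], tables=['build'],
+#                          clauses=['state=%(state)i'],
+#                          values={'state': koji.BUILD_STATES['COMPLETE']},
+#                          opts={'order': '-id'})
+#   rows = query.execute()       # list of maps keyed by column/alias names
+#   for row in query.iterate():  # or chunked fetches via a server-side cursor
+#       pass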
+def _applyQueryOpts(results, queryOpts):
+ """
+ Apply queryOpts to results in the same way QueryProcessor would.
+ results is a list of maps.
+ queryOpts is a map which may contain the following fields:
+ countOnly
+ order
+ offset
+ limit
+
+ Note: asList is supported by QueryProcessor but not by this method.
+ We don't know the original query order, and so don't have a way to
+ return a useful list. asList should be handled by the caller.
+ """
+ if queryOpts is None:
+ queryOpts = {}
+ if queryOpts.get('order'):
+ order = queryOpts['order']
+ reverse = False
+ if order.startswith('-'):
+ order = order[1:]
+ reverse = True
+ results.sort(key=lambda o: o[order])
+ if reverse:
+ results.reverse()
+ if queryOpts.get('offset'):
+ results = results[queryOpts['offset']:]
+ if queryOpts.get('limit'):
+ results = results[:queryOpts['limit']]
+ if queryOpts.get('countOnly'):
+ return len(results)
+ else:
+ return results
+
+#
+# Policy Test Handlers
+
+
+class OperationTest(koji.policy.MatchTest):
+ """Checks operation against glob patterns"""
+ name = 'operation'
+ field = 'operation'
+
+def policy_get_user(data):
+ """Determine user from policy data (default to logged-in user)"""
+ if data.has_key('user_id'):
+ return get_user(data['user_id'])
+ elif context.session.logged_in:
+ return get_user(context.session.user_id)
+ return None
+
+def policy_get_pkg(data):
+ """Determine package from policy data (default to logged-in user)
+
+ returns dict as lookup_package
+ if package does not exist yet, the id field will be None
+ """
+ if data.has_key('package'):
+ pkginfo = lookup_package(data['package'], strict=False)
+ if not pkginfo:
+ #for some operations (e.g. adding a new package), the package
+ #entry may not exist yet
+ if isinstance(data['package'], basestring):
+ return {'id' : None, 'name' : data['package']}
+ else:
+ raise koji.GenericError, "Invalid package: %s" % data['package']
+ return pkginfo
+ if data.has_key('build'):
+ binfo = get_build(data['build'], strict=True)
+ return {'id' : binfo['package_id'], 'name' : binfo['name']}
+ #else
+ raise koji.GenericError, "policy requires package data"
+
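+# Example policy data accepted by policy_get_pkg() above (values illustrative):
+#   policy_get_pkg({'package': 'bash'})      # by name; id is None if not yet added
+#   policy_get_pkg({'build': 'bash-4.3-1'})  # via an existing build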
+class NewPackageTest(koji.policy.BaseSimpleTest):
+ """Checks to see if a package exists yet"""
+ name = 'is_new_package'
+ def run(self, data):
+ return (policy_get_pkg(data)['id'] is None)
+
+class PackageTest(koji.policy.MatchTest):
+ """Checks package against glob patterns"""
+ name = 'package'
+ field = '_package'
+ def run(self, data):
+ #we need to find the package name from the base data
+ data[self.field] = policy_get_pkg(data)['name']
+ return super(PackageTest, self).run(data)
+
+class VolumeTest(koji.policy.MatchTest):
+ """Checks storage volume against glob patterns"""
+ name = 'volume'
+ field = '_volume'
+ def run(self, data):
+ #we need to find the volume name from the base data
+ volinfo = None
+ if 'volume' in data:
+ volinfo = lookup_name('volume', data['volume'], strict=False)
+ elif 'build' in data:
+ build = get_build(data['build'])
+ volinfo = {'id': build['volume_id'], 'name': build['volume_name']}
+ if not volinfo:
+ return False
+ data[self.field] = volinfo['name']
+ return super(VolumeTest, self).run(data)
+
+class TagTest(koji.policy.MatchTest):
+ name = 'tag'
+ field = '_tagname'
+
+ def get_tag(self, data):
+ """extract the tag to test against from the data
+
+ return None if there is no tag to test
+ """
+ tag = data.get('tag')
+ if tag is None:
+ return None
+ return get_tag(tag, strict=False)
+
+ def run(self, data):
+ #we need to find the tag name from the base data
+ tinfo = self.get_tag(data)
+ if tinfo is None:
+ return False
+ data[self.field] = tinfo['name']
+ return super(TagTest, self).run(data)
+
+class FromTagTest(TagTest):
+ name = 'fromtag'
+ def get_tag(self, data):
+ tag = data.get('fromtag')
+ if tag is None:
+ return None
+ return get_tag(tag, strict=False)
+
+class HasTagTest(koji.policy.BaseSimpleTest):
+ """Check to see if build (currently) has a given tag"""
+ name = 'hastag'
+ def run(self, data):
+ tags = list_tags(build=data['build'])
+ #True if any of these tags match any of the patterns
+ args = self.str.split()[1:]
+ for tag in tags:
+ for pattern in args:
+ if fnmatch.fnmatch(tag['name'], pattern):
+ return True
+ #otherwise...
+ return False
+
+class SkipTagTest(koji.policy.BaseSimpleTest):
+ """Check for the skip_tag option
+
+ For policies regarding build tasks (e.g. build_from_srpm)
+ """
+ name = 'skip_tag'
+ def run(self, data):
+ return bool(data.get('skip_tag'))
+
+class BuildTagTest(koji.policy.BaseSimpleTest):
+ """Check the build tag of the build
+
+ If build_tag is not provided in policy data, it is determined by the
+ buildroots of the component rpms
+ """
+ name = 'buildtag'
+ def run(self, data):
+ args = self.str.split()[1:]
+ if data.has_key('build_tag'):
+ tagname = get_tag(data['build_tag'], strict=True)['name']
+ for pattern in args:
+ if fnmatch.fnmatch(tagname, pattern):
+ return True
+ #else
+ return False
+ elif data.has_key('build'):
+ #determine build tag from buildroots
+ #in theory, we should find only one unique build tag
+ #it is possible that some rpms could have been imported later and hence
+ #not have a buildroot.
+ #or if the entire build was imported, there will be no buildroots
+ rpms = context.handlers.call('listRPMs', buildID=data['build'])
+ archives = list_archives(buildID=data['build'])
+ br_list = [r['buildroot_id'] for r in rpms]
+ br_list.extend([a['buildroot_id'] for a in archives])
+ for br_id in br_list:
+ if br_id is None:
+ continue
+ tagname = get_buildroot(br_id)['tag_name']
+ for pattern in args:
+ if fnmatch.fnmatch(tagname, pattern):
+ return True
+ #otherwise...
+ return False
+ else:
+ return False
+
+class ImportedTest(koji.policy.BaseSimpleTest):
+ """Check if any part of a build was imported
+
+ This is determined by checking the buildroots of the rpms and archives
+ True if any of them lack a buildroot (strict)"""
+ name = 'imported'
+ def run(self, data):
+ rpms = context.handlers.call('listRPMs', buildID=data['build'])
+ #no test args
+ for rpminfo in rpms:
+ if rpminfo['buildroot_id'] is None:
+ return True
+ for archive in list_archives(buildID=data['build']):
+ if archive['buildroot_id'] is None:
+ return True
+ #otherwise...
+ return False
+
+class ChildTaskTest(koji.policy.BoolTest):
+ name = 'is_child_task'
+ field = 'parent'
+
+class MethodTest(koji.policy.MatchTest):
+ name = 'method'
+ field = 'method'
+
+class UserTest(koji.policy.MatchTest):
+ """Checks username against glob patterns"""
+ name = 'user'
+ field = '_username'
+ def run(self, data):
+ user = policy_get_user(data)
+ if not user:
+ return False
+ data[self.field] = user['name']
+ return super(UserTest, self).run(data)
+
+class VMTest(koji.policy.MatchTest):
+ """Checks a VM name against glob patterns"""
+ name = 'vm_name'
+ field = 'vm_name'
+
+class IsBuildOwnerTest(koji.policy.BaseSimpleTest):
+ """Check if user owns the build"""
+ name = "is_build_owner"
+ def run(self, data):
+ build = get_build(data['build'])
+ owner = get_user(build['owner_id'])
+ user = policy_get_user(data)
+ if not user:
+ return False
+ if owner['id'] == user['id']:
+ return True
+ if owner['usertype'] == koji.USERTYPES['GROUP']:
+ # owner is a group, check to see if user is a member
+ if owner['id'] in koji.auth.get_user_groups(user['id']):
+ return True
+ #otherwise...
+ return False
+
+class UserInGroupTest(koji.policy.BaseSimpleTest):
+ """Check if user is in group(s)
+
+ args are treated as patterns and matched against group name
+ true if user is in /any/ matching group
+ """
+ name = "user_in_group"
+ def run(self, data):
+ user = policy_get_user(data)
+ if not user:
+ return False
+ groups = koji.auth.get_user_groups(user['id'])
+ args = self.str.split()[1:]
+ for group_id, group in groups.iteritems():
+ for pattern in args:
+ if fnmatch.fnmatch(group, pattern):
+ return True
+ #otherwise...
+ return False
+
+class HasPermTest(koji.policy.BaseSimpleTest):
+ """Check if user has permission(s)
+
+ args are treated as patterns and matched against permission name
+ true if user has /any/ matching permission
+ """
+ name = "has_perm"
+ def run(self, data):
+ user = policy_get_user(data)
+ if not user:
+ return False
+ perms = koji.auth.get_user_perms(user['id'])
+ args = self.str.split()[1:]
+ for perm in perms:
+ for pattern in args:
+ if fnmatch.fnmatch(perm, pattern):
+ return True
+ #otherwise...
+ return False
+
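+# The tests above are referenced by name in hub policies. A hypothetical
+# ruleset in the hub configuration might look like (names illustrative only):
+#
+#   tag =
+#       buildtag *-candidate && has_perm autosign :: allow
+#       hastag protected-* :: deny
+#       all :: allow
+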
+class SourceTest(koji.policy.MatchTest):
+ """Match build source
+
+ This is not the cleanest, since we have to crack open the task parameters
+ True if build source matches any of the supplied patterns
+ """
+ name = "source"
+ field = '_source'
+ def run(self, data):
+ if data.has_key('source'):
+ data[self.field] = data['source']
+ elif data.has_key('build'):
+ #crack open the build task
+ build = get_build(data['build'])
+ if build['task_id'] is None:
+ #imported, no source to match against
+ return False
+ task = Task(build['task_id'])
+ info = task.getInfo()
+ params = task.getRequest()
+ #signatures:
+ # build - (src, target, opts=None)
+ # maven - (url, target, opts=None)
+ # winbuild - (name, source_url, target, opts=None)
+ if info['method'] == 'winbuild':
+ data[self.field] = params[1]
+ elif info['method'] == 'indirectionimage':
+ return False
+ else:
+ data[self.field] = params[0]
+ else:
+ return False
+ return super(SourceTest, self).run(data)
+
+class PolicyTest(koji.policy.BaseSimpleTest):
+ """Test named policy
+
+ The named policy must exist
+ Returns True if the policy results in an action of:
+ yes, true, allow
+ Otherwise returns False
+ (Also returns False if there are no matches in the policy)
+ Watch out for loops
+ """
+ name = 'policy'
+
+ def __init__(self, str):
+ super(PolicyTest, self).__init__(str)
+ self.depth = 0
+ # this is used to detect loops. Note that each test in a ruleset is
+ # a distinct instance of its test class. So this value is particular
+ # to a given appearance of a policy check in a ruleset.
+
+ def run(self, data):
+ args = self.str.split()[1:]
+ if self.depth != 0:
+ #LOOP!
+ raise koji.GenericError, "encountered policy loop at %s" % self.str
+ ruleset = context.policy.get(args[0])
+ if not ruleset:
+ raise koji.GenericError, "no such policy: %s" % args[0]
+ self.depth += 1
+ result = ruleset.apply(data)
+ self.depth -= 1
+ if result is None:
+ return False
+ else:
+ return result.lower() in ('yes', 'true', 'allow')
+
+
+def check_policy(name, data, default='deny', strict=False):
+ """Check data against the named policy
+
+ This assumes the policy actions consist of:
+ allow
+ deny
+
+ Returns a pair (access, reason)
+ access: True if the policy result is allow, False otherwise
+ reason: reason for the access
+ If strict is True, will raise ActionNotAllowed if the action is not 'allow'
+ """
+ ruleset = context.policy.get(name)
+ if not ruleset:
+ if context.opts.get('MissingPolicyOk'):
+ # for backwards compatibility, this is the default
+ result = "allow"
+ else:
+ result = "deny"
+ reason = "missing policy"
+ lastrule = ''
+ else:
+ result = ruleset.apply(data)
+ if result is None:
+ result = default
+ reason = 'not covered by policy'
+ else:
+ parts = result.split(None, 1)
+ parts.extend(['',''])
+ result, reason = parts[:2]
+ reason = reason.lower()
+ lastrule = ruleset.last_rule()
+ if context.opts.get('KojiDebug', False):
+ logger.error("policy %(name)s gave %(result)s, reason: %(reason)s, last rule: %(lastrule)s", locals())
+ if result == 'allow':
+ return True, reason
+ if result != 'deny':
+ reason = 'error in policy'
+ logger.error("Invalid action in policy %s, rule: %s", name, lastrule)
+ if not strict:
+ return False, reason
+ err_str = "policy violation (%s)" % name
+ if reason:
+ err_str += ": %s" % reason
+ if context.opts.get('KojiDebug') or context.opts.get('VerbosePolicy'):
+ err_str += " [rule: %s]" % lastrule
+ raise koji.ActionNotAllowed, err_str
+
+def assert_policy(name, data, default='deny'):
+ """Enforce the named policy
+
+ This assumes the policy actions consist of:
+ allow
+ deny
+ Raises ActionNotAllowed if policy result is not allow
+ """
+ check_policy(name, data, default=default, strict=True)
+
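+# Hypothetical caller usage, mirroring the tagBuild handler below:
+#
+#   policy_data = {'tag': tag_id, 'build': build_id, 'operation': 'tag'}
+#   access, reason = check_policy('tag', policy_data)
+#   if not access:
+#       ... deny the request, reporting reason ...
+#
+# or, to raise ActionNotAllowed directly on denial:
+#
+#   assert_policy('tag', policy_data)
+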
+def rpmdiff(basepath, rpmlist):
+ "Diff the first rpm in the list against the rest of the rpms."
+ if len(rpmlist) < 2:
+ return
+ first_rpm = rpmlist[0]
+ for other_rpm in rpmlist[1:]:
+ # ignore differences in file size, md5sum, and mtime
+ # (files may have been generated at build time and contain
+ # embedded dates or other insignificant differences)
+ args = ['/usr/libexec/koji-hub/rpmdiff',
+ '--ignore', 'S', '--ignore', '5',
+ '--ignore', 'T',
+ os.path.join(basepath, first_rpm),
+ os.path.join(basepath, other_rpm)]
+ proc = subprocess.Popen(args,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ close_fds=True)
+ output = proc.communicate()[0]
+ status = proc.wait()
+ if os.WIFSIGNALED(status) or \
+ (os.WEXITSTATUS(status) != 0):
+ raise koji.BuildError, 'mismatch when analyzing %s, rpmdiff output was:\n%s' % \
+ (os.path.basename(first_rpm), output)
+
+def importImageInternal(task_id, build_id, imgdata):
+ """
+ Import image info and the listing into the database, and move an image
+ to the final resting place. The filesize may be reported as a string if it
+ exceeds the 32-bit signed integer limit. This function will convert it if
+ need be. This is the image analog of completeBuild; it should not be called
+ for scratch images.
+
+ imgdata is:
+ arch - the arch of the image
+ files - files associated with the image (appliances have multiple files)
+ rpmlist - the list of RPM NVRs installed into the image
+ """
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+
+ koji.plugin.run_callbacks('preImport', type='image', image=imgdata)
+
+ # import the build output
+ build_info = get_build(build_id, strict=True)
+ workpath = koji.pathinfo.task(imgdata['task_id'])
+ imgdata['relpath'] = koji.pathinfo.taskrelpath(imgdata['task_id'])
+ archives = []
+ for imgfile in imgdata['files']:
+ fullpath = os.path.join(workpath, imgfile)
+ archivetype = get_archive_type(imgfile)
+ logger.debug('image type we are importing is: %s' % archivetype)
+ if not archivetype:
+ raise koji.BuildError, 'Unsupported image type'
+ archives.append(import_archive(fullpath, build_info, 'image', imgdata))
+
+ # upload logs
+ logs = [f for f in os.listdir(workpath) if f.endswith('.log')]
+ for logfile in logs:
+ logsrc = os.path.join(workpath, logfile)
+ logdir = os.path.join(koji.pathinfo.build(build_info),
+ 'data/logs/image')
+ koji.ensuredir(logdir)
+ final_path = os.path.join(logdir, os.path.basename(logfile))
+ if os.path.exists(final_path):
+ raise koji.GenericError("Error importing build log. %s already exists." % final_path)
+ if os.path.islink(logsrc) or not os.path.isfile(logsrc):
+ raise koji.GenericError("Error importing build log. %s is not a regular file." % logsrc)
+ os.rename(logsrc, final_path)
+ os.symlink(final_path, logsrc)
+
+ # record all of the RPMs installed in the image(s)
+ # verify they were built in Koji or in an external repo
+ rpm_ids = []
+ for an_rpm in imgdata['rpmlist']:
+ location = an_rpm.get('location')
+ if location:
+ data = add_external_rpm(an_rpm, location, strict=False)
+ else:
+ data = get_rpm(an_rpm, strict=True)
+ rpm_ids.append(data['id'])
+
+ # associate those RPMs with the image
+ q = """INSERT INTO image_listing (image_id,rpm_id)
+ VALUES (%(image_id)i,%(rpm_id)i)"""
+ for archive in archives:
+ sys.stderr.write('working on archive %s' % archive)
+ if archive['filename'].endswith('xml'):
+ continue
+ sys.stderr.write('associating installed rpms with %s' % archive['id'])
+ for rpm_id in rpm_ids:
+ _dml(q, {'image_id': archive['id'], 'rpm_id': rpm_id})
+
+ koji.plugin.run_callbacks('postImport', type='image', image=imgdata,
+ fullpath=fullpath)
+
+#
+# XMLRPC Methods
+#
+class RootExports(object):
+ '''Contains functions that are made available via XMLRPC'''
+
+ def buildFromCVS(self, url, tag):
+ raise koji.FunctionDeprecated
+ #return make_task('buildFromCVS',[url, tag])
+
+ def restartHosts(self, priority=5):
+ context.session.assertPerm('admin')
+ return make_task('restartHosts', [], priority=priority)
+
+ def build(self, src, target, opts=None, priority=None, channel=None):
+ """Create a build task
+
+ priority: the amount to increase (or decrease) the task priority, relative
+ to the default priority; higher values mean lower priority; only
+ admins have the right to specify a negative priority here
+ channel: the channel to allocate the task to
+ Returns the task id
+ """
+ if not opts:
+ opts = {}
+ taskOpts = {}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'only admins may create high-priority tasks'
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if channel:
+ taskOpts['channel'] = channel
+ return make_task('build',[src, target, opts],**taskOpts)
+
+ def chainBuild(self, srcs, target, opts=None, priority=None, channel=None):
+ """Create a chained build task for building sets of packages in order
+
+ srcs: list of pkg lists, i.e. [[src00, src01, src03], [src20], [src30, src31], ...]
+ where each of the top-level lists gets built and a new repo is created
+ before the next list is built.
+ target: build target
+ priority: the amount to increase (or decrease) the task priority, relative
+ to the default priority; higher values mean lower priority; only
+ admins have the right to specify a negative priority here
+ channel: the channel to allocate the task to
+ Returns a list of all the dependent task ids
+ """
+ if not opts:
+ opts = {}
+ taskOpts = {}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'only admins may create high-priority tasks'
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if channel:
+ taskOpts['channel'] = channel
+
+ return make_task('chainbuild',[srcs,target,opts],**taskOpts)
+
+ def mavenBuild(self, url, target, opts=None, priority=None, channel='maven'):
+ """Create a Maven build task
+
+ url: The URL to check out the source from. May be a CVS, SVN, or GIT repository.
+ target: the build target
+ priority: the amount to increase (or decrease) the task priority, relative
+ to the default priority; higher values mean lower priority; only
+ admins have the right to specify a negative priority here
+ channel: the channel to allocate the task to (defaults to the "maven" channel)
+
+ Returns the task ID
+ """
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ if not opts:
+ opts = {}
+ taskOpts = {}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'only admins may create high-priority tasks'
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if channel:
+ taskOpts['channel'] = channel
+
+ return make_task('maven', [url, target, opts], **taskOpts)
+
+ def wrapperRPM(self, build, url, target, priority=None, channel='maven', opts=None):
+ """Create a top-level wrapperRPM task
+
+ build: The build to generate wrapper rpms for. Must be in the COMPLETE state and have no
+ rpms already associated with it.
+ url: SCM URL to a specfile fragment
+ target: The build target to use when building the wrapper rpm. The build_tag of the target will
+ be used to populate the buildroot in which the rpms are built.
+ priority: the amount to increase (or decrease) the task priority, relative
+ to the default priority; higher values mean lower priority; only
+ admins have the right to specify a negative priority here
+ channel: the channel to allocate the task to (defaults to the "maven" channel)
+
+ returns the task ID
+ """
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+
+ if not opts:
+ opts = {}
+
+ build = self.getBuild(build, strict=True)
+ if list_rpms(build['id']) and not (opts.get('scratch') or opts.get('create_build')):
+ raise koji.PreBuildError, 'wrapper rpms for %s have already been built' % koji.buildLabel(build)
+ build_target = self.getBuildTarget(target)
+ if not build_target:
+ raise koji.PreBuildError, 'no such build target: %s' % target
+ build_tag = self.getTag(build_target['build_tag'], strict=True)
+ repo_info = self.getRepo(build_tag['id'])
+ if not repo_info:
+ raise koji.PreBuildError, 'no repo for tag: %s' % build_tag['name']
+ opts['repo_id'] = repo_info['id']
+
+ taskOpts = {}
+ if priority:
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ taskOpts['channel'] = channel
+
+ return make_task('wrapperRPM', [url, build_target, build, None, opts], **taskOpts)
+
+ def chainMaven(self, builds, target, opts=None, priority=None, channel='maven'):
+ """Create a Maven chain-build task
+
+ builds: a list of maps defining the parameters for the sequence of builds
+ target: the build target
+ priority: the amount to increase (or decrease) the task priority, relative
+ to the default priority; higher values mean lower priority; only
+ admins have the right to specify a negative priority here
+ channel: the channel to allocate the task to (defaults to the "maven" channel)
+
+ Returns the task ID
+ """
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ taskOpts = {}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'only admins may create high-priority tasks'
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if channel:
+ taskOpts['channel'] = channel
+
+ return make_task('chainmaven', [builds, target, opts], **taskOpts)
+
+ def winBuild(self, vm, url, target, opts=None, priority=None, channel='vm'):
+ """
+ Create a Windows build task
+
+ vm: the name of the VM to run the build in
+ url: The URL to check out the source from. May be a CVS, SVN, or GIT repository.
+ opts: task options
+ target: the build target
+ priority: the amount to increase (or decrease) the task priority, relative
+ to the default priority; higher values mean lower priority; only
+ admins have the right to specify a negative priority here
+ channel: the channel to allocate the task to (defaults to the "vm" channel)
+
+ Returns the task ID
+ """
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, "Windows support not enabled"
+ targ_info = self.getBuildTarget(target)
+ policy_data = {'vm_name': vm,
+ 'tag': targ_info['dest_tag']}
+ assert_policy('vm', policy_data)
+ if not opts:
+ opts = {}
+ taskOpts = {}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'only admins may create high-priority tasks'
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if channel:
+ taskOpts['channel'] = channel
+
+ return make_task('winbuild', [vm, url, target, opts], **taskOpts)
+
+ # Create the image task. Called from _build_image in the client.
+ #
+ def buildImage(self, name, version, arch, target, ksfile, img_type, opts=None, priority=None):
+ """
+ Create an image using a kickstart file and group package list.
+ """
+
+ if img_type not in ('livecd', 'appliance'):
+ raise koji.GenericError, 'Unrecognized image type: %s' % img_type
+
+ context.session.assertPerm(img_type)
+
+ taskOpts = {'channel': img_type}
+ taskOpts['arch'] = arch
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, \
+ 'only admins may create high-priority tasks'
+
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+
+ return make_task(img_type, [name, version, arch, target, ksfile, opts], **taskOpts)
+
+ # Create the image task. Called from _build_image_oz in the client.
+ #
+ def buildImageIndirection(self, opts=None, priority=None):
+ """
+ Create an image using two other images and an indirection template
+ """
+ context.session.assertPerm('image')
+ if not opts:
+ opts = {}
+ taskOpts = {'channel': 'image'}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, \
+ 'only admins may create high-priority tasks'
+
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if not opts.has_key('scratch') and not opts.has_key('indirection_template_url'):
+ raise koji.ActionNotAllowed, 'Non-scratch builds must provide url for the indirection template'
+
+ return make_task('indirectionimage', [ opts ], **taskOpts)
+
+ # Create the image task. Called from _build_image_oz in the client.
+ #
+ def buildImageOz(self, name, version, arches, target, inst_tree, opts=None, priority=None):
+ """
+ Create an image using a kickstart file and group package list.
+ """
+ context.session.assertPerm('image')
+ if not opts:
+ opts = {}
+ taskOpts = {'channel': 'image'}
+ if priority:
+ if priority < 0:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, \
+ 'only admins may create high-priority tasks'
+
+ taskOpts['priority'] = koji.PRIO_DEFAULT + priority
+ if not opts.has_key('scratch') and not opts.has_key('ksurl'):
+ raise koji.ActionNotAllowed, 'Non-scratch builds must provide ksurl'
+
+ return make_task('image', [name, version, arches, target, inst_tree, opts], **taskOpts)
+
+ def migrateImage(self, old_image_id, name, version):
+ """Migrate an old image to the new schema
+
+ This call must be enabled via hub.conf (the EnableImageMigration setting)
+ """
+ context.session.assertPerm('admin')
+ if not context.opts.get('EnableImageMigration'):
+ raise koji.GenericError, 'Image migration not enabled'
+ old = old_image_data(old_image_id)
+ check_old_image_files(old)
+ return import_old_image(old, name, version)
+
+ def hello(self,*args):
+ return "Hello World"
+
+ def fault(self):
+ "debugging. raise an error"
+ raise Exception, "test exception"
+
+ def error(self):
+ "debugging. raise an error"
+ raise koji.GenericError, "test error"
+
+ def echo(self,*args):
+ return args
+
+ def getAPIVersion(self):
+ return koji.API_VERSION
+
+ def mavenEnabled(self):
+ return bool(context.opts.get('EnableMaven'))
+
+ def winEnabled(self):
+ return bool(context.opts.get('EnableWin'))
+
+ def imageMigrationEnabled(self):
+ return bool(context.opts.get('EnableImageMigration'))
+
+ def showSession(self):
+ return "%s" % context.session
+
+ def getSessionInfo(self):
+ if not context.session.logged_in:
+ return None
+ return context.session.session_data
+
+ def showOpts(self):
+ context.session.assertPerm('admin')
+ return "%r" % context.opts
+
+ def getEvent(self, id):
+ """
+ Get information about the event with the given id.
+
+ A map will be returned with the following keys:
+ - id (integer): id of the event
+ - ts (float): timestamp the event was created, in
+ seconds since the epoch
+
+ If no event with the given id exists, an error will be raised.
+ """
+ fields = ('id', 'ts')
+ values = {'id': id}
+ q = """SELECT id, EXTRACT(EPOCH FROM time) FROM events
+ WHERE id = %(id)i"""
+ return _singleRow(q, values, fields, strict=True)
+
+ def getLastEvent(self, before=None):
+ """
+ Get the id and timestamp of the last event recorded in the system.
+ Events are usually created as the result of a configuration change
+ in the database.
+
+ If "before" (int or float) is specified, return the last event
+ that occurred before that time (in seconds since the epoch).
+ If there is no event before the given time, an error will be raised.
+
+ Note that due to differences in precision between the database and python,
+ this method can return an event with a timestamp the same or slightly higher
+ (by a few microseconds) than the value of "before" provided. Code using this
+ method should check that the timestamp returned is in fact lower than the parameter.
+ When trying to find information about a specific event, the getEvent() method
+ should be used.
+ """
+ fields = ('id', 'ts')
+ values = {}
+ q = """SELECT id, EXTRACT(EPOCH FROM time) FROM events"""
+ if before is not None:
+ if not isinstance(before, (int, long, float)):
+ raise koji.GenericError, 'invalid type for before: %s' % type(before)
+ # use the repr() conversion because it retains more precision than the
+ # string conversion
+ q += """ WHERE EXTRACT(EPOCH FROM time) < %(before)r"""
+ values['before'] = before
+ q += """ ORDER BY id DESC LIMIT 1"""
+ return _singleRow(q, values, fields, strict=True)
+
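+ # Hypothetical client usage of the two event calls above:
+ #
+ #   last = session.getLastEvent()  # e.g. {'id': 12345, 'ts': 1419486679.5}
+ #   earlier = session.getLastEvent(before=time.time() - 86400)
+ #   event = session.getEvent(earlier['id'])
+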
+ def makeTask(self,*args,**opts):
+ #this is mainly for debugging
+ #only an admin can make arbitrary tasks
+ context.session.assertPerm('admin')
+ return make_task(*args,**opts)
+
+ def uploadFile(self, path, name, size, md5sum, offset, data):
+ #path: the relative path to upload to
+ #name: the name of the file
+ #size: size of contents (bytes)
+ #md5sum: md5sum (hex digest) of contents, or a (verify-type, digest) pair
+ #data: base64 encoded file contents
+ #offset: the offset of the chunk
+ # files can be uploaded in chunks, if so the md5 and size describe
+ # the chunk rather than the whole file. the offset indicates where
+ # the chunk belongs
+ # the special offset -1 is used to indicate the final chunk
+ context.session.assertLogin()
+ contents = base64.decodestring(data)
+ del data
+ # we will accept offset and size as strings to work around xmlrpc limits
+ offset = koji.decode_int(offset)
+ size = koji.decode_int(size)
+ if isinstance(md5sum, basestring):
+ # this case is for backwards compatibility
+ verify = "md5"
+ digest = md5sum
+ elif md5sum is None:
+ verify = None
+ else:
+ verify, digest = md5sum
+ sum_cls = get_verify_class(verify)
+ if offset != -1:
+ if size is not None:
+ if size != len(contents): return False
+ if verify is not None:
+ if digest != sum_cls(contents).hexdigest():
+ return False
+ fn = get_upload_path(path, name, create=True)
+ try:
+ st = os.lstat(fn)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ pass
+ else:
+ raise
+ else:
+ if not stat.S_ISREG(st.st_mode):
+ raise koji.GenericError, "destination not a file: %s" % fn
+ elif offset == 0:
+ #first chunk, so file should not exist yet
+ if not fn.endswith('.log'):
+ # but we allow .log files to be uploaded multiple times to support
+ # realtime log-file viewing
+ raise koji.GenericError, "file already exists: %s" % fn
+ fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0666)
+ # log_error("fd=%r" %fd)
+ try:
+ if offset == 0 or (offset == -1 and size == len(contents)):
+ #truncate file
+ fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ try:
+ os.ftruncate(fd, 0)
+ # log_error("truncating fd %r to 0" %fd)
+ finally:
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ if offset == -1:
+ os.lseek(fd,0,2)
+ else:
+ os.lseek(fd,offset,0)
+ #write contents
+ fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB, len(contents), 0, 2)
+ try:
+ os.write(fd, contents)
+ # log_error("wrote contents")
+ finally:
+ fcntl.lockf(fd, fcntl.LOCK_UN, len(contents), 0, 2)
+ if offset == -1:
+ if size is not None:
+ #truncate file
+ fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ try:
+ os.ftruncate(fd, size)
+ # log_error("truncating fd %r to size %r" % (fd,size))
+ finally:
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ if verify is not None:
+ #check final digest
+ chksum = sum_cls()
+ fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB)
+ try:
+ os.lseek(fd,0,0)
+ while True:
+ block = os.read(fd, 819200)
+ if not block: break
+ chksum.update(block)
+ if digest != chksum.hexdigest():
+ return False
+ finally:
+ fcntl.lockf(fd, fcntl.LOCK_UN)
+ finally:
+ os.close(fd)
+ return True
+
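+ # A sketch of the chunked-upload sequence this method expects, with
+ # hypothetical client-side values (c0 and c1 are raw chunks of the file):
+ #
+ #   session.uploadFile(path, name, len(c0), md5(c0), 0, b64encode(c0))
+ #   session.uploadFile(path, name, len(c1), md5(c1), len(c0), b64encode(c1))
+ #   session.uploadFile(path, name, total_size, md5(whole_file), -1, '')
+ #
+ # The final call (offset -1) truncates the file to the declared size and
+ # verifies the digest of the assembled file.
+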
+ def checkUpload(self, path, name, verify=None, tail=None):
+ """Return basic information about an uploaded file"""
+ fn = get_upload_path(path, name)
+ data = {}
+ try:
+ fd = os.open(fn, os.O_RDONLY)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ try:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_SH|fcntl.LOCK_NB)
+ except IOError, e:
+ raise koji.LockError, e
+ st = os.fstat(fd)
+ if not stat.S_ISREG(st.st_mode):
+ raise koji.GenericError, "Not a regular file: %s" % fn
+ data['size'] = koji.encode_int(st.st_size)
+ data['mtime'] = st.st_mtime
+ if verify:
+ sum_cls = get_verify_class(verify)
+ if tail is not None:
+ if tail < 0:
+ raise koji.GenericError, "invalid tail value: %r" % tail
+ offset = max(st.st_size - tail, 0)
+ os.lseek(fd, offset, 0)
+ length = 0
+ chksum = sum_cls()
+ chunk = os.read(fd, 8192)
+ while chunk:
+ length += len(chunk)
+ chksum.update(chunk)
+ chunk = os.read(fd, 8192)
+ data['sumlength'] = koji.encode_int(length)
+ data['hexdigest'] = chksum.hexdigest()
+ return data
+ finally:
+ # this will also free our lock
+ os.close(fd)
+
+
+ def downloadTaskOutput(self, taskID, fileName, offset=0, size=-1):
+ """Download the file with the given name, generated by the task with the
+ given ID."""
+ if '..' in fileName:
+ raise koji.GenericError, 'Invalid file name: %s' % fileName
+ filePath = '%s/%s/%s' % (koji.pathinfo.work(), koji.pathinfo.taskrelpath(taskID), fileName)
+ filePath = os.path.normpath(filePath)
+ if not os.path.isfile(filePath):
+ raise koji.GenericError, 'no file "%s" output by task %i' % (fileName, taskID)
+ # Let the caller handle any IO or permission errors
+ f = file(filePath, 'r')
+ if isinstance(offset, str):
+ offset = int(offset)
+ if offset != None and offset > 0:
+ f.seek(offset, 0)
+ elif offset != None and offset < 0:
+ f.seek(offset, 2)
+ contents = f.read(size)
+ f.close()
+ return base64.encodestring(contents)
+
+ listTaskOutput = staticmethod(list_task_output)
+
+ createTag = staticmethod(create_tag)
+ editTag = staticmethod(old_edit_tag)
+ editTag2 = staticmethod(edit_tag)
+ deleteTag = staticmethod(delete_tag)
+
+ createExternalRepo = staticmethod(create_external_repo)
+ listExternalRepos = staticmethod(get_external_repos)
+ getExternalRepo = staticmethod(get_external_repo)
+ editExternalRepo = staticmethod(edit_external_repo)
+ deleteExternalRepo = staticmethod(delete_external_repo)
+
+ def addExternalRepoToTag(self, tag_info, repo_info, priority):
+ """Add an external repo to a tag"""
+ # wrap the local method so we don't expose the event parameter
+ add_external_repo_to_tag(tag_info, repo_info, priority)
+
+ def removeExternalRepoFromTag(self, tag_info, repo_info):
+ """Remove an external repo from a tag"""
+ # wrap the local method so we don't expose the event parameter
+ remove_external_repo_from_tag(tag_info, repo_info)
+
+ editTagExternalRepo = staticmethod(edit_tag_external_repo)
+ getTagExternalRepos = staticmethod(get_tag_external_repos)
+ getExternalRepoList = staticmethod(get_external_repo_list)
+
+ importBuildInPlace = staticmethod(import_build_in_place)
+ resetBuild = staticmethod(reset_build)
+
+ def importArchive(self, filepath, buildinfo, type, typeInfo):
+ """
+ Import an archive file and associate it with a build. The archive can
+ be any non-rpm filetype supported by Koji.
+
+ filepath: path to the archive file (relative to the Koji workdir)
+ buildinfo: information about the build to associate the archive with
+ May be a string (NVR), integer (buildID), or dict (containing keys: name, version, release)
+ type: type of the archive being imported. Currently supported archive types: maven, win
+ typeInfo: dict of type-specific information
+ """
+ if type == 'maven':
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, 'Maven support not enabled'
+ context.session.assertPerm('maven-import')
+ elif type == 'win':
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, 'Windows support not enabled'
+ context.session.assertPerm('win-import')
+ elif type == 'image':
+ context.session.assertPerm('image-import')
+ else:
+ raise koji.GenericError, 'unsupported archive type: %s' % type
+ buildinfo = get_build(buildinfo, strict=True)
+ fullpath = '%s/%s' % (koji.pathinfo.work(), filepath)
+ import_archive(fullpath, buildinfo, type, typeInfo)
+
+ untaggedBuilds = staticmethod(untagged_builds)
+ tagHistory = staticmethod(tag_history)
+ queryHistory = staticmethod(query_history)
+
+ buildMap = staticmethod(build_map)
+ deleteBuild = staticmethod(delete_build)
+ def buildReferences(self, build, limit=None):
+ return build_references(get_build(build, strict=True)['id'], limit)
+
+ addVolume = staticmethod(add_volume)
+ removeVolume = staticmethod(remove_volume)
+ listVolumes = staticmethod(list_volumes)
+ changeBuildVolume = staticmethod(change_build_volume)
+ def getVolume(self, volume, strict=False):
+ return lookup_name('volume', volume, strict=strict)
+
+ def createEmptyBuild(self, name, version, release, epoch, owner=None):
+ context.session.assertPerm('admin')
+ data = { 'name' : name, 'version' : version, 'release' : release,
+ 'epoch' : epoch }
+ if owner is not None:
+ data['owner'] = owner
+ return new_build(data)
+
+ def createMavenBuild(self, build_info, maven_info):
+ """
+ Associate Maven metadata with a build. The build will be created if it
+ does not already exist, and must not already have associated Maven
+ metadata. maven_info must be a dict
+ containing group_id, artifact_id, and version entries.
+ """
+ context.session.assertPerm('maven-import')
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ build = get_build(build_info)
+ if not build:
+ build_id = new_build(dslice(build_info, ('name', 'version', 'release', 'epoch')))
+ build = get_build(build_id, strict=True)
+ new_maven_build(build, maven_info)
+
+ def createWinBuild(self, build_info, win_info):
+ """
+ Associate Windows metadata with a build. The build will be created if it
+ does not already exist, and must not already have associated Windows
+ metadata. win_info must be a dict
+ containing a platform entry.
+ """
+ context.session.assertPerm('win-import')
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, "Windows support not enabled"
+ build = get_build(build_info)
+ if not build:
+ build_id = new_build(dslice(build_info, ('name', 'version', 'release', 'epoch')))
+ build = get_build(build_id, strict=True)
+ new_win_build(build, win_info)
+
+ def createImageBuild(self, build_info):
+ """
+ Associate image metadata with a build. The build will be created if it
+ does not already exist, and must not already have associated image metadata.
+ """
+ context.session.assertPerm('image-import')
+ build = get_build(build_info)
+ if not build:
+ build_id = new_build(dslice(build_info, ('name', 'version', 'release', 'epoch')))
+ build = get_build(build_id, strict=True)
+ new_image_build(build)
+
+ def importRPM(self, path, basename):
+ """Import an RPM into the database.
+
+ The file must be uploaded first.
+ """
+ context.session.assertPerm('admin')
+ uploadpath = koji.pathinfo.work()
+ fn = "%s/%s/%s" %(uploadpath,path,basename)
+ if not os.path.exists(fn):
+ raise koji.GenericError, "No such file: %s" % fn
+ rpminfo = import_rpm(fn)
+ import_rpm_file(fn,rpminfo['build'],rpminfo)
+ add_rpm_sig(rpminfo['id'], koji.rip_rpm_sighdr(fn))
+ for tag in list_tags(build=rpminfo['build_id']):
+ set_tag_update(tag['id'], 'IMPORT')
+
+ def mergeScratch(self, task_id):
+ """Import the rpms generated by a scratch build, and associate
+ them with an existing build.
+
+ To be eligible for import, the build must:
+ - be successfully completed
+ - contain at least one arch-specific rpm
+
+ The task must:
+ - be a 'build' task
+ - be successfully completed
+ - use the exact same SCM URL as the build
+ - contain at least one arch-specific rpm
+ - have no overlap between the arches of the rpms it contains and
+ the rpms contained by the build
+ - contain a .src.rpm whose filename exactly matches the .src.rpm
+ of the build
+
+ Only arch-specific rpms will be imported. noarch rpms and the src
+ rpm will be skipped. Build logs and buildroot metadata from the
+ scratch build will be imported along with the rpms.
+
+ This is useful for bootstrapping a new arch. RPMs can be built
+ for the new arch using a scratch build and then merged into an
+ existing build, incrementally expanding arch coverage and avoiding
+ the need for a mass-rebuild to support the new arch.
+ """
+ context.session.assertPerm('admin')
+ return merge_scratch(task_id)
+
+ def addExternalRPM(self, rpminfo, external_repo, strict=True):
+ """Import an external RPM
+
+ This call is mainly for testing. Normal access will be through
+ a host call"""
+ context.session.assertPerm('admin')
+ add_external_rpm(rpminfo, external_repo, strict=strict)
+
+ def tagBuildBypass(self,tag,build,force=False):
+ """Tag a build without running post checks or notifications
+
+ This is a short circuit function for imports.
+ Admin permission required.
+
+ Tagging with a locked tag is not allowed unless force is true.
+ Retagging is not allowed unless force is true. (Retagging changes the order
+ of tag entries, which affects which build is considered the latest.)
+ """
+ context.session.assertPerm('admin')
+ _tag_build(tag, build, force=force)
+
+ def tagBuild(self,tag,build,force=False,fromtag=None):
+ """Request that a build be tagged
+
+ The force option will attempt to force the action in the event of:
+ - tag locked
+ - missing permission
+ - package not in list for tag
+ - policy violation
+ The force option is really only effective for admins
+
+ If fromtag is specified, this becomes a move operation.
+
+ This call creates a task that was originally intended to perform more
+ extensive checks, but never has. We're stuck with this task system until
+ we're ready to break the api.
+
+ The return value is the task id
+ """
+ #first some lookups and basic sanity checks
+ build = get_build(build, strict=True)
+ tag = get_tag(tag, strict=True)
+ if fromtag:
+ fromtag_id = get_tag_id(fromtag, strict=True)
+ else:
+ fromtag_id = None
+ pkg_id = build['package_id']
+ tag_id = tag['id']
+ build_id = build['id']
+ # build state check
+ if build['state'] != koji.BUILD_STATES['COMPLETE']:
+ state = koji.BUILD_STATES[build['state']]
+ raise koji.TagError, "build %s not complete: state %s" % (build['nvr'], state)
+ # basic tag access check
+ assert_tag_access(tag_id,user_id=None,force=force)
+ if fromtag:
+ assert_tag_access(fromtag_id,user_id=None,force=force)
+ # package list check
+ pkgs = readPackageList(tagID=tag_id, pkgID=pkg_id, inherit=True)
+ pkg_error = None
+ if not pkgs.has_key(pkg_id):
+ pkg_error = "Package %s not in list for %s" % (build['name'], tag['name'])
+ elif pkgs[pkg_id]['blocked']:
+ pkg_error = "Package %s blocked in %s" % (build['name'], tag['name'])
+ if pkg_error:
+ if force and context.session.hasPerm('admin'):
+ pkglist_add(tag_id,pkg_id,force=True,block=False)
+ else:
+ raise koji.TagError, pkg_error
+ # tag policy check
+ policy_data = {'tag' : tag_id, 'build' : build_id, 'fromtag' : fromtag_id}
+ if fromtag is None:
+ policy_data['operation'] = 'tag'
+ else:
+ policy_data['operation'] = 'move'
+ #don't check policy for admins using force
+ if not (force and context.session.hasPerm('admin')):
+ assert_policy('tag', policy_data)
+ #XXX - we're running this check twice, here and in host.tagBuild (called by the task)
+ #spawn the tagging task
+ return make_task('tagBuild', [tag_id, build_id, force, fromtag_id], priority=10)
+
+ def untagBuild(self,tag,build,strict=True,force=False):
+ """Untag a build
+
+ Unlike tagBuild, this does not create a task
+ No return value"""
+ #we can't staticmethod this one -- we're limiting the options
+ user_id = context.session.user_id
+ tag_id = get_tag(tag, strict=True)['id']
+ build_id = get_build(build, strict=True)['id']
+ policy_data = {'tag' : None, 'build' : build_id, 'fromtag' : tag_id}
+ policy_data['operation'] = 'untag'
+ try:
+ #don't check policy for admins using force
+ if not (force and context.session.hasPerm('admin')):
+ assert_policy('tag', policy_data)
+ _untag_build(tag,build,strict=strict,force=force)
+ tag_notification(True, None, tag, build, user_id)
+ except Exception, e:
+ exctype, value = sys.exc_info()[:2]
+ tag_notification(False, None, tag, build, user_id, False, "%s: %s" % (exctype, value))
+ raise
+
+ def untagBuildBypass(self, tag, build, strict=True, force=False):
+ """Untag a build without any checks or notifications
+
+ Admins only. Intended for syncs/imports.
+
+ Unlike tagBuild, this does not create a task
+ No return value"""
+ context.session.assertPerm('admin')
+ _untag_build(tag, build, strict=strict, force=force)
+
+ def moveBuild(self,tag1,tag2,build,force=False):
+ """Move a build from tag1 to tag2
+
+ Returns the task id of the task performing the move"""
+ return self.tagBuild(tag2,build,force=force,fromtag=tag1)
+
+ def moveAllBuilds(self, tag1, tag2, package, force=False):
+ """Move all builds of a package from tag1 to tag2 in the correct order
+
+ Returns the task id of the task performing the move"""
+
+ #lookups and basic sanity checks
+ pkg_id = get_package_id(package, strict=True)
+ tag1_id = get_tag_id(tag1, strict=True)
+ tag2_id = get_tag_id(tag2, strict=True)
+
+ # note: we're just running the quick checks now so we can fail
+ # early if appropriate, rather than waiting for the task
+ # Make sure package is on the list for the tag we're adding it to
+ pkgs = readPackageList(tagID=tag2_id, pkgID=pkg_id, inherit=True)
+ pkg_error = None
+ if not pkgs.has_key(pkg_id):
+ pkg_error = "Package %s not in list for tag %s" % (package, tag2)
+ elif pkgs[pkg_id]['blocked']:
+ pkg_error = "Package %s blocked in tag %s" % (package, tag2)
+ if pkg_error:
+ if force and context.session.hasPerm('admin'):
+ pkglist_add(tag2_id,pkg_id,force=True,block=False)
+ else:
+ raise koji.TagError, pkg_error
+
+ #access check
+ assert_tag_access(tag1_id,user_id=None,force=force)
+ assert_tag_access(tag2_id,user_id=None,force=force)
+
+ build_list = readTaggedBuilds(tag1_id, package=package)
+ # we want 'ORDER BY tag_listing.create_event ASC' not DESC so reverse
+ build_list.reverse()
+
+ #policy check
+ policy_data = {'tag' : tag2, 'fromtag' : tag1, 'operation' : 'move'}
+ #don't check policy for admins using force
+ if not (force and context.session.hasPerm('admin')):
+ for build in build_list:
+ policy_data['build'] = build['id']
+ assert_policy('tag', policy_data)
+ #XXX - we're running this check twice, here and in host.tagBuild (called by the task)
+
+ wait_on = []
+ tasklist = []
+ for build in build_list:
+ task_id = make_task('dependantTask', [wait_on, [['tagBuild', [tag2_id, build['id'], force, tag1_id], {'priority':15}]]])
+ wait_on = [task_id]
+ log_error("\nMade Task: %s\n" % task_id)
+ tasklist.append(task_id)
+ return tasklist
+
+
+ listTags = staticmethod(list_tags)
+
+ getBuild = staticmethod(get_build)
+ getNextRelease = staticmethod(get_next_release)
+ getMavenBuild = staticmethod(get_maven_build)
+ getWinBuild = staticmethod(get_win_build)
+ getImageBuild = staticmethod(get_image_build)
+ getArchiveTypes = staticmethod(get_archive_types)
+ getArchiveType = staticmethod(get_archive_type)
+ listArchives = staticmethod(list_archives)
+ getArchive = staticmethod(get_archive)
+ getMavenArchive = staticmethod(get_maven_archive)
+ getWinArchive = staticmethod(get_win_archive)
+ getImageArchive = staticmethod(get_image_archive)
+ listArchiveFiles = staticmethod(list_archive_files)
+ getArchiveFile = staticmethod(get_archive_file)
+
+ def getChangelogEntries(self, buildID=None, taskID=None, filepath=None, author=None, before=None, after=None, queryOpts=None):
+ """Get changelog entries for the build with the given ID,
+ or for the rpm generated by the given task at the given path
+
+ - author: only return changelogs with a matching author
+ - before: only return changelogs from before the given date (in UTC)
+ (a datetime object, a string in the 'YYYY-MM-DD HH24:MI:SS' format, or integer seconds
+ since the epoch)
+ - after: only return changelogs from after the given date (in UTC)
+ (a datetime object, a string in the 'YYYY-MM-DD HH24:MI:SS' format, or integer seconds
+ since the epoch)
+ - queryOpts: query options used by the QueryProcessor
+
+ If "order" is not specified in queryOpts, results will be returned in reverse chronological
+ order.
+
+ Results will be returned as a list of maps with 'date', 'author', and 'text' keys.
+ If there are no results, an empty list will be returned.
+ """
+ if queryOpts is None:
+ queryOpts = {}
+ if queryOpts.get('order') in ('date', '-date'):
+ # use a numeric sort on the timestamp instead of an alphabetic sort on the
+ # date string
+ queryOpts['order'] = queryOpts['order'].replace('date', 'date_ts')
+ if buildID:
+ build_info = get_build(buildID)
+ if not build_info:
+ return _applyQueryOpts([], queryOpts)
+ srpms = self.listRPMs(buildID=build_info['id'], arches='src')
+ if not srpms:
+ return _applyQueryOpts([], queryOpts)
+ srpm_info = srpms[0]
+ srpm_path = os.path.join(koji.pathinfo.build(build_info), koji.pathinfo.rpm(srpm_info))
+ elif taskID:
+ if not filepath:
+ raise koji.GenericError, 'filepath must be specified with taskID'
+ if filepath.startswith('/') or '../' in filepath:
+ raise koji.GenericError, 'invalid filepath: %s' % filepath
+ srpm_path = os.path.join(koji.pathinfo.work(),
+ koji.pathinfo.taskrelpath(taskID),
+ filepath)
+ else:
+ raise koji.GenericError, 'either buildID or taskID and filepath must be specified'
+
+ if not os.path.exists(srpm_path):
+ return _applyQueryOpts([], queryOpts)
+
+ if before:
+ if isinstance(before, datetime.datetime):
+ before = calendar.timegm(before.utctimetuple())
+ elif isinstance(before, (str, unicode)):
+ before = koji.util.parseTime(before)
+ elif isinstance(before, (int, long)):
+ pass
+ else:
+ raise koji.GenericError, 'invalid type for before: %s' % type(before)
+
+ if after:
+ if isinstance(after, datetime.datetime):
+ after = calendar.timegm(after.utctimetuple())
+ elif isinstance(after, (str, unicode)):
+ after = koji.util.parseTime(after)
+ elif isinstance(after, (int, long)):
+ pass
+ else:
+ raise koji.GenericError, 'invalid type for after: %s' % type(after)
+
+ results = []
+
+ fields = koji.get_header_fields(srpm_path, ['changelogtime', 'changelogname', 'changelogtext'])
+ for (cltime, clname, cltext) in zip(fields['changelogtime'], fields['changelogname'],
+ fields['changelogtext']):
+ cldate = datetime.datetime.fromtimestamp(cltime).isoformat(' ')
+ clname = koji.fixEncoding(clname)
+ cltext = koji.fixEncoding(cltext)
+
+ if author and author != clname:
+ continue
+ if before and not cltime < before:
+ continue
+ if after and not cltime > after:
+ continue
+
+ if queryOpts.get('asList'):
+ results.append([cldate, clname, cltext])
+ else:
+ results.append({'date': cldate, 'date_ts': cltime, 'author': clname, 'text': cltext})
+
+ return _applyQueryOpts(results, queryOpts)
+
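+ # Hypothetical client call: the three most recent changelog entries by a
+ # given author, newest first:
+ #
+ #   session.getChangelogEntries(buildID=1234, author='Joe Packager',
+ #       queryOpts={'order': '-date', 'limit': 3})
+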
+ def cancelBuild(self, buildID):
+ """Cancel the build with the given buildID
+
+ If the build is associated with a task, cancel the task as well.
+ Return True if the build was successfully canceled, False if not."""
+ build = get_build(buildID)
+ if build == None:
+ return False
+ if build['owner_id'] != context.session.user_id:
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'Cannot cancel build, not owner'
+ return cancel_build(build['id'])
+
+ def assignTask(self,task_id,host,force=False):
+ """Assign a task to a host
+
+ Specify force=True to assign a non-free task
+ """
+ context.session.assertPerm('admin')
+ task = Task(task_id)
+ host = get_host(host,strict=True)
+ task.assign(host['id'],force)
+
+ def freeTask(self,task_id):
+ """Free a task"""
+ context.session.assertPerm('admin')
+ task = Task(task_id)
+ task.free()
+
+ def cancelTask(self,task_id,recurse=True):
+ """Cancel a task"""
+ task = Task(task_id)
+ if not task.verifyOwner() and not task.verifyHost():
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'Cannot cancel task, not owner'
+ #non-admins can also use cancelBuild
+ task.cancel(recurse=recurse)
+
+ def cancelTaskFull(self,task_id,strict=True):
+ """Cancel a task and all tasks in its group"""
+ context.session.assertPerm('admin')
+ #non-admins can use cancelBuild or cancelTask
+ Task(task_id).cancelFull(strict=strict)
+
+ def cancelTaskChildren(self,task_id):
+ """Cancel a task's children, but not the task itself"""
+ task = Task(task_id)
+ if not task.verifyOwner() and not task.verifyHost():
+ if not context.session.hasPerm('admin'):
+ raise koji.ActionNotAllowed, 'Cannot cancel task, not owner'
+ task.cancelChildren()
+
+ def setTaskPriority(self, task_id, priority, recurse=True):
+ """Set task priority"""
+ context.session.assertPerm('admin')
+ task = Task(task_id)
+ task.setPriority(priority, recurse=recurse)
+
+ def listTagged(self,tag,event=None,inherit=False,prefix=None,latest=False,package=None,owner=None,type=None):
+ """List builds tagged with tag"""
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ results = readTaggedBuilds(tag,event,inherit=inherit,latest=latest,package=package,owner=owner,type=type)
+ if prefix:
+ prefix = prefix.lower()
+ results = [build for build in results if build['package_name'].lower().startswith(prefix)]
+ return results
+
+ def listTaggedRPMS(self,tag,event=None,inherit=False,latest=False,package=None,arch=None,rpmsigs=False,owner=None,type=None):
+ """List rpms and builds within tag"""
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ return readTaggedRPMS(tag,event=event,inherit=inherit,latest=latest,package=package,arch=arch,rpmsigs=rpmsigs,owner=owner,type=type)
+
+ def listTaggedArchives(self, tag, event=None, inherit=False, latest=False, package=None, type=None):
+ """List archives and builds within a tag"""
+ if not isinstance(tag, int):
+ tag = get_tag_id(tag,strict=True)
+ return readTaggedArchives(tag, event=event, inherit=inherit, latest=latest, package=package, type=type)
+
+ def listBuilds(self, packageID=None, userID=None, taskID=None, prefix=None, state=None,
+ volumeID=None,
+ createdBefore=None, createdAfter=None,
+ completeBefore=None, completeAfter=None, type=None, typeInfo=None, queryOpts=None):
+ """List package builds.
+ If packageID is specified, restrict the results to builds of the specified package.
+ If userID is specified, restrict the results to builds owned by the given user.
+ If taskID is specified, restrict the results to builds with the given task ID. If taskID is -1,
+ restrict the results to builds with a non-null taskID.
+ If volumeID is specified, restrict the results to builds stored on that volume
+ One or more of packageID, userID, volumeID, and taskID may be specified.
+ If prefix is specified, restrict the results to builds whose package name starts with that
+ prefix.
+ If createdBefore and/or createdAfter are specified, restrict the results to builds whose
+ creation_time is before and/or after the given time.
+ If completeBefore and/or completeAfter are specified, restrict the results to builds whose
+ completion_time is before and/or after the given time.
+ The time may be specified as a floating point value indicating seconds since the Epoch (as
+ returned by time.time()) or as a string in ISO format ('YYYY-MM-DD HH24:MI:SS').
+ If type is not None, only list builds of the associated type. Currently the supported types are 'maven', 'win', and 'image'.
+ If typeInfo is not None, only list builds with matching type-specific info. Must be used in conjunction with
+ the type parameter.
+ For type 'maven', typeInfo is a dict containing one or more of group_id, artifact_id, and/or version;
+ output will be restricted to builds with matching Maven metadata. For type 'win', typeInfo is a dict
+ containing a platform entry.
+
+ Returns a list of maps. Each map contains the following keys:
+
+ - build_id
+ - version
+ - release
+ - epoch
+ - state
+ - package_id
+ - package_name
+ - name (same as package_name)
+ - nvr (synthesized for sorting purposes)
+ - owner_id
+ - owner_name
+ - volume_id
+ - volume_name
+ - creation_event_id
+ - creation_time
+ - creation_ts
+ - completion_time
+ - completion_ts
+ - task_id
+
+ If type == 'maven', each map will also contain the following keys:
+
+ - maven_group_id
+ - maven_artifact_id
+ - maven_version
+
+ If no builds match, an empty list is returned.
+ """
+ fields = [('build.id', 'build_id'), ('build.version', 'version'), ('build.release', 'release'),
+ ('build.epoch', 'epoch'), ('build.state', 'state'), ('build.completion_time', 'completion_time'),
+ ('events.id', 'creation_event_id'), ('events.time', 'creation_time'), ('build.task_id', 'task_id'),
+ ('EXTRACT(EPOCH FROM events.time)','creation_ts'),
+ ('EXTRACT(EPOCH FROM build.completion_time)','completion_ts'),
+ ('package.id', 'package_id'), ('package.name', 'package_name'), ('package.name', 'name'),
+ ('volume.id', 'volume_id'), ('volume.name', 'volume_name'),
+ ("package.name || '-' || build.version || '-' || build.release", 'nvr'),
+ ('users.id', 'owner_id'), ('users.name', 'owner_name')]
+
+ tables = ['build']
+ joins = ['events ON build.create_event = events.id',
+ 'package ON build.pkg_id = package.id',
+ 'volume ON build.volume_id = volume.id',
+ 'users ON build.owner = users.id']
+ clauses = []
+ if packageID != None:
+ clauses.append('package.id = %(packageID)i')
+ if userID != None:
+ clauses.append('users.id = %(userID)i')
+ if volumeID != None:
+ clauses.append('volume.id = %(volumeID)i')
+ if taskID != None:
+ if taskID == -1:
+ clauses.append('build.task_id IS NOT NULL')
+ else:
+ clauses.append('build.task_id = %(taskID)i')
+ if prefix:
+ clauses.append("package.name ilike %(prefix)s || '%%'")
+ if state != None:
+ clauses.append('build.state = %(state)i')
+ if createdBefore:
+ if not isinstance(createdBefore, str):
+ createdBefore = datetime.datetime.fromtimestamp(createdBefore).isoformat(' ')
+ clauses.append('events.time < %(createdBefore)s')
+ if createdAfter:
+ if not isinstance(createdAfter, str):
+ createdAfter = datetime.datetime.fromtimestamp(createdAfter).isoformat(' ')
+ clauses.append('events.time > %(createdAfter)s')
+ if completeBefore:
+ if not isinstance(completeBefore, str):
+ completeBefore = datetime.datetime.fromtimestamp(completeBefore).isoformat(' ')
+ clauses.append('build.completion_time < %(completeBefore)s')
+ if completeAfter:
+ if not isinstance(completeAfter, str):
+ completeAfter = datetime.datetime.fromtimestamp(completeAfter).isoformat(' ')
+ clauses.append('build.completion_time > %(completeAfter)s')
+ if type is None:
+ pass
+ elif type == 'maven':
+ joins.append('maven_builds ON build.id = maven_builds.build_id')
+ fields.extend([('maven_builds.group_id', 'maven_group_id'),
+ ('maven_builds.artifact_id', 'maven_artifact_id'),
+ ('maven_builds.version', 'maven_version')])
+ if typeInfo:
+ if typeInfo.has_key('group_id'):
+ clauses.append('maven_builds.group_id = %(group_id)s')
+ group_id = typeInfo['group_id']
+ if typeInfo.has_key('artifact_id'):
+ clauses.append('maven_builds.artifact_id = %(artifact_id)s')
+ artifact_id = typeInfo['artifact_id']
+ if typeInfo.has_key('version'):
+ clauses.append('maven_builds.version = %(version)s')
+ version = typeInfo['version']
+ elif type == 'win':
+ joins.append('win_builds ON build.id = win_builds.build_id')
+ fields.append(('win_builds.platform', 'platform'))
+ if typeInfo:
+ clauses.append('win_builds.platform = %(platform)s')
+ platform = typeInfo['platform']
+ elif type == 'image':
+ joins.append('image_builds ON build.id = image_builds.build_id')
+ fields.append(('image_builds.build_id', 'build_id'))
+ else:
+ raise koji.GenericError, 'unsupported build type: %s' % type
+
+ query = QueryProcessor(columns=[pair[0] for pair in fields],
+ aliases=[pair[1] for pair in fields],
+ tables=tables, joins=joins, clauses=clauses,
+ values=locals(), opts=queryOpts)
+
+ return query.iterate()
+
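+ # Hypothetical client call: the ten most recently completed builds of a
+ # package, newest build id first:
+ #
+ #   session.listBuilds(packageID=42, state=koji.BUILD_STATES['COMPLETE'],
+ #       completeAfter='2015-01-01 00:00:00',
+ #       queryOpts={'order': '-build_id', 'limit': 10})
+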
+ def getLatestBuilds(self,tag,event=None,package=None,type=None):
+ """List latest builds for tag (inheritance enabled)"""
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ return readTaggedBuilds(tag,event,inherit=True,latest=True,package=package,type=type)
+
+ def getLatestRPMS(self, tag, package=None, arch=None, event=None, rpmsigs=False, type=None):
+ """List latest RPMS for tag (inheritance enabled)"""
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ return readTaggedRPMS(tag, package=package, arch=arch, event=event, inherit=True, latest=True, rpmsigs=rpmsigs, type=type)
+
+ def getLatestMavenArchives(self, tag, event=None, inherit=True):
+ """Return a list of the latest Maven archives in the tag, as of the given event
+ (or now if event is None). If inherit is True, follow the tag hierarchy
+ and return a list of the latest archives for all tags in the tree."""
+ tag_id = get_tag_id(tag, strict=True)
+ return maven_tag_archives(tag_id, event_id=event, inherit=inherit)
+
+ def getAverageBuildDuration(self, package):
+ """Get the average duration of a build of the given package.
+ Returns a floating-point value indicating the
+ average number of seconds the package took to build. If the package
+ has never been built, return None."""
+ packageID = get_package_id(package)
+ if not packageID:
+ return None
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ query = """SELECT EXTRACT(epoch FROM avg(build.completion_time - events.time))
+ FROM build
+ JOIN events ON build.create_event = events.id
+ WHERE build.pkg_id = %(packageID)i
+ AND build.state = %(st_complete)i
+ AND build.task_id IS NOT NULL"""
+
+ return _singleValue(query, locals())
+
+ packageListAdd = staticmethod(pkglist_add)
+ packageListRemove = staticmethod(pkglist_remove)
+ packageListBlock = staticmethod(pkglist_block)
+ packageListUnblock = staticmethod(pkglist_unblock)
+ packageListSetOwner = staticmethod(pkglist_setowner)
+ packageListSetArches = staticmethod(pkglist_setarches)
+
+ groupListAdd = staticmethod(grplist_add)
+ groupListRemove = staticmethod(grplist_remove)
+ groupListBlock = staticmethod(grplist_block)
+ groupListUnblock = staticmethod(grplist_unblock)
+
+ groupPackageListAdd = staticmethod(grp_pkg_add)
+ groupPackageListRemove = staticmethod(grp_pkg_remove)
+ groupPackageListBlock = staticmethod(grp_pkg_block)
+ groupPackageListUnblock = staticmethod(grp_pkg_unblock)
+
+ groupReqListAdd = staticmethod(grp_req_add)
+ groupReqListRemove = staticmethod(grp_req_remove)
+ groupReqListBlock = staticmethod(grp_req_block)
+ groupReqListUnblock = staticmethod(grp_req_unblock)
+
+ getTagGroups = staticmethod(readTagGroups)
+
+ checkTagAccess = staticmethod(check_tag_access)
+
+ getGlobalInheritance = staticmethod(readGlobalInheritance)
+
+ def getInheritanceData(self,tag,event=None):
+ """Return inheritance data for tag"""
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ return readInheritanceData(tag,event)
+
+ def setInheritanceData(self,tag,data,clear=False):
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ context.session.assertPerm('admin')
+ return writeInheritanceData(tag,data,clear=clear)
+
+ def getFullInheritance(self,tag,event=None,reverse=False,stops=None,jumps=None):
+ if stops is None:
+ stops = {}
+ if jumps is None:
+ jumps = {}
+ if not isinstance(tag,int):
+ #lookup tag id
+ tag = get_tag_id(tag,strict=True)
+ for mapping in [stops, jumps]:
+ for key in mapping.keys():
+ mapping[int(key)] = mapping[key]
+ return readFullInheritance(tag,event,reverse,stops,jumps)
+
+ listRPMs = staticmethod(list_rpms)
+
+ def listBuildRPMs(self,build):
+ """Get information about all the RPMs generated by the build with the given
+ ID. A list of maps is returned, each map containing the following keys:
+
+ - id
+ - name
+ - version
+ - release
+ - arch
+ - epoch
+ - payloadhash
+ - size
+ - buildtime
+ - build_id
+ - buildroot_id
+
+ If no build has the given ID, or the build generated no RPMs, an empty list is returned."""
+ if not isinstance(build, int):
+ #lookup build id
+ build = self.findBuildID(build, strict=True)
+ return self.listRPMs(buildID=build)
+
+ getRPM = staticmethod(get_rpm)
+
+ def getRPMDeps(self, rpmID, depType=None, queryOpts=None):
+ """Return dependency information about the RPM with the given ID.
+ If depType is specified, restrict results to dependencies of the given type.
+ Otherwise, return all dependency information. A list of maps will be returned,
+ each with the following keys:
+ - name
+ - version
+ - flags
+ - type
+
+ If there is no RPM with the given ID, or the RPM has no dependency information,
+ an empty list will be returned.
+ """
+ if queryOpts is None:
+ queryOpts = {}
+ rpm_info = get_rpm(rpmID)
+ if not rpm_info or not rpm_info['build_id']:
+ return _applyQueryOpts([], queryOpts)
+ build_info = get_build(rpm_info['build_id'])
+ rpm_path = os.path.join(koji.pathinfo.build(build_info), koji.pathinfo.rpm(rpm_info))
+ if not os.path.exists(rpm_path):
+ return _applyQueryOpts([], queryOpts)
+
+ results = []
+
+ for dep_name in ['REQUIRE','PROVIDE','CONFLICT','OBSOLETE']:
+ dep_id = getattr(koji, 'DEP_' + dep_name)
+ if depType is None or depType == dep_id:
+ fields = koji.get_header_fields(rpm_path, [dep_name + 'NAME',
+ dep_name + 'VERSION',
+ dep_name + 'FLAGS'])
+ for (name, version, flags) in zip(fields[dep_name + 'NAME'],
+ fields[dep_name + 'VERSION'],
+ fields[dep_name + 'FLAGS']):
+ if queryOpts.get('asList'):
+ results.append([name, version, flags, dep_id])
+ else:
+ results.append({'name': name, 'version': version, 'flags': flags, 'type': dep_id})
+
+ return _applyQueryOpts(results, queryOpts)
+
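+ # Illustrative usage sketch (not part of upstream): restricting getRPMDeps to
+ # a single dependency type uses the koji.DEP_* constants; 'session' and
+ # 'rpm_id' are assumed from the sketch above.
+ #
+ # for dep in session.getRPMDeps(rpm_id, depType=koji.DEP_REQUIRE):
+ # print '%(name)s %(flags)s %(version)s' % dep
+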
+ def listRPMFiles(self, rpmID, queryOpts=None):
+ """List files associated with the RPM with the given ID. A list of maps
+ will be returned, each with the following keys:
+ - name
+ - digest
+ - md5 (alias for digest)
+ - digest_algo
+ - size
+ - flags
+
+ If there is no RPM with the given ID, or that RPM contains no files,
+ an empty list will be returned."""
+ if queryOpts is None:
+ queryOpts = {}
+ rpm_info = get_rpm(rpmID)
+ if not rpm_info or not rpm_info['build_id']:
+ return _applyQueryOpts([], queryOpts)
+ build_info = get_build(rpm_info['build_id'])
+ rpm_path = os.path.join(koji.pathinfo.build(build_info), koji.pathinfo.rpm(rpm_info))
+ if not os.path.exists(rpm_path):
+ return _applyQueryOpts([], queryOpts)
+
+ results = []
+ hdr = koji.get_rpm_header(rpm_path)
+ fields = koji.get_header_fields(hdr, ['filenames', 'filemd5s', 'filesizes', 'fileflags',
+ 'fileusername', 'filegroupname', 'filemtimes', 'filemodes'])
+ digest_algo = koji.util.filedigestAlgo(hdr)
+
+ for (name, digest, size, flags, user, group, mtime, mode) in zip(fields['filenames'], fields['filemd5s'],
+ fields['filesizes'], fields['fileflags'],
+ fields['fileusername'], fields['filegroupname'],
+ fields['filemtimes'], fields['filemodes']):
+ if queryOpts.get('asList'):
+ results.append([name, digest, size, flags, digest_algo, user, group, mtime, mode])
+ else:
+ results.append({'name': name, 'digest': digest, 'digest_algo': digest_algo,
+ 'md5': digest, 'size': size, 'flags': flags,
+ 'user': user, 'group': group, 'mtime': mtime, 'mode': mode})
+
+ return _applyQueryOpts(results, queryOpts)
+
+ def getRPMFile(self, rpmID, filename):
+ """
+ Get info about the file in the given RPM with the given filename.
+ A map will be returned with the following keys:
+ - rpm_id
+ - name
+ - digest
+ - md5 (alias for digest)
+ - digest_algo
+ - size
+ - flags
+
+ If no such file exists, an empty map will be returned.
+ """
+ rpm_info = get_rpm(rpmID)
+ if not rpm_info or not rpm_info['build_id']:
+ return {}
+ build_info = get_build(rpm_info['build_id'])
+ rpm_path = os.path.join(koji.pathinfo.build(build_info), koji.pathinfo.rpm(rpm_info))
+ if not os.path.exists(rpm_path):
+ return {}
+
+ hdr = koji.get_rpm_header(rpm_path)
+ # use filemd5s for backward compatibility
+ fields = koji.get_header_fields(hdr, ['filenames', 'filemd5s', 'filesizes', 'fileflags',
+ 'fileusername', 'filegroupname', 'filemtimes', 'filemodes'])
+ digest_algo = koji.util.filedigestAlgo(hdr)
+
+ i = 0
+ for name in fields['filenames']:
+ if name == filename:
+ return {'rpm_id': rpm_info['id'], 'name': name, 'digest': fields['filemd5s'][i],
+ 'digest_algo': digest_algo, 'md5': fields['filemd5s'][i],
+ 'size': fields['filesizes'][i], 'flags': fields['fileflags'][i],
+ 'user': fields['fileusername'][i], 'group': fields['filegroupname'][i],
+ 'mtime': fields['filemtimes'][i], 'mode': fields['filemodes'][i]}
+ i += 1
+ return {}
+
+ def getRPMHeaders(self, rpmID=None, taskID=None, filepath=None, headers=None):
+ """
+ Get the requested headers from the rpm. Header names are case-insensitive.
+ If a header is requested that does not exist, an exception will be raised.
+ Returns a map of header names to values. If the specified ID
+ is not valid or the rpm does not exist on the file system, an empty map
+ will be returned.
+ """
+ if not headers:
+ headers = []
+ if rpmID:
+ rpm_info = get_rpm(rpmID)
+ if not rpm_info or not rpm_info['build_id']:
+ return {}
+ build_info = get_build(rpm_info['build_id'])
+ rpm_path = os.path.join(koji.pathinfo.build(build_info), koji.pathinfo.rpm(rpm_info))
+ if not os.path.exists(rpm_path):
+ return {}
+ elif taskID:
+ if not filepath:
+ raise koji.GenericError, 'filepath must be specified with taskID'
+ if filepath.startswith('/') or '../' in filepath:
+ raise koji.GenericError, 'invalid filepath: %s' % filepath
+ rpm_path = os.path.join(koji.pathinfo.work(),
+ koji.pathinfo.taskrelpath(taskID),
+ filepath)
+ else:
+ raise koji.GenericError, 'either rpmID or taskID and filepath must be specified'
+
+ headers = koji.get_header_fields(rpm_path, headers)
+ for key, value in headers.items():
+ if isinstance(value, basestring):
+ headers[key] = koji.fixEncoding(value)
+ return headers
+
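+ # Illustrative usage sketch (not part of upstream): fetching a couple of
+ # headers by rpm ID; header names such as 'summary' and 'license' are
+ # resolved case-insensitively.
+ #
+ # hdrs = session.getRPMHeaders(rpmID=rpm_id, headers=['summary', 'license'])
+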
+ queryRPMSigs = staticmethod(query_rpm_sigs)
+
+ def writeSignedRPM(self, an_rpm, sigkey, force=False):
+ """Write a signed copy of the rpm"""
+ context.session.assertPerm('sign')
+ #XXX - still not sure if this is the right restriction
+ return write_signed_rpm(an_rpm, sigkey, force)
+
+ def addRPMSig(self, an_rpm, data):
+ """Store a signature header for an rpm
+
+ data: the signature header encoded as base64
+ """
+ context.session.assertPerm('sign')
+ return add_rpm_sig(an_rpm, base64.decodestring(data))
+
+ findBuildID = staticmethod(find_build_id)
+ getTagID = staticmethod(get_tag_id)
+ getTag = staticmethod(get_tag)
+
+ def getPackageID(self,name):
+ c=context.cnx.cursor()
+ q="""SELECT id FROM package WHERE name=%(name)s"""
+ c.execute(q,locals())
+ r=c.fetchone()
+ if not r:
+ return None
+ return r[0]
+
+ getPackage = staticmethod(lookup_package)
+
+ def listPackages(self, tagID=None, userID=None, pkgID=None, prefix=None, inherited=False, with_dups=False, event=None):
+ """List if tagID and/or userID is specified, limit the
+ list to packages belonging to the given user or with the
+ given tag.
+
+ A list of maps is returned. Each map contains the
+ following keys:
+
+ - package_id
+ - package_name
+
+ If tagID, userID, or pkgID are specified, the maps will also contain the
+ following keys.
+
+ - tag_id
+ - tag_name
+ - owner_id
+ - owner_name
+ - extra_arches
+ - blocked
+ """
+ if tagID is None and userID is None and pkgID is None:
+ query = """SELECT id, name from package"""
+ results = _multiRow(query,{},('package_id', 'package_name'))
+ else:
+ if tagID is not None:
+ tagID = get_tag_id(tagID,strict=True)
+ if userID is not None:
+ userID = get_user(userID,strict=True)['id']
+ if pkgID is not None:
+ pkgID = get_package_id(pkgID,strict=True)
+ result_list = readPackageList(tagID=tagID, userID=userID, pkgID=pkgID,
+ inherit=inherited, with_dups=with_dups,
+ event=event).values()
+ if with_dups:
+ # when with_dups=True, readPackageList returns a list of list of dicts
+ # convert it to a list of dicts for consistency
+ results = []
+ for result in result_list:
+ results.extend(result)
+ else:
+ results = result_list
+
+ if prefix:
+ prefix = prefix.lower()
+ results = [package for package in results if package['package_name'].lower().startswith(prefix)]
+
+ return results
+
+ def checkTagPackage(self,tag,pkg):
+ """Check that pkg is in the list for tag. Returns true/false"""
+ tag_id = get_tag_id(tag,strict=False)
+ pkg_id = get_package_id(pkg,strict=False)
+ if pkg_id is None or tag_id is None:
+ return False
+ pkgs = readPackageList(tagID=tag_id, pkgID=pkg_id, inherit=True)
+ if not pkgs.has_key(pkg_id):
+ return False
+ else:
+ #still might be blocked
+ return not pkgs[pkg_id]['blocked']
+
+ def getPackageConfig(self,tag,pkg,event=None):
+ """Get config for package in tag"""
+ tag_id = get_tag_id(tag,strict=False)
+ pkg_id = get_package_id(pkg,strict=False)
+ if pkg_id is None or tag_id is None:
+ return None
+ pkgs = readPackageList(tagID=tag_id, pkgID=pkg_id, inherit=True, event=event)
+ return pkgs.get(pkg_id,None)
+
+ getUser = staticmethod(get_user)
+
+ def grantPermission(self, userinfo, permission, create=False):
+ """Grant a permission to a user"""
+ context.session.assertPerm('admin')
+ user_id = get_user(userinfo,strict=True)['id']
+ perm = lookup_perm(permission, strict=(not create), create=create)
+ perm_id = perm['id']
+ if perm['name'] in koji.auth.get_user_perms(user_id):
+ raise koji.GenericError, 'user %s already has permission: %s' % (userinfo, perm['name'])
+ insert = InsertProcessor('user_perms')
+ insert.set(user_id=user_id, perm_id=perm_id)
+ insert.make_create()
+ insert.execute()
+
+ def revokePermission(self, userinfo, permission):
+ """Revoke a permission from a user"""
+ context.session.assertPerm('admin')
+ user_id = get_user(userinfo, strict=True)['id']
+ perm = lookup_perm(permission, strict=True)
+ perm_id = perm['id']
+ if perm['name'] not in koji.auth.get_user_perms(user_id):
+ raise koji.GenericError, 'user %s does not have permission: %s' % (userinfo, perm['name'])
+ update = UpdateProcessor('user_perms', values=locals(),
+ clauses=["user_id = %(user_id)i", "perm_id = %(perm_id)i"])
+ update.make_revoke()
+ update.execute()
+
+ def createUser(self, username, status=None, krb_principal=None):
+ """Add a user to the database"""
+ context.session.assertPerm('admin')
+ if get_user(username):
+ raise koji.GenericError, 'user already exists: %s' % username
+ if krb_principal and get_user(krb_principal):
+ raise koji.GenericError, 'user with this Kerberos principal already exists: %s' % krb_principal
+
+ return context.session.createUser(username, status=status, krb_principal=krb_principal)
+
+ def enableUser(self, username):
+ """Enable logins by the specified user"""
+ user = get_user(username)
+ if not user:
+ raise koji.GenericError, 'unknown user: %s' % username
+ set_user_status(user, koji.USER_STATUS['NORMAL'])
+
+ def disableUser(self, username):
+ """Disable logins by the specified user"""
+ user = get_user(username)
+ if not user:
+ raise koji.GenericError, 'unknown user: %s' % username
+ set_user_status(user, koji.USER_STATUS['BLOCKED'])
+
+ #group management calls
+ newGroup = staticmethod(new_group)
+ addGroupMember = staticmethod(add_group_member)
+ dropGroupMember = staticmethod(drop_group_member)
+ getGroupMembers = staticmethod(get_group_members)
+
+ def listUsers(self, userType=koji.USERTYPES['NORMAL'], prefix=None, queryOpts=None):
+ """List all users in the system.
+ userType can be either koji.USERTYPES['NORMAL']
+ or koji.USERTYPES['HOST']. Returns a list of maps with the
+ following keys:
+
+ - id
+ - name
+ - status
+ - usertype
+ - krb_principal
+
+ If no users of the specified
+ type exist, return an empty list."""
+ fields = ('id', 'name', 'status', 'usertype', 'krb_principal')
+ clauses = ['usertype = %(userType)i']
+ if prefix:
+ clauses.append("name ilike %(prefix)s || '%%'")
+ query = QueryProcessor(columns=fields, tables=('users',), clauses=clauses,
+ values=locals(), opts=queryOpts)
+ return query.execute()
+
+ def getBuildConfig(self,tag,event=None):
+ """Return build configuration associated with a tag"""
+ taginfo = get_tag(tag,strict=True,event=event)
+ order = readFullInheritance(taginfo['id'], event=event)
+ #follow inheritance for arches and extra
+ for link in order:
+ if link['noconfig']:
+ continue
+ ancestor = get_tag(link['parent_id'], strict=True, event=event)
+ if taginfo['arches'] is None and ancestor['arches'] is not None:
+ taginfo['arches'] = ancestor['arches']
+ for key in ancestor['extra']:
+ if key not in taginfo['extra']:
+ taginfo['extra'][key] = ancestor['extra'][key]
+ return taginfo
+
+ def getRepo(self,tag,state=None,event=None):
+ if isinstance(tag,int):
+ id = tag
+ else:
+ id = get_tag_id(tag,strict=True)
+
+ fields = ['repo.id', 'repo.state', 'repo.create_event', 'events.time', 'EXTRACT(EPOCH FROM events.time)']
+ aliases = ['id', 'state', 'create_event', 'creation_time', 'create_ts']
+ joins = ['events ON repo.create_event = events.id']
+ clauses = ['repo.tag_id = %(id)i']
+ if event:
+ # the repo table doesn't have all the fields of a _config table, just create_event
+ clauses.append('create_event <= %(event)i')
+ else:
+ if state is None:
+ state = koji.REPO_READY
+ clauses.append('repo.state = %(state)s' )
+
+ query = QueryProcessor(columns=fields, aliases=aliases,
+ tables=['repo'], joins=joins, clauses=clauses,
+ values=locals(),
+ opts={'order': '-creation_time', 'limit': 1})
+ return query.executeOne()
+
+ repoInfo = staticmethod(repo_info)
+ getActiveRepos = staticmethod(get_active_repos)
+
+ def newRepo(self, tag, event=None, src=False, debuginfo=False):
+ """Create a newRepo task. returns task id"""
+ if context.session.hasPerm('regen-repo'):
+ pass
+ else:
+ context.session.assertPerm('repo')
+ opts = {}
+ if event is not None:
+ opts['event'] = event
+ if src:
+ opts['src'] = True
+ if debuginfo:
+ opts['debuginfo'] = True
+ args = koji.encode_args(tag, **opts)
+ return make_task('newRepo', args, priority=15, channel='createrepo')
+
+ def repoExpire(self, repo_id):
+ """mark repo expired"""
+ context.session.assertPerm('repo')
+ repo_expire(repo_id)
+
+ def repoDelete(self, repo_id):
+ """Attempt to mark repo deleted, return number of references
+
+ If the number of references is nonzero, no change is made
+ Does not remove from disk"""
+ context.session.assertPerm('repo')
+ return repo_delete(repo_id)
+
+ def repoProblem(self, repo_id):
+ """mark repo as broken"""
+ context.session.assertPerm('repo')
+ repo_problem(repo_id)
+
+ def debugFunction(self, name, *args, **kwargs):
+ # This is potentially dangerous, so it must be explicitly enabled
+ allowed = context.opts.get('EnableFunctionDebug', False)
+ if not allowed:
+ raise koji.ActionNotAllowed, 'This call is not enabled'
+ context.session.assertPerm('admin')
+ func = globals().get(name)
+ if callable(func):
+ return func(*args, **kwargs)
+ else:
+ raise koji.GenericError, 'Unable to find function: %s' % name
+
+ tagChangedSinceEvent = staticmethod(tag_changed_since_event)
+ createBuildTarget = staticmethod(create_build_target)
+ editBuildTarget = staticmethod(edit_build_target)
+ deleteBuildTarget = staticmethod(delete_build_target)
+ getBuildTargets = staticmethod(get_build_targets)
+ getBuildTarget = staticmethod(get_build_target)
+
+ def taskFinished(self,taskId):
+ task = Task(taskId)
+ return task.isFinished()
+
+ def getTaskRequest(self, taskId):
+ task = Task(taskId)
+ return task.getRequest()
+
+ def getTaskResult(self, taskId):
+ task = Task(taskId)
+ return task.getResult()
+
+ def getTaskInfo(self, task_id, request=False):
+ """Get information about a task"""
+ single = True
+ if isinstance(task_id, list) or isinstance(task_id, tuple):
+ single = False
+ else:
+ task_id = [task_id]
+ ret = [Task(id).getInfo(False, request) for id in task_id]
+ if single:
+ return ret[0]
+ else:
+ return ret
+
+ def getTaskChildren(self, task_id, request=False):
+ """Return a list of the children
+ of the Task with the given ID."""
+ task = Task(task_id)
+ return task.getChildren(request=request)
+
+ def getTaskDescendents(self, task_id, request=False):
+ """Get all descendents of the task with the given ID.
+ Return a map of task_id -> list of child tasks. If the given
+ task has no descendents, the map will contain a single element
+ mapping the given task ID to an empty list. Map keys will be strings
+ representing integers, due to limitations in xmlrpclib. If "request"
+ is true, the parameters sent with the xmlrpc request will be decoded and
+ included in the map."""
+ task = Task(task_id)
+ return get_task_descendents(task, request=request)
+
+ def listTasks(self, opts=None, queryOpts=None):
+ """Return list of tasks filtered by options
+
+ Options(dictionary):
+ option[type]: meaning
+ arch[list]: limit to tasks for given arches
+ state[list]: limit to tasks of given state
+ owner[int]: limit to tasks owned by the user with the given ID
+ host_id[int]: limit to tasks running on the host with the given ID
+ channel_id[int]: limit to tasks in the specified channel
+ parent[int]: limit to tasks with the given parent
+ decode[bool]: whether or not xmlrpc data in the 'request' and 'result'
+ fields should be decoded; defaults to False
+ method[str]: limit to tasks of the given method
+ createdBefore[float or str]: limit to tasks whose create_time is before the
+ given date, in either float (seconds since the epoch)
+ or str (ISO) format
+ createdAfter[float or str]: limit to tasks whose create_time is after the
+ given date, in either float (seconds since the epoch)
+ or str (ISO) format
+ startedBefore[float or str]: limit to tasks whose start_time is before the
+ given date, in either float (seconds since the epoch)
+ or str (ISO) format
+ startedAfter[float or str]: limit to tasks whose start_time is after the
+ given date, in either float (seconds since the epoch)
+ or str (ISO) format
+ completeBefore[float or str]: limit to tasks whose completion_time is before
+ the given date, in either float (seconds since the epoch)
+ or str (ISO) format
+ completeAfter[float or str]: limit to tasks whose completion_time is after
+ the given date, in either float (seconds since the epoch)
+ or str (ISO) format
+ """
+ if not opts:
+ opts = {}
+ if not queryOpts:
+ queryOpts = {}
+
+ tables = ['task']
+ joins = ['users ON task.owner = users.id']
+ flist = Task.fields + (
+ ('task.request', 'request'),
+ ('task.result', 'result'),
+ ('users.name', 'owner_name'),
+ ('users.usertype', 'owner_type'),
+ )
+ fields = [f[0] for f in flist]
+ aliases = [f[1] for f in flist]
+
+ conditions = []
+ for f in ['arch','state']:
+ if opts.has_key(f):
+ conditions.append('%s IN %%(%s)s' % (f, f))
+ for f in ['owner', 'host_id', 'channel_id', 'parent']:
+ if opts.has_key(f):
+ if opts[f] is None:
+ conditions.append('%s IS NULL' % f)
+ else:
+ conditions.append('%s = %%(%s)i' % (f, f))
+ if opts.has_key('method'):
+ conditions.append('method = %(method)s')
+ time_opts = [
+ ['createdBefore', 'create_time', '<'],
+ ['createdAfter', 'create_time', '>'],
+ ['startedBefore', 'start_time', '<'],
+ ['startedAfter', 'start_time', '>'],
+ ['completeBefore', 'completion_time', '<'],
+ ['completeAfter', 'completion_time', '>'],
+ # and a couple aliases for api compat:
+ ['completedBefore', 'completion_time', '<'],
+ ['completedAfter', 'completion_time', '>'],
+ ]
+ for key, field, cmp in time_opts:
+ if opts.get(key) != None:
+ value = opts[key]
+ if not isinstance(value, str):
+ opts[key] = datetime.datetime.fromtimestamp(value).isoformat(' ')
+ conditions.append('%(field)s %(cmp)s %%(%(key)s)s' % locals())
+
+ query = QueryProcessor(columns=fields, aliases=aliases, tables=tables, joins=joins,
+ clauses=conditions, values=opts, opts=queryOpts)
+ tasks = query.iterate()
+ if queryOpts and (queryOpts.get('countOnly') or queryOpts.get('asList')):
+ # Either of the above options makes us unable to easily decode
+ # the xmlrpc data
+ return tasks
+
+ if opts.get('decode') and not queryOpts.get('countOnly'):
+ if queryOpts.get('asList'):
+ keys = []
+ for n, f in enumerate(aliases):
+ if f in ('request','result'):
+ keys.append(n)
+ else:
+ keys = ('request','result')
+ tasks = self._decode_tasks(tasks, keys)
+
+ return tasks
+
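+ # Illustrative usage sketch (not part of upstream): closed newRepo tasks from
+ # the last 24 hours, newest first ('session' as in the sketches above).
+ #
+ # import time
+ # opts = {'method': 'newRepo',
+ # 'state': [koji.TASK_STATES['CLOSED']],
+ # 'createdAfter': time.time() - 86400}
+ # tasks = session.listTasks(opts, {'order': '-create_time', 'limit': 10})
+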
+ def _decode_tasks(self, tasks, keys):
+ for task in tasks:
+ # decode xmlrpc data
+ for f in keys:
+ val = task[f]
+ if val:
+ try:
+ if val.find('<?xml', 0, 10) == -1:
+ #handle older base64 encoded data
+ val = base64.decodestring(val)
+ data, method = xmlrpclib.loads(val)
+ except xmlrpclib.Fault, fault:
+ data = fault
+ task[f] = data
+ yield task
+
+ def taskReport(self, owner=None):
+ """Return data on active or recent tasks"""
+ fields = (
+ ('task.id','id'),
+ ('task.state','state'),
+ ('task.create_time','create_time'),
+ ('task.completion_time','completion_time'),
+ ('task.channel_id','channel_id'),
+ ('channels.name','channel'),
+ ('task.host_id','host_id'),
+ ('host.name','host'),
+ ('task.parent','parent'),
+ ('task.waiting','waiting'),
+ ('task.awaited','awaited'),
+ ('task.method','method'),
+ ('task.arch','arch'),
+ ('task.priority','priority'),
+ ('task.weight','weight'),
+ ('task.owner','owner_id'),
+ ('users.name','owner'),
+ ('build.id','build_id'),
+ ('package.name','build_name'),
+ ('build.version','build_version'),
+ ('build.release','build_release'),
+ )
+ q = """SELECT %s FROM task
+ JOIN channels ON task.channel_id = channels.id
+ JOIN users ON task.owner = users.id
+ LEFT OUTER JOIN host ON task.host_id = host.id
+ LEFT OUTER JOIN build ON build.task_id = task.id
+ LEFT OUTER JOIN package ON build.pkg_id = package.id
+ WHERE (task.state NOT IN (%%(CLOSED)d,%%(CANCELED)d,%%(FAILED)d)
+ OR NOW() - task.create_time < '1 hour'::interval)
+ """ % ','.join([f[0] for f in fields])
+ if owner:
+ q += """AND users.id = %s
+ """ % get_user(owner, strict=True)['id']
+ q += """ORDER BY priority,create_time
+ """
+ #XXX hard-coded interval
+ c = context.cnx.cursor()
+ c.execute(q,koji.TASK_STATES)
+ return [dict(zip([f[1] for f in fields],row)) for row in c.fetchall()]
+
+ def resubmitTask(self, taskID):
+ """Retry a canceled or failed task, using the same parameter as the original task.
+ The logged-in user must be the owner of the original task or an admin."""
+ task = Task(taskID)
+ if not (task.isCanceled() or task.isFailed()):
+ raise koji.GenericError, 'only canceled or failed tasks may be resubmitted'
+ taskInfo = task.getInfo()
+ if taskInfo['parent'] != None:
+ raise koji.GenericError, 'only top-level tasks may be resubmitted'
+ if not (context.session.user_id == taskInfo['owner'] or self.hasPerm('admin')):
+ raise koji.GenericError, 'only the task owner or an admin may resubmit a task'
+
+ args = task.getRequest()
+ channel = get_channel(taskInfo['channel_id'], strict=True)
+
+ return make_task(taskInfo['method'], args, arch=taskInfo['arch'], channel=channel['name'], priority=taskInfo['priority'])
+
+ def addHost(self, hostname, arches, krb_principal=None):
+ """Add a host to the database"""
+ context.session.assertPerm('admin')
+ if get_host(hostname):
+ raise koji.GenericError, 'host already exists: %s' % hostname
+ q = """SELECT id FROM channels WHERE name = 'default'"""
+ default_channel = _singleValue(q)
+ if krb_principal is None:
+ fmt = context.opts.get('HostPrincipalFormat')
+ if fmt:
+ krb_principal = fmt % hostname
+ #users entry
+ userID = context.session.createUser(hostname, usertype=koji.USERTYPES['HOST'],
+ krb_principal=krb_principal)
+ #host entry
+ hostID = _singleValue("SELECT nextval('host_id_seq')", strict=True)
+ arches = " ".join(arches)
+ insert = """INSERT INTO host (id, user_id, name, arches)
+ VALUES (%(hostID)i, %(userID)i, %(hostname)s, %(arches)s)"""
+ _dml(insert, locals())
+ #host_channels entry
+ insert = """INSERT INTO host_channels (host_id, channel_id)
+ VALUES (%(hostID)i, %(default_channel)i)"""
+ _dml(insert, locals())
+ return hostID
+
+ def enableHost(self, hostname):
+ """Mark a host as enabled"""
+ set_host_enabled(hostname, True)
+
+ def disableHost(self, hostname):
+ """Mark a host as disabled"""
+ set_host_enabled(hostname, False)
+
+ getHost = staticmethod(get_host)
+ editHost = staticmethod(edit_host)
+ addHostToChannel = staticmethod(add_host_to_channel)
+ removeHostFromChannel = staticmethod(remove_host_from_channel)
+ renameChannel = staticmethod(rename_channel)
+ removeChannel = staticmethod(remove_channel)
+
+ def listHosts(self, arches=None, channelID=None, ready=None, enabled=None, userID=None, queryOpts=None):
+ """Get a list of hosts. "arches" is a list of string architecture
+ names, e.g. ['i386', 'ppc64']. If one of the arches associated with a given
+ host appears in the list, it will be included in the results. If "ready" and "enabled"
+ are specified, only hosts with the given value for the respective field will
+ be included."""
+ fields = ('id', 'user_id', 'name', 'arches', 'task_load',
+ 'capacity', 'description', 'comment', 'ready', 'enabled')
+
+ clauses = []
+ joins = []
+ if arches != None:
+ # include the regex constraints below so we can match 'ppc' without
+ # matching 'ppc64'
+ if not (isinstance(arches, list) or isinstance(arches, tuple)):
+ arches = [arches]
+ archClause = [r"""arches ~ E'\\m%s\\M'""" % arch for arch in arches]
+ clauses.append('(' + ' OR '.join(archClause) + ')')
+ if channelID != None:
+ joins.append('host_channels on host.id = host_channels.host_id')
+ clauses.append('host_channels.channel_id = %(channelID)i')
+ if ready != None:
+ if ready:
+ clauses.append('ready is true')
+ else:
+ clauses.append('ready is false')
+ if enabled != None:
+ if enabled:
+ clauses.append('enabled is true')
+ else:
+ clauses.append('enabled is false')
+ if userID != None:
+ clauses.append('user_id = %(userID)i')
+
+ query = QueryProcessor(columns=fields, tables=['host'],
+ joins=joins, clauses=clauses,
+ values=locals(), opts=queryOpts)
+ return query.execute()
+
+ def getLastHostUpdate(self, hostID):
+ """Return the latest update timestampt for the host
+
+ The timestamp represents the last time the host with the given
+ ID contacted the hub. Returns None if the host has never contacted
+ the hub."""
+ query = """SELECT update_time FROM sessions
+ JOIN host ON sessions.user_id = host.user_id
+ WHERE host.id = %(hostID)i
+ ORDER BY update_time DESC
+ LIMIT 1
+ """
+ return _singleValue(query, locals(), strict=False)
+
+ getAllArches = staticmethod(get_all_arches)
+
+ getChannel = staticmethod(get_channel)
+ listChannels=staticmethod(list_channels)
+
+ getBuildroot=staticmethod(get_buildroot)
+
+ def getBuildrootListing(self,id):
+ """Return a list of packages in the buildroot"""
+ br = BuildRoot(id)
+ return br.getList()
+
+ listBuildroots = staticmethod(query_buildroots)
+
+ def hasPerm(self, perm):
+ """Check if the logged-in user has the given permission. Return False if
+ they do not have the permission, or if they are not logged-in."""
+ return context.session.hasPerm(perm)
+
+ def getPerms(self):
+ """Get a list of the permissions granted to the currently logged-in user."""
+ return context.session.getPerms()
+
+ def getUserPerms(self, userID):
+ """Get a list of the permissions granted to the user with the given ID."""
+ return koji.auth.get_user_perms(userID)
+
+ def getAllPerms(self):
+ """Get a list of all permissions in the system. Returns a list of maps. Each
+ map contains the following keys:
+
+ - id
+ - name
+ """
+ query = """SELECT id, name FROM permissions
+ ORDER BY id"""
+
+ return _multiRow(query, {}, ['id', 'name'])
+
+ def getLoggedInUser(self):
+ """Return information about the currently logged-in user. Returns data
+ in the same format as getUser(). If there is no currently logged-in user,
+ return None."""
+ if context.session.logged_in:
+ return self.getUser(context.session.user_id)
+ else:
+ return None
+
+ def setBuildOwner(self, build, user):
+ context.session.assertPerm('admin')
+ buildinfo = get_build(build)
+ if not buildinfo:
+ raise koji.GenericError, 'build does not exist: %s' % build
+ userinfo = get_user(user)
+ if not userinfo:
+ raise koji.GenericError, 'user does not exist: %s' % user
+ userid = userinfo['id']
+ buildid = buildinfo['id']
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='owner_id', old=buildinfo['owner_id'], new=userid, info=buildinfo)
+ q = """UPDATE build SET owner=%(userid)i WHERE id=%(buildid)i"""
+ _dml(q,locals())
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='owner_id', old=buildinfo['owner_id'], new=userid, info=buildinfo)
+
+ def setBuildTimestamp(self, build, ts):
+ """Set the completion time for a build
+
+ build should be a valid nvr or build id
+ ts should be # of seconds since epoch or optionally an
+ xmlrpc DateTime value"""
+ context.session.assertPerm('admin')
+ buildinfo = get_build(build)
+ if not buildinfo:
+ raise koji.GenericError, 'build does not exist: %s' % build
+ elif isinstance(ts, xmlrpclib.DateTime):
+ #not recommended
+ #the xmlrpclib.DateTime class is almost useless
+ try:
+ ts = time.mktime(time.strptime(str(ts),'%Y%m%dT%H:%M:%S'))
+ except ValueError:
+ raise koji.GenericError, "Invalid time: %s" % ts
+ elif not isinstance(ts, (int, long, float)):
+ raise koji.GenericError, "Invalid type for timestamp"
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='completion_ts', old=buildinfo['completion_ts'], new=ts, info=buildinfo)
+ buildid = buildinfo['id']
+ q = """UPDATE build
+ SET completion_time=TIMESTAMP 'epoch' AT TIME ZONE 'utc' + '%(ts)f seconds'::interval
+ WHERE id=%%(buildid)i""" % locals()
+ _dml(q,locals())
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='completion_ts', old=buildinfo['completion_ts'], new=ts, info=buildinfo)
+
+ def count(self, methodName, *args, **kw):
+ """Execute the XML-RPC method with the given name and count the results.
+ A method return value of None will return 0, a return value of type "list", "tuple", or "dict"
+ will return len(value), and a return value of any other type will return 1. An invalid
+ methodName will raise an AttributeError, and invalid arguments will raise a TypeError."""
+ result = getattr(self, methodName)(*args, **kw)
+ if result == None:
+ return 0
+ elif isinstance(result, list) or isinstance(result, tuple) or isinstance(result, dict):
+ return len(result)
+ else:
+ return 1
+
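+ # Illustrative usage sketch (not part of upstream): counting the results of
+ # another call without transferring them all, e.g. the RPMs of a build:
+ #
+ # n = session.count('listBuildRPMs', build_id)
+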
+ def _sortByKeyFunc(self, key, noneGreatest=True):
+ """Return a function to sort a list of maps by the given key.
+ If the key starts with '-', sort in reverse order. If noneGreatest
+ is True, None will sort higher than all other values (instead of lower).
+ """
+ if noneGreatest:
+ # Normally None evaluates to be less than every other value
+ # Invert the comparison so it always evaluates to greater
+ cmpFunc = lambda a, b: (a is None or b is None) and -(cmp(a, b)) or cmp(a, b)
+ else:
+ cmpFunc = cmp
+
+ if key.startswith('-'):
+ key = key[1:]
+ return lambda a, b: cmpFunc(b[key], a[key])
+ else:
+ return lambda a, b: cmpFunc(a[key], b[key])
+
+ def filterResults(self, methodName, *args, **kw):
+ """Execute the XML-RPC method with the given name and filter the results
+ based on the options specified in the keyword option "filterOpts". The method
+ must return a list of maps. Any other return type will result in a TypeError.
+ Currently supported options are:
+ - offset: the number of elements to trim off the front of the list
+ - limit: the maximum number of results to return
+ - order: the map key to use to sort the list; the list will be sorted before
+ offset or limit are applied
+ - noneGreatest: when sorting, consider 'None' to be greater than all other values;
+ python considers None less than all other values, but Postgres sorts
+ NULL higher than all other values; defaults to True for consistency
+ with database sorts
+ """
+ filterOpts = kw.pop('filterOpts', {})
+
+ results = getattr(self, methodName)(*args, **kw)
+ if results is None:
+ return None
+ elif not isinstance(results, list):
+ raise TypeError, '%s() did not return a list' % methodName
+
+ order = filterOpts.get('order')
+ if order:
+ results.sort(self._sortByKeyFunc(order, filterOpts.get('noneGreatest', True)))
+
+ offset = filterOpts.get('offset')
+ if offset is not None:
+ results = results[offset:]
+ limit = filterOpts.get('limit')
+ if limit is not None:
+ results = results[:limit]
+
+ return results
+
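+ # Illustrative usage sketch (not part of upstream): sorting and paging the
+ # result of a list-returning call on the hub side; note the named method must
+ # return a list, as the docstring above requires.
+ #
+ # hosts = session.filterResults('listHosts',
+ # filterOpts={'order': 'name', 'limit': 5})
+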
+ def getBuildNotifications(self, userID=None):
+ """Get build notifications for the user with the given ID. If no ID
+ is specified, get the notifications for the currently logged-in user. If
+ there is no currently logged-in user, raise a GenericError."""
+ if userID is None:
+ user = self.getLoggedInUser()
+ if user is None:
+ raise koji.GenericError, 'not logged-in'
+ else:
+ userID = user['id']
+ return get_build_notifications(userID)
+
+ def getBuildNotification(self, id):
+ """Get the build notification with the given ID. Return None
+ if there is no notification with the given ID."""
+ fields = ('id', 'user_id', 'package_id', 'tag_id', 'success_only', 'email')
+ query = """SELECT %s
+ FROM build_notifications
+ WHERE id = %%(id)i
+ """ % ', '.join(fields)
+ return _singleRow(query, locals(), fields)
+
+ def updateNotification(self, id, package_id, tag_id, success_only):
+ """Update an existing build notification with new data. If the notification
+ with the given ID doesn't exist, or the currently logged-in user is not the
+ owner of the notification or an admin, raise a GenericError."""
+ currentUser = self.getLoggedInUser()
+ if not currentUser:
+ raise koji.GenericError, 'not logged-in'
+
+ orig_notif = self.getBuildNotification(id)
+ if not orig_notif:
+ raise koji.GenericError, 'no notification with ID: %i' % id
+ elif not (orig_notif['user_id'] == currentUser['id'] or
+ self.hasPerm('admin')):
+ raise koji.GenericError, 'user %i cannot update notifications for user %i' % \
+ (currentUser['id'], orig_notif['user_id'])
+
+ update = """UPDATE build_notifications
+ SET package_id = %(package_id)s,
+ tag_id = %(tag_id)s,
+ success_only = %(success_only)s
+ WHERE id = %(id)i
+ """
+
+ _dml(update, locals())
+
+ def createNotification(self, user_id, package_id, tag_id, success_only):
+ """Create a new notification. If the user_id does not match the currently logged-in user
+ and the currently logged-in user is not an admin, raise a GenericError."""
+ currentUser = self.getLoggedInUser()
+ if not currentUser:
+ raise koji.GenericError, 'not logged in'
+
+ notificationUser = self.getUser(user_id)
+ if not notificationUser:
+ raise koji.GenericError, 'invalid user ID: %s' % user_id
+
+ if not (notificationUser['id'] == currentUser['id'] or self.hasPerm('admin')):
+ raise koji.GenericError, 'user %s cannot create notifications for user %s' % \
+ (currentUser['name'], notificationUser['name'])
+
+ email = '%s@%s' % (notificationUser['name'], context.opts['EmailDomain'])
+ insert = """INSERT INTO build_notifications
+ (user_id, package_id, tag_id, success_only, email)
+ VALUES
+ (%(user_id)i, %(package_id)s, %(tag_id)s, %(success_only)s, %(email)s)
+ """
+ _dml(insert, locals())
+
+ def deleteNotification(self, id):
+ """Delete the notification with the given ID. If the currently logged-in
+ user is not the owner of the notification or an admin, raise a GenericError."""
+ notification = self.getBuildNotification(id)
+ if not notification:
+ raise koji.GenericError, 'no notification with ID: %i' % id
+ currentUser = self.getLoggedInUser()
+ if not currentUser:
+ raise koji.GenericError, 'not logged-in'
+
+ if not (notification['user_id'] == currentUser['id'] or
+ self.hasPerm('admin')):
+ raise koji.GenericError, 'user %i cannot delete notifications for user %i' % \
+ (currentUser['id'], notification['user_id'])
+ delete = """DELETE FROM build_notifications WHERE id = %(id)i"""
+ _dml(delete, locals())
+
+ def _prepareSearchTerms(self, terms, matchType):
+ r"""Process the search terms before passing them to the database.
+ If matchType is "glob", "_" will be replaced with "\_" (to match literal
+ underscores), "?" will be replaced with "_", and "*" will
+ be replaced with "%". If matchType is "regexp", no changes will be
+ made."""
+ if matchType == 'glob':
+ return terms.replace('\\', '\\\\').replace('_', r'\_').replace('?', '_').replace('*', '%')
+ else:
+ return terms
+
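+ # For example, with matchType='glob' the terms 'python-2.?*' become
+ # 'python-2._%' before being passed into the ilike clause built in search()
+ # below.
+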
+ _searchTables = {'package': 'package',
+ 'build': 'build',
+ 'tag': 'tag',
+ 'target': 'build_target',
+ 'user': 'users',
+ 'host': 'host',
+ 'rpm': 'rpminfo',
+ 'maven': 'archiveinfo',
+ 'win': 'archiveinfo'}
+
+ def search(self, terms, type, matchType, queryOpts=None):
+ """Search for an item in the database matching "terms".
+ "type" specifies what object type to search for, and must be
+ one of "package", "build", "tag", "target", "user", "host",
+ "rpm", "maven", or "win". "matchType" specifies the type of search to
+ perform, and must be one of "glob" or "regexp". All searches
+ are case-insensitive. A list of maps containing "id" and
+ "name" will be returned. If no matches are found, an empty
+ list will be returned."""
+ if not terms:
+ raise koji.GenericError, 'empty search terms'
+ if type == 'file':
+ # searching by filename is no longer supported
+ return _applyQueryOpts([], queryOpts)
+ table = self._searchTables.get(type)
+ if not table:
+ raise koji.GenericError, 'unknown search type: %s' % type
+
+ if matchType == 'glob':
+ oper = 'ilike'
+ elif matchType == 'regexp':
+ oper = '~*'
+ else:
+ oper = '='
+
+ terms = self._prepareSearchTerms(terms, matchType)
+
+ cols = ('id', 'name')
+ aliases = cols
+ joins = []
+ if type == 'build':
+ joins.append('package ON build.pkg_id = package.id')
+ clause = "package.name || '-' || build.version || '-' || build.release %s %%(terms)s" % oper
+ cols = ('build.id', "package.name || '-' || build.version || '-' || build.release")
+ elif type == 'rpm':
+ clause = "name || '-' || version || '-' || release || '.' || arch || '.rpm' %s %%(terms)s" % oper
+ cols = ('id', "name || '-' || version || '-' || release || '.' || arch || '.rpm'")
+ elif type == 'tag':
+ joins.append('tag_config ON tag.id = tag_config.tag_id')
+ clause = 'tag_config.active = TRUE and name %s %%(terms)s' % oper
+ elif type == 'target':
+ joins.append('build_target_config ON build_target.id = build_target_config.build_target_id')
+ clause = 'build_target_config.active = TRUE and name %s %%(terms)s' % oper
+ elif type == 'maven':
+ cols = ('id', 'filename')
+ joins.append('maven_archives ON archiveinfo.id = maven_archives.archive_id')
+ clause = "archiveinfo.filename %s %%(terms)s or maven_archives.group_id || '-' || " \
+ "maven_archives.artifact_id || '-' || maven_archives.version %s %%(terms)s" % (oper, oper)
+ elif type == 'win':
+ cols = ('id', "trim(leading '/' from win_archives.relpath || '/' || archiveinfo.filename)")
+ joins.append('win_archives ON archiveinfo.id = win_archives.archive_id')
+ clause = "archiveinfo.filename %s %%(terms)s or win_archives.relpath || '/' || " \
+ "archiveinfo.filename %s %%(terms)s" % (oper, oper)
+ else:
+ clause = 'name %s %%(terms)s' % oper
+
+ query = QueryProcessor(columns=cols,
+ aliases=aliases, tables=(table,),
+ joins=joins, clauses=(clause,),
+ values=locals(), opts=queryOpts)
+ return query.iterate()
+
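+ # Illustrative usage sketch (not part of upstream): a case-insensitive glob
+ # search for packages ('session' as in the sketches above):
+ #
+ # for match in session.search('kernel*', 'package', 'glob'):
+ # print match['id'], match['name']
+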
+
+class BuildRoot(object):
+
+ def __init__(self,id=None):
+ if id is None:
+ #db entry has yet to be created
+ self.id = None
+ else:
+ logging.getLogger("koji.hub").debug("BuildRoot id: %s" % id)
+ #load buildroot data
+ self.load(id)
+
+ def load(self,id):
+ fields = ('id', 'host_id', 'repo_id', 'arch', 'task_id',
+ 'create_event', 'retire_event', 'state')
+ q = """SELECT %s FROM buildroot WHERE id=%%(id)i""" % (",".join(fields))
+ data = _singleRow(q,locals(),fields,strict=False)
+ if data == None:
+ raise koji.GenericError, 'no buildroot with ID: %i' % id
+ self.id = id
+ self.data = data
+
+ def new(self, host, repo, arch, task_id=None):
+ state = koji.BR_STATES['INIT']
+ id = _singleValue("SELECT nextval('buildroot_id_seq')", strict=True)
+ q = """INSERT INTO buildroot(id,host_id,repo_id,arch,state,task_id)
+ VALUES (%(id)i,%(host)i,%(repo)i,%(arch)s,%(state)i,%(task_id)s)"""
+ _dml(q,locals())
+ self.load(id)
+ return self.id
+
+ def verifyTask(self,task_id):
+ if self.id is None:
+ raise koji.GenericError, "buildroot not specified"
+ return (task_id == self.data['task_id'])
+
+ def assertTask(self,task_id):
+ if not self.verifyTask(task_id):
+ raise koji.ActionNotAllowed, 'Task %s does not have lock on buildroot %s' \
+ %(task_id,self.id)
+
+ def verifyHost(self,host_id):
+ if self.id is None:
+ raise koji.GenericError, "buildroot not specified"
+ return (host_id == self.data['host_id'])
+
+ def assertHost(self,host_id):
+ if not self.verifyHost(host_id):
+ raise koji.ActionNotAllowed, "Host %s not owner of buildroot %s" \
+ % (host_id,self.id)
+
+ def setState(self,state):
+ if self.id is None:
+ raise koji.GenericError, "buildroot not specified"
+ id = self.id
+ if isinstance(state,str):
+ state = koji.BR_STATES[state]
+ #sanity checks
+ if state == koji.BR_STATES['INIT']:
+ #we do not re-init buildroots
+ raise koji.GenericError, "Cannot change buildroot state to INIT"
+ q = """SELECT state,retire_event FROM buildroot WHERE id=%(id)s FOR UPDATE"""
+ lstate,retire_event = _fetchSingle(q,locals(),strict=True)
+ if koji.BR_STATES[lstate] == 'EXPIRED':
+ #we will quietly ignore a request to expire an expired buildroot
+ #otherwise this is an error
+ if state == lstate:
+ return
+ else:
+ raise koji.GenericError, "buildroot %i is EXPIRED" % id
+ set = "state=%(state)s"
+ if koji.BR_STATES[state] == 'EXPIRED':
+ set += ",retire_event=get_event()"
+ update = """UPDATE buildroot SET %s WHERE id=%%(id)s""" % set
+ _dml(update,locals())
+ self.data['state'] = state
+
+ def getList(self):
+ if self.id is None:
+ raise koji.GenericError, "buildroot not specified"
+ brootid = self.id
+ fields = (
+ ('rpm_id', 'rpm_id'),
+ ('is_update', 'is_update'),
+ ('rpminfo.name', 'name'),
+ ('version', 'version'),
+ ('release', 'release'),
+ ('epoch', 'epoch'),
+ ('arch', 'arch'),
+ ('build_id', 'build_id'),
+ ('external_repo_id', 'external_repo_id'),
+ ('external_repo.name', 'external_repo_name'),
+ )
+ query = QueryProcessor(columns=[f[0] for f in fields], aliases=[f[1] for f in fields],
+ tables=['buildroot_listing'],
+ joins=["rpminfo ON rpm_id = rpminfo.id", "external_repo ON external_repo_id = external_repo.id"],
+ clauses=["buildroot_listing.buildroot_id = %(brootid)i"],
+ values=locals())
+ return query.execute()
+
+ def _setList(self,rpmlist,update=False):
+ """Set or update the list of rpms in a buildroot"""
+ if self.id is None:
+ raise koji.GenericError, "buildroot not specified"
+ brootid = self.id
+ if update:
+ current = dict([(r['rpm_id'],1) for r in self.getList()])
+ q = """INSERT INTO buildroot_listing (buildroot_id,rpm_id,is_update)
+ VALUES (%(brootid)s,%(rpm_id)s,%(update)s)"""
+ rpm_ids = []
+ for an_rpm in rpmlist:
+ location = an_rpm.get('location')
+ if location:
+ data = add_external_rpm(an_rpm, location, strict=False)
+ #will add if missing, compare if not
+ else:
+ data = get_rpm(an_rpm, strict=True)
+ rpm_id = data['id']
+ if update and current.has_key(rpm_id):
+ #ignore duplicate packages for updates
+ continue
+ rpm_ids.append(rpm_id)
+ #we sort to try to avoid deadlock issues
+ rpm_ids.sort()
+ for rpm_id in rpm_ids:
+ _dml(q, locals())
+
+ def setList(self,rpmlist):
+ """Set the initial list of rpms in a buildroot"""
+ if self.data['state'] != koji.BR_STATES['INIT']:
+ raise koji.GenericError, "buildroot %(id)s in wrong state %(state)s" % self.data
+ self._setList(rpmlist,update=False)
+
+ def updateList(self,rpmlist):
+ """Update the list of packages in a buildroot"""
+ if self.data['state'] != koji.BR_STATES['BUILDING']:
+ raise koji.GenericError, "buildroot %(id)s in wrong state %(state)s" % self.data
+ self._setList(rpmlist,update=True)
+
+ def getArchiveList(self, queryOpts=None):
+ """Get the list of archives in the buildroot"""
+ tables = ('archiveinfo',)
+ joins = ('buildroot_archives ON archiveinfo.id = buildroot_archives.archive_id',)
+ clauses = ('buildroot_archives.buildroot_id = %(id)i',)
+ fields = [('id', 'id'),
+ ('type_id', 'type_id'),
+ ('build_id', 'build_id'),
+ ('archiveinfo.buildroot_id', 'buildroot_id'),
+ ('filename', 'filename'),
+ ('size', 'size'),
+ ('checksum', 'checksum'),
+ ('checksum_type', 'checksum_type'),
+ ('project_dep', 'project_dep'),
+ ]
+ columns, aliases = zip(*fields)
+ query = QueryProcessor(tables=tables, columns=columns,
+ joins=joins, clauses=clauses,
+ values=self.data,
+ opts=queryOpts)
+ return query.execute()
+
+ def updateArchiveList(self, archives, project=False):
+ """Update the list of archives in a buildroot.
+ If project is True, the archives are project dependencies. If False, they are dependencies required to set up the
+ build environment."""
+ if not (context.opts.get('EnableMaven') or context.opts.get('EnableWin')):
+ raise koji.GenericError, "non-rpm support is not enabled"
+ if self.data['state'] != koji.BR_STATES['BUILDING']:
+ raise koji.GenericError, "buildroot %(id)s in wrong state %(state)s" % self.data
+ archives = set([r['id'] for r in archives])
+ current = set([r['id'] for r in self.getArchiveList()])
+ new_archives = archives.difference(current)
+ insert = """INSERT INTO buildroot_archives (buildroot_id, archive_id, project_dep)
+ VALUES
+ (%(broot_id)i, %(archive_id)i, %(project)s)"""
+ broot_id = self.id
+ for archive_id in sorted(new_archives):
+ _dml(insert, locals())
+
+class Host(object):
+
+ def __init__(self,id=None):
+ remote_id = context.session.getHostId()
+ if id is None:
+ id = remote_id
+ if id is None:
+ if context.session.logged_in:
+ raise koji.AuthError, "User %i is not a host" % context.session.user_id
+ else:
+ raise koji.AuthError, "Not logged in"
+ self.id = id
+ self.same_host = (id == remote_id)
+
+ def verify(self):
+ """Verify that the remote host matches and has the lock"""
+ if not self.same_host:
+ raise koji.AuthError, "Host mismatch"
+ if not context.session.exclusive:
+ raise koji.AuthError, "This method requires an exclusive session"
+ return True
+
+ def taskUnwait(self,parent):
+ """Clear wait data for task"""
+ c = context.cnx.cursor()
+ #unwait the task
+ q = """UPDATE task SET waiting='false' WHERE id = %(parent)s"""
+ context.commit_pending = True
+ c.execute(q,locals())
+ #...and un-await its subtasks
+ q = """UPDATE task SET awaited='false' WHERE parent=%(parent)s"""
+ c.execute(q,locals())
+
+ def taskSetWait(self,parent,tasks):
+ """Mark task waiting and subtasks awaited"""
+ self.taskUnwait(parent)
+ c = context.cnx.cursor()
+ #mark tasks awaited
+ q = """UPDATE task SET waiting='true' WHERE id=%(parent)s"""
+ context.commit_pending = True
+ c.execute(q,locals())
+ if tasks is None:
+ #wait on all subtasks
+ q = """UPDATE task SET awaited='true' WHERE parent=%(parent)s"""
+ c.execute(q,locals())
+ else:
+ for id in tasks:
+ q = """UPDATE task SET awaited='true' WHERE id=%(id)s"""
+ c.execute(q,locals())
+
+ def taskWaitCheck(self,parent):
+ """Return status of awaited subtask
+
+ The return value is [finished, unfinished] where each entry
+ is a list of task ids."""
+ #check to see if any of the tasks have finished
+ c = context.cnx.cursor()
+ q = """
+ SELECT id,state FROM task
+ WHERE parent=%(parent)s AND awaited = TRUE
+ FOR UPDATE"""
+ c.execute(q,locals())
+ canceled = koji.TASK_STATES['CANCELED']
+ closed = koji.TASK_STATES['CLOSED']
+ failed = koji.TASK_STATES['FAILED']
+ finished = []
+ unfinished = []
+ for id,state in c.fetchall():
+ if state in (canceled,closed,failed):
+ finished.append(id)
+ else:
+ unfinished.append(id)
+ return finished, unfinished
+
+ def taskWait(self,parent):
+ """Return task results or mark tasks as waited upon"""
+ finished, unfinished = self.taskWaitCheck(parent)
+ # un-await finished tasks
+ if finished:
+ context.commit_pending = True
+ for id in finished:
+ c = context.cnx.cursor()
+ q = """UPDATE task SET awaited='false' WHERE id=%(id)s"""
+ c.execute(q,locals())
+ return [finished,unfinished]
+
+ def taskWaitResults(self,parent,tasks):
+ results = {}
+ #if we're getting results, we're done waiting
+ self.taskUnwait(parent)
+ c = context.cnx.cursor()
+ canceled = koji.TASK_STATES['CANCELED']
+ closed = koji.TASK_STATES['CLOSED']
+ failed = koji.TASK_STATES['FAILED']
+ q = """
+ SELECT id,state FROM task
+ WHERE parent=%(parent)s"""
+ if tasks is None:
+ #query all subtasks
+ tasks = []
+ c.execute(q,locals())
+ for id,state in c.fetchall():
+ if state == canceled:
+ raise koji.GenericError, "Subtask canceled"
+ elif state in (closed,failed):
+ tasks.append(id)
+ #would use a dict, but xmlrpc requires the keys to be strings
+ results = []
+ for id in tasks:
+ task = Task(id)
+ results.append([id,task.getResult()])
+ return results
+
+ def getHostTasks(self):
+ """get status of open tasks assigned to host"""
+ c = context.cnx.cursor()
+ host_id = self.id
+ #query tasks
+ fields = ['id','waiting','weight']
+ st_open = koji.TASK_STATES['OPEN']
+ q = """
+ SELECT %s FROM task
+ WHERE host_id = %%(host_id)s AND state = %%(st_open)s
+ """ % (",".join(fields))
+ c.execute(q,locals())
+ tasks = [ dict(zip(fields,x)) for x in c.fetchall() ]
+ for task in tasks:
+ id = task['id']
+ if task['waiting']:
+ finished, unfinished = self.taskWaitCheck(id)
+ if finished:
+ task['alert'] = True
+ return tasks
+
+ def updateHost(self,task_load,ready):
+ host_data = get_host(self.id)
+ if task_load != host_data['task_load'] or ready != host_data['ready']:
+ c = context.cnx.cursor()
+ id = self.id
+ q = """UPDATE host SET task_load=%(task_load)s,ready=%(ready)s WHERE id=%(id)s"""
+ c.execute(q,locals())
+ context.commit_pending = True
+
+ def getLoadData(self):
+ """Get load balancing data
+
+ This data is relatively small and the necessary load analysis is
+ relatively complex, so we let the host machines crunch it."""
+ hosts = get_ready_hosts()
+ for host in hosts:
+ if host['id'] == self.id:
+ break
+ else:
+ #this host not in ready list
+ return [[], []]
+ #host is the host making the call
+ tasks = get_active_tasks(host)
+ return [hosts, tasks]
+
+ def getTask(self):
+ """Open next available task and return it"""
+ c = context.cnx.cursor()
+ id = self.id
+ #get arch and channel info for host
+ q = """
+ SELECT arches FROM host WHERE id = %(id)s
+ """
+ c.execute(q,locals())
+ arches = c.fetchone()[0].split()
+ q = """
+ SELECT channel_id FROM host_channels WHERE host_id = %(id)s
+ """
+ c.execute(q,locals())
+ channels = [ x[0] for x in c.fetchall() ]
+
+ #query tasks
+ fields = ['id', 'state', 'method', 'request', 'channel_id', 'arch', 'parent']
+ st_free = koji.TASK_STATES['FREE']
+ st_assigned = koji.TASK_STATES['ASSIGNED']
+ q = """
+ SELECT %s FROM task
+ WHERE (state = %%(st_free)s)
+ OR (state = %%(st_assigned)s AND host_id = %%(id)s)
+ ORDER BY priority,create_time
+ """ % (",".join(fields))
+ c.execute(q,locals())
+ for data in c.fetchall():
+ data = dict(zip(fields,data))
+ # XXX - we should do some pruning here, but for now...
+ # check arch
+ if data['arch'] not in arches:
+ continue
+ # NOTE: channels ignored for explicit assignments
+ if data['state'] != st_assigned and data['channel_id'] not in channels:
+ continue
+ task = Task(data['id'])
+ ret = task.open(self.id)
+ if ret is None:
+ #someone else got it while we were looking
+ #log_error("task %s seems to be locked" % task['id'])
+ continue
+ return ret
+ #else no appropriate tasks
+ return None
+
+ def isEnabled(self):
+ """Return whether this host is enabled or not."""
+ query = """SELECT enabled FROM host WHERE id = %(id)i"""
+ return _singleValue(query, {'id': self.id}, strict=True)
+
+class HostExports(object):
+ '''Contains functions that are made available via XMLRPC'''
+
+ def getID(self):
+ host = Host()
+ host.verify()
+ return host.id
+
+ def updateHost(self,task_load,ready):
+ host = Host()
+ host.verify()
+ host.updateHost(task_load,ready)
+
+ def getLoadData(self):
+ host = Host()
+ host.verify()
+ return host.getLoadData()
+
+ def getHost(self):
+ """Return information about this host"""
+ host = Host()
+ host.verify()
+ return get_host(host.id)
+
+ def openTask(self,task_id):
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ return task.open(host.id)
+
+ def getTask(self):
+ host = Host()
+ host.verify()
+ return host.getTask()
+
+ def closeTask(self,task_id,response):
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ return task.close(response)
+
+ def failTask(self,task_id,response):
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ return task.fail(response)
+
+ def freeTasks(self,tasks):
+ host = Host()
+ host.verify()
+ for task_id in tasks:
+ task = Task(task_id)
+ if not task.verifyHost(host.id):
+ #it's possible that a task was freed/reassigned since the host
+ #last checked, so we should not raise an error
+ continue
+ task.free()
+ #XXX - unfinished
+ #remove any files related to task
+
+ def setTaskWeight(self,task_id,weight):
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ return task.setWeight(weight)
+
+ def getHostTasks(self):
+ host = Host()
+ host.verify()
+ return host.getHostTasks()
+
+ def taskSetWait(self,parent,tasks):
+ host = Host()
+ host.verify()
+ return host.taskSetWait(parent,tasks)
+
+ def taskWait(self,parent):
+ host = Host()
+ host.verify()
+ return host.taskWait(parent)
+
+ def taskWaitResults(self,parent,tasks):
+ host = Host()
+ host.verify()
+ return host.taskWaitResults(parent,tasks)
+
+ def subtask(self,method,arglist,parent,**opts):
+ host = Host()
+ host.verify()
+ ptask = Task(parent)
+ ptask.assertHost(host.id)
+ opts['parent'] = parent
+ if opts.has_key('label'):
+ # first check for existing task with this parent/label
+ q = """SELECT id FROM task
+ WHERE parent=%(parent)s AND label=%(label)s"""
+ row = _fetchSingle(q,opts)
+ if row:
+ #return task id
+ return row[0]
+ if opts.has_key('kwargs'):
+ arglist = koji.encode_args(*arglist, **opts['kwargs'])
+ del opts['kwargs']
+ return make_task(method,arglist,**opts)
+
+ def subtask2(self,__parent,__taskopts,__method,*args,**opts):
+ """A wrapper around subtask with optional signature
+
+ Parameters:
+ __parent: task id of the parent task
+ __taskopts: dictionary of task options
+ __method: the method to be invoked
+
+ Remaining args are passed on to the subtask
+ """
+ #self.subtask will verify the host
+ args = koji.encode_args(*args,**opts)
+ return self.subtask(__method,args,__parent,**__taskopts)
+
+ def moveBuildToScratch(self, task_id, srpm, rpms, logs=None):
+ "Move a completed scratch build into place (not imported)"
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ uploadpath = koji.pathinfo.work()
+ #verify files exist
+ for relpath in [srpm] + rpms:
+ fn = "%s/%s" % (uploadpath,relpath)
+ if not os.path.exists(fn):
+ raise koji.GenericError, "no such file: %s" % fn
+
+ rpms = check_noarch_rpms(uploadpath, rpms)
+
+ #figure out storage location
+ # <scratchdir>/<username>/task_<id>
+ scratchdir = koji.pathinfo.scratch()
+ username = get_user(task.getOwner())['name']
+ dir = "%s/%s/task_%s" % (scratchdir, username, task_id)
+ koji.ensuredir(dir)
+ for relpath in [srpm] + rpms:
+ fn = "%s/%s" % (uploadpath,relpath)
+ dest = "%s/%s" % (dir,os.path.basename(fn))
+ os.rename(fn,dest)
+ os.symlink(dest,fn)
+ if logs:
+ for key, files in logs.iteritems():
+ if key:
+ logdir = "%s/logs/%s" % (dir, key)
+ else:
+ logdir = "%s/logs" % dir
+ koji.ensuredir(logdir)
+ for relpath in files:
+ fn = "%s/%s" % (uploadpath,relpath)
+ dest = "%s/%s" % (logdir,os.path.basename(fn))
+ os.rename(fn,dest)
+ os.symlink(dest,fn)
+
+ def moveMavenBuildToScratch(self, task_id, results, rpm_results):
+ "Move a completed Maven scratch build into place (not imported)"
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, 'Maven support not enabled'
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ scratchdir = koji.pathinfo.scratch()
+ username = get_user(task.getOwner())['name']
+ destdir = os.path.join(scratchdir, username, 'task_%s' % task_id)
+ for reldir, files in results['files'].items() + [('', results['logs'])]:
+ for filename in files:
+ if reldir:
+ relpath = os.path.join(reldir, filename)
+ else:
+ relpath = filename
+ src = os.path.join(koji.pathinfo.task(results['task_id']), relpath)
+ dest = os.path.join(destdir, relpath)
+ koji.ensuredir(os.path.dirname(dest))
+ os.rename(src, dest)
+ os.symlink(dest, src)
+ if rpm_results:
+ for relpath in [rpm_results['srpm']] + rpm_results['rpms'] + \
+ rpm_results['logs']:
+ src = os.path.join(koji.pathinfo.task(rpm_results['task_id']),
+ relpath)
+ dest = os.path.join(destdir, 'rpms', relpath)
+ koji.ensuredir(os.path.dirname(dest))
+ os.rename(src, dest)
+ os.symlink(dest, src)
+
+ def moveWinBuildToScratch(self, task_id, results, rpm_results):
+ "Move a completed Windows scratch build into place (not imported)"
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, 'Windows support not enabled'
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ scratchdir = koji.pathinfo.scratch()
+ username = get_user(task.getOwner())['name']
+ destdir = os.path.join(scratchdir, username, 'task_%s' % task_id)
+ for relpath in results['output'].keys() + results['logs']:
+ filename = os.path.join(koji.pathinfo.task(results['task_id']), relpath)
+ dest = os.path.join(destdir, relpath)
+ koji.ensuredir(os.path.dirname(dest))
+ os.rename(filename, dest)
+ os.symlink(dest, filename)
+ if rpm_results:
+ for relpath in [rpm_results['srpm']] + rpm_results['rpms'] + \
+ rpm_results['logs']:
+ filename = os.path.join(koji.pathinfo.task(rpm_results['task_id']),
+ relpath)
+ dest = os.path.join(destdir, 'rpms', relpath)
+ koji.ensuredir(os.path.dirname(dest))
+ os.rename(filename, dest)
+ os.symlink(dest, filename)
+
+ def moveImageBuildToScratch(self, task_id, results):
+ """move a completed image scratch build into place"""
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ logger.debug('scratch image results: %s' % results)
+ for sub_results in results.values():
+ workdir = koji.pathinfo.task(sub_results['task_id'])
+ scratchdir = koji.pathinfo.scratch()
+ username = get_user(task.getOwner())['name']
+ destdir = os.path.join(scratchdir, username,
+ 'task_%s' % sub_results['task_id'])
+ for img in sub_results['files'] + sub_results['logs']:
+ src = os.path.join(workdir, img)
+ dest = os.path.join(destdir, img)
+ koji.ensuredir(destdir)
+ logger.debug('renaming %s to %s' % (src, dest))
+ os.rename(src, dest)
+ os.symlink(dest, src)
+ if sub_results.has_key('rpmresults'):
+ rpm_results = sub_results['rpmresults']
+ for relpath in [rpm_results['srpm']] + rpm_results['rpms'] + \
+ rpm_results['logs']:
+ src = os.path.join(koji.pathinfo.task(
+ rpm_results['task_id']), relpath)
+ dest = os.path.join(destdir, 'rpms', relpath)
+ koji.ensuredir(os.path.dirname(dest))
+ os.rename(src, dest)
+ os.symlink(dest, src)
+
+ def initBuild(self,data):
+ """Create a stub build entry.
+
+ This is done at the very beginning of the build to inform the
+ system the build is underway.
+ """
+ host = Host()
+ host.verify()
+ #sanity checks
+ task = Task(data['task_id'])
+ task.assertHost(host.id)
+ #prep the data
+ data['owner'] = task.getOwner()
+ data['state'] = koji.BUILD_STATES['BUILDING']
+ data['completion_time'] = None
+ return new_build(data)
+
+ def completeBuild(self, task_id, build_id, srpm, rpms, brmap=None, logs=None):
+ """Import final build contents into the database"""
+ #sanity checks
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ result = import_build(srpm, rpms, brmap, task_id, build_id, logs=logs)
+ build_notification(task_id, build_id)
+ return result
+
+ def completeImageBuild(self, task_id, build_id, results):
+ """Set an image build to the COMPLETE state"""
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ self.importImage(task_id, build_id, results)
+
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ update = UpdateProcessor('build', clauses=['id=%(build_id)i'],
+ values={'build_id': build_id})
+ update.set(id=build_id, state=st_complete)
+ update.rawset(completion_time='now()')
+ update.execute()
+ # send email
+ build_notification(task_id, build_id)
+
+ def initMavenBuild(self, task_id, build_info, maven_info):
+ """Create a new in-progress Maven build
+ Synthesize the release number by taking the (integer) release of the
+ last successful build and incrementing it."""
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ build_info['release'] = get_next_release(build_info)
+ data = build_info.copy()
+ data['task_id'] = task_id
+ data['owner'] = task.getOwner()
+ data['state'] = koji.BUILD_STATES['BUILDING']
+ data['completion_time'] = None
+ build_id = new_build(data)
+ data['id'] = build_id
+ new_maven_build(data, maven_info)
+
+ return data
+
+ def createMavenBuild(self, build_info, maven_info):
+ """
+ Associate Maven metadata with an existing build. Used
+ by the rpm2maven plugin.
+ """
+ host = Host()
+ host.verify()
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ new_maven_build(build_info, maven_info)
+
+ def completeMavenBuild(self, task_id, build_id, maven_results, rpm_results):
+ """Complete the Maven build."""
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+
+ build_info = get_build(build_id, strict=True)
+ maven_info = get_maven_build(build_id, strict=True)
+
+ maven_task_id = maven_results['task_id']
+ maven_buildroot_id = maven_results['buildroot_id']
+ maven_task_dir = koji.pathinfo.task(maven_task_id)
+ # import the build output
+ for relpath, files in maven_results['files'].iteritems():
+ dir_maven_info = maven_info
+ poms = [f for f in files if f.endswith('.pom')]
+ if len(poms) == 0:
+ pass
+ elif len(poms) == 1:
+ # This directory has a .pom file, so get the Maven group_id,
+ # artifact_id, and version from it and associate those with
+ # the artifacts in this directory
+ pom_path = os.path.join(maven_task_dir, relpath, poms[0])
+ pom_info = koji.parse_pom(pom_path)
+ dir_maven_info = koji.pom_to_maven_info(pom_info)
+ else:
+ raise koji.BuildError, 'multiple .pom files in %s: %s' % (relpath, ', '.join(poms))
+
+ for filename in files:
+ if os.path.splitext(filename)[1] in ('.md5', '.sha1'):
+ # metadata, we'll recreate that ourselves
+ continue
+ filepath = os.path.join(maven_task_dir, relpath, filename)
+ if filename == 'maven-metadata.xml':
+ # We want the maven-metadata.xml to be present in the build dir
+ # so that it's a valid Maven repo, but we don't want to track it
+ # in the database because we regenerate it when creating tag repos.
+ # So we special-case it here.
+ destdir = os.path.join(koji.pathinfo.mavenbuild(build_info),
+ relpath)
+ _import_archive_file(filepath, destdir)
+ _generate_maven_metadata(destdir)
+ continue
+ archivetype = get_archive_type(filename)
+ if not archivetype:
+ # Unknown archive type, skip it
+ continue
+ import_archive(filepath, build_info, 'maven', dir_maven_info, maven_buildroot_id)
+
+ # move the logs to their final destination
+ for log_path in maven_results['logs']:
+ import_build_log(os.path.join(maven_task_dir, log_path),
+ build_info, subdir='maven')
+
+ if rpm_results:
+ _import_wrapper(rpm_results['task_id'], build_info, rpm_results)
+
+ # update build state
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=build_info['state'], new=st_complete, info=build_info)
+ update = UpdateProcessor('build', clauses=['id=%(build_id)i'],
+ values={'build_id': build_id})
+ update.set(state=st_complete)
+ update.rawset(completion_time='now()')
+ update.execute()
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=build_info['state'], new=st_complete, info=build_info)
+
+ # send email
+ build_notification(task_id, build_id)
+
+ def importArchive(self, filepath, buildinfo, type, typeInfo):
+ """
+ Import an archive file and associate it with a build. The archive can
+ be any non-rpm filetype supported by Koji. Used by the rpm2maven plugin.
+ """
+ host = Host()
+ host.verify()
+ if type == 'maven':
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, 'Maven support not enabled'
+ elif type == 'win':
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, 'Windows support not enabled'
+ else:
+ raise koji.GenericError, 'unsupported archive type: %s' % type
+ import_archive(filepath, buildinfo, type, typeInfo)
+
+ def importWrapperRPMs(self, task_id, build_id, rpm_results):
+ """Import the wrapper rpms and associate them with the given build. The build
+ must not have any existing rpms associated with it."""
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+
+ build_info = get_build(build_id, strict=True)
+
+ if build_info['state'] != koji.BUILD_STATES['COMPLETE']:
+ raise koji.GenericError, 'cannot import wrapper rpms for %s: build state is %s, not complete' % \
+ (koji.buildLabel(build_info), koji.BUILD_STATES[build_info['state']].lower())
+
+ if list_rpms(buildID=build_info['id']):
+ # don't allow overwriting of already-imported wrapper RPMs
+ raise koji.GenericError, 'wrapper rpms for %s have already been imported' % koji.buildLabel(build_info)
+
+ _import_wrapper(task.id, build_info, rpm_results)
+
+ def initImageBuild(self, task_id, build_info):
+ """create a new in-progress image build"""
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ data = build_info.copy()
+ data['task_id'] = task_id
+ data['owner'] = task.getOwner()
+ data['state'] = koji.BUILD_STATES['BUILDING']
+ data['completion_time'] = None
+ build_id = new_build(data)
+ data['id'] = build_id
+ new_image_build(data)
+ return data
+
+ def initWinBuild(self, task_id, build_info, win_info):
+ """
+ Create a new in-progress Windows build.
+ """
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, 'Windows support not enabled'
+ host = Host()
+ host.verify()
+ #sanity checks
+ task = Task(task_id)
+ task.assertHost(host.id)
+ # build_info must contain name, version, and release
+ data = build_info.copy()
+ data['task_id'] = task_id
+ data['owner'] = task.getOwner()
+ data['state'] = koji.BUILD_STATES['BUILDING']
+ data['completion_time'] = None
+ build_id = new_build(data)
+ data['id'] = build_id
+ new_win_build(data, win_info)
+ return data
+
+ def completeWinBuild(self, task_id, build_id, results, rpm_results):
+ """Complete a Windows build"""
+ if not context.opts.get('EnableWin'):
+ raise koji.GenericError, 'Windows support not enabled'
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+
+ build_info = get_build(build_id, strict=True)
+ win_info = get_win_build(build_id, strict=True)
+
+ task_dir = koji.pathinfo.task(results['task_id'])
+ # import the build output
+ for relpath, metadata in results['output'].iteritems():
+ archivetype = get_archive_type(relpath)
+ if not archivetype:
+ # Unknown archive type, skip it
+ continue
+ filepath = os.path.join(task_dir, relpath)
+ metadata['relpath'] = os.path.dirname(relpath)
+ import_archive(filepath, build_info, 'win', metadata, buildroot_id=results['buildroot_id'])
+
+ # move the logs to their final destination
+ for relpath in results['logs']:
+ subdir = 'win'
+ reldir = os.path.dirname(relpath)
+ if reldir:
+ subdir = os.path.join(subdir, reldir)
+ import_build_log(os.path.join(task_dir, relpath),
+ build_info, subdir=subdir)
+
+ if rpm_results:
+ _import_wrapper(rpm_results['task_id'], build_info, rpm_results)
+
+ # update build state
+ st_complete = koji.BUILD_STATES['COMPLETE']
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=build_info['state'], new=st_complete, info=build_info)
+ update = UpdateProcessor('build', clauses=['id=%(build_id)i'],
+ values={'build_id': build_id})
+ update.set(state=st_complete)
+ update.rawset(completion_time='now()')
+ update.execute()
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=build_info['state'], new=st_complete, info=build_info)
+
+ # send email
+ build_notification(task_id, build_id)
+
+ def failBuild(self, task_id, build_id):
+ """Mark the build as failed. If the current state is not
+        'BUILDING', or the current completion_time is not null, a
+ GenericError will be raised."""
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+
+ st_failed = koji.BUILD_STATES['FAILED']
+ buildinfo = get_build(build_id, strict=True)
+ koji.plugin.run_callbacks('preBuildStateChange', attribute='state', old=buildinfo['state'], new=st_failed, info=buildinfo)
+
+ query = """SELECT state, completion_time
+ FROM build
+ WHERE id = %(build_id)i
+ FOR UPDATE"""
+ result = _singleRow(query, locals(), ('state', 'completion_time'))
+
+ if result['state'] != koji.BUILD_STATES['BUILDING']:
+ raise koji.GenericError, 'cannot update build %i, state: %s' % \
+ (build_id, koji.BUILD_STATES[result['state']])
+ elif result['completion_time'] is not None:
+ raise koji.GenericError, 'cannot update build %i, completed at %s' % \
+ (build_id, result['completion_time'])
+
+ update = """UPDATE build
+ SET state = %(st_failed)i,
+ completion_time = NOW()
+ WHERE id = %(build_id)i"""
+ _dml(update, locals())
+ build_notification(task_id, build_id)
+ koji.plugin.run_callbacks('postBuildStateChange', attribute='state', old=buildinfo['state'], new=st_failed, info=buildinfo)
+
+ def tagBuild(self,task_id,tag,build,force=False,fromtag=None):
+ """Tag a build (host version)
+
+ This tags as the user who owns the task
+
+ If fromtag is specified, also untag the package (i.e. move in a single
+ transaction)
+
+ No return value
+ """
+ host = Host()
+ host.verify()
+ task = Task(task_id)
+ task.assertHost(host.id)
+ user_id = task.getOwner()
+ policy_data = {'tag' : tag, 'build' : build, 'fromtag' : fromtag}
+ policy_data['user_id'] = user_id
+ if fromtag is None:
+ policy_data['operation'] = 'tag'
+ else:
+ policy_data['operation'] = 'move'
+ #don't check policy for admins using force
+ perms = koji.auth.get_user_perms(user_id)
+ if not force or 'admin' not in perms:
+ assert_policy('tag', policy_data)
+ if fromtag:
+ _untag_build(fromtag,build,user_id=user_id,force=force,strict=True)
+ _tag_build(tag,build,user_id=user_id,force=force)
+
+ def importImage(self, task_id, build_id, results):
+ """
+ Import a built image, populating the database with metadata and
+ moving the image to its final location.
+ """
+ for sub_results in results.values():
+ importImageInternal(task_id, build_id, sub_results)
+ if sub_results.has_key('rpmresults'):
+ rpm_results = sub_results['rpmresults']
+ _import_wrapper(rpm_results['task_id'],
+ get_build(build_id, strict=True), rpm_results)
+
+ def tagNotification(self, is_successful, tag_id, from_id, build_id, user_id, ignore_success=False, failure_msg=''):
+ """Create a tag notification message.
+ Handles creation of tagNotification tasks for hosts."""
+ host = Host()
+ host.verify()
+ tag_notification(is_successful, tag_id, from_id, build_id, user_id, ignore_success, failure_msg)
+
+ def checkPolicy(self, name, data, default='deny', strict=False):
+ host = Host()
+ host.verify()
+ return check_policy(name, data, default=default, strict=strict)
+
+ def assertPolicy(self, name, data, default='deny'):
+ host = Host()
+ host.verify()
+ check_policy(name, data, default=default, strict=True)
+
+ def evalPolicy(self, name, data):
+ """Evaluate named policy with given data and return the result"""
+ host = Host()
+ host.verify()
+ ruleset = context.policy.get(name)
+ if not ruleset:
+ raise koji.GenericError, "no such policy: %s" % name
+ return ruleset.apply(data)
+
+ def newBuildRoot(self, repo, arch, task_id=None):
+ host = Host()
+ host.verify()
+ if task_id is not None:
+ Task(task_id).assertHost(host.id)
+ br = BuildRoot()
+ return br.new(host.id,repo,arch,task_id=task_id)
+
+ def setBuildRootState(self,brootid,state,task_id=None):
+ host = Host()
+ host.verify()
+ if task_id is not None:
+ Task(task_id).assertHost(host.id)
+ br = BuildRoot(brootid)
+ br.assertHost(host.id)
+ if task_id is not None:
+ br.assertTask(task_id)
+ return br.setState(state)
+
+ def setBuildRootList(self,brootid,rpmlist,task_id=None):
+ host = Host()
+ host.verify()
+ if task_id is not None:
+ Task(task_id).assertHost(host.id)
+ br = BuildRoot(brootid)
+ br.assertHost(host.id)
+ if task_id is not None:
+ br.assertTask(task_id)
+ return br.setList(rpmlist)
+
+ def updateBuildRootList(self,brootid,rpmlist,task_id=None):
+ host = Host()
+ host.verify()
+ if task_id is not None:
+ Task(task_id).assertHost(host.id)
+ br = BuildRoot(brootid)
+ br.assertHost(host.id)
+ if task_id is not None:
+ br.assertTask(task_id)
+ return br.updateList(rpmlist)
+
+ def updateBuildrootArchives(self, brootid, task_id, archives, project=False):
+ host = Host()
+ host.verify()
+ Task(task_id).assertHost(host.id)
+ br = BuildRoot(brootid)
+ br.assertHost(host.id)
+ br.assertTask(task_id)
+ return br.updateArchiveList(archives, project)
+
+ def updateMavenBuildRootList(self, brootid, task_id, mavenlist, ignore=None, project=False,
+ ignore_unknown=False, extra_deps=None):
+ if not context.opts.get('EnableMaven'):
+ raise koji.GenericError, "Maven support not enabled"
+ host = Host()
+ host.verify()
+ Task(task_id).assertHost(host.id)
+ br = BuildRoot(brootid)
+ br.assertHost(host.id)
+ br.assertTask(task_id)
+
+ repo = repo_info(br.data['repo_id'], strict=True)
+ tag = get_tag(repo['tag_id'], strict=True)
+ maven_build_index = {}
+ # Index the maven_tag_archives result by group_id:artifact_id:version
+ # The function ensures that each g:a:v maps to a single build id.
+ # The generator returned by maven_tag_archives can create a lot of data,
+ # but this index will only consume a fraction of that.
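+        # The resulting index is a nested dict, e.g. (values hypothetical):
+        #   {'org.apache.maven': {'maven-core': {'3.0.5': 12345}}}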
+ for archive in maven_tag_archives(tag['id'], event_id=repo['create_event']):
+ # unfortunately pgdb does not appear to intern strings, but still
+ # better not to create any new ones
+ idx_build = \
+ maven_build_index.setdefault(
+ archive['group_id'], {}).setdefault(
+ archive['artifact_id'], {}).setdefault(
+ archive['version'], archive['build_id'])
+ if idx_build != archive['build_id']:
+ logger.error("Found multiple builds for %(group_id)s:%(artifact_id)s:%(version)s. Current build: %(build_id)i", archive)
+ logger.error("Indexed build id was %i", idx_build)
+
+ if not ignore:
+ ignore = []
+ if not extra_deps:
+ extra_deps = []
+ task_deps = {}
+ for dep in extra_deps:
+ if isinstance(dep, (int, long)):
+ task_output = list_task_output(dep, stat=True)
+ for filepath, filestats in task_output.iteritems():
+ if os.path.splitext(filepath)[1] in ['.log', '.md5', '.sha1']:
+ continue
+ tokens = filepath.split('/')
+ if len(tokens) < 4:
+ # should never happen in a Maven repo
+ continue
+ filename = tokens.pop()
+ maven_info = {}
+ maven_info['version'] = tokens.pop()
+ maven_info['artifact_id'] = tokens.pop()
+ maven_info['group_id'] = '.'.join(tokens)
+ maven_label = koji.mavenLabel(maven_info)
+ fileinfo = {'filename': filename,
+ 'size': int(filestats['st_size'])}
+ if maven_label in task_deps:
+ task_deps[maven_label]['files'].append(fileinfo)
+ else:
+ task_deps[maven_label] = {'maven_info': maven_info,
+ 'files': [fileinfo]}
+ else:
+ build = get_build(dep, strict=True)
+ for archive in list_archives(buildID=build['id'], type='maven'):
+ idx_build = \
+ maven_build_index.setdefault(
+ archive['group_id'], {}).setdefault(
+ archive['artifact_id'], {}).setdefault(
+ archive['version'], archive['build_id'])
+ if idx_build != archive['build_id']:
+ logger.error("Overriding build for %(group_id)s:%(artifact_id)s:%(version)s.", archive)
+ logger.error("Current build is %s, new build is %s.", idx_build, archive['build_id'])
+ maven_build_index[archive['group_id']][archive['artifact_id']][archive['version']] = archive['build_id']
+
+ ignore.extend(task_deps.values())
+
+ SNAPSHOT_RE = re.compile(r'-\d{8}\.\d{6}-\d+')
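+        # matches the deploy timestamp in snapshot artifact names, e.g. the
+        # '-20150102.030405-1' part of 'foo-1.0-20150102.030405-1.jar'
+        # (filename hypothetical)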
+ ignore_by_label = {}
+ for entry in ignore:
+ ignore_info = entry['maven_info']
+ ignore_label = koji.mavenLabel(ignore_info)
+ if not ignore_by_label.has_key(ignore_label):
+ ignore_by_label[ignore_label] = {}
+ for fileinfo in entry['files']:
+ filename = fileinfo['filename']
+ ignore_by_label[ignore_label][filename] = fileinfo
+ if SNAPSHOT_RE.search(filename):
+                    # the task produced snapshot versions, which means the
+                    # local repo will contain the same file with both
+                    # -SNAPSHOT and -{timestamp} in the name
+ snapname = SNAPSHOT_RE.sub('-SNAPSHOT', filename)
+ ignore_by_label[ignore_label][snapname] = fileinfo
+
+ archives = []
+ for entry in mavenlist:
+ maven_info = entry['maven_info']
+ maven_label = koji.mavenLabel(maven_info)
+ ignore_archives = ignore_by_label.get(maven_label, {})
+ build_id = maven_build_index.get(
+ maven_info['group_id'], {}).get(
+ maven_info['artifact_id'], {}).get(
+ maven_info['version'])
+ if not build_id:
+ if not ignore_unknown:
+ # just warn for now. might be in ignore list. the loop below will check.
+ logger.warning('Unmatched maven g:a:v in build environment: '
+ '%(group_id)s:%(artifact_id)s:%(version)s', maven_info)
+ build_archives = {}
+ else:
+ tinfo = dslice(maven_info, ['group_id', 'artifact_id', 'version'])
+ build_archives = list_archives(buildID=build_id, type='maven', typeInfo=tinfo)
+ # index by filename
+ build_archives = dict([(a['filename'], a) for a in build_archives])
+
+ for fileinfo in entry['files']:
+ ignore_archive = ignore_archives.get(fileinfo['filename'])
+ tag_archive = build_archives.get(fileinfo['filename'])
+ if tag_archive and fileinfo['size'] == tag_archive['size']:
+ archives.append(tag_archive)
+ elif ignore_archive and fileinfo['size'] == ignore_archive['size']:
+ pass
+ else:
+ if not ignore_unknown:
+ logger.error("Unknown file for %(group_id)s:%(artifact_id)s:%(version)s", maven_info)
+ if build_id:
+ build = get_build(build_id)
+ logger.error("g:a:v supplied by build %(nvr)s", build)
+ logger.error("Build supplies %i archives: %r", len(build_archives), build_archives.keys())
+ if tag_archive:
+ logger.error("Size mismatch, br: %i, db: %i", fileinfo['size'], tag_archive['size'])
+ raise koji.BuildrootError, 'Unknown file in build environment: %s, size: %s' % \
+ ('%s/%s' % (fileinfo['path'], fileinfo['filename']), fileinfo['size'])
+
+ return br.updateArchiveList(archives, project)
+
+ def repoInit(self, tag, with_src=False, with_debuginfo=False, event=None):
+ """Initialize a new repo for tag"""
+ host = Host()
+ host.verify()
+ return repo_init(tag, with_src=with_src, with_debuginfo=with_debuginfo, event=event)
+
+ def repoAddRPM(self, repo_id, path):
+ """Add an uploaded rpm to a repo"""
+ host = Host()
+ host.verify()
+ rinfo = repo_info(repo_id, strict=True)
+ repodir = koji.pathinfo.repo(repo_id, rinfo['tag_name'])
+ if rinfo['state'] != koji.REPO_INIT:
+ raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo
+ #verify file exists
+ uploadpath = koji.pathinfo.work()
+ filepath = "%s/%s" % (uploadpath, path)
+ if not os.path.exists(filepath):
+ raise koji.GenericError, "no such file: %s" % filepath
+ rpminfo = koji.get_header_fields(filepath, ('arch','sourcepackage'))
+ dirs = []
+ if not rpminfo['sourcepackage'] and rpminfo['arch'] != 'noarch':
+ arch = koji.canonArch(rpminfo['arch'])
+ dir = "%s/%s/RPMS" % (repodir, arch)
+ if os.path.isdir(dir):
+ dirs.append(dir)
+ else:
+ #noarch and srpms linked for all arches
+ for fn in os.listdir(repodir):
+ if fn == 'groups':
+ continue
+ if rpminfo['sourcepackage']:
+ dir = "%s/%s/SRPMS" % (repodir, fn)
+ else:
+ dir = "%s/%s/RPMS" % (repodir, fn)
+ if os.path.isdir(dir):
+ dirs.append(dir)
+ for dir in dirs:
+ fn = os.path.basename(filepath)
+ dst = "%s/%s" % (dir, fn)
+ if os.path.exists(dst):
+ s_st = os.stat(filepath)
+ d_st = os.stat(dst)
+ if s_st.st_ino != d_st.st_ino:
+ raise koji.GenericError, "File already in repo: %s" % dst
+ #otherwise the desired hardlink already exists
+ else:
+ os.link(filepath, dst)
+
+ def repoDone(self, repo_id, data, expire=False):
+ """Move repo data into place, mark as ready, and expire earlier repos
+
+ repo_id: the id of the repo
+ data: a dictionary of the form { arch: (uploadpath, files), ...}
+ expire(optional): if set to true, mark the repo expired immediately*
+
+ * This is used when a repo from an older event is generated
+ """
+ host = Host()
+ host.verify()
+ rinfo = repo_info(repo_id, strict=True)
+ koji.plugin.run_callbacks('preRepoDone', repo=rinfo, data=data, expire=expire)
+ if rinfo['state'] != koji.REPO_INIT:
+ raise koji.GenericError, "Repo %(id)s not in INIT state (got %(state)s)" % rinfo
+ repodir = koji.pathinfo.repo(repo_id, rinfo['tag_name'])
+ workdir = koji.pathinfo.work()
+ for arch, (uploadpath, files) in data.iteritems():
+ archdir = "%s/%s" % (repodir, arch)
+ if not os.path.isdir(archdir):
+ raise koji.GenericError, "Repo arch directory missing: %s" % archdir
+ datadir = "%s/repodata" % archdir
+ koji.ensuredir(datadir)
+ for fn in files:
+ src = "%s/%s/%s" % (workdir,uploadpath, fn)
+ dst = "%s/%s" % (datadir, fn)
+ if not os.path.exists(src):
+ raise koji.GenericError, "uploaded file missing: %s" % src
+ os.link(src, dst)
+ os.unlink(src)
+ if expire:
+ repo_expire(repo_id)
+ koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)
+ return
+ #else:
+ repo_ready(repo_id)
+ repo_expire_older(rinfo['tag_id'], rinfo['create_event'])
+ #make a latest link
+ latestrepolink = koji.pathinfo.repo('latest', rinfo['tag_name'])
+ #XXX - this is a slight abuse of pathinfo
+ try:
+ if os.path.lexists(latestrepolink):
+ os.unlink(latestrepolink)
+ os.symlink(str(repo_id), latestrepolink)
+ except OSError:
+ #making this link is nonessential
+ log_error("Unable to create latest link for repo: %s" % repodir)
+ koji.plugin.run_callbacks('postRepoDone', repo=rinfo, data=data, expire=expire)
+
+ def isEnabled(self):
+ host = Host()
+ host.verify()
+ return host.isEnabled()
+
+
+def get_upload_path(reldir, name, create=False):
+ orig_reldir = reldir
+ orig_name = name
+ # lots of sanity checks
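+    # e.g. (hypothetical) reldir='tasks/8/4108', name='build.log' for a task
+    # upload, or a work-relative client directory for a user upload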
+ d, name = os.path.split(name)
+ if d or name.startswith('.'):
+ raise koji.GenericError, "Invalid upload filename: %s" % orig_name
+ reldir = os.path.normpath(reldir)
+ if not reldir or reldir.startswith('..'):
+ raise koji.GenericError, "Invalid upload directory: %s" % orig_reldir
+ parts = reldir.split('/')
+ check_user = True
+ if create and parts[0] == "tasks":
+ if len(parts) < 3:
+ raise koji.GenericError, "Invalid task upload directory: %s" % orig_reldir
+ try:
+ task_id = int(parts[2])
+ except ValueError:
+ raise koji.GenericError, "Invalid task upload directory: %s" % orig_reldir
+ # only the host running this task may write here
+ host = Host()
+ host.verify()
+ Task(task_id).assertHost(host.id)
+ check_user = False
+ udir = os.path.join(koji.pathinfo.work(), reldir)
+ if create:
+ koji.ensuredir(udir)
+ if check_user:
+ # assuming login was asserted earlier
+ u_fn = os.path.join(udir, '.user')
+ if os.path.exists(u_fn):
+ user_id = int(file(u_fn, 'r').read())
+ if context.session.user_id != user_id:
+ raise koji.GenericError, "Invalid upload directory, not owner: %s" % orig_reldir
+ else:
+ fo = file(u_fn, 'w')
+ fo.write(str(context.session.user_id))
+ fo.close()
+ return os.path.join(udir, name)
+
+def get_verify_class(verify):
+ if verify == 'md5':
+ return md5_constructor
+ elif verify == 'adler32':
+ return koji.util.adler32_constructor
+ elif verify:
+ raise koji.GenericError, "Unsupported verify type: %s" % verify
+ else:
+ return None
+
+
+def handle_upload(environ):
+ """Handle file upload via POST request"""
+ logger = logging.getLogger('koji.upload')
+ start = time.time()
+ if not context.session.logged_in:
+ raise koji.ActionNotAllowed, 'you must be logged-in to upload a file'
+ args = cgi.parse_qs(environ.get('QUERY_STRING', ''), strict_parsing=True)
+ #XXX - already parsed by auth
+ name = args['filename'][0]
+ path = args.get('filepath', ('',))[0]
+ verify = args.get('fileverify', ('',))[0]
+ overwrite = args.get('overwrite', ('',))[0]
+ offset = args.get('offset', ('0',))[0]
+ offset = int(offset)
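+    # An illustrative (hypothetical) query string for a resumed upload:
+    #   ?filename=build.log&filepath=tasks/8/4108&fileverify=adler32&offset=-1
+    # offset=-1 appends to the existing file; offset=0 without overwrite is
+    # rejected below if the destination already exists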
+ fn = get_upload_path(path, name, create=True)
+ if os.path.exists(fn):
+ if not os.path.isfile(fn):
+ raise koji.GenericError, "destination not a file: %s" % fn
+ if offset == 0 and not overwrite:
+ raise koji.GenericError, "upload path exists: %s" % fn
+ sum_cls = get_verify_class(verify)
+ size = 0
+ chksum = sum_cls()
+ inf = environ['wsgi.input']
+ fd = os.open(fn, os.O_RDWR | os.O_CREAT, 0666)
+ try:
+ try:
+ fcntl.lockf(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
+ except IOError, e:
+ raise koji.LockError, e
+ if offset == -1:
+ offset = os.lseek(fd, 0, 2)
+ else:
+ os.ftruncate(fd, offset)
+ os.lseek(fd, offset, 0)
+ while True:
+ chunk = inf.read(65536)
+ if not chunk:
+ break
+ size += len(chunk)
+ if verify:
+ chksum.update(chunk)
+ os.write(fd, chunk)
+ finally:
+ # this will also remove our lock
+ os.close(fd)
+ ret = {
+ 'size' : koji.encode_int(size),
+ 'fileverify' : verify,
+ 'offset' : koji.encode_int(offset),
+ }
+ if verify:
+ # unsigned 32bit - could be too big for xmlrpc
+ ret['hexdigest'] = chksum.hexdigest()
+ logger.debug("Upload result: %r", ret)
+ logger.info("Completed upload for session %s (#%s): %f seconds, %i bytes, %s",
+ context.session.id, context.session.callnum,
+ time.time()-start, size, fn)
+ return ret
+
+#koji.add_sys_logger("koji")
+
+if __name__ == "__main__":
+ # XXX - testing defaults
+ print "Connecting to DB"
+ koji.db.setDBopts( database = "test", user = "test")
+ context.cnx = koji.db.connect()
+ context.req = {}
+ print "Creating a session"
+ context.session = koji.auth.Session(None,hostip="127.0.0.1")
+ print context.session
+ test_user = "host/1"
+ pw = "foobar"
+ print "Logging in as %s" % test_user
+ session_info = context.session.login(test_user,pw,{'hostip':'127.0.0.1'})
+ for k in session_info.keys():
+ session_info[k] = [session_info[k]]
+ s2=koji.auth.Session(session_info,'127.0.0.1')
+ print s2
+ print s2.getHostId()
+ context.session = s2
+ print "Associating host"
+ Host()
+ #context.cnx.commit()
+ context.session.perms['admin'] = 1 #XXX
diff --git a/hub/kojixmlrpc.py b/hub/kojixmlrpc.py
new file mode 100644
index 0000000..efb99a6
--- /dev/null
+++ b/hub/kojixmlrpc.py
@@ -0,0 +1,796 @@
+# kojixmlrpc - an XMLRPC interface for koji.
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+from ConfigParser import RawConfigParser
+import inspect
+import logging
+import os
+import sys
+import time
+import traceback
+import types
+import pprint
+import resource
+import xmlrpclib
+from xmlrpclib import getparser,dumps,Fault
+from koji.server import WSGIWrapper
+
+import koji
+import koji.auth
+import koji.db
+import koji.plugin
+import koji.policy
+import koji.util
+from koji.context import context
+
+
+# Workaround to allow xmlrpclib to deal with iterators
+class Marshaller(xmlrpclib.Marshaller):
+
+ dispatch = xmlrpclib.Marshaller.dispatch.copy()
+
+ def dump_generator(self, value, write):
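+        # self.__dump name-mangles to _Marshaller__dump, matching the
+        # parent's private method because this subclass reuses the class
+        # name Marshaller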
+ dump = self.__dump
+ write("<value><array><data>\n")
+ for v in value:
+ dump(v, write)
+ write("</data></array></value>\n")
+ dispatch[types.GeneratorType] = dump_generator
+
+xmlrpclib.Marshaller = Marshaller
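+
+# With the patched Marshaller installed, a handler can return a generator and
+# have it serialized as an ordinary XML-RPC array. A minimal sketch (not part
+# of the hub API):
+#   def countdown(n):
+#       return (i for i in xrange(n, 0, -1))
+#   xmlrpclib.dumps((countdown(3),))  # emits an <array> of 3, 2, 1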
+
+
+class HandlerRegistry(object):
+ """Track handlers for RPC calls"""
+
+ def __init__(self):
+ self.funcs = {}
+ #introspection functions
+ self.register_function(self.list_api, name="_listapi")
+ self.register_function(self.system_listMethods, name="system.listMethods")
+ self.register_function(self.system_methodSignature, name="system.methodSignature")
+ self.register_function(self.system_methodHelp, name="system.methodHelp")
+ self.argspec_cache = {}
+
+ def register_function(self, function, name = None):
+ if name is None:
+ name = function.__name__
+ self.funcs[name] = function
+
+ def register_module(self, instance, prefix=None):
+ """Register all the public functions in an instance with prefix prepended
+
+ For example
+ h.register_module(exports,"pub.sys")
+ will register the methods of exports with names like
+ pub.sys.method1
+ pub.sys.method2
+ ...etc
+ """
+ for name in dir(instance):
+ if name.startswith('_'):
+ continue
+ function = getattr(instance, name)
+ if not callable(function):
+ continue
+ if prefix is not None:
+ name = "%s.%s" %(prefix,name)
+ self.register_function(function, name=name)
+
+ def register_instance(self,instance):
+ self.register_module(instance)
+
+ def register_plugin(self, plugin):
+ """Scan a given plugin for handlers
+
+ Handlers are functions marked with one of the decorators defined in koji.plugin
+ """
+ for v in vars(plugin).itervalues():
+ if isinstance(v, (types.ClassType, types.TypeType)):
+ #skip classes
+ continue
+ if callable(v):
+ if getattr(v, 'exported', False):
+ if hasattr(v, 'export_alias'):
+ name = getattr(v, 'export_alias')
+ else:
+ name = v.__name__
+ self.register_function(v, name=name)
+ if getattr(v, 'callbacks', None):
+ for cbtype in v.callbacks:
+ koji.plugin.register_callback(cbtype, v)
+
+ def getargspec(self, func):
+ ret = self.argspec_cache.get(func)
+ if ret:
+ return ret
+ ret = tuple(inspect.getargspec(func))
+ if inspect.ismethod(func) and func.im_self:
+ # bound method, remove first arg
+ args, varargs, varkw, defaults = ret
+ if args:
+ aname = args[0] #generally "self"
+ del args[0]
+ if defaults and aname in defaults:
+ # shouldn't happen, but...
+ del defaults[aname]
+        # cache the result so later lookups hit the fast path above
+        self.argspec_cache[func] = ret
+        return ret
+
+ def list_api(self):
+ funcs = []
+ for name,func in self.funcs.items():
+ #the keys in self.funcs determine the name of the method as seen over xmlrpc
+ #func.__name__ might differ (e.g. for dotted method names)
+ args = self._getFuncArgs(func)
+ argspec = self.getargspec(func)
+ funcs.append({'name': name,
+ 'doc': func.__doc__,
+ 'argspec': argspec,
+ 'argdesc': inspect.formatargspec(*argspec),
+ 'args': args})
+ return funcs
+
+ def _getFuncArgs(self, func):
+ args = []
+ for x in range(0, func.func_code.co_argcount):
+ if x == 0 and func.func_code.co_varnames[x] == "self":
+ continue
+ if func.func_defaults and func.func_code.co_argcount - x <= len(func.func_defaults):
+ args.append((func.func_code.co_varnames[x], func.func_defaults[x - func.func_code.co_argcount + len(func.func_defaults)]))
+ else:
+ args.append(func.func_code.co_varnames[x])
+ return args
+
+ def system_listMethods(self):
+ return self.funcs.keys()
+
+ def system_methodSignature(self, method):
+ #it is not possible to autogenerate this data
+ return 'signatures not supported'
+
+ def system_methodHelp(self, method):
+ func = self.funcs.get(method)
+ if func is None:
+ return ""
+ args = inspect.formatargspec(*self.getargspec(func))
+ ret = '%s%s' % (method, args)
+ if func.__doc__:
+ ret += "\ndescription: %s" % func.__doc__
+ return ret
+
+ def get(self, name):
+ func = self.funcs.get(name, None)
+ if func is None:
+ raise koji.GenericError, "Invalid method: %s" % name
+ return func
+
+
+class HandlerAccess(object):
+ """This class is used to grant access to the rpc handlers"""
+
+ def __init__(self, registry):
+ self.__reg = registry
+
+ def call(self, __name, *args, **kwargs):
+ return self.__reg.get(__name)(*args, **kwargs)
+
+ def get(self, name):
+ return self.__reg.get(name)
+
+
+class ModXMLRPCRequestHandler(object):
+ """Simple XML-RPC handler for mod_python environment"""
+
+ def __init__(self, handlers):
+ self.traceback = False
+ self.handlers = handlers #expecting HandlerRegistry instance
+ self.logger = logging.getLogger('koji.xmlrpc')
+
+ def _get_handler(self, name):
+ # just a wrapper so we can handle multicall ourselves
+ # we don't register multicall since the registry will outlive our instance
+ if name in ('multiCall', 'system.multicall'):
+ return self.multiCall
+ else:
+ return self.handlers.get(name)
+
+ def _read_request(self, stream):
+ parser, unmarshaller = getparser()
+ rlen = 0
+ maxlen = opts.get('MaxRequestLength', None)
+ while True:
+ chunk = stream.read(8192)
+ if not chunk:
+ break
+ rlen += len(chunk)
+ if maxlen and rlen > maxlen:
+ raise koji.GenericError, 'Request too long'
+ parser.feed(chunk)
+ parser.close()
+ return unmarshaller.close(), unmarshaller.getmethodname()
+
+ def _wrap_handler(self, handler, environ):
+ """Catch exceptions and encode response of handler"""
+
+ # generate response
+ try:
+ response = handler(environ)
+ # wrap response in a singleton tuple
+ response = (response,)
+ response = dumps(response, methodresponse=1, allow_none=1)
+ except Fault, fault:
+ self.traceback = True
+ response = dumps(fault)
+ except:
+ self.traceback = True
+ # report exception back to server
+ e_class, e = sys.exc_info()[:2]
+ faultCode = getattr(e_class,'faultCode',1)
+ tb_type = context.opts.get('KojiTraceback',None)
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ if issubclass(e_class, koji.GenericError):
+ if context.opts.get('KojiDebug'):
+ if tb_type == "extended":
+ faultString = koji.format_exc_plus()
+ else:
+ faultString = tb_str
+ else:
+ faultString = str(e)
+ else:
+ if tb_type == "normal":
+ faultString = tb_str
+ elif tb_type == "extended":
+ faultString = koji.format_exc_plus()
+ else:
+ faultString = "%s: %s" % (e_class,e)
+ self.logger.warning(tb_str)
+ response = dumps(Fault(faultCode, faultString))
+
+ return response
+
+ def handle_upload(self, environ):
+ #uploads can't be in a multicall
+ context.method = None
+ self.check_session()
+ self.enforce_lockout()
+ return kojihub.handle_upload(environ)
+
+ def handle_rpc(self, environ):
+ params, method = self._read_request(environ['wsgi.input'])
+ return self._dispatch(method, params)
+
+ def check_session(self):
+ if not hasattr(context,"session"):
+ #we may be called again by one of our meta-calls (like multiCall)
+ #so we should only create a session if one does not already exist
+ context.session = koji.auth.Session()
+ try:
+ context.session.validate()
+ except koji.AuthLockError:
+ #might be ok, depending on method
+ if context.method not in ('exclusiveSession','login', 'krbLogin', 'logout'):
+ raise
+
+ def enforce_lockout(self):
+ if context.opts.get('LockOut') and \
+ context.method not in ('login', 'krbLogin', 'sslLogin', 'logout') and \
+ not context.session.hasPerm('admin'):
+ raise koji.ServerOffline, "Server disabled for maintenance"
+
+ def _dispatch(self, method, params):
+ func = self._get_handler(method)
+ context.method = method
+ context.params = params
+ self.check_session()
+ self.enforce_lockout()
+ # handle named parameters
+ params, opts = koji.decode_args(*params)
+
+ if self.logger.isEnabledFor(logging.INFO):
+ self.logger.info("Handling method %s for session %s (#%s)",
+ method, context.session.id, context.session.callnum)
+ if method != 'uploadFile' and self.logger.isEnabledFor(logging.DEBUG):
+ self.logger.debug("Params: %s", pprint.pformat(params))
+ self.logger.debug("Opts: %s", pprint.pformat(opts))
+ start = time.time()
+
+ ret = koji.util.call_with_argcheck(func, params, opts)
+
+ if self.logger.isEnabledFor(logging.INFO):
+ rusage = resource.getrusage(resource.RUSAGE_SELF)
+ self.logger.info("Completed method %s for session %s (#%s): %f seconds, rss %s, stime %f",
+ method, context.session.id, context.session.callnum,
+ time.time()-start,
+ rusage.ru_maxrss, rusage.ru_stime)
+
+ return ret
+
+ def multiCall(self, calls):
+ """Execute a multicall. Execute each method call in the calls list, collecting
+ results and errors, and return those as a list."""
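+        # Each element of `calls` follows the system.multicall convention,
+        # e.g. (values hypothetical):
+        #   {'methodName': 'getTaskInfo', 'params': [4108]}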
+ results = []
+ for call in calls:
+ try:
+ result = self._dispatch(call['methodName'], call['params'])
+ except Fault, fault:
+ results.append({'faultCode': fault.faultCode, 'faultString': fault.faultString})
+ except:
+ # transform unknown exceptions into XML-RPC Faults
+ # don't create a reference to full traceback since this creates
+ # a circular reference.
+ exc_type, exc_value = sys.exc_info()[:2]
+ faultCode = getattr(exc_type, 'faultCode', 1)
+ faultString = ', '.join(exc_value.args)
+ trace = traceback.format_exception(*sys.exc_info())
+ # traceback is not part of the multicall spec, but we include it for debugging purposes
+ results.append({'faultCode': faultCode, 'faultString': faultString, 'traceback': trace})
+ else:
+ results.append([result])
+
+ return results
+
+ def handle_request(self,req):
+ """Handle a single XML-RPC request"""
+
+ pass
+ #XXX no longer used
+
+
+def offline_reply(start_response, msg=None):
+ """Send a ServerOffline reply"""
+ faultCode = koji.ServerOffline.faultCode
+ if msg is None:
+ faultString = "server is offline"
+ else:
+ faultString = msg
+ response = dumps(Fault(faultCode, faultString))
+ headers = [
+ ('Content-Length', str(len(response))),
+ ('Content-Type', "text/xml"),
+ ]
+ start_response('200 OK', headers)
+ return [response]
+
+def load_config(environ):
+ """Load configuration options
+
+ Options are read from a config file. The config file location is
+ controlled by the PythonOption ConfigFile in the httpd config.
+
+ Backwards compatibility:
+ - if ConfigFile is not set, opts are loaded from http config
+ - if ConfigFile is set, then the http config must not provide Koji options
+ - In a future version we will load the default hub config regardless
+ - all PythonOptions (except ConfigFile) are now deprecated and support for them
+ will disappear in a future version of Koji
+ """
+ logger = logging.getLogger("koji")
+ #get our config file(s)
+ if 'modpy.opts' in environ:
+ modpy_opts = environ.get('modpy.opts')
+ cf = modpy_opts.get('ConfigFile', None)
+ # to aid in the transition from PythonOptions to hub.conf, we only load
+ # the configfile if it is explicitly configured
+ if cf == '/etc/koji-hub/hub.conf':
+ cfdir = modpy_opts.get('ConfigDir', '/etc/koji-hub/hub.conf.d')
+ else:
+ cfdir = modpy_opts.get('ConfigDir', None)
+ if not cf and not cfdir:
+ logger.warn('Warning: configuring Koji via PythonOptions is deprecated. Use hub.conf')
+ else:
+ cf = environ.get('koji.hub.ConfigFile', '/etc/koji-hub/hub.conf')
+ cfdir = environ.get('koji.hub.ConfigDir', '/etc/koji-hub/hub.conf.d')
+ modpy_opts = {}
+ if cfdir:
+ configs = koji.config_directory_contents(cfdir)
+ else:
+ configs = []
+ if cf and os.path.isfile(cf):
+ configs.append(cf)
+ if configs:
+ config = RawConfigParser()
+ config.read(configs)
+ else:
+ config = None
+ cfgmap = [
+ #option, type, default
+ ['DBName', 'string', None],
+ ['DBUser', 'string', None],
+ ['DBHost', 'string', None],
+ ['DBhost', 'string', None], # alias for backwards compatibility
+ ['DBPass', 'string', None],
+ ['KojiDir', 'string', None],
+
+ ['AuthPrincipal', 'string', None],
+ ['AuthKeytab', 'string', None],
+ ['ProxyPrincipals', 'string', ''],
+ ['HostPrincipalFormat', 'string', None],
+
+ ['DNUsernameComponent', 'string', 'CN'],
+ ['ProxyDNs', 'string', ''],
+
+ ['LoginCreatesUser', 'boolean', True],
+ ['KojiWebURL', 'string', 'http://localhost.localdomain/koji'],
+ ['EmailDomain', 'string', None],
+ ['NotifyOnSuccess', 'boolean', True],
+ ['DisableNotifications', 'boolean', False],
+
+ ['Plugins', 'string', ''],
+ ['PluginPath', 'string', '/usr/lib/koji-hub-plugins'],
+
+ ['KojiDebug', 'boolean', False],
+ ['KojiTraceback', 'string', None],
+ ['VerbosePolicy', 'boolean', False],
+ ['EnableFunctionDebug', 'boolean', False],
+
+ ['LogLevel', 'string', 'WARNING'],
+ ['LogFormat', 'string', '%(asctime)s [%(levelname)s] m=%(method)s u=%(user_name)s p=%(process)s r=%(remoteaddr)s %(name)s: %(message)s'],
+
+ ['MissingPolicyOk', 'boolean', True],
+ ['EnableMaven', 'boolean', False],
+ ['EnableWin', 'boolean', False],
+ ['EnableImageMigration', 'boolean', False],
+
+ ['RLIMIT_AS', 'string', None],
+ ['RLIMIT_CORE', 'string', None],
+ ['RLIMIT_CPU', 'string', None],
+ ['RLIMIT_DATA', 'string', None],
+ ['RLIMIT_FSIZE', 'string', None],
+ ['RLIMIT_MEMLOCK', 'string', None],
+ ['RLIMIT_NOFILE', 'string', None],
+ ['RLIMIT_NPROC', 'string', None],
+ ['RLIMIT_OFILE', 'string', None],
+ ['RLIMIT_RSS', 'string', None],
+ ['RLIMIT_STACK', 'string', None],
+
+ ['MemoryWarnThreshold', 'integer', 5000],
+ ['MaxRequestLength', 'integer', 4194304],
+
+ ['LockOut', 'boolean', False],
+ ['ServerOffline', 'boolean', False],
+ ['OfflineMessage', 'string', None],
+ ]
+ opts = {}
+ for name, dtype, default in cfgmap:
+ if config:
+ key = ('hub', name)
+ if config.has_option(*key):
+ if dtype == 'integer':
+ opts[name] = config.getint(*key)
+ elif dtype == 'boolean':
+ opts[name] = config.getboolean(*key)
+ else:
+ opts[name] = config.get(*key)
+ else:
+ opts[name] = default
+ else:
+ if modpy_opts.get(name, None) is not None:
+ if dtype == 'integer':
+ opts[name] = int(modpy_opts.get(name))
+ elif dtype == 'boolean':
+ opts[name] = modpy_opts.get(name).lower() in ('yes', 'on', 'true', '1')
+ else:
+ opts[name] = modpy_opts.get(name)
+ else:
+ opts[name] = default
+ if opts['DBHost'] is None:
+ opts['DBHost'] = opts['DBhost']
+ # load policies
+ # (only from config file)
+ if config and config.has_section('policy'):
+ #for the moment, we simply transfer the policy conf to opts
+ opts['policy'] = dict(config.items('policy'))
+ else:
+ opts['policy'] = {}
+ for pname, text in _default_policies.iteritems():
+ opts['policy'].setdefault(pname, text)
+ # use configured KojiDir
+ if opts.get('KojiDir') is not None:
+ koji.BASEDIR = opts['KojiDir']
+ koji.pathinfo.topdir = opts['KojiDir']
+ return opts
+
+
+def load_plugins(opts):
+ """Load plugins specified by our configuration"""
+ if not opts['Plugins']:
+ return
+ logger = logging.getLogger('koji.plugins')
+ tracker = koji.plugin.PluginTracker(path=opts['PluginPath'].split(':'))
+ for name in opts['Plugins'].split():
+ logger.info('Loading plugin: %s', name)
+ try:
+ tracker.load(name)
+ except Exception:
+ logger.error(''.join(traceback.format_exception(*sys.exc_info())))
+ #make this non-fatal, but set ServerOffline
+ opts['ServerOffline'] = True
+ opts['OfflineMessage'] = 'configuration error'
+ return tracker
+
+_default_policies = {
+ 'build_from_srpm' : '''
+ has_perm admin :: allow
+ all :: deny
+ ''',
+ 'build_from_repo_id' : '''
+ has_perm admin :: allow
+ all :: deny
+ ''',
+ 'package_list' : '''
+ has_perm admin :: allow
+ all :: deny
+ ''',
+ 'channel' : '''
+ has req_channel :: req
+ is_child_task :: parent
+ all :: use default
+ ''',
+ 'vm' : '''
+ has_perm admin win-admin :: allow
+ all :: deny
+ '''
+}
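+
+# Each policy is a list of "test [args] :: action" rules that
+# koji.policy.SimpleRuleSet evaluates in order (see get_policy below).
+# Deployments may override these defaults in the [policy] section of
+# hub.conf, e.g. (hypothetical):
+#   [policy]
+#   build_from_srpm = has_perm admin :: allow
+#       has_perm srpm-build :: allow
+#       all :: deny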
+
+def get_policy(opts, plugins):
+ if not opts.get('policy'):
+ return
+ #first find available policy tests
+ alltests = [koji.policy.findSimpleTests([vars(kojihub), vars(koji.policy)])]
+ # we delay merging these to allow a test to be overridden for a specific policy
+ for plugin_name in opts.get('Plugins', '').split():
+ alltests.append(koji.policy.findSimpleTests(vars(plugins.get(plugin_name))))
+ policy = {}
+ for pname, text in opts['policy'].iteritems():
+ #filter/merge tests
+ merged = {}
+ for tests in alltests:
+ # tests can be limited to certain policies by setting a class variable
+ for name, test in tests.iteritems():
+ if hasattr(test, 'policy'):
+ if isinstance(test.policy, basestring):
+ if pname != test.policy:
+ continue
+ elif pname not in test.policy:
+ continue
+ # in case of name overlap, last one wins
+ # hence plugins can override builtin tests
+ merged[name] = test
+ policy[pname] = koji.policy.SimpleRuleSet(text.splitlines(), merged)
+ return policy
+
+
+class HubFormatter(logging.Formatter):
+ """Support some koji specific fields in the format string"""
+
+ def format(self, record):
+ record.method = getattr(context, 'method', None)
+ if hasattr(context, 'environ'):
+ record.remoteaddr = "%s:%s" % (
+ context.environ.get('REMOTE_ADDR', '?'),
+ context.environ.get('REMOTE_PORT', '?'))
+ else:
+ record.remoteaddr = "?:?"
+ if hasattr(context, 'session'):
+ record.user_id = context.session.user_id
+ record.session_id = context.session.id
+ record.callnum = context.session.callnum
+ record.user_name = context.session.user_data.get('name')
+ else:
+ record.user_id = None
+ record.session_id = None
+ record.callnum = None
+ record.user_name = None
+ return logging.Formatter.format(self, record)
+
+def setup_logging1():
+ """Set up basic logging, before options are loaded"""
+ global log_handler
+ logger = logging.getLogger("koji")
+ logger.setLevel(logging.WARNING)
+ #stderr logging (stderr goes to httpd logs)
+ log_handler = logging.StreamHandler()
+ log_format = '%(asctime)s [%(levelname)s] SETUP p=%(process)s %(name)s: %(message)s'
+ log_handler.setFormatter(HubFormatter(log_format))
+ log_handler.setLevel(logging.DEBUG)
+ logger.addHandler(log_handler)
+
+def setup_logging2(opts):
+    """Adjust logging based on configuration options"""
+    global log_handler
+ #determine log level
+ level = opts['LogLevel']
+ valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
+    # the config value can be a single level name or a series of
+    # logger:level pairs, processed in the order found
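+    # e.g. LogLevel = "db:DEBUG INFO" sets koji.db to DEBUG and leaves the
+    # rest of the koji hierarchy at INFO (per the name rules below)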
+ default = None
+ for part in level.split():
+ pair = part.split(':', 1)
+ if len(pair) == 2:
+ name, level = pair
+ else:
+ name = 'koji'
+ level = part
+ default = level
+ if level not in valid_levels:
+ raise koji.GenericError, "Invalid log level: %s" % level
+ #all our loggers start with koji
+ if name == '':
+ name = 'koji'
+ default = level
+ elif name.startswith('.'):
+ name = 'koji' + name
+ elif not name.startswith('koji'):
+ name = 'koji.' + name
+ level_code = logging._levelNames[level]
+ logging.getLogger(name).setLevel(level_code)
+ logger = logging.getLogger("koji")
+ # if KojiDebug is set, force main log level to DEBUG
+ if opts.get('KojiDebug'):
+ logger.setLevel(logging.DEBUG)
+ elif default is None:
+ #LogLevel did not configure a default level
+ logger.setLevel(logging.WARNING)
+ #log_handler defined in setup_logging1
+ log_handler.setFormatter(HubFormatter(opts['LogFormat']))
+
+
+def load_scripts(environ):
+ """Update path and import our scripts files"""
+ global kojihub
+ scriptsdir = os.path.dirname(environ['SCRIPT_FILENAME'])
+ sys.path.insert(0, scriptsdir)
+ import kojihub
+
+
+#
+# mod_python handler
+#
+
+def handler(req):
+ wrapper = WSGIWrapper(req)
+ return wrapper.run(application)
+
+
+def get_memory_usage():
+ pagesize = resource.getpagesize()
+    statm = [pagesize * int(y) / 1024
+             for y in open("/proc/self/statm").read().split()]
+ size, res, shr, text, lib, data, dirty = statm
+ return res - shr
+
+def server_setup(environ):
+ global opts, plugins, registry, policy
+ logger = logging.getLogger('koji')
+ try:
+ setup_logging1()
+ opts = load_config(environ)
+ setup_logging2(opts)
+ load_scripts(environ)
+ koji.util.setup_rlimits(opts)
+ plugins = load_plugins(opts)
+ registry = get_registry(opts, plugins)
+ policy = get_policy(opts, plugins)
+ koji.db.provideDBopts(database = opts["DBName"],
+ user = opts["DBUser"],
+ password = opts.get("DBPass",None),
+ host = opts.get("DBHost", None))
+ except Exception:
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ logger.error(tb_str)
+ opts = {
+ 'ServerOffline': True,
+ 'OfflineMessage': 'server startup error',
+ }
+
+
+#
+# wsgi handler
+#
+
+firstcall = True
+
+def application(environ, start_response):
+ global firstcall
+ if firstcall:
+ server_setup(environ)
+ firstcall = False
+ # XMLRPC uses POST only. Reject anything else
+    if environ['REQUEST_METHOD'] != 'POST':
+        response = "Method Not Allowed\nThis is an XML-RPC server. Only POST requests are accepted."
+        headers = [
+            ('Allow', 'POST'),
+            ('Content-Length', str(len(response))),
+            ('Content-Type', "text/plain"),
+        ]
+        start_response('405 Method Not Allowed', headers)
+        return [response]
+ if opts.get('ServerOffline'):
+ return offline_reply(start_response, msg=opts.get("OfflineMessage", None))
+ # XXX check request length
+ # XXX most of this should be moved elsewhere
+ if 1:
+ try:
+ start = time.time()
+ memory_usage_at_start = get_memory_usage()
+
+ context._threadclear()
+ context.commit_pending = False
+ context.opts = opts
+ context.handlers = HandlerAccess(registry)
+ context.environ = environ
+ context.policy = policy
+ try:
+ context.cnx = koji.db.connect()
+ except Exception:
+ return offline_reply(start_response, msg="database outage")
+ h = ModXMLRPCRequestHandler(registry)
+ if environ['CONTENT_TYPE'] == 'application/octet-stream':
+ response = h._wrap_handler(h.handle_upload, environ)
+ else:
+ response = h._wrap_handler(h.handle_rpc, environ)
+ headers = [
+ ('Content-Length', str(len(response))),
+ ('Content-Type', "text/xml"),
+ ]
+ start_response('200 OK', headers)
+ if h.traceback:
+ #rollback
+ context.cnx.rollback()
+ elif context.commit_pending:
+ context.cnx.commit()
+ memory_usage_at_end = get_memory_usage()
+ if memory_usage_at_end - memory_usage_at_start > opts['MemoryWarnThreshold']:
+ paramstr = repr(getattr(context, 'params', 'UNKNOWN'))
+ if len(paramstr) > 120:
+ paramstr = paramstr[:117] + "..."
+ h.logger.warning("Memory usage of process %d grew from %d KiB to %d KiB (+%d KiB) processing request %s with args %s" % (os.getpid(), memory_usage_at_start, memory_usage_at_end, memory_usage_at_end - memory_usage_at_start, context.method, paramstr))
+ h.logger.debug("Returning %d bytes after %f seconds", len(response),
+ time.time() - start)
+ finally:
+ #make sure context gets cleaned up
+ if hasattr(context,'cnx'):
+ try:
+ context.cnx.close()
+ except Exception:
+ pass
+ context._threadclear()
+ return [response] #XXX
+
+
+def get_registry(opts, plugins):
+ # Create and populate handler registry
+ registry = HandlerRegistry()
+ functions = kojihub.RootExports()
+ hostFunctions = kojihub.HostExports()
+ registry.register_instance(functions)
+ registry.register_module(hostFunctions,"host")
+ registry.register_function(koji.auth.login)
+ registry.register_function(koji.auth.krbLogin)
+ registry.register_function(koji.auth.sslLogin)
+ registry.register_function(koji.auth.logout)
+ registry.register_function(koji.auth.subsession)
+ registry.register_function(koji.auth.logoutChild)
+ registry.register_function(koji.auth.exclusiveSession)
+ registry.register_function(koji.auth.sharedSession)
+ for name in opts.get('Plugins', '').split():
+ registry.register_plugin(plugins.get(name))
+ return registry
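+# Note (illustrative, the method names are editor's examples):
+# register_instance() exposes RootExports methods under their plain
+# XML-RPC names, while register_module(..., "host") adds a prefix, so a
+# builder call such as "host.getID" dispatches to HostExports.getID.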
diff --git a/hub/rpmdiff b/hub/rpmdiff
new file mode 100755
index 0000000..88c0c9b
--- /dev/null
+++ b/hub/rpmdiff
@@ -0,0 +1,248 @@
+#!/usr/bin/python
+#
+# Copyright (C) 2006 Mandriva; 2009-2014 Red Hat, Inc.
+# Authors: Frederic Lepied, Florian Festi
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# This library and program are heavily based on rpmdiff from the rpmlint package.
+# It was modified to be used as a standalone library for the Koji project.
+
+import rpm
+import os
+import itertools
+
+import sys, getopt
+
+
+class Rpmdiff:
+
+ # constants
+
+ TAGS = ( rpm.RPMTAG_NAME, rpm.RPMTAG_SUMMARY,
+ rpm.RPMTAG_DESCRIPTION, rpm.RPMTAG_GROUP,
+ rpm.RPMTAG_LICENSE, rpm.RPMTAG_URL,
+ rpm.RPMTAG_PREIN, rpm.RPMTAG_POSTIN,
+ rpm.RPMTAG_PREUN, rpm.RPMTAG_POSTUN)
+
+ PRCO = ( 'REQUIRES', 'PROVIDES', 'CONFLICTS', 'OBSOLETES')
+
+ #{fname : (size, mode, mtime, flags, dev, inode,
+ # nlink, state, vflags, user, group, digest)}
+ __FILEIDX = [ ['S', 0],
+ ['M', 1],
+ ['5', 11],
+ ['D', 4],
+ ['N', 6],
+ ['L', 7],
+ ['V', 8],
+ ['U', 9],
+ ['G', 10],
+ ['F', 3],
+ ['T', 2] ]
+
+ try:
+ if rpm.RPMSENSE_SCRIPT_PRE:
+ PREREQ_FLAG=rpm.RPMSENSE_PREREQ|rpm.RPMSENSE_SCRIPT_PRE|\
+ rpm.RPMSENSE_SCRIPT_POST|rpm.RPMSENSE_SCRIPT_PREUN|\
+ rpm.RPMSENSE_SCRIPT_POSTUN
+ except AttributeError:
+ try:
+ PREREQ_FLAG=rpm.RPMSENSE_PREREQ
+ except:
+ #(proyvind): This seems ugly, but then again so does
+ # this whole check as well.
+ PREREQ_FLAG=False
+
+ DEPFORMAT = '%-12s%s %s %s %s'
+ FORMAT = '%-12s%s'
+
+ ADDED = 'added'
+ REMOVED = 'removed'
+
+ # code starts here
+
+ def __init__(self, old, new, ignore=None):
+ self.result = []
+ self.ignore = ignore
+ if self.ignore is None:
+ self.ignore = []
+
+ FILEIDX = self.__FILEIDX
+ for tag in self.ignore:
+ for entry in FILEIDX:
+ if tag == entry[0]:
+ entry[1] = None
+ break
+
+ old = self.__load_pkg(old)
+ new = self.__load_pkg(new)
+
+ # Compare single tags
+ for tag in self.TAGS:
+ old_tag = old[tag]
+ new_tag = new[tag]
+ if old_tag != new_tag:
+ tagname = rpm.tagnames[tag]
+ if old_tag == None:
+ self.__add(self.FORMAT, (self.ADDED, tagname))
+ elif new_tag == None:
+ self.__add(self.FORMAT, (self.REMOVED, tagname))
+ else:
+ self.__add(self.FORMAT, ('S.5........', tagname))
+
+ # compare Provides, Requires, ...
+ for tag in self.PRCO:
+ self.__comparePRCOs(old, new, tag)
+
+ # compare the files
+
+ old_files_dict = self.__fileIteratorToDict(old.fiFromHeader())
+ new_files_dict = self.__fileIteratorToDict(new.fiFromHeader())
+ files = list(set(itertools.chain(old_files_dict.iterkeys(),
+ new_files_dict.iterkeys())))
+ files.sort()
+
+ for f in files:
+ diff = 0
+
+ old_file = old_files_dict.get(f)
+ new_file = new_files_dict.get(f)
+
+ if not old_file:
+ self.__add(self.FORMAT, (self.ADDED, f))
+ elif not new_file:
+ self.__add(self.FORMAT, (self.REMOVED, f))
+ else:
+ format = ''
+ for entry in FILEIDX:
+ if entry[1] != None and \
+ old_file[entry[1]] != new_file[entry[1]]:
+ format = format + entry[0]
+ diff = 1
+ else:
+ format = format + '.'
+ if diff:
+ self.__add(self.FORMAT, (format, f))
+
+ # return a report of the differences
+ def textdiff(self):
+ return '\n'.join((format % data for format, data in self.result))
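+ # Illustrative output for a hypothetical package pair:
+ # added REQUIRES libfoo.so.1
+ # S.5........ /usr/bin/foo
+ # The letter codes follow rpm -V conventions (S=size, M=mode,
+ # 5=digest, ...) per __FILEIDX above.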
+
+ # do the two rpms differ
+ def differs(self):
+ return bool(self.result)
+
+ # add one differing item
+ def __add(self, format, data):
+ self.result.append((format, data))
+
+ # load a package from a file or from the installed ones
+ def __load_pkg(self, filename):
+ ts = rpm.ts()
+ f = os.open(filename, os.O_RDONLY)
+ hdr = ts.hdrFromFdno(f)
+ os.close(f)
+ return hdr
+
+ # output the right string according to RPMSENSE_* const
+ def sense2str(self, sense):
+ s = ""
+ for tag, char in ((rpm.RPMSENSE_LESS, "<"),
+ (rpm.RPMSENSE_GREATER, ">"),
+ (rpm.RPMSENSE_EQUAL, "=")):
+ if sense & tag:
+ s += char
+ return s
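+ # For example, sense2str(rpm.RPMSENSE_LESS | rpm.RPMSENSE_EQUAL)
+ # returns '<=', the operator form used in versioned dependencies.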
+
+ # compare Provides, Requires, Conflicts, Obsoletes
+ def __comparePRCOs(self, old, new, name):
+ oldflags = old[name[:-1]+'FLAGS']
+ newflags = new[name[:-1]+'FLAGS']
+ # fix buggy rpm binding not returning list for single entries
+ if not isinstance(oldflags, list): oldflags = [ oldflags ]
+ if not isinstance(newflags, list): newflags = [ newflags ]
+
+ o = zip(old[name], oldflags, old[name[:-1]+'VERSION'])
+ n = zip(new[name], newflags, new[name[:-1]+'VERSION'])
+
+ if name == 'PROVIDES': # filter out self-provides
+ oldNV = (old['name'], rpm.RPMSENSE_EQUAL,
+ "%s-%s" % (old['version'], old['release']))
+ newNV = (new['name'], rpm.RPMSENSE_EQUAL,
+ "%s-%s" % (new['version'], new['release']))
+ o = [entry for entry in o if entry != oldNV]
+ n = [entry for entry in n if entry != newNV]
+
+ for oldentry in o:
+ if not oldentry in n:
+ if name == 'REQUIRES' and oldentry[1] & self.PREREQ_FLAG:
+ tagname = 'PREREQ'
+ else:
+ tagname = name
+ self.__add(self.DEPFORMAT,
+ (self.REMOVED, tagname, oldentry[0],
+ self.sense2str(oldentry[1]), oldentry[2]))
+ for newentry in n:
+ if not newentry in o:
+ if name == 'REQUIRES' and newentry[1] & self.PREREQ_FLAG:
+ tagname = 'PREREQ'
+ else:
+ tagname = name
+ self.__add(self.DEPFORMAT,
+ (self.ADDED, tagname, newentry[0],
+ self.sense2str(newentry[1]), newentry[2]))
+
+ def __fileIteratorToDict(self, fi):
+ result = {}
+ for filedata in fi:
+ result[filedata[0]] = filedata[1:]
+ return result
+
+def _usage(exit=1):
+ print "Usage: %s [<options>] <old package> <new package>" % sys.argv[0]
+ print "Options:"
+ print " -h, --help Output this message and exit"
+ print " -i, --ignore Tag to ignore when calculating differences"
+ print " (may be used multiple times)"
+ print " Valid values are: SM5DNLVUGFT"
+ sys.exit(exit)
+
+def main():
+
+ ignore_tags = []
+ try:
+ opts, args = getopt.getopt(sys.argv[1:], "hi:", ["help", "ignore="])
+ except getopt.GetoptError, e:
+ print "Error: %s" % e
+ _usage()
+
+ for option, argument in opts:
+ if option in ("-h", "--help"):
+ _usage(0)
+ if option in ("-i", "--ignore"):
+ ignore_tags.append(argument)
+
+ if len(args) != 2:
+ _usage()
+
+ d = Rpmdiff(args[0], args[1], ignore=ignore_tags)
+ print d.textdiff()
+ sys.exit(int(d.differs()))
+
+if __name__ == '__main__':
+ main()
+
+# rpmdiff ends here
diff --git a/koji.spec b/koji.spec
new file mode 100644
index 0000000..8a65b6f
--- /dev/null
+++ b/koji.spec
@@ -0,0 +1,587 @@
+%{!?python_sitelib: %define python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+
+%if 0%{?fedora} >= 21 || 0%{?rhel} >= 7
+%global use_systemd 1
+%else
+%global use_systemd 0
+%global install_opt TYPE=sysv
+%endif
+
+%define baserelease 1
+#build with --define 'testbuild 1' to have a timestamp appended to release
+%if "x%{?testbuild}" == "x1"
+%define release %{baserelease}.%(date +%%Y%%m%%d.%%H%%M.%%S)
+%else
+%define release %{baserelease}
+%endif
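+# For example (illustrative): a testbuild made on 2015-12-02 would yield
+# Release: 1.20151202.1546.40%{?dist}, while a normal build yields
+# Release: 1%{?dist}.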
+Name: koji
+Version: 1.10.0
+Release: %{release}%{?dist}
+License: LGPLv2 and GPLv2+
+# koji.ssl libs (from plague) are GPLv2+
+Summary: Build system tools
+Group: Applications/System
+URL: http://fedorahosted.org/koji
+Source: https://fedorahosted.org/released/koji/koji-%{version}.tar.bz2
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+BuildArch: noarch
+Requires: python-krbV >= 1.0.13
+Requires: rpm-python
+Requires: pyOpenSSL
+Requires: python-urlgrabber
+BuildRequires: python
+%if %{use_systemd}
+BuildRequires: systemd
+BuildRequires: pkgconfig
+%endif
+
+%description
+Koji is a system for building and tracking RPMs. The base package
+contains shared libraries and the command-line interface.
+
+%package hub
+Summary: Koji XMLRPC interface
+Group: Applications/Internet
+License: LGPLv2 and GPLv2
+# rpmdiff lib (from rpmlint) is GPLv2 (only)
+Requires: httpd
+Requires: mod_wsgi
+Requires: postgresql-python
+Requires: %{name} = %{version}-%{release}
+
+%description hub
+koji-hub is the XMLRPC interface to the koji database
+
+%package hub-plugins
+Summary: Koji hub plugins
+Group: Applications/Internet
+License: LGPLv2
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-hub = %{version}-%{release}
+Requires: python-qpid >= 0.7
+%if 0%{?rhel} == 5
+Requires: python-ssl
+%endif
+Requires: cpio
+
+%description hub-plugins
+Plugins to the koji XMLRPC interface
+
+%package builder
+Summary: Koji RPM builder daemon
+Group: Applications/System
+License: LGPLv2 and GPLv2+
+#mergerepos (from createrepo) is GPLv2+
+Requires: %{name} = %{version}-%{release}
+Requires: mock >= 0.9.14
+Requires(pre): /usr/sbin/useradd
+%if %{use_systemd}
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+%else
+Requires(post): /sbin/chkconfig
+Requires(post): /sbin/service
+Requires(preun): /sbin/chkconfig
+Requires(preun): /sbin/service
+%endif
+Requires: /usr/bin/cvs
+Requires: /usr/bin/svn
+Requires: /usr/bin/git
+Requires: python-cheetah
+%if 0%{?rhel} == 5
+Requires: createrepo >= 0.4.11-2
+Requires: python-hashlib
+Requires: python-createrepo
+%endif
+%if 0%{?fedora} >= 9
+Requires: createrepo >= 0.9.2
+%endif
+
+%description builder
+koji-builder is the daemon that runs on build machines and executes
+tasks that come through the Koji system.
+
+%package vm
+Summary: Koji virtual machine management daemon
+Group: Applications/System
+License: LGPLv2
+Requires: %{name} = %{version}-%{release}
+%if %{use_systemd}
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+%else
+Requires(post): /sbin/chkconfig
+Requires(post): /sbin/service
+Requires(preun): /sbin/chkconfig
+Requires(preun): /sbin/service
+%endif
+Requires: libvirt-python
+Requires: libxml2-python
+Requires: /usr/bin/virt-clone
+Requires: qemu-img
+
+%description vm
+koji-vm contains a supplemental build daemon that executes certain tasks in a
+virtual machine. This package is not required for most installations.
+
+%package utils
+Summary: Koji Utilities
+Group: Applications/Internet
+License: LGPLv2
+Requires: postgresql-python
+Requires: %{name} = %{version}-%{release}
+%if %{use_systemd}
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+%endif
+
+%description utils
+Utilities for the Koji system
+
+%package web
+Summary: Koji Web UI
+Group: Applications/Internet
+License: LGPLv2
+Requires: httpd
+Requires: mod_wsgi
+Requires: mod_auth_kerb
+Requires: postgresql-python
+Requires: python-cheetah
+Requires: %{name} = %{version}-%{release}
+Requires: python-krbV >= 1.0.13
+
+%description web
+koji-web is a web UI to the Koji system.
+
+%prep
+%setup -q
+
+%build
+
+%install
+rm -rf $RPM_BUILD_ROOT
+make DESTDIR=$RPM_BUILD_ROOT %{?install_opt} install
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root)
+%{_bindir}/*
+%{python_sitelib}/%{name}
+%config(noreplace) %{_sysconfdir}/koji.conf
+%dir %{_sysconfdir}/koji.conf.d
+%doc docs Authors COPYING LGPL
+
+%files hub
+%defattr(-,root,root)
+%{_datadir}/koji-hub
+%dir %{_libexecdir}/koji-hub
+%{_libexecdir}/koji-hub/rpmdiff
+%config(noreplace) %{_sysconfdir}/httpd/conf.d/kojihub.conf
+%dir %{_sysconfdir}/koji-hub
+%config(noreplace) %{_sysconfdir}/koji-hub/hub.conf
+%dir %{_sysconfdir}/koji-hub/hub.conf.d
+
+%files hub-plugins
+%defattr(-,root,root)
+%dir %{_prefix}/lib/koji-hub-plugins
+%{_prefix}/lib/koji-hub-plugins/*.py*
+%dir %{_sysconfdir}/koji-hub/plugins
+%{_sysconfdir}/koji-hub/plugins/*.conf
+
+%files utils
+%defattr(-,root,root)
+%{_sbindir}/kojira
+%if %{use_systemd}
+%{_unitdir}/kojira.service
+%else
+%{_initrddir}/kojira
+%config(noreplace) %{_sysconfdir}/sysconfig/kojira
+%endif
+%dir %{_sysconfdir}/kojira
+%config(noreplace) %{_sysconfdir}/kojira/kojira.conf
+%{_sbindir}/koji-gc
+%dir %{_sysconfdir}/koji-gc
+%config(noreplace) %{_sysconfdir}/koji-gc/koji-gc.conf
+%{_sbindir}/koji-shadow
+%dir %{_sysconfdir}/koji-shadow
+%config(noreplace) %{_sysconfdir}/koji-shadow/koji-shadow.conf
+
+%files web
+%defattr(-,root,root)
+%{_datadir}/koji-web
+%dir %{_sysconfdir}/kojiweb
+%config(noreplace) %{_sysconfdir}/kojiweb/web.conf
+%config(noreplace) %{_sysconfdir}/httpd/conf.d/kojiweb.conf
+%dir %{_sysconfdir}/kojiweb/web.conf.d
+
+%files builder
+%defattr(-,root,root)
+%{_sbindir}/kojid
+%dir %{_libexecdir}/kojid
+%{_libexecdir}/kojid/mergerepos
+%if %{use_systemd}
+%{_unitdir}/kojid.service
+%else
+%{_initrddir}/kojid
+%config(noreplace) %{_sysconfdir}/sysconfig/kojid
+%endif
+%dir %{_sysconfdir}/kojid
+%config(noreplace) %{_sysconfdir}/kojid/kojid.conf
+%attr(-,kojibuilder,kojibuilder) %{_sysconfdir}/mock/koji
+
+%pre builder
+/usr/sbin/useradd -r -s /bin/bash -G mock -d /builddir -M kojibuilder 2>/dev/null ||:
+
+%if %{use_systemd}
+
+%post builder
+%systemd_post kojid.service
+
+%preun builder
+%systemd_preun kojid.service
+
+%postun builder
+%systemd_postun kojid.service
+
+%else
+
+%post builder
+/sbin/chkconfig --add kojid
+
+%preun builder
+if [ $1 = 0 ]; then
+ /sbin/service kojid stop &> /dev/null
+ /sbin/chkconfig --del kojid
+fi
+%endif
+
+%files vm
+%defattr(-,root,root)
+%{_sbindir}/kojivmd
+#dir %{_datadir}/kojivmd
+%{_datadir}/kojivmd/kojikamid
+%if %{use_systemd}
+%{_unitdir}/kojivmd.service
+%else
+%{_initrddir}/kojivmd
+%config(noreplace) %{_sysconfdir}/sysconfig/kojivmd
+%endif
+%dir %{_sysconfdir}/kojivmd
+%config(noreplace) %{_sysconfdir}/kojivmd/kojivmd.conf
+
+%if %{use_systemd}
+
+%post vm
+%systemd_post kojivmd.service
+
+%preun vm
+%systemd_preun kojivmd.service
+
+%postun vm
+%systemd_postun kojivmd.service
+
+%else
+
+%post vm
+/sbin/chkconfig --add kojivmd
+
+%preun vm
+if [ $1 = 0 ]; then
+ /sbin/service kojivmd stop &> /dev/null
+ /sbin/chkconfig --del kojivmd
+fi
+%endif
+
+%if %{use_systemd}
+
+%post utils
+%systemd_post kojira.service
+
+%preun utils
+%systemd_preun kojira.service
+
+%postun utils
+%systemd_postun kojira.service
+
+%else
+%post utils
+/sbin/chkconfig --add kojira
+/sbin/service kojira condrestart &> /dev/null || :
+%preun utils
+if [ $1 = 0 ]; then
+ /sbin/service kojira stop &> /dev/null || :
+ /sbin/chkconfig --del kojira
+fi
+%endif
+
+%changelog
+* Tue Jul 14 2015 Mike McLean <mikem at redhat.com> - 1.10.0-1
+- 1.10.0 release
+
+* Mon Mar 24 2014 Mike McLean <mikem at redhat.com> - 1.9.0-1
+- 1.9.0 release
+
+* Mon Apr 1 2013 Mike McLean <mikem at redhat.com> - 1.8.0-1
+- refactor how images are stored and tracked (images as builds)
+- delete repos in background
+- limit concurrent maven regens
+- let kojira delete repos for deleted tags
+- check for a target before waiting on a repo
+- don't append to artifact_relpaths twice in the case of Maven builds
+- Use standard locations for maven settings and local repository
+- Specify altDeploymentRepository for Maven in settings.xml NOT on command line
+- rather than linking to each artifact from the Maven repo, link the version directory
+- handle volumes in maven repos
+- fix integer overflow issue in checkUpload handler
+- koji-shadow adjustments
+- change default ssl timeout to 60 seconds
+- rewrite ensuredir function to avoid os.makedirs race
+- rename -pkg commands to -build
+- implement remove-pkg for the cli
+- a little more room to edit host comments
+- use wsgi.url_scheme instead of HTTPS
+- handle relative-to-koji urls in mergerepos
+
+* Mon Nov 19 2012 Mike McLean <mikem at redhat.com> - 1.7.1-1
+- improved upload mechanism
+- koji-shadow enhancements
+- handle multiple topurl values in kojid
+- fix form handling
+- mount all of /dev for image tasks
+- avoid error messages on canceled/reassigned tasks
+- handle unauthenticated case in moshimoshi
+- fix the tag_updates query in tag_changed_since_event
+- stop tracking deleted repos in kojira
+- don't die on malformed tasks
+- fix bugs in our relpath backport
+- avoid baseurl option in createrepo
+- message bus plugin: use timeout and heartbeat
+- add maven and win to the supported cli search types
+- remove latest-by-tag command
+- fix noreplace setting for web.conf
+- add sanity checks to regen-repo command
+- debuginfo and source options for regen-repo command
+- make taginfo command compatible with older koji servers
+
+* Thu May 31 2012 Mike McLean <mikem at redhat.com> - 1.7.0-1
+- mod_wsgi support
+- mod_python support deprecated
+- kojiweb configuration file (web.conf)
+- split storage support (build volumes)
+- configurable resource limits (hub, web, and kojid)
+- drop pkgurl in favor of topurl
+- better approach to web themes
+- more helpful policy errors
+- clearer errors when rpc args do not match function signature
+- avoid retry errors on some common builder calls
+- don't rely on pgdb._quoteparams
+- avoid hosts taking special arch tasks they cannot handle
+- kojid: configure yum proxy
+- kojid: configure failed buildroot lifetime
+- kojid: literal_task_arches option
+- support for arm hardware floating point arches
+- maven build options: goals, envs, extra packages
+- store Maven build output under the standard build directory
+- make the list of files ignored in the local Maven repo configurable
+- add Maven information to taginfo
+- make kojira more efficient using multicalls and caching
+- speed up kojira startup
+- kojira: configurable sleep time
+- kojira: count untracked newRepo tasks towards limits
+- kojira: limit non-waiting newRepo tasks
+- gssapi support in the messagebus plugin
+- grant-permission --new
+- improved argument display for list-api command
+- moshimoshi
+- download task output directly from KojiFilesURL, rather than going through getfile
+- option to show buildroot data in rpminfo command
+- show search help on blank search command
+- wait-repo: wait for the build(s) to be the latest rather than just present
+
+* Thu Dec 16 2010 Mike McLean <mikem at redhat.com> - 1.6.0-1
+- extend debuginfo check to cover newer formats
+- ignore tasks that TaskManager does not have a handler for
+- avoid possible traceback on ^c
+- graceful mass builder restart
+- no longer issue condrestart in postinstall scriptlet
+- fix ssl connections for python 2.7
+- more sanity checks on wait-repo arguments (ticket#192)
+- maven: only treat files ending in .patch as patch files
+- maven: retain ordering so more recent builds will take precedence
+- enable passing options to Maven
+- maven: use strict checksum checking
+
+* Thu Nov 11 2010 Mike McLean <mikem at redhat.com> - 1.5.0-1
+- koji vm daemon for executing certain tasks in virtual machine
+- major refactoring of koji daemons
+- support for complete history query (not just tag operations)
+- allow filtering tasks by channel in webui
+- rename-channel and remove-channel commands
+- clean up tagBuild checks (rhbz#616839)
+- resurrect import-comps command
+- utf8 encoding fixes
+- allow getfile to handle files > 2G
+- update the messagebus plugin to use the new qpid.messaging API
+- rpm2maven plugin: use Maven artifacts from rpm builds in Koji's Maven repos
+- log mock output
+
+* Thu Jul 8 2010 Mike McLean <mikem at redhat.com> - 1.4.0-1
+- Merge mead branch: support for building jars with Maven *
+- support for building appliance images *
+- soft dependencies for LiveCD/Appliance features
+- smarter prioritization of repo regenerations
+- package list policy to determine if package list changes are allowed
+- channel policy to determine which channel a task is placed in
+- edit host data via webui
+- description and comment fields for hosts *
+- cleaner log entries for kojihub
+- track user data in versioned tables *
+- allow setting retry parameters for the cli
+- track start time for tasks *
+- allow packages built from the same srpm to span multiple external repos
+- make the command used to fetch sources configurable per repo
+- kojira: remove unexpected directories
+- let kojid decide if it can handle a noarch task
+- avoid extraneous ssl handshakes
+- schema changes to support starred items
+
+* Tue Nov 10 2009 Mike Bonnet <mikeb at redhat.com> - 1.3.2-1
+- support for LiveCD creation
+- new event-based callback system
+
+* Fri Jun 12 2009 Mike Bonnet <mikeb at redhat.com> - 1.3.1-2
+- use <mirrorOf>*</mirrorOf> now that Maven 2.0.8 is available in the buildroots
+- retrieve Maven info for a build from the top-level pom.xml in the source tree
+- allow specifying one or more Maven profiles to be used during a build
+
+* Fri Feb 20 2009 Mike McLean <mikem at redhat.com> 1.3.1-1
+- external repo urls rewritten to end with /
+- add schema file for upgrades from 1.2.x to 1.3
+- explicitly request sha1 for backward compatibility with older yum
+- fix up sparc arch handling
+
+* Wed Feb 18 2009 Mike McLean <mikem at redhat.com> 1.3.0-1
+- support for external repos
+- support for noarch subpackages
+- support rpms with different signatures and file digests
+- hub configuration file
+- drop huge tables from database
+- build srpms in chroots
+- hub policies
+- limited plugin support
+- limited web ui theming
+- many miscellaneous enhancements and bugfixes
+- license fields changed to reflect code additions
+
+* Mon Aug 25 2008 Mike McLean <mikem at redhat.com> 1.2.6-1
+- fix testbuild conditional [downstream]
+- fix license tag [downstream]
+- bump version
+- more robust client sessions
+- handle errors gracefully in web ui
+- koji-gc added to utils subpackage
+- skip sleep in kojid after taking a task
+- new dir layout for task workdirs (avoids large directories)
+- unified boolean option parsing in kojihub
+- new ServerOffline exception
+- other miscellaneous fixes
+
+* Fri Jan 25 2008 jkeating <jkeating at redhat.com> 1.2.5-1
+- Put createrepo arguments in correct order
+
+* Thu Jan 24 2008 jkeating <jkeating at redhat.com> 1.2.4-1
+- Use the --skip-stat flag in createrepo calls.
+- canonicalize tag arches before using them (dgilmore)
+- fix return value of delete_build
+- Revert to getfile urls if the task is not successful in emails
+- Pass --target instead of --arch to mock.
+- ignore trashcan tag in prune-signed-copies command
+- add the "allowed_scms" kojid parameter
+- allow filtering builds by the person who built them
+
+* Fri Dec 14 2007 jkeating <jkeating at redhat.com> 1.2.3-1
+- New upstream release with lots of updates, bugfixes, and enhancements.
+
+* Tue Jun 5 2007 Mike Bonnet <mikeb at redhat.com> - 1.2.2-1
+- only allow admins to perform non-scratch builds from srpm
+- bug fixes to the cmd-line and web UIs
+
+* Thu May 31 2007 Mike Bonnet <mikeb at redhat.com> - 1.2.1-1
+- don't allow ExclusiveArch to expand the archlist (bz#239359)
+- add a summary line stating whether the task succeeded or failed to the end of the "watch-task" output
+- add a search box to the header of every page in the web UI
+- new koji download-build command (patch provided by Dan Berrange)
+
+* Tue May 15 2007 Mike Bonnet <mikeb at redhat.com> - 1.2.0-1
+- change version numbering to a 3-token scheme
+- install the koji favicon
+
+* Mon May 14 2007 Mike Bonnet <mikeb at redhat.com> - 1.1-5
+- cleanup koji-utils Requires
+- fix encoding and formatting in email notifications
+- expand archlist based on ExclusiveArch/BuildArchs
+- allow import of rpms without srpms
+- commit before linking in prepRepo to release db locks
+- remove exec bit from kojid logs and uploaded files (patch by Enrico Scholz)
+
+* Tue May 1 2007 Mike Bonnet <mikeb at redhat.com> - 1.1-4
+- remove spurious Requires: from the koji-utils package
+
+* Tue May 1 2007 Mike Bonnet <mikeb at redhat.com> - 1.1-3
+- fix typo in BuildNotificationTask (patch provided by Michael Schwendt)
+- add the --changelog param to the buildinfo command
+- always send email notifications to the package builder and package owner
+- improvements to the web UI
+
+* Tue Apr 17 2007 Mike Bonnet <mikeb at redhat.com> - 1.1-2
+- re-enable use of the --update flag to createrepo
+
+* Mon Apr 09 2007 Jesse Keating <jkeating at redhat.com> 1.1-1
+- make the output of listPackages() consistent regardless of with_dups
+- prevent large batches of repo deletes from holding up regens
+- allow sorting the host list by arches
+
+* Mon Apr 02 2007 Jesse Keating <jkeating at redhat.com> 1.0-1
+- Release 1.0!
+
+* Wed Mar 28 2007 Mike Bonnet <mikeb at redhat.com> - 0.9.7-4
+- set SSL connection timeout to 12 hours
+
+* Wed Mar 28 2007 Mike Bonnet <mikeb at redhat.com> - 0.9.7-3
+- avoid SSL renegotiation
+- improve log file handling in kojid
+- bug fixes in command-line and web UI
+
+* Sun Mar 25 2007 Mike Bonnet <mikeb at redhat.com> - 0.9.7-2
+- enable http access to packages in kojid
+- add Requires: pyOpenSSL
+- building srpms from CVS now works with the Extras CVS structure
+- fixes to the chain-build command
+- bug fixes in the XML-RPC and web interfaces
+
+* Tue Mar 20 2007 Jesse Keating <jkeating at redhat.com> - 0.9.7-1
+- Package up the needed ssl files
+
+* Tue Mar 20 2007 Jesse Keating <jkeating at redhat.com> - 0.9.6-1
+- 0.9.6 release, mostly ssl auth stuff
+- use named directories for config stuff
+- remove -3 requires on createrepo, don't need that specific version anymore
+
+* Tue Feb 20 2007 Jesse Keating <jkeating at redhat.com> - 0.9.5-8
+- Add Authors COPYING LGPL to the docs of the main package
+
+* Tue Feb 20 2007 Jesse Keating <jkeating at redhat.com> - 0.9.5-7
+- Move web files from /var/www to /usr/share
+- Use -p in install calls
+- Add rpm-python to requires for koji
+
+* Mon Feb 19 2007 Jesse Keating <jkeating at redhat.com> - 0.9.5-6
+- Clean up spec for package review
+
+* Sun Feb 04 2007 Mike McLean <mikem at redhat.com> - 0.9.5-1
+- project renamed to koji
diff --git a/koji/Makefile b/koji/Makefile
new file mode 100644
index 0000000..2e8909b
--- /dev/null
+++ b/koji/Makefile
@@ -0,0 +1,30 @@
+SUBDIRS = ssl
+
+PYTHON=python
+PACKAGE = $(shell basename `pwd`)
+PYFILES = $(wildcard *.py)
+PYSCRIPTS = context.py
+PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
+PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
+PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
+PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE)
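+# For example (illustrative): with python 2.7 under a /usr prefix this
+# expands to PKGDIR = /usr/lib/python2.7/site-packages/koji.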
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ mkdir -p $(DESTDIR)/$(PKGDIR)
+ for p in $(PYFILES) ; do \
+ install -p -m 644 $$p $(DESTDIR)/$(PKGDIR)/$$p; \
+ done
+ for p in $(PYSCRIPTS) ; do \
+ chmod 0755 $(DESTDIR)/$(PKGDIR)/$$p; \
+ done
+ $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(PKGDIR)', 1, '$(PYDIR)', 1)"
+
+ for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(PKGDIR)/$$d \
+ -C $$d install; [ $$? = 0 ] || exit 1; done
diff --git a/koji/__init__.py b/koji/__init__.py
new file mode 100644
index 0000000..fadbada
--- /dev/null
+++ b/koji/__init__.py
@@ -0,0 +1,2476 @@
+# Python module
+# Common functions
+
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+import sys
+try:
+ import krbV
+except ImportError:
+ sys.stderr.write("Warning: Could not install krbV module. Kerberos support will be disabled.\n")
+ sys.stderr.flush()
+import base64
+import datetime
+import errno
+from fnmatch import fnmatch
+import httplib
+import logging
+import logging.handlers
+from koji.util import md5_constructor
+import os
+import os.path
+import pwd
+import random
+import re
+import rpm
+import shutil
+import signal
+import socket
+import ssl.SSLCommon
+import struct
+import tempfile
+import time
+import traceback
+import urllib
+import urllib2
+import urlparse
+import util
+import xmlrpclib
+import xml.sax
+import xml.sax.handler
+from xmlrpclib import loads, dumps, Fault
+import OpenSSL
+import zipfile
+
+def _(args):
+ """Stub function for translation"""
+ return args
+
+## Constants ##
+
+RPM_HEADER_MAGIC = '\x8e\xad\xe8'
+RPM_TAG_HEADERSIGNATURES = 62
+RPM_TAG_FILEDIGESTALGO = 5011
+RPM_SIGTAG_PGP = 1002
+RPM_SIGTAG_MD5 = 1004
+RPM_SIGTAG_GPG = 1005
+
+RPM_FILEDIGESTALGO_IDS = {
+ # Taken from RFC 4880
+ # A missing algo ID means md5
+ None: 'MD5',
+ 1: 'MD5',
+ 2: 'SHA1',
+ 3: 'RIPEMD160',
+ 8: 'SHA256',
+ 9: 'SHA384',
+ 10: 'SHA512',
+ 11: 'SHA224'
+ }
+
+class Enum(dict):
+ """A simple class to track our enumerated constants
+
+ Can quickly map forward or reverse
+ """
+
+ def __init__(self,*args):
+ self._order = tuple(*args)
+ super(Enum,self).__init__([(value,n) for n,value in enumerate(self._order)])
+
+ def __getitem__(self,key):
+ if isinstance(key,int) or isinstance(key,slice):
+ return self._order.__getitem__(key)
+ else:
+ return super(Enum,self).__getitem__(key)
+
+ def get(self,key,default=None):
+ try:
+ return self.__getitem__(key)
+ except (IndexError,KeyError):
+ return default
+
+ def getnum(self,key,default=None):
+ try:
+ value = self.__getitem__(key)
+ except (IndexError,KeyError):
+ return default
+ if isinstance(key,int):
+ return key
+ else:
+ return value
+
+ def getvalue(self,key,default=None):
+ try:
+ value = self.__getitem__(key)
+ except (IndexError,KeyError):
+ return default
+ if isinstance(key,int):
+ return value
+ else:
+ return key
+
+ def _notImplemented(self,*args,**opts):
+ raise NotImplementedError
+
+ #read-only
+ __setitem__ = _notImplemented
+ __delitem__ = _notImplemented
+ clear = _notImplemented
+ pop = _notImplemented
+ popitem = _notImplemented
+ update = _notImplemented
+ setdefault = _notImplemented
+
+API_VERSION = 1
+
+TASK_STATES = Enum((
+ 'FREE',
+ 'OPEN',
+ 'CLOSED',
+ 'CANCELED',
+ 'ASSIGNED',
+ 'FAILED',
+))
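+# For example: TASK_STATES['OPEN'] == 1 and TASK_STATES[1] == 'OPEN';
+# the same Enum maps names to numbers and numbers back to names.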
+
+BUILD_STATES = Enum((
+ 'BUILDING',
+ 'COMPLETE',
+ 'DELETED',
+ 'FAILED',
+ 'CANCELED',
+))
+
+USERTYPES = Enum((
+ 'NORMAL',
+ 'HOST',
+ 'GROUP',
+))
+
+USER_STATUS = Enum((
+ 'NORMAL',
+ 'BLOCKED',
+))
+
+# authtype values
+# normal == username/password
+AUTHTYPE_NORMAL = 0
+AUTHTYPE_KERB = 1
+AUTHTYPE_SSL = 2
+
+#dependency types
+DEP_REQUIRE = 0
+DEP_PROVIDE = 1
+DEP_OBSOLETE = 2
+DEP_CONFLICT = 3
+
+#dependency flags
+RPMSENSE_LESS = 2
+RPMSENSE_GREATER = 4
+RPMSENSE_EQUAL = 8
+
+# repo states
+REPO_STATES = Enum((
+ 'INIT',
+ 'READY',
+ 'EXPIRED',
+ 'DELETED',
+ 'PROBLEM',
+))
+# for backwards compatibility
+REPO_INIT = REPO_STATES['INIT']
+REPO_READY = REPO_STATES['READY']
+REPO_EXPIRED = REPO_STATES['EXPIRED']
+REPO_DELETED = REPO_STATES['DELETED']
+REPO_PROBLEM = REPO_STATES['PROBLEM']
+
+# buildroot states
+BR_STATES = Enum((
+ 'INIT',
+ 'WAITING',
+ 'BUILDING',
+ 'EXPIRED',
+))
+
+TAG_UPDATE_TYPES = Enum((
+ 'VOLUME_CHANGE',
+ 'IMPORT',
+ 'MANUAL',
+))
+
+CHECKSUM_TYPES = Enum((
+ 'md5',
+ 'sha1',
+ 'sha256',
+))
+
+#PARAMETERS
+BASEDIR = '/mnt/koji'
+# default task priority
+PRIO_DEFAULT = 20
+
+## BEGIN kojikamid dup
+
+#Exceptions
+class GenericError(Exception):
+ """Base class for our custom exceptions"""
+ faultCode = 1000
+ fromFault = False
+ def __str__(self):
+ try:
+ return str(self.args[0]['args'][0])
+ except:
+ try:
+ return str(self.args[0])
+ except:
+ return str(self.__dict__)
+## END kojikamid dup
+
+class LockError(GenericError):
+ """Raised when there is a lock conflict"""
+ faultCode = 1001
+
+class AuthError(GenericError):
+ """Raised when there is an error in authentication"""
+ faultCode = 1002
+
+class TagError(GenericError):
+ """Raised when a tagging operation fails"""
+ faultCode = 1003
+
+class ActionNotAllowed(GenericError):
+ """Raised when the session does not have permission to take some action"""
+ faultCode = 1004
+
+## BEGIN kojikamid dup
+
+class BuildError(GenericError):
+ """Raised when a build fails"""
+ faultCode = 1005
+## END kojikamid dup
+
+class AuthLockError(AuthError):
+ """Raised when a lock prevents authentication"""
+ faultCode = 1006
+
+class AuthExpired(AuthError):
+ """Raised when a session has expired"""
+ faultCode = 1007
+
+class SequenceError(AuthError):
+ """Raised when requests are received out of sequence"""
+ faultCode = 1008
+
+class RetryError(AuthError):
+ """Raised when a request is received twice and cannot be rerun"""
+ faultCode = 1009
+
+class PreBuildError(BuildError):
+ """Raised when a build fails during pre-checks"""
+ faultCode = 1010
+
+class PostBuildError(BuildError):
+ """Raised when a build fails during post-checks"""
+ faultCode = 1011
+
+class BuildrootError(BuildError):
+ """Raised when there is an error with the buildroot"""
+ faultCode = 1012
+
+class FunctionDeprecated(GenericError):
+ """Raised by a deprecated function"""
+ faultCode = 1013
+
+class ServerOffline(GenericError):
+ """Raised when the server is offline"""
+ faultCode = 1014
+
+class LiveCDError(GenericError):
+ """Raised when LiveCD Image creation fails"""
+ faultCode = 1015
+
+class PluginError(GenericError):
+ """Raised when there is an error with a plugin"""
+ faultCode = 1016
+
+class CallbackError(PluginError):
+ """Raised when there is an error executing a callback"""
+ faultCode = 1017
+
+class ApplianceError(GenericError):
+ """Raised when Appliance Image creation fails"""
+ faultCode = 1018
+
+class ParameterError(GenericError):
+ """Raised when an rpc call receives incorrect arguments"""
+ faultCode = 1019
+
+class ImportError(GenericError):
+ """Raised when an import fails"""
+ faultCode = 1020
+
+class MultiCallInProgress(object):
+ """
+ Placeholder class to be returned by method calls when in the process of
+ constructing a multicall.
+ """
+ pass
+
+
+# A function to create an exception from a fault
+def convertFault(fault):
+ """Convert a fault to the corresponding Exception type, if possible"""
+ code = getattr(fault,'faultCode',None)
+ if code is None:
+ return fault
+ for v in globals().values():
+ if type(v) == type(Exception) and issubclass(v,GenericError) and \
+ code == getattr(v,'faultCode',None):
+ ret = v(fault.faultString)
+ ret.fromFault = True
+ return ret
+ #otherwise...
+ return fault
+
+def listFaults():
+ """Return a list of faults
+
+ Returns a list of dictionaries whose keys are:
+ faultCode: the numeric code used in fault conversion
+ name: the name of the exception
+ desc: the description of the exception (docstring)
+ """
+ ret = []
+ for n,v in globals().items():
+ if type(v) == type(Exception) and issubclass(v,GenericError):
+ code = getattr(v,'faultCode',None)
+ if code is None:
+ continue
+ info = {}
+ info['faultCode'] = code
+ info['name'] = n
+ info['desc'] = getattr(v,'__doc__',None)
+ ret.append(info)
+ ret.sort(lambda a,b: cmp(a['faultCode'],b['faultCode']))
+ return ret
+
+#functions for encoding/decoding optional arguments
+
+def encode_args(*args,**opts):
+ """The function encodes optional arguments as regular arguments.
+
+ This is used to allow optional arguments in xmlrpc calls
+ Returns a tuple of args
+ """
+ if opts:
+ opts['__starstar'] = True
+ args = args + (opts,)
+ return args
+
+def decode_args(*args):
+ """Decodes optional arguments from a flat argument list
+
+ Complementary to encode_args
+ Returns a tuple (args,opts) where args is a tuple and opts is a dict
+ """
+ opts = {}
+ if len(args) > 0:
+ last = args[-1]
+ if type(last) == dict and last.get('__starstar',False):
+ del last['__starstar']
+ opts = last
+ args = args[:-1]
+ return args,opts
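+# Round-trip example (editor's illustration):
+# encode_args(1, 2, debug=True)
+# -> (1, 2, {'debug': True, '__starstar': True})
+# decode_args(1, 2, {'debug': True, '__starstar': True})
+# -> ((1, 2), {'debug': True})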
+
+def decode_args2(args, names, strict=True):
+ "An alternate form of decode_args, returns a dictionary"
+ args, opts = decode_args(*args)
+ if strict and len(names) < len(args):
+ raise TypeError, "Expecting at most %i arguments" % len(names)
+ ret = dict(zip(names, args))
+ ret.update(opts)
+ return ret
+
+## BEGIN kojikamid dup
+
+def encode_int(n):
+ """If n is too large for a 32bit signed, convert it to a string"""
+ if n <= 2147483647:
+ return n
+ #else
+ return str(n)
+## END kojikamid dup
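+# For example: encode_int(5) returns 5 unchanged, while encode_int(2**31)
+# returns the string '2147483648', keeping the value within XML-RPC's
+# signed 32-bit integer limit; decode_int() below converts it back.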
+
+def decode_int(n):
+ """If n is not an integer, attempt to convert it"""
+ if isinstance(n, (int, long)):
+ return n
+ #else
+ return int(n)
+
+#commonly used functions
+
+def safe_xmlrpc_loads(s):
+ """Load xmlrpc data from a string, but catch faults"""
+ try:
+ return loads(s)
+ except Fault, f:
+ return f
+
+## BEGIN kojikamid dup
+
+def ensuredir(directory):
+ """Create directory, if necessary."""
+ if os.path.exists(directory):
+ if not os.path.isdir(directory):
+ raise OSError, "Not a directory: %s" % directory
+ else:
+ head, tail = os.path.split(directory)
+ if not tail and head == directory:
+ # can only happen if directory == '/' or equivalent
+ # (which obviously should not happen)
+ raise OSError, "root directory missing? %s" % directory
+ if head:
+ ensuredir(head)
+ # note: if head is blank, then we've reached the top of a relative path
+ try:
+ os.mkdir(directory)
+ except OSError:
+ #thrown when dir already exists (could happen in a race)
+ if not os.path.isdir(directory):
+ #something else must have gone wrong
+ raise
+ return directory
+
+## END kojikamid dup
+
+def daemonize():
+ """Detach and run in background"""
+ pid = os.fork()
+ if pid:
+ os._exit(0)
+ os.setsid()
+ signal.signal(signal.SIGHUP, signal.SIG_IGN)
+ #fork again
+ pid = os.fork()
+ if pid:
+ os._exit(0)
+ os.chdir("/")
+ #redirect stdin/stdout/stderr
+ fd0 = os.open('/dev/null', os.O_RDONLY)
+ fd1 = os.open('/dev/null', os.O_RDWR)
+ fd2 = os.open('/dev/null', os.O_RDWR)
+ os.dup2(fd0,0)
+ os.dup2(fd1,1)
+ os.dup2(fd2,2)
+ os.close(fd0)
+ os.close(fd1)
+ os.close(fd2)
+
+def multibyte(data):
+ """Convert a list of bytes to an integer (network byte order)"""
+ sum = 0
+ n = len(data)
+ for i in xrange(n):
+ sum += data[i] << (8 * (n - i - 1))
+ return sum
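+# Worked example: multibyte([1, 0]) == (1 << 8) + 0 == 256; the byte list
+# is read as a big-endian (network byte order) integer.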
+
+def find_rpm_sighdr(path):
+ """Finds the offset and length of the signature header."""
+ # see Maximum RPM Appendix A: Format of the RPM File
+
+ # The lead is a fixed-size section (96 bytes) that is mostly obsolete
+ sig_start = 96
+ sigsize = rpm_hdr_size(path, sig_start)
+ return (sig_start, sigsize)
+
+def rpm_hdr_size(f, ofs=None):
+ """Returns the length (in bytes) of the rpm header
+
+ f = filename or file object
+ ofs = offset of the header
+ """
+ if isinstance(f, (str, unicode)):
+ fo = file(f, 'rb')
+ else:
+ fo = f
+ if ofs is None:
+ # default to 0 so the absolute seek to ofs + 8 below cannot fail
+ ofs = 0
+ fo.seek(ofs, 0)
+ magic = fo.read(3)
+ if magic != RPM_HEADER_MAGIC:
+ raise GenericError, "Invalid rpm: bad magic: %r" % magic
+
+ # skip past section magic and such
+ # (3 bytes magic, 1 byte version number, 4 bytes reserved)
+ fo.seek(ofs + 8, 0)
+
+ # now read two 4-byte integers which tell us
+ # - # of index entries
+ # - bytes of data in header
+ data = [ ord(x) for x in fo.read(8) ]
+ il = multibyte(data[0:4])
+ dl = multibyte(data[4:8])
+
+ #this is what the section data says the size should be
+ hdrsize = 8 + 16 * il + dl
+
+ # hdrsize rounded up to nearest 8 bytes
+ hdrsize = hdrsize + ( 8 - ( hdrsize % 8 ) ) % 8
+
+ # add eight bytes for section header
+ hdrsize = hdrsize + 8
+
+ if not isinstance(f, (str, unicode)):
+ fo.close()
+ return hdrsize
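+# Worked example (hypothetical values): a header with il=4 index entries
+# and dl=100 data bytes gives 8 + 16*4 + 100 = 172 bytes, padded up to
+# 176 (the next multiple of 8), plus 8 bytes of section header = 184.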
+
+
+class RawHeader(object):
+
+ # see Maximum RPM Appendix A: Format of the RPM File
+
+ def __init__(self, data):
+ if data[0:3] != RPM_HEADER_MAGIC:
+ raise GenericError, "Invalid rpm header: bad magic: %r" % (data[0:3],)
+ self.header = data
+ self._index()
+
+ def version(self):
+ #fourth byte is the version
+ return ord(self.header[3])
+
+ def _index(self):
+ # read two 4-byte integers which tell us
+ # - # of index entries (each 16 bytes long)
+ # - bytes of data in header
+ data = [ ord(x) for x in self.header[8:12] ]
+ il = multibyte(data[:4])
+ dl = multibyte(data[4:8])
+
+ #read the index (starts at offset 16)
+ index = {}
+ for i in xrange(il):
+ entry = []
+ for j in xrange(4):
+ ofs = 16 + i*16 + j*4
+ data = [ ord(x) for x in self.header[ofs:ofs+4] ]
+ entry.append(multibyte(data))
+ #print "Tag: %d, Type: %d, Offset: %x, Count: %d" % tuple(entry)
+ index[entry[0]] = entry
+ self.datalen = dl
+ self.index = index
+
+ def dump(self):
+ print "HEADER DUMP:"
+ #calculate start of store
+ il = len(self.index)
+ store = 16 + il * 16
+ #print "start is: %d" % start
+ #print "index length: %d" % il
+ print "Store at offset %d (%0x)" % (store,store)
+ #sort entries by offset, dtype
+ #also rearrange: tag, dtype, offset, count -> offset, dtype, tag, count
+ order = [(x[2], x[1], x[0], x[3]) for x in self.index.itervalues()]
+ order.sort()
+ next = store
+ #map some rpmtag codes
+ tags = {}
+ for name, code in rpm.__dict__.iteritems():
+ if name.startswith('RPMTAG_') and isinstance(code, int):
+ tags[code] = name[7:].lower()
+ for entry in order:
+ #tag, dtype, offset, count = entry
+ offset, dtype, tag, count = entry
+ pos = store + offset
+ if next is not None:
+ if pos > next:
+ print "** HOLE between entries"
+ print "Hex: %s" % hex_string(self.header[next:pos])
+ print "Data: %r" % self.header[next:pos]
+ elif pos < next:
+ print "** OVERLAPPING entries"
+ print "Tag: %d [%s], Type: %d, Offset: %x, Count: %d" \
+ % (tag, tags.get(tag, '?'), dtype, offset, count)
+ if dtype == 0:
+ #null
+ print "[NULL entry]"
+ next = pos
+ elif dtype == 1:
+ #char
+ for i in xrange(count):
+ print "Char: %r" % self.header[pos]
+ pos += 1
+ next = pos
+ elif dtype >= 2 and dtype <= 5:
+ #integer
+ n = 1 << (dtype - 2)
+ for i in xrange(count):
+ data = [ ord(x) for x in self.header[pos:pos+n] ]
+ print "%r" % data
+ num = multibyte(data)
+ print "Int(%d): %d" % (n, num)
+ pos += n
+ next = pos
+ elif dtype == 6:
+ # string (null terminated)
+ end = self.header.find('\0', pos)
+ print "String(%d): %r" % (end-pos, self.header[pos:end])
+ next = end + 1
+ elif dtype == 7:
+ print "Data: %s" % hex_string(self.header[pos:pos+count])
+ next = pos+count
+ elif dtype == 8:
+ # string array
+ for i in xrange(count):
+ end = self.header.find('\0', pos)
+ print "String(%d): %r" % (end-pos, self.header[pos:end])
+ pos = end + 1
+ next = pos
+ elif dtype == 9:
+ # unicode string array
+ for i in xrange(count):
+ end = self.header.find('\0', pos)
+ print "i18n(%d): %r" % (end-pos, self.header[pos:end])
+ pos = end + 1
+ next = pos
+ else:
+ print "Skipping data type %x" % dtype
+ next = None
+ if next is not None:
+ pos = store + self.datalen
+ if next < pos:
+ print "** HOLE at end of data block"
+ print "Hex: %s" % hex_string(self.header[next:pos])
+ print "Data: %r" % self.header[next:pos]
+ elif pos > next:
+ print "** OVERFLOW in data block"
+
+ def __getitem__(self, key):
+ tag, dtype, offset, count = self.index[key]
+ assert tag == key
+ return self._getitem(dtype, offset, count)
+
+ def _getitem(self, dtype, offset, count):
+ #calculate start of store
+ il = len(self.index)
+ store = 16 + il * 16
+ pos = store + offset
+ if dtype >= 2 and dtype <= 5:
+ n = 1 << (dtype - 2)
+ # n-byte integer
+ data = [ ord(x) for x in self.header[pos:pos+n] ]
+ return multibyte(data)
+ elif dtype == 6:
+ # string (null terminated)
+ end = self.header.find('\0', pos)
+ return self.header[pos:end]
+ elif dtype == 7:
+ #raw data
+ return self.header[pos:pos+count]
+ else:
+ #XXX - not all valid data types are handled
+ raise GenericError, "Unable to read header data type: %x" % dtype
+
+ def get(self, key, default=None):
+ entry = self.index.get(key)
+ if entry is None:
+ return default
+ else:
+ return self._getitem(*entry[1:])
+
+
+def rip_rpm_sighdr(src):
+ """Rip the signature header out of an rpm"""
+ (start, size) = find_rpm_sighdr(src)
+ fo = file(src, 'rb')
+ fo.seek(start, 0)
+ sighdr = fo.read(size)
+ fo.close()
+ return sighdr
+
+def rip_rpm_hdr(src):
+ """Rip the main header out of an rpm"""
+ (start, size) = find_rpm_sighdr(src)
+ start += size
+ size = rpm_hdr_size(src, start)
+ fo = file(src, 'rb')
+ fo.seek(start, 0)
+ hdr = fo.read(size)
+ fo.close()
+ return hdr
+
+def __parse_packet_header(pgp_packet):
+ """Parse pgp_packet header, return tag type and the rest of pgp_packet"""
+ byte0 = ord(pgp_packet[0])
+ if (byte0 & 0x80) == 0:
+ raise ValueError, 'Not an OpenPGP packet'
+ if (byte0 & 0x40) == 0:
+ tag = (byte0 & 0x3C) >> 2
+ len_type = byte0 & 0x03
+ if len_type == 3:
+ offset = 1
+ length = len(pgp_packet) - offset
+ else:
+ (fmt, offset) = { 0:('>B', 2), 1:('>H', 3), 2:('>I', 5) }[len_type]
+ length = struct.unpack(fmt, pgp_packet[1:offset])[0]
+ else:
+ tag = byte0 & 0x3F
+ byte1 = ord(pgp_packet[1])
+ if byte1 < 192:
+ length = byte1
+ offset = 2
+ elif byte1 < 224:
+ length = ((byte1 - 192) << 8) + ord(pgp_packet[2]) + 192
+ offset = 3
+ elif byte1 == 255:
+ length = struct.unpack('>I', pgp_packet[2:6])[0]
+ offset = 6
+ else:
+ # Who the ... would use partial body lengths in a signature packet?
+ raise NotImplementedError, \
+ 'OpenPGP packet with partial body lengths'
+ if len(pgp_packet) != offset + length:
+ raise ValueError, 'Invalid OpenPGP packet length'
+ return (tag, pgp_packet[offset:])
+
+def __subpacket_key_ids(subs):
+ """Parse v4 signature subpackets and return a list of issuer key IDs"""
+ res = []
+ while len(subs) > 0:
+ byte0 = ord(subs[0])
+ if byte0 < 192:
+ length = byte0
+ off = 1
+ elif byte0 < 255:
+ length = ((byte0 - 192) << 8) + ord(subs[1]) + 192
+ off = 2
+ else:
+ length = struct.unpack('>I', subs[1:5])[0]
+ off = 5
+ if ord(subs[off]) == 16:
+ res.append(subs[off+1 : off+length])
+ subs = subs[off+length:]
+ return res
+
+def get_sigpacket_key_id(sigpacket):
+ """Return ID of the key used to create sigpacket as a hexadecimal string"""
+ (tag, sigpacket) = __parse_packet_header(sigpacket)
+ if tag != 2:
+ raise ValueError, 'Not a signature packet'
+ if ord(sigpacket[0]) == 0x03:
+ key_id = sigpacket[11:15]
+ elif ord(sigpacket[0]) == 0x04:
+ sub_len = struct.unpack('>H', sigpacket[4:6])[0]
+ off = 6 + sub_len
+ key_ids = __subpacket_key_ids(sigpacket[6:off])
+ sub_len = struct.unpack('>H', sigpacket[off : off+2])[0]
+ off += 2
+ key_ids += __subpacket_key_ids(sigpacket[off : off+sub_len])
+ if len(key_ids) != 1:
+ raise NotImplementedError, \
+ 'Unexpected number of key IDs: %s' % len(key_ids)
+ key_id = key_ids[0][-4:]
+ else:
+ raise NotImplementedError, \
+ 'Unknown PGP signature packet version %s' % ord(sigpacket[0])
+ return hex_string(key_id)
+
+def get_sighdr_key(sighdr):
+ """Parse the sighdr and return the sigkey"""
+ rh = RawHeader(sighdr)
+ sig = rh.get(RPM_SIGTAG_GPG)
+ if not sig:
+ sig = rh.get(RPM_SIGTAG_PGP)
+ if not sig:
+ return None
+ else:
+ return get_sigpacket_key_id(sig)
+
+def splice_rpm_sighdr(sighdr, src, dst=None, bufsize=8192):
+ """Write a copy of an rpm with signature header spliced in"""
+ (start, size) = find_rpm_sighdr(src)
+ if dst is None:
+ (fd, dst) = tempfile.mkstemp()
+ os.close(fd)
+ src_fo = file(src, 'rb')
+ dst_fo = file(dst, 'wb')
+ dst_fo.write(src_fo.read(start))
+ dst_fo.write(sighdr)
+ src_fo.seek(size, 1)
+ while True:
+ buf = src_fo.read(bufsize)
+ if not buf:
+ break
+ dst_fo.write(buf)
+ src_fo.close()
+ dst_fo.close()
+ return dst
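+# Usage sketch (editor's illustration, hypothetical paths): a detached
+# signature header can be moved between otherwise identical rpms:
+# sighdr = rip_rpm_sighdr('/path/to/signed.rpm')
+# signed_copy = splice_rpm_sighdr(sighdr, '/path/to/unsigned.rpm')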
+
+def get_rpm_header(f, ts=None):
+ """Return the rpm header."""
+ if ts is None:
+ ts = rpm.TransactionSet()
+ ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)
+ if isinstance(f, (str, unicode)):
+ fo = file(f, "r")
+ else:
+ fo = f
+ hdr = ts.hdrFromFdno(fo.fileno())
+ if fo is not f:
+ fo.close()
+ return hdr
+
+def get_header_field(hdr,name):
+ """Extract named field from an rpm header"""
+ idx = getattr(rpm,"RPMTAG_%s" % name.upper(),None)
+ if idx is None:
+ raise GenericError, "No such rpm header field: %s" % name
+ return hdr[idx]
+
+def get_header_fields(X,fields):
+ """Extract named fields from an rpm header and return as a dictionary
+
+ X may be either the rpm header or the rpm filename
+ """
+ if type(X) == str:
+ hdr = get_rpm_header(X)
+ else:
+ hdr = X
+ ret = {}
+ for f in fields:
+ ret[f] = get_header_field(hdr,f)
+ return ret
+
+def parse_NVR(nvr):
+ """split N-V-R into dictionary of data"""
+ ret = {}
+ p2 = nvr.rfind("-",0)
+ if p2 == -1 or p2 == len(nvr) - 1:
+ raise GenericError("invalid format: %s" % nvr)
+ p1 = nvr.rfind("-",0,p2)
+ if p1 == -1 or p1 == p2 - 1:
+ raise GenericError("invalid format: %s" % nvr)
+ ret['release'] = nvr[p2+1:]
+ ret['version'] = nvr[p1+1:p2]
+ ret['name'] = nvr[:p1]
+ epochIndex = ret['name'].find(':')
+ if epochIndex == -1:
+ ret['epoch'] = ''
+ else:
+ ret['epoch'] = ret['name'][:epochIndex]
+ ret['name'] = ret['name'][epochIndex + 1:]
+ return ret
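+# For example: parse_NVR('bash-4.2.46-12.el7') returns
+# {'name': 'bash', 'version': '4.2.46', 'release': '12.el7', 'epoch': ''},
+# and parse_NVR('1:bash-4.2.46-12.el7') sets 'epoch' to '1'.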
+
+def parse_NVRA(nvra):
+ """split N-V-R.A.rpm into dictionary of data
+
+ also splits off @location suffix"""
+ parts = nvra.split('@', 1)
+ location = None
+ if len(parts) > 1:
+ nvra, location = parts
+ if nvra.endswith(".rpm"):
+ nvra = nvra[:-4]
+ p3 = nvra.rfind(".")
+ if p3 == -1 or p3 == len(nvra) - 1:
+ raise GenericError("invalid format: %s" % nvra)
+ arch = nvra[p3+1:]
+ ret = parse_NVR(nvra[:p3])
+ ret['arch'] = arch
+ if arch == 'src':
+ ret['src'] = True
+ else:
+ ret['src'] = False
+ if location:
+ ret['location'] = location
+ return ret
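+# For example: parse_NVRA('bash-4.2.46-12.el7.x86_64.rpm') adds
+# 'arch': 'x86_64' and 'src': False to the parse_NVR() fields; an
+# '@vol1' suffix would additionally yield 'location': 'vol1'.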
+
+def is_debuginfo(name):
+ """Determines if an rpm is a debuginfo rpm, based on name"""
+ if name.endswith('-debuginfo') or name.find('-debuginfo-') != -1:
+ return True
+ return False
+
+def canonArch(arch):
+ """Given an arch, return the "canonical" arch"""
+ #XXX - this could stand to be smarter, and we should probably
+ # have some other related arch-mangling functions.
+ if fnmatch(arch,'i?86') or arch == 'athlon':
+ return 'i386'
+ elif arch == 'ia32e':
+ return 'x86_64'
+ elif fnmatch(arch,'ppc64le'):
+ return 'ppc64le'
+ elif fnmatch(arch,'ppc64*'):
+ return 'ppc64'
+ elif fnmatch(arch,'sparc64*'):
+ return 'sparc64'
+ elif fnmatch(arch,'sparc*'):
+ return 'sparc'
+ elif fnmatch(arch, 'alpha*'):
+ return 'alpha'
+ elif fnmatch(arch,'arm*h*'):
+ return 'armhfp'
+ elif fnmatch(arch,'arm*'):
+ return 'arm'
+ else:
+ return arch
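+# For example: canonArch('i686') -> 'i386', canonArch('armv7hl') ->
+# 'armhfp', canonArch('ia32e') -> 'x86_64'; unrecognized arches are
+# returned unchanged.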
+
+class POMHandler(xml.sax.handler.ContentHandler):
+ def __init__(self, values, fields):
+ xml.sax.handler.ContentHandler.__init__(self)
+ self.tag_stack = []
+ self.tag_content = None
+ self.values = values
+ self.fields = fields
+
+ def startElement(self, name, attrs):
+ self.tag_stack.append(name)
+ self.tag_content = ''
+
+ def characters(self, content):
+ self.tag_content += content
+
+ def endElement(self, name):
+ if len(self.tag_stack) in (2, 3) and self.tag_stack[-1] in self.fields:
+ if self.tag_stack[-2] == 'parent':
+ # Only set a value from the "parent" tag if we don't already have
+ # that value set
+ if not self.values.has_key(self.tag_stack[-1]):
+ self.values[self.tag_stack[-1]] = self.tag_content.strip()
+ elif self.tag_stack[-2] == 'project':
+ self.values[self.tag_stack[-1]] = self.tag_content.strip()
+ self.tag_content = ''
+ self.tag_stack.pop()
+
+ def reset(self):
+ self.tag_stack = []
+ self.tag_content = None
+ self.values.clear()
+
+ENTITY_RE = re.compile(r'&[A-Za-z0-9]+;')
+
+def parse_pom(path=None, contents=None):
+ """
+ Parse the Maven .pom file and return a map containing information
+ extracted from it. The map will contain at least the following
+ fields:
+
+ groupId
+ artifactId
+ version
+ """
+ fields = ('groupId', 'artifactId', 'version')
+ values = {}
+ handler = POMHandler(values, fields)
+ if path:
+ fd = file(path)
+ contents = fd.read()
+ fd.close()
+
+ if not contents:
+ raise GenericError, 'either a path to a pom file or the contents of a pom file must be specified'
+
+ # A common problem is non-UTF8 characters in XML files, so we'll convert the string first
+
+ contents = fixEncoding(contents)
+
+ try:
+ xml.sax.parseString(contents, handler)
+ except xml.sax.SAXParseException:
+ # likely an undefined entity reference, so lets try replacing
+ # any entity refs we can find and see if we get something parseable
+ handler.reset()
+ contents = ENTITY_RE.sub('?', contents)
+ xml.sax.parseString(contents, handler)
+
+ for field in fields:
+ if field not in values.keys():
+ raise GenericError, 'could not extract %s from POM: %s' % (field, (path or '<contents>'))
+ return values
+
+def pom_to_maven_info(pominfo):
+ """
+ Convert the output of parsing a POM into a format compatible
+ with Koji.
+ The mapping is as follows:
+ - groupId: group_id
+ - artifactId: artifact_id
+ - version: version
+ """
+ maveninfo = {'group_id': pominfo['groupId'],
+ 'artifact_id': pominfo['artifactId'],
+ 'version': pominfo['version']}
+ return maveninfo
+
+def maven_info_to_nvr(maveninfo):
+ """
+ Convert the maveninfo to NVR-compatible format.
+ The release cannot be determined from Maven metadata, and will
+ be set to None.
+ """
+ nvr = {'name': maveninfo['group_id'] + '-' + maveninfo['artifact_id'],
+ 'version': maveninfo['version'].replace('-', '_'),
+ 'release': None,
+ 'epoch': None}
+ # for backwards-compatibility
+ nvr['package_name'] = nvr['name']
+ return nvr
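+# For example (hypothetical values): {'group_id': 'org.apache.maven',
+# 'artifact_id': 'maven', 'version': '2.0-beta'} yields the name
+# 'org.apache.maven-maven' and version '2.0_beta', since rpm versions
+# may not contain dashes.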
+
+def mavenLabel(maveninfo):
+ """
+ Return a user-friendly label for the given maveninfo. maveninfo is
+ a dict as returned by kojihub:getMavenBuild().
+ """
+ return '%(group_id)s-%(artifact_id)s-%(version)s' % maveninfo
+
+def hex_string(s):
+ """Converts a string to a string of hex digits"""
+ return ''.join([ '%02x' % ord(x) for x in s ])
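+# For example: hex_string('\x01\xab') returns '01ab'.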
+
+
+def make_groups_spec(grplist,name='buildsys-build',buildgroup=None):
+ """Return specfile contents representing the group"""
+ if buildgroup is None:
+ buildgroup=name
+ data = [
+"""#
+# This specfile represents buildgroups for mock
+# Autogenerated by the build system
+#
+Summary: The base set of packages for a mock chroot\n""",
+"""Name: %s\n""" % name,
+"""Version: 1
+Release: 1
+License: GPL
+Group: Development/Build Tools
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root
+BuildArch: noarch
+
+#package requirements
+"""]
+ #add a requires entry for all the packages in buildgroup, and in
+ #groups required by buildgroup
+ need = [buildgroup]
+ seen_grp = {}
+ seen_pkg = {}
+ #index groups
+ groups = dict([(g['name'],g) for g in grplist])
+ for group_name in need:
+ if seen_grp.has_key(group_name):
+ continue
+ seen_grp[group_name] = 1
+ group = groups.get(group_name)
+ if group is None:
+ data.append("#MISSING GROUP: %s\n" % group_name)
+ continue
+ data.append("#Group: %s\n" % group_name)
+ pkglist = list(group['packagelist'])
+ pkglist.sort(lambda a,b: cmp(a['package'], b['package']))
+ for pkg in pkglist:
+ pkg_name = pkg['package']
+ if seen_pkg.has_key(pkg_name):
+ continue
+ seen_pkg[pkg_name] = 1
+ data.append("Requires: %s\n" % pkg_name)
+ for req in group['grouplist']:
+ req_name = req['name']
+ if seen_grp.has_key(req_name):
+ continue
+ need.append(req_name)
+ data.append("""
+%description
+This is a meta-package that requires a defined group of packages
+
+%prep
+%build
+%install
+%clean
+
+%files
+%defattr(-,root,root,-)
+%doc
+""")
+ return ''.join(data)
+
+def generate_comps(groups, expand_groups=False):
+ """Generate comps content from groups data"""
+ def boolean_text(x):
+ if x:
+ return "true"
+ else:
+ return "false"
+ data = [
+"""<?xml version="1.0"?>
+<!DOCTYPE comps PUBLIC "-//Red Hat, Inc.//DTD Comps info//EN" "comps.dtd">
+
+<!-- Auto-generated by the build system -->
+<comps>
+""" ]
+ groups = list(groups)
+ group_idx = dict([(g['name'],g) for g in groups])
+ groups.sort(lambda a,b:cmp(a['name'],b['name']))
+ for g in groups:
+ group_id = g['name']
+ name = g['display_name']
+ description = g['description']
+ langonly = boolean_text(g['langonly'])
+ default = boolean_text(g['is_default'])
+ uservisible = boolean_text(g['uservisible'])
+ data.append(
+""" <group>
+ <id>%(group_id)s</id>
+ <name>%(name)s</name>
+ <description>%(description)s</description>
+ <default>%(default)s</default>
+ <uservisible>%(uservisible)s</uservisible>
+""" % locals())
+ if g['biarchonly']:
+ data.append(
+""" <biarchonly>%s</biarchonly>
+""" % boolean_text(True))
+
+ #print grouplist, if any
+ if g['grouplist'] and not expand_groups:
+ data.append(
+""" <grouplist>
+""")
+ grouplist = list(g['grouplist'])
+ grouplist.sort(lambda a,b:cmp(a['name'],b['name']))
+ for x in grouplist:
+ #['req_id','type','is_metapkg','name']
+ name = x['name']
+ thetype = x['type']
+ tag = "groupreq"
+ if x['is_metapkg']:
+ tag = "metapkg"
+ if thetype:
+ data.append(
+""" <%(tag)s type="%(thetype)s">%(name)s</%(tag)s>
+""" % locals())
+ else:
+ data.append(
+""" <%(tag)s>%(name)s</%(tag)s>
+""" % locals())
+ data.append(
+""" </grouplist>
+""")
+
+ #print packagelist, if any
+ def package_entry(pkg):
+ #p['package_id','type','basearchonly','requires','name']
+ name = pkg['package']
+ opts = 'type="%s"' % pkg['type']
+ if pkg['basearchonly']:
+ opts += ' basearchonly="%s"' % boolean_text(True)
+ if pkg['requires']:
+ opts += ' requires="%s"' % pkg['requires']
+ return "<packagereq %(opts)s>%(name)s</packagereq>" % locals()
+
+ data.append(
+""" <packagelist>
+""")
+ if g['packagelist']:
+ packagelist = list(g['packagelist'])
+ packagelist.sort(lambda a,b:cmp(a['package'],b['package']))
+ for p in packagelist:
+ data.append(
+""" %s
+""" % package_entry(p))
+ # also include expanded list, if needed
+ if expand_groups and g['grouplist']:
+ #add a requires entry for all packages in groups required by buildgroup
+ need = [req['name'] for req in g['grouplist']]
+ seen_grp = { g['name'] : 1}
+ seen_pkg = {}
+ for p in g['packagelist']:
+ seen_pkg[p['package']] = 1
+ for group_name in need:
+ if seen_grp.has_key(group_name):
+ continue
+ seen_grp[group_name] = 1
+ group = group_idx.get(group_name)
+ if group is None:
+ data.append(
+""" <!-- MISSING GROUP: %s -->
+""" % group_name)
+ continue
+ data.append(
+""" <!-- Expanding Group: %s -->
+""" % group_name)
+ pkglist = list(group['packagelist'])
+ pkglist.sort(lambda a,b: cmp(a['package'], b['package']))
+ for pkg in pkglist:
+ pkg_name = pkg['package']
+ if seen_pkg.has_key(pkg_name):
+ continue
+ data.append(
+""" %s
+""" % package_entry(pkg))
+ for req in group['grouplist']:
+ req_name = req['name']
+ if seen_grp.has_key(req_name):
+ continue
+ need.append(req_name)
+ data.append(
+""" </packagelist>
+""")
+ data.append(
+""" </group>
+""")
+ data.append(
+"""</comps>
+""")
+ return ''.join(data)
+
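+# Illustrative usage sketch (hypothetical data, not from upstream): each group
+# dict needs at least the keys read above, e.g.
+#   groups = [{'name': 'build', 'display_name': 'Build', 'description': '',
+#              'langonly': None, 'is_default': True, 'uservisible': True,
+#              'biarchonly': False, 'grouplist': [],
+#              'packagelist': [{'package': 'gcc', 'type': 'mandatory',
+#                               'basearchonly': False, 'requires': None}]}]
+#   print generate_comps(groups)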
+
+def genMockConfig(name, arch, managed=False, repoid=None, tag_name=None, **opts):
+ """Generate a mock config
+
+ Returns a string containing the config
+ The generated config is compatible with mock >= 0.8.7
+ """
+ mockdir = opts.get('mockdir', '/var/lib/mock')
+ if 'url' in opts:
+ from warnings import warn
+ warn('The url option for genMockConfig is deprecated', DeprecationWarning)
+ urls = [opts['url']]
+ else:
+ if not (repoid and tag_name):
+ raise GenericError, "please provide a repo and tag"
+ topurls = opts.get('topurls')
+ if not topurls:
+ #cli command still passes plain topurl
+ topurl = opts.get('topurl')
+ if topurl:
+ topurls = [topurl]
+ if topurls:
+ #XXX - PathInfo isn't quite right for this, but it will do for now
+ pathinfos = [PathInfo(topdir=_u) for _u in topurls]
+ urls = ["%s/%s" % (_p.repo(repoid,tag_name), arch) for _p in pathinfos]
+ else:
+ pathinfo = PathInfo(topdir=opts.get('topdir', '/mnt/koji'))
+ repodir = pathinfo.repo(repoid,tag_name)
+ urls = ["file://%s/%s" % (repodir,arch)]
+ if managed:
+ buildroot_id = opts.get('buildroot_id')
+
+ # rely on the mock defaults being correct
+ # and only include changes from the defaults here
+ config_opts = {
+ 'root' : name,
+ 'basedir' : mockdir,
+ 'target_arch' : opts.get('target_arch', arch),
+ 'chroothome': '/builddir',
+ # Use the group data rather than a generated rpm
+ 'chroot_setup_cmd': 'groupinstall %s' % opts.get('install_group', 'build'),
+ # don't encourage network access from the chroot
+ 'use_host_resolv': opts.get('use_host_resolv', False),
+ # Don't let a build last more than 24 hours
+ 'rpmbuild_timeout': opts.get('rpmbuild_timeout', 86400)
+ }
+ if opts.get('package_manager'):
+ config_opts['package_manager'] = opts['package_manager']
+
+ # bind_opts are used to mount parts (or all) of /dev if needed.
+ # See kojid::LiveCDTask for a look at this option in action.
+ bind_opts = opts.get('bind_opts')
+
+ files = {}
+ if opts.get('use_host_resolv', False) and os.path.exists('/etc/hosts'):
+ # if we're setting up DNS,
+ # also copy /etc/hosts from the host
+ etc_hosts = file('/etc/hosts')
+ files['etc/hosts'] = etc_hosts.read()
+ etc_hosts.close()
+ mavenrc = ''
+ if opts.get('maven_opts'):
+ mavenrc = 'export MAVEN_OPTS="%s"\n' % ' '.join(opts['maven_opts'])
+ if opts.get('maven_envs'):
+ for name, val in opts['maven_envs'].iteritems():
+ mavenrc += 'export %s="%s"\n' % (name, val)
+ if mavenrc:
+ files['etc/mavenrc'] = mavenrc
+
+ #generate yum.conf
+ yc_parts = ["[main]\n"]
+ # HTTP proxy for yum
+ if opts.get('yum_proxy'):
+ yc_parts.append("proxy=%s\n" % opts['yum_proxy'])
+ # Rest of the yum options
+ yc_parts.append("""\
+cachedir=/var/cache/yum
+debuglevel=1
+logfile=/var/log/yum.log
+reposdir=/dev/null
+retries=20
+obsoletes=1
+gpgcheck=0
+assumeyes=1
+
+# repos
+
+[build]
+name=build
+""")
+ yc_parts.append("baseurl=%s\n" % urls[0])
+ for url in urls[1:]:
+ yc_parts.append(" %s\n" % url)
+ config_opts['yum.conf'] = ''.join(yc_parts)
+
+ plugin_conf = {
+ 'ccache_enable': False,
+ 'yum_cache_enable': False,
+ 'root_cache_enable': False
+ }
+
+ #XXX - this needs to be configurable
+ macros = {
+ '%_topdir' : '%s/build' % config_opts['chroothome'],
+ '%_rpmfilename' : '%%{NAME}-%%{VERSION}-%%{RELEASE}.%%{ARCH}.rpm',
+ '%_host_cpu' : opts.get('target_arch', arch),
+ '%_host': '%s-%s' % (opts.get('target_arch', arch), opts.get('mockhost', 'koji-linux-gnu')),
+ '%vendor' : opts.get('vendor', 'Koji'),
+ '%packager' : opts.get('packager', 'Koji'),
+ '%distribution': opts.get('distribution', 'Unknown')
+ #TODO - track some of these in the db instead?
+ }
+
+ parts = ["""# Auto-generated by the Koji build system
+"""]
+ if managed:
+ parts.append("""
+# Koji buildroot id: %(buildroot_id)s
+# Koji buildroot name: %(name)s
+# Koji repo id: %(repoid)s
+# Koji tag: %(tag_name)s
+""" % locals())
+
+ parts.append("\n")
+ for key, value in config_opts.iteritems():
+ parts.append("config_opts[%r] = %r\n" % (key, value))
+ parts.append("\n")
+ for key, value in plugin_conf.iteritems():
+ parts.append("config_opts['plugin_conf'][%r] = %r\n" % (key, value))
+ parts.append("\n")
+
+ if bind_opts:
+ # This line is REQUIRED for mock to work if bind_opts are defined.
+ parts.append("config_opts['internal_dev_setup'] = False\n")
+ for key in bind_opts.keys():
+ for mnt_src, mnt_dest in bind_opts.get(key).iteritems():
+ parts.append("config_opts['plugin_conf']['bind_mount_opts'][%r].append((%r, %r))\n" % (key, mnt_src, mnt_dest))
+ parts.append("\n")
+
+ for key, value in macros.iteritems():
+ parts.append("config_opts['macros'][%r] = %r\n" % (key, value))
+ parts.append("\n")
+ for key, value in files.iteritems():
+ parts.append("config_opts['files'][%r] = %r\n" % (key, value))
+
+ return ''.join(parts)
+
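+# Illustrative usage sketch (hypothetical values): generate a config for a
+# managed buildroot served from a top-level URL:
+#   cfg = genMockConfig('f20-build-repo_1-x86_64', 'x86_64', managed=True,
+#                       repoid=1, tag_name='f20-build',
+#                       topurl='http://koji.example.com/kojifiles',
+#                       buildroot_id=12345)
+# The returned string is then written out as the mock config by the caller.
+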
+def get_sequence_value(cursor, sequence):
+ cursor.execute("""SELECT nextval(%(sequence)s)""", locals())
+ return cursor.fetchone()[0]
+
+# From Python Cookbook 2nd Edition, Recipe 8.6
+def format_exc_plus():
+ """ Format the usual traceback information, followed by a listing of
+ all the local variables in each frame.
+ """
+ tb = sys.exc_info()[2]
+ while tb.tb_next:
+ tb = tb.tb_next
+ stack = []
+ f = tb.tb_frame
+ while f:
+ stack.append(f)
+ f = f.f_back
+ stack.reverse()
+ rv = ''.join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
+ rv += "Locals by frame, innermost last\n"
+ for frame in stack:
+ rv += "Frame %s in %s at line %s\n" % (frame.f_code.co_name,
+ frame.f_code.co_filename,
+ frame.f_lineno)
+ for key, value in frame.f_locals.items():
+ rv += " %20s = " % key
+ # we must _absolutely_ avoid propagating exceptions, and str(value)
+ # COULD cause any exception, so we MUST catch any...:
+ try:
+ rv += "%s\n" % value
+ except:
+ rv += "<ERROR WHILE PRINTING VALUE>\n"
+ return rv
+
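+# Illustrative usage (a minimal sketch; 'risky' and 'logger' are hypothetical):
+#   try:
+#       risky()
+#   except Exception:
+#       logger.error(format_exc_plus())
+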
+def openRemoteFile(relpath, topurl=None, topdir=None):
+ """Open a file on the main server (read-only)
+
+ This is done either via a mounted filesystem (nfs) or http, depending
+ on options"""
+ if topurl:
+ url = "%s/%s" % (topurl, relpath)
+ src = urllib2.urlopen(url)
+ fo = tempfile.TemporaryFile()
+ shutil.copyfileobj(src, fo)
+ src.close()
+ fo.seek(0)
+ elif topdir:
+ fn = "%s/%s" % (topdir, relpath)
+ fo = open(fn)
+ else:
+ raise GenericError, "No access method for remote file: %s" % relpath
+ return fo
+
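+# Illustrative usage (hypothetical paths): fetch a task output file read-only:
+#   fo = openRemoteFile('work/tasks/1234/foo-1.0-1.src.rpm',
+#                       topurl='http://koji.example.com/kojifiles')
+# With neither topurl nor topdir given, GenericError is raised.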
+
+def config_directory_contents(dir_name):
+ configs = []
+ try:
+ conf_dir_contents = os.listdir(dir_name)
+ except OSError, exception:
+ if exception.errno != errno.ENOENT:
+ raise
+ else:
+ for name in sorted(conf_dir_contents):
+ if not name.endswith('.conf'):
+ continue
+ config_full_name = os.path.join(dir_name, name)
+ configs.append(config_full_name)
+ return configs
+
+
+class PathInfo(object):
+ # ASCII digits and upper- and lower-case letters for use in tmpdir()
+ ASCII_CHARS = [chr(i) for i in range(48, 58) + range(65, 91) + range(97, 123)]
+
+ def __init__(self, topdir=None):
+ self._topdir = topdir
+
+ def topdir(self):
+ if self._topdir is None:
+ self._topdir = str(BASEDIR)
+ return self._topdir
+
+ def _set_topdir(self, topdir):
+ self._topdir = topdir
+
+ topdir = property(topdir, _set_topdir)
+
+ def volumedir(self, volume):
+ if volume == 'DEFAULT' or volume is None:
+ return self.topdir
+ #else
+ return self.topdir + ("/vol/%s" % volume)
+
+ def build(self,build):
+ """Return the directory where a build belongs"""
+ return self.volumedir(build.get('volume_name')) + ("/packages/%(name)s/%(version)s/%(release)s" % build)
+
+ def mavenbuild(self, build):
+ """Return the directory where the Maven build exists in the global store (/mnt/koji/packages)"""
+ return self.build(build) + '/maven'
+
+ def mavenrepo(self, maveninfo):
+ """Return the relative path to the artifact directory in the repo"""
+ group_path = maveninfo['group_id'].replace('.', '/')
+ artifact_id = maveninfo['artifact_id']
+ version = maveninfo['version']
+ return "%(group_path)s/%(artifact_id)s/%(version)s" % locals()
+
+ def mavenfile(self, maveninfo):
+ """Return the relative path to the artifact in the repo"""
+ return self.mavenrepo(maveninfo) + '/' + maveninfo['filename']
+
+ def winbuild(self, build):
+ """Return the directory where the Windows build exists"""
+ return self.build(build) + '/win'
+
+ def winfile(self, wininfo):
+ """Return the relative path from the winbuild directory where the
+ file identified by wininfo is located."""
+ filepath = wininfo['filename']
+ if wininfo['relpath']:
+ filepath = wininfo['relpath'] + '/' + filepath
+ return filepath
+
+ def imagebuild(self, build):
+ """Return the directory where the image for the build are stored"""
+ return self.build(build) + '/images'
+
+ def rpm(self,rpminfo):
+ """Return the path (relative to build_dir) where an rpm belongs"""
+ return "%(arch)s/%(name)s-%(version)s-%(release)s.%(arch)s.rpm" % rpminfo
+
+ def signed(self, rpminfo, sigkey):
+ """Return the path (relative to build dir) where a signed rpm lives"""
+ return "data/signed/%s/" % sigkey + self.rpm(rpminfo)
+
+ def sighdr(self, rpminfo, sigkey):
+ """Return the path (relative to build_dir) where a cached sig header lives"""
+ return "data/sigcache/%s/" % sigkey + self.rpm(rpminfo) + ".sig"
+
+ def build_logs(self, build):
+ """Return the path for build logs"""
+ return "%s/data/logs" % self.build(build)
+
+ def repo(self,repo_id,tag_str):
+ """Return the directory where a repo belongs"""
+ return self.topdir + ("/repos/%(tag_str)s/%(repo_id)s" % locals())
+
+ def repocache(self,tag_str):
+ """Return the directory where a repo belongs"""
+ return self.topdir + ("/repos/%(tag_str)s/cache" % locals())
+
+ def taskrelpath(self, task_id):
+ """Return the relative path for the task work directory"""
+ return "tasks/%s/%s" % (task_id % 10000, task_id)
+
+ def work(self):
+ """Return the work dir"""
+ return self.topdir + '/work'
+
+ def tmpdir(self):
+ """Return a path to a unique directory under work()/tmp/"""
+ tmp = None
+ while tmp is None or os.path.exists(tmp):
+ tmp = self.work() + '/tmp/' + ''.join([random.choice(self.ASCII_CHARS) for dummy in '123456'])
+ return tmp
+
+ def scratch(self):
+ """Return the main scratch dir"""
+ return self.topdir + '/scratch'
+
+ def task(self, task_id):
+ """Return the output directory for the task with the given id"""
+ return self.work() + '/' + self.taskrelpath(task_id)
+
+pathinfo = PathInfo()
+
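+# Illustrative sketch (hypothetical build dict): with the default topdir,
+#   pathinfo.build({'name': 'foo', 'version': '1.0', 'release': '1',
+#                   'volume_name': 'DEFAULT'})
+# returns '<topdir>/packages/foo/1.0/1', and pathinfo.repo(1, 'f20-build')
+# returns '<topdir>/repos/f20-build/1'.
+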
+class VirtualMethod(object):
+ # some magic to bind an XML-RPC method to an RPC server.
+ # supports "nested" methods (e.g. examples.getStateName)
+ # supports named arguments (if server does)
+ def __init__(self, func, name):
+ self.__func = func
+ self.__name = name
+ def __getattr__(self, name):
+ return type(self)(self.__func, "%s.%s" % (self.__name, name))
+ def __call__(self, *args, **opts):
+ return self.__func(self.__name,args,opts)
+
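+# Illustrative sketch: attribute access on a ClientSession yields VirtualMethod
+# chains, so with a session instance:
+#   session.getTag('f20-build')       # XML-RPC method name "getTag"
+#   session.host.taskWait(task_id)    # nested method name "host.taskWait"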
+
+class ClientSession(object):
+
+ def __init__(self, baseurl, opts=None, sinfo=None):
+ assert baseurl, "baseurl argument must not be empty"
+ if opts == None:
+ opts = {}
+ else:
+ opts = opts.copy()
+ self.baseurl = baseurl
+ self.opts = opts
+ self._connection = None
+ self._setup_connection()
+ self.setSession(sinfo)
+ self.multicall = False
+ self._calls = []
+ self.logger = logging.getLogger('koji')
+
+ def _setup_connection(self):
+ uri = urlparse.urlsplit(self.baseurl)
+ scheme = uri[0]
+ self._host, _port = urllib.splitport(uri[1])
+ self.explicit_port = bool(_port)
+ self._path = uri[2]
+ default_port = 80
+ if self.opts.get('certs'):
+ ctx = ssl.SSLCommon.CreateSSLContext(self.opts['certs'])
+ cnxOpts = {'ssl_context' : ctx}
+ cnxClass = ssl.SSLCommon.PlgHTTPSConnection
+ default_port = 443
+ elif scheme == 'https':
+ cnxOpts = {}
+ cnxClass = httplib.HTTPSConnection
+ default_port = 443
+ elif scheme == 'http':
+ cnxOpts = {}
+ cnxClass = httplib.HTTPConnection
+ else:
+ raise IOError, "unsupported XML-RPC protocol"
+ # set a default 12 hour connection timeout.
+ # Some Koji operations can take a long time to return, but after 12
+ # hours we can assume something is seriously wrong.
+ timeout = self.opts.setdefault('timeout', 60 * 60 * 12)
+ self._timeout_compat = False
+ if timeout:
+ if sys.version_info[:3] < (2, 6, 0) and 'ssl_context' not in cnxOpts:
+ self._timeout_compat = True
+ else:
+ cnxOpts['timeout'] = timeout
+ self._port = (_port and int(_port) or default_port)
+ self._cnxOpts = cnxOpts
+ self._cnxClass = cnxClass
+ self._close_connection()
+
+ def setSession(self,sinfo):
+ """Set the session info
+
+ If sinfo is None, logout."""
+ if sinfo is None:
+ self.logged_in = False
+ self.callnum = None
+ # do we need to do anything else here?
+ self._setup_connection()
+ else:
+ self.logged_in = True
+ self.callnum = 0
+ self.sinfo = sinfo
+
+ def login(self,opts=None):
+ sinfo = self.callMethod('login',self.opts['user'], self.opts['password'],opts)
+ if not sinfo:
+ return False
+ self.setSession(sinfo)
+ return True
+
+ def subsession(self):
+ "Create a subsession"
+ sinfo = self.callMethod('subsession')
+ return type(self)(self.baseurl,self.opts,sinfo)
+
+ def krb_login(self, principal=None, keytab=None, ccache=None, proxyuser=None):
+ """Log in using Kerberos. If principal is not None and keytab is
+ not None, then get credentials for the given principal from the given keytab.
+ If both are None, authenticate using existing local credentials (as obtained
+ from kinit). ccache is the absolute path to use for the credential cache. If
+ not specified, the default ccache will be used. If proxyuser is specified,
+ log in the given user instead of the user associated with the Kerberos
+ principal. The principal must be in the "ProxyPrincipals" list on
+ the server side."""
+ ctx = krbV.default_context()
+
+ if ccache != None:
+ ccache = krbV.CCache(name='FILE:' + ccache, context=ctx)
+ else:
+ ccache = ctx.default_ccache()
+
+ if principal != None:
+ if keytab != None:
+ cprinc = krbV.Principal(name=principal, context=ctx)
+ keytab = krbV.Keytab(name=keytab, context=ctx)
+ ccache.init(cprinc)
+ ccache.init_creds_keytab(principal=cprinc, keytab=keytab)
+ else:
+ raise AuthError, 'cannot specify a principal without a keytab'
+ else:
+ # We're trying to log ourselves in. Connect using existing credentials.
+ cprinc = ccache.principal()
+
+ sprinc = krbV.Principal(name=self._serverPrincipal(cprinc), context=ctx)
+
+ ac = krbV.AuthContext(context=ctx)
+ ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME
+ ac.rcache = ctx.default_rcache()
+
+ # create and encode the authentication request
+ (ac, req) = ctx.mk_req(server=sprinc, client=cprinc,
+ auth_context=ac, ccache=ccache,
+ options=krbV.AP_OPTS_MUTUAL_REQUIRED)
+ req_enc = base64.encodestring(req)
+
+ # ask the server to authenticate us
+ (rep_enc, sinfo_enc, addrinfo) = self.callMethod('krbLogin', req_enc, proxyuser)
+
+ # Set the addrinfo we received from the server
+ # (necessary before calling rd_priv())
+ # addrinfo is in (serveraddr, serverport, clientaddr, clientport)
+ # format, so swap the pairs because clientaddr is now the local addr
+ ac.addrs = tuple((addrinfo[2], addrinfo[3], addrinfo[0], addrinfo[1]))
+
+ # decode and read the reply from the server
+ rep = base64.decodestring(rep_enc)
+ ctx.rd_rep(rep, auth_context=ac)
+
+ # decode and decrypt the login info
+ sinfo_priv = base64.decodestring(sinfo_enc)
+ sinfo_str = ac.rd_priv(sinfo_priv)
+ sinfo = dict(zip(['session-id', 'session-key'], sinfo_str.split()))
+
+ if not sinfo:
+ self.logger.warn('No session info received')
+ return False
+ self.setSession(sinfo)
+
+ return True
+
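+ # Illustrative usage (a minimal sketch): with a valid local ccache,
+ #   session.krb_login()
+ # or with a service keytab (hypothetical principal and path):
+ #   session.krb_login(principal='compile/builder1@EXAMPLE.COM',
+ #                     keytab='/etc/kojid/kojid.keytab')
+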
+ def _serverPrincipal(self, cprinc):
+ """Get the Kerberos principal of the server we're connecting
+ to, based on baseurl."""
+ servername = self._host
+ #portspec = servername.find(':')
+ #if portspec != -1:
+ # servername = servername[:portspec]
+ realm = cprinc.realm
+ service = self.opts.get('krbservice', 'host')
+
+ return '%s/%s@%s' % (service, servername, realm)
+
+ def ssl_login(self, cert, ca, serverca, proxyuser=None):
+ certs = {}
+ certs['key_and_cert'] = cert
+ certs['ca_cert'] = ca
+ certs['peer_ca_cert'] = serverca
+
+ ctx = ssl.SSLCommon.CreateSSLContext(certs)
+ self._cnxOpts = {'ssl_context' : ctx}
+ # 60 second timeout during login
+ old_timeout = self._cnxOpts.get('timeout')
+ self._cnxOpts['timeout'] = 60
+ try:
+ self._cnxClass = ssl.SSLCommon.PlgHTTPSConnection
+ if self._port == 80 and not self.explicit_port:
+ self._port = 443
+ sinfo = self.callMethod('sslLogin', proxyuser)
+ finally:
+ if old_timeout is None:
+ del self._cnxOpts['timeout']
+ else:
+ self._cnxOpts['timeout'] = old_timeout
+ if not sinfo:
+ raise AuthError, 'unable to obtain a session'
+
+ self.opts['certs'] = certs
+ self.setSession(sinfo)
+
+ return True
+
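+ # Illustrative usage (hypothetical certificate paths):
+ #   session.ssl_login('/etc/pki/koji/client.pem',
+ #                     '/etc/pki/koji/clientca.crt',
+ #                     '/etc/pki/koji/serverca.crt')
+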
+ def logout(self):
+ if not self.logged_in:
+ return
+ try:
+ # bypass _callMethod (no retries)
+ # XXX - is that really what we want?
+ handler, headers, request = self._prepCall('logout', ())
+ self._sendCall(handler, headers, request)
+ except AuthExpired:
+ #this can happen when an exclusive session is forced
+ pass
+ self.setSession(None)
+
+ def _forget(self):
+ """Forget session information, but do not close the session
+
+ This is intended to be used after a fork to prevent the subprocess
+ from affecting the session accidentally."""
+ if not self.logged_in:
+ return
+ self.setSession(None)
+
+ #we've had some trouble with this method causing strange problems
+ #(like infinite recursion). Possibly triggered by initialization failure,
+ #and possibly due to some interaction with __getattr__.
+ #Re-enabling with a small improvement
+ def __del__(self):
+ if self.__dict__:
+ try:
+ self.logout()
+ except:
+ pass
+
+ def callMethod(self,name,*args,**opts):
+ """compatibility wrapper for _callMethod"""
+ return self._callMethod(name, args, opts)
+
+ def _prepCall(self, name, args, kwargs=None):
+ #pass named opts in a way the server can understand
+ if kwargs is None:
+ kwargs = {}
+ if name == 'rawUpload':
+ return self._prepUpload(*args, **kwargs)
+ args = encode_args(*args,**kwargs)
+ if self.logged_in:
+ sinfo = self.sinfo.copy()
+ sinfo['callnum'] = self.callnum
+ self.callnum += 1
+ handler = "%s?%s" % (self._path, urllib.urlencode(sinfo))
+ elif name == 'sslLogin':
+ handler = self._path + '/ssllogin'
+ else:
+ handler = self._path
+ request = dumps(args, name, allow_none=1)
+ headers = [
+ # connection class handles Host
+ ('User-Agent', 'koji/1.7'), #XXX
+ ('Content-Type', 'text/xml'),
+ ('Content-Length', len(request)),
+ ]
+ return handler, headers, request
+
+ def _sendCall(self, handler, headers, request):
+ # handle expired connections
+ for i in (0, 1):
+ try:
+ return self._sendOneCall(handler, headers, request)
+ except socket.error, e:
+ self._close_connection()
+ if i or getattr(e, 'errno', None) not in (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE):
+ raise
+ except httplib.BadStatusLine:
+ self._close_connection()
+ if i:
+ raise
+
+
+ def _sendOneCall(self, handler, headers, request):
+ cnx = self._get_connection()
+ if self.opts.get('debug_xmlrpc', False):
+ cnx.set_debuglevel(1)
+ cnx.putrequest('POST', handler)
+ for n, v in headers:
+ cnx.putheader(n, v)
+ cnx.endheaders()
+ cnx.send(request)
+ response = cnx.getresponse()
+ try:
+ ret = self._read_xmlrpc_response(response, handler)
+ finally:
+ response.close()
+ return ret
+
+ def _get_connection(self):
+ key = (self._cnxClass, self._host, self._port)
+ if self._connection and self.opts.get('keepalive'):
+ if key == self._connection[0]:
+ cnx = self._connection[1]
+ if getattr(cnx, 'sock', None):
+ return cnx
+ cnx = self._cnxClass(self._host, self._port, **self._cnxOpts)
+ self._connection = (key, cnx)
+ if self._timeout_compat:
+ # in python < 2.6 httplib does not support the timeout option
+ # but socket supports it since 2.3
+ cnx.connect()
+ cnx.sock.settimeout(self.opts['timeout'])
+ return cnx
+
+ def _close_connection(self):
+ if self._connection:
+ self._connection[1].close()
+ self._connection = None
+
+ def _read_xmlrpc_response(self, response, handler=''):
+ #XXX honor debug_xmlrpc
+ if response.status != 200:
+ if (response.getheader("content-length", 0)):
+ response.read()
+ raise xmlrpclib.ProtocolError(self._host + handler,
+ response.status, response.reason, response.msg)
+ p, u = xmlrpclib.getparser()
+ while True:
+ chunk = response.read(8192)
+ if not chunk:
+ break
+ if self.opts.get('debug_xmlrpc', False):
+ print "body: %r" % chunk
+ p.feed(chunk)
+ p.close()
+ result = u.close()
+ if len(result) == 1:
+ result = result[0]
+ return result
+
+ def _callMethod(self, name, args, kwargs=None):
+ """Make a call to the hub with retries and other niceties"""
+
+ if self.multicall:
+ if kwargs is None:
+ kwargs = {}
+ args = encode_args(*args, **kwargs)
+ self._calls.append({'methodName': name, 'params': args})
+ return MultiCallInProgress
+ else:
+ handler, headers, request = self._prepCall(name, args, kwargs)
+ tries = 0
+ self.retries = 0
+ debug = self.opts.get('debug',False)
+ max_retries = self.opts.get('max_retries',30)
+ interval = self.opts.get('retry_interval',20)
+ while True:
+ tries += 1
+ self.retries += 1
+ try:
+ return self._sendCall(handler, headers, request)
+ #basically, we want to retry on most errors, with a few exceptions
+ # - faults (this means the call completed and failed)
+ # - SystemExit, KeyboardInterrupt
+ # note that, for logged-in sessions the server should tell us (via a RetryError fault)
+ # if the call cannot be retried. For non-logged-in sessions, all calls should be read-only
+ # and hence retryable.
+ except Fault, fault:
+ #try to convert the fault to a known exception
+ err = convertFault(fault)
+ if isinstance(err, ServerOffline):
+ if self.opts.get('offline_retry',False):
+ secs = self.opts.get('offline_retry_interval', interval)
+ self.logger.debug("Server offline. Retrying in %i seconds", secs)
+ time.sleep(secs)
+ #reset try count - this isn't a typical error, this is a running server
+ #correctly reporting an outage
+ tries = 0
+ continue
+ raise err
+ except (SystemExit, KeyboardInterrupt):
+ #(depending on the python version, these may or may not be subclasses of Exception)
+ raise
+ except OpenSSL.SSL.Error as e:
+ # There's no point in retrying this
+ raise
+ except Exception, e:
+ self._close_connection()
+ if not self.logged_in:
+ #in the past, non-logged-in sessions did not retry. For compatibility purposes
+ #this behavior is governed by the anon_retry opt.
+ if not self.opts.get('anon_retry',False):
+ raise
+ if tries > max_retries:
+ raise
+ #otherwise keep retrying
+ if self.logger.isEnabledFor(logging.DEBUG):
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ self.logger.debug(tb_str)
+ self.logger.info("Try #%s for call %s (%s) failed: %s", tries, self.callnum, name, e)
+ if tries > 1:
+ # first retry is immediate, after that we honor retry_interval
+ time.sleep(interval)
+ #not reached
+
+ def multiCall(self, strict=False):
+ """Execute a multicall (multiple function calls passed to the server
+ and executed at the same time, with results being returned in a batch).
+ Before calling this method, the self.multicall field must have
+ been set to True, and then one or more methods must have been called on
+ the current session (those method calls will return a placeholder value). On executing
+ the multicall, the self.multicall field will be reset to False
+ (so subsequent method calls will be executed immediately)
+ and results will be returned in a list. The list will contain one element
+ for each method added to the multicall, in the order it was added to the multicall.
+ Each element of the list will be either a one-element list containing the result of the
+ method call, or a map containing "faultCode" and "faultString" keys, describing the
+ error that occurred during the method call."""
+ if not self.multicall:
+ raise GenericError, 'ClientSession.multicall must be set to True before calling multiCall()'
+ self.multicall = False
+ if len(self._calls) == 0:
+ return []
+
+ calls = self._calls
+ self._calls = []
+ ret = self._callMethod('multiCall', (calls,), {})
+ if strict:
+ #check for faults and raise first one
+ for entry in ret:
+ if isinstance(entry, dict):
+ fault = Fault(entry['faultCode'], entry['faultString'])
+ err = convertFault(fault)
+ raise err
+ return ret
+
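+ # Illustrative usage (a minimal sketch with a session instance):
+ #   session.multicall = True
+ #   session.getTag('f20')           # queued; returns a placeholder
+ #   session.getTag('f21')
+ #   results = session.multiCall()   # e.g. [[taginfo20], [taginfo21]]
+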
+ def __getattr__(self,name):
+ #if name[:1] == '_':
+ # raise AttributeError, "no attribute %r" % name
+ return VirtualMethod(self._callMethod,name)
+
+ def fastUpload(self, localfile, path, name=None, callback=None, blocksize=1048576, overwrite=False):
+ if not self.logged_in:
+ raise ActionNotAllowed, 'You must be logged in to upload files'
+ if name is None:
+ name = os.path.basename(localfile)
+ self.logger.debug("Fast upload: %s to %s/%s", localfile, path, name)
+ size = os.stat(localfile).st_size
+ fo = file(localfile, 'rb')
+ ofs = 0
+ size = os.path.getsize(localfile)
+ start = time.time()
+ if callback:
+ callback(0, size, 0, 0, 0)
+ problems = False
+ full_chksum = util.adler32_constructor()
+ while True:
+ lap = time.time()
+ chunk = fo.read(blocksize)
+ if not chunk:
+ break
+ result = self._callMethod('rawUpload', (chunk, ofs, path, name), {'overwrite':overwrite})
+ if self.retries > 1:
+ problems = True
+ hexdigest = util.adler32_constructor(chunk).hexdigest()
+ full_chksum.update(chunk)
+ if result['size'] != len(chunk):
+ raise GenericError, "server returned wrong chunk size: %s != %s" % (result['size'], len(chunk))
+ if result['hexdigest'] != hexdigest:
+ raise GenericError, 'upload checksum failed: %s != %s' \
+ % (result['hexdigest'], hexdigest)
+ ofs += len(chunk)
+ now = time.time()
+ t1 = max(now - lap, 0.00001)
+ t2 = max(now - start, 0.00001)
+ # max is to prevent possible divide by zero in callback function
+ if callback:
+ callback(ofs, size, len(chunk), t1, t2)
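+# Illustrative sketch (hypothetical maveninfo): dashes in the version become
+# underscores and the name is group_id-artifact_id, e.g.
+#   maven_info_to_nvr({'group_id': 'org.example', 'artifact_id': 'foo',
+#                      'version': '1.0-SNAPSHOT'})
+#   => {'name': 'org.example-foo', 'version': '1.0_SNAPSHOT', 'release': None,
+#      'epoch': None, 'package_name': 'org.example-foo'}
+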
+ if ofs != size:
+ self.logger.error("Local file changed size: %s, %s -> %s", localfile, size, ofs)
+ chk_opts = {}
+ if problems:
+ chk_opts['verify'] = 'adler32'
+ result = self._callMethod('checkUpload', (path, name), chk_opts)
+ if int(result['size']) != ofs:
+ raise GenericError, "Uploaded file is wrong length: %s/%s, %s != %s" \
+ % (path, name, result['sumlength'], ofs)
+ if problems and result['hexdigest'] != full_chksum.hexdigest():
+ raise GenericError, "Uploaded file has wrong checksum: %s/%s, %s != %s" \
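+# For example (illustrative): hex_string('abc') returns '616263'.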
+ % (path, name, result['hexdigest'], full_chksum.hexdigest())
+ self.logger.debug("Fast upload: %s complete. %i bytes in %.1f seconds", localfile, size, t2)
+
+ def _prepUpload(self, chunk, offset, path, name, verify="adler32", overwrite=False):
+ """prep a rawUpload call"""
+ if not self.logged_in:
+ raise ActionNotAllowed, "you must be logged in to upload"
+ args = self.sinfo.copy()
+ args['callnum'] = self.callnum
+ args['filename'] = name
+ args['filepath'] = path
+ args['fileverify'] = verify
+ args['offset'] = str(offset)
+ if overwrite:
+ args['overwrite'] = "1"
+ size = len(chunk)
+ self.callnum += 1
+ handler = "%s?%s" % (self._path, urllib.urlencode(args))
+ headers = [
+ ('User-Agent', 'koji/1.7'), #XXX
+ ("Content-Type", "application/octet-stream"),
+ ("Content-length", str(size)),
+ ]
+ request = chunk
+ return handler, headers, request
+
+ def uploadWrapper(self, localfile, path, name=None, callback=None, blocksize=1048576, overwrite=True):
+ """upload a file in chunks using the uploadFile call"""
+ if self.opts.get('use_fast_upload'):
+ self.fastUpload(localfile, path, name, callback, blocksize, overwrite)
+ return
+ if name is None:
+ name = os.path.basename(localfile)
+
+ # check if server supports fast upload
+ try:
+ check = self._callMethod('checkUpload', (path, name))
+ # fast upload was introduced in 1.7.1, earlier servers will not
+ # recognise this call and return an error
+ except GenericError:
+ pass
+ else:
+ self.fastUpload(localfile, path, name, callback, blocksize, overwrite)
+ return
+
+ start=time.time()
+ # XXX - stick in a config or something
+ retries=3
+ fo = file(localfile, "r") #specify bufsize?
+ totalsize = os.path.getsize(localfile)
+ ofs = 0
+ md5sum = md5_constructor()
+ debug = self.opts.get('debug',False)
+ if callback:
+ callback(0, totalsize, 0, 0, 0)
+ while True:
+ lap = time.time()
+ contents = fo.read(blocksize)
+ md5sum.update(contents)
+ size = len(contents)
+ data = base64.encodestring(contents)
+ if size == 0:
+ # end of file, use offset = -1 to finalize upload
+ offset = -1
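+# Illustrative usage sketch (hypothetical data): grplist entries are group
+# dicts as returned by the hub, e.g.
+#   grplist = [{'name': 'build',
+#               'packagelist': [{'package': 'gcc'}, {'package': 'make'}],
+#               'grouplist': []}]
+#   make_groups_spec(grplist, name='buildsys-build', buildgroup='build')
+# yields specfile text whose Requires: lines list gcc and make.
+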
+ digest = md5sum.hexdigest()
+ sz = ofs
+ else:
+ offset = ofs
+ digest = md5_constructor(contents).hexdigest()
+ sz = size
+ del contents
+ tries = 0
+ while True:
+ if debug:
+ self.logger.debug("uploadFile(%r,%r,%r,%r,%r,...)" %(path,name,sz,digest,offset))
+ if self.callMethod('uploadFile', path, name, encode_int(sz), digest, encode_int(offset), data):
+ break
+ if tries <= retries:
+ tries += 1
+ continue
+ else:
+ raise GenericError, "Error uploading file %s, offset %d" %(path, offset)
+ if size == 0:
+ break
+ ofs += size
+ now = time.time()
+ t1 = now - lap
+ if t1 <= 0:
+ t1 = 1
+ t2 = now - start
+ if t2 <= 0:
+ t2 = 1
+ if debug:
+ self.logger.debug("Uploaded %d bytes in %f seconds (%f kbytes/sec)" % (size,t1,size/t1/1024))
+ if debug:
+ self.logger.debug("Total: %d bytes in %f seconds (%f kbytes/sec)" % (ofs,t2,ofs/t2/1024))
+ if callback:
+ callback(ofs, totalsize, size, t1, t2)
+ fo.close()
+
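+ # Illustrative usage (hypothetical paths, with a logged-in session):
+ #   session.uploadWrapper('/tmp/foo-1.0-1.src.rpm', 'cli-build/1234567')
+ # uploads in blocksize chunks; if use_fast_upload is set in the session
+ # opts, fastUpload() handles the transfer instead.
+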
+ def downloadTaskOutput(self, taskID, fileName, offset=0, size=-1):
+ """Download the file with the given name, generated by the task with the
+ given ID.
+
+ Note: This method does not work with multicall.
+ """
+ if self.multicall:
+ raise GenericError, 'downloadTaskOutput() may not be called during a multicall'
+ result = self.callMethod('downloadTaskOutput', taskID, fileName, offset, size)
+ return base64.decodestring(result)
+
+class DBHandler(logging.Handler):
+ """
+ A handler class which writes logging records, appropriately formatted,
+ to a database.
+ """
+ def __init__(self, cnx, table, mapping=None):
+ """
+ Initialize the handler.
+
+ A database connection and table name are required.
+ """
+ logging.Handler.__init__(self)
+ self.cnx = cnx
+ self.table = table
+ if mapping is None:
+ self.mapping = { 'message': '%(message)s' }
+ else:
+ self.mapping = mapping
+
+ def emit(self, record):
+ """
+ Emit a record.
+
+ If a formatter is specified, it is used to format the record.
+ """
+ try:
+ cursor = self.cnx.cursor()
+ columns = []
+ values = []
+ data = {}
+ record.message = record.getMessage()
+ for key, value in self.mapping.iteritems():
+ value = str(value)
+ if value.find("%(asctime)") >= 0:
+ if self.formatter:
+ fmt = self.formatter
+ else:
+ fmt = logging._defaultFormatter
+ record.asctime = fmt.formatTime(record, fmt.datefmt)
+ columns.append(key)
+ values.append("%%(%s)s" % key)
+ data[key] = value % record.__dict__
+ #values.append(_quote(value % record.__dict__))
+ columns = ",".join(columns)
+ values = ",".join(values)
+ command = "INSERT INTO %s (%s) VALUES (%s)" % (self.table, columns, values)
+ #note we're letting cursor.execute do the escaping
+ cursor.execute(command,data)
+ cursor.close()
+ #self.cnx.commit()
+ #XXX - committing here is most likely wrong, but we need to set commit_pending or something
+ # ...and this is really the wrong place for that
+ except:
+ self.handleError(record)
+
+#used by parse_timestamp
+TIMESTAMP_RE = re.compile("(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)")
+
+def parse_timestamp(ts):
+ """Parse a timestamp returned from a query"""
+ m = TIMESTAMP_RE.search(ts)
+ t = tuple([int(x) for x in m.groups()]) + (0,0,0)
+ return time.mktime(t)
+
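+# For example (illustrative): parse_timestamp('2015-12-02 23:28:36') returns
+# the local-time epoch value computed by time.mktime().
+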
+def formatTime(value):
+ """Format a timestamp so it looks nicer"""
+ if not value:
+ return ''
+ elif isinstance(value, datetime.datetime):
+ return value.strftime('%Y-%m-%d %H:%M:%S')
+ else:
+ # trim off the microseconds, if present
+ dotidx = value.rfind('.')
+ if dotidx != -1:
+ return value[:dotidx]
+ else:
+ return value
+
+def formatTimeLong(value):
+ """Format a timestamp to a more human-reable format, i.e.:
+ Sat, 07 Sep 2002 00:00:01 GMT
+ """
+ if not value:
+ return ''
+ else:
+ # Assume the string value passed in is the local time
+ localtime = time.mktime(time.strptime(formatTime(value), '%Y-%m-%d %H:%M:%S'))
+ return time.strftime('%a, %d %b %Y %H:%M:%S %Z', time.localtime(localtime))
+
+def buildLabel(buildInfo, showEpoch=False):
+ """Format buildInfo (dict) into a descriptive label."""
+ epoch = buildInfo.get('epoch')
+ if showEpoch and epoch != None:
+ epochStr = '%i:' % epoch
+ else:
+ epochStr = ''
+ name = buildInfo.get('package_name')
+ if not name:
+ name = buildInfo.get('name')
+ return '%s%s-%s-%s' % (epochStr, name,
+ buildInfo.get('version'),
+ buildInfo.get('release'))
+
+def _module_info(url):
+ module_info = ''
+ if '?' in url:
+ # extract the module path
+ module_info = url[url.find('?') + 1:url.find('#')]
+ # Find the first / after the scheme://
+ repo_start = url.find('/', url.find('://') + 3)
+ # Find the ? if present, otherwise find the #
+ repo_end = url.find('?')
+ if repo_end == -1:
+ repo_end = url.find('#')
+ repo_info = url[repo_start:repo_end]
+ rev_info = url[url.find('#') + 1:]
+ if module_info:
+ return '%s:%s:%s' % (repo_info, module_info, rev_info)
+ else:
+ return '%s:%s' % (repo_info, rev_info)
+
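+# Illustrative sketch (hypothetical URL): for an SCM URL of the form
+#   'git://scm.example.com/repo?mod#rev'
+# this returns '/repo:mod:rev'; without the '?mod' part it returns '/repo:rev'.
+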
+def taskLabel(taskInfo):
+ try:
+ return _taskLabel(taskInfo)
+ except Exception:
+ return "malformed task"
+
+def _taskLabel(taskInfo):
+ """Format taskInfo (dict) into a descriptive label."""
+ method = taskInfo['method']
+ arch = taskInfo['arch']
+ extra = ''
+ if method in ('build', 'maven'):
+ if taskInfo.has_key('request'):
+ source, target = taskInfo['request'][:2]
+ if '://' in source:
+ module_info = _module_info(source)
+ else:
+ module_info = os.path.basename(source)
+ extra = '%s, %s' % (target, module_info)
+ elif method in ('buildSRPMFromSCM', 'buildSRPMFromCVS'):
+ if taskInfo.has_key('request'):
+ url = taskInfo['request'][0]
+ extra = _module_info(url)
+ elif method == 'buildArch':
+ if taskInfo.has_key('request'):
+ srpm, tagID, arch = taskInfo['request'][:3]
+ srpm = os.path.basename(srpm)
+ extra = '%s, %s' % (srpm, arch)
+ elif method == 'buildMaven':
+ if taskInfo.has_key('request'):
+ build_tag = taskInfo['request'][1]
+ extra = build_tag['name']
+ elif method == 'wrapperRPM':
+ if taskInfo.has_key('request'):
+ build_target = taskInfo['request'][1]
+ build = taskInfo['request'][2]
+ if build:
+ extra = '%s, %s' % (build_target['name'], buildLabel(build))
+ else:
+ extra = build_target['name']
+ elif method == 'winbuild':
+ if taskInfo.has_key('request'):
+ vm = taskInfo['request'][0]
+ url = taskInfo['request'][1]
+ target = taskInfo['request'][2]
+ module_info = _module_info(url)
+ extra = '%s, %s' % (target, module_info)
+ elif method == 'vmExec':
+ if taskInfo.has_key('request'):
+ extra = taskInfo['request'][0]
+ elif method == 'buildNotification':
+ if taskInfo.has_key('request'):
+ build = taskInfo['request'][1]
+ extra = buildLabel(build)
+ elif method == 'newRepo':
+ if taskInfo.has_key('request'):
+ extra = str(taskInfo['request'][0])
+ elif method in ('tagBuild', 'tagNotification'):
+ # There is no displayable information included in the request
+ # for these methods
+ pass
+ elif method == 'prepRepo':
+ if taskInfo.has_key('request'):
+ tagInfo = taskInfo['request'][0]
+ extra = tagInfo['name']
+ elif method == 'createrepo':
+ if taskInfo.has_key('request'):
+ arch = taskInfo['request'][1]
+ extra = arch
+ elif method == 'dependantTask':
+ if taskInfo.has_key('request'):
+ extra = ', '.join([subtask[0] for subtask in taskInfo['request'][1]])
+ elif method in ('chainbuild', 'chainmaven'):
+ if taskInfo.has_key('request'):
+ extra = taskInfo['request'][1]
+ elif method == 'waitrepo':
+ if taskInfo.has_key('request'):
+ extra = str(taskInfo['request'][0])
+ if len(taskInfo['request']) >= 3:
+ nvrs = taskInfo['request'][2]
+ if isinstance(nvrs, list):
+ extra += ', ' + ', '.join(nvrs)
+ elif method in ('livecd', 'appliance', 'image'):
+ if taskInfo.has_key('request'):
+ stuff = taskInfo['request']
+ if method == 'image':
+ kickstart = os.path.basename(stuff[-1]['kickstart'])
+ else:
+ kickstart = os.path.basename(stuff[4])
+ extra = '%s, %s-%s, %s' % (stuff[3], stuff[0], stuff[1], kickstart)
+ elif method in ('createLiveCD', 'createAppliance', 'createImage'):
+ if taskInfo.has_key('request'):
+ stuff = taskInfo['request']
+ if method == 'createImage':
+ kickstart = os.path.basename(stuff[-1]['kickstart'])
+ else:
+ kickstart = os.path.basename(stuff[7])
+ extra = '%s, %s-%s-%s, %s, %s' % (stuff[4]['name'], stuff[0],
+ stuff[1], stuff[2], kickstart, stuff[3])
+ elif method == 'restart':
+ if taskInfo.has_key('request'):
+ host = taskInfo['request'][0]
+ extra = host['name']
+ elif method == 'restartVerify':
+ if taskInfo.has_key('request'):
+ task_id, host = taskInfo['request'][:2]
+ extra = host['name']
+
+ if extra:
+ return '%s (%s)' % (method, extra)
+ else:
+ return '%s (%s)' % (method, arch)
+
+def _forceAscii(value):
+ """Replace characters not in the 7-bit ASCII range
+ with "?"."""
+ return ''.join([(ord(c) <= 127) and c or '?' for c in value])
+
+def fixEncoding(value, fallback='iso8859-15'):
+ """
+ Convert value to a 'str' object encoded as UTF-8.
+ If value is not valid UTF-8 to begin with, assume it is
+ encoded in the 'fallback' charset.
+ """
+ if not value:
+ return ''
+
+ if isinstance(value, unicode):
+ # value is already unicode, so just convert it
+ # to a utf8-encoded str
+ return value.encode('utf8')
+ else:
+ # value is a str, but may be encoded in utf8 or some
+ # other non-ascii charset. Try to verify it's utf8, and if not,
+ # decode it using the fallback encoding.
+ try:
+ return value.decode('utf8').encode('utf8')
+ except UnicodeDecodeError, err:
+ return value.decode(fallback).encode('utf8')
+
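+# For example (illustrative): fixEncoding(u'caf\xe9') and the latin-1 str
+# 'caf\xe9' both come back as the UTF-8 encoded str 'caf\xc3\xa9'.
+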
+def add_file_logger(logger, fn):
+ if not os.path.exists(fn):
+ try:
+ fh = open(fn, 'w')
+ fh.close()
+ except (ValueError, IOError):
+ return
+ if not os.path.isfile(fn):
+ return
+ if not os.access(fn,os.W_OK):
+ return
+ handler = logging.handlers.RotatingFileHandler(fn, maxBytes=1024*1024*10, backupCount=5)
+ handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
+ logging.getLogger(logger).addHandler(handler)
+
+def add_stderr_logger(logger):
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] {%(process)d} %(name)s:%(lineno)d %(message)s'))
+ handler.setLevel(logging.DEBUG)
+ logging.getLogger(logger).addHandler(handler)
+
+def add_sys_logger(logger):
+ # For remote logging, use e.g.:
+ # address = ('host.example.com', logging.handlers.SysLogHandler.SYSLOG_UDP_PORT)
+ address = "/dev/log"
+ handler = logging.handlers.SysLogHandler(address=address,
+ facility=logging.handlers.SysLogHandler.LOG_DAEMON)
+ handler.setFormatter(logging.Formatter('%(name)s: %(message)s'))
+ handler.setLevel(logging.INFO)
+ logging.getLogger(logger).addHandler(handler)
+
+def add_mail_logger(logger, addr):
+ if not addr:
+ return
+ handler = logging.handlers.SMTPHandler("localhost",
+ "%s@%s" % (pwd.getpwuid(os.getuid())[0], socket.getfqdn()),
+ addr,
+ "%s: error notice" % socket.getfqdn())
+ handler.setFormatter(logging.Formatter('%(pathname)s:%(lineno)d [%(levelname)s] %(message)s'))
+ handler.setLevel(logging.ERROR)
+ logging.getLogger(logger).addHandler(handler)
+
+def add_db_logger(logger, cnx):
+ handler = DBHandler(cnx, "log_messages", {'message': '%(message)s',
+ 'message_time': '%(asctime)s',
+ 'logger_name': '%(name)s',
+ 'level': '%(levelname)s',
+ 'location': '%(pathname)s:%(lineno)d',
+ 'host': socket.getfqdn(),
+ })
+ handler.setFormatter(logging.Formatter(datefmt='%Y-%m-%d %H:%M:%S'))
+ logging.getLogger(logger).addHandler(handler)
+ return handler
+
+def remove_log_handler(logger, handler):
+ logging.getLogger(logger).removeHandler(handler)
diff --git a/koji/auth.py b/koji/auth.py
new file mode 100644
index 0000000..d419d77
--- /dev/null
+++ b/koji/auth.py
@@ -0,0 +1,736 @@
+# authentication module
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+import socket
+import string
+import random
+import base64
+import krbV
+import koji
+import cgi #for parse_qs
+from context import context
+
+# 1 - load session if provided
+# - check uri for session id
+# - load session info from db
+# - validate session
+# 2 - create a session
+# - maybe in two steps
+# -
+
+
+RetryWhitelist = [
+ 'host.taskWait',
+ 'host.taskUnwait',
+ 'host.taskSetWait',
+ 'host.updateHost',
+ 'host.setBuildRootState',
+ 'repoExpire',
+ 'repoDelete',
+ 'repoProblem',
+]
+
+
+class Session(object):
+
+ def __init__(self,args=None,hostip=None):
+ self.logged_in = False
+ self.id = None
+ self.master = None
+ self.key = None
+ self.user_id = None
+ self.hostip = None
+ self.user_data = {}
+ self.message = ''
+ self.exclusive = False
+ self.lockerror = None
+ self.callnum = None
+ #get session data from request
+ if args is None:
+ environ = getattr(context,'environ',{})
+ args = environ.get('QUERY_STRING','')
+ if not args:
+ self.message = 'no session args'
+ return
+ args = cgi.parse_qs(args,strict_parsing=True)
+ if hostip is None:
+ hostip = context.environ['REMOTE_ADDR']
+ #XXX - REMOTE_ADDR not promised by wsgi spec
+ if hostip == '127.0.0.1':
+ hostip = socket.gethostbyname(socket.gethostname())
+ try:
+ id = long(args['session-id'][0])
+ key = args['session-key'][0]
+ except KeyError, field:
+ raise koji.AuthError, '%s not specified in session args' % field
+ try:
+ callnum = args['callnum'][0]
+ except:
+ callnum = None
+ #lookup the session
+ c = context.cnx.cursor()
+ fields = {
+ 'authtype': 'authtype',
+ 'callnum': 'callnum',
+ 'exclusive': 'exclusive',
+ 'expired': 'expired',
+ 'master': 'master',
+ 'start_time': 'start_time',
+ 'update_time': 'update_time',
+ 'EXTRACT(EPOCH FROM start_time)': 'start_ts',
+ 'EXTRACT(EPOCH FROM update_time)': 'update_ts',
+ 'user_id': 'user_id',
+ }
+ fields, aliases = zip(*fields.items())
+ q = """
+ SELECT %s FROM sessions
+ WHERE id = %%(id)i
+ AND key = %%(key)s
+ AND hostip = %%(hostip)s
+ FOR UPDATE
+ """ % ",".join(fields)
+ c.execute(q,locals())
+ row = c.fetchone()
+ if not row:
+ raise koji.AuthError, 'Invalid session or bad credentials'
+ session_data = dict(zip(aliases, row))
+ #check for expiration
+ if session_data['expired']:
+ raise koji.AuthExpired, 'session "%i" has expired' % id
+ #check for callnum sanity
+ if callnum is not None:
+ try:
+ callnum = int(callnum)
+ except (ValueError,TypeError):
+ raise koji.AuthError, "Invalid callnum: %r" % callnum
+ lastcall = session_data['callnum']
+ if lastcall is not None:
+ if lastcall > callnum:
+ raise koji.SequenceError, "%d > %d (session %d)" \
+ % (lastcall,callnum,id)
+ elif lastcall == callnum:
+ #Some explanation:
+ #This function is one of the few that performs its own commit.
+ #However, our storage of the current callnum is /after/ that
+ #commit. This means that the current callnum only gets committed if
+ #a commit happens afterward.
+ #We only schedule a commit for dml operations, so if we find the
+ #callnum in the db then a previous attempt succeeded but failed to
+ #return. Data was changed, so we cannot simply try the call again.
+ method = getattr(context, 'method', 'UNKNOWN')
+ if method not in RetryWhitelist:
+ raise koji.RetryError, \
+ "unable to retry call %d (method %s) for session %d" \
+ % (callnum, method, id)
+
+ # read user data
+ #historical note:
+ # we used to get a row lock here as an attempt to maintain sanity of exclusive
+ # sessions, but it was an imperfect approach and the lock could cause some
+ # performance issues.
+ fields = ('name','status','usertype')
+ q = """SELECT %s FROM users WHERE id=%%(user_id)s""" % ','.join(fields)
+ c.execute(q,session_data)
+ user_data = dict(zip(fields,c.fetchone()))
+
+ if user_data['status'] != koji.USER_STATUS['NORMAL']:
+ raise koji.AuthError, 'logins by %s are not allowed' % user_data['name']
+ #check for exclusive sessions
+ if session_data['exclusive']:
+ #we are the exclusive session for this user
+ self.exclusive = True
+ else:
+ #see if an exclusive session exists
+ q = """SELECT id FROM sessions WHERE user_id=%(user_id)s
+ AND "exclusive" = TRUE AND expired = FALSE"""
+ #should not return multiple rows (unique constraint)
+ c.execute(q,session_data)
+ row = c.fetchone()
+ if row:
+ (excl_id,) = row
+ if excl_id == session_data['master']:
+ #(note excl_id cannot be None)
+ #our master session has the lock
+ self.exclusive = True
+ else:
+ #a session unrelated to us has the lock
+ self.lockerror = "User locked by another session"
+ # we don't enforce here, but rely on the dispatcher to enforce
+ # if appropriate (otherwise it would be impossible to steal
+ # an exclusive session with the force option).
+
+ # update timestamp
+ q = """UPDATE sessions SET update_time=NOW() WHERE id = %(id)i"""
+ c.execute(q,locals())
+ #save update time
+ context.cnx.commit()
+
+ #update callnum (this is deliberately after the commit)
+ #see earlier note near RetryError
+ if callnum is not None:
+ q = """UPDATE sessions SET callnum=%(callnum)i WHERE id = %(id)i"""
+ c.execute(q,locals())
+
+ # record the login data
+ self.id = id
+ self.key = key
+ self.hostip = hostip
+ self.callnum = callnum
+ self.user_id = session_data['user_id']
+ self.authtype = session_data['authtype']
+ self.master = session_data['master']
+ self.session_data = session_data
+ self.user_data = user_data
+ # we look up perms, groups, and host_id on demand, see __getattr__
+ self._perms = None
+ self._groups = None
+ self._host_id = ''
+ self.logged_in = True
+
+ def __getattr__(self, name):
+ # grab perm and groups data on the fly
+ if name == 'perms':
+ if self._perms is None:
+ #in a dict for quicker lookup
+ self._perms = dict([[name,1] for name in get_user_perms(self.user_id)])
+ return self._perms
+ elif name == 'groups':
+ if self._groups is None:
+ self._groups = get_user_groups(self.user_id)
+ return self._groups
+ elif name == 'host_id':
+ if self._host_id == '':
+ self._host_id = self._getHostId()
+ return self._host_id
+ else:
+ raise AttributeError, "%s" % name
+
+ def __str__(self):
+ # convenient display for debugging
+ if not self.logged_in:
+ s = "session: not logged in"
+ else:
+ s = "session %d: %r" % (self.id, self.__dict__)
+ if self.message:
+ s += " (%s)" % self.message
+ return s
+
+ def validate(self):
+ if self.lockerror:
+ raise koji.AuthLockError, self.lockerror
+ return True
+
+ def checkLoginAllowed(self, user_id):
+ """Verify that the user is allowed to login"""
+ cursor = context.cnx.cursor()
+ query = """SELECT name, usertype, status FROM users WHERE id = %(user_id)i"""
+ cursor.execute(query, locals())
+ result = cursor.fetchone()
+ if not result:
+ raise koji.AuthError, 'invalid user_id: %s' % user_id
+ name, usertype, status = result
+
+ if status != koji.USER_STATUS['NORMAL']:
+ raise koji.AuthError, 'logins by %s are not allowed' % name
+
+ def login(self,user,password,opts=None):
+ """create a login session"""
+ if opts is None:
+ opts = {}
+ if not isinstance(password,str) or len(password) == 0:
+ raise koji.AuthError, 'invalid username or password'
+ if self.logged_in:
+ raise koji.GenericError, "Already logged in"
+ hostip = opts.get('hostip')
+ if hostip is None:
+ hostip = context.environ['REMOTE_ADDR']
+ #XXX - REMOTE_ADDR not promised by wsgi spec
+ if hostip == '127.0.0.1':
+ hostip = socket.gethostbyname(socket.gethostname())
+
+ # check passwd
+ c = context.cnx.cursor()
+ q = """SELECT id FROM users
+ WHERE name = %(user)s AND password = %(password)s"""
+ c.execute(q,locals())
+ r = c.fetchone()
+ if not r:
+ raise koji.AuthError, 'invalid username or password'
+ user_id = r[0]
+
+ self.checkLoginAllowed(user_id)
+
+ #create session and return
+ sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_NORMAL)
+ session_id = sinfo['session-id']
+ context.cnx.commit()
+ return sinfo
+
+ def krbLogin(self, krb_req, proxyuser=None):
+ """Authenticate the user using the base64-encoded
+ AP_REQ message in krb_req. If proxyuser is not None,
+ log in that user instead of the user associated with the
+ Kerberos principal. The principal must be an authorized
+ "proxy_principal" in the server config."""
+ if self.logged_in:
+ raise koji.AuthError, "Already logged in"
+
+ if not (context.opts.get('AuthPrincipal') and context.opts.get('AuthKeytab')):
+ raise koji.AuthError, 'not configured for Kerberos authentication'
+
+ ctx = krbV.default_context()
+ srvprinc = krbV.Principal(name=context.opts.get('AuthPrincipal'), context=ctx)
+ srvkt = krbV.Keytab(name=context.opts.get('AuthKeytab'), context=ctx)
+
+ ac = krbV.AuthContext(context=ctx)
+ ac.flags = krbV.KRB5_AUTH_CONTEXT_DO_SEQUENCE|krbV.KRB5_AUTH_CONTEXT_DO_TIME
+ conninfo = self.getConnInfo()
+ ac.addrs = conninfo
+
+ # decode and read the authentication request
+ req = base64.decodestring(krb_req)
+ ac, opts, sprinc, ccreds = ctx.rd_req(req, server=srvprinc, keytab=srvkt,
+ auth_context=ac,
+ options=krbV.AP_OPTS_MUTUAL_REQUIRED)
+ cprinc = ccreds[2]
+
+ # Successfully authenticated via Kerberos, now log in
+ if proxyuser:
+ proxyprincs = [princ.strip() for princ in context.opts.get('ProxyPrincipals', '').split(',')]
+ if cprinc.name in proxyprincs:
+ login_principal = proxyuser
+ else:
+ raise koji.AuthError, \
+ 'Kerberos principal %s is not authorized to log in other users' % cprinc.name
+ else:
+ login_principal = cprinc.name
+ user_id = self.getUserIdFromKerberos(login_principal)
+ if not user_id:
+ if context.opts.get('LoginCreatesUser'):
+ user_id = self.createUserFromKerberos(login_principal)
+ else:
+ raise koji.AuthError, 'Unknown Kerberos principal: %s' % login_principal
+
+ self.checkLoginAllowed(user_id)
+
+ hostip = context.environ['REMOTE_ADDR']
+ #XXX - REMOTE_ADDR not promised by wsgi spec
+ if hostip == '127.0.0.1':
+ hostip = socket.gethostbyname(socket.gethostname())
+
+ sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_KERB)
+
+ # encode the reply
+ rep = ctx.mk_rep(auth_context=ac)
+ rep_enc = base64.encodestring(rep)
+
+ # encrypt and encode the login info
+ sinfo_priv = ac.mk_priv('%(session-id)s %(session-key)s' % sinfo)
+ sinfo_enc = base64.encodestring(sinfo_priv)
+
+ return (rep_enc, sinfo_enc, conninfo)
+
+ def getConnInfo(self):
+ """Return a tuple containing connection information
+ in the following format:
+ (local ip addr, local port, remote ip, remote port)"""
+ # For some reason req.connection.{local,remote}_addr contain port info,
+ # but no IP info. Use req.connection.{local,remote}_ip for that instead.
+ # See: http://lists.planet-lab.org/pipermail/devel-community/2005-June/001084.html
+ # local_ip seems to always be set to the same value as remote_ip,
+ # so get the local ip via a different method
+ local_ip = socket.gethostbyname(context.environ['SERVER_NAME'])
+ remote_ip = context.environ['REMOTE_ADDR']
+ #XXX - REMOTE_ADDR not promised by wsgi spec
+
+ # it appears that calling setports() with *any* value results in authentication
+ # failing with "Incorrect net address", so return 0 (which prevents
+ # python-krbV from calling setports())
+ local_port = 0
+ remote_port = 0
+
+ return (local_ip, local_port, remote_ip, remote_port)
+
+ def sslLogin(self, proxyuser=None):
+ if self.logged_in:
+ raise koji.AuthError, "Already logged in"
+
+ if context.environ['wsgi.url_scheme'] != 'https':
+ raise koji.AuthError, 'cannot call sslLogin() via a non-https connection: %s' % context.environ['wsgi.url_scheme']
+
+ if context.environ.get('SSL_CLIENT_VERIFY') != 'SUCCESS':
+ raise koji.AuthError, 'could not verify client: %s' % context.environ.get('SSL_CLIENT_VERIFY')
+
+ name_dn_component = context.opts.get('DNUsernameComponent', 'CN')
+ client_name = context.environ.get('SSL_CLIENT_S_DN_%s' % name_dn_component)
+ if not client_name:
+ raise koji.AuthError, 'unable to get user information (%s) from client certificate' % name_dn_component
+
+ if proxyuser:
+ client_dn = context.environ.get('SSL_CLIENT_S_DN')
+ proxy_dns = [dn.strip() for dn in context.opts.get('ProxyDNs', '').split('|')]
+ if client_dn in proxy_dns:
+ # the SSL-authenticated user is authorized to log in other users
+ username = proxyuser
+ else:
+ raise koji.AuthError, '%s is not authorized to login other users' % client_dn
+ else:
+ username = client_name
+
+ cursor = context.cnx.cursor()
+ query = """SELECT id FROM users
+ WHERE name = %(username)s"""
+ cursor.execute(query, locals())
+ result = cursor.fetchone()
+ if result:
+ user_id = result[0]
+ else:
+ if context.opts.get('LoginCreatesUser'):
+ user_id = self.createUser(username)
+ else:
+ raise koji.AuthError, 'Unknown user: %s' % username
+
+ self.checkLoginAllowed(user_id)
+
+ hostip = context.environ['REMOTE_ADDR']
+ #XXX - REMOTE_ADDR not promised by wsgi spec
+ if hostip == '127.0.0.1':
+ hostip = socket.gethostbyname(socket.gethostname())
+
+ sinfo = self.createSession(user_id, hostip, koji.AUTHTYPE_SSL)
+ return sinfo
+
+ def makeExclusive(self,force=False):
+ """Make this session exclusive"""
+ c = context.cnx.cursor()
+ if self.master is not None:
+ raise koji.GenericError, "subsessions cannot become exclusive"
+ if self.exclusive:
+ #shouldn't happen
+ raise koji.GenericError, "session is already exclusive"
+ user_id = self.user_id
+ session_id = self.id
+ #acquire a row lock on the user entry
+ q = """SELECT id FROM users WHERE id=%(user_id)s FOR UPDATE"""
+ c.execute(q,locals())
+ # check that no other sessions for this user are exclusive
+ q = """SELECT id FROM sessions WHERE user_id=%(user_id)s
+ AND expired = FALSE AND "exclusive" = TRUE
+ FOR UPDATE"""
+ c.execute(q,locals())
+ row = c.fetchone()
+ if row:
+ if force:
+ #expire the previous exclusive session and try again
+ (excl_id,) = row
+ q = """UPDATE sessions SET expired=TRUE,"exclusive"=NULL WHERE id=%(excl_id)s"""
+ c.execute(q,locals())
+ else:
+ raise koji.AuthLockError, "Cannot get exclusive session"
+ #mark this session exclusive
+ q = """UPDATE sessions SET "exclusive"=TRUE WHERE id=%(session_id)s"""
+ c.execute(q,locals())
+ context.cnx.commit()
+
+ def makeShared(self):
+ """Drop out of exclusive mode"""
+ c = context.cnx.cursor()
+ session_id = self.id
+ q = """UPDATE sessions SET "exclusive"=NULL WHERE id=%(session_id)s"""
+ c.execute(q,locals())
+ context.cnx.commit()
+
+ def logout(self):
+ """expire a login session"""
+ if not self.logged_in:
+ #XXX raise an error?
+ raise koji.AuthError, "Not logged in"
+ update = """UPDATE sessions
+ SET expired=TRUE,exclusive=NULL
+ WHERE id = %(id)i OR master = %(id)i"""
+ #note we expire subsessions as well
+ c = context.cnx.cursor()
+ c.execute(update, {'id': self.id})
+ context.cnx.commit()
+ self.logged_in = False
+
+ def logoutChild(self, session_id):
+ """expire a subsession"""
+ if not self.logged_in:
+ #XXX raise an error?
+ raise koji.AuthError, "Not logged in"
+ update = """UPDATE sessions
+ SET expired=TRUE,exclusive=NULL
+ WHERE id = %(session_id)i AND master = %(master)i"""
+ master = self.id
+ c = context.cnx.cursor()
+ c.execute(update, locals())
+ context.cnx.commit()
+
+ def createSession(self, user_id, hostip, authtype, master=None):
+ """Create a new session for the given user.
+
+ Return a map containing the session-id and session-key.
+ If master is specified, create a subsession
+ """
+ c = context.cnx.cursor()
+
+ # generate a random key
+ alnum = string.ascii_letters + string.digits
+ key = "%s-%s" %(user_id,
+ ''.join([ random.choice(alnum) for x in range(1,20) ]))
+ # use sha? sha.new(phrase).hexdigest()
+
+ # get a session id
+ q = """SELECT nextval('sessions_id_seq')"""
+ c.execute(q, {})
+ (session_id,) = c.fetchone()
+
+ #add session id to database
+ q = """
+ INSERT INTO sessions (id, user_id, key, hostip, authtype, master)
+ VALUES (%(session_id)i, %(user_id)i, %(key)s, %(hostip)s, %(authtype)i, %(master)s)
+ """
+ c.execute(q,locals())
+ context.cnx.commit()
+
+ #return session info
+ return {'session-id' : session_id, 'session-key' : key}
+
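+ # Key-generation sketch, mirroring the logic above (standalone and
+ # illustrative only; user id 42 is hypothetical):
+ #
+ #   import random, string
+ #   alnum = string.ascii_letters + string.digits
+ #   key = "%s-%s" % (42, ''.join([random.choice(alnum) for x in range(1, 20)]))
+ #   # e.g. "42-a8Xk0..." -- the user id, a dash, then 19 random alphanumerics
+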
+ def subsession(self):
+ "Create a subsession"
+ if not self.logged_in:
+ raise koji.AuthError, "Not logged in"
+ master = self.master
+ if master is None:
+ master=self.id
+ return self.createSession(self.user_id, self.hostip, self.authtype,
+ master=master)
+
+ def getPerms(self):
+ if not self.logged_in:
+ return []
+ return self.perms.keys()
+
+ def hasPerm(self, name):
+ if not self.logged_in:
+ return False
+ return self.perms.has_key(name)
+
+ def assertPerm(self, name):
+ if not self.hasPerm(name) and not self.hasPerm('admin'):
+ raise koji.ActionNotAllowed, "%s permission required" % name
+
+ def assertLogin(self):
+ if not self.logged_in:
+ raise koji.ActionNotAllowed, "you must be logged in for this operation"
+
+ def hasGroup(self, group_id):
+ if not self.logged_in:
+ return False
+ #groups indexed by id
+ return self.groups.has_key(group_id)
+
+ def isUser(self, user_id):
+ if not self.logged_in:
+ return False
+ return ( self.user_id == user_id or self.hasGroup(user_id) )
+
+ def assertUser(self, user_id):
+ if not self.isUser(user_id) and not self.hasPerm('admin'):
+ raise koji.ActionNotAllowed, "not owner"
+
+ def _getHostId(self):
+ '''Using session data, find host id (if there is one)'''
+ if self.user_id is None:
+ return None
+ c=context.cnx.cursor()
+ q="""SELECT id FROM host WHERE user_id = %(uid)d"""
+ c.execute(q,{'uid' : self.user_id })
+ r=c.fetchone()
+ c.close()
+ if r:
+ return r[0]
+ else:
+ return None
+
+ def getHostId(self):
+ #for compatibility
+ return self.host_id
+
+ def getUserIdFromKerberos(self, krb_principal):
+ """Return the user ID associated with a particular Kerberos principal.
+ If no user with the given principal is found, return None."""
+ c = context.cnx.cursor()
+ q = """SELECT id FROM users WHERE krb_principal = %(krb_principal)s"""
+ c.execute(q,locals())
+ r = c.fetchone()
+ c.close()
+ if r:
+ return r[0]
+ else:
+ return None
+
+ def createUser(self, name, usertype=None, status=None, krb_principal=None):
+ """
+ Create a new user, using the provided values.
+ Return the user_id of the newly-created user.
+ """
+ if not name:
+ raise koji.GenericError, 'a user must have a non-empty name'
+
+ if usertype == None:
+ usertype = koji.USERTYPES['NORMAL']
+ elif not koji.USERTYPES.get(usertype):
+ raise koji.GenericError, 'invalid user type: %s' % usertype
+
+ if status == None:
+ status = koji.USER_STATUS['NORMAL']
+ elif not koji.USER_STATUS.get(status):
+ raise koji.GenericError, 'invalid status: %s' % status
+
+ cursor = context.cnx.cursor()
+ select = """SELECT nextval('users_id_seq')"""
+ cursor.execute(select, locals())
+ user_id = cursor.fetchone()[0]
+
+ insert = """INSERT INTO users (id, name, usertype, status, krb_principal)
+ VALUES (%(user_id)i, %(name)s, %(usertype)i, %(status)i, %(krb_principal)s)"""
+ cursor.execute(insert, locals())
+ context.cnx.commit()
+
+ return user_id
+
+ def setKrbPrincipal(self, name, krb_principal):
+ usertype = koji.USERTYPES['NORMAL']
+ status = koji.USER_STATUS['NORMAL']
+ update = """UPDATE users SET krb_principal = %(krb_principal)s WHERE name = %(name)s AND usertype = %(usertype)i AND status = %(status)i RETURNING users.id"""
+ cursor = context.cnx.cursor()
+ cursor.execute(update, locals())
+ r = cursor.fetchall()
+ if len(r) != 1:
+ context.cnx.rollback()
+ raise koji.AuthError, 'could not automatically associate Kerberos Principal with existing user %s' % (name,)
+ else:
+ context.cnx.commit()
+ return r[0][0]
+
+ def createUserFromKerberos(self, krb_principal):
+ """Create a new user, based on the Kerberos principal. Their
+ username will be everything before the "@" in the principal.
+ Return the ID of the newly created user."""
+ atidx = krb_principal.find('@')
+ if atidx == -1:
+ raise koji.AuthError, 'invalid Kerberos principal: %s' % krb_principal
+ user_name = krb_principal[:atidx]
+
+ # check if user already exists
+ c = context.cnx.cursor()
+ q = """SELECT krb_principal FROM users
+ WHERE name = %(user_name)s"""
+ c.execute(q,locals())
+ r = c.fetchone()
+ if not r:
+ return self.createUser(user_name, krb_principal=krb_principal)
+ else:
+ existing_user_krb = r[0]
+ if existing_user_krb is not None:
+ raise koji.AuthError, 'user %s already associated with other Kerberos principal: %s' % (user_name, existing_user_krb)
+ return self.setKrbPrincipal(user_name, krb_principal)
+
+def get_user_groups(user_id):
+ """Get user groups
+
+ returns a dictionary where the keys are the group ids and the values
+ are the group names"""
+ c = context.cnx.cursor()
+ t_group = koji.USERTYPES['GROUP']
+ q = """SELECT group_id,name
+ FROM user_groups JOIN users ON group_id = users.id
+ WHERE active = TRUE AND users.usertype=%(t_group)i
+ AND user_id=%(user_id)i"""
+ c.execute(q,locals())
+ return dict(c.fetchall())
+
+def get_user_perms(user_id):
+ c = context.cnx.cursor()
+ q = """SELECT name
+ FROM user_perms JOIN permissions ON perm_id = permissions.id
+ WHERE active = TRUE AND user_id=%(user_id)s"""
+ c.execute(q,locals())
+ #return a list of permissions by name
+ return [row[0] for row in c.fetchall()]
+
+def get_user_data(user_id):
+ c = context.cnx.cursor()
+ fields = ('name','status','usertype')
+ q = """SELECT %s FROM users WHERE id=%%(user_id)s""" % ','.join(fields)
+ c.execute(q,locals())
+ row = c.fetchone()
+ if not row:
+ return None
+ return dict(zip(fields,row))
+
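+# Usage sketch for get_user_groups/get_user_perms/get_user_data above
+# (requires a live context.cnx; user id 1 is hypothetical):
+#
+#   groups = get_user_groups(1)   # {group_id: group_name, ...}
+#   perms = get_user_perms(1)     # e.g. ['admin', 'repo']
+#   data = get_user_data(1)       # {'name': ..., 'status': ..., 'usertype': ...}
+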
+def login(*args,**opts):
+ return context.session.login(*args,**opts)
+
+def krbLogin(*args, **opts):
+ return context.session.krbLogin(*args, **opts)
+
+def sslLogin(*args, **opts):
+ return context.session.sslLogin(*args, **opts)
+
+def logout():
+ return context.session.logout()
+
+def subsession():
+ return context.session.subsession()
+
+def logoutChild(session_id):
+ return context.session.logoutChild(session_id)
+
+def exclusiveSession(*args,**opts):
+ """Make this session exclusive"""
+ return context.session.makeExclusive(*args,**opts)
+
+def sharedSession():
+ """Drop out of exclusive mode"""
+ return context.session.makeShared()
+
+
+if __name__ == '__main__':
+ # XXX - testing defaults
+ import db
+ db.setDBopts( database = "test", user = "test")
+ print "Connecting to db"
+ context.cnx = db.connect()
+ print "starting session 1"
+ sess = Session(None,hostip='127.0.0.1')
+ print "Session 1: %s" % sess
+ print "logging in with session 1"
+ session_info = sess.login('host/1','foobar',{'hostip':'127.0.0.1'})
+ #wrap values in lists
+ session_info = dict([ [k,[v]] for k,v in session_info.iteritems()])
+ print "Session 1: %s" % sess
+ print "Session 1 info: %r" % session_info
+ print "Creating session 2"
+ s2 = Session(session_info,'127.0.0.1')
+ print "Session 2: %s " % s2
diff --git a/koji/context.py b/koji/context.py
new file mode 100755
index 0000000..b05e3a3
--- /dev/null
+++ b/koji/context.py
@@ -0,0 +1,112 @@
+#!/usr/bin/python
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+# This module provides a thread-safe way of passing
+# request context around in a global way
+# - db connections
+# - request data
+# - auth data
+
+import thread
+
+class _data(object):
+ pass
+
+class ThreadLocal(object):
+ def __init__(self):
+ object.__setattr__(self, '_tdict', {})
+
+ # should probably be getattribute, but easier to debug this way
+ def __getattr__(self, key):
+ id = thread.get_ident()
+ tdict = object.__getattribute__(self, '_tdict')
+ if not tdict.has_key(id):
+ raise AttributeError(key)
+ data = tdict[id]
+ return object.__getattribute__(data, key)
+
+ def __setattr__(self, key, value):
+ id = thread.get_ident()
+ tdict = object.__getattribute__(self, '_tdict')
+ if not tdict.has_key(id):
+ tdict[id] = _data()
+ data = tdict[id]
+ return object.__setattr__(data,key,value)
+
+ def __delattr__(self, key):
+ id = thread.get_ident()
+ tdict = object.__getattribute__(self, '_tdict')
+ if not tdict.has_key(id):
+ raise AttributeError(key)
+ data = tdict[id]
+ ret = object.__delattr__(data, key)
+ if len(data.__dict__) == 0:
+ del tdict[id]
+ return ret
+
+ def __str__(self):
+ id = thread.get_ident()
+ tdict = object.__getattribute__(self, '_tdict')
+ return "(current thread: %s) {" % id + \
+ ", ".join([ "%s : %s" %(k,v.__dict__) for (k,v) in tdict.iteritems() ]) + \
+ "}"
+
+ def _threadclear(self):
+ id = thread.get_ident()
+ tdict = object.__getattribute__(self, '_tdict')
+ if not tdict.has_key(id):
+ return
+ del tdict[id]
+
+
+context = ThreadLocal()
+
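+# Usage sketch: each thread sees an independent attribute namespace
+# (connect_db() is a hypothetical helper; the hub stores things like
+# cnx, session, opts, and environ here):
+#
+#   from koji.context import context
+#   context.cnx = connect_db()    # visible only to this thread
+#   ...
+#   context.cnx.cursor()          # later, from the same thread
+#   context._threadclear()        # drop this thread's data when done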
+
+if __name__ == '__main__':
+
+ #testing
+
+ #context.foo = 1
+ #context.bar = 2
+ print context
+ #del context.bar
+ print context
+
+ import random
+ import time
+ def test():
+ context.foo=random.random()
+ time.sleep(1.5+random.random())
+ context._threadclear()
+ print context
+
+ for x in xrange(1,10):
+ thread.start_new_thread(test,())
+
+ time.sleep(4)
+ print
+ print context
+
+ context.foo = 1
+ context.bar = 2
+ print context.foo,context.bar
+ print context
+ context._threadclear()
+ print context
diff --git a/koji/daemon.py b/koji/daemon.py
new file mode 100644
index 0000000..0570faa
--- /dev/null
+++ b/koji/daemon.py
@@ -0,0 +1,1190 @@
+# Code shared by various Koji daemons
+
+# Copyright (c) 2010-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+import koji
+import koji.tasks
+from koji.tasks import safe_rmtree
+from koji.util import md5_constructor, adler32_constructor, parseStatus
+import os
+import signal
+import logging
+import urlparse
+from fnmatch import fnmatch
+import base64
+import time
+import sys
+import traceback
+import errno
+import xmlrpclib
+
+
+def incremental_upload(session, fname, fd, path, retries=5, logger=None):
+ if not fd:
+ return
+
+ if logger is None:
+ logger = logging.getLogger('koji.daemon')
+
+ if session.opts.get('use_fast_upload'):
+ fast_incremental_upload(session, fname, fd, path, retries, logger)
+ return
+
+ while True:
+ offset = fd.tell()
+ contents = fd.read(65536)
+ size = len(contents)
+ if size == 0:
+ break
+
+ data = base64.encodestring(contents)
+ digest = md5_constructor(contents).hexdigest()
+ del contents
+
+ tries = 0
+ while True:
+ if session.uploadFile(path, fname, koji.encode_int(size), digest, koji.encode_int(offset), data):
+ break
+
+ if tries <= retries:
+ tries += 1
+ time.sleep(10)
+ continue
+ else:
+ logger.error("Error uploading file %s to %s at offset %d" % (fname, path, offset))
+ break
+
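+# Per-chunk format used with uploadFile() above: each 64 kB chunk is
+# base64-encoded and sent with an md5 hex digest of the raw bytes
+# (md5_constructor is effectively hashlib.md5 where available) so the
+# receiving side can verify the chunk after decoding. A standalone
+# sketch with a literal chunk:
+#
+#   import base64, hashlib
+#   raw = 'one chunk of log output'
+#   data = base64.encodestring(raw)
+#   digest = hashlib.md5(raw).hexdigest()
+#   # -> session.uploadFile(path, fname, len(raw), digest, offset, data)
+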
+def fast_incremental_upload(session, fname, fd, path, retries, logger):
+ """Like incremental_upload, but use the fast upload mechanism"""
+
+ while True:
+ offset = fd.tell()
+ contents = fd.read(65536)
+ if not contents:
+ break
+ hexdigest = adler32_constructor(contents).hexdigest()
+
+ tries = 0
+ while True:
+ result = session.rawUpload(contents, offset, path, fname, overwrite=True)
+ if result['hexdigest'] == hexdigest:
+ break
+
+ if tries <= retries:
+ tries += 1
+ time.sleep(10)
+ continue
+ else:
+ logger.error("Error uploading file %s to %s at offset %d" % (fname, path, offset))
+ break
+
+def log_output(session, path, args, outfile, uploadpath, cwd=None, logerror=0, append=0, chroot=None, env=None):
+ """Run command with output redirected. If chroot is not None, chroot to the directory specified
+ before running the command."""
+ pid = os.fork()
+ fd = None
+ if not pid:
+ session._forget()
+ try:
+ if chroot:
+ os.chroot(chroot)
+ if cwd:
+ os.chdir(cwd)
+ flags = os.O_CREAT | os.O_WRONLY
+ if append:
+ flags |= os.O_APPEND
+ fd = os.open(outfile, flags, 0666)
+ os.dup2(fd, 1)
+ if logerror:
+ os.dup2(fd, 2)
+ # echo the command we're running into the logfile
+ os.write(fd, '$ %s\n' % ' '.join(args))
+ environ = os.environ.copy()
+ if env:
+ environ.update(env)
+ os.execvpe(path, args, environ)
+ except:
+ msg = ''.join(traceback.format_exception(*sys.exc_info()))
+ if fd:
+ try:
+ os.write(fd, msg)
+ os.close(fd)
+ except:
+ pass
+ print msg
+ os._exit(1)
+ else:
+ if chroot:
+ outfile = os.path.normpath(chroot + outfile)
+ outfd = None
+ remotename = os.path.basename(outfile)
+ while True:
+ status = os.waitpid(pid, os.WNOHANG)
+ time.sleep(1)
+
+ if not outfd:
+ try:
+ outfd = file(outfile, 'r')
+ except IOError:
+ # will happen if the forked process has not created the logfile yet
+ continue
+ except:
+ print 'Error reading log file: %s' % outfile
+ print ''.join(traceback.format_exception(*sys.exc_info()))
+
+ incremental_upload(session, remotename, outfd, uploadpath)
+
+ if status[0] != 0:
+ if outfd:
+ outfd.close()
+ return status[1]
+
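+# Usage sketch for log_output() (hypothetical session and paths): run
+# "rpmbuild --version", append its stdout and stderr to a local logfile,
+# and incrementally upload that log to the hub:
+#
+#   rv = log_output(session, '/usr/bin/rpmbuild',
+#                   ['rpmbuild', '--version'],
+#                   '/tmp/task/build.log', 'tasks/123', logerror=1)
+#   # rv is the raw status from os.waitpid(); 0 means success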
+
+## BEGIN kojikamid dup
+
+class SCM(object):
+ "SCM abstraction class"
+
+ types = { 'CVS': ('cvs://',),
+ 'CVS+SSH': ('cvs+ssh://',),
+ 'GIT': ('git://', 'git+http://', 'git+https://', 'git+rsync://'),
+ 'GIT+SSH': ('git+ssh://',),
+ 'SVN': ('svn://', 'svn+http://', 'svn+https://'),
+ 'SVN+SSH': ('svn+ssh://',) }
+
+ def is_scm_url(url):
+ """
+ Return True if the url appears to be a valid, accessible source location, False otherwise
+ """
+ for schemes in SCM.types.values():
+ for scheme in schemes:
+ if url.startswith(scheme):
+ return True
+ else:
+ return False
+ is_scm_url = staticmethod(is_scm_url)
+
+ def __init__(self, url):
+ """
+ Initialize the SCM object using the specified url.
+ The expected url format is:
+
+ scheme://[user@]host/path/to/repo?path/to/module#revision_or_tag_identifier
+
+ The initialized SCM object will have the following attributes:
+ - url (the unmodified url)
+ - scheme
+ - user (may be null)
+ - host
+ - repository
+ - module
+ - revision
+ - use_common (defaults to True, may be set by assert_allowed())
+ - source_cmd (defaults to ['make', 'sources'], may be set by assert_allowed())
+ - scmtype
+
+ The exact format of each attribute is SCM-specific, but the structure of the url
+ must conform to the template above, or an error will be raised.
+ """
+ self.logger = logging.getLogger('koji.build.SCM')
+
+ if not SCM.is_scm_url(url):
+ raise koji.GenericError, 'Invalid SCM URL: %s' % url
+
+ self.url = url
+ scheme, user, host, path, query, fragment = self._parse_url()
+
+ self.scheme = scheme
+ self.user = user
+ self.host = host
+ self.repository = path
+ self.module = query
+ self.revision = fragment
+ self.use_common = True
+ self.source_cmd = ['make', 'sources']
+
+ for scmtype, schemes in SCM.types.items():
+ if self.scheme in schemes:
+ self.scmtype = scmtype
+ break
+ else:
+ # should never happen
+ raise koji.GenericError, 'Invalid SCM URL: %s' % url
+
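+ # Example of the template above with a hypothetical repository
+ # (illustrative values, not part of the upstream module):
+ #
+ #   s = SCM('git://git.example.com/rpms/bash?extras#deadbeef')
+ #   s.scmtype    -> 'GIT'
+ #   s.user       -> None
+ #   s.host       -> 'git.example.com'
+ #   s.repository -> '/rpms/bash'
+ #   s.module     -> 'extras'
+ #   s.revision   -> 'deadbeef'
+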
+ def _parse_url(self):
+ """
+ Parse the SCM url into usable components.
+ Return the following tuple:
+
+ (scheme, user, host, path, query, fragment)
+
+ user may be None, everything else will have a value
+ """
+ # get the url's scheme
+ scheme = self.url.split('://')[0] + '://'
+
+ # replace the scheme with http:// so that the urlparse works in all cases
+ dummyurl = self.url.replace(scheme, 'http://', 1)
+ dummyscheme, netloc, path, params, query, fragment = urlparse.urlparse(dummyurl)
+
+ user = None
+ userhost = netloc.split('@')
+ if len(userhost) == 2:
+ user = userhost[0]
+ if not user:
+ # Don't return an empty string
+ user = None
+ elif ':' in user:
+ raise koji.GenericError, 'username:password format not supported: %s' % user
+ netloc = userhost[1]
+ elif len(userhost) > 2:
+ raise koji.GenericError, 'Invalid username@hostname specified: %s' % netloc
+
+ # ensure that path and query do not end in /
+ if path.endswith('/'):
+ path = path[:-1]
+ if query.endswith('/'):
+ query = query[:-1]
+
+ # check for validity: params should be empty, query may be empty, everything else should be populated
+ if params :
+ raise koji.GenericError, 'Unable to parse SCM URL: %s . Params element %s should be empty.' % (self.url, params)
+ if not scheme :
+ raise koji.GenericError, 'Unable to parse SCM URL: %s . Could not find the scheme element.' % self.url
+ if not netloc :
+ raise koji.GenericError, 'Unable to parse SCM URL: %s . Could not find the netloc element.' % self.url
+ if not path :
+ raise koji.GenericError, 'Unable to parse SCM URL: %s . Could not find the path element.' % self.url
+ if not fragment :
+ raise koji.GenericError, 'Unable to parse SCM URL: %s . Could not find the fragment element.' % self.url
+
+ # return parsed values
+ return (scheme, user, netloc, path, query, fragment)
+
+ def assert_allowed(self, allowed):
+ """
+ Verify that the host and repository of this SCM is in the provided list of
+ allowed repositories.
+
+ allowed is a space-separated list of host:repository[:use_common[:source_cmd]] tuples. Incorrectly formatted
+ tuples will be ignored.
+
+ If use_common is not present, kojid will attempt to check out a common/ directory from the
+ repository. If use_common is set to no, off, false, or 0, it will not attempt to check out a common/
+ directory.
+
+ source_cmd is a shell command (args separated with commas instead of spaces) to run before building the srpm.
+ It is generally used to retrieve source files from a remote location. If no source_cmd is specified,
+ "make sources" is run by default.
+ """
+ for allowed_scm in allowed.split():
+ scm_tuple = allowed_scm.split(':')
+ if len(scm_tuple) >= 2:
+ if fnmatch(self.host, scm_tuple[0]) and fnmatch(self.repository, scm_tuple[1]):
+ # SCM host:repository is in the allowed list
+ # check if we specify a value for use_common
+ if len(scm_tuple) >= 3:
+ if scm_tuple[2].lower() in ('no', 'off', 'false', '0'):
+ self.use_common = False
+ # check if we specify a custom source_cmd
+ if len(scm_tuple) >= 4:
+ if scm_tuple[3]:
+ self.source_cmd = scm_tuple[3].split(',')
+ else:
+ # there was nothing after the trailing :, so they don't want to run a source_cmd at all
+ self.source_cmd = None
+ break
+ else:
+ self.logger.warn('Ignoring incorrectly formatted SCM host:repository: %s' % allowed_scm)
+ else:
+ raise koji.BuildError, '%s:%s is not in the list of allowed SCMs' % (self.host, self.repository)
+
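+ # Sketch of an `allowed` value exercising the rules above (hostnames
+ # and paths are hypothetical; only the first matching tuple applies):
+ #
+ #   allowed = ('git.example.com:/rpms/* '
+ #              'cvs.example.com:/cvs/pkgs:no '
+ #              'src.example.com:/git:yes:fedpkg,sources')
+ #   scm.assert_allowed(allowed)
+ #   # 1st tuple: defaults (use_common=True, source_cmd=['make', 'sources'])
+ #   # 2nd tuple: use_common disabled
+ #   # 3rd tuple: source_cmd becomes ['fedpkg', 'sources']
+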
+ def checkout(self, scmdir, session=None, uploadpath=None, logfile=None):
+ """
+ Check out the module from SCM. Accepts the following parameters:
+ - scmdir: the working directory
+ - session: a ClientSession object
+ - uploadpath: the path on the server the logfile should be uploaded to
+ - logfile: the file used for logging command output
+ session, uploadpath, and logfile are not used when run within kojikamid,
+ but are otherwise required.
+
+ Returns the directory that the module was checked out into (a subdirectory of scmdir)
+ """
+ # TODO: sanity check arguments
+ sourcedir = '%s/%s' % (scmdir, self.module)
+
+ update_checkout_cmd = None
+ update_checkout_dir = None
+ env = None
+ def _run(cmd, chdir=None, fatal=False, log=True, _count=[0]):
+ if globals().get('KOJIKAMID'):
+ #we've been inserted into kojikamid, use its run()
+ return run(cmd, chdir=chdir, fatal=fatal, log=log)
+ else:
+ append = (_count[0] > 0)
+ _count[0] += 1
+ if log_output(session, cmd[0], cmd, logfile, uploadpath,
+ cwd=chdir, logerror=1, append=append, env=env):
+ raise koji.BuildError, 'Error running %s command "%s", see %s for details' % \
+ (self.scmtype, ' '.join(cmd), os.path.basename(logfile))
+
+ if self.scmtype == 'CVS':
+ pserver = ':pserver:%s@%s:%s' % ((self.user or 'anonymous'), self.host, self.repository)
+ module_checkout_cmd = ['cvs', '-d', pserver, 'checkout', '-r', self.revision, self.module]
+ common_checkout_cmd = ['cvs', '-d', pserver, 'checkout', 'common']
+
+ elif self.scmtype == 'CVS+SSH':
+ if not self.user:
+ raise koji.BuildError, 'No user specified for repository access scheme: %s' % self.scheme
+
+ cvsserver = ':ext:%s@%s:%s' % (self.user, self.host, self.repository)
+ module_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', '-r', self.revision, self.module]
+ common_checkout_cmd = ['cvs', '-d', cvsserver, 'checkout', 'common']
+ env = {'CVS_RSH': 'ssh'}
+
+ elif self.scmtype == 'GIT':
+ scheme = self.scheme
+ if '+' in scheme:
+ scheme = scheme.split('+')[1]
+ gitrepo = '%s%s%s' % (scheme, self.host, self.repository)
+ commonrepo = os.path.dirname(gitrepo) + '/common'
+ checkout_path = os.path.basename(self.repository)
+ if self.repository.endswith('/.git'):
+ # If we're referring to the .git subdirectory of the main module,
+ # assume we need to do the same for the common module
+ checkout_path = os.path.basename(self.repository[:-5])
+ commonrepo = os.path.dirname(gitrepo[:-5]) + '/common/.git'
+ elif self.repository.endswith('.git'):
+ # If we're referring to a bare repository for the main module,
+ # assume we need to do the same for the common module
+ checkout_path = os.path.basename(self.repository[:-4])
+ commonrepo = os.path.dirname(gitrepo[:-4]) + '/common.git'
+
+ sourcedir = '%s/%s' % (scmdir, checkout_path)
+ module_checkout_cmd = ['git', 'clone', '-n', gitrepo, sourcedir]
+ common_checkout_cmd = ['git', 'clone', commonrepo, 'common']
+ update_checkout_cmd = ['git', 'reset', '--hard', self.revision]
+ update_checkout_dir = sourcedir
+
+ # self.module may be empty, in which case the specfile should be in the top-level directory
+ if self.module:
+ # Treat the module as a directory inside the git repository
+ sourcedir = '%s/%s' % (sourcedir, self.module)
+
+ elif self.scmtype == 'GIT+SSH':
+ if not self.user:
+ raise koji.BuildError, 'No user specified for repository access scheme: %s' % self.scheme
+ gitrepo = 'git+ssh://%s@%s%s' % (self.user, self.host, self.repository)
+ commonrepo = os.path.dirname(gitrepo) + '/common'
+ checkout_path = os.path.basename(self.repository)
+ if self.repository.endswith('/.git'):
+ # If we're referring to the .git subdirectory of the main module,
+ # assume we need to do the same for the common module
+ checkout_path = os.path.basename(self.repository[:-5])
+ commonrepo = os.path.dirname(gitrepo[:-5]) + '/common/.git'
+ elif self.repository.endswith('.git'):
+ # If we're referring to a bare repository for the main module,
+ # assume we need to do the same for the common module
+ checkout_path = os.path.basename(self.repository[:-4])
+ commonrepo = os.path.dirname(gitrepo[:-4]) + '/common.git'
+
+ sourcedir = '%s/%s' % (scmdir, checkout_path)
+ module_checkout_cmd = ['git', 'clone', '-n', gitrepo, sourcedir]
+ common_checkout_cmd = ['git', 'clone', commonrepo, 'common']
+ update_checkout_cmd = ['git', 'reset', '--hard', self.revision]
+ update_checkout_dir = sourcedir
+
+ # self.module may be empty, in which case the specfile should be in the top-level directory
+ if self.module:
+ # Treat the module as a directory inside the git repository
+ sourcedir = '%s/%s' % (sourcedir, self.module)
+
+ elif self.scmtype == 'SVN':
+ scheme = self.scheme
+ if '+' in scheme:
+ scheme = scheme.split('+')[1]
+
+ svnserver = '%s%s%s' % (scheme, self.host, self.repository)
+ module_checkout_cmd = ['svn', 'checkout', '-r', self.revision, '%s/%s' % (svnserver, self.module), self.module]
+ common_checkout_cmd = ['svn', 'checkout', '%s/common' % svnserver]
+
+ elif self.scmtype == 'SVN+SSH':
+ if not self.user:
+ raise koji.BuildError, 'No user specified for repository access scheme: %s' % self.scheme
+
+ svnserver = 'svn+ssh://%s@%s%s' % (self.user, self.host, self.repository)
+ module_checkout_cmd = ['svn', 'checkout', '-r', self.revision, '%s/%s' % (svnserver, self.module), self.module]
+ common_checkout_cmd = ['svn', 'checkout', '%s/common' % svnserver]
+
+ else:
+ raise koji.BuildError, 'Unknown SCM type: %s' % self.scmtype
+
+ # perform checkouts
+ _run(module_checkout_cmd, chdir=scmdir, fatal=True)
+
+ if update_checkout_cmd:
+ # Currently only required for GIT checkouts
+ # Run the command in the directory the source was checked out into
+ if self.scmtype.startswith('GIT') and globals().get('KOJIKAMID'):
+ _run(['git', 'config', 'core.autocrlf', 'true'], chdir=update_checkout_dir, fatal=True)
+ _run(['git', 'config', 'core.safecrlf', 'true'], chdir=update_checkout_dir, fatal=True)
+ _run(update_checkout_cmd, chdir=update_checkout_dir, fatal=True)
+
+ if self.use_common and not globals().get('KOJIKAMID'):
+ _run(common_checkout_cmd, chdir=scmdir, fatal=True)
+ if not os.path.exists('%s/../common' % sourcedir):
+ # find the relative distance from sourcedir/../common to scmdir/common
+ destdir = os.path.split(sourcedir)[0]
+ path_comps = destdir[len(scmdir) + 1:]
+ rel_path = '../' * len(path_comps.split('/'))
+ os.symlink(rel_path + 'common', '%s/../common' % sourcedir)
+
+ return sourcedir
+
+## END kojikamid dup
+
+
+class TaskManager(object):
+
+ def __init__(self, options, session):
+ self.options = options
+ self.session = session
+ self.tasks = {}
+ self.pids = {}
+ self.subsessions = {}
+ self.handlers = {}
+ self.status = ''
+ self.restart_pending = False
+ self.ready = False
+ self.hostdata = {}
+ self.task_load = 0.0
+ self.host_id = self.session.host.getID()
+ self.start_time = self.session.getSessionInfo()['start_time']
+ self.logger = logging.getLogger("koji.TaskManager")
+
+ def findHandlers(self, vars):
+ """Find and index task handlers"""
+ for v in vars.values():
+ if type(v) == type(koji.tasks.BaseTaskHandler) and issubclass(v,koji.tasks.BaseTaskHandler):
+ for method in v.Methods:
+ self.handlers[method] = v
+
+ def scanPlugin(self, plugin):
+ """Find task handlers in a plugin"""
+ self.findHandlers(vars(plugin))
+
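+ # Sketch: any koji.tasks.BaseTaskHandler subclass found in the scanned
+ # namespace is registered once per entry in its Methods list. Given a
+ # TaskManager instance tm and a hypothetical handler:
+ #
+ #   class EchoTask(koji.tasks.BaseTaskHandler):
+ #       Methods = ['echo']
+ #       def handler(self, *args):
+ #           return args
+ #
+ #   tm.findHandlers({'EchoTask': EchoTask})
+ #   # tm.handlers['echo'] is now EchoTask
+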
+ def shutdown(self):
+ """Attempt to shut down cleanly"""
+ for task_id in self.pids.keys():
+ self.cleanupTask(task_id)
+ self.session.host.freeTasks(self.tasks.keys())
+ self.session.host.updateHost(task_load=0.0,ready=False)
+
+ def updateBuildroots(self, nolocal=False):
+ """Handle buildroot cleanup/maintenance
+
+ - examine current buildroots on system
+ - compare with db
+ - clean up as needed
+ - /var/lib/mock
+ - /etc/mock/koji
+
+ If nolocal is True, do not try to scan local buildroots.
+ """
+ #query buildroots in db that are not expired
+ states = [ koji.BR_STATES[x] for x in ('INIT','WAITING','BUILDING') ]
+ db_br = self.session.listBuildroots(hostID=self.host_id,state=tuple(states))
+ # index by id
+ db_br = dict([(row['id'],row) for row in db_br])
+ st_expired = koji.BR_STATES['EXPIRED']
+ for id, br in db_br.items():
+ task_id = br['task_id']
+ if task_id is None:
+ # not associated with a task
+ # this makes no sense now, but may in the future
+ self.logger.warn("Expiring taskless buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)
+ self.session.host.setBuildRootState(id,st_expired)
+ elif not self.tasks.has_key(task_id):
+ #task not running - expire the buildroot
+ #TODO - consider recycling hooks here (with strong sanity checks)
+ self.logger.info("Expiring buildroot: %(id)i/%(tag_name)s/%(arch)s" % br)
+ self.logger.debug("Buildroot task: %r, Current tasks: %r" % (task_id,self.tasks.keys()))
+ self.session.host.setBuildRootState(id,st_expired)
+ continue
+ if nolocal:
+ return
+ local_br = self._scanLocalBuildroots()
+ # get info on local_only buildroots (most likely expired)
+ local_only = [id for id in local_br.iterkeys() if not db_br.has_key(id)]
+ if local_only:
+ missed_br = self.session.listBuildroots(buildrootID=tuple(local_only))
+ #get all the task info in one call
+ tasks = []
+ for br in missed_br:
+ task_id = br['task_id']
+ if task_id:
+ tasks.append(task_id)
+ #index
+ missed_br = dict([(row['id'],row) for row in missed_br])
+ tasks = dict([(row['id'],row) for row in self.session.getTaskInfo(tasks)])
+ for id in local_only:
+ # Cleaning options
+ # - wait until later
+ # - "soft" clean (leaving empty root/ dir)
+ # - full removal
+ data = local_br[id]
+ br = missed_br.get(id)
+ if not br:
+ self.logger.warn("%(name)s: not in db" % data)
+ continue
+ desc = "%(id)i/%(tag_name)s/%(arch)s" % br
+ if not br['retire_ts']:
+ self.logger.warn("%s: no retire timestamp" % desc)
+ continue
+ age = time.time() - br['retire_ts']
+ self.logger.debug("Expired/stray buildroot: %s" % desc)
+ if br and br['task_id']:
+ task = tasks.get(br['task_id'])
+ if not task:
+ self.logger.warn("%s: invalid task %s" % (desc, br['task_id']))
+ continue
+ if (task['state'] == koji.TASK_STATES['FAILED'] and age < self.options.failed_buildroot_lifetime):
+ #XXX - this could be smarter
+ # keep buildroots for failed tasks around for a little while
+ self.logger.debug("Keeping failed buildroot: %s" % desc)
+ continue
+ topdir = data['dir']
+ rootdir = None
+ if topdir:
+ rootdir = "%s/root" % topdir
+ try:
+ st = os.lstat(rootdir)
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ rootdir = None
+ else:
+ self.logger.warn("%s: %s" % (desc, e))
+ continue
+ else:
+ age = min(age, time.time() - st.st_mtime)
+ #note: https://bugzilla.redhat.com/bugzilla/show_bug.cgi?id=192153
+ #If rpmlib is installing in this chroot, removing it entirely
+ #can lead to a world of hurt.
+ #We remove the rootdir contents but leave the rootdir unless it
+ #is really old
+ if age > 3600*24:
+ #dir untouched for a day
+ self.logger.info("Removing buildroot: %s" % desc)
+ if topdir and safe_rmtree(topdir, unmount=True, strict=False) != 0:
+ continue
+ #also remove the config
+ try:
+ os.unlink(data['cfg'])
+ except OSError, e:
+ self.logger.warn("%s: can't remove config: %s" % (desc, e))
+ elif age > 120:
+ if rootdir:
+ try:
+ flist = os.listdir(rootdir)
+ except OSError, e:
+ self.logger.warn("%s: can't list rootdir: %s" % (desc, e))
+ continue
+ if flist:
+ self.logger.info("%s: clearing rootdir" % desc)
+ for fn in flist:
+ safe_rmtree("%s/%s" % (rootdir,fn), unmount=True, strict=False)
+ resultdir = "%s/result" % topdir
+ if os.path.isdir(resultdir):
+ self.logger.info("%s: clearing resultdir" % desc)
+ safe_rmtree(resultdir, unmount=True, strict=False)
+ else:
+ self.logger.debug("Recent buildroot: %s: %i seconds" % (desc,age))
+ self.logger.debug("Local buildroots: %d" % len(local_br))
+ self.logger.debug("Active buildroots: %d" % len(db_br))
+ self.logger.debug("Expired/stray buildroots: %d" % len(local_only))
+
+ def _scanLocalBuildroots(self):
+ #XXX
+ configdir = '/etc/mock/koji'
+ buildroots = {}
+ for f in os.listdir(configdir):
+ if not f.endswith('.cfg'):
+ continue
+ fn = "%s/%s" % (configdir,f)
+ if not os.path.isfile(fn):
+ continue
+ fo = file(fn,'r')
+ id = None
+ name = None
+ for n in xrange(10):
+ # data should be in first few lines
+ line = fo.readline()
+ if line.startswith('# Koji buildroot id:'):
+ try:
+ id = int(line.split(':')[1])
+ except (ValueError, IndexError):
+ continue
+ if line.startswith('# Koji buildroot name:'):
+ try:
+ name = line.split(':')[1].strip()
+ except (ValueError, IndexError):
+ continue
+ if id is None or name is None:
+ continue
+ # see if there's a dir for the buildroot
+ vardir = os.path.join(self.options.mockdir, name)
+ buildroots[id] = {}
+ buildroots[id]['name'] = name
+ buildroots[id]['cfg'] = fn
+ buildroots[id]['dir'] = None
+ if os.path.isdir(vardir):
+ buildroots[id]['dir'] = vardir
+ return buildroots
+
+ def updateTasks(self):
+ """Read and process task statuses from server
+
+ The processing we do is:
+ 1) clean up after tasks that are no longer active:
+ * kill off processes
+ * retire buildroots
+ * remove buildroots
+ - with some possible exceptions
+ 2) wake waiting tasks if appropriate
+ """
+ tasks = {}
+ stale = []
+ task_load = 0.0
+ if self.pids:
+ self.logger.info("pids: %r" % self.pids)
+ for task in self.session.host.getHostTasks():
+ self.logger.info("open task: %r" % task)
+ # the tasks returned are those that are open and locked
+ # by this host.
+ id = task['id']
+ if not self.pids.has_key(id):
+ #We don't have a process for this
+ #Expected to happen after a restart, otherwise this is an error
+ stale.append(id)
+ continue
+ tasks[id] = task
+ if task.get('alert',False):
+ #wake up the process
+ self.logger.info("Waking up task: %r" % task)
+ os.kill(self.pids[id],signal.SIGUSR2)
+ if not task['waiting']:
+ task_load += task['weight']
+ self.logger.debug("Task Load: %s" % task_load)
+ self.task_load = task_load
+ self.tasks = tasks
+ self.logger.debug("Current tasks: %r" % self.tasks)
+ if len(stale) > 0:
+ #A stale task is one which is opened to us, but we know nothing
+ #about. This will happen after a daemon restart, for example.
+ self.logger.info("freeing stale tasks: %r" % stale)
+ self.session.host.freeTasks(stale)
+ for id, pid in self.pids.items():
+ if self._waitTask(id, pid):
+ # the subprocess handles most everything, we just need to clear things out
+ if self.cleanupTask(id, wait=False):
+ del self.pids[id]
+ if self.tasks.has_key(id):
+ del self.tasks[id]
+ for id, pid in self.pids.items():
+ if not tasks.has_key(id):
+ # expected to happen when:
+ # - we are in the narrow gap between the time the task
+ # records its result and the time the process actually
+ # exits.
+ # - task is canceled
+ # - task is forcibly reassigned/unassigned
+ tinfo = self.session.getTaskInfo(id)
+ if tinfo is None:
+ raise koji.GenericError, "Invalid task %r (pid %r)" % (id,pid)
+ elif tinfo['state'] == koji.TASK_STATES['CANCELED']:
+ self.logger.info("Killing canceled task %r (pid %r)" % (id,pid))
+ if self.cleanupTask(id):
+ del self.pids[id]
+ elif tinfo['host_id'] != self.host_id:
+ self.logger.info("Killing reassigned task %r (pid %r)" % (id,pid))
+ if self.cleanupTask(id):
+ del self.pids[id]
+ else:
+ self.logger.info("Lingering task %r (pid %r)" % (id,pid))
+
+ def getNextTask(self):
+ self.ready = self.readyForTask()
+ self.session.host.updateHost(self.task_load,self.ready)
+ if not self.ready:
+ self.logger.info("Not ready for task")
+ return False
+ hosts, tasks = self.session.host.getLoadData()
+ self.logger.debug("Load Data:")
+ self.logger.debug(" hosts: %r" % hosts)
+ self.logger.debug(" tasks: %r" % tasks)
+ #now we organize this data into channel-arch bins
+ bin_hosts = {} #hosts indexed by bin
+ bins = {} #bins for this host
+ our_avail = None
+ for host in hosts:
+ host['bins'] = []
+ if host['id'] == self.host_id:
+ #note: task_load reported by server might differ from what we
+ #sent due to precision variation
+ our_avail = host['capacity'] - host['task_load']
+ for chan in host['channels']:
+ for arch in host['arches'].split() + ['noarch']:
+ bin = "%s:%s" % (chan,arch)
+ bin_hosts.setdefault(bin,[]).append(host)
+ if host['id'] == self.host_id:
+ bins[bin] = 1
+ self.logger.debug("bins: %r" % bins)
+ if our_avail is None:
+ self.logger.info("Server did not report this host. Are we disabled?")
+ return False
+ elif not bins:
+ self.logger.info("No bins for this host. Missing channel/arch config?")
+ return False
+ #sort available capacities for each of our bins
+ avail = {}
+ for bin in bins.iterkeys():
+ avail[bin] = [host['capacity'] - host['task_load'] for host in bin_hosts[bin]]
+ avail[bin].sort()
+ avail[bin].reverse()
+ for task in tasks:
+ # note: tasks are in priority order
+ self.logger.debug("task: %r" % task)
+ if task['method'] not in self.handlers:
+ self.logger.warn("Skipping task %(id)i, no handler for method %(method)s", task)
+ continue
+ if self.tasks.has_key(task['id']):
+ # we were running this task, but it apparently has been
+ # freed or reassigned. We can't do anything with it until
+ # updateTasks notices this and cleans up.
+ self.logger.debug("Task %(id)s freed or reassigned", task)
+ continue
+ if task['state'] == koji.TASK_STATES['ASSIGNED']:
+ self.logger.debug("task is assigned")
+ if self.host_id == task['host_id']:
+ #assigned to us, we can take it regardless
+ if self.takeTask(task):
+ return True
+ elif task['state'] == koji.TASK_STATES['FREE']:
+ bin = "%(channel_id)s:%(arch)s" % task
+ self.logger.debug("task is free, bin=%r" % bin)
+ if not bins.has_key(bin):
+ continue
+ #see where our available capacity is compared to other hosts for this bin
+ #(note: the hosts in this bin are exactly those that could
+ #accept this task)
+ bin_avail = avail.get(bin, [0])
+ self.logger.debug("available capacities for bin: %r" % bin_avail)
+ median = bin_avail[(len(bin_avail)-1)/2]
+ self.logger.debug("ours: %.2f, median: %.2f" % (our_avail, median))
+ if not self.checkRelAvail(bin_avail, our_avail):
+ #decline for now and give the upper half a chance
+ return False
+ #otherwise, we attempt to open the task
+ if self.takeTask(task):
+ return True
+ else:
+ #should not happen
+ raise Exception, "Invalid task state reported by server"
+ return False
+
+ def checkRelAvail(self, bin_avail, avail):
+ """
+ Check our available capacity against the capacity of other hosts in this bin.
+ Return True if we should take a task, False otherwise.
+ """
+ median = bin_avail[(len(bin_avail)-1)/2]
+ self.logger.debug("ours: %.2f, median: %.2f" % (avail, median))
+ if avail >= median:
+ return True
+ else:
+ self.logger.debug("Skipping - available capacity in lower half")
+ return False
+
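+ # Worked example of the median rule above (capacities illustrative):
+ # bin_avail is sorted in descending order, so for
+ # bin_avail = [6.0, 4.0, 1.0] the median is bin_avail[1] == 4.0;
+ # a host with avail >= 4.0 takes the task, while a host with 1.0
+ # declines and leaves the task for the upper half.
+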
+ def _waitTask(self, task_id, pid=None):
+ """Wait (nohang) on the task, return true if finished"""
+ if pid is None:
+ pid = self.pids.get(task_id)
+ if not pid:
+ raise koji.GenericError, "No pid for task %i" % task_id
+ prefix = "Task %i (pid %i)" % (task_id, pid)
+ try:
+ (childpid, status) = os.waitpid(pid, os.WNOHANG)
+ except OSError, e:
+ #check errno
+ if e.errno != errno.ECHILD:
+ #should not happen
+ raise
+ #otherwise assume the process is gone
+ self.logger.info("%s: %s" % (prefix, e))
+ return True
+ if childpid != 0:
+ self.logger.info(parseStatus(status, prefix))
+ return True
+ return False
+
+ def _doKill(self, task_id, pid, cmd, sig, timeout, pause):
+ """
+ Kill the process with the given process ID.
+ Return True if the process is successfully killed in
+ the given timeout, False otherwise.
+ """
+ self.logger.info('Checking "%s" (pid %i, taskID %i)...' % (cmd, pid, task_id))
+ execname = cmd.split()[0]
+ signaled = False
+ t = 0.0
+ while True:
+ status = self._getStat(pid)
+ if status and status[1] == cmd and status[2] != 'Z':
+ self.logger.info('%s (pid %i, taskID %i) is running' % (execname, pid, task_id))
+ else:
+ if signaled:
+ self.logger.info('%s (pid %i, taskID %i) was killed by signal %i' % (execname, pid, task_id, sig))
+ else:
+ self.logger.info('%s (pid %i, taskID %i) exited' % (execname, pid, task_id))
+ return True
+
+ if t >= timeout:
+ self.logger.warn('Failed to kill %s (pid %i, taskID %i) with signal %i' %
+ (execname, pid, task_id, sig))
+ return False
+
+ try:
+ os.kill(pid, sig)
+ except OSError, e:
+ # process probably went away, we'll find out on the next iteration
+ self.logger.info('Error sending signal %i to %s (pid %i, taskID %i): %s' %
+ (sig, execname, pid, task_id, e))
+ else:
+ signaled = True
+ self.logger.info('Sent signal %i to %s (pid %i, taskID %i)' %
+ (sig, execname, pid, task_id))
+
+ time.sleep(pause)
+ t += pause
+
+ def _getStat(self, pid):
+ """
+ Get the stat info for the given pid.
+ Return a list of all the fields in /proc/<pid>/stat.
+ The second entry will contain the full command-line instead of
+ just the command name.
+ If the process does not exist, return None.
+ """
+ try:
+ proc_path = '/proc/%i/stat' % pid
+ if not os.path.isfile(proc_path):
+ return None
+ proc_file = file(proc_path)
+ procstats = [not field.isdigit() and field or int(field) for field in proc_file.read().split()]
+ proc_file.close()
+
+ cmd_path = '/proc/%i/cmdline' % pid
+ if not os.path.isfile(cmd_path):
+ return None
+ cmd_file = file(cmd_path)
+ procstats[1] = cmd_file.read().replace('\0', ' ').strip()
+ cmd_file.close()
+ if not procstats[1]:
+ return None
+
+ return procstats
+ except IOError, e:
+ # process may have already gone away
+ return None
+
+ def _childPIDs(self, pid):
+ """Recursively get the children of the process with the given ID.
+ Return a list containing the process IDs of the children
+ in breadth-first order, without duplicates."""
+ statsByPPID = {}
+ pidcmd = None
+ for procdir in os.listdir('/proc'):
+ if not procdir.isdigit():
+ continue
+ procid = int(procdir)
+ procstats = self._getStat(procid)
+ if not procstats:
+ continue
+ statsByPPID.setdefault(procstats[3], []).append(procstats)
+ if procid == pid:
+ pidcmd = procstats[1]
+
+ pids = []
+ if pidcmd:
+ # only append the pid if it still exists
+ pids.append((pid, pidcmd))
+
+ parents = [pid]
+ while parents:
+ for ppid in parents[:]:
+ for procstats in statsByPPID.get(ppid, []):
+ # get the /proc entries with ppid as their parent, and append their pid to the list,
+ # then recheck for their children
+ # pid is the 0th field, ppid is the 3rd field
+ pids.append((procstats[0], procstats[1]))
+ parents.append(procstats[0])
+ parents.remove(ppid)
+
+ return pids
+
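+ # Illustration with a hypothetical process tree: if pid 100 has
+ # children 101 and 102, and 103 is a child of 101, the walk above
+ # returns [(100, cmd), (101, cmd), (102, cmd), (103, cmd)] --
+ # parents always precede their descendants, so _killChildren() can
+ # iterate in reverse to signal the deepest processes first.
+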
+ def _killChildren(self, task_id, children, sig=signal.SIGTERM, timeout=2.0, pause=1.0):
+ """
+ Kill child processes of the given task, as specified in the children list,
+ by sending sig.
+ Retry every pause seconds, within timeout.
+ Remove successfully killed processes from the "children" list.
+ """
+ for childpid, cmd in children[::-1]:
+ # iterate in reverse order so descendants are signaled before their
+ # parents, giving each parent a chance to clean up before it is killed
+ if self._doKill(task_id, childpid, cmd, sig, timeout, pause):
+ children.remove((childpid, cmd))
+
+ def cleanupTask(self, task_id, wait=True):
+ """Clean up after task
+
+ - kill children
+ - expire session
+
+ Return True if all children were successfully killed, False otherwise.
+ """
+ pid = self.pids.get(task_id)
+ if not pid:
+ raise koji.GenericError, "No pid for task %i" % task_id
+ children = self._childPIDs(pid)
+ if children:
+ # send SIGINT once to let mock try to clean up
+ self._killChildren(task_id, children, sig=signal.SIGINT, pause=3.0)
+ if children:
+ self._killChildren(task_id, children)
+ if children:
+ self._killChildren(task_id, children, sig=signal.SIGKILL, timeout=3.0)
+
+ #expire the task's subsession
+ session_id = self.subsessions.get(task_id)
+ if session_id:
+ self.logger.info("Expiring subsession %i (task %i)" % (session_id, task_id))
+ try:
+ self.session.logoutChild(session_id)
+ del self.subsessions[task_id]
+ except:
+ #not much we can do about it
+ pass
+ if wait:
+ return self._waitTask(task_id, pid)
+ else:
+ # task has already been waited on, and we've cleaned
+ # up as much as we can
+ return True
+
+ def checkSpace(self):
+ """See if we have enough space to accept another job"""
+ br_path = self.options.mockdir
+ if not os.path.exists(br_path):
+ self.logger.error("No such directory: %s" % br_path)
+ raise IOError, "No such directory: %s" % br_path
+ fs_stat = os.statvfs(br_path)
+ available = fs_stat.f_bavail * fs_stat.f_bsize
+ availableMB = available / 1024 / 1024
+ self.logger.debug("disk space available in '%s': %i MB", br_path, availableMB)
+ if availableMB < self.options.minspace:
+ self.status = "Insufficient disk space: %i MB, %i MB required" % (availableMB, self.options.minspace)
+ self.logger.warn(self.status)
+ return False
+ return True
+
+ def readyForTask(self):
+ """Determine if the system is ready to accept a new task.
+
+ This function measures the system load and tries to determine
+ if there is room to accept a new task."""
+ # key resources to track:
+ # disk_space
+ # df -P path
+ # df -iP path ?
+ # memory (meminfo/vmstat)
+ # vmstat fields 3-6 (also 7-8 for swap)
+ # http://www.redhat.com/advice/tips/meminfo.html
+ # cpu cycles (vmstat?)
+ # vmstat fields 13-16 (and others?)
+ # others?:
+ # io (iostat/vmstat)
+ # network (netstat?)
+ if self.restart_pending:
+ if self.tasks:
+ return False
+ else:
+ raise koji.tasks.ServerRestart
+ self.hostdata = self.session.host.getHost()
+ self.logger.debug('hostdata: %r' % self.hostdata)
+ if not self.hostdata['enabled']:
+ self.status = "Host is disabled"
+ self.logger.info(self.status)
+ return False
+ if self.task_load > self.hostdata['capacity']:
+ self.status = "Over capacity"
+ self.logger.info("Task load (%.2f) exceeds capacity (%.2f)" % (self.task_load, self.hostdata['capacity']))
+ return False
+ if len(self.tasks) >= self.options.maxjobs:
+ # This serves as a backup to the capacity check and prevents
+ # a tremendous number of low weight jobs from piling up
+ self.status = "Full queue"
+ self.logger.info(self.status)
+ return False
+ if not self.checkSpace():
+ # checkSpace() does its own logging
+ return False
+ loadavgs = os.getloadavg()
+ # this likely treats HT processors the same as real ones
+ # but that's fine, it's a conservative test
+ maxload = 4.0 * os.sysconf('SC_NPROCESSORS_ONLN')
+ if loadavgs[0] > maxload:
+ self.status = "Load average %.2f > %.2f" % (loadavgs[0], maxload)
+ self.logger.info(self.status)
+ return False
+ #XXX - add more checks
+ return True
+
+ def takeTask(self,task):
+ """Attempt to open the specified task
+
+ Returns True if successful, False otherwise
+ """
+ self.logger.info("Attempting to take task %s" % task['id'])
+ method = task['method']
+ if self.handlers.has_key(method):
+ handlerClass = self.handlers[method]
+ else:
+ raise koji.GenericError, "No handler found for method '%s'" % method
+ task_info = self.session.getTaskInfo(task['id'], request=True)
+ if task_info.get('request') is None:
+ self.logger.warn("Task '%s' has no request" % task['id'])
+ return False
+ params = task_info['request']
+ handler = handlerClass(task_info['id'], method, params, self.session, self.options)
+ if hasattr(handler, 'checkHost'):
+ try:
+ valid_host = handler.checkHost(self.hostdata)
+ except (SystemExit,KeyboardInterrupt):
+ raise
+ except:
+ valid_host = False
+ self.logger.warn('Error during host check')
+ self.logger.warn(''.join(traceback.format_exception(*sys.exc_info())))
+ if not valid_host:
+ self.logger.info('Skipping task %s (%s) due to host check', task['id'], task['method'])
+ return False
+ data = self.session.host.openTask(task['id'])
+ if data is None:
+ self.logger.warn("Could not open")
+ return False
+ task_id = data['id']
+ request = data['request']
+ self.tasks[task_id] = data
+ # set weight
+ try:
+ self.session.host.setTaskWeight(task_id, handler.weight())
+ except koji.ActionNotAllowed:
+ info2 = self.session.getTaskInfo(task['id'])
+ if info2['host_id'] != self.host_id:
+ self.logger.warn("Task %i was reassigned", task_id)
+ return False
+ state = koji.TASK_STATES[info2['state']]
+ if state != 'OPEN':
+ self.logger.warn("Task %i changed is %s", task_id, state)
+ return False
+ #otherwise...
+ raise
+ if handler.Foreground:
+ self.logger.info("running task in foreground")
+ handler.setManager(self)
+ self.runTask(handler)
+ else:
+ pid, session_id = self.forkTask(handler)
+ self.pids[task_id] = pid
+ self.subsessions[task_id] = session_id
+ return True
+
+ def forkTask(self,handler):
+ #get the subsession before we fork
+ newhub = self.session.subsession()
+ session_id = newhub.sinfo['session-id']
+ pid = os.fork()
+ if pid:
+ newhub._forget()
+ return pid, session_id
+ #in no circumstance should we return after the fork
+ #nor should any exceptions propagate past here
+ try:
+ self.session._forget()
+ #set process group
+ os.setpgrp()
+ #use the subsession
+ self.session = newhub
+ handler.session = self.session
+ #set a do-nothing handler for sigusr2
+ signal.signal(signal.SIGUSR2,lambda *args: None)
+ self.runTask(handler)
+ finally:
+ #diediedie
+ try:
+ self.session.logout()
+ finally:
+ os._exit(0)
+
+ def runTask(self,handler):
+ try:
+ response = (handler.run(),)
+ # note that we wrap response in a singleton tuple
+ response = xmlrpclib.dumps(response, methodresponse=1, allow_none=1)
+ self.logger.info("RESPONSE: %r" % response)
+ self.session.host.closeTask(handler.id, response)
+ return
+ except xmlrpclib.Fault, fault:
+ response = xmlrpclib.dumps(fault)
+ tb = ''.join(traceback.format_exception(*sys.exc_info())).replace(r"\n", "\n")
+ self.logger.warn("FAULT:\n%s" % tb)
+ except (SystemExit,koji.tasks.ServerExit,KeyboardInterrupt):
+ #we do not trap these
+ raise
+ except koji.tasks.ServerRestart:
+ #freeing this task will allow the pending restart to take effect
+ self.session.host.freeTasks([handler.id])
+ return
+ except:
+ tb = ''.join(traceback.format_exception(*sys.exc_info()))
+ self.logger.warn("TRACEBACK: %s" % tb)
+ # report exception back to server
+ e_class, e = sys.exc_info()[:2]
+ faultCode = getattr(e_class,'faultCode',1)
+ if issubclass(e_class, koji.GenericError):
+ #just pass it through
+ tb = str(e)
+ response = xmlrpclib.dumps(xmlrpclib.Fault(faultCode, tb))
+
+ # if we get here, then we're handling an exception, so fail the task
+ self.session.host.failTask(handler.id, response)
diff --git a/koji/db.py b/koji/db.py
new file mode 100644
index 0000000..dcd2461
--- /dev/null
+++ b/koji/db.py
@@ -0,0 +1,171 @@
+# python library
+
+# db utilities for koji
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+
+import logging
+import sys
+import pgdb
+import time
+import traceback
+_quoteparams = None
+try:
+ from pgdb import _quoteparams
+except ImportError:
+ pass
+assert pgdb.threadsafety >= 1
+import context
+
+## Globals ##
+_DBopts = None
+# A persistent connection to the database.
+# A new connection will be created whenever
+# Apache forks a new worker, and that connection
+# will be used to service all requests handled
+# by that worker.
+# This probably doesn't need to be a ThreadLocal
+# since Apache is not using threading,
+# but play it safe anyway.
+_DBconn = context.ThreadLocal()
+
+class DBWrapper:
+ def __init__(self, cnx):
+ self.cnx = cnx
+
+ def __getattr__(self, key):
+ if not self.cnx:
+ raise StandardError, 'connection is closed'
+ return getattr(self.cnx, key)
+
+ def cursor(self, *args, **kw):
+ if not self.cnx:
+ raise StandardError, 'connection is closed'
+ return CursorWrapper(self.cnx.cursor(*args, **kw))
+
+ def close(self):
+ # Roll back any uncommitted changes and clear the connection so
+ # this DBWrapper is no longer usable after close()
+ if not self.cnx:
+ raise StandardError, 'connection is closed'
+ self.cnx.cursor().execute('ROLLBACK')
+ #We do this rather than cnx.rollback to avoid opening a new transaction
+ #If our connection gets recycled cnx.rollback will be called then.
+ self.cnx = None
+
+
+class CursorWrapper:
+ def __init__(self, cursor):
+ self.cursor = cursor
+ self.logger = logging.getLogger('koji.db')
+
+ def __getattr__(self, key):
+ return getattr(self.cursor, key)
+
+ def _timed_call(self, method, args, kwargs):
+ start = time.time()
+ ret = getattr(self.cursor,method)(*args,**kwargs)
+ self.logger.debug("%s operation completed in %.4f seconds", method, time.time() - start)
+ return ret
+
+ def fetchone(self,*args,**kwargs):
+ return self._timed_call('fetchone',args,kwargs)
+
+ def fetchall(self,*args,**kwargs):
+ return self._timed_call('fetchall',args,kwargs)
+
+ def quote(self, operation, parameters):
+ if _quoteparams is not None:
+ quote = _quoteparams
+ elif hasattr(self.cursor, "_quoteparams"):
+ quote = self.cursor._quoteparams
+ else:
+ quote = lambda a,b: a % b
+ try:
+ return quote(operation, parameters)
+ except Exception:
+ self.logger.exception('Unable to quote query:\n%s\nParameters: %s', operation, parameters)
+ return "INVALID QUERY"
+
+ def execute(self, operation, parameters=()):
+ debug = self.logger.isEnabledFor(logging.DEBUG)
+ if debug:
+ self.logger.debug(self.quote(operation, parameters))
+ start = time.time()
+ try:
+ ret = self.cursor.execute(operation, parameters)
+ except Exception:
+ self.logger.error('Query failed. Query was: %s', self.quote(operation, parameters))
+ raise
+ if debug:
+ self.logger.debug("Execute operation completed in %.4f seconds", time.time() - start)
+ return ret
+
+
+## Functions ##
+def provideDBopts(**opts):
+ global _DBopts
+ if _DBopts is None:
+ _DBopts = opts
+
+def setDBopts(**opts):
+ global _DBopts
+ _DBopts = opts
+
+def getDBopts():
+ return _DBopts
+
+def connect():
+ logger = logging.getLogger('koji.db')
+ global _DBconn
+ if hasattr(_DBconn, 'conn'):
+ # Make sure the previous transaction has been
+ # closed. This is safe to call multiple times.
+ conn = _DBconn.conn
+ try:
+ # Under normal circumstances, the last use of this connection
+ # will have issued a raw ROLLBACK to close the transaction. To
+ # avoid 'no transaction in progress' warnings (depending on postgres
+ # configuration) we open a new one here.
+ # Should there somehow be a transaction in progress, a second
+ # BEGIN will be a harmless no-op, though there may be a warning.
+ conn.cursor().execute('BEGIN')
+ conn.rollback()
+ return DBWrapper(conn)
+ except pgdb.Error:
+ del _DBconn.conn
+ #create a fresh connection
+ opts = _DBopts
+ if opts is None:
+ opts = {}
+ try:
+ conn = pgdb.connect(**opts)
+ except Exception:
+ logger.error(''.join(traceback.format_exception(*sys.exc_info())))
+ raise
+ # XXX test
+ # return conn
+ _DBconn.conn = conn
+
+ return DBWrapper(conn)
+
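+# Typical startup sketch for this module (hypothetical option values):
+#
+#   import db
+#   db.provideDBopts(database='koji', user='koji', host='db.example.com')
+#   conn = db.connect()   # one persistent connection per worker
+#   c = conn.cursor()
+#   c.execute('SELECT 1')
+#   conn.close()          # raw ROLLBACK, then the wrapper is detached
+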
+if __name__ == "__main__":
+ setDBopts( database = "test", user = "test")
+ print "This is a Python library"
diff --git a/koji/plugin.py b/koji/plugin.py
new file mode 100644
index 0000000..cbb245e
--- /dev/null
+++ b/koji/plugin.py
@@ -0,0 +1,169 @@
+# koji plugin module
+# Copyright (c) 2008-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+import imp
+import koji
+import logging
+import sys
+import traceback
+
+# the available callback hooks and a list
+# of functions to be called for each event
+callbacks = {
+ 'prePackageListChange': [],
+ 'postPackageListChange': [],
+ 'preTaskStateChange': [],
+ 'postTaskStateChange': [],
+ 'preBuildStateChange': [],
+ 'postBuildStateChange': [],
+ 'preImport': [],
+ 'postImport': [],
+ 'preRPMSign': [],
+ 'postRPMSign': [],
+ 'preTag': [],
+ 'postTag': [],
+ 'preUntag': [],
+ 'postUntag': [],
+ 'preRepoInit': [],
+ 'postRepoInit': [],
+ 'preRepoDone': [],
+ 'postRepoDone': []
+ }
+
+class PluginTracker(object):
+
+ def __init__(self, path=None, prefix='_koji_plugin__'):
+ self.searchpath = path
+ #prefix should not have a '.' in it, as this can cause problems.
+ self.prefix = prefix
+ self.plugins = {}
+
+ def load(self, name, path=None, reload=False):
+ if self.plugins.has_key(name) and not reload:
+ return self.plugins[name]
+ mod_name = name
+ if self.prefix:
+ #mod_name determines how the module is named in sys.modules
+ #Using a prefix helps prevent overlap with other modules
+ #(no '.' -- it causes problems)
+ mod_name = self.prefix + name
+ if sys.modules.has_key(mod_name) and not reload:
+ raise koji.PluginError, 'module name conflict: %s' % mod_name
+ if path is None:
+ path = self.searchpath
+ if path is None:
+ raise koji.PluginError, "empty module search path"
+ file, pathname, description = imp.find_module(name, self.pathlist(path))
+ try:
+ plugin = imp.load_module(mod_name, file, pathname, description)
+ finally:
+ file.close()
+ self.plugins[name] = plugin
+ return plugin
+
+ def get(self, name):
+ return self.plugins.get(name)
+
+ def pathlist(self, path):
+ if isinstance(path, basestring):
+ return [path]
+ else:
+ return path
+
+
+# some decorators used by plugins
+def export(f):
+ """a decorator that marks a function as exported
+
+ intended to be used by plugins
+ the HandlerRegistry will export the function under its own name
+ """
+ setattr(f, 'exported', True)
+ return f
+
+def export_as(alias):
+ """returns a decorator that marks a function as exported and gives it an alias
+
+ intended to be used by plugins
+ """
+ def dec(f):
+ setattr(f, 'exported', True)
+ setattr(f, 'export_alias', alias)
+ return f
+ return dec
+
+def export_in(module, alias=None):
+ """returns a decorator that marks a function as exported with a module prepended
+
+ optionally, can also alias the function within the module
+ intended to be used by plugins
+ """
+ def dec(f):
+ if alias is None:
+ local_alias = "%s.%s" % (module, f.__name__)
+ else:
+ local_alias = "%s.%s" % (module, alias)
+ setattr(f, 'exported', True)
+ setattr(f, 'export_module', module)
+ setattr(f, 'export_alias', local_alias)
+ return f
+ return dec
+
+def callback(*cbtypes):
+ """A decorator that indicates a function is a callback.
+ cbtypes is a list of callback types to register for. Valid
+ callback types are listed in the plugin module.
+
+ Intended to be used by plugins.
+ """
+ def dec(f):
+ setattr(f, 'callbacks', cbtypes)
+ return f
+ return dec
+
+def ignore_error(f):
+ """a decorator that marks a callback as ok to fail
+
+ intended to be used by plugins
+ """
+ setattr(f, 'failure_is_an_option', True)
+ return f
+
+def register_callback(cbtype, func):
+ if not cbtype in callbacks:
+ raise koji.PluginError, '"%s" is not a valid callback type' % cbtype
+ if not callable(func):
+ raise koji.PluginError, '%s is not callable' % getattr(func, '__name__', 'function')
+ callbacks[cbtype].append(func)
+
+def run_callbacks(cbtype, *args, **kws):
+ if not cbtype in callbacks:
+ raise koji.PluginError, '"%s" is not a valid callback type' % cbtype
+ for func in callbacks[cbtype]:
+ try:
+ func(cbtype, *args, **kws)
+ except:
+ msg = 'Error running %s callback from %s' % (cbtype, func.__module__)
+ if getattr(func, 'failure_is_an_option', False):
+ logging.getLogger('koji.plugin').warn(msg, exc_info=True)
+ else:
+ tb = ''.join(traceback.format_exception(*sys.exc_info()))
+ raise koji.CallbackError, '%s:\n%s' % (msg, tb)
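
A minimal plugin sketch using the decorators and registration helpers
above (the function names and event data are invented; on a real hub
the exported function would be picked up by the HandlerRegistry and
callbacks registered during plugin loading):

    import koji.plugin

    @koji.plugin.export
    def echo(*args):
        return args

    @koji.plugin.ignore_error
    @koji.plugin.callback('postTag', 'postUntag')
    def log_tag_event(cbtype, *args, **kws):
        print "got %s event: %r %r" % (cbtype, args, kws)

    # register and fire the callback by hand, as the hub would:
    koji.plugin.register_callback('postTag', log_tag_event)
    koji.plugin.run_callbacks('postTag', tag='f21-build', build='foo-1.0-1')

Because of ignore_error, an exception in log_tag_event would be logged
as a warning instead of aborting the calling operation.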
diff --git a/koji/policy.py b/koji/policy.py
new file mode 100644
index 0000000..653f414
--- /dev/null
+++ b/koji/policy.py
@@ -0,0 +1,370 @@
+# Copyright (c) 2008-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+import fnmatch
+import koji
+
+
+class BaseSimpleTest(object):
+ """Abstract base class for simple tests"""
+
+ #Provide the name of the test
+ name = None
+
+ def __init__(self, str):
+ """Read the test parameters from string"""
+ self.str = str
+
+ def run(self, data):
+ """Run the test against data provided"""
+ raise NotImplementedError
+
+ def __str__(self):
+ return self.str
+
+
+# The following tests are generic enough that we can place them here
+
+class TrueTest(BaseSimpleTest):
+ name = 'true'
+ def run(self, data):
+ return True
+
+
+class FalseTest(BaseSimpleTest):
+ name = 'false'
+ def run(self, data):
+ return False
+
+
+class AllTest(TrueTest):
+ name = 'all'
+ #alias for true
+
+
+class NoneTest(FalseTest):
+ name = 'none'
+ #alias for false
+
+
+class HasTest(BaseSimpleTest):
+ """Test if policy data contains a field"""
+
+ name = "has"
+
+ def __init__(self, str):
+ try:
+ self.field = str.split()[1]
+ except IndexError:
+ raise koji.GenericError, "Invalid or missing field in policy test"
+
+ def run(self, data):
+ return self.field in data
+
+
+class BoolTest(BaseSimpleTest):
+ """Test a field in the data as a boolean value
+
+ This test can be used as-is, or it can be subclassed to
+ test a specific field
+
+ Syntax:
+ name [field]
+ """
+ name = 'bool'
+ field = None
+ def run(self, data):
+ args = self.str.split()[1:]
+ if self.field is None:
+ field = args[0]
+ else:
+ # expected when we are subclassed
+ field = self.field
+ return bool(data[field])
+
+
+class MatchTest(BaseSimpleTest):
+ """Matches a field in the data against glob patterns
+
+ True if any of the expressions match, else False
+ This test can be used as-is, or it can be subclassed to
+ test a specific field
+
+ Syntax:
+ name [field] pattern1 [pattern2 ...]
+ """
+ name = 'match'
+ field = None
+ def run(self, data):
+ args = self.str.split()[1:]
+ if self.field is None:
+ field = args[0]
+ args = args[1:]
+ else:
+ # expected when we are subclassed
+ field = self.field
+ for pattern in args:
+ if fnmatch.fnmatch(data[field], pattern):
+ return True
+ return False
+
+
+class CompareTest(BaseSimpleTest):
+ """Simple numeric field comparison
+
+ Supports basic numeric comparisons. The right operand must be a valid number
+ This test can be used as-is, or it can be subclassed to
+ test a specific field
+
+ Syntax:
+ name [field] OP number
+ """
+
+ name = 'compare'
+ field = None
+ allow_float = True
+
+ operators = {
+ '<' : lambda a, b: a < b,
+ '>' : lambda a, b: a > b,
+ '<=' : lambda a, b: a <= b,
+ '>=' : lambda a, b: a >= b,
+ '=' : lambda a, b: a == b,
+ '!=' : lambda a, b: a != b,
+ }
+
+ def __init__(self, str):
+ """Read the test parameters from string"""
+ super(CompareTest, self).__init__(str)
+ if self.field is None:
+ # field OP number
+ self.field, cmp, value = str.split(None, 3)[1:]
+ else:
+ # OP number
+ cmp, value = str.split(None, 2)[1:]
+ self.func = self.operators.get(cmp, None)
+ if self.func is None:
+ raise koji.GenericError, "Invalid comparison in test."
+ try:
+ self.value = int(value)
+ except ValueError:
+ if not self.allow_float:
+ raise
+ self.value = float(value)
+
+ def run(self, data):
+ return self.func(data[self.field], self.value)
+
+
+class SimpleRuleSet(object):
+
+ def __init__(self, rules, tests):
+ self.tests = tests
+ self.rules = self.parse_rules(rules)
+ self.lastrule = None
+ self.lastaction = None
+
+ def parse_rules(self, lines):
+ """Parse rules into a ruleset data structure
+
+ At the top level, the structure is a set of rules
+ [rule1, rule2, ...]
+ Each rule is a pair
+ [tests, negate, action ]
+ Tests is a list of test handlers:
+ [handler1, handler2, ...]
+ Action can either be a string or a chained ruleset
+ "action"
+ or
+ [subrule1, subrule2, ...]
+ Putting it all together, you get something like this:
+ [[[test1, test2], negate, "action"],
+ [[test], negate,
+ [[[test1, test2], negate, "action"],
+ [[test1, test2, test3], negate,
+ [[[test1, test2], negate, "action"]]]]]]
+ """
+ cursor = []
+ self.ruleset = cursor
+ stack = []
+ for line in lines:
+ rule = self.parse_line(line)
+ if rule is None:
+ #blank/etc
+ continue
+ tests, negate, action = rule
+ if action == '{':
+ #nested rules
+ child = []
+ cursor.append([tests, negate, child])
+ stack.append(cursor)
+ cursor = child
+ elif action == '}':
+ if not stack:
+ raise koji.GenericError, "nesting error in rule set"
+ cursor = stack.pop()
+ else:
+ cursor.append(rule)
+ if stack:
+ # unclosed {
+ raise koji.GenericError, "nesting error in rule set"
+
+ def parse_line(self, line):
+ """Parse line as a rule
+
+ Expected format is:
+ test [params] [&& test [params] ...] :: action-if-true
+ test [params] [&& test [params] ...] !! action-if-false
+
+
+ (syntax is !! instead of ||, because otherwise folks might think
+ they can mix && and ||, which is /not/ supported)
+
+ For complex rules:
+ test [params [&& ...]] :: {
+ test [params [&& ...]] :: action
+ test [params [&& ...]] :: {
+ ...
+ }
+ }
+
+ Each closing brace must be on a line by itself
+ """
+ line = line.split('#', 1)[0].strip()
+ if not line:
+ #blank or all comment
+ return None
+ if line == '}':
+ return None, False, '}'
+ #?? allow }} ??
+ negate = False
+ pos = line.rfind('::')
+ if pos == -1:
+ pos = line.rfind('!!')
+ if pos == -1:
+ raise Exception, "bad policy line: %s" % line
+ negate = True
+ tests = line[:pos]
+ action = line[pos+2:]
+ tests = [self.get_test_handler(x) for x in tests.split('&&')]
+ action = action.strip()
+ # just return action = { for nested rules
+ return tests, negate, action
+
+ def get_test_handler(self, str):
+ name = str.split(None,1)[0]
+ try:
+ return self.tests[name](str)
+ except KeyError:
+ raise koji.GenericError, "missing test handler: %s" % name
+
+ def all_actions(self):
+ """report a list of all actions in the ruleset
+
+ (only the first word of the action is considered)
+ """
+ def _recurse(rules, index):
+ for tests, negate, action in rules:
+ if isinstance(action, list):
+ _recurse(action, index)
+ else:
+ name = action.split(None,1)[0]
+ index[name] = 1
+ index = {}
+ _recurse(self.ruleset, index)
+ return index.keys()
+
+ def _apply(self, rules, data, top=False):
+ for tests, negate, action in rules:
+ if top:
+ self.lastrule = []
+ value = False
+ for test in tests:
+ if not test.run(data):
+ break
+ else:
+ #all tests in current rule passed
+ value = True
+ if negate:
+ value = not value
+ if value:
+ self.lastrule.append([tests, negate])
+ if isinstance(action, list):
+ # action is a list of subrules
+ ret = self._apply(action, data)
+ if ret is not None:
+ return ret
+ # if ret is None, then none of the subrules matched,
+ # so we keep going
+ else:
+ return action
+ return None
+
+ def apply(self, data):
+ self.lastrule = []
+ self.lastaction = self._apply(self.ruleset, data, top=True)
+ return self.lastaction
+
+ def last_rule(self):
+ if self.lastrule is None:
+ return None
+ ret = []
+ for (tests, negate) in self.lastrule:
+ line = '&&'.join([str(t) for t in tests])
+ if negate:
+ line += ' !! '
+ else:
+ line += ' :: '
+ ret.append(line)
+ ret = '... '.join(ret)
+ if self.lastaction is None:
+ ret += "(no match)"
+ else:
+ ret += self.lastaction
+ return ret
+
+
+def findSimpleTests(namespace):
+ """Search namespace for subclasses of BaseSimpleTest
+
+ This is a convenience function for initializing a SimpleRuleSet instance
+ namespace can be a dict (e.g. globals()), or a list of dicts
+ returns a dictionary of the found subclasses, indexed by name
+ """
+ if not isinstance(namespace, (list, tuple)):
+ namespace = (namespace,)
+ ret = {}
+ for ns in namespace:
+ for key, value in ns.iteritems():
+ if value is BaseSimpleTest:
+ # skip this abstract base class if we encounter it
+ # this module contains generic tests, so it is valid to include it
+ # in the namespace list
+ continue
+ if type(value) == type(BaseSimpleTest) and issubclass(value, BaseSimpleTest):
+ name = getattr(value, 'name', None)
+ if not name:
+ #use the class name
+ name = key
+ #but trim 'Test' from the end
+ if name.endswith('Test') and len(name) > 4:
+ name = name[:-4]
+ ret.setdefault(name, value)
+ #...so first test wins in case of name overlap
+ return ret
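
A small sketch of driving SimpleRuleSet with the generic tests defined
above (the rule lines, field names, and actions are invented for
illustration):

    import koji.policy

    rules = [
        'has fedora_req :: {',
        '    match fedora_req epel* :: epel_action',
        '    all :: default_action',
        '}',
        'bool is_scratch !! no_scratch_action',
        'all :: fallback',
    ]
    tests = koji.policy.findSimpleTests(vars(koji.policy))
    ruleset = koji.policy.SimpleRuleSet(rules, tests)
    print ruleset.apply({'fedora_req': 'epel7', 'is_scratch': False})
    # -> 'epel_action'; ruleset.last_rule() shows which rule matched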
diff --git a/koji/server.py b/koji/server.py
new file mode 100644
index 0000000..52f13f5
--- /dev/null
+++ b/koji/server.py
@@ -0,0 +1,189 @@
+# common server code for koji
+#
+# Copyright (c) 2012-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+import koji
+import sys
+import traceback
+from koji.util import LazyDict
+
+try:
+ from mod_python import apache
+except ImportError:
+ apache = None
+
+
+class ServerError(Exception):
+ """Base class for our server-side-only exceptions"""
+
+class ServerRedirect(ServerError):
+ """Used to handle redirects"""
+
+
+class WSGIWrapper(object):
+ """A very thin wsgi compat layer for mod_python
+
+ This class is highly specific to koji and is not fit for general use.
+ It does not support the full wsgi spec
+ """
+
+ def __init__(self, req):
+ self.req = req
+ self._env = None
+ host, port = req.connection.remote_addr
+ environ = {
+ 'REMOTE_ADDR' : req.connection.remote_ip,
+ # or remote_addr[0]?
+ # or req.get_remote_host(apache.REMOTE_NOLOOKUP)?
+ 'REMOTE_PORT' : str(req.connection.remote_addr[1]),
+ 'REMOTE_USER' : req.user,
+ 'REQUEST_METHOD' : req.method,
+ 'REQUEST_URI' : req.uri,
+ 'PATH_INFO' : req.path_info,
+ 'SCRIPT_FILENAME' : req.filename,
+ 'QUERY_STRING' : req.args or '',
+ 'SERVER_NAME' : req.hostname,
+ 'SERVER_PORT' : str(req.connection.local_addr[1]),
+ 'wsgi.version' : (1, 0),
+ 'wsgi.input' : InputWrapper(req),
+ 'wsgi.errors' : sys.stderr,
+ #TODO - file_wrapper support
+ }
+ environ = LazyDict(environ)
+ environ.lazyset('wsgi.url_scheme', self.get_scheme, [])
+ environ.lazyset('modpy.env', self.env, [])
+ environ.lazyset('modpy.opts', req.get_options, [])
+ environ.lazyset('modpy.conf', req.get_config, [])
+ environ.lazyset('SCRIPT_NAME', self.script_name, [], cache=True)
+ env_keys = ['SSL_CLIENT_VERIFY', 'HTTPS', 'SSL_CLIENT_S_DN']
+ for key in env_keys:
+ environ.lazyset(key, self.envget, [key])
+ # The component of the DN used for the username is usually the CN,
+ # but it is configurable.
+ # Allow retrieval of some common DN components from the environment.
+ for comp in ['C', 'ST', 'L', 'O', 'OU', 'CN', 'Email']:
+ key = 'SSL_CLIENT_S_DN_' + comp
+ environ.lazyset(key, self.envget, [key])
+ #gather the headers we care about
+ for key in req.headers_in:
+ k2 = key.upper()
+ k2 = k2.replace('-', '_')
+ if k2 not in ['CONTENT_TYPE', 'CONTENT_LENGTH']:
+ k2 = 'HTTP_' + k2
+ environ[k2] = req.headers_in[key]
+ self.environ = environ
+ self.set_headers = False
+
+ def env(self):
+ if self._env is None:
+ self.req.add_common_vars()
+ self._env = self.req.subprocess_env
+ return self._env
+
+ def envget(self, *args):
+ return self.env().get(*args)
+
+ def script_name(self):
+ uri = self.req.uri
+ path_info = self.req.path_info
+ if uri.endswith(path_info):
+ uri = uri[:-len(path_info)]
+ uri = uri.rstrip('/')
+ return uri
+
+ def get_scheme(self):
+ if self.envget('HTTPS') in ('yes', 'on', '1'):
+ return 'https'
+ else:
+ return 'http'
+
+ def no_write(self, string):
+ """a fake write() callable returned by start_response
+
+ we don't use the write() callable in koji, so it will raise an error if called
+ """
+ raise RuntimeError, "wsgi write() callable not supported"
+
+ def start_response(self, status, headers, exc_info=None):
+ #XXX we don't deal with exc_info
+ if self.set_headers:
+ raise RuntimeError, "start_response() already called"
+ self.req.status = int(status[:3])
+ for key, val in headers:
+ if key.lower() == 'content-length':
+ self.req.set_content_length(int(val))
+ elif key.lower() == 'content-type':
+ self.req.content_type = val
+ else:
+ self.req.headers_out.add(key, val)
+ self.set_headers = True
+ return self.no_write
+
+ def run(self, handler):
+ try:
+ result = handler(self.environ, self.start_response)
+ self.write_result(result)
+ return apache.OK
+ except:
+ sys.stderr.write(''.join(traceback.format_exception(*sys.exc_info())))
+ sys.stderr.flush()
+ raise apache.SERVER_RETURN, apache.HTTP_INTERNAL_SERVER_ERROR
+
+ def write_result(self, result):
+ """called by run() to handle the application's result value"""
+ req = self.req
+ write = req.write
+ if self.set_headers:
+ for chunk in result:
+ write(chunk)
+ else:
+ #slower version -- need to check for set_headers
+ for chunk in result:
+ if chunk and not self.set_headers:
+ raise RuntimeError, "write() called before start_response()"
+ write(chunk)
+ if not req.bytes_sent:
+ #application sent nothing back
+ req.set_content_length(0)
+
+
+
+class InputWrapper(object):
+
+ def __init__(self, req):
+ self.req = req
+
+ def close(self):
+ pass
+
+ def read(self, size=-1):
+ return self.req.read(size)
+
+ def readline(self):
+ return self.req.readline()
+
+ def readlines(self, hint=-1):
+ return self.req.readlines(hint)
+
+ def __iter__(self):
+ line = self.readline()
+ while line:
+ yield line
+ line = self.readline()
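
The run() method above expects an ordinary WSGI application callable.
As a sketch of that contract, here is a trivial handler exercised with
stub arguments instead of a real mod_python request (names invented):

    def handler(environ, start_response):
        body = 'path was %s\n' % environ.get('PATH_INFO', '/')
        start_response('200 OK', [('Content-Type', 'text/plain'),
                                  ('Content-Length', str(len(body)))])
        return [body]

    def fake_start_response(status, headers, exc_info=None):
        print status, headers

    for chunk in handler({'PATH_INFO': '/hello'}, fake_start_response):
        print chunk,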
diff --git a/koji/ssl/Makefile b/koji/ssl/Makefile
new file mode 100644
index 0000000..f643700
--- /dev/null
+++ b/koji/ssl/Makefile
@@ -0,0 +1,21 @@
+FILES = $(wildcard *.py)
+
+PYTHON=python
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)
+
+ $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)', 1, '$(PYDIR)', 1)"
diff --git a/koji/ssl/SSLCommon.py b/koji/ssl/SSLCommon.py
new file mode 100644
index 0000000..0d3fb94
--- /dev/null
+++ b/koji/ssl/SSLCommon.py
@@ -0,0 +1,141 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.
+
+import os, sys
+from OpenSSL import SSL
+import SSLConnection
+import httplib
+import socket
+import SocketServer
+
+def our_verify(connection, x509, errNum, errDepth, preverifyOK):
+ # print "Verify: errNum = %s, errDepth = %s, preverifyOK = %s" % (errNum, errDepth, preverifyOK)
+
+ # preverifyOK should tell us whether or not the client's certificate
+ # correctly authenticates against the CA chain
+ return preverifyOK
+
+
+def CreateSSLContext(certs):
+ key_and_cert = certs['key_and_cert']
+ ca_cert = certs['ca_cert']
+ peer_ca_cert = certs['peer_ca_cert']
+ for f in key_and_cert, ca_cert, peer_ca_cert:
+ if f and not os.access(f, os.R_OK):
+ raise StandardError, "%s does not exist or is not readable" % f
+
+ ctx = SSL.Context(SSL.SSLv23_METHOD) # Use best possible TLS Method
+ ctx.use_certificate_file(key_and_cert)
+ ctx.use_privatekey_file(key_and_cert)
+ ctx.load_client_ca(ca_cert)
+ ctx.load_verify_locations(peer_ca_cert)
+ verify = SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT
+ ctx.set_verify(verify, our_verify)
+ ctx.set_verify_depth(10)
+ ctx.set_options(SSL.OP_NO_SSLv3 | SSL.OP_NO_SSLv2) # disable SSLv2 and SSLv3
+ return ctx
+
+
+
+class PlgBaseServer(SocketServer.ThreadingTCPServer):
+ allow_reuse_address = 1
+
+ def __init__(self, server_addr, req_handler):
+ self._quit = False
+ self.allow_reuse_address = 1
+ SocketServer.ThreadingTCPServer.__init__(self, server_addr, req_handler)
+
+ def stop(self):
+ self._quit = True
+
+ def serve_forever(self):
+ while not self._quit:
+ self.handle_request()
+ self.server_close()
+
+
+class PlgBaseSSLServer(PlgBaseServer):
+ """ SSL-enabled variant """
+
+ def __init__(self, server_address, req_handler, certs, timeout=None):
+ self._timeout = timeout
+ self.ssl_ctx = CreateSSLContext(certs)
+
+ PlgBaseServer.__init__(self, server_address, req_handler)
+
+ sock = socket.socket(self.address_family, self.socket_type)
+ con = SSL.Connection(self.ssl_ctx, sock)
+ self.socket = SSLConnection.SSLConnection(con)
+ if sys.version_info[:3] >= (2, 3, 0):
+ self.socket.settimeout(self._timeout)
+ self.server_bind()
+ self.server_activate()
+
+ host, port = self.socket.getsockname()[:2]
+ self.server_name = socket.getfqdn(host)
+ self.server_port = port
+
+
+class PlgHTTPSConnection(httplib.HTTPConnection):
+ "This class allows communication via SSL."
+
+ response_class = httplib.HTTPResponse
+
+ def __init__(self, host, port=None, ssl_context=None, strict=None, timeout=None):
+ httplib.HTTPConnection.__init__(self, host, port, strict)
+ self.ssl_ctx = ssl_context
+ self._timeout = timeout
+
+ def connect(self):
+ for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ try:
+ sock = socket.socket(af, socktype, proto)
+ con = SSL.Connection(self.ssl_ctx, sock)
+ self.sock = SSLConnection.SSLConnection(con)
+ if sys.version_info[:3] >= (2, 3, 0):
+ self.sock.settimeout(self._timeout)
+ self.sock.connect(sa)
+ if self.debuglevel > 0:
+ print "connect: (%s, %s) [ssl]" % (self.host, self.port)
+ except socket.error, msg:
+ if self.debuglevel > 0:
+ print 'connect fail:', (self.host, self.port)
+ if self.sock:
+ self.sock.close()
+ self.sock = None
+ continue
+ break
+ else:
+ raise socket.error, "failed to connect"
+
+
+
+class PlgHTTPS(httplib.HTTP):
+ """Compatibility with 1.5 httplib interface
+
+ Python 1.5.2 did not have an HTTPS class, but it defined an
+ interface for sending http requests that is also useful for
+ https.
+ """
+
+ _http_vsn = 11
+ _http_vsn_str = 'HTTP/1.1'
+
+ _connection_class = PlgHTTPSConnection
+
+ def __init__(self, host='', port=None, ssl_context=None, strict=None, timeout=None):
+ self._setup(self._connection_class(host, port, ssl_context, strict, timeout))
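
A sketch of the certificate mapping CreateSSLContext() expects; the
paths are hypothetical and must point at real, readable PEM files or a
StandardError is raised:

    from koji.ssl import SSLCommon

    certs = {
        'key_and_cert': '/etc/pki/koji/kojihub.pem',    # key and cert in one PEM
        'ca_cert': '/etc/pki/koji/koji_ca_cert.crt',    # CA that signed client certs
        'peer_ca_cert': '/etc/pki/koji/koji_ca_cert.crt',
    }
    ctx = SSLCommon.CreateSSLContext(certs)
    # ctx can then back a PlgBaseSSLServer or a PlgHTTPSConnection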
diff --git a/koji/ssl/SSLConnection.py b/koji/ssl/SSLConnection.py
new file mode 100644
index 0000000..5a45095
--- /dev/null
+++ b/koji/ssl/SSLConnection.py
@@ -0,0 +1,158 @@
+# Higher-level SSL objects used by rpclib
+#
+# Copyright (c) 2002 Red Hat, Inc.
+#
+# Author: Mihai Ibanescu <misa at redhat.com>
+# Modifications by Dan Williams <dcbw at redhat.com>
+
+
+from OpenSSL import SSL, crypto
+import os, string, time, socket, select
+
+
+class SSLConnection:
+ """
+ This whole class exists just to filter out a parameter
+ passed in to the shutdown() method in SimpleXMLRPC.doPOST()
+ """
+
+ DEFAULT_TIMEOUT = 60
+
+ def __init__(self, conn):
+ """
+ Connection is not yet a new-style class,
+ so I'm making a proxy instead of subclassing.
+ """
+ self.__dict__["conn"] = conn
+ self.__dict__["close_refcount"] = 1
+ self.__dict__["closed"] = False
+ self.__dict__["timeout"] = self.DEFAULT_TIMEOUT
+
+ def __del__(self):
+ self.__dict__["conn"].close()
+
+ def __getattr__(self,name):
+ return getattr(self.__dict__["conn"], name)
+
+ def __setattr__(self,name, value):
+ setattr(self.__dict__["conn"], name, value)
+
+ def settimeout(self, timeout):
+ if timeout == None:
+ self.__dict__["timeout"] = self.DEFAULT_TIMEOUT
+ else:
+ self.__dict__["timeout"] = timeout
+ self.__dict__["conn"].settimeout(timeout)
+
+ def shutdown(self, how=1):
+ """
+ SimpleXMLRpcServer.doPOST calls shutdown(1),
+ and Connection.shutdown() doesn't take
+ an argument. So we just discard the argument.
+ """
+ self.__dict__["conn"].shutdown()
+
+ def accept(self):
+ """
+ This is the other part of the shutdown() workaround.
+ Since servers create new sockets, we have to infect
+ them with our magic. :)
+ """
+ c, a = self.__dict__["conn"].accept()
+ return (SSLConnection(c), a)
+
+ def makefile(self, mode='r', bufsize=-1):
+ """
+ We need to use socket._fileobject because SSL.Connection
+ doesn't have a 'dup'. Not exactly sure WHY this is, but
+ this is backed up by comments in socket.py and SSL/connection.c
+
+ Since httplib.HTTPSResponse/HTTPConnection depend on the
+ socket being duplicated when they close it, we refcount the
+ socket object and don't actually close until its count is 0.
+ """
+ self.__dict__["close_refcount"] = self.__dict__["close_refcount"] + 1
+ return PlgFileObject(self, mode, bufsize)
+
+ def close(self):
+ if self.__dict__["closed"]:
+ return
+ self.__dict__["close_refcount"] = self.__dict__["close_refcount"] - 1
+ if self.__dict__["close_refcount"] == 0:
+ self.shutdown()
+ self.__dict__["conn"].close()
+ self.__dict__["closed"] = True
+
+ def sendall(self, data, flags=0):
+ """
+ - Use select() to simulate a socket timeout without setting the socket
+ to non-blocking mode.
+ - Don't use pyOpenSSL's sendall() either, since it just loops on WantRead
+ or WantWrite, consuming 100% CPU, and never times out.
+ """
+ timeout = self.__dict__["timeout"]
+ con = self.__dict__["conn"]
+ (read, write, excpt) = select.select([], [con], [], timeout)
+ if not con in write:
+ raise socket.timeout((110, "Operation timed out."))
+
+ starttime = time.time()
+ origlen = len(data)
+ sent = -1
+ while len(data):
+ curtime = time.time()
+ if curtime - starttime > timeout:
+ raise socket.timeout((110, "Operation timed out."))
+
+ try:
+ sent = con.send(data, flags)
+ except SSL.SysCallError, e:
+ if e[0] == 32: # Broken Pipe
+ self.close()
+ sent = 0
+ else:
+ raise socket.error(e)
+ except (SSL.WantWriteError, SSL.WantReadError):
+ time.sleep(0.2)
+ continue
+
+ data = data[sent:]
+ return origlen - len(data)
+
+ def recv(self, bufsize, flags=0):
+ """
+ Use select() to simulate a socket timeout without setting the socket
+ to non-blocking mode
+ """
+ timeout = self.__dict__["timeout"]
+ con = self.__dict__["conn"]
+ (read, write, excpt) = select.select([con], [], [], timeout)
+ if not con in read:
+ raise socket.timeout((110, "Operation timed out."))
+
+ starttime = time.time()
+ while True:
+ curtime = time.time()
+ if curtime - starttime > timeout:
+ raise socket.timeout((110, "Operation timed out."))
+
+ try:
+ return con.recv(bufsize, flags)
+ except SSL.ZeroReturnError:
+ return None
+ except SSL.WantReadError:
+ time.sleep(0.2)
+ return None
+
+class PlgFileObject(socket._fileobject):
+ def close(self):
+ """
+ socket._fileobject doesn't actually _close_ the socket,
+ which we want it to do, so we have to override.
+ """
+ try:
+ if self._sock:
+ self.flush()
+ self._sock.close()
+ finally:
+ self._sock = None
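
A sketch of the close() refcounting described above: each makefile()
call bumps the count, and the wrapped connection is only shut down when
the last user closes. A stub object stands in for the real OpenSSL
connection:

    from koji.ssl.SSLConnection import SSLConnection

    class StubConn:
        def shutdown(self):
            print 'shutdown'
        def close(self):
            print 'close'

    wrapped = SSLConnection(StubConn())
    f = wrapped.makefile('rb')   # refcount 1 -> 2
    f.close()                    # refcount 2 -> 1; nothing printed yet
    wrapped.close()              # refcount 1 -> 0: prints shutdown, close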
diff --git a/koji/ssl/XMLRPCServerProxy.py b/koji/ssl/XMLRPCServerProxy.py
new file mode 100644
index 0000000..16de619
--- /dev/null
+++ b/koji/ssl/XMLRPCServerProxy.py
@@ -0,0 +1,178 @@
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+#
+# Modified by Dan Williams <dcbw at redhat.com>
+# Further modified by Mike Bonnet <mikeb at redhat.com>
+
+import os, sys
+import SSLCommon
+import urllib
+import xmlrpclib
+
+__version__='0.12'
+
+class PlgSSL_Transport(xmlrpclib.Transport):
+
+ user_agent = "pyOpenSSL_XMLRPC/%s - %s" % (__version__, xmlrpclib.Transport.user_agent)
+
+ def __init__(self, ssl_context, timeout=None, use_datetime=0):
+ if sys.version_info[:3] >= (2, 5, 0):
+ xmlrpclib.Transport.__init__(self, use_datetime)
+ self.ssl_ctx=ssl_context
+ self._timeout = timeout
+ self._https = None
+
+ def make_connection(self, host):
+ # Handle username and password.
+ try:
+ host, extra_headers, x509 = self.get_host_info(host)
+ except AttributeError:
+ # Yay for Python 2.2
+ pass
+ _host, _port = urllib.splitport(host)
+ if hasattr(xmlrpclib.Transport, 'single_request'):
+ cnx_class = SSLCommon.PlgHTTPSConnection
+ else:
+ cnx_class = SSLCommon.PlgHTTPS
+ self._https = cnx_class(_host, (_port and int(_port) or 443), ssl_context=self.ssl_ctx, timeout=self._timeout)
+ return self._https
+
+ def close(self):
+ if self._https:
+ self._https.close()
+ self._https = None
+
+
+class Plg_ClosableTransport(xmlrpclib.Transport):
+ """Override make_connection so we can close it."""
+ def __init__(self):
+ self._http = None
+
+ def make_connection(self, host):
+ # create a HTTP connection object from a host descriptor
+ import httplib
+ host, extra_headers, x509 = self.get_host_info(host)
+ self._http = httplib.HTTP(host)
+ return self._http
+
+ def close(self):
+ if self._http:
+ self._http.close()
+ self._http = None
+
+
+class PlgXMLRPCServerProxy(xmlrpclib.ServerProxy):
+ def __init__(self, uri, certs, timeout=None, verbose=0, allow_none=0):
+ if certs and len(certs) > 0:
+ self.ctx = SSLCommon.CreateSSLContext(certs)
+ self._transport = PlgSSL_Transport(ssl_context=self.ctx, timeout=timeout)
+ else:
+ self._transport = Plg_ClosableTransport()
+ xmlrpclib.ServerProxy.__init__(self, uri, transport=self._transport,
+ verbose=verbose, allow_none=allow_none)
+
+ def cancel(self):
+ self._transport.close()
+
+
+###########################################################
+# Testing stuff
+###########################################################
+
+
+import threading
+import time
+import random
+import OpenSSL
+import socket
+
+client_start = False
+
+threadlist_lock = threading.Lock()
+threadlist = {}
+
+class TestClient(threading.Thread):
+ def __init__(self, certs, num, tm):
+ self.server = PlgXMLRPCServerProxy("https://127.0.0.1:8886", certs, timeout=20)
+ self.num = num
+ self.tm = tm
+ threading.Thread.__init__(self)
+
+ def run(self):
+ while not client_start:
+ time.sleep(0.05)
+ i = 0
+ while i < 5:
+ reply = None
+ try:
+ reply = self.server.ping(self.num, i)
+ except OpenSSL.SSL.Error, e:
+ reply = "OpenSSL Error (%s)" % e
+ except socket.timeout, e:
+ reply = "Socket timeout (%s)" % e
+ threadlist_lock.acquire()
+ self.tm.inc()
+ threadlist_lock.release()
+ print "TRY(%d / %d): %s" % (self.num, i, reply)
+ time.sleep(0.05)
+ i = i + 1
+ threadlist_lock.acquire()
+ del threadlist[self]
+ threadlist_lock.release()
+
+class TimeoutCounter:
+ def __init__(self):
+ self._timedout = 0
+ self._lock = threading.Lock();
+
+ def inc(self):
+ self._lock.acquire()
+ self._timedout = self._timedout + 1
+ self._lock.release()
+
+ def get(self):
+ return self._timedout
+
+if __name__ == '__main__':
+ if len(sys.argv) < 4:
+ print "Usage: python XMLRPCServerProxy.py key_and_cert ca_cert peer_ca_cert"
+ sys.exit(1)
+
+ certs = {}
+ certs['key_and_cert'] = sys.argv[1]
+ certs['ca_cert'] = sys.argv[2]
+ certs['peer_ca_cert'] = sys.argv[3]
+
+ tm = TimeoutCounter()
+ i = 100
+ while i > 0:
+ t = TestClient(certs, i, tm)
+ threadlist[t] = None
+ print "Created thread %d." % i
+ t.start()
+ i = i - 1
+
+ time.sleep(3)
+ print "Unleashing threads."
+ client_start = True
+ while True:
+ try:
+ time.sleep(0.25)
+ threadlist_lock.acquire()
+ try:
+ if len(threadlist) == 0:
+ break
+ finally:
+ threadlist_lock.release()
+ except KeyboardInterrupt:
+ os._exit(0)
+ print "All done. (%d timed out)" % tm.get()
diff --git a/koji/ssl/__init__.py b/koji/ssl/__init__.py
new file mode 100644
index 0000000..180fed6
--- /dev/null
+++ b/koji/ssl/__init__.py
@@ -0,0 +1 @@
+# identify this as the ssl module
diff --git a/koji/tasks.py b/koji/tasks.py
new file mode 100644
index 0000000..0d9a003
--- /dev/null
+++ b/koji/tasks.py
@@ -0,0 +1,555 @@
+# Task definitions used by various Koji daemons
+
+# Copyright (c) 2010-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+import koji
+import koji.util
+import os
+import logging
+import xmlrpclib
+import signal
+import urllib2
+import shutil
+import random
+import time
+import pprint
+
+def scan_mounts(topdir):
+ """Search path for mountpoints"""
+ mplist = []
+ topdir = os.path.normpath(topdir)
+ fo = file('/proc/mounts','r')
+ for line in fo.readlines():
+ path = line.split()[1]
+ if path.startswith(topdir):
+ mplist.append(path)
+ fo.close()
+ #reverse sort so deeper dirs come first
+ mplist.sort()
+ mplist.reverse()
+ return mplist
+
+def umount_all(topdir):
+ "Unmount every mount under topdir"
+ logger = logging.getLogger("koji.build")
+ for path in scan_mounts(topdir):
+ logger.debug('Unmounting %s' % path)
+ cmd = ['umount', '-l', path]
+ rv = os.spawnvp(os.P_WAIT,cmd[0],cmd)
+ if rv != 0:
+ raise koji.GenericError, 'umount failed (exit code %r) for %s' % (rv,path)
+ #check mounts again
+ remain = scan_mounts(topdir)
+ if remain:
+ raise koji.GenericError, "Unmounting incomplete: %r" % remain
+
+def safe_rmtree(path, unmount=False, strict=True):
+ logger = logging.getLogger("koji.build")
+ #safe remove: with -xdev the find cmd will not cross filesystems
+ # (though it will cross bind mounts from the same filesystem)
+ if unmount:
+ umount_all(path)
+ if os.path.isfile(path) or os.path.islink(path):
+ logger.debug("Removing: %s" % path)
+ try:
+ os.remove(path)
+ except:
+ if strict:
+ raise
+ else:
+ logger.warn("Error removing: %s", exc_info=True)
+ return
+ if not os.path.exists(path):
+ logger.debug("No such path: %s" % path)
+ return
+ #first rm -f non-directories
+ logger.debug('Scrubbing files in %s' % path)
+ rv = os.system("find '%s' -xdev \\! -type d -print0 |xargs -0 rm -f" % path)
+ msg = 'file removal failed (code %r) for %s' % (rv,path)
+ if rv != 0:
+ logger.warn(msg)
+ if strict:
+ raise koji.GenericError, msg
+ else:
+ return rv
+ #then rmdir directories
+ #with -depth, we start at the bottom and work up
+ logger.debug('Scrubbing directories in %s' % path)
+ rv = os.system("find '%s' -xdev -depth -type d -print0 |xargs -0 rmdir" % path)
+ msg = 'dir removal failed (code %r) for %s' % (rv,path)
+ if rv != 0:
+ logger.warn(msg)
+ if strict:
+ raise koji.GenericError, msg
+ return rv
+
+class ServerExit(Exception):
+ """Raised to shutdown the server"""
+ pass
+
+class ServerRestart(Exception):
+ """Raised to restart the server"""
+ pass
+
+class BaseTaskHandler(object):
+ """The base class for task handlers
+
+ Each task handler is a class, a new instance of which is created
+ to handle each task.
+ """
+
+ # list of methods the class can handle
+ Methods = []
+
+ # Options:
+ Foreground = False
+
+ def __init__(self, id, method, params, session, options, workdir=None):
+ self.id = id #task id
+ if method not in self.Methods:
+ raise koji.GenericError, 'method "%s" is not supported' % method
+ self.method = method
+ # handle named parameters
+ self.params,self.opts = koji.decode_args(*params)
+ self.session = session
+ self.options = options
+ if workdir is None:
+ workdir = "%s/%s" % (self.options.workdir, koji.pathinfo.taskrelpath(id))
+ self.workdir = workdir
+ self.logger = logging.getLogger("koji.build.BaseTaskHandler")
+ self.manager = None
+
+ def setManager(self,manager):
+ """Set the manager attribute
+
+ This is only used for foreground tasks to give them access
+ to their task manager.
+ """
+ if not self.Foreground:
+ return
+ self.manager = manager
+
+ def handler(self):
+ """(abstract) the handler for the task."""
+ raise NotImplementedError
+
+ def run(self):
+ """Execute the task"""
+ self.createWorkdir()
+ try:
+ return koji.util.call_with_argcheck(self.handler, self.params, self.opts)
+ finally:
+ self.removeWorkdir()
+
+ _taskWeight = 1.0
+
+ def weight(self):
+ """Return the weight of the task.
+
+ This is run by the taskmanager before the task is run to determine
+ the weight of the task. The weight is an abstract measure of the
+ total load the task places on the system while running.
+
+ A task may set _taskWeight for a constant weight different from 1, or
+ override this function for more complicated situations.
+
+ Note that task weight is partially ignored while the task is sleeping.
+ """
+ return getattr(self,'_taskWeight',1.0)
+
+ def createWorkdir(self):
+ if self.workdir is None:
+ return
+ self.removeWorkdir()
+ os.makedirs(self.workdir)
+
+ def removeWorkdir(self):
+ if self.workdir is None:
+ return
+ safe_rmtree(self.workdir, unmount=False, strict=True)
+ #os.spawnvp(os.P_WAIT, 'rm', ['rm', '-rf', self.workdir])
+
+ def wait(self, subtasks=None, all=False, failany=False):
+ """Wait on subtasks
+
+ subtasks is a list of integers (or an integer). If more than one subtask
+ is specified, then the default behavior is to return when any of those
+ tasks complete. However, if all is set to True, then it waits for all of
+ them to complete. If all and failany are both set to True, then each
+ finished task will be checked for failure, and a failure will cause all
+ of the unfinished tasks to be cancelled.
+
+ special values:
+ subtasks = None specify all subtasks
+
+ Implementation notes:
+ The build daemon forks all tasks as separate processes. This function
+ uses signal.pause to sleep. The main process watches subtasks in
+ the database and will send the subprocess corresponding to the
+ subtask a SIGUSR2 to wake it up when subtasks complete.
+ """
+ if isinstance(subtasks,int):
+ # allow single integer w/o enclosing list
+ subtasks = [subtasks]
+ self.session.host.taskSetWait(self.id,subtasks)
+ self.logger.debug("Waiting on %r" % subtasks)
+ while True:
+ finished, unfinished = self.session.host.taskWait(self.id)
+ if len(unfinished) == 0:
+ #all done
+ break
+ elif len(finished) > 0:
+ if all:
+ if failany:
+ failed = False
+ for task in finished:
+ try:
+ result = self.session.getTaskResult(task)
+ except (koji.GenericError, xmlrpclib.Fault), task_error:
+ self.logger.info("task %s failed or was canceled" % task)
+ failed = True
+ break
+ if failed:
+ self.logger.info("at least one task failed or was canceled, cancelling unfinished tasks")
+ self.session.cancelTaskChildren(self.id)
+ # reraise the original error now, rather than waiting for
+ # an error in taskWaitResults()
+ raise task_error
+ else:
+ # at least one done
+ break
+ # signal handler set by TaskManager.forkTask
+ self.logger.debug("Pausing...")
+ signal.pause()
+ # main process will wake us up with SIGUSR2
+ self.logger.debug("...waking up")
+ self.logger.debug("Finished waiting")
+ if all:
+ return dict(self.session.host.taskWaitResults(self.id, subtasks))
+ else:
+ return dict(self.session.host.taskWaitResults(self.id, finished))
+
+ def getUploadDir(self):
+ return koji.pathinfo.taskrelpath(self.id)
+
+ def uploadFile(self, filename, relPath=None, remoteName=None):
+ """Upload the file with the given name to the task output directory
+ on the hub."""
+ uploadPath = self.getUploadDir()
+ if relPath:
+ relPath = relPath.strip('/')
+ uploadPath += '/' + relPath
+ # Only upload files with content
+ if os.path.isfile(filename) and os.stat(filename).st_size > 0:
+ self.session.uploadWrapper(filename, uploadPath, remoteName)
+
+ def uploadTree(self, dirpath, flatten=False):
+ """Upload the directory tree at dirpath to the task directory on the
+ hub, preserving the directory structure"""
+ dirpath = dirpath.rstrip('/')
+ for path, dirs, files in os.walk(dirpath):
+ if flatten:
+ relpath = None
+ else:
+ relpath = path[len(dirpath) + 1:]
+ for filename in files:
+ self.uploadFile(os.path.join(path, filename), relpath)
+
+ def chownTree(self, dirpath, uid, gid):
+ """chown the given path and all files and directories under
+ it to the given uid/gid."""
+ for path, dirs, files in os.walk(dirpath):
+ os.lchown(path, uid, gid)
+ for filename in files:
+ os.lchown(os.path.join(path, filename), uid, gid)
+
+ def localPath(self, relpath):
+ """Return a local path to a remote file.
+
+ If the file is on an nfs mount, use that, otherwise download a copy"""
+ if self.options.topurl:
+ fn = "%s/local/%s" % (self.workdir, relpath)
+ if os.path.exists(fn):
+ # We've already downloaded this file,
+ # just return the existing local path
+ return fn
+ self.logger.debug("Downloading %s", relpath)
+ url = "%s/%s" % (self.options.topurl, relpath)
+ fsrc = urllib2.urlopen(url)
+ if not os.path.exists(os.path.dirname(fn)):
+ os.makedirs(os.path.dirname(fn))
+ fdst = file(fn, 'w')
+ shutil.copyfileobj(fsrc, fdst)
+ fsrc.close()
+ fdst.close()
+ else:
+ fn = "%s/%s" % (self.options.topdir, relpath)
+ return fn
+
+ def subtask(self, method, arglist, **opts):
+ return self.session.host.subtask(method, arglist, self.id, **opts)
+
+ def subtask2(self, __taskopts, __method, *args, **kwargs):
+ return self.session.host.subtask2(self.id, __taskopts, __method, *args, **kwargs)
+
+ def find_arch(self, arch, host, tag):
+ """
+ For noarch tasks, find a canonical arch that is supported by both the host and tag.
+ If the arch is anything other than noarch, return it unmodified.
+ """
+ if arch != "noarch":
+ return arch
+
+ # We need a concrete arch. Pick one that:
+ # a) this host can handle
+ # b) the build tag can support
+ # c) is canonical
+ host_arches = host['arches']
+ if not host_arches:
+ raise koji.BuildError, "No arch list for this host: %s" % host['name']
+ tag_arches = tag['arches']
+ if not tag_arches:
+ raise koji.BuildError, "No arch list for tag: %s" % tag['name']
+ # index canonical host arches
+ host_arches = set([koji.canonArch(a) for a in host_arches.split()])
+ # index canonical tag arches
+ tag_arches = set([koji.canonArch(a) for a in tag_arches.split()])
+ # find the intersection of host and tag arches
+ common_arches = list(host_arches & tag_arches)
+ if common_arches:
+ # pick one of the common arches randomly
+ # need to re-seed the prng or we'll get the same arch every time,
+ # because we just forked from a common parent
+ random.seed()
+ arch = random.choice(common_arches)
+ self.logger.info('Valid arches: %s, using: %s' % (' '.join(common_arches), arch))
+ return arch
+ else:
+ # no overlap
+ raise koji.BuildError, "host %s (%s) does not support any arches of tag %s (%s)" % \
+ (host['name'], ', '.join(host_arches), tag['name'], ', '.join(tag_arches))
+
+ def getRepo(self, tag):
+ """
+ Get the active repo for the given tag. If there is no repo available,
+ wait for a repo to be created.
+ """
+ repo_info = self.session.getRepo(tag)
+ if not repo_info:
+ #make sure there is a target
+ taginfo = self.session.getTag(tag, strict=True)
+ targets = self.session.getBuildTargets(buildTagID=taginfo['id'])
+ if not targets:
+ raise koji.BuildError, 'no repo (and no target) for tag %s' % taginfo['name']
+ #wait for it
+ task_id = self.session.host.subtask(method='waitrepo',
+ arglist=[tag, None, None],
+ parent=self.id)
+ repo_info = self.wait(task_id)[task_id]
+ return repo_info
+
+
+class FakeTask(BaseTaskHandler):
+ Methods = ['someMethod']
+ Foreground = True
+ def handler(self, *args):
+ self.logger.info("This is a fake task. Args: " + str(args))
+ return 42
+
+
+class SleepTask(BaseTaskHandler):
+ Methods = ['sleep']
+ _taskWeight = 0.25
+ def handler(self, n):
+ self.logger.info("Sleeping for %s seconds" % n)
+ time.sleep(n)
+ self.logger.info("Finished sleeping")
+
+class ForkTask(BaseTaskHandler):
+ Methods = ['fork']
+ def handler(self, n=5, m=37):
+ for i in xrange(n):
+ os.spawnvp(os.P_NOWAIT, 'sleep', ['sleep',str(m)])
+
+class WaitTestTask(BaseTaskHandler):
+ Methods = ['waittest']
+ _taskWeight = 0.1
+ def handler(self,count,seconds=10):
+ tasks = []
+ for i in xrange(count):
+ task_id = self.session.host.subtask(method='sleep',
+ arglist=[seconds],
+ label=str(i),
+ parent=self.id)
+ tasks.append(task_id)
+ results = self.wait(all=True)
+ self.logger.info(pprint.pformat(results))
+
+
+class SubtaskTask(BaseTaskHandler):
+ Methods = ['subtask']
+ _taskWeight = 0.1
+ def handler(self,n=4):
+ if n > 0:
+ task_id = self.session.host.subtask(method='subtask',
+ arglist=[n-1],
+ label='foo',
+ parent=self.id)
+ self.wait(task_id)
+ else:
+ task_id = self.session.host.subtask(method='sleep',
+ arglist=[15],
+ label='bar',
+ parent=self.id)
+ self.wait(task_id)
+
+
+class DefaultTask(BaseTaskHandler):
+ """Used when no matching method is found"""
+ Methods = ['default']
+ _taskWeight = 0.1
+ def handler(self,*args,**opts):
+ raise koji.GenericError, "Invalid method: %s" % self.method
+
+
+class ShutdownTask(BaseTaskHandler):
+ Methods = ['shutdown']
+ _taskWeight = 0.0
+ Foreground = True
+ def handler(self):
+ #note: this is a foreground task
+ raise ServerExit
+
+
+class RestartTask(BaseTaskHandler):
+ """Gracefully restart the daemon"""
+
+ Methods = ['restart']
+ _taskWeight = 0.1
+ Foreground = True
+ def handler(self, host):
+ #note: this is a foreground task
+ if host['id'] != self.session.host.getID():
+ raise koji.GenericError, "Host mismatch"
+ self.manager.restart_pending = True
+ return "graceful restart initiated"
+
+
+class RestartVerifyTask(BaseTaskHandler):
+ """Verify that the daemon has restarted"""
+
+ Methods = ['restartVerify']
+ _taskWeight = 0.1
+ Foreground = True
+ def handler(self, task_id, host):
+ #note: this is a foreground task
+ tinfo = self.session.getTaskInfo(task_id)
+ state = koji.TASK_STATES[tinfo['state']]
+ if state != 'CLOSED':
+ raise koji.GenericError, "Stage one restart task is %s" % state
+ if host['id'] != self.session.host.getID():
+ raise koji.GenericError, "Host mismatch"
+ if self.manager.start_time < tinfo['completion_ts']:
+ start_time = time.asctime(time.localtime(self.manager.start_time))
+ raise koji.GenericError, "Restart failed - start time is %s" % start_time
+
+
+class RestartHostsTask(BaseTaskHandler):
+ """Gracefully restart the daemon"""
+
+ Methods = ['restartHosts']
+ _taskWeight = 0.1
+ def handler(self):
+ hosts = self.session.listHosts(enabled=True)
+ if not hosts:
+ raise koji.GenericError, "No hosts enabled"
+ this_host = self.session.host.getID()
+ subtasks = []
+ my_tasks = None
+ for host in hosts:
+ #note: currently task assignments bypass channel restrictions
+ task1 = self.subtask('restart', [host], assign=host['id'], label="restart %i" % host['id'])
+ task2 = self.subtask('restartVerify', [task1, host], assign=host['id'], label="sleep %i" % host['id'])
+ subtasks.append(task1)
+ subtasks.append(task2)
+ if host['id'] == this_host:
+ my_tasks = [task1, task2]
+ if not my_tasks:
+ raise koji.GenericError, 'This host is not enabled'
+ self.wait(my_tasks[0])
+ #see if we've restarted
+ if not self.session.taskFinished(my_tasks[1]):
+ raise ServerRestart
+ #raising this inside a task handler causes TaskManager.runTask
+ #to free the task so that it will not block a pending restart
+ if subtasks:
+ self.wait(subtasks, all=True)
+ return
+
+
+class DependantTask(BaseTaskHandler):
+
+ Methods = ['dependantTask']
+ #mostly just waiting on other tasks
+ _taskWeight = 0.2
+
+ def handler(self, wait_list, task_list):
+ for task in wait_list:
+ if not isinstance(task, int) or not self.session.getTaskInfo(task):
+ self.logger.debug("invalid task id %s, removing from wait_list" % task)
+ wait_list.remove(task)
+
+ # note, tasks in wait_list are not children of this task so we can't
+ # just use self.wait()
+ while wait_list:
+ for task in wait_list[:]:
+ if self.session.taskFinished(task):
+ info = self.session.getTaskInfo(task)
+ if info and koji.TASK_STATES[info['state']] in ['CANCELED','FAILED']:
+ raise koji.GenericError, "Dependency %s failed to complete." % info['id']
+ wait_list.remove(task)
+ # let the system rest before polling again
+ time.sleep(1)
+
+ subtasks = []
+ for task in task_list:
+ # **((len(task)>2 and task[2]) or {}) expands task[2] into opts if it exists, allows for things like 'priority=15'
+ task_id = self.session.host.subtask(method=task[0], arglist=task[1], parent=self.id, **((len(task)>2 and task[2]) or {}))
+ if task_id:
+ subtasks.append(task_id)
+ if subtasks:
+ self.wait(subtasks, all=True)
+
+class MultiPlatformTask(BaseTaskHandler):
+ def buildWrapperRPM(self, spec_url, build_task_id, build_target, build, repo_id, **opts):
+ task = self.session.getTaskInfo(build_task_id)
+ arglist = [spec_url, build_target, build, task, {'repo_id': repo_id}]
+
+ rpm_task_id = self.session.host.subtask(method='wrapperRPM',
+ arglist=arglist,
+ label='rpm',
+ parent=self.id,
+ arch='noarch',
+ **opts)
+ results = self.wait(rpm_task_id)[rpm_task_id]
+ results['task_id'] = rpm_task_id
+
+ return results
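
A sketch of a custom handler in the style of the classes above (the
method name and argument are invented; a real instance also needs the
id, session, and options objects that the daemon normally supplies):

    from koji.tasks import BaseTaskHandler

    class HelloTask(BaseTaskHandler):
        Methods = ['hello']
        _taskWeight = 0.1

        def handler(self, name='world'):
            self.logger.info('Saying hello to %s' % name)
            # results can be uploaded with self.uploadFile()/uploadTree(),
            # or children spawned with self.subtask() and self.wait()
            return 'hello %s' % name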
diff --git a/koji/util.py b/koji/util.py
new file mode 100644
index 0000000..80e511f
--- /dev/null
+++ b/koji/util.py
@@ -0,0 +1,611 @@
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Mike Bonnet <mikeb at redhat.com>
+
+import calendar
+from fnmatch import fnmatch
+import koji
+import logging
+import os
+import os.path
+import re
+import resource
+import stat
+import sys
+import time
+import ConfigParser
+from zlib import adler32
+
+try:
+ from hashlib import md5 as md5_constructor
+except ImportError:
+ from md5 import new as md5_constructor
+try:
+ from hashlib import sha1 as sha1_constructor
+except ImportError:
+ from sha import new as sha1_constructor
+
+def _changelogDate(cldate):
+ return time.strftime('%a %b %d %Y', time.strptime(koji.formatTime(cldate), '%Y-%m-%d %H:%M:%S'))
+
+def formatChangelog(entries):
+ """Format a list of changelog entries (dicts)
+ into a string representation."""
+ result = ''
+ for entry in entries:
+ result += """* %s %s
+%s
+
+""" % (_changelogDate(entry['date']), entry['author'].encode("utf-8"),
+ entry['text'].encode("utf-8"))
+
+ return result
+
+DATE_RE = re.compile(r'(\d+)-(\d+)-(\d+)')
+TIME_RE = re.compile(r'(\d+):(\d+):(\d+)')
+
+def parseTime(val):
+ """
+ Parse a string time in either "YYYY-MM-DD HH24:MI:SS" or "YYYY-MM-DD"
+ format into seconds since the epoch. If the time portion
+ is not specified, it will be padded with zeros. The string time is treated
+ as UTC. If the time string cannot be parsed into a valid date, None will be
+ returned.
+ """
+ result = DATE_RE.search(val)
+ if not result:
+ return None
+ else:
+ date = [int(r) for r in result.groups()]
+ time = [0, 0, 0]
+ rest = val[result.end():].strip()
+ result = TIME_RE.search(rest)
+ if result:
+ time = [int(r) for r in result.groups()]
+ return calendar.timegm(date + time + [0, 0, 0])
+
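+# Examples (values hypothetical): a missing time portion is padded with
+# zeros and unparseable strings yield None, e.g.:
+# parseTime('2015-12-02') == parseTime('2015-12-02 00:00:00') --> True
+# parseTime('2015-12-02 23:28:36') - parseTime('2015-12-02') --> 84516
+# parseTime('not a date') --> None
+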
+def checkForBuilds(session, tag, builds, event, latest=False):
+ """Check that the builds existed in tag at the time of the event.
+ If latest=True, check that the builds are the latest in tag."""
+ for build in builds:
+ if latest:
+ tagged_list = session.getLatestBuilds(tag, event=event, package=build['name'])
+ else:
+ tagged_list = session.listTagged(tag, event=event, package=build['name'], inherit=True)
+ for tagged in tagged_list:
+ if tagged['version'] == build['version'] and tagged['release'] == build['release']:
+ break
+ else:
+ return False
+
+ return True
+
+def duration(start):
+ """Return the duration between start and now in MM:SS format"""
+ elapsed = time.time() - start
+ mins = int(elapsed / 60)
+ secs = int(elapsed % 60)
+ return '%s:%02i' % (mins, secs)
+
+def printList(l):
+ """Print the contents of the list comma-separated"""
+ if len(l) == 0:
+ return ''
+ elif len(l) == 1:
+ return l[0]
+ elif len(l) == 2:
+ return ' and '.join(l)
+ else:
+ ret = ', '.join(l[:-1])
+ ret += ', and '
+ ret += l[-1]
+ return ret
+
+def multi_fnmatch(s, patterns):
+ """Returns true if s matches any pattern in the list
+
+ If patterns is a string, it will be split() first
+ """
+ if isinstance(patterns, basestring):
+ patterns = patterns.split()
+ for pat in patterns:
+ if fnmatch(s, pat):
+ return True
+ return False
+
+def dslice(dict, keys, strict=True):
+ """Returns a new dictionary containing only the specified keys"""
+ ret = {}
+ for key in keys:
+ if strict or dict.has_key(key):
+ #for strict we skip the has_key check and let the dict generate the KeyError
+ ret[key] = dict[key]
+ return ret
+
+def dslice_ex(dict, keys, strict=True):
+ """Returns a new dictionary with only the specified keys removed"""
+ ret = dict.copy()
+ for key in keys:
+ if strict or ret.has_key(key):
+ del ret[key]
+ return ret
+
+def call_with_argcheck(func, args, kwargs=None):
+ """Call function, raising ParameterError if args do not match"""
+ if kwargs is None:
+ kwargs = {}
+ try:
+ return func(*args, **kwargs)
+ except TypeError, e:
+ if sys.exc_info()[2].tb_next is None:
+ # The stack is only one high, so the error occurred in this function.
+ # Therefore, we assume the TypeError is due to a parameter mismatch
+ # in the above function call.
+ raise koji.ParameterError, str(e)
+ raise
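+
+# Usage sketch (illustrative):
+#   call_with_argcheck(len, ['abc'])       -> 3
+#   call_with_argcheck(len, ['abc', 'x'])  -> raises koji.ParameterError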
+
+
+class HiddenValue(object):
+ """A wrapper that prevents a value being accidentally printed"""
+
+ def __init__(self, value):
+ if isinstance(value, HiddenValue):
+ self.value = value.value
+ else:
+ self.value = value
+
+ def __str__(self):
+ return "[value hidden]"
+
+ def __repr__(self):
+ return "HiddenValue()"
+
+
+class LazyValue(object):
+ """Used to represent a value that is generated by a function call at access time
+ """
+
+ def __init__(self, func, args, kwargs=None, cache=False):
+ if kwargs is None:
+ kwargs = {}
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+ self.cache = cache
+
+ def get(self):
+ if hasattr(self, '_value'):
+ return self._value
+ value = self.func(*self.args, **self.kwargs)
+ if self.cache:
+ self._value = value
+ return value
+
+
+class LazyString(LazyValue):
+ """Lazy values that should be expanded when printed"""
+
+ def __str__(self):
+ return str(self.get())
+
+
+def lazy_eval(value):
+ if isinstance(value, LazyValue):
+ return value.get()
+ return value
+
+
+class LazyDict(dict):
+ """A container for lazy data
+
+ fields can refer to function calls, which can optionally be cached
+ """
+
+ def __getitem__(self, key):
+ return lazy_eval(super(LazyDict, self).__getitem__(key))
+
+ def lazyset(self, key, func, args, kwargs=None, cache=False):
+ self[key] = LazyValue(func, args, kwargs=kwargs, cache=cache)
+
+ def get(self, *args, **kwargs):
+ return lazy_eval(super(LazyDict, self).get(*args, **kwargs))
+
+ def copy(self):
+ return LazyDict(self)
+
+ def values(self):
+ return [lazy_eval(val) for val in super(LazyDict, self).values()]
+
+ def items(self):
+ return [(key, lazy_eval(val)) for key, val in super(LazyDict, self).items()]
+
+ def itervalues(self):
+ for val in super(LazyDict, self).itervalues():
+ yield lazy_eval(val)
+
+ def iteritems(self):
+ for key, val in super(LazyDict, self).iteritems():
+ yield key, lazy_eval(val)
+
+ def pop(self, key, *args, **kwargs):
+ return lazy_eval(super(LazyDict, self).pop(key, *args, **kwargs))
+
+ def popitem(self):
+ key, val = super(LazyDict, self).popitem()
+ return key, lazy_eval(val)
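+
+# Usage sketch (illustrative; time.time is just a stand-in callable):
+#   d = LazyDict()
+#   d.lazyset('now', time.time, (), cache=True)
+#   d['now']  -> result of time.time(), computed on first access, then cached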
+
+
+class LazyRecord(object):
+ """A object whose attributes can reference lazy data
+
+ Use lazysetattr to set lazy attributes, or just set them to a LazyValue
+ object directly"""
+
+ def __init__(self, base=None):
+ if base is not None:
+ self.__dict__.update(base.__dict__)
+ self._base_record = base
+
+ def __getattribute__(self, name):
+ try:
+ val = object.__getattribute__(self, name)
+ except AttributeError:
+ base = object.__getattribute__(self, '_base_record')
+ val = getattr(base, name)
+ return lazy_eval(val)
+
+
+def lazysetattr(object, name, func, args, kwargs=None, cache=False):
+ if not isinstance(object, LazyRecord):
+ raise TypeError, 'object does not support lazy attributes'
+ value = LazyValue(func, args, kwargs=kwargs, cache=cache)
+ setattr(object, name, value)
+
+
+def rmtree(path):
+ """Delete a directory tree without crossing fs boundaries"""
+ st = os.lstat(path)
+ if not stat.S_ISDIR(st.st_mode):
+ raise koji.GenericError, "Not a directory: %s" % path
+ dev = st.st_dev
+ dirlist = []
+ for dirpath, dirnames, filenames in os.walk(path):
+ dirlist.append(dirpath)
+ newdirs = []
+ dirsyms = []
+ for fn in dirnames:
+ path = os.path.join(dirpath, fn)
+ st = os.lstat(path)
+ if st.st_dev != dev:
+ # don't cross fs boundary
+ continue
+ if stat.S_ISLNK(st.st_mode):
+ #os.walk includes symlinks to dirs here
+ dirsyms.append(fn)
+ continue
+ newdirs.append(fn)
+ #only walk our filtered dirs
+ dirnames[:] = newdirs
+ for fn in filenames + dirsyms:
+ path = os.path.join(dirpath, fn)
+ st = os.lstat(path)
+ if st.st_dev != dev:
+ #shouldn't happen, but just to be safe...
+ continue
+ os.unlink(path)
+ dirlist.reverse()
+ for dirpath in dirlist:
+ if os.listdir(dirpath):
+ # dir not empty. could happen if a mount was present
+ continue
+ os.rmdir(dirpath)
+
+def _relpath(path, start=getattr(os.path, 'curdir', '.')):
+ """Backport of os.path.relpath for python<2.6"""
+
+ sep = getattr(os.path, 'sep', '/')
+ pardir = getattr(os.path, 'pardir', '..')
+ if not path:
+ raise ValueError("no path specified")
+ start_list = [x for x in os.path.abspath(start).split(sep) if x]
+ path_list = [x for x in os.path.abspath(path).split(sep) if x]
+ i = -1
+ for i in range(min(len(start_list), len(path_list))):
+ if start_list[i] != path_list[i]:
+ break
+ else:
+ i += 1
+ rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
+ if not rel_list:
+ return getattr(os.path, 'curdir', '.')
+ return os.path.join(*rel_list)
+
+relpath = getattr(os.path, 'relpath', _relpath)
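+
+# Usage sketch (illustrative):
+#   relpath('/mnt/koji/packages/foo', '/mnt/koji')  -> 'packages/foo'
+#   relpath('/mnt/koji', '/mnt/koji/packages/foo')  -> '../..'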
+
+def eventFromOpts(session, opts):
+ """Determine event id from standard cli options
+
+ Standard options are:
+ event: an event id (int)
+ ts: an event timestamp (int)
+ repo: pull event from given repo
+ """
+ event_id = getattr(opts, 'event')
+ if event_id:
+ return session.getEvent(event_id)
+ ts = getattr(opts, 'ts')
+ if ts:
+ return session.getLastEvent(before=ts)
+ repo = getattr(opts, 'repo')
+ if repo:
+ rinfo = session.repoInfo(repo)
+ if rinfo:
+ return {'id' : rinfo['create_event'],
+ 'ts' : rinfo['create_ts'] }
+ return None
+
+def filedigestAlgo(hdr):
+ """
+ Get the file digest algorithm used in hdr.
+ If there is no algorithm flag in the header,
+ default to md5. If the flag contains an unknown,
+ non-None value, return 'unknown'.
+ """
+ # need to use the header ID hard-coded into Koji so we're not dependent on the
+ # version of rpm installed on the hub
+ digest_algo_id = hdr[koji.RPM_TAG_FILEDIGESTALGO]
+ if not digest_algo_id:
+ # certain versions of rpm return an empty list instead of None
+ # for missing header fields
+ digest_algo_id = None
+ digest_algo = koji.RPM_FILEDIGESTALGO_IDS.get(digest_algo_id, 'unknown')
+ return digest_algo.lower()
+
+def parseStatus(rv, prefix):
+ if isinstance(prefix, list) or isinstance(prefix, tuple):
+ prefix = ' '.join(prefix)
+ if os.WIFSIGNALED(rv):
+ return '%s was killed by signal %i' % (prefix, os.WTERMSIG(rv))
+ elif os.WIFEXITED(rv):
+ return '%s exited with status %i' % (prefix, os.WEXITSTATUS(rv))
+ else:
+ return '%s terminated for unknown reasons' % prefix
+
+def isSuccess(rv):
+ """Return True if rv indicates successful completion
+ (exited with status 0), False otherwise."""
+ if os.WIFEXITED(rv) and os.WEXITSTATUS(rv) == 0:
+ return True
+ else:
+ return False
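+
+# Usage sketch (illustrative; rv is an os.wait()-style status word):
+#   isSuccess(0)                   -> True (exited with status 0)
+#   parseStatus(256, 'mock')       -> 'mock exited with status 1'
+#   parseStatus(9, ['rpm', '-q'])  -> 'rpm -q was killed by signal 9'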
+
+def setup_rlimits(opts, logger=None):
+ logger = logger or logging.getLogger("koji")
+ for key in opts:
+ if not key.startswith('RLIMIT_') or not opts[key]:
+ continue
+ rcode = getattr(resource, key, None)
+ if rcode is None:
+ continue
+ orig = resource.getrlimit(rcode)
+ try:
+ limits = [int(x) for x in opts[key].split()]
+ except ValueError:
+ logger.error("Invalid resource limit: %s=%s", key, opts[key])
+ continue
+ if len(limits) not in (1,2):
+ logger.error("Invalid resource limit: %s=%s", key, opts[key])
+ continue
+ if len(limits) == 1:
+ limits.append(orig[1])
+ logger.warn('Setting resource limit: %s = %r', key, limits)
+ try:
+ resource.setrlimit(rcode, tuple(limits))
+ except ValueError, e:
+ logger.error("Unable to set %s: %s", key, e)
+
+class adler32_constructor(object):
+
+ #mimicking the hashlib constructors
+ def __init__(self, arg=''):
+ self._value = adler32(arg) & 0xffffffffL
+ #the bitwise and works around a bug in some versions of python
+ #see: http://bugs.python.org/issue1202
+
+ def update(self, arg):
+ self._value = adler32(arg, self._value) & 0xffffffffL
+
+ def digest(self):
+ return self._value
+
+ def hexdigest(self):
+ return "%08x" % self._value
+
+ def copy(self):
+ dup = adler32_constructor()
+ dup._value = self._value
+ return dup
+
+ digest_size = 4
+ block_size = 1 #I think
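+
+# Usage sketch (illustrative):
+#   adler32_constructor('abc').hexdigest()  -> '024d0127'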
+
+def tsort(parts):
+ """Given a partial ordering, return a totally ordered list.
+
+ parts is a dict describing the partial ordering. Each value is the
+ set of keys that its key depends on.
+
+ The return value is a list of sets, each of which has only
+ dependencies on items in previous entries in the list."""
+ parts = parts.copy()
+ result = []
+ while True:
+ level = set([name for name, deps in parts.iteritems() if not deps])
+ if not level:
+ break
+ result.append(level)
+ parts = dict([(name, deps - level) for name, deps in parts.iteritems()
+ if name not in level])
+ if parts:
+ raise ValueError, 'total ordering not possible'
+ return result
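+
+# Usage sketch (illustrative):
+#   tsort({'a': set(), 'b': set(['a']), 'c': set(['a', 'b'])})
+#     -> [set(['a']), set(['b']), set(['c'])]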
+
+class MavenConfigOptAdapter(object):
+ """
+ Wrap a ConfigParser so it looks like a optparse.Values instance
+ used by maven-build.
+ """
+ MULTILINE = ['properties', 'envs']
+ MULTIVALUE = ['goals', 'profiles', 'packages',
+ 'jvm_options', 'maven_options', 'buildrequires']
+
+ def __init__(self, conf, section):
+ self._conf = conf
+ self._section = section
+
+ def __getattr__(self, name):
+ if self._conf.has_option(self._section, name):
+ value = self._conf.get(self._section, name)
+ if name in self.MULTIVALUE:
+ value = value.split()
+ elif name in self.MULTILINE:
+ value = value.splitlines()
+ return value
+ raise AttributeError, name
+
+def maven_opts(values, chain=False, scratch=False):
+ """
+ Convert the argument (an optparse.Values object) to a dict of build options
+ suitable for passing to maven-build or maven-chain.
+ """
+ opts = {}
+ for key in ('scmurl', 'patches', 'specfile', 'goals', 'profiles', 'packages',
+ 'jvm_options', 'maven_options'):
+ val = getattr(values, key, None)
+ if val:
+ opts[key] = val
+ props = {}
+ for prop in getattr(values, 'properties', []):
+ fields = prop.split('=', 1)
+ if len(fields) != 2:
+ fields.append(None)
+ props[fields[0]] = fields[1]
+ if props:
+ opts['properties'] = props
+ envs = {}
+ for env in getattr(values, 'envs', []):
+ fields = env.split('=', 1)
+ if len(fields) != 2:
+ raise ValueError, "Environment variables must be in NAME=VALUE format"
+ envs[fields[0]] = fields[1]
+ if envs:
+ opts['envs'] = envs
+ if chain:
+ val = getattr(values, 'buildrequires', [])
+ if val:
+ opts['buildrequires'] = val
+ if scratch and not chain:
+ opts['scratch'] = True
+ return opts
+
+def maven_params(config, package, chain=False, scratch=False):
+ values = MavenConfigOptAdapter(config, package)
+ return maven_opts(values, chain=chain, scratch=scratch)
+
+def wrapper_params(config, package, chain=False, scratch=False):
+ params = {}
+ values = MavenConfigOptAdapter(config, package)
+ params['type'] = getattr(values, 'type', None)
+ params['scmurl'] = getattr(values, 'scmurl', None)
+ params['buildrequires'] = getattr(values, 'buildrequires', [])
+ if not scratch:
+ params['create_build'] = True
+ return params
+
+def parse_maven_params(confs, chain=False, scratch=False):
+ """
+ Parse .ini files that contain parameters to launch a Maven build.
+
+ Return a map whose keys are package names and values are config parameters.
+ """
+ if not isinstance(confs, (list, tuple)):
+ confs = [confs]
+ config = ConfigParser.ConfigParser()
+ for conf in confs:
+ conf_fd = file(conf)
+ config.readfp(conf_fd)
+ conf_fd.close()
+ builds = {}
+ for package in config.sections():
+ params = {}
+ buildtype = 'maven'
+ if config.has_option(package, 'type'):
+ buildtype = config.get(package, 'type')
+ if buildtype == 'maven':
+ params = maven_params(config, package, chain=chain, scratch=scratch)
+ elif buildtype == 'wrapper':
+ params = wrapper_params(config, package, chain=chain, scratch=scratch)
+ if len(params.get('buildrequires')) != 1:
+ raise ValueError, "A wrapper-rpm must depend on exactly one package"
+ else:
+ raise ValueError, "Unsupported build type: %s" % buildtype
+ if not 'scmurl' in params:
+ raise ValueError, "%s is missing the scmurl parameter" % package
+ builds[package] = params
+ if not builds:
+ raise ValueError, "No sections found in: %s" % ', '.join(confs)
+ return builds
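+
+# Illustrative .ini section (hypothetical values):
+#   [mypackage]
+#   type = maven
+#   scmurl = git://example.com/mypackage.git#mybranch
+#   goals = install
+#   properties = maven.test.skip=true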
+
+def parse_maven_param(confs, chain=False, scratch=False, section=None):
+ """
+ Parse .ini files that contain parameters to launch a Maven build.
+
+ Return a map that contains a single entry corresponding to the given
+ section of the .ini file. If the config file only contains a single
+ section, section does not need to be specified.
+ """
+ if not isinstance(confs, (list, tuple)):
+ confs = [confs]
+ builds = parse_maven_params(confs, chain=chain, scratch=scratch)
+ if section:
+ if section in builds:
+ builds = {section: builds[section]}
+ else:
+ raise ValueError, "Section %s does not exist in: %s" % (section, ', '.join(confs))
+ elif len(builds) > 1:
+ raise ValueError, "Multiple sections in: %s, you must specify the section" % ', '.join(confs)
+ return builds
+
+def parse_maven_chain(confs, scratch=False):
+ """
+ Parse maven-chain config.
+
+ confs is a path to a config file or a list of paths to config files.
+
+ Return a map whose keys are package names and values are config parameters.
+ """
+ builds = parse_maven_params(confs, chain=True, scratch=scratch)
+ depmap = {}
+ for package, params in builds.items():
+ depmap[package] = set(params.get('buildrequires', []))
+ try:
+ order = tsort(depmap)
+ except ValueError, e:
+ raise ValueError, 'No possible build order, missing/circular dependencies'
+ return builds
diff --git a/plugins/Makefile b/plugins/Makefile
new file mode 100644
index 0000000..0bbf748
--- /dev/null
+++ b/plugins/Makefile
@@ -0,0 +1,24 @@
+PYTHON=python
+PLUGINDIR = /usr/lib/koji-hub-plugins
+FILES = $(wildcard *.py)
+CONFDIR = /etc/koji-hub/plugins
+CONFFILES = $(wildcard *.conf)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *.pyo *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(PLUGINDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(PLUGINDIR)
+ $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(PLUGINDIR)', 1, '$(PLUGINDIR)', 1)"
+ mkdir -p $(DESTDIR)/$(CONFDIR)
+ install -p -m 644 $(CONFFILES) $(DESTDIR)/$(CONFDIR)
diff --git a/plugins/echo.py b/plugins/echo.py
new file mode 100644
index 0000000..6727d41
--- /dev/null
+++ b/plugins/echo.py
@@ -0,0 +1,15 @@
+# Example Koji callback
+# Copyright (c) 2009-2014 Red Hat, Inc.
+# This callback simply logs all of its args using the logging module
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+
+from koji.plugin import callbacks, callback, ignore_error
+import logging
+
+@callback(*callbacks.keys())
+@ignore_error
+def echo(cbtype, *args, **kws):
+ logging.getLogger('koji.plugin.echo').info('Called the %s callback, args: %s; kws: %s',
+ cbtype, str(args), str(kws))
diff --git a/plugins/messagebus.conf b/plugins/messagebus.conf
new file mode 100644
index 0000000..fe18a1c
--- /dev/null
+++ b/plugins/messagebus.conf
@@ -0,0 +1,24 @@
+# config file for the Koji messagebus plugin
+
+[broker]
+host = amqp.example.com
+port = 5671
+ssl = true
+timeout = 10
+heartbeat = 60
+# PLAIN options
+auth = PLAIN
+username = guest
+password = guest
+# GSSAPI options
+# auth = GSSAPI
+# keytab = /etc/koji-hub/plugins/koji-messagebus.keytab
+# principal = messagebus/koji.example.com@EXAMPLE.COM
+
+[exchange]
+name = koji.events
+type = topic
+durable = true
+
+[topic]
+prefix = koji.event
diff --git a/plugins/messagebus.py b/plugins/messagebus.py
new file mode 100644
index 0000000..3f3dc6f
--- /dev/null
+++ b/plugins/messagebus.py
@@ -0,0 +1,226 @@
+# Koji callback for sending notifications about events to a messagebus (amqp broker)
+# Copyright (c) 2009-2014 Red Hat, Inc.
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+
+from koji.plugin import callbacks, callback, ignore_error
+import ConfigParser
+import logging
+import qpid.messaging
+import qpid.messaging.transports
+from ssl import wrap_socket
+import socket
+import os
+import koji   # needed for koji.PluginError raised below
+import krbV
+
+MAX_KEY_LENGTH = 255
+CONFIG_FILE = '/etc/koji-hub/plugins/messagebus.conf'
+
+config = None
+session = None
+target = None
+
+def connect_timeout(host, port, timeout):
+ for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = socket.socket(af, socktype, proto)
+ sock.settimeout(timeout)
+ try:
+ sock.connect(sa)
+ break
+ except socket.error, msg:
+ sock.close()
+ else:
+ # If we got here then we couldn't connect (yet)
+ raise
+ return sock
+
+class tlstimeout(qpid.messaging.transports.tls):
+ def __init__(self, conn, host, port):
+ self.socket = connect_timeout(host, port, getattr(conn, '_timeout'))
+ if conn.tcp_nodelay:
+ self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ self.tls = wrap_socket(self.socket, keyfile=conn.ssl_keyfile, certfile=conn.ssl_certfile, ca_certs=conn.ssl_trustfile)
+ self.socket.setblocking(0)
+ self.state = None
+
+qpid.messaging.transports.TRANSPORTS['tls+timeout'] = tlstimeout
+
+class Connection(qpid.messaging.Connection):
+ """
+ A connection class which supports a timeout option
+ to the establish() method. Only necessary until
+ upstream Apache Qpid commit 1487578 is available in
+ a supported release.
+ """
+ @staticmethod
+ def establish(url=None, timeout=None, **options):
+ conn = Connection(url, **options)
+ conn._timeout = timeout
+ conn.open()
+ return conn
+
+ def _wait(self, predicate, timeout=None):
+ if timeout is None and hasattr(self, '_timeout'):
+ timeout = self._timeout
+ return qpid.messaging.Connection._wait(self, predicate, timeout)
+
+def get_sender():
+ global config, session, target
+ if session and target:
+ try:
+ return session.sender(target)
+ except:
+ logging.getLogger('koji.plugin.messagebus').warning('Error getting session, will retry', exc_info=True)
+ session = None
+ target = None
+
+ config = ConfigParser.SafeConfigParser()
+ config.read(CONFIG_FILE)
+ if not config.has_option('broker', 'timeout'):
+ config.set('broker', 'timeout', '60')
+ if not config.has_option('broker', 'heartbeat'):
+ config.set('broker', 'heartbeat', '60')
+
+ if config.getboolean('broker', 'ssl'):
+ url = 'amqps://'
+ else:
+ url = 'amqp://'
+ auth = config.get('broker', 'auth')
+ if auth == 'PLAIN':
+ url += config.get('broker', 'username') + '/'
+ url += config.get('broker', 'password') + '@'
+ elif auth == 'GSSAPI':
+ ccname = 'MEMORY:messagebus'
+ os.environ['KRB5CCNAME'] = ccname
+ ctx = krbV.default_context()
+ ccache = krbV.CCache(name=ccname, context=ctx)
+ cprinc = krbV.Principal(name=config.get('broker', 'principal'), context=ctx)
+ ccache.init(principal=cprinc)
+ keytab = krbV.Keytab(name='FILE:' + config.get('broker', 'keytab'), context=ctx)
+ ccache.init_creds_keytab(principal=cprinc, keytab=keytab)
+ else:
+ raise koji.PluginError, 'unsupported auth type: %s' % auth
+
+ url += config.get('broker', 'host') + ':'
+ url += config.get('broker', 'port')
+
+ conn = Connection.establish(url,
+ sasl_mechanisms=config.get('broker', 'auth'),
+ transport='tls+timeout',
+ timeout=config.getfloat('broker', 'timeout'),
+ heartbeat=config.getint('broker', 'heartbeat'))
+ sess = conn.session()
+ tgt = """%s;
+ { create: sender,
+ assert: always,
+ node: { type: topic,
+ durable: %s,
+ x-declare: { exchange: "%s",
+ type: %s } } }""" % \
+ (config.get('exchange', 'name'), config.getboolean('exchange', 'durable'),
+ config.get('exchange', 'name'), config.get('exchange', 'type'))
+ sender = sess.sender(tgt)
+ session = sess
+ target = tgt
+
+ return sender
+
+def _token_append(tokenlist, val):
+ # Replace any periods with underscores so we have a deterministic number of tokens
+ val = val.replace('.', '_')
+ tokenlist.append(val)
+
+def get_message_subject(msgtype, *args, **kws):
+ key = [config.get('topic', 'prefix'), msgtype]
+
+ if msgtype == 'PackageListChange':
+ _token_append(key, kws['tag']['name'])
+ _token_append(key, kws['package']['name'])
+ elif msgtype == 'TaskStateChange':
+ _token_append(key, kws['info']['method'])
+ _token_append(key, kws['attribute'])
+ elif msgtype == 'BuildStateChange':
+ info = kws['info']
+ _token_append(key, kws['attribute'])
+ _token_append(key, info['name'])
+ elif msgtype == 'Import':
+ _token_append(key, kws['type'])
+ elif msgtype in ('Tag', 'Untag'):
+ _token_append(key, kws['tag']['name'])
+ build = kws['build']
+ _token_append(key, build['name'])
+ _token_append(key, kws['user']['name'])
+ elif msgtype == 'RepoInit':
+ _token_append(key, kws['tag']['name'])
+ elif msgtype == 'RepoDone':
+ _token_append(key, kws['repo']['tag_name'])
+
+ key = '.'.join(key)
+ key = key[:MAX_KEY_LENGTH]
+ return key
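+
+# Illustrative subject (hypothetical names; '.' in names becomes '_'):
+#   Tag event, tag 'f21-updates', build 'bash', user 'jdoe'
+#     -> 'koji.event.Tag.f21-updates.bash.jdoe'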
+
+def get_message_headers(msgtype, *args, **kws):
+ headers = {'type': msgtype}
+
+ if msgtype == 'PackageListChange':
+ headers['tag'] = kws['tag']['name']
+ headers['package'] = kws['package']['name']
+ elif msgtype == 'TaskStateChange':
+ headers['id'] = kws['info']['id']
+ headers['parent'] = kws['info']['parent']
+ headers['method'] = kws['info']['method']
+ headers['attribute'] = kws['attribute']
+ headers['old'] = kws['old']
+ headers['new'] = kws['new']
+ elif msgtype == 'BuildStateChange':
+ info = kws['info']
+ headers['name'] = info['name']
+ headers['version'] = info['version']
+ headers['release'] = info['release']
+ headers['attribute'] = kws['attribute']
+ headers['old'] = kws['old']
+ headers['new'] = kws['new']
+ elif msgtype == 'Import':
+ headers['importType'] = kws['type']
+ elif msgtype in ('Tag', 'Untag'):
+ headers['tag'] = kws['tag']['name']
+ build = kws['build']
+ headers['name'] = build['name']
+ headers['version'] = build['version']
+ headers['release'] = build['release']
+ headers['user'] = kws['user']['name']
+ elif msgtype == 'RepoInit':
+ headers['tag'] = kws['tag']['name']
+ elif msgtype == 'RepoDone':
+ headers['tag'] = kws['repo']['tag_name']
+
+ return headers
+
+@callback(*[c for c in callbacks.keys() if c.startswith('post')])
+@ignore_error
+def send_message(cbtype, *args, **kws):
+ global config
+ sender = get_sender()
+ if cbtype.startswith('post'):
+ msgtype = cbtype[4:]
+ else:
+ msgtype = cbtype[3:]
+
+ data = kws.copy()
+ if args:
+ data['args'] = list(args)
+
+ exchange_type = config.get('exchange', 'type')
+ if exchange_type == 'topic':
+ subject = get_message_subject(msgtype, *args, **kws)
+ message = qpid.messaging.Message(subject=subject, content=data)
+ elif exchange_type == 'headers':
+ headers = get_message_headers(msgtype, *args, **kws)
+ message = qpid.messaging.Message(properties=headers, content=data)
+ else:
+ raise koji.PluginError, 'unsupported exchange type: %s' % exchange_type
+
+ sender.send(message, sync=True, timeout=config.getfloat('broker', 'timeout'))
+ sender.close(timeout=config.getfloat('broker', 'timeout'))
diff --git a/plugins/rpm2maven.conf b/plugins/rpm2maven.conf
new file mode 100644
index 0000000..900bf13
--- /dev/null
+++ b/plugins/rpm2maven.conf
@@ -0,0 +1,5 @@
+# config file for the Koji rpm2maven plugin
+
+[patterns]
+rpm_names = *-repolib
+artifact_paths = /usr/share/java/repository/maven2/*
diff --git a/plugins/rpm2maven.py b/plugins/rpm2maven.py
new file mode 100644
index 0000000..484a319
--- /dev/null
+++ b/plugins/rpm2maven.py
@@ -0,0 +1,107 @@
+# Koji callback for extracting Maven artifacts (.pom and .jar files)
+# from an rpm and making them available via the Koji-managed Maven repo.
+# Copyright (c) 2010-2014 Red Hat, Inc.
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+
+import koji
+from koji.context import context
+from koji.plugin import callback
+import ConfigParser
+import fnmatch
+import os
+import shutil
+import subprocess
+
+CONFIG_FILE = '/etc/koji-hub/plugins/rpm2maven.conf'
+
+config = None
+
+@callback('postImport')
+def maven_import(cbtype, *args, **kws):
+ global config
+ if not context.opts.get('EnableMaven', False):
+ return
+ if kws.get('type') != 'rpm':
+ return
+ buildinfo = kws['build']
+ rpminfo = kws['rpm']
+ filepath = kws['filepath']
+
+ if not config:
+ config = ConfigParser.SafeConfigParser()
+ config.read(CONFIG_FILE)
+ name_patterns = config.get('patterns', 'rpm_names').split()
+ for pattern in name_patterns:
+ if fnmatch.fnmatch(rpminfo['name'], pattern):
+ break
+ else:
+ return
+
+ tmpdir = os.path.join(koji.pathinfo.work(), 'rpm2maven', koji.buildLabel(buildinfo))
+ try:
+ if os.path.exists(tmpdir):
+ shutil.rmtree(tmpdir)
+ koji.ensuredir(tmpdir)
+ expand_rpm(filepath, tmpdir)
+ scan_and_import(buildinfo, rpminfo, tmpdir)
+ finally:
+ if os.path.exists(tmpdir):
+ shutil.rmtree(tmpdir)
+
+def expand_rpm(filepath, tmpdir):
+ devnull = file('/dev/null', 'r+')
+ rpm2cpio = subprocess.Popen(['/usr/bin/rpm2cpio', filepath],
+ stdout=subprocess.PIPE,
+ stdin=devnull, stderr=devnull,
+ close_fds=True)
+ cpio = subprocess.Popen(['/bin/cpio', '-id'],
+ stdin=rpm2cpio.stdout,
+ cwd=tmpdir,
+ stdout=devnull, stderr=devnull,
+ close_fds=True)
+ if rpm2cpio.wait() != 0 or cpio.wait() != 0:
+ raise koji.CallbackError, 'error extracting files from %s, ' \
+ 'rpm2cpio returned %s, cpio returned %s' % \
+ (filepath, rpm2cpio.wait(), cpio.wait())
+ devnull.close()
+
+def scan_and_import(buildinfo, rpminfo, tmpdir):
+ global config
+ path_patterns = config.get('patterns', 'artifact_paths').split()
+
+ maven_archives = []
+ for dirpath, dirnames, filenames in os.walk(tmpdir):
+ relpath = dirpath[len(tmpdir):]
+ for pattern in path_patterns:
+ if fnmatch.fnmatch(relpath, pattern):
+ break
+ else:
+ continue
+
+ poms = [f for f in filenames if f.endswith('.pom')]
+ if len(poms) != 1:
+ continue
+
+ pom_info = koji.parse_pom(os.path.join(dirpath, poms[0]))
+ maven_info = koji.pom_to_maven_info(pom_info)
+ maven_archives.append({'maven_info': maven_info,
+ 'files': [os.path.join(dirpath, f) for f in filenames]})
+
+ if not maven_archives:
+ return
+
+ # We don't know which pom is the top-level pom, so we don't know what Maven
+ # metadata to associate with the build. So we make something up.
+ maven_build = {'group_id': buildinfo['name'], 'artifact_id': rpminfo['name'],
+ 'version': '%(version)s-%(release)s' % buildinfo}
+ context.handlers.call('host.createMavenBuild', buildinfo, maven_build)
+
+ for entry in maven_archives:
+ maven_info = entry['maven_info']
+ for filepath in entry['files']:
+ if not context.handlers.call('getArchiveType', filename=filepath):
+ # unsupported archive type, skip it
+ continue
+ context.handlers.call('host.importArchive', filepath, buildinfo, 'maven', maven_info)
diff --git a/plugins/runroot.conf b/plugins/runroot.conf
new file mode 100644
index 0000000..d3d222b
--- /dev/null
+++ b/plugins/runroot.conf
@@ -0,0 +1,25 @@
+[paths]
+; comma-delimited list of default mountpoints
+; They will be mounted during each run. It is suggested that these
+; paths be mounted read-only and made writable via the extra_mounts
+; parameter for individual calls.
+; default_mounts = /mnt/archive,/mnt/workdir
+
+; comma-delimited list of safe roots.
+; Each extra_mount needs to start with one of these prefixes. Other paths
+; are not allowed for mounting. Only absolute paths are allowed here, no
+; wildcards.
+; safe_roots = /mnt/workdir/tmp
+
+; path substitutions: one tuple per line, fields delimited by a comma;
+; order is important.
+; Path prefixes which can be substituted for other mountpoints.
+; Usable for locations symlinked from other mounts.
+; path_subs = /mnt/archive/prehistory/,/mnt/prehistoric_disk/archive/prehistory
+
+; mount origins, order is important here, ordered by best catch
+; [path0]
+; mountpoint = /mnt/archive
+; path = archive.org:/vol/archive
+; fstype = nfs
+; options = ro,hard,intr,nosuid,nodev,noatime,tcp
diff --git a/plugins/runroot.py b/plugins/runroot.py
new file mode 100644
index 0000000..9a71c91
--- /dev/null
+++ b/plugins/runroot.py
@@ -0,0 +1,322 @@
+# kojid plugin
+
+import commands
+import koji
+import ConfigParser
+import os
+import platform
+compat_mode = False
+try:
+ import koji.tasks as tasks
+ from koji.tasks import scan_mounts
+ from koji.util import isSuccess as _isSuccess
+ from koji.util import parseStatus as _parseStatus
+ from koji.daemon import log_output
+ from __main__ import BuildRoot
+except ImportError:
+ compat_mode = True
+ #old way
+ import tasks
+ #XXX - stuff we need from kojid
+ from __main__ import BuildRoot, log_output, scan_mounts, _isSuccess, _parseStatus
+
+
+__all__ = ('RunRootTask',)
+
+CONFIG_FILE = '/etc/kojid/runroot.conf'
+
+
+class RunRootTask(tasks.BaseTaskHandler):
+
+ Methods = ['runroot']
+
+ _taskWeight = 2.0
+
+ def __init__(self, *args, **kwargs):
+ self._read_config()
+ return super(RunRootTask, self).__init__(*args, **kwargs)
+
+ def _get_path_params(self, path, rw=False):
+ found = False
+ for mount_data in self.config['paths']:
+ if path.startswith(mount_data['mountpoint']):
+ found = True
+ break
+ if not found:
+ raise koji.GenericError("bad config: missing corresponding mountpoint")
+ options = []
+ for o in mount_data['options'].split(','):
+ if rw and o == 'ro':
+ options.append('rw')
+ else:
+ options.append(o)
+ rel_path = path[len(mount_data['mountpoint']):]
+ rel_path = rel_path[1:] if rel_path.startswith('/') else rel_path
+ res = (os.path.join(mount_data['path'], rel_path), path, mount_data['fstype'], ','.join(options))
+ return res
+
+ def _read_config(self):
+ cp = ConfigParser.SafeConfigParser()
+ cp.read(CONFIG_FILE)
+ self.config = {
+ 'default_mounts': [],
+ 'safe_roots': [],
+ 'path_subs': [],
+ 'paths': [],
+ }
+
+ if cp.has_option('paths', 'default_mounts'):
+ self.config['default_mounts'] = cp.get('paths', 'default_mounts').split(',')
+ if cp.has_option('paths', 'safe_roots'):
+ self.config['safe_roots'] = cp.get('paths', 'safe_roots').split(',')
+ if cp.has_option('paths', 'path_subs'):
+ self.config['path_subs'] = [x.split(',') for x in cp.get('paths', 'path_subs').split('\n')]
+
+ count = 0
+ while True:
+ section_name = 'path%d' % count
+ if not cp.has_section(section_name):
+ break
+ try:
+ self.config['paths'].append({
+ 'mountpoint': cp.get(section_name, 'mountpoint'),
+ 'path': cp.get(section_name, 'path'),
+ 'fstype': cp.get(section_name, 'fstype'),
+ 'options': cp.get(section_name, 'options'),
+ })
+ except ConfigParser.NoOptionError:
+ raise koji.GenericError("bad config: missing options in %s section" % section_name)
+ count += 1
+
+ for path in self.config['default_mounts'] + self.config['safe_roots'] + [x[0] for x in self.config['path_subs']]:
+ if not path.startswith('/'):
+ raise koji.GenericError("bad config: all paths (default_mounts, safe_roots, path_subs) needs to be absolute: %s" % path)
+
+ def handler(self, root, arch, command, keep=False, packages=[], mounts=[], repo_id=None, skip_setarch=False, weight=None, upload_logs=None):
+ """Create a buildroot and run a command (as root) inside of it
+
+ Command may be a string or a list.
+
+ Returns a message indicating success if the command was successful, and
+ raises an error otherwise. Command output will be available in
+ runroot.log in the task output directory on the hub.
+
+ skip_setarch is a rough approximation of an old hack
+
+ the keep option is not used. keeping for compatibility for now...
+
+ upload_logs is a list of absolute paths that will be uploaded for
+ archiving on the hub. /tmp/runroot.log is always included; the list can
+ name additional logs (pungi.log, etc.)
+ """
+ if weight is not None:
+ weight = max(weight, 0.5)
+ self.session.host.setTaskWeight(self.id, weight)
+ #noarch is funny
+ if arch == "noarch":
+ #we need a buildroot arch. Pick one that:
+ # a) this host can handle
+ # b) the build tag can support
+ # c) is canonical
+ host_arches = self.session.host.getHost()['arches']
+ if not host_arches:
+ raise koji.BuildError, "No arch list for this host"
+ tag_arches = self.session.getBuildConfig(root)['arches']
+ if not tag_arches:
+ raise koji.BuildError, "No arch list for tag: %s" % root
+ #index canonical host arches
+ host_arches = dict([(koji.canonArch(a),1) for a in host_arches.split()])
+ #pick the first suitable match from tag's archlist
+ for br_arch in tag_arches.split():
+ br_arch = koji.canonArch(br_arch)
+ if host_arches.has_key(br_arch):
+ #we're done
+ break
+ else:
+ #no overlap
+ raise koji.BuildError, "host does not match tag arches: %s (%s)" % (root, tag_arches)
+ else:
+ br_arch = arch
+ if repo_id:
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ if repo_info['tag_name'] != root:
+ raise koji.BuildError, "build tag (%s) does not match repo tag (%s)" % (root, repo_info['tag_name'])
+ if repo_info['state'] not in (koji.REPO_STATES['READY'], koji.REPO_STATES['EXPIRED']):
+ raise koji.BuildError, "repos in the %s state may not be used by runroot" % koji.REPO_STATES[repo_info['state']]
+ else:
+ repo_info = self.session.getRepo(root)
+ if not repo_info:
+ #wait for it
+ task_id = self.session.host.subtask(method='waitrepo',
+ arglist=[root, None, None],
+ parent=self.id)
+ repo_info = self.wait(task_id)[task_id]
+ if compat_mode:
+ broot = BuildRoot(root, br_arch, self.id, repo_id=repo_info['id'])
+ else:
+ broot = BuildRoot(self.session, self.options, root, br_arch, self.id, repo_id=repo_info['id'])
+ broot.workdir = self.workdir
+ broot.init()
+ rootdir = broot.rootdir()
+ #workaround for rpm oddness
+ os.system('rm -f "%s"/var/lib/rpm/__db.*' % rootdir)
+ #update buildroot state (so that updateBuildRootList() will work)
+ self.session.host.setBuildRootState(broot.id, 'BUILDING')
+ try:
+ if packages:
+ #pkglog = '%s/%s' % (broot.resultdir(), 'packages.log')
+ pkgcmd = ['--install'] + packages
+ status = broot.mock(pkgcmd)
+ self.session.host.updateBuildRootList(broot.id, broot.getPackageList())
+ if not _isSuccess(status):
+ raise koji.BuildrootError, _parseStatus(status, pkgcmd)
+
+ if isinstance(command, str):
+ cmdstr = command
+ else:
+ #we were passed an arglist
+ #we still have to run this through the shell (for redirection)
+ #but we can preserve the list structure precisely with careful escaping
+ cmdstr = ' '.join(["'%s'" % arg.replace("'", r"'\''") for arg in command])
+ # A nasty hack to put command output into its own file until mock can be
+ # patched to do something more reasonable than stuff everything into build.log
+ cmdargs = ['/bin/sh', '-c', "{ %s; } < /dev/null 2>&1 | /usr/bin/tee /tmp/runroot.log; exit ${PIPESTATUS[0]}" % cmdstr]
+
+ # mount the configured default mounts, then any requested extra mounts
+ self.do_mounts(rootdir, [self._get_path_params(x) for x in self.config['default_mounts']])
+ self.do_extra_mounts(rootdir, mounts)
+ mock_cmd = ['chroot']
+ if skip_setarch:
+ #we can't really skip it, but we can set it to the current one instead of the chroot one
+ myarch = platform.uname()[5]
+ mock_cmd.extend(['--arch', myarch])
+ mock_cmd.append('--')
+ mock_cmd.extend(cmdargs)
+ rv = broot.mock(mock_cmd)
+ log_paths = ['/tmp/runroot.log']
+ if upload_logs is not None:
+ log_paths += upload_logs
+ for log_path in log_paths:
+ self.uploadFile(rootdir + log_path)
+ finally:
+ # mock should umount its mounts, but it will not handle ours
+ self.undo_mounts(rootdir, fatal=False)
+ broot.expire()
+ if isinstance(command, str):
+ cmdlist = command.split()
+ else:
+ cmdlist = command
+ cmdlist = [param for param in cmdlist if '=' not in param]
+ if cmdlist:
+ cmd = os.path.basename(cmdlist[0])
+ else:
+ cmd = '(none)'
+ if _isSuccess(rv):
+ return '%s completed successfully' % cmd
+ else:
+ raise koji.BuildrootError, _parseStatus(rv, cmd)
+
+ def do_extra_mounts(self, rootdir, mounts):
+ mnts = []
+ for mount in mounts:
+ mount = os.path.normpath(mount)
+ for safe_root in self.config['safe_roots']:
+ if mount.startswith(safe_root):
+ break
+ else:
+ #no match
+ raise koji.GenericError("read-write mount point is not safe: %s" % mount)
+ #normpath should have removed any .. dirs, but just in case...
+ if mount.find('/../') != -1:
+ raise koji.GenericError("read-write mount point is not safe: %s" % mount)
+
+ for re, sub in self.config['path_subs']:
+ mount = mount.replace(re, sub)
+
+ mnts.append(self._get_path_params(mount, rw=True))
+ self.do_mounts(rootdir, mnts)
+
+ def do_mounts(self, rootdir, mounts):
+ if not mounts:
+ return
+ self.logger.info('New runroot')
+ self.logger.info("Runroot mounts: %s" % mounts)
+ fn = '%s/tmp/runroot_mounts' % rootdir
+ fslog = file(fn, 'a')
+ logfile = "%s/do_mounts.log" % self.workdir
+ uploadpath = self.getUploadDir()
+ error = None
+ for dev,path,type,opts in mounts:
+ if not path.startswith('/'):
+ raise koji.GenericError("invalid mount point: %s" % path)
+ mpoint = "%s%s" % (rootdir,path)
+ if opts is None:
+ opts = []
+ else:
+ opts = opts.split(',')
+ if 'bind' in opts:
+ #make sure dir exists
+ if not os.path.isdir(dev):
+ error = koji.GenericError("No such directory or mount: %s" % dev)
+ break
+ type = 'none'
+ if path is None:
+ #shorthand for "same path"
+ path = dev
+ if 'bg' in opts:
+ error = koji.GenericError("bad config: background mount not allowed")
+ break
+ opts = ','.join(opts)
+ cmd = ['mount', '-t', type, '-o', opts, dev, mpoint]
+ self.logger.info("Mount command: %r" % cmd)
+ koji.ensuredir(mpoint)
+ if compat_mode:
+ status = log_output(cmd[0], cmd, logfile, uploadpath, logerror=True, append=True)
+ else:
+ status = log_output(self.session, cmd[0], cmd, logfile, uploadpath, logerror=True, append=True)
+ if not _isSuccess(status):
+ error = koji.GenericError("Unable to mount %s: %s" \
+ % (mpoint, _parseStatus(status, cmd)))
+ break
+ fslog.write("%s\n" % mpoint)
+ fslog.flush()
+ fslog.close()
+ if error is not None:
+ self.undo_mounts(rootdir, fatal=False)
+ raise error
+
+ def undo_mounts(self, rootdir, fatal=True):
+ self.logger.debug("Unmounting runroot mounts")
+ mounts = {}
+ fn = '%s/tmp/runroot_mounts' % rootdir
+ if os.path.exists(fn):
+ fslog = file(fn,'r')
+ for line in fslog:
+ mounts.setdefault(line.strip(), 1)
+ fslog.close()
+ #also, check /proc/mounts just in case
+ for dir in scan_mounts(rootdir):
+ mounts.setdefault(dir, 1)
+ mounts = mounts.keys()
+ # deeper directories first
+ mounts.sort()
+ mounts.reverse()
+ failed = []
+ self.logger.info("Unmounting (runroot): %s" % mounts)
+ for dir in mounts:
+ (rv, output) = commands.getstatusoutput("umount -l '%s'" % dir)
+ if rv != 0:
+ failed.append("%s: %s" % (dir, output))
+ if failed:
+ msg = "Unable to unmount: %s" % ', '.join(failed)
+ self.logger.warn(msg)
+ if fatal:
+ raise koji.GenericError, msg
+ else:
+ # remove the mount list when everything is unmounted
+ try:
+ os.unlink(fn)
+ except OSError:
+ pass
diff --git a/plugins/runroot_hub.py b/plugins/runroot_hub.py
new file mode 100644
index 0000000..a666919
--- /dev/null
+++ b/plugins/runroot_hub.py
@@ -0,0 +1,61 @@
+#koji hub plugin
+# There is a kojid plugin that goes with this hub plugin. The kojid builder
+# plugin has a config file. This hub plugin has no config file.
+
+
+from koji.context import context
+from koji.plugin import export
+import koji
+import random
+import sys
+
+#XXX - have to import kojihub for mktask
+sys.path.insert(0, '/usr/share/koji-hub/')
+from kojihub import mktask, get_tag, get_all_arches
+
+__all__ = ('runroot',)
+
+
+def get_channel_arches(channel):
+ """determine arches available in channel"""
+ chan = context.handlers.call('getChannel', channel, strict=True)
+ ret = {}
+ for host in context.handlers.call('listHosts', channelID=chan['id'], enabled=True):
+ for a in host['arches'].split():
+ ret[koji.canonArch(a)] = 1
+ return ret
+
+@export
+def runroot(tagInfo, arch, command, channel=None, **opts):
+ """ Create a runroot task """
+ context.session.assertPerm('runroot')
+ taskopts = {
+ 'priority': 15,
+ 'arch': arch,
+ }
+
+ taskopts['channel'] = channel or 'runroot'
+
+ if arch == 'noarch':
+ #not all arches can generate a proper buildroot for all tags
+ tag = get_tag(tagInfo)
+ if not tag['arches']:
+ raise koji.GenericError, 'no arches defined for tag %s' % tag['name']
+
+ #get all known arches for the system
+ fullarches = get_all_arches()
+
+ tagarches = tag['arches'].split()
+
+ # If our tag can't do all arches, then we need to
+ # specify one of the arches it can do.
+ if set(fullarches) - set(tagarches):
+ chanarches = get_channel_arches(taskopts['channel'])
+ choices = [x for x in tagarches if x in chanarches]
+ if not choices:
+ raise koji.GenericError, 'no common arches for tag/channel: %s/%s' \
+ % (tagInfo, taskopts['channel'])
+ taskopts['arch'] = koji.canonArch(random.choice(choices))
+
+ return mktask(taskopts,'runroot', tagInfo, arch, command, **opts)
+
diff --git a/tests/runtests.py b/tests/runtests.py
new file mode 100755
index 0000000..699e9e4
--- /dev/null
+++ b/tests/runtests.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+
+"""Wrapper script for running unit tests"""
+
+__version__ = "$Revision: 1.1 $"
+
+import sys
+import os
+import os.path
+import unittest
+
+testDir = os.path.dirname(sys.argv[0])
+
+sys.path.insert(0, os.path.abspath('%s/..' % testDir))
+
+allTests = unittest.TestSuite()
+for root, dirs, files in os.walk(testDir):
+ common_path = os.path.commonprefix([os.path.abspath(testDir),
+ os.path.abspath(root)])
+ root_path = os.path.abspath(root).replace(common_path, '').lstrip('/').replace('/', '.')
+
+ for test_file in [item for item in files
+ if item.startswith("test_") and item.endswith(".py")]:
+ if len(sys.argv) == 1 or test_file in sys.argv[1:]:
+ print "adding %s..." % test_file
+ test_file = test_file[:-3]
+ if root_path:
+ test_file = "%s.%s" % (root_path, test_file)
+ suite = unittest.defaultTestLoader.loadTestsFromName(test_file)
+ allTests.addTests(suite._tests)
+
+unittest.TextTestRunner(verbosity=2).run(allTests)
diff --git a/tests/test___init__.py b/tests/test___init__.py
new file mode 100644
index 0000000..93de88a
--- /dev/null
+++ b/tests/test___init__.py
@@ -0,0 +1,67 @@
+#!/usr/bin/python
+
+"""Test the __init__.py module"""
+
+import koji
+import unittest
+
+class INITTestCase(unittest.TestCase):
+ """Main test case container"""
+
+ def test_parse_NVR(self):
+ """Test the parse_NVR method"""
+
+ self.assertRaises(AttributeError, koji.parse_NVR, None)
+ self.assertRaises(AttributeError, koji.parse_NVR, 1)
+ self.assertRaises(AttributeError, koji.parse_NVR, {})
+ self.assertRaises(AttributeError, koji.parse_NVR, [])
+ self.assertRaises(koji.GenericError, koji.parse_NVR, "")
+ self.assertRaises(koji.GenericError, koji.parse_NVR, "foo")
+ self.assertRaises(koji.GenericError, koji.parse_NVR, "foo-1")
+ self.assertRaises(koji.GenericError, koji.parse_NVR, "foo-1-")
+ self.assertRaises(koji.GenericError, koji.parse_NVR, "foo--1")
+ self.assertRaises(koji.GenericError, koji.parse_NVR, "--1")
+ ret = koji.parse_NVR("foo-1-2")
+ self.assertEqual(ret['name'], "foo")
+ self.assertEqual(ret['version'], "1")
+ self.assertEqual(ret['release'], "2")
+ self.assertEqual(ret['epoch'], "")
+ ret = koji.parse_NVR("12:foo-1-2")
+ self.assertEqual(ret['name'], "foo")
+ self.assertEqual(ret['version'], "1")
+ self.assertEqual(ret['release'], "2")
+ self.assertEqual(ret['epoch'], "12")
+
+ def test_parse_NVRA(self):
+ """Test the parse_NVRA method"""
+
+ self.assertRaises(AttributeError, koji.parse_NVRA, None)
+ self.assertRaises(AttributeError, koji.parse_NVRA, 1)
+ self.assertRaises(AttributeError, koji.parse_NVRA, {})
+ self.assertRaises(AttributeError, koji.parse_NVRA, [])
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1-")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo--1")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "--1")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1-1")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1-1.")
+ self.assertRaises(koji.GenericError, koji.parse_NVRA, "foo-1.-1")
+ ret = koji.parse_NVRA("foo-1-2.i386")
+ self.assertEqual(ret['name'], "foo")
+ self.assertEqual(ret['version'], "1")
+ self.assertEqual(ret['release'], "2")
+ self.assertEqual(ret['epoch'], "")
+ self.assertEqual(ret['arch'], "i386")
+ self.assertEqual(ret['src'], False)
+ ret = koji.parse_NVRA("12:foo-1-2.src")
+ self.assertEqual(ret['name'], "foo")
+ self.assertEqual(ret['version'], "1")
+ self.assertEqual(ret['release'], "2")
+ self.assertEqual(ret['epoch'], "12")
+ self.assertEqual(ret['arch'], "src")
+ self.assertEqual(ret['src'], True)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/util/Makefile b/util/Makefile
new file mode 100644
index 0000000..5ea8bde
--- /dev/null
+++ b/util/Makefile
@@ -0,0 +1,40 @@
+BINFILES = kojira koji-gc koji-shadow
+SYSTEMDSYSTEMUNITDIR = $(shell pkg-config systemd --variable=systemdsystemunitdir)
+TYPE = systemd
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+_install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+ mkdir -p $(DESTDIR)/usr/sbin
+ install -p -m 755 $(BINFILES) $(DESTDIR)/usr/sbin
+
+ mkdir -p $(DESTDIR)/etc/kojira
+ install -p -m 644 kojira.conf $(DESTDIR)/etc/kojira/kojira.conf
+
+ mkdir -p $(DESTDIR)/etc/koji-gc
+ install -p -m 644 koji-gc.conf $(DESTDIR)/etc/koji-gc/koji-gc.conf
+
+ mkdir -p $(DESTDIR)/etc/koji-shadow
+ install -p -m 644 koji-shadow.conf $(DESTDIR)/etc/koji-shadow/koji-shadow.conf
+
+install-systemd: _install
+ mkdir -p $(DESTDIR)$(SYSTEMDSYSTEMUNITDIR)
+ install -p -m 644 kojira.service $(DESTDIR)$(SYSTEMDSYSTEMUNITDIR)
+
+install-sysv: _install
+ mkdir -p $(DESTDIR)/etc/rc.d/init.d
+ install -p -m 755 kojira.init $(DESTDIR)/etc/rc.d/init.d/kojira
+
+ mkdir -p $(DESTDIR)/etc/sysconfig
+ install -p -m 644 kojira.sysconfig $(DESTDIR)/etc/sysconfig/kojira
+
+install: install-$(TYPE)
diff --git a/util/koji-gc b/util/koji-gc
new file mode 100755
index 0000000..2d61aa4
--- /dev/null
+++ b/util/koji-gc
@@ -0,0 +1,959 @@
+#!/usr/bin/python
+
+# koji-gc: a garbage collection tool for Koji
+# Copyright (c) 2007-2014 Red Hat, Inc.
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+try:
+ import krbV
+except ImportError:
+ pass
+import koji
+from koji.util import LazyDict, LazyValue
+import koji.policy
+import ConfigParser
+from email.MIMEText import MIMEText
+import fnmatch
+import optparse
+import os
+import pprint
+import smtplib
+import socket # for socket.error
+import sys
+import time
+import xmlrpclib # for ProtocolError and Fault
+
+
+OptionParser = optparse.OptionParser
+if optparse.__version__ == "1.4.1+":
+ def _op_error(self, msg):
+ self.print_usage(sys.stderr)
+ msg = "%s: error: %s\n" % (self._get_prog_name(), msg)
+ if msg:
+ sys.stderr.write(msg)
+ sys.exit(2)
+ OptionParser.error = _op_error
+
+class MySession(koji.ClientSession):
+ """This is a hack to work around server timeouts"""
+
+ def _callMethod(self, name, args, kwargs):
+ retries = 10
+ i = 0
+ while True:
+ i += 1
+ try:
+ return super(MySession, self)._callMethod(name, args, kwargs)
+ except (socket.timeout, socket.error, xmlrpclib.ProtocolError), e:
+ if i > retries:
+ raise
+ else:
+ print "Socket Error: %s [%i], retrying..." % (e, i)
+ time.sleep(60)
+
+ #an even worse hack
+ def multiCall(self):
+ if not self.multicall:
+ raise koji.GenericError, 'ClientSession.multicall must be set to True before calling multiCall()'
+ if len(self._calls) == 0:
+ return []
+
+ self.multicall = False
+ calls = self._calls
+ self._calls = []
+ return self._callMethod('multiCall', (calls,), {})
+
+
+def _(args):
+ """Stub function for translation"""
+ return args
+
+def get_options():
+ """process options from command line and config file"""
+
+ usage = _("%prog [options]")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-c", "--config-file", metavar="FILE",
+ help=_("use alternate configuration file"))
+ parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"))
+ parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
+ parser.add_option("--krbservice", default="host",
+ help=_("the service name of the principal being used by the hub"))
+ parser.add_option("--runas", metavar="USER",
+ help=_("run as the specified user (requires special privileges)"))
+ parser.add_option("--user", help=_("specify user"))
+ parser.add_option("--password", help=_("specify password"))
+ parser.add_option("--noauth", action="store_true", default=False,
+ help=_("do not authenticate"))
+ parser.add_option("--network-hack", action="store_true", default=False,
+ help=_("enable hackish workaround for broken networks"))
+ parser.add_option("--cert", default='/etc/koji-gc/client.crt',
+ help=_("Client SSL certificate file for authentication"))
+ parser.add_option("--ca", default='/etc/koji-gc/clientca.crt',
+ help=_("CA cert file that issued the client certificate"))
+ parser.add_option("--serverca", default='/etc/koji-gc/serverca.crt',
+ help=_("CA cert file that issued the hub certificate"))
+ parser.add_option("-n", "--test", action="store_true", default=False,
+ help=_("test mode"))
+ parser.add_option("-d", "--debug", action="store_true", default=False,
+ help=_("show debug output"))
+ parser.add_option("--debug-xmlrpc", action="store_true", default=False,
+ help=_("show xmlrpc debug output"))
+ parser.add_option("--smtp-host", metavar="HOST",
+ help=_("specify smtp server for notifications"))
+ parser.add_option("--no-mail", action='store_false', default=True, dest="mail",
+ help=_("don't send notifications"))
+ parser.add_option("--send-mail", action='store_true', dest="mail",
+ help=_("send notifications"))
+ parser.add_option("--email-domain", default="fedoraproject.org",
+ help=_("Email domain appended to Koji username for notifications"))
+ parser.add_option("--from-addr", default="Koji Build System <buildsys at example.com>",
+ help=_("From address for notifications"))
+ parser.add_option("--action", help=_("action(s) to take"))
+ parser.add_option("--delay", metavar="INTERVAL", default = '5 days',
+ help="time before eligible builds are placed in trashcan")
+ parser.add_option("--grace-period", default='4 weeks', metavar="INTERVAL",
+ help="time that builds are held in trashcan")
+ parser.add_option("--skip-main", action="store_true", default=False,
+ help=_("don't actually run main"))
+ parser.add_option("--unprotected-keys", metavar="KEYS",
+ help=_("allow builds signed with these keys to be deleted"))
+ parser.add_option("--tag-filter", "--tag", metavar="PATTERN", action="append",
+ help=_("Process only tags matching PATTERN when pruning"))
+ parser.add_option("--ignore-tags", metavar="PATTERN", action="append",
+ help=_("Ignore tags matching PATTERN when pruning"))
+ parser.add_option("--pkg-filter", "--pkg", "--package",
+ metavar="PATTERN", action='append',
+ help=_("Process only packages matching PATTERN"))
+ parser.add_option("--bypass-locks", metavar="PATTERN", action="append",
+ help=_("Bypass locks for tags matching PATTERN"))
+ parser.add_option("--purge", action="store_true", default=False,
+ help=_("When pruning, attempt to delete the builds that are untagged"))
+ parser.add_option("--trashcan-tag", default='trashcan', metavar="TAG",
+ help=_("specify an alternate trashcan tag"))
+ parser.add_option("--weburl", default="http://localhost/koji", metavar="URL",
+ help=_("url of koji web server (for use in notifications)"))
+ parser.add_option("-s", "--server", help=_("url of koji XMLRPC server"))
+ #parse once to get the config file
+ (options, args) = parser.parse_args()
+
+ defaults = parser.get_default_values()
+ config = ConfigParser.ConfigParser()
+ cf = getattr(options, 'config_file', None)
+ if cf:
+ if not os.access(cf, os.F_OK):
+ parser.error(_("No such file: %s") % cf)
+ assert False
+ else:
+ cf = '/etc/koji-gc/koji-gc.conf'
+ if not os.access(cf, os.F_OK):
+ cf = None
+ if not cf:
+ print "no config file"
+ config = None
+ else:
+ config.read(cf)
+ #allow config file to update defaults for certain options
+ cfgmap = [
+ ['keytab', None, 'string'],
+ ['principal', None, 'string'],
+ ['krbservice', None, 'string'],
+ ['runas', None, 'string'],
+ ['user', None, 'string'],
+ ['password', None, 'string'],
+ ['noauth', None, 'boolean'],
+ ['cert', None, 'string'],
+ ['ca', None, 'string'],
+ ['serverca', None, 'string'],
+ ['server', None, 'string'],
+ ['weburl', None, 'string'],
+ ['smtp_host', None, 'string'],
+ ['from_addr', None, 'string'],
+ ['email_domain', None, 'string'],
+ ['mail', None, 'boolean'],
+ ['delay', None, 'string'],
+ ['unprotected_keys', None, 'string'],
+ ['grace_period', None, 'string'],
+ ['trashcan_tag', None, 'string'],
+ ]
+ for name, alias, type in cfgmap:
+ if alias is None:
+ alias = ('main', name)
+ if config.has_option(*alias):
+ if options.debug:
+ print "Using option %s from config file" % (alias,)
+ if type == 'integer':
+ setattr(defaults, name, config.getint(*alias))
+ elif type == 'boolean':
+ setattr(defaults, name, config.getboolean(*alias))
+ else:
+ setattr(defaults, name, config.get(*alias))
+ #parse again with defaults
+ (options, args) = parser.parse_args(values=defaults)
+ options.config = config
+
+ #figure out actions
+ actions = ('prune', 'trash', 'delete', 'salvage')
+ if options.action:
+ options.action = options.action.lower().replace(',',' ').split()
+ for x in options.action:
+ if x not in actions:
+ parser.error(_("Invalid action: %s") % x)
+ else:
+ options.action = ('delete', 'prune', 'trash')
+
+ #split patterns for unprotected keys
+ if options.unprotected_keys:
+ options.unprotected_key_patterns = options.unprotected_keys.replace(',',' ').split()
+ else:
+ options.unprotected_key_patterns = []
+
+ #parse key aliases
+ options.key_aliases = {}
+ try:
+ if config and config.has_option('main', 'key_aliases'):
+ for line in config.get('main','key_aliases').splitlines():
+ parts = line.split()
+ if len(parts) < 2:
+ continue
+ options.key_aliases[parts[0].upper()] = parts[1]
+ except ValueError, e:
+ print e
+ parser.error(_("Invalid key alias data in config: %s") % config.get('main','key_aliases'))
+
+ #parse time intervals
+ for key in ('delay', 'grace_period'):
+ try:
+ value = getattr(options, key)
+ value = parse_duration(value)
+ setattr(options, key, value)
+ if options.debug:
+ print "%s: %s seconds" % (key, value)
+ except ValueError:
+ parser.error(_("Invalid time interval: %s") % value)
+
+ return options, args
+
+def check_tag(name):
+ """Check tag name against options and determine if we should process it
+
+ The ignore option takes priority here.
+ Returns True if we should process the tag, False otherwise
+ """
+ if options.ignore_tags:
+ for pattern in options.ignore_tags:
+ if fnmatch.fnmatch(name, pattern):
+ return False
+ if options.tag_filter:
+ for pattern in options.tag_filter:
+ if fnmatch.fnmatch(name, pattern):
+ return True
+ #doesn't match any pattern in filter
+ return False
+ else:
+ #not ignored and no filter specified
+ return True
+
+def check_package(name):
+ """Check package name against options and determine if we should process it
+
+ Returns True if we should process the package, False otherwise
+ """
+ if options.pkg_filter:
+ for pattern in options.pkg_filter:
+ if fnmatch.fnmatch(name, pattern):
+ return True
+ #doesn't match any pattern in filter
+ return False
+ else:
+ #no filter specified
+ return True
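+
+# Usage sketch (illustrative; assumes options.pkg_filter = ['kernel*']):
+#   check_package('kernel-headers')  -> True
+#   check_package('bash')            -> False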
+
+time_units = {
+ 'second' : 1,
+ 'minute' : 60,
+ 'hour' : 3600,
+ 'day' : 86400,
+ 'week' : 604800,
+}
+time_unit_aliases = [
+ #[unit, alias, alias, ...]
+ ['week', 'weeks', 'wk', 'wks'],
+ ['hour', 'hours', 'hr', 'hrs'],
+ ['day', 'days'],
+ ['minute', 'minutes', 'min', 'mins'],
+ ['second', 'seconds', 'sec', 'secs', 's'],
+]
+def parse_duration(str):
+ """Parse time duration from string, returns duration in seconds"""
+ ret = 0
+ n = None
+ unit = None
+ def parse_num(s):
+ try:
+ return int(s)
+ except ValueError:
+ pass
+ try:
+ return float(s)
+ except ValueError:
+ pass
+ return None
+ for x in str.split():
+ if n is None:
+ n = parse_num(x)
+ if n is not None:
+ continue
+ #perhaps the unit is appended w/o a space
+ for names in time_unit_aliases:
+ for name in names:
+ if x.endswith(name):
+ n = parse_num(x[:-len(name)])
+ if n is None:
+ continue
+ unit = names[0]
+ # combined at end
+ break
+ if unit:
+ break
+ else:
+ raise ValueError, "Invalid time interval: %s" % str
+ if unit is None:
+ x = x.lower()
+ for names in time_unit_aliases:
+ for name in names:
+ if x == name:
+ unit = names[0]
+ break
+ if unit:
+ break
+ else:
+ raise ValueError, "Invalid time interval: %s" % str
+ ret += n * time_units[unit]
+ n = None
+ unit = None
+ return ret
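+
+# Usage sketch (illustrative):
+#   parse_duration('5 days')         -> 432000
+#   parse_duration('1 week 2 days')  -> 777600
+#   parse_duration('30min')          -> 1800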
+
+def error(msg=None, code=1):
+ if msg:
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+ sys.exit(code)
+
+def warn(msg):
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+
+def ensure_connection(session):
+ try:
+ ret = session.getAPIVersion()
+ except xmlrpclib.ProtocolError:
+ error(_("Error: Unable to connect to server"))
+ if ret != koji.API_VERSION:
+ warn(_("WARNING: The server is at API version %d and the client is at %d" % (ret, koji.API_VERSION)))
+
+def has_krb_creds():
+ if not sys.modules.has_key('krbV'):
+ return False
+ try:
+ ctx = krbV.default_context()
+ ccache = ctx.default_ccache()
+ princ = ccache.principal()
+ return True
+ except krbV.Krb5Error:
+ return False
+
+def activate_session(session):
+ """Test and login the session is applicable"""
+ global options
+ if options.noauth:
+ #skip authentication
+ pass
+ elif os.path.isfile(options.cert):
+ # authenticate using SSL client cert
+ session.ssl_login(options.cert, options.ca, options.serverca, proxyuser=options.runas)
+ elif options.user:
+ #authenticate using user/password
+ session.login()
+ elif has_krb_creds():
+ try:
+ if options.keytab and options.principal:
+ session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)
+ else:
+ session.krb_login(proxyuser=options.runas)
+ except krbV.Krb5Error, e:
+ error(_("Kerberos authentication failed: %s (%s)") % (e.args[1], e.args[0]))
+ except socket.error, e:
+ warn(_("Could not connect to Kerberos authentication service: '%s'") % e.args[1])
+ if not options.noauth and not session.logged_in:
+ error(_("Error: unable to log in, no authentication methods available"))
+ ensure_connection(session)
+ if options.debug:
+ print "successfully connected to hub"
+
+def send_warning_notice(owner_name, builds):
+ if not options.mail:
+ return
+ if not builds:
+ print "Warning: empty build list. No notice sent"
+ return
+ head = """\
+The following build(s) are unreferenced and have been marked for
+deletion. They will be held in the trashcan tag for a grace period.
+At the end of that period they will be deleted permanently. This
+garbage collection is a normal part of build system operation.
+Please see the following url for more information:
+
+ http://fedoraproject.org/wiki/Koji/GarbageCollection"""
+ fmt="""\
+Build: %%(name)s-%%(version)s-%%(release)s
+%s/buildinfo?buildID=%%(id)i""" % options.weburl
+ middle = '\n\n'.join([fmt % b for b in builds])
+ tail = """\
+If you would like to protect any of these builds from deletion, please
+refer to the document linked above for instructions."""
+
+ msg = MIMEText('\n\n'.join([head, middle, tail]))
+ if len(builds) == 1:
+ msg['Subject'] = "1 build marked for deletion"
+ else:
+ msg['Subject'] = "%i builds marked for deletion" % len(builds)
+ msg['From'] = options.from_addr
+ msg['To'] = "%s@%s" % (owner_name, options.email_domain) #XXX!
+ msg['X-Koji-Builder'] = owner_name
+ if options.test:
+ if options.debug:
+ print str(msg)
+ else:
+ print "Would have sent warning notice to %s" % msg['To']
+ else:
+ if options.debug:
+ print "Sending warning notice to %s" % msg['To']
+ s = smtplib.SMTP(options.smtp_host)
+ s.sendmail(msg['From'], msg['To'], msg.as_string())
+ s.close()
+
+
+def main(args):
+ activate_session(session)
+ for x in options.action:
+ globals()['handle_' + x]()
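+
+#e.g. if options.action is ('delete', 'prune', 'trash'), main() runs
+#handle_delete(), handle_prune(), and handle_trash() in that order.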
+
+
+def handle_trash():
+ print "Getting untagged builds..."
+ untagged = session.untaggedBuilds()
+ print "...got %i builds" % len(untagged)
+ min_age = options.delay
+ trashcan_tag = options.trashcan_tag
+ #Step 1: place unreferenced builds into trashcan
+ i = 0
+ N = len(untagged)
+ to_trash = []
+ for binfo in untagged:
+ i += 1
+ nvr = "%(name)s-%(version)s-%(release)s" % binfo
+ if not check_package(binfo['name']):
+ if options.debug:
+ print "[%i/%i] Skipping package: %s" % (i, N, nvr)
+ continue
+ try:
+ refs = session.buildReferences(binfo['id'], limit=10)
+ except xmlrpclib.Fault:
+ print "[%i/%i] Error checking references for %s. Skipping" % (i, N, nvr)
+ continue
+ #XXX - this is more data than we need
+ # also, this call takes waaaay longer than it should
+ if refs['tags']:
+ # must have been tagged just now
+ print "[%i/%i] Build is tagged [?]: %s" % (i, N, nvr)
+ continue
+ if refs['rpms']:
+ if options.debug:
+ print "[%i/%i] Build has %i rpm references: %s" % (i, N, len(refs['rpms']), nvr)
+ #pprint.pprint(refs['rpms'])
+ continue
+ if refs['archives']:
+ if options.debug:
+ print "[%i/%i] Build has %i archive references: %s" % (i, N, len(refs['archives']), nvr)
+ #pprint.pprint(refs['archives'])
+ continue
+ ts = refs['last_used']
+ if ts:
+ #work around server bug
+ if isinstance(ts, list):
+ ts = ts[0]
+ #XXX - should really check time server side
+ if options.debug:
+ print "[%i/%i] Build has been used in a buildroot: %s" % (i, N, nvr)
+ print "Last_used: %r" % ts
+ age = time.time() - ts
+ if age < min_age:
+ continue
+ #see how long build has been untagged
+ history = session.tagHistory(build=binfo['id'])
+ age = None
+ binfo2 = None
+ if not history:
+ #never tagged, we'll have to use the build create time
+ binfo2 = session.getBuild(binfo['id'])
+ ts = binfo2.get('creation_ts')
+ if ts is None:
+ # older api with no good way to get a proper timestamp for
+ # a build, so we have the following hack
+ task_id = binfo2.get('task_id')
+ if task_id:
+ tinfo = session.getTaskInfo(task_id)
+ if tinfo['completion_ts']:
+ age = time.time() - tinfo['completion_ts']
+ else:
+ age = time.time() - ts
+ else:
+ history = [(h['revoke_event'],h) for h in history]
+ last = max(history)[1]
+ if not last['revoke_event']:
+ #this might happen if the build was tagged just now
+ print "[%i/%i] Warning: build not untagged: %s" % (i, N, nvr)
+ continue
+ age = time.time() - last['revoke_ts']
+ if age is not None and age < min_age:
+ if options.debug:
+ print "[%i/%i] Build untagged only recently: %s" % (i, N, nvr)
+ continue
+ #check build signatures
+ keys = get_build_sigs(binfo['id'], cache=True)
+ if keys and options.debug:
+ print "Build: %s, Keys: %s" % (nvr, keys)
+ if protected_sig(keys):
+ print "Skipping build %s. Keys: %s" % (nvr, keys)
+ continue
+
+ #ok, go ahead and add it to the list
+ if binfo2 is None:
+ binfo2 = session.getBuild(binfo['id'])
+ binfo2['nvr'] = nvr
+ print "[%i/%i] Adding build to trash list: %s" % (i, N, nvr)
+ to_trash.append(binfo2)
+
+ #process to_trash
+ #group by owner so we can reduce the number of notices
+ by_owner = {}
+ for binfo in to_trash:
+ by_owner.setdefault(binfo['owner_name'], []).append(binfo)
+ owners = by_owner.keys()
+ owners.sort()
+ for owner_name in owners:
+ builds = [(b['nvr'], b) for b in by_owner[owner_name]]
+ builds.sort()
+ send_warning_notice(owner_name, [x[1] for x in builds])
+ for nvr, binfo in builds:
+ if options.test:
+ print "Would have moved to trashcan: %s" % nvr
+ else:
+ if options.debug:
+ print "Moving to trashcan: %s" % nvr
+ #figure out package owner
+ count = {}
+ for pkg in session.listPackages(pkgID=binfo['name']):
+ count.setdefault(pkg['owner_id'], 0)
+ count[pkg['owner_id']] += 1
+ if not count:
+ print "Warning: no owner for %s, using build owner" % nvr
+ #best we can do currently
+ owner = binfo['owner_id']
+ else:
+ owner = max([(n, k) for k, n in count.iteritems()])[1]
+ session.packageListAdd(trashcan_tag, binfo['name'], owner)
+ session.tagBuildBypass(trashcan_tag, binfo['id'], force=True)
+
+def protected_sig(keys):
+ """Check list of keys and see if any are protected
+
+ returns True if ANY are protected (not on unprotected list)
+ returns False if ALL are unprotected
+ """
+ for key in keys:
+ if not key:
+ continue
+ if not sigmatch(key, options.unprotected_key_patterns):
+ #this key is protected
+ return True
+ return False
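+
+#Example (illustrative, using the sample key_aliases further below): with
+#unprotected_key_patterns ['fedora-test', 'redhat-beta'],
+#protected_sig(['30c9ecf8']) is False (the key aliases to fedora-test),
+#while protected_sig(['30c9ecf8', '4f2a6fd2']) is True because 4f2a6fd2
+#(fedora-gold) matches no unprotected pattern.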
+
+
+def handle_salvage():
+ """Reclaim builds from trashcan
+
+ Check builds in trashcan for new tags or references and salvage them
+ (remove trashcan tag) if appropriate.
+
+ The delete action also does this, but this action lets you run
+ just the salvage step on its own."""
+ return handle_delete(just_salvage=True)
+
+def salvage_build(binfo):
+ """Removes trashcan tag from a build and prints a message"""
+ if options.test:
+ print "Would have untagged from trashcan: %(nvr)s" % binfo
+ else:
+ if options.debug:
+ print "Untagging from trashcan: %(nvr)s" % binfo
+ session.untagBuildBypass(options.trashcan_tag, binfo['id'], force=True)
+
+def handle_delete(just_salvage=False):
+ """Delete builds that have been in the trashcan for long enough
+
+ If just_salvage is True, goes into salvage mode. In salvage mode it only
+ reclaims referenced builds from the trashcan, it does not perform any
+ deletes
+ """
+ print "Getting list of builds in trash..."
+ trashcan_tag = options.trashcan_tag
+ trash = [(b['nvr'], b) for b in session.listTagged(trashcan_tag)]
+ trash.sort()
+ print "...got %i builds" % len(trash)
+ #XXX - it would be better if there were more appropriate server calls for this
+ grace_period = options.grace_period
+ for nvr, binfo in trash:
+ # see if build has been tagged elsewhere
+ if not check_package(binfo['name']):
+ if options.debug:
+ print "Skipping package: %s" % nvr
+ continue
+ tags = [t['name'] for t in session.listTags(build=binfo['id']) if t['name'] != trashcan_tag]
+ if tags:
+ print "Build %s tagged elsewhere: %s" % (nvr, tags)
+ salvage_build(binfo)
+ continue
+ #check build signatures
+ keys = get_build_sigs(binfo['id'], cache=False)
+ if keys and options.debug:
+ print "Build: %s, Keys: %s" % (nvr, keys)
+ if protected_sig(keys):
+ print "Salvaging signed build %s. Keys: %s" % (nvr, keys)
+ salvage_build(binfo)
+ continue
+ if just_salvage:
+ # skip the rest when salvaging
+ continue
+ # determine how long this build has been in the trashcan
+ history = session.tagHistory(build=binfo['id'], tag=trashcan_tag)
+ current = [x for x in history if x['active']]
+ if not current:
+ #untagged just now?
+ print "Warning: history missing for %s" % nvr
+ pprint.pprint(binfo)
+ pprint.pprint(history)
+ continue
+ assert len(current) == 1 #see db constraint
+ current = current[0]
+ age = time.time() - current['create_ts']
+ if age < grace_period:
+ if options.debug:
+ print "Skipping build %s, age=%i" % (nvr, age)
+ continue
+
+ # go ahead and delete
+ if options.test:
+ print "Would have deleted build from trashcan: %s" % nvr
+ else:
+ print "Deleting build: %s" % nvr
+ session.untagBuildBypass(trashcan_tag, binfo['id'])
+ try:
+ session.deleteBuild(binfo['id'])
+ except (xmlrpclib.Fault, koji.GenericError), e:
+ print "Warning: deletion failed: %s" % e
+ #server issue
+ pass
+ #TODO - log details for delete failures
+
+
+class TagPruneTest(koji.policy.MatchTest):
+ name = 'tag'
+ field = 'tagname'
+
+
+class PackagePruneTest(koji.policy.MatchTest):
+ name = 'package'
+ field = 'pkgname'
+
+
+class VolumePruneTest(koji.policy.MatchTest):
+ name = 'volume'
+ field = 'volname'
+
+
+class SigPruneTest(koji.policy.BaseSimpleTest):
+ name = 'sig'
+
+ def run(self, data):
+ # true if any of the keys match any of the patterns
+ patterns = self.str.split()[1:]
+ for key in data['keys']:
+ if sigmatch(key, patterns):
+ return True
+ return False
+
+
+def sigmatch(key, patterns):
+ """Test whether a key id matches any of the given patterns
+
+ Supports key aliases
+ """
+ if not isinstance(patterns, (tuple, list)):
+ patterns = (patterns,)
+ for pat in patterns:
+ if fnmatch.fnmatch(key, pat):
+ return True
+ alias = options.key_aliases.get(key.upper())
+ if alias is not None and fnmatch.fnmatch(alias, pat):
+ return True
+ return False
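+
+#Example (illustrative): if options.key_aliases maps '897DA07A' to
+#'redhat-beta', then sigmatch('897da07a', 'redhat-*') is True via the
+#alias, while sigmatch('897da07a', '897da07*') matches the key id itself.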
+
+
+class OrderPruneTest(koji.policy.CompareTest):
+ name = 'order'
+ field = 'order'
+ allow_float = False
+
+
+class AgePruneTest(koji.policy.BaseSimpleTest):
+ name = 'age'
+ cmp_idx = koji.policy.CompareTest.operators
+
+ def __init__(self, str):
+ """Read the test parameters from string"""
+ super(AgePruneTest, self).__init__(str)
+ self.cmp, value = str.split(None, 2)[1:]
+ self.func = self.cmp_idx.get(self.cmp, None)
+ if self.func is None:
+ raise Exception, "Invalid comparison in test: %s" % str
+ self.span = parse_duration(value)
+
+ def run(self, data):
+ return self.func(time.time() - data['ts'], self.span)
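+
+#Example (illustrative): the policy line "age > 2 weeks :: untag" yields
+#an AgePruneTest with cmp '>' and span 1209600 seconds, so run() returns
+#True for entries whose create_ts is more than two weeks old.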
+
+
+def read_policies(fn=None):
+ """Read tag gc policies from file
+
+ The expected format is as follows
+ test [params] [&& test [params] ...] :: (keep|untag|skip)
+ """
+ fo = file(fn, 'r')
+ tests = koji.policy.findSimpleTests(globals())
+ ret = koji.policy.SimpleRuleSet(fo, tests)
+ fo.close()
+ return ret
+
+def scan_policies(str):
+ """Read tag gc policies from a string
+
+ The expected format is as follows
+ test [params] [&& test [params] ...] :: (keep|untag|skip)
+ """
+ tests = koji.policy.findSimpleTests(globals())
+ return koji.policy.SimpleRuleSet(str.splitlines(), tests)
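+
+#Example (illustrative): scan_policies("order > 2 :: untag") yields a
+#one-rule set that untags all but the three most recent builds of each
+#package in a tag (the same rule as the sample config's default below).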
+
+build_sig_cache = {}
+
+def get_build_sigs(build, cache=False):
+ if cache and build in build_sig_cache:
+ return build_sig_cache[build]
+ rpms = session.listRPMs(buildID=build)
+ keys = {}
+ if not rpms:
+ # for non-rpm builds we have no easy way of checking signatures
+ ret = build_sig_cache[build] = []
+ return ret
+ else:
+ #TODO - multicall helps, but it might be good to have a more robust server-side call
+ session.multicall = True
+ for rpminfo in rpms:
+ session.queryRPMSigs(rpm_id=rpminfo['id'])
+ for rpminfo, [sigs] in zip(rpms, session.multiCall()):
+ for sig in sigs:
+ if sig['sigkey']:
+ keys.setdefault(sig['sigkey'], 1)
+ ret = build_sig_cache[build] = keys.keys()
+ return ret
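+
+#Example (illustrative): for a build signed with one key this returns the
+#distinct key ids, e.g. ['30c9ecf8']; for a build with no rpms it returns
+#[] since signatures cannot easily be checked for non-rpm content.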
+
+def handle_prune():
+ """Untag old builds according to policy
+
+ If the purge option is set, will also attempt to delete the pruned builds afterwards
+ """
+ #read policy
+ if not options.config or not options.config.has_option('prune', 'policy'):
+ print "Skipping prune step. No policies available."
+ return
+ #policies = read_policies(options.policy_file)
+ policies = scan_policies(options.config.get('prune', 'policy'))
+ for action in policies.all_actions():
+ if action not in ("keep", "untag", "skip"):
+ raise Exception, "Invalid action: %s" % action
+ if options.debug:
+ pprint.pprint(policies.ruleset)
+ #get tags
+ tags = session.listTags(queryOpts={'order': 'name'})
+ untagged = {}
+ build_ids = {}
+ for taginfo in tags:
+ tagname = taginfo['name']
+ if tagname == options.trashcan_tag:
+ if options.debug:
+ print "Skipping trashcan tag: %s" % tagname
+ continue
+ if not check_tag(tagname):
+ #if options.debug:
+ # print "skipping tag due to filter: %s" % tagname
+ continue
+ bypass = False
+ if taginfo['locked']:
+ if options.bypass_locks:
+ for pattern in options.bypass_locks:
+ if fnmatch.fnmatch(tagname, pattern):
+ bypass = True
+ break
+ if bypass:
+ print "Bypassing lock on tag: %s" % tagname
+ else:
+ if options.debug:
+ print "skipping locked tag: %s" % tagname
+ continue
+ if options.debug:
+ print "Pruning tag: %s" % tagname
+ #get builds
+ history = session.tagHistory(tag=tagname, active=True, queryOpts={'order': '-create_ts'})
+ if not history:
+ if options.debug:
+ print "No history for %s" % tagname
+ continue
+ pkghist = {}
+ for h in history:
+ if taginfo['maven_include_all'] and h['maven_build_id']:
+ pkghist.setdefault(h['name'] + '-' + h['version'], []).append(h)
+ else:
+ pkghist.setdefault(h['name'], []).append(h)
+ pkgs = pkghist.keys()
+ pkgs.sort()
+ for pkg in pkgs:
+ if not check_package(pkg):
+ #if options.debug:
+ # print "skipping package due to filter: %s" % pkg
+ continue
+ if options.debug:
+ print pkg
+ hist = pkghist[pkg]
+ #these are the *active* history entries for tag/pkg
+ skipped = 0
+ for order, entry in enumerate(hist):
+ # get sig data
+ nvr = "%(name)s-%(version)s-%(release)s" % entry
+ data = {
+ 'tagname' : tagname,
+ 'pkgname' : pkg,
+ 'order': order - skipped,
+ 'ts' : entry['create_ts'],
+ 'nvr' : nvr,
+ }
+ data = LazyDict(data)
+ data['keys'] = LazyValue(get_build_sigs, (entry['build_id'],), {'cache':True})
+ data['volname'] = LazyValue(lambda x: session.getBuild(x).get('volume_name'),
+ (entry['build_id'],), cache=True)
+ build_ids[nvr] = entry['build_id']
+ action = policies.apply(data)
+ if action is None:
+ if options.debug:
+ print "No policy for %s (%s)" % (nvr, tagname)
+ if action == 'skip':
+ skipped += 1
+ if options.debug:
+ print policies.last_rule()
+ print "%s: %s (%s, %i)" % (action, nvr, tagname, order)
+ if action == 'untag':
+ if options.test:
+ print "Would have untagged %s from %s" % (nvr, tagname)
+ untagged.setdefault(nvr, {})[tagname] = 1
+ else:
+ print "Untagging build %s from %s" % (nvr, tagname)
+ try:
+ session.untagBuildBypass(taginfo['id'], entry['build_id'], force=bypass)
+ untagged.setdefault(nvr, {})[tagname] = 1
+ except (xmlrpclib.Fault, koji.GenericError), e:
+ print "Warning: untag operation failed: %s" % e
+ pass
+ # if action == 'keep' do nothing
+ if options.purge and untagged:
+ print "Attempting to purge %i builds" % len(untagged)
+ for nvr in untagged:
+ build_id = build_ids[nvr]
+ tags = [t['name'] for t in session.listTags(build_id)]
+ if options.test:
+ #filter out the tags we would have dropped above
+ tags = [t for t in tags if t not in untagged[nvr]]
+ if tags:
+ #still tagged somewhere
+ print "Skipping %s, still tagged: %s" % (nvr, tags)
+ continue
+ #check cached sigs first to save a little time
+ if build_id in build_sig_cache:
+ keys = build_sig_cache[build_id]
+ if protected_sig(keys):
+ print "Skipping %s, signatures: %s" % (nvr, keys)
+ continue
+ #recheck signatures in case build was signed during run
+ keys = get_build_sigs(build_id, cache=False)
+ if protected_sig(keys):
+ print "Skipping %s, signatures: %s" % (nvr, keys)
+ continue
+
+ if options.test:
+ print "Would have deleted build: %s" % nvr
+ else:
+ print "Deleting untagged build: %s" % nvr
+ try:
+ session.deleteBuild(build_id, strict=False)
+ except (xmlrpclib.Fault, koji.GenericError), e:
+ print "Warning: deletion failed: %s" % e
+ #server issue
+ pass
+
+if __name__ == "__main__":
+
+ options, args = get_options()
+
+ session_opts = {}
+ for k in ('user', 'password', 'krbservice', 'email_domain', 'debug_xmlrpc', 'debug'):
+ session_opts[k] = getattr(options,k)
+ if options.network_hack:
+ socket.setdefaulttimeout(180)
+ session = MySession(options.server, session_opts)
+ else:
+ session = koji.ClientSession(options.server, session_opts)
+ rv = 0
+ try:
+ if not options.skip_main:
+ rv = main(args)
+ if not rv:
+ rv = 0
+ except KeyboardInterrupt:
+ pass
+ except SystemExit:
+ rv = 1
+ #except:
+ # if options.debug:
+ # raise
+ # else:
+ # exctype, value = sys.exc_info()[:2]
+ # rv = 1
+ # print "%s: %s" % (exctype, value)
+ try:
+ session.logout()
+ except:
+ pass
+ if not options.skip_main:
+ sys.exit(rv)
diff --git a/util/koji-gc.conf b/util/koji-gc.conf
new file mode 100644
index 0000000..8a0c07a
--- /dev/null
+++ b/util/koji-gc.conf
@@ -0,0 +1,43 @@
+#test policy file
+#earlier = higher precedence!
+
+[main]
+key_aliases =
+ 30C9ECF8 fedora-test
+ 4F2A6FD2 fedora-gold
+ 897DA07A redhat-beta
+ 1AC70CE6 fedora-extras
+
+unprotected_keys =
+ fedora-test
+ fedora-extras
+ redhat-beta
+
+server = https://koji.fedoraproject.org/kojihub
+weburl = http://koji.fedoraproject.org/koji
+
+# The service name of the principal being used by the hub
+#krbservice = host
+
+# The domain name that will be appended to Koji usernames
+# when creating email notifications
+#email_domain = fedoraproject.org
+
+[prune]
+policy =
+ #stuff to protect
+ #note that tags with master lock engaged are already protected
+ tag *-updates :: keep
+ age < 1 day :: skip
+ sig fedora-gold :: skip
+ sig fedora-test && age < 12 weeks :: keep
+
+ #stuff to chuck semi-rapidly
+ tag *-testing *-candidate :: { # nested rules
+ order >= 2 :: untag
+ order > 0 && age > 6 weeks :: untag
+ } #closing braces must be on a line by themselves (modulo comments/whitespace)
+ tag *-candidate && age > 60 weeks :: untag
+
+ #default: keep the last 3
+ order > 2 :: untag
diff --git a/util/koji-shadow b/util/koji-shadow
new file mode 100755
index 0000000..3b62776
--- /dev/null
+++ b/util/koji-shadow
@@ -0,0 +1,1330 @@
+#!/usr/bin/python
+
+# koji-shadow: a tool to shadow builds between koji instances
+# Copyright (c) 2007-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+# Dennis Gilmore <dennis at ausil.us>
+
+try:
+ import krbV
+except ImportError:
+ pass
+import koji
+import ConfigParser
+import fnmatch
+import optparse
+import os
+import pprint
+import random
+import shutil
+import socket # for socket.error and socket.setdefaulttimeout
+import string
+import sys
+import time
+import urllib2
+import urlgrabber.grabber as grabber
+import xmlrpclib # for ProtocolError and Fault
+import rpm
+
+# koji.fp.o keeps stalling, probably network errors...
+# better to time out than to stall
+socket.setdefaulttimeout(180) #XXX - too short?
+
+
+OptionParser = optparse.OptionParser
+if optparse.__version__ == "1.4.1+":
+ def _op_error(self, msg):
+ self.print_usage(sys.stderr)
+ msg = "%s: error: %s\n" % (self._get_prog_name(), msg)
+ if msg:
+ sys.stderr.write(msg)
+ sys.exit(2)
+ OptionParser.error = _op_error
+
+
+def _(args):
+ """Stub function for translation"""
+ return args
+
+
+class SubOption(object):
+ """A simple container to help with tracking ConfigParser data"""
+ pass
+
+def get_options():
+ """process options from command line and config file"""
+
+ usage = _("%prog [options]")
+ parser = OptionParser(usage=usage)
+ parser.add_option("-c", "--config-file", metavar="FILE",
+ help=_("use alternate configuration file"))
+ parser.add_option("--keytab", help=_("specify a Kerberos keytab to use"))
+ parser.add_option("--principal", help=_("specify a Kerberos principal to use"))
+ parser.add_option("--krbservice", help=_("the service name of the"
+ " principal being used by the hub"))
+ parser.add_option("--runas", metavar="USER",
+ help=_("run as the specified user (requires special privileges)"))
+ parser.add_option("--user", help=_("specify user"))
+ parser.add_option("--password", help=_("specify password"))
+ parser.add_option("--noauth", action="store_true", default=False,
+ help=_("do not authenticate"))
+ parser.add_option("-n", "--test", action="store_true", default=False,
+ help=_("test mode"))
+ parser.add_option("-d", "--debug", action="store_true", default=False,
+ help=_("show debug output"))
+ parser.add_option("--first-one", action="store_true", default=False,
+ help=_("stop after scanning first build -- debugging"))
+ parser.add_option("--debug-xmlrpc", action="store_true", default=False,
+ help=_("show xmlrpc debug output"))
+ parser.add_option("--skip-main", action="store_true", default=False,
+ help=_("don't actually run main"))
+# parser.add_option("--tag-filter", metavar="PATTERN",
+# help=_("limit tags for pruning"))
+# parser.add_option("--pkg-filter", metavar="PATTERN",
+# help=_("limit packages for pruning"))
+ parser.add_option("--max-jobs", type="int", default=0,
+ help=_("limit number of tasks"))
+ parser.add_option("--build",
+ help=_("scan just this build"))
+ parser.add_option("-s", "--server",
+ help=_("url of local XMLRPC server"))
+ parser.add_option("-r", "--remote",
+ help=_("url of remote XMLRPC server"))
+ parser.add_option("--prefer-new", action="store_true", default=False,
+ help=_("if there is a newer build locally prefer it for deps"))
+ parser.add_option("--import-noarch-only", action="store_true", default=False,
+ help=_("Only import missing noarch builds"))
+ parser.add_option("--import-noarch", action="store_true",
+ help=_("import missing noarch builds rather than rebuilding"))
+ parser.add_option("--link-imports", action="store_true",
+ help=_("use 'import --link' functionality"))
+ parser.add_option("--remote-topurl",
+ help=_("topurl for remote server"))
+ parser.add_option("--workpath", default="/tmp/koji-shadow",
+ help=_("location to store work files"))
+ parser.add_option("--auth-cert",
+ help=_("Certificate for authentication"))
+ parser.add_option("--auth-ca",
+ help=_("CA certificate for authentication"))
+ parser.add_option("--serverca",
+ help=_("Server CA certificate"))
+ parser.add_option("--rules",
+ help=_("rules"))
+ parser.add_option("--rules-greylist",
+ help=_("greylist rules"))
+ parser.add_option("--rules-blacklist",
+ help=_("blacklist rules"))
+ parser.add_option("--rules-ignorelist",
+ help=_("Rules: list of packages to ignore"))
+ parser.add_option("--rules-excludelist",
+ help=_("Rules: list of packages to are excluded using ExcludeArch or ExclusiveArch"))
+ parser.add_option("--rules-includelist",
+ help=_("Rules: list of packages to always include"))
+ parser.add_option("--rules-protectlist",
+ help=_("Rules: list of package names to never replace"))
+ parser.add_option("--tag-build", action="store_true", default=False,
+ help=_("tag sucessful builds into the tag we are building, default is to not tag"))
+ parser.add_option("--arches",
+ help=_("arches to use when creating tags"))
+ parser.add_option("--priority", type="int", default=5,
+ help=_("priority to set for submitted builds"))
+
+ #parse once to get the config file
+ (options, args) = parser.parse_args()
+
+ defaults = parser.get_default_values()
+ config = ConfigParser.ConfigParser()
+ cf = getattr(options, 'config_file', None)
+ if cf:
+ if not os.access(cf, os.F_OK):
+ parser.error(_("No such file: %s") % cf)
+ assert False
+ else:
+ cf = '/etc/koji-shadow/koji-shadow.conf'
+ if not os.access(cf, os.F_OK):
+ cf = None
+ if not cf:
+ print "no config file"
+ config = None
+ else:
+ config.read(cf)
+ #allow config file to update defaults
+ for opt in parser.option_list:
+ if not opt.dest:
+ continue
+ name = opt.dest
+ alias = ('main', name)
+ if config.has_option(*alias):
+ print "Using option %s from config file" % (alias,)
+ if opt.action in ('store_true', 'store_false'):
+ setattr(defaults, name, config.getboolean(*alias))
+ elif opt.action != 'store':
+ pass
+ elif opt.type in ('int', 'long'):
+ setattr(defaults, name, config.getint(*alias))
+ elif opt.type in ('float'):
+ setattr(defaults, name, config.getfloat(*alias))
+ else:
+ print config.get(*alias)
+ setattr(defaults, name, config.get(*alias))
+ #config file options without a cmdline equivalent
+ otheropts = [
+ #name, default, type
+ ['keytab', None, 'string'],
+ ['principal', None, 'string'],
+ ['runas', None, 'string'],
+ ['user', None, 'string'],
+ ['password', None, 'string'],
+ ['noauth', None, 'boolean'],
+ ['server', None, 'string'],
+ ['remote', None, 'string'],
+ ['max_jobs', None, 'int'],
+ ['serverca', None, 'string'],
+ ['auth_cert', None, 'string'],
+ ['auth_ca', None, 'string'],
+ ['arches', None, 'string'],
+ ]
+
+
+ #parse again with updated defaults
+ (options, args) = parser.parse_args(values=defaults)
+ options.config = config
+
+ return options, args
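+
+#Illustrative sketch of a config file (hypothetical values; any key in
+#the [main] section matching an option dest above overrides that
+#option's default):
+#  [main]
+#  server = http://localhost/kojihub
+#  remote = https://koji.fedoraproject.org/kojihub
+#  max_jobs = 10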
+
+time_units = {
+ 'second' : 1,
+ 'minute' : 60,
+ 'hour' : 3600,
+ 'day' : 86400,
+ 'week' : 604800,
+}
+time_unit_aliases = [
+ #[unit, alias, alias, ...]
+ ['week', 'weeks', 'wk', 'wks'],
+ ['hour', 'hours', 'hr', 'hrs'],
+ ['day', 'days'],
+ ['minute', 'minutes', 'min', 'mins'],
+ ['second', 'seconds', 'sec', 'secs', 's'],
+]
+def parse_duration(str):
+ """Parse time duration from string, returns duration in seconds"""
+ ret = 0
+ n = None
+ unit = None
+ def parse_num(s):
+ try:
+ return int(s)
+ except ValueError:
+ pass
+ try:
+ return float(s)
+ except ValueError:
+ pass
+ return None
+ for x in str.split():
+ if n is None:
+ n = parse_num(x)
+ if n is not None:
+ continue
+ #perhaps the unit is appended w/o a space
+ for names in time_unit_aliases:
+ for name in names:
+ if x.endswith(name):
+ n = parse_num(x[:-len(name)])
+ if n is None:
+ continue
+ unit = names[0]
+ # combined at end
+ break
+ if unit:
+ break
+ else:
+ raise ValueError, "Invalid time interval: %s" % str
+ if unit is None:
+ x = x.lower()
+ for names in time_unit_aliases:
+ for name in names:
+ if x == name:
+ unit = names[0]
+ break
+ if unit:
+ break
+ else:
+ raise ValueError, "Invalid time interval: %s" % str
+ ret += n * time_units[unit]
+ n = None
+ unit = None
+ return ret
+
+def error(msg=None, code=1):
+ if msg:
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+ sys.exit(code)
+
+def warn(msg):
+ sys.stderr.write(msg + "\n")
+ sys.stderr.flush()
+
+def ensure_connection(session):
+ try:
+ ret = session.getAPIVersion()
+ except xmlrpclib.ProtocolError:
+ error(_("Error: Unable to connect to server"))
+ if ret != koji.API_VERSION:
+ warn(_("WARNING: The server is at API version %d and the client is at "
+ "%d" % (ret, koji.API_VERSION)))
+
+def activate_session(session):
+ """Test and login the session is applicable"""
+ global options
+
+ # convert to absolute paths
+ options.auth_cert = os.path.expanduser(options.auth_cert)
+ options.auth_ca = os.path.expanduser(options.auth_ca)
+ options.serverca = os.path.expanduser(options.serverca)
+
+ if options.noauth:
+ #skip authentication
+ pass
+ elif os.path.isfile(options.auth_cert):
+ # authenticate using SSL client cert
+ session.ssl_login(options.auth_cert, options.auth_ca, options.serverca, proxyuser=options.runas)
+ elif options.user:
+ #authenticate using user/password
+ session.login()
+ elif sys.modules.has_key('krbV'):
+ try:
+ if options.keytab and options.principal:
+ session.krb_login(principal=options.principal, keytab=options.keytab, proxyuser=options.runas)
+ else:
+ session.krb_login(proxyuser=options.runas)
+ except krbV.Krb5Error, e:
+ error(_("Kerberos authentication failed: '%s' (%s)") % (e.args[1], e.args[0]))
+ except socket.error, e:
+ warn(_("Could not connect to Kerberos authentication service: '%s'") % e.args[1])
+ if not options.noauth and not session.logged_in:
+ error(_("Error: unable to log in"))
+ ensure_connection(session)
+ if options.debug:
+ print "successfully connected to hub"
+
+def _unique_path(prefix):
+ """Create a unique path fragment by appending a path component
+ to prefix. The path component will consist of a string of letters and numbers
+ that is unlikely to be a duplicate, but is not guaranteed to be unique."""
+ # Use time() in the dirname to provide a little more information when
+ # browsing the filesystem.
+ # For some reason repr(time.time()) includes 4 or 5
+ # more digits of precision than str(time.time())
+ return '%s/%r.%s' % (prefix, time.time(),
+ ''.join([random.choice(string.ascii_letters) for i in range(8)]))
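+
+#e.g. _unique_path('koji-shadow') -> 'koji-shadow/1449071196.53.XhTwqEmx'
+#(illustrative; the timestamp varies and the eight-letter suffix is random)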
+
+
+class LocalBuild(object):
+ """A stand-in for substitute deps that are only available locally"""
+
+ def __init__(self, info, tracker=None):
+ self.info = info
+ self.id = info['id']
+ self.nvr = "%(name)s-%(version)s-%(release)s" % self.info
+ self.state = 'local'
+
+
+class TrackedBuild(object):
+
+ def __init__(self, build_id, child=None, tracker=None):
+ self.id = build_id
+ self.tracker = tracker
+ self.info = remote.getBuild(build_id)
+ self.nvr = "%(name)s-%(version)s-%(release)s" % self.info
+ self.name = "%(name)s" % self.info
+ self.epoch = "%(epoch)s" % self.info
+ self.version = "%(version)s" % self.info
+ self.release = "%(release)s" % self.info
+ self.srpm = None
+ self.rpms = None
+ self.children = {}
+ self.state = None
+ self.order = 0
+ self.substitute = None
+ if child is not None:
+ #children tracks the builds that were built using this one
+ self.children[child] = 1
+ #see if we have it
+ self.rebuilt = False
+ self.updateState()
+ if self.state == 'missing':
+ self.rpms = remote.listRPMs(self.id)
+ for rinfo in self.rpms:
+ if rinfo['arch'] == 'src':
+ self.srpm = rinfo
+ self.getExtraArches()
+ self.getDeps() #sets deps, br_tag, base, order, (maybe state)
+
+ def updateState(self):
+ """Update state from local hub
+
+ This is intended to be called at initialization and after a missing
+ build has been rebuilt"""
+ ours = session.getBuild(self.nvr)
+ if ours is not None:
+ state = koji.BUILD_STATES[ours['state']]
+ if state == 'COMPLETE':
+ self.setState("common")
+ if ours['task_id']:
+ self.rebuilt = True
+ return
+ elif state in ('FAILED', 'CANCELED'):
+ #treat these as having no build
+ pass
+ elif state == 'BUILDING' and ours['task_id']:
+ self.setState("pending")
+ self.task_id = ours['task_id']
+ return
+ else:
+ # DELETED or BUILDING(no task)
+ self.setState("broken")
+ return
+ self.setState("missing")
+
+ def isNoarch(self):
+ if not self.rpms:
+ return False
+ noarch = False
+ for rpminfo in self.rpms:
+ if rpminfo['arch'] == 'noarch':
+ #note that we've seen a noarch rpm
+ noarch = True
+ elif rpminfo['arch'] != 'src':
+ return False
+ return noarch
+
+ def setState(self, state):
+ #print "%s -> %s" % (self.nvr, state)
+ if state == self.state:
+ return
+ if self.state is not None and self.tracker:
+ del self.tracker.state_idx[self.state][self.id]
+ self.state = state
+ if self.tracker:
+ self.tracker.state_idx.setdefault(self.state, {})[self.id] = self
+
+ def getSource(self):
+ """Get source from remote"""
+ if options.remote_topurl and self.srpm:
+ #download srpm from remote
+ pathinfo = koji.PathInfo(options.remote_topurl)
+ url = "%s/%s" % (pathinfo.build(self.info), pathinfo.rpm(self.srpm))
+ print "Downloading %s" % url
+ #XXX - this is not really the right place for this
+ fsrc = urllib2.urlopen(url)
+ fn = "/tmp/koji-shadow/%s.src.rpm" % self.nvr
+ koji.ensuredir(os.path.dirname(fn))
+ fdst = file(fn, 'w')
+ shutil.copyfileobj(fsrc, fdst)
+ fsrc.close()
+ fdst.close()
+ serverdir = _unique_path('koji-shadow')
+ session.uploadWrapper(fn, serverdir, blocksize=65536)
+ src = "%s/%s" % (serverdir, os.path.basename(fn))
+ return src
+ #otherwise use SCM url
+ task_id = self.info['task_id']
+ if task_id:
+ tinfo = remote.getTaskInfo(task_id)
+ if tinfo['method'] == 'build':
+ try:
+ request = remote.getTaskRequest(task_id)
+ src = request[0]
+ #XXX - Move SCM class out of kojid and use it to check for scm url
+ if src.startswith('cvs:'):
+ return src
+ except:
+ pass
+ #otherwise fail
+ return None
+
+ def addChild(self, child):
+ self.children[child] = 1
+
+ def getExtraArches(self):
+ arches = {}
+ for rpminfo in self.rpms:
+ arches.setdefault(rpminfo['arch'], 1)
+ self.extraArches = [a for a in arches if koji.canonArch(a) != a]
+
+ def getBuildroots(self):
+ """Return a list of buildroots for remote build"""
+ brs = {}
+ bad = []
+ for rinfo in self.rpms:
+ br_id = rinfo.get('buildroot_id')
+ if not br_id:
+ bad.append(rinfo)
+ continue
+ brs[br_id] = 1
+ if brs and bad:
+ print "Warning: some rpms for %s lacked buildroots:" % self.nvr
+ for rinfo in bad:
+ print " %(name)s-%(version)s-%(release)s.%(arch)s" % rinfo
+ return brs.keys()
+
+ def getDeps(self):
+ buildroots = self.getBuildroots()
+ if not buildroots:
+ self.setState("noroot")
+ return
+ buildroots.sort()
+ self.order = buildroots[-1]
+ seen = {} #used to avoid scanning the same buildroot twice
+ builds = {} #track which builds we need for a rebuild
+ bases = {} #track base install for buildroots
+ tags = {} #track buildroot tag(s)
+ remote.multicall = True
+ unpack = []
+ for br_id in buildroots:
+ if seen.has_key(br_id):
+ continue
+ seen[br_id] = 1
+ #br_info = remote.getBuildroot(br_id, strict=True)
+ remote.getBuildroot(br_id, strict=True)
+ unpack.append(('br_info', br_id))
+ #tags.setdefault(br_info['tag_name'], 0)
+ #tags[br_info['tag_name']] += 1
+ #print "."
+ remote.listRPMs(componentBuildrootID=br_id)
+ unpack.append(('rpmlist', br_id))
+ #for rinfo in remote.listRPMs(componentBuildrootID=br_id):
+ # builds[rinfo['build_id']] = 1
+ # if not rinfo['is_update']:
+ # bases.setdefault(rinfo['name'], {})[br_id] = 1
+ for (dtype, br_id), data in zip(unpack, remote.multiCall()):
+ if dtype == 'br_info':
+ [br_info] = data
+ tags.setdefault(br_info['tag_name'], 0)
+ tags[br_info['tag_name']] += 1
+ elif dtype == 'rpmlist':
+ [rpmlist] = data
+ for rinfo in rpmlist:
+ builds[rinfo['build_id']] = 1
+ if not rinfo['is_update']:
+ bases.setdefault(rinfo['name'], {})[br_id] = 1
+ # we want to record the intersection of the base sets
+ # XXX - this makes some assumptions about homogeneity that, while reasonable,
+ # are not strictly required of the db.
+ # The only way I can think of to break this is if some significant tag/target
+ # changes happened during the build startup and some subtasks got the old
+ # repo and others the new one.
+ base = []
+ for name, brlist in bases.iteritems():
+ #We want to determine for each name if that package was present
+ #in /all/ the buildroots or just some.
+ #Because brlist is constructed only from elements of buildroots, we
+ #can simply check the length
+ assert len(brlist) <= len(buildroots)
+ if len(brlist) == len(buildroots):
+ #each buildroot had this as a base package
+ base.append(name)
+ if len(tags) > 1:
+ print "Warning: found multiple buildroot tags for %s: %s" % (self.nvr, tags.keys())
+ counts = [(n, tag) for tag, n in tags.iteritems()]
+ counts.sort()
+ tag = counts[-1][1]
+ else:
+ tag = tags.keys()[0]
+ self.deps = builds
+ self.revised_deps = None #BuildTracker will set this later
+ self.br_tag = tag
+ self.base = base
+
+
+class BuildTracker(object):
+
+ def __init__(self):
+ self.rebuild_order = 0
+ self.builds = {}
+ self.state_idx = {}
+ self.nvr_idx = {}
+ for state in ('common', 'pending', 'missing', 'broken', 'brokendeps',
+ 'noroot', 'blocked', 'grey'):
+ self.state_idx.setdefault(state, {})
+ self.scanRules()
+
+ def scanRules(self):
+ """Reads/parses rules data from the config
+
+ This data consists mainly of
+ white/black/greylist data
+ substitution data
+ """
+ self.blacklist = None
+ self.whitelist = None
+ self.greylist = None
+ self.ignorelist = []
+ self.excludelist = []
+ self.includelist = []
+ self.protectlist = []
+ self.substitute_idx = {}
+ self.substitutions = {}
+ if options.config.has_option('rules', 'whitelist'):
+ self.whitelist = options.config.get('rules', 'whitelist').split()
+ if options.config.has_option('rules', 'blacklist'):
+ self.blacklist = options.config.get('rules', 'blacklist').split()
+ if options.config.has_option('rules', 'greylist'):
+ self.greylist = options.config.get('rules', 'greylist').split()
+ if options.config.has_option('rules', 'ignorelist'):
+ self.ignorelist = options.config.get('rules', 'ignorelist').split()
+ if options.config.has_option('rules', 'excludelist'):
+ self.excludelist = options.config.get('rules', 'excludelist').split()
+ if options.config.has_option('rules', 'includelist'):
+ self.includelist = options.config.get('rules', 'includelist').split()
+ if options.config.has_option('rules', 'protectlist'):
+ self.protectlist = options.config.get('rules', 'protectlist').split()
+
+ # merge the excludelist (script generated) to the ignorelist (manually maintained)
+ self.ignorelist = self.ignorelist + self.excludelist
+
+ if options.config.has_option('rules', 'substitutions'):
+ #At present this is a simple multi-line format
+ #one substitution per line
+ #format:
+ # missing-build build-to-substitute
+ #TODO: allow more robust substitutions
+ for line in options.config.get('rules', 'substitutions').splitlines():
+ line = line.strip()
+ if line[:1] == "#":
+ #skip comment
+ continue
+ if not line:
+ #blank
+ continue
+ data = line.split()
+ if len(data) != 2:
+ raise Exception, "Bad substitution: %s" % line
+ match, replace = data
+ self.substitutions[match] = replace
+
+ def checkFilter(self, build, grey=None, default=True):
+ """Check build against white/black/grey lists
+
+ Whitelisting takes precedence over blacklisting. In our case, the whitelist
+ is a list of exceptions to black/greylisting.
+
+ If the build is greylisted, returns the value specified by the 'grey' parameter
+
+ If the build matches nothing, returns the value specified in the 'default' parameter
+ """
+ if self.whitelist:
+ for pattern in self.whitelist:
+ if fnmatch.fnmatch(build.nvr, pattern):
+ return True
+ if self.blacklist:
+ for pattern in self.blacklist:
+ if fnmatch.fnmatch(build.nvr, pattern):
+ return False
+ if self.greylist:
+ for pattern in self.greylist:
+ if fnmatch.fnmatch(build.nvr, pattern):
+ return grey
+ return default
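+
+ #Example (illustrative): with whitelist ['glibc-*'] and blacklist ['*'],
+ #checkFilter passes any glibc build and rejects everything else, while
+ #greylisted builds return the 'grey' argument instead.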
+
+ def rpmvercmp(self, (e1, v1, r1), (e2, v2, r2)):
+ """find out which build is newer"""
+ rc = rpm.labelCompare((e1, v1, r1), (e2, v2, r2))
+ if rc == 1:
+ #first evr wins
+ return 1
+ elif rc == 0:
+ #same evr
+ return 0
+ else:
+ #second evr wins
+ return -1
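+
+ #Example (illustrative): rpmvercmp(('0', '1.0', '10'), ('0', '1.0', '9'))
+ #returns 1 because rpm.labelCompare compares the release field
+ #numerically (10 > 9).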
+
+ def newerBuild(self, build, tag):
+ #XXX: secondary arches need a policy to say that if we have a newer build locally it will be the substitute
+ localBuilds = session.listTagged(tag, inherit=True, package=str(build.name))
+ newer = None
+ parentevr = (str(build.epoch), build.version, build.release)
+ parentnvr = (str(build.name), build.version, build.release)
+ for b in localBuilds:
+ latestevr = (str(b['epoch']), b['version'], b['release'])
+ newestRPM = self.rpmvercmp(parentevr, latestevr)
+ if options.debug:
+ print "remote evr: %s \nlocal evr: %s \nResult: %s" % (parentevr, latestevr, newestRPM)
+ if newestRPM == -1:
+ newer = b
+ else:
+ break
+ #the local is newer
+ if newer is not None:
+ info = session.getBuild("%s-%s-%s" % (str(newer['name']), newer['version'], newer['release']))
+ if info:
+ build = LocalBuild(info)
+ self.substitute_idx[parentnvr] = build
+ return build
+ return None
+
+ def getSubstitute(self, nvr):
+ build = self.substitute_idx.get(nvr)
+ if not build:
+ #see if remote has it
+ info = remote.getBuild(nvr)
+ if info:
+ #see if we're already tracking it
+ build = self.builds.get(info['id'])
+ if not build:
+ build = TrackedBuild(info['id'], tracker=self)
+ else:
+ #remote doesn't have it
+ #see if we have it locally
+ info = session.getBuild(nvr)
+ if info:
+ build = LocalBuild(info)
+ else:
+ build = None
+ self.substitute_idx[nvr] = build
+ return build
+
+ def scanBuild(self, build_id, from_build=None, depth=0, tag=None):
+ """Recursively scan a build and its dependencies"""
+ #print build_id
+ build = self.builds.get(build_id)
+ if build:
+ #already scanned
+ if from_build:
+ build.addChild(from_build.id)
+ #There are situations where we'll need to go forward anyway:
+ # - if we were greylisted before, and depth > 0 now
+ # - if we're being substituted and depth is 0
+ if not (depth > 0 and build.state == 'grey') \
+ and not (depth == 0 and build.substitute):
+ return build
+ else:
+ child_id = None
+ if from_build:
+ child_id = from_build.id
+ build = TrackedBuild(build_id, child=child_id, tracker=self)
+ self.builds[build_id] = build
+ if from_build:
+ tail = " (from %s)" % from_build.nvr
+ else:
+ tail = ""
+ head = " " * depth
+ for ignored in self.ignorelist:
+ if (build.name == ignored) or fnmatch.fnmatch(build.name, ignored):
+ print "%sIgnored Build: %s%s" % (head, build.nvr, tail)
+ build.setState('ignore')
+ return build
+ check = self.checkFilter(build, grey=None)
+ if check is None:
+ #greylisted builds are ok as deps, but not primary builds
+ if depth == 0:
+ print "%sGreylisted build %s%s" % (head, build.nvr, tail)
+ build.setState('grey')
+ return build
+ #get rid of 'grey' state (filter will not be checked again)
+ build.updateState()
+ elif not check:
+ print "%sBlocked build %s%s" % (head, build.nvr, tail)
+ build.setState('blocked')
+ return build
+ #make sure the build name is not protected
+ if build.name not in self.protectlist:
+ #check to see if a substitution applies
+ replace = self.substitutions.get(build.nvr)
+ if replace:
+ build.substitute = replace
+ if depth > 0:
+ print "%sDep replaced: %s->%s" % (head, build.nvr, replace)
+ return build
+ if options.prefer_new and (depth > 0) and (tag is not None) and not (build.state == "common"):
+ latestBuild = self.newerBuild(build, tag)
+ if latestBuild is not None:
+ build.substitute = latestBuild.nvr
+ print "%sNewer build replaced: %s->%s" % (head, build.nvr, latestBuild.nvr)
+ return build
+ else:
+ print "%sProtected Build: %s" % (head, build.nvr)
+ if build.state == "common":
+ #we're good
+ if build.rebuilt:
+ print "%sCommon build (rebuilt) %s%s" % (head, build.nvr, tail)
+ else:
+ print "%sCommon build %s%s" % (head, build.nvr, tail)
+ elif build.state == 'pending':
+ print "%sRebuild in progress: %s%s" % (head, build.nvr, tail)
+ elif build.state == "broken":
+ #The build already exists locally, but is somehow invalid.
+ #We should not replace it automatically. An admin can reset it
+ #if that is the correct thing. A substitution might also be in order
+ print "%sWarning: build exists, but is invalid: %s%s" % (head, build.nvr, tail)
+ #
+ # !! Cases where importing a noarch is /not/ ok must occur
+ # before this point
+ #
+ elif (options.import_noarch or options.import_noarch_only) and build.isNoarch():
+ self.importBuild(build, tag)
+ elif options.import_noarch_only and not build.isNoarch():
+ print "%sSkipping archful build: %s" % (head, build.nvr)
+ elif build.state == "noroot":
+ #Can't rebuild it, this is what substitutions are for
+ print "%sWarning: no buildroot data for %s%s" % (head, build.nvr, tail)
+ elif build.state == 'brokendeps':
+ #should not be possible at this point
+ print "Error: build reports brokendeps state before dep scan"
+ elif build.state == "missing":
+ #scan its deps
+ print "%sMissing build %s%s. Scanning deps..." % (head, build.nvr, tail)
+ newdeps = []
+ # include extra local builds as deps.
+ if self.includelist:
+ for dep in self.includelist:
+ info = session.getBuild(dep)
+ if info:
+ print "%s Adding local Dep %s%s" % (head, dep, tail)
+ extradep = LocalBuild(info)
+ newdeps.append(extradep)
+ else:
+ print "%s Warning: could not find build for %s" % (head, dep)
+ #don't actually set build.revised_deps until we finish the dep scan
+ for dep_id in build.deps:
+ dep = self.scanBuild(dep_id, from_build=build, depth=depth+1, tag=tag)
+ if dep.name in self.ignorelist:
+ # we are not done dep solving yet, but we don't want this dep in our buildroot
+ continue
+ else:
+ if dep.substitute:
+ dep2 = self.getSubstitute(dep.substitute)
+ if isinstance(dep2, TrackedBuild):
+ self.scanBuild(dep2.id, from_build=build, depth=depth+1, tag=tag)
+ elif dep2 is None:
+ #dep is missing on both local and remote
+ print "%sSubstitute dep unavailable: %s" % (head, dep2.nvr)
+ #no point in continuing
+ break
+ #otherwise dep2 should be LocalBuild instance
+ newdeps.append(dep2)
+ elif dep.state in ('broken', 'brokendeps', 'noroot', 'blocked'):
+ #no point in continuing
+ build.setState('brokendeps')
+ print "%sCan't rebuild %s, %s is %s" % (head, build.nvr, dep.nvr, dep.state)
+ newdeps = None
+ break
+ else:
+ newdeps.append(dep)
+ # set rebuild order as we go
+ # we do this /after/ the recursion, so our deps have a lower order number
+ self.rebuild_order += 1
+ build.order = self.rebuild_order
+ build.revised_deps = newdeps
+ #scanning takes a long time, might as well start builds if we can
+ self.checkJobs(tag)
+ self.rebuildMissing()
+ if len(self.builds) % 50 == 0:
+ self.report()
+ return build
+
+ def scanTag(self, tag):
+ """Scan the latest builds in a remote tag"""
+ taginfo = remote.getTag(tag)
+ builds = remote.listTagged(taginfo['id'], latest=True)
+ for build in builds:
+ for retry in xrange(10):
+ try:
+ self.scanBuild(build['id'], tag=tag)
+ if options.first_one:
+ return
+ except (socket.timeout, socket.error):
+ print "retry"
+ continue
+ break
+ else:
+ print "Error: unable to scan %(name)s-%(version)s-%(release)s" % build
+ continue
+
+ def _importURL(self, url, fn):
+ """Import an rpm directly from a url"""
+ serverdir = _unique_path('koji-shadow')
+ if options.link_imports:
+ #bit of a hack, but faster than uploading
+ dst = "%s/%s/%s" % (koji.pathinfo.work(), serverdir, fn)
+ old_umask = os.umask(002)
+ try:
+ koji.ensuredir(os.path.dirname(dst))
+ os.chown(os.path.dirname(dst), 48, 48) #XXX - hack
+ print "Downloading %s to %s" % (url, dst)
+ fsrc = urllib2.urlopen(url)
+ fdst = file(dst, 'w')
+ shutil.copyfileobj(fsrc, fdst)
+ fsrc.close()
+ fdst.close()
+ finally:
+ os.umask(old_umask)
+ else:
+ #TODO - would be possible, using uploadFile directly, to upload without writing locally.
+ #for now, though, just use uploadWrapper
+ koji.ensuredir(options.workpath)
+ dst = "%s/%s" % (options.workpath, fn)
+ print "Downloading %s to %s..." % (url, dst)
+ fsrc = urllib2.urlopen(url)
+ fdst = file(dst, 'w')
+ shutil.copyfileobj(fsrc, fdst)
+ fsrc.close()
+ fdst.close()
+ print "Uploading %s..." % dst
+ session.uploadWrapper(dst, serverdir, blocksize=65536)
+ session.importRPM(serverdir, fn)
+
+ def importBuild(self, build, tag=None):
+ '''import a build from remote hub'''
+ if not build.srpm:
+ print "No srpm for build %s, skipping import" % build.nvr
+ #TODO - support no-src imports here
+ return False
+ if not options.remote_topurl:
+ print "Skipping import of %s, remote_topurl not specified" % build.nvr
+ return False
+ pathinfo = koji.PathInfo(options.remote_topurl)
+ build_url = pathinfo.build(build.info)
+ url = "%s/%s" % (pathinfo.build(build.info), pathinfo.rpm(build.srpm))
+ fname = "%s.src.rpm" % build.nvr
+ self._importURL(url, fname)
+ for rpminfo in build.rpms:
+ if rpminfo['arch'] == 'src':
+ #already imported above
+ continue
+ relpath = pathinfo.rpm(rpminfo)
+ url = "%s/%s" % (build_url, relpath)
+ fname = os.path.basename(relpath)
+ self._importURL(url, fname)
+ build.updateState()
+ if options.tag_build and tag is not None:
+ self.tagSuccessful(build.nvr, tag)
+ return True
+
+ def scan(self):
+ """Scan based on config file"""
+ to_scan = []
+ alltags = remote.listTags()
+
+ def rebuild(self, build):
+ """Rebuild a remote build using closest possible buildroot"""
+ #first check that we can
+ if build.state != 'missing':
+ print "Can't rebuild %s. state=%s" % (build.nvr, build.state)
+ return
+ #deps = []
+ #for build_id in build.deps:
+ # dep = self.builds.get(build_id)
+ # if not dep:
+ # print "Missing dependency %i for %s. Not scanned?" % (build_id, build.nvr)
+ # return
+ # if dep.state != 'common':
+ # print "Dependency missing for %s: %s (%s)" % (build.nvr, dep.nvr, dep.state)
+ # return
+ # deps.append(dep)
+ deps = build.revised_deps
+ if deps is None:
+ print "Can't rebuild %s" % build.nvr
+ return
+ if options.test:
+ print "Skipping rebuild of %s (test mode)" % build.nvr
+ return
+ #check/create tag
+ our_tag = "SHADOWBUILD-%s" % build.br_tag
+ taginfo = session.getTag(our_tag)
+ parents = None
+ if not taginfo:
+ #XXX - not sure what is best here
+ #how do we pick arches? for now just hardcoded
+ #XXX this call for perms is stupid, but it's all we've got
+ perm_id = None
+ for data in session.getAllPerms():
+ if data['name'] == 'admin':
+ perm_id = data['id']
+ break
+ session.createTag(our_tag, perm=perm_id, arches=options.arches)
+ taginfo = session.getTag(our_tag, strict=True)
+ #we don't need a target, we trigger our own repo creation and
+ #pass that repo_id to the build call
+ #session.createBuildTarget(taginfo['name'], taginfo['id'], taginfo['id'])
+ else:
+ parents = session.getInheritanceData(taginfo['id'])
+ if parents:
+ print "Warning: shadow build tag has inheritance"
+ #check package list
+ pkgs = {}
+ for pkg in session.listPackages(tagID=taginfo['id']):
+ pkgs[pkg['package_name']] = pkg
+ missing_pkgs = []
+ for dep in deps:
+ name = dep.info['name']
+ if not pkgs.has_key(name):
+ #guess owner
+ owners = {}
+ for pkg in session.listPackages(pkgID=name):
+ owners.setdefault(pkg['owner_id'], []).append(pkg)
+ if owners:
+ order = [(len(v), k) for k, v in owners.iteritems()]
+ order.sort()
+ owner = order[-1][1]
+ else:
+ #just use ourselves
+ owner = session.getLoggedInUser()['id']
+ missing_pkgs.append((name, owner))
+ #check build list
+ cur_builds = {}
+ for binfo in session.listTagged(taginfo['id']):
+ #index by name in tagging order (latest first)
+ cur_builds.setdefault(binfo['name'], []).append(binfo)
+ to_untag = []
+ to_tag = []
+ for dep in deps:
+ #XXX - assuming here that there is only one dep per 'name'
+ # may want to check that this is true
+ cur_order = cur_builds.get(dep.info['name'], [])
+ tagged = False
+ for binfo in cur_order:
+ if binfo['nvr'] == dep.nvr:
+ tagged = True
+ #may not be latest now, but it will be after we do all the untagging
+ else:
+ # note that the untagging keeps older builds from piling up. In a sense
+ # we're gc-pruning this tag ourselves every pass.
+ to_untag.append(binfo)
+ if not tagged:
+ to_tag.append(dep)
+ #TODO - "add-on" packages
+ # for handling arch-specific deps that may not show up on remote
+ # e.g. elilo or similar
+ # these extra packages should be added to tag, but not the build group
+ #TODO - local extra builds
+ # a configurable mechanism to add specific local builds to the buildroot
+ drop_groups = []
+ build_group = None
+ for group in session.getTagGroups(taginfo['id']):
+ if group['name'] == 'build':
+ build_group = group
+ else:
+ # we should have no other groups but build
+ print "Warning: found stray group: %s" % group
+ drop_groups.append(group['name'])
+ if build_group:
+ #fix build group package list based on base of build to shadow
+ needed = dict([(n, 1) for n in build.base])
+ current = dict([(p['package'], 1) for p in build_group['packagelist']])
+ add_pkgs = [n for n in needed if not current.has_key(n)]
+ drop_pkgs = [n for n in current if not needed.has_key(n)]
+ #no group deps needed/allowed
+ drop_deps = [(g['name'], 1) for g in build_group['grouplist']]
+ if drop_deps:
+ print "Warning: build group had deps: %r" % build_group
+ else:
+ add_pkgs = build.base
+ drop_pkgs = []
+ drop_deps = []
+ #update package list, tagged packages, and groups in one multicall/transaction
+ #(avoid useless repo regens)
+ session.multicall = True
+ for name, owner in missing_pkgs:
+ session.packageListAdd(taginfo['id'], name, owner=owner)
+ for binfo in to_untag:
+ session.untagBuildBypass(taginfo['id'], binfo['id'])
+ for dep in to_tag:
+ session.tagBuildBypass(taginfo['id'], dep.nvr)
+ #shouldn't need force here
+ #set groups data
+ if not build_group:
+ # build group not present. add it
+ session.groupListAdd(taginfo['id'], 'build', force=True)
+ #using force in case group is blocked. This shouldn't be the case, but...
+ for pkg_name in drop_pkgs:
+ #in principle, our tag should not have inheritance, so the remove call is the right thing
+ session.groupPackageListRemove(taginfo['id'], 'build', pkg_name)
+ for pkg_name in add_pkgs:
+ session.groupPackageListAdd(taginfo['id'], 'build', pkg_name)
+ #we never add any blocks, so forcing shouldn't be required
+ #TODO - adjust extra_arches for package to build
+ #get event id to facilitate waiting on repo
+ # not sure if getLastEvent is good enough
+ # short of adding a new call, perhaps use getLastEvent together with event of
+ # current latest repo for tag
+ session.getLastEvent()
+ results = session.multiCall(strict=True)
+ event_id = results[-1][0]['id']
+ #TODO - verify / check results ?
+ task_id = session.newRepo(our_tag, event=event_id)
+ #TODO - upload src
+ # [?] use remote SCM url (if avail)?
+ src = build.getSource()
+ if not src:
+ print "Couldn't get source for %s" % build.nvr
+ return None
+ #wait for repo task
+ print "Waiting on newRepo task %i" % task_id
+ while True:
+ tinfo = session.getTaskInfo(task_id)
+ tstate = koji.TASK_STATES[tinfo['state']]
+ if tstate == 'CLOSED':
+ break
+ elif tstate in ('CANCELED', 'FAILED'):
+ print "Error: failed to generate repo"
+ return None
+ #add a timeout?
+ time.sleep(10) #assumed polling interval, so this busy-wait doesn't hammer the hub
+ #TODO ...and verify repo
+ repo_id, event_id = session.getTaskResult(task_id)
+ #kick off build
+ task_id = session.build(src, None, opts={'repo_id': repo_id}, priority=options.priority)
+ return task_id
+
+ def report(self):
+ print "-- %s --" % time.asctime()
+ self.report_brief()
+ for state in ('broken', 'noroot', 'blocked'):
+ builds = self.state_idx[state].values()
+ not_replaced = [b for b in builds if not b.substitute]
+ n_replaced = len(builds) - len(not_replaced)
+ print "%s: %i (+%i replaced)" % (state, len(not_replaced), n_replaced)
+ if not_replaced and len(not_replaced) < 8:
+ print '', ' '.join([b.nvr for b in not_replaced])
+ #generate a report of the most frequent problem deps
+ problem_counts = {}
+ for build in self.state_idx['brokendeps'].values():
+ for dep_id in build.deps:
+ dep = self.builds.get(dep_id)
+ if not dep:
+ #unscanned
+ #possible because we short circuit the earlier scan on problems
+ #we don't really know if this one is a problem or not, so just
+ #skip it.
+ continue
+ if dep.state in ('common', 'pending', 'missing'):
+ #not a problem
+ continue
+ nvr = dep.nvr
+ if dep.substitute:
+ dep2 = self.getSubstitute(dep.substitute)
+ if dep2:
+ #we have a substitution, so not a problem
+ continue
+ #otherwise the substitution is the problem
+ nvr = dep.substitute
+ problem_counts.setdefault(nvr, 0)
+ problem_counts[nvr] += 1
+ order = [(c, nvr) for (nvr, c) in problem_counts.iteritems()]
+ if order:
+ order.sort()
+ order.reverse()
+ #print top 5 problems
+ print "-- top problems --"
+ for (c, nvr) in order[:5]:
+ print " %s (%i)" % (nvr, c)
+
+ def report_brief(self):
+ N = len(self.builds)
+ states = self.state_idx.keys()
+ states.sort()
+ parts = ["%s: %i" % (s, len(self.state_idx[s])) for s in states]
+ parts.append("total: %i" % N)
+ print ' '.join(parts)
+
+ def _print_builds(self, mylist):
+ """small helper function for output"""
+ for build in mylist:
+ print " %s (%s)" % (build.nvr, build.state)
+
+ def checkJobs(self, tag=None):
+ """Check outstanding jobs. Return true if anything changes"""
+ ret = False
+ for build_id, build in self.state_idx['pending'].items():
+ #check pending builds
+ if not build.task_id:
+ print "No task id recorded for %s" % build.nvr
+ build.updateState()
+ ret = True
+ continue
+ info = session.getTaskInfo(build.task_id)
+ if not info:
+ print "No such task: %i (build %s)" % (build.task_id, build.nvr)
+ build.updateState()
+ ret = True
+ continue
+ state = koji.TASK_STATES[info['state']]
+ if state in ('CANCELED', 'FAILED'):
+ print "Task %i is %s (build %s)" % (build.task_id, state, build.nvr)
+ #we have to set the state to broken manually (updateState will mark
+ #a failed build as missing)
+ build.setState('broken')
+ ret = True
+ elif state == 'CLOSED':
+ print "Task %i complete (build %s)" % (build.task_id, build.nvr)
+ if options.tag_build and tag is not None:
+ self.tagSuccessful(build.nvr, tag)
+ build.updateState()
+ ret = True
+ if build.state != 'common':
+ print "Task %i finished, but %s still missing" \
+ % (build.task_id, build.nvr)
+ return ret
+
+ def checkBuildDeps(self, build):
+ #check deps
+ if build.revised_deps is None:
+ #print "No revised deplist yet for %s" % build.nvr
+ return False
+ problem = [x for x in build.revised_deps
+ if x.state in ('broken', 'brokendeps', 'noroot', 'blocked')]
+ if problem:
+ print "Can't rebuild %s, missing %i deps" % (build.nvr, len(problem))
+ build.setState('brokendeps')
+ self._print_builds(problem)
+ return False
+ not_common = [x for x in build.revised_deps
+ if x.state not in ('common', 'local')]
+ if not_common:
+ #could be missing or still building or whatever
+ #print "Still missing %i revised deps for %s" % (len(not_common), build.nvr)
+ return False
+ #otherwise, we should be good to rebuild
+ return True
+
+ def rebuildMissing(self):
+ """Initiate rebuilds for missing builds, if possible.
+
+ Returns True if any builds were attempted"""
+ ret = False
+ if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs:
+ return ret
+ missing = [(b.order, b.id, b) for b in self.state_idx['missing'].itervalues()]
+ missing.sort()
+ for order, build_id, build in missing:
+ if not self.checkBuildDeps(build):
+ continue
+ #otherwise, we should be good to rebuild
+ print "rebuild: %s" % build.nvr
+ task_id = self.rebuild(build)
+ ret = True
+ if options.test:
+ #pretend build is available
+ build.setState('common')
+ elif not task_id:
+ #something went wrong setting up the rebuild
+ print "Did not get a task for %s" % build.nvr
+ build.setState('broken')
+ else:
+ # build might not show up as 'BUILDING' immediately, so we
+ # set this state manually rather than by updateState
+ build.task_id = task_id
+ build.setState('pending')
+ if options.max_jobs and len(self.state_idx['pending']) >= options.max_jobs:
+ if options.debug:
+ print "Maximum number of jobs reached."
+ break
+ return ret
+
+ def runRebuilds(self, tag=None):
+ """Rebuild missing builds"""
+ print "Determining rebuild order"
+ #using self.state_idx to track build states
+ #make sure state_idx has at least these states
+ initial_avail = len(self.state_idx['common'])
+ self.report_brief()
+ while True:
+ if (not self.state_idx['missing'] and not self.state_idx['pending']) or \
+ (options.prefer_new and not self.state_idx['pending']):
+ #we're done
+ break
+ changed1 = self.checkJobs(tag)
+ changed2 = self.rebuildMissing()
+ if not changed1 and not changed2:
+ time.sleep(30)
+ continue
+ self.report_brief()
+ print "Rebuilt %i builds" % (len(self.state_idx['common']) - initial_avail)
+
+ def tagSuccessful(self, nvr, tag):
+ """tag completed builds into final tags"""
+ #TODO: check if there are other reasons why tagging may fail and handle them
+ try:
+ session.tagBuildBypass(tag, nvr)
+ print "tagged %s to %s" % (nvr, tag)
+ except koji.TagError:
+ print "NOTICE: %s already tagged in %s" % (nvr, tag)
+
+
+def main(args):
+ tracker = BuildTracker()
+ try:
+ tag = args[0]
+ except IndexError:
+ tag = None
+
+ if options.build:
+ binfo = remote.getBuild(options.build, strict=True)
+ tracker.scanBuild(binfo['id'], tag=tag)
+ else:
+ if tag is None:
+ print "Tag is required"
+ return
+ else:
+ print "Working on tag %s" % (tag)
+ tracker.scanTag(tag)
+ tracker.report()
+ tracker.runRebuilds(tag)
+
+
+if __name__ == "__main__":
+
+ options, args = get_options()
+
+ session_opts = {}
+ for k in ('user', 'password', 'krbservice', 'debug_xmlrpc', 'debug'):
+ session_opts[k] = getattr(options, k)
+ session = koji.ClientSession(options.server, session_opts)
+ if not options.noauth:
+ activate_session(session)
+ #XXX - sane auth
+ #XXX - config!
+ remote_opts = {'anon_retry': True}
+ for k in ('debug_xmlrpc', 'debug'):
+ remote_opts[k] = getattr(options, k)
+ remote = koji.ClientSession(options.remote, remote_opts)
+ rv = 0
+ try:
+ rv = main(args)
+ if not rv:
+ rv = 0
+ except KeyboardInterrupt:
+ pass
+ except SystemExit:
+ rv = 1
+ #except:
+ # if options.debug:
+ # raise
+ # else:
+ # exctype, value = sys.exc_info()[:2]
+ # rv = 1
+ # print "%s: %s" % (exctype, value)
+ try:
+ session.logout()
+ except:
+ pass
+ sys.exit(rv)
diff --git a/util/koji-shadow.conf b/util/koji-shadow.conf
new file mode 100644
index 0000000..37318b7
--- /dev/null
+++ b/util/koji-shadow.conf
@@ -0,0 +1,7 @@
+# koji-shadow example config file
+# (still working out all the config options)
+
+[main]
+server=http://localhost/kojihub/
+krbservice=host
+remote=http://koji.fedoraproject.org/kojihub
diff --git a/util/kojira b/util/kojira
new file mode 100755
index 0000000..c18f63b
--- /dev/null
+++ b/util/kojira
@@ -0,0 +1,822 @@
+#!/usr/bin/python
+
+# Koji Repository Administrator (kojira)
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+try:
+ import krbV
+except ImportError:
+ pass
+import sys
+import os
+import koji
+from koji.util import rmtree, parseStatus
+from optparse import OptionParser
+from ConfigParser import ConfigParser
+import errno
+import fnmatch
+import logging
+import logging.handlers
+import pprint
+import signal
+import time
+import threading
+import traceback
+
+
+
+tag_cache = {}
+
+def getTag(session, tag, event=None):
+ """A caching version of the hub call"""
+ cache = tag_cache
+ now = time.time()
+ if (tag, event) in cache:
+ ts, info = cache[(tag, event)]
+ if now - ts < 600:
+ #use the cache
+ return info
+ info = session.getTag(tag, event=event)
+ if info:
+ cache[(info['id'], event)] = (now, info)
+ cache[(info['name'], event)] = (now, info)
+ return info
+
+
+class ManagedRepo(object):
+
+ def __init__(self, manager, data):
+ self.manager = manager
+ self.session = manager.session
+ self.options = manager.options
+ self.logger = logging.getLogger("koji.repo")
+ self.current = True
+ self.repo_id = data['id']
+ self.event_id = data['create_event']
+ self.event_ts = data['create_ts']
+ self.tag_id = data['tag_id']
+ self.state = data['state']
+ self.expire_ts = None
+ if koji.REPO_STATES[self.state] in ['EXPIRED', 'DELETED', 'PROBLEM']:
+ self.current = False
+ self.expire_ts = time.time()
+ # TODO use hub data to find the actual expiration time
+ self.first_seen = time.time()
+ if self.current:
+ order = self.session.getFullInheritance(self.tag_id, event=self.event_id)
+ #order may contain same tag more than once
+ tags = {self.tag_id : 1}
+ for x in order:
+ tags[x['parent_id']] = 1
+ self.taglist = tags.keys()
+
+ def expire(self):
+ """Mark the repo expired"""
+ if self.state == koji.REPO_EXPIRED:
+ return
+ elif self.state == koji.REPO_DELETED:
+ raise koji.GenericError, "Repo already deleted"
+ self.logger.info("Expiring repo %s.." % self.repo_id)
+ self.session.repoExpire(self.repo_id)
+ self.state = koji.REPO_EXPIRED
+
+ def expired(self):
+ return self.state == koji.REPO_EXPIRED
+
+ def pending(self, timeout=180):
+ """Determine if repo generation appears to be in progress and not already obsolete"""
+ if self.state != koji.REPO_INIT:
+ return False
+ age = time.time() - self.event_ts
+ return self.current and age < timeout
+
+ def stale(self):
+ """Determine if repo seems stale
+
+ By stale, we mean:
+ - state=INIT
+ - timestamp really, really old
+ """
+ timeout = 36000
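+ #36000 seconds == 10 hours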
+ #XXX - config
+ if self.state != koji.REPO_INIT:
+ return False
+ age = time.time() - max(self.event_ts, self.first_seen)
+ #the first_seen timestamp is also factored in because a repo can be
+ #created from an older event and should not be expired based solely on
+ #that event's timestamp.
+ return age > timeout
+
+ def tryDelete(self):
+ """Remove the repo from disk, if possible"""
+ tag_info = getTag(self.session, self.tag_id)
+ if not tag_info:
+ tag_info = getTag(self.session, self.tag_id, self.event_id)
+ if not tag_info:
+ self.logger.warn('Could not get info for tag %i, skipping delete of repo %i' %
+ (self.tag_id, self.repo_id))
+ return False
+ tag_name = tag_info['name']
+ path = pathinfo.repo(self.repo_id, tag_name)
+ try:
+ #also check dir age. We do this because a repo can be created from an older event
+ #and should not be removed based solely on that event's timestamp.
+ mtime = os.stat(path).st_mtime
+ except OSError, e:
+ if e.errno == errno.ENOENT:
+ # No such file or directory: the repo either never existed,
+ # or has already been deleted, so allow it to be marked deleted.
+ self.logger.info("Repo directory does not exist: %s" % path)
+ else:
+ self.logger.error("Can't stat repo directory: %s, %s" % (path, e.strerror))
+ return False
+ else:
+ times = [self.event_ts, mtime, self.first_seen, self.expire_ts]
+ times = [ts for ts in times if ts is not None]
+ age = time.time() - max(times)
+ if age < self.options.deleted_repo_lifetime:
+ #XXX should really be called expired_repo_lifetime
+ return False
+ self.logger.debug("Attempting to delete repo %s.." % self.repo_id)
+ if self.state != koji.REPO_EXPIRED:
+ raise koji.GenericError, "Repo not expired"
+ if self.session.repoDelete(self.repo_id) > 0:
+ #cannot delete, we are referenced by a buildroot
+ self.logger.debug("Cannot delete repo %s, still referenced" % self.repo_id)
+ return False
+ self.logger.info("Deleted repo %s" % self.repo_id)
+ self.state = koji.REPO_DELETED
+ self.manager.rmtree(path)
+ return True
+
+ def ready(self):
+ return self.state == koji.REPO_READY
+
+ def deleted(self):
+ return self.state == koji.REPO_DELETED
+
+ def problem(self):
+ return self.state == koji.REPO_PROBLEM
+
+
+class RepoManager(object):
+
+ def __init__(self, options, session):
+ self.options = options
+ self.session = session
+ self.repos = {}
+ self.tasks = {}
+ self.tag_use_stats = {}
+ self.delete_pids = {}
+ self.delete_queue = []
+ self.logger = logging.getLogger("koji.repo.manager")
+
+ def printState(self):
+ self.logger.debug('Tracking %i repos, %i child processes', len(self.repos), len(self.delete_pids))
+ for tag_id, task_id in self.tasks.iteritems():
+ self.logger.debug("Tracking task %s for tag %s", task_id, tag_id)
+ for pid, desc in self.delete_pids.iteritems():
+ self.logger.debug("Delete job %s: %r", pid, desc)
+
+ def rmtree(self, path):
+ """Spawn (or queue) and rmtree job"""
+ self.logger.info("Queuing rmtree job for %s", path)
+ self.delete_queue.append(path)
+ self.checkQueue()
+
+ def checkQueue(self):
+ finished = [pid for pid in self.delete_pids if self.waitPid(pid)]
+ for pid in finished:
+ path = self.delete_pids[pid]
+ self.logger.info("Completed rmtree job for %s", path)
+ del self.delete_pids[pid]
+ while self.delete_queue and len(self.delete_pids) <= self.options.max_delete_processes:
+ path = self.delete_queue.pop(0)
+ pid = self._rmtree(path)
+ self.logger.info("Started rmtree (pid %i) for %s", pid, path)
+ self.delete_pids[pid] = path
+
+ def waitPid(self, pid):
+ # XXX - can we unify with TaskManager?
+ prefix = "pid %i (%s)" % (pid, self.delete_pids.get(pid))
+ try:
+ (childpid, status) = os.waitpid(pid, os.WNOHANG)
+ except OSError, e:
+ if e.errno != errno.ECHILD:
+ #should not happen
+ raise
+ #otherwise assume the process is gone
+ self.logger.info("%s: %s" % (prefix, e))
+ return True
+ if childpid != 0:
+ self.logger.info(parseStatus(status, prefix))
+ return True
+ return False
+
+ def _rmtree(self, path):
+ pid = os.fork()
+ if pid:
+ return pid
+ # no return
+ try:
+ status = 1
+ self.session._forget()
+ try:
+ rmtree(path)
+ status = 0
+ except Exception:
+ logger.error(''.join(traceback.format_exception(*sys.exc_info())))
+ logging.shutdown()
+ finally:
+ os._exit(status)
+
+ def killChildren(self):
+ # XXX - unify with TaskManager?
+ sig = signal.SIGTERM
+ for pid in self.delete_pids:
+ try:
+ os.kill(pid, sig)
+ except OSError, e:
+ if e.errno != errno.ESRCH:
+ logger.error("Unable to kill process %s", pid)
+
+ def readCurrentRepos(self):
+ self.logger.debug("Reading current repo data")
+ repodata = self.session.getActiveRepos()
+ self.logger.debug("Repo data: %r" % repodata)
+ for data in repodata:
+ repo_id = data['id']
+ repo = self.repos.get(repo_id)
+ if repo:
+ #we're already tracking it
+ if repo.state != data['state']:
+ self.logger.info('State changed for repo %s: %s -> %s'
+ %(repo_id, koji.REPO_STATES[repo.state], koji.REPO_STATES[data['state']]))
+ repo.state = data['state']
+ else:
+ self.logger.info('Found repo %s, state=%s'
+ %(repo_id, koji.REPO_STATES[data['state']]))
+ self.repos[repo_id] = ManagedRepo(self, data)
+ if len(self.repos) > len(repodata):
+ # This shouldn't normally happen, but might if someone else calls
+ # repoDelete or similar
+ active = set([r['id'] for r in repodata])
+ for repo_id in self.repos.keys():
+ if repo_id not in active:
+ self.logger.info('Dropping entry for inactive repo: %s', repo_id)
+ del self.repos[repo_id]
+
+ def checkCurrentRepos(self, session=None):
+ """Determine which repos are current"""
+ if session is None:
+ session = self.session
+ to_check = []
+ repo_ids = self.repos.keys()
+ for repo_id in repo_ids:
+ repo = self.repos.get(repo_id)
+ if repo is None:
+ # removed by main thread
+ continue
+ if not repo.current:
+ # no point in checking again
+ continue
+ if repo.state not in (koji.REPO_READY, koji.REPO_INIT):
+ repo.current = False
+ if repo.expire_ts is None:
+ repo.expire_ts = time.time()
+ #also no point in further checking
+ continue
+ to_check.append(repo)
+ if self.logger.isEnabledFor(logging.DEBUG):
+ skipped = set(repo_ids).difference([r.repo_id for r in to_check])
+ self.logger.debug("Skipped check for repos: %r", skipped)
+ if not to_check:
+ return
+ #session.multicall = True
+ for repo in to_check:
+ changed = session.tagChangedSinceEvent(repo.event_id, repo.taglist)
+ #for repo, [changed] in zip(to_check, session.multiCall(strict=True)):
+ if changed:
+ self.logger.info("Repo %i no longer current", repo.repo_id)
+ repo.current = False
+ repo.expire_ts = time.time()
+
+ def currencyChecker(self, session):
+ """Continually checks repos for currency. Runs as a separate thread"""
+ self.logger.info('currencyChecker starting')
+ try:
+ try:
+ while True:
+ self.checkCurrentRepos(session)
+ time.sleep(self.options.sleeptime)
+ except:
+ logger.exception('Error in currency checker thread')
+ raise
+ finally:
+ session.logout()
+
+ def pruneLocalRepos(self):
+ """Scan filesystem for repos and remove any deleted ones
+
+ Also, warn about any oddities"""
+ if self.delete_pids:
+ #skip
+ return
+ self.logger.debug("Scanning filesystem for repos")
+ topdir = "%s/repos" % pathinfo.topdir
+ for tag in os.listdir(topdir):
+ tagdir = "%s/%s" % (topdir, tag)
+ if not os.path.isdir(tagdir):
+ continue
+ for repo_id in os.listdir(tagdir):
+ try:
+ repo_id = int(repo_id)
+ except ValueError:
+ continue
+ repodir = "%s/%s" % (tagdir, repo_id)
+ if not os.path.isdir(repodir):
+ continue
+ if self.repos.has_key(repo_id):
+ #we're already managing it, no need to deal with it here
+ continue
+ try:
+ dir_ts = os.stat(repodir).st_mtime
+ except OSError:
+ #just in case something deletes the repo out from under us
+ continue
+ rinfo = self.session.repoInfo(repo_id)
+ if rinfo is None:
+ if not self.options.ignore_stray_repos:
+ age = time.time() - dir_ts
+ if age > self.options.deleted_repo_lifetime:
+ self.logger.info("Removing unexpected directory (no such repo): %s" % repodir)
+ self.rmtree(repodir)
+ continue
+ if rinfo['tag_name'] != tag:
+ self.logger.warn("Tag name mismatch (rename?): %s vs %s", tag, rinfo['tag_name'])
+ continue
+ if rinfo['state'] in (koji.REPO_DELETED, koji.REPO_PROBLEM):
+ age = time.time() - max(rinfo['create_ts'], dir_ts)
+ if age > self.options.deleted_repo_lifetime:
+ #XXX should really be called expired_repo_lifetime
+ self.logger.info("Removing stray repo (state=%s): %s" % (koji.REPO_STATES[rinfo['state']], repodir))
+ self.rmtree(repodir)
+
+ def tagUseStats(self, tag_id):
+ stats = self.tag_use_stats.get(tag_id)
+ now = time.time()
+ if stats and now - stats['ts'] < 3600:
+ #use the cache
+ return stats
+ data = self.session.listBuildroots(tagID=tag_id,
+ queryOpts={'order': '-create_event_id', 'limit' : 100})
+ #XXX magic number (limit)
+ if data:
+ tag_name = data[0]['tag_name']
+ else:
+ tag_name = "#%i" % tag_id
+ stats = {'data': data, 'ts': now, 'tag_name': tag_name}
+ recent = [x for x in data if now - x['create_ts'] < 3600 * 24]
+ #XXX magic number
+ stats['n_recent'] = len(recent)
+ self.tag_use_stats[tag_id] = stats
+ self.logger.debug("tag %s recent use count: %i" % (tag_name, len(recent)))
+ return stats
+
+ def adjustRegenOrder(self, data):
+ """Adjust repo regen order
+
+ data is list of (ts, tag_id) entries
+ We sort the tags by two factors
+ - age of current repo (passed in via data)
+ - last use in a buildroot (via tagUseStats)
+ Having an older repo or a higher use count gives the repo
+ a higher priority for regen. The formula attempts to keep
+ the last-use factor from overpowering, so that very old repos
+ still get regen priority.
+ """
+ if not data:
+ return []
+ n_maven = 0
+ for ts, tag_id in data:
+ taginfo = getTag(self.session, tag_id)
+ if taginfo.get('maven_support'):
+ n_maven += 1
+ self.logger.info("Got %i tags for regeneration (%i maven tags)", len(data), n_maven)
+ if len(data) == 1:
+ return data[:]
+ data = [(ts, tag_id, self.tagUseStats(tag_id)) for ts, tag_id in data]
+ max_n = max([s['n_recent'] for ts,tag,s in data])
+ if max_n == 0:
+ self.logger.info("No tags had recent use")
+ ret = [(ts,tag) for ts,tag,s in data]
+ ret.sort()
+ return ret
+ #XXX - need to make sure our times aren't far off, otherwise this
+ # adjustment could have the opposite of the desired effect
+ now = time.time()
+ ret = []
+ names = {}
+ for ts, tag_id, stats in data:
+ names[tag_id] = stats['tag_name']
+ #normalize use count
+ adj = stats['n_recent'] * 9.0 / max_n + 1 # 1.0 to 10.0
+ sortvalue = (now-ts)*adj
+ ret.append((sortvalue, tag_id))
+ self.logger.debug("order adjustment: tag %s, ts %s, recent use %s, factor %s, new sort value %s",
+ stats['tag_name'], ts, stats['n_recent'], adj, sortvalue)
+ #so a day-old unused repo gets about the same regen priority as a
+ #2.4-hour-old, very popular repo
+ ret.sort()
+ ret.reverse()
+ if self.logger.isEnabledFor(logging.INFO):
+ #show some stats
+ by_ts = [(ts,names[tag]) for ts,tag,s in data]
+ by_ts.sort()
+ self.logger.info("Newest repo: %s (%.2fhrs)", by_ts[-1][1], (now - by_ts[-1][0])/3600.)
+ self.logger.info("Oldest repo: %s (%.2fhrs)", by_ts[0][1], (now - by_ts[0][0])/3600.)
+ self.logger.info("Best score: %s (%.1f)", names[ret[0][1]], ret[0][0])
+ self.logger.info("Worst score: %s (%.1f)", names[ret[-1][1]], ret[-1][0])
+ self.logger.info("Order: %s", [names[x[1]] for x in ret])
+ return ret
+
+ def updateRepos(self):
+ #check on tasks
+ running_tasks = 0
+ running_tasks_maven = 0
+ our_tasks = {}
+ for tag_id, task_id in self.tasks.items():
+ tinfo = self.session.getTaskInfo(task_id)
+ our_tasks[task_id] = tinfo
+ tstate = koji.TASK_STATES[tinfo['state']]
+ if tstate == 'CLOSED':
+ self.logger.info("Finished: newRepo task %s for tag %s" % (task_id, tag_id))
+ del self.tasks[tag_id]
+ continue
+ elif tstate in ('CANCELED', 'FAILED'):
+ self.logger.info("Problem: newRepo task %s for tag %s is %s" % (task_id, tag_id, tstate))
+ del self.tasks[tag_id]
+ continue
+ taginfo = getTag(self.session, tag_id)
+ if tinfo['waiting']:
+ self.logger.debug("Task %i is waiting", task_id)
+ else:
+ #the largest hub impact is from the first part of the newRepo task
+ #once it is waiting on subtasks, that part is over
+ running_tasks += 1
+ if taginfo.get('maven_support'):
+ running_tasks_maven += 1
+ #TODO [?] - implement a timeout for active tasks?
+ #check for untracked newRepo tasks
+ repo_tasks = self.session.listTasks(opts={'method':'newRepo',
+ 'state':([koji.TASK_STATES[s] for s in ('FREE', 'OPEN')])})
+ other_tasks = []
+ for tinfo in repo_tasks:
+ if tinfo['id'] in our_tasks:
+ continue
+ other_tasks.append(tinfo)
+ if tinfo['waiting']:
+ self.logger.debug("Untracked task %i is waiting", tinfo['id'])
+ else:
+ running_tasks += 1
+ # TODO - determine tag and maven support
+ self.logger.debug("Current tasks: %r" % self.tasks)
+ if other_tasks:
+ self.logger.debug("Found %i untracked newRepo tasks" % len(other_tasks))
+ self.logger.debug("Updating repos")
+ self.readCurrentRepos()
+ #check for stale repos
+ for repo in self.repos.values():
+ if repo.stale():
+ repo.expire()
+ #find out which tags require repos
+ tags = {}
+ for target in self.session.getBuildTargets():
+ tag_id = target['build_tag']
+ tags[tag_id] = target['build_tag_name']
+ #index repos by tag
+ tag_repos = {}
+ for repo in self.repos.values():
+ tag_repos.setdefault(repo.tag_id, []).append(repo)
+ self.logger.debug("Needed tags: %r" % tags.keys())
+ self.logger.debug("Current tags: %r" % tag_repos.keys())
+
+ #we need to determine:
+ # - which tags need a new repo
+ # - if any repos seem to be broken
+ #self.checkCurrentRepos now runs continually in a separate thread
+ regen = []
+ expire_times = {}
+ for tag_id in tags.iterkeys():
+ covered = False
+ for repo in tag_repos.get(tag_id,[]):
+ if repo.current:
+ covered = True
+ break
+ elif repo.pending():
+ #one on the way
+ covered = True
+ break
+ if covered:
+ continue
+ if self.tasks.has_key(tag_id):
+ #repo creation in progress
+ #TODO - implement a timeout
+ continue
+ #tag still appears to be uncovered
+ #figure out how old existing repo is
+ ts = 0
+ for repo in tag_repos.get(tag_id, []):
+ if repo.expire_ts:
+ if repo.expire_ts > ts:
+ ts = repo.expire_ts
+ else:
+ self.logger.warning("No expire timestamp for repo: %s", repo.repo_id)
+ expire_times[tag_id] = ts
+ if ts == 0:
+ ts = time.time()
+ regen.append((ts, tag_id))
+ #factor in tag use stats
+ regen = self.adjustRegenOrder(regen)
+ self.logger.debug("order: %s", regen)
+ # i.e. tags with oldest (or no) repos get precedence
+ for score, tag_id in regen:
+ if running_tasks >= self.options.max_repo_tasks:
+ self.logger.info("Maximum number of repo tasks reached")
+ break
+ elif len(self.tasks) + len(other_tasks) >= self.options.repo_tasks_limit:
+ self.logger.info("Repo task limit reached")
+ break
+ tagname = tags[tag_id]
+ taskopts = {}
+ for pat in self.options.debuginfo_tags.split():
+ if fnmatch.fnmatch(tagname, pat):
+ taskopts['debuginfo'] = True
+ break
+ for pat in self.options.source_tags.split():
+ if fnmatch.fnmatch(tagname, pat):
+ taskopts['src'] = True
+ break
+ taginfo = getTag(self.session, tag_id)
+ if taginfo.get('maven_support'):
+ if running_tasks_maven >= self.options.max_repo_tasks_maven:
+ continue
+ task_id = self.session.newRepo(tagname, **taskopts)
+ running_tasks += 1
+ if taginfo.get('maven_support'):
+ running_tasks_maven += 1
+ expire_ts = expire_times[tag_id]
+ if expire_ts == 0:
+ time_expired = '???'
+ else:
+ time_expired = "%.1f" % (time.time() - expire_ts)
+ self.logger.info("Created newRepo task %s for tag %s (%s), expired for %s sec" % (task_id, tag_id, tags[tag_id], time_expired))
+ self.tasks[tag_id] = task_id
+ if running_tasks_maven >= self.options.max_repo_tasks_maven:
+ self.logger.info("Maximum number of maven repo tasks reached")
+ #some cleanup
+ n_deletes = 0
+ for tag_id, repolist in tag_repos.items():
+ if not tags.has_key(tag_id):
+ #repos for these tags are no longer required
+ for repo in repolist:
+ if repo.ready():
+ repo.expire()
+ for repo in repolist:
+ if n_deletes >= self.options.delete_batch_size:
+ break
+ if repo.expired():
+ #try to delete
+ if repo.tryDelete():
+ n_deletes += 1
+ del self.repos[repo.repo_id]
+
+def start_currency_checker(session, repomgr):
+ subsession = session.subsession()
+ thread = threading.Thread(name='currencyChecker',
+ target=repomgr.currencyChecker, args=(subsession,))
+ thread.setDaemon(True)
+ thread.start()
+ return thread
+
+def main(options, session):
+ repomgr = RepoManager(options, session)
+ repomgr.readCurrentRepos()
+ def shutdown(*args):
+ raise SystemExit
+ signal.signal(signal.SIGTERM,shutdown)
+ curr_chk_thread = start_currency_checker(session, repomgr)
+ # TODO also move rmtree jobs to threads
+ logger.info("Entering main loop")
+ while True:
+ try:
+ repomgr.updateRepos()
+ repomgr.checkQueue()
+ repomgr.printState()
+ repomgr.pruneLocalRepos()
+ if not curr_chk_thread.isAlive():
+ logger.error("Currency checker thread died. Restarting it.")
+ curr_chk_thread = start_currency_checker(session, repomgr)
+ except KeyboardInterrupt:
+ logger.warn("User exit")
+ break
+ except koji.AuthExpired:
+ logger.warn("Session expired")
+ break
+ except SystemExit:
+ logger.warn("Shutting down")
+ break
+ except:
+ # log the exception and continue
+ logger.error(''.join(traceback.format_exception(*sys.exc_info())))
+ try:
+ time.sleep(options.sleeptime)
+ except KeyboardInterrupt:
+ logger.warn("User exit")
+ break
+ try:
+ repomgr.checkQueue()
+ repomgr.killChildren()
+ finally:
+ session.logout()
+
+def get_options():
+ """process options from command line and config file"""
+ # parse command line args
+ parser = OptionParser("usage: %prog [opts]")
+ parser.add_option("-c", "--config", dest="configFile",
+ help="use alternate configuration file", metavar="FILE",
+ default="/etc/kojira/kojira.conf")
+ parser.add_option("--user", help="specify user")
+ parser.add_option("--password", help="specify password")
+ parser.add_option("--principal", help="Kerberos principal")
+ parser.add_option("--keytab", help="Kerberos keytab")
+ parser.add_option("-f", "--fg", dest="daemon",
+ action="store_false", default=True,
+ help="run in foreground")
+ parser.add_option("-d", "--debug", action="store_true",
+ help="show debug output")
+ parser.add_option("-q", "--quiet", action="store_true",
+ help="don't show warnings")
+ parser.add_option("-v", "--verbose", action="store_true",
+ help="show verbose output")
+ parser.add_option("--with-src", action="store_true",
+ help="include srpms in repos")
+ parser.add_option("--force-lock", action="store_true", default=False,
+ help="force lock for exclusive session")
+ parser.add_option("--debug-xmlrpc", action="store_true", default=False,
+ help="show xmlrpc debug output")
+ parser.add_option("--skip-main", action="store_true", default=False,
+ help="don't actually run main")
+ parser.add_option("--show-config", action="store_true", default=False,
+ help="Show config and exit")
+ parser.add_option("--sleeptime", type='int', help="Specify the polling interval")
+ parser.add_option("-s", "--server", help="URL of XMLRPC server")
+ parser.add_option("--topdir", help="Specify topdir")
+ parser.add_option("--logfile", help="Specify logfile")
+ (options, args) = parser.parse_args()
+
+ config = ConfigParser()
+ config.read(options.configFile)
+ section = 'kojira'
+ for x in config.sections():
+ if x != section:
+ quit('invalid section found in config file: %s' % x)
+ defaults = {'with_src': False,
+ 'debuginfo_tags': '',
+ 'source_tags': '',
+ 'verbose': False,
+ 'debug': False,
+ 'ignore_stray_repos': False,
+ 'topdir': '/mnt/koji',
+ 'server': None,
+ 'logfile': '/var/log/kojira.log',
+ 'principal': None,
+ 'keytab': None,
+ 'ccache': '/var/tmp/kojira.ccache',
+ 'krbservice': 'host',
+ 'retry_interval': 60,
+ 'max_retries': 120,
+ 'offline_retry': True,
+ 'offline_retry_interval': 120,
+ 'max_delete_processes': 4,
+ 'max_repo_tasks' : 4,
+ 'max_repo_tasks_maven' : 2,
+ 'repo_tasks_limit' : 10,
+ 'delete_batch_size' : 3,
+ 'deleted_repo_lifetime': 7*24*3600,
+ #XXX should really be called expired_repo_lifetime
+ 'sleeptime' : 15,
+ 'cert': '/etc/kojira/client.crt',
+ 'ca': '/etc/kojira/clientca.crt',
+ 'serverca': '/etc/kojira/serverca.crt'
+ }
+ if config.has_section(section):
+ int_opts = ('deleted_repo_lifetime', 'max_repo_tasks', 'repo_tasks_limit',
+ 'retry_interval', 'max_retries', 'offline_retry_interval',
+ 'max_delete_processes', 'max_repo_tasks_maven', 'delete_batch_size', )
+ str_opts = ('topdir', 'server', 'user', 'password', 'logfile', 'principal', 'keytab', 'krbservice',
+ 'cert', 'ca', 'serverca', 'debuginfo_tags', 'source_tags')
+ bool_opts = ('with_src','verbose','debug','ignore_stray_repos', 'offline_retry')
+ for name in config.options(section):
+ if name in int_opts:
+ defaults[name] = config.getint(section, name)
+ elif name in str_opts:
+ defaults[name] = config.get(section, name)
+ elif name in bool_opts:
+ defaults[name] = config.getboolean(section, name)
+ else:
+ quit("unknown config option: %s" % name)
+ for name, value in defaults.items():
+ if getattr(options, name, None) is None:
+ setattr(options, name, value)
+ if options.logfile in ('','None','none'):
+ options.logfile = None
+ return options
+
+def quit(msg=None, code=1):
+ if msg:
+ logging.getLogger("koji.repo").error(msg)
+ sys.stderr.write('%s\n' % msg)
+ sys.stderr.flush()
+ sys.exit(code)
+
+if __name__ == "__main__":
+
+ options = get_options()
+ topdir = getattr(options,'topdir',None)
+ pathinfo = koji.PathInfo(topdir)
+ if options.show_config:
+ pprint.pprint(options.__dict__)
+ sys.exit()
+ if options.logfile:
+ if not os.path.exists(options.logfile):
+ try:
+ logfile = open(options.logfile, "w")
+ logfile.close()
+ except:
+ sys.stderr.write("Cannot create logfile: %s\n" % options.logfile)
+ sys.exit(1)
+ if not os.access(options.logfile,os.W_OK):
+ sys.stderr.write("Cannot write to logfile: %s\n" % options.logfile)
+ sys.exit(1)
+ koji.add_file_logger("koji", options.logfile)
+ #note we're setting logging for koji.*
+ logger = logging.getLogger("koji")
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
+ elif options.verbose:
+ logger.setLevel(logging.INFO)
+ elif options.quiet:
+ logger.setLevel(logging.ERROR)
+ else:
+ logger.setLevel(logging.WARNING)
+ session_opts = {}
+ for k in ('user', 'password', 'krbservice', 'debug_xmlrpc', 'debug',
+ 'retry_interval', 'max_retries', 'offline_retry', 'offline_retry_interval'):
+ session_opts[k] = getattr(options,k)
+ session = koji.ClientSession(options.server,session_opts)
+ if os.path.isfile(options.cert):
+ # authenticate using SSL client certificates
+ session.ssl_login(options.cert, options.ca, options.serverca)
+ elif options.user:
+ # authenticate using user/password
+ session.login()
+ elif sys.modules.has_key('krbV') and options.principal and options.keytab:
+ session.krb_login(options.principal, options.keytab, options.ccache)
+ #get an exclusive session
+ try:
+ session.exclusiveSession(force=options.force_lock)
+ except koji.AuthLockError:
+ quit("Error: Unable to get lock. Trying using --force-lock")
+ if not session.logged_in:
+ quit("Error: Unknown login error")
+ if options.skip_main:
+ sys.exit()
+ elif options.daemon:
+ koji.daemonize()
+ else:
+ koji.add_stderr_logger("koji")
+ main(options, session)
diff --git a/util/kojira.conf b/util/kojira.conf
new file mode 100644
index 0000000..a79dc82
--- /dev/null
+++ b/util/kojira.conf
@@ -0,0 +1,44 @@
+[kojira]
+; For user/pass authentication
+; user=kojira
+; password=kojira
+
+; For Kerberos authentication
+; the principal to connect with
+principal=koji/repo@EXAMPLE.COM
+; The location of the keytab for the principal above
+keytab=/etc/kojira.keytab
+
+; The URL for the koji hub server
+server=http://hub.example.com/kojihub
+
+; The directory containing the repos/ directory
+topdir=/mnt/koji
+
+; Logfile
+logfile=/var/log/kojira.log
+
+; Include srpms in repos? (not needed for normal operation)
+with_src=no
+
+;configuration for Kerberos authentication
+
+;the kerberos principal to use
+;principal = kojira@EXAMPLE.COM
+
+;location of the keytab
+;keytab = /etc/kojira/kojira.keytab
+
+;the service name of the principal being used by the hub
+;krbservice = host
+
+;configuration for SSL authentication
+
+;client certificate
+;cert = /etc/kojira/client.crt
+
+;certificate of the CA that issued the client certificate
+;ca = /etc/kojira/clientca.crt
+
+;certificate of the CA that issued the HTTP server certificate
+;serverca = /etc/kojira/serverca.crt
diff --git a/util/kojira.init b/util/kojira.init
new file mode 100644
index 0000000..749773b
--- /dev/null
+++ b/util/kojira.init
@@ -0,0 +1,85 @@
+#! /bin/sh
+#
+# kojira Start/Stop kojira
+#
+# chkconfig: - 99 99
+# description: koji repo administrator
+# processname: kojira
+
+# This is an interactive program; we need the current locale
+
+# Source function library.
+. /etc/init.d/functions
+
+# Check that we're a privileged user
+[ `id -u` = 0 ] || exit 0
+
+[ -f /etc/sysconfig/kojira ] && . /etc/sysconfig/kojira
+
+prog="kojira"
+
+# Check that networking is up.
+if [ "$NETWORKING" = "no" ]
+then
+ exit 0
+fi
+
+[ -f /usr/sbin/kojira ] || exit 0
+
+RETVAL=0
+
+start() {
+ echo -n $"Starting $prog: "
+ cd /
+ ARGS=""
+ [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock"
+ [ "$KOJIRA_DEBUG" == "Y" ] && ARGS="$ARGS --debug"
+ [ "$KOJIRA_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose"
+ if [ -n "$RUNAS" -a "$RUNAS" != "root" ]; then
+ daemon --user "$RUNAS" /usr/sbin/kojira $ARGS
+ else
+ daemon /usr/sbin/kojira $ARGS
+ fi
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojira
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc kojira
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojira
+ return $RETVAL
+}
+
+restart() {
+ stop
+ start
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status $prog
+ ;;
+ restart|reload|force-reload)
+ restart
+ ;;
+ condrestart|try-restart)
+ [ -f /var/lock/subsys/kojira ] && restart || :
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
+ exit 1
+esac
+
+exit $?
diff --git a/util/kojira.service b/util/kojira.service
new file mode 100644
index 0000000..beaea18
--- /dev/null
+++ b/util/kojira.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Koji repo administration
+Documentation=https://fedoraproject.org/wiki/Koji/ServerHowTo
+
+After=network.target
+
+[Service]
+ExecStart=/usr/sbin/kojira \
+ --fg \
+ --force-lock \
+ --verbose
+
+[Install]
+WantedBy=multi-user.target
diff --git a/util/kojira.sysconfig b/util/kojira.sysconfig
new file mode 100644
index 0000000..830c183
--- /dev/null
+++ b/util/kojira.sysconfig
@@ -0,0 +1,4 @@
+FORCE_LOCK=Y
+KOJIRA_DEBUG=N
+KOJIRA_VERBOSE=Y
+RUNAS=root
diff --git a/vm/Makefile b/vm/Makefile
new file mode 100644
index 0000000..13e340c
--- /dev/null
+++ b/vm/Makefile
@@ -0,0 +1,42 @@
+BINFILES = kojivmd
+SHAREFILES = kojikamid
+SYSTEMDSYSTEMUNITDIR = $(shell pkg-config systemd --variable=systemdsystemunitdir)
+TYPE = systemd
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~ kojikamid
+
+kojikamid: kojikamid.py
+ bash fix_kojikamid.sh >kojikamid
+
+_install: kojikamid
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/usr/sbin
+ install -p -m 755 $(BINFILES) $(DESTDIR)/usr/sbin
+
+ mkdir -p $(DESTDIR)/usr/share/kojivmd
+ install -p -m 644 $(SHAREFILES) $(DESTDIR)/usr/share/kojivmd
+
+ mkdir -p $(DESTDIR)/etc/kojivmd
+ install -p -m 644 kojivmd.conf $(DESTDIR)/etc/kojivmd/kojivmd.conf
+
+install-systemd: _install
+ mkdir -p $(DESTDIR)$(SYSTEMDSYSTEMUNITDIR)
+ install -p -m 644 kojivmd.service $(DESTDIR)$(SYSTEMDSYSTEMUNITDIR)
+
+install-sysv: _install
+ mkdir -p $(DESTDIR)/etc/rc.d/init.d
+ install -p -m 755 kojivmd.init $(DESTDIR)/etc/rc.d/init.d/kojivmd
+
+ mkdir -p $(DESTDIR)/etc/sysconfig
+ install -p -m 644 kojivmd.sysconfig $(DESTDIR)/etc/sysconfig/kojivmd
+
+install: install-$(TYPE)
diff --git a/vm/fix_kojikamid.sh b/vm/fix_kojikamid.sh
new file mode 100755
index 0000000..5b0f5c1
--- /dev/null
+++ b/vm/fix_kojikamid.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
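+# Assemble the standalone kojikamid script (a rough summary of the awk
+# pipeline below): first emit kojikamid.py up to the "## INSERT kojikamid dup"
+# marker, then splice in the regions of the listed koji modules bracketed by
+# the "## BEGIN/END kojikamid dup" markers, then emit the remainder of
+# kojikamid.py following the INSERT marker.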
+awk '/^## INSERT kojikamid dup/ {exit} {print $0}' kojikamid.py
+
+for fn in ../koji/__init__.py ../koji/daemon.py
+do
+ awk '/^## END kojikamid dup/ {p=0} p {print $0} /^## BEGIN kojikamid dup/ {p=1}' $fn
+done
+
+awk 'p {print $0} /^## INSERT kojikamid dup/ {p=1}' kojikamid.py
diff --git a/vm/kojikamid.py b/vm/kojikamid.py
new file mode 100755
index 0000000..15c0570
--- /dev/null
+++ b/vm/kojikamid.py
@@ -0,0 +1,760 @@
+#!/usr/bin/python
+
+# Koji daemon that runs in a Windows VM and executes commands associated
+# with a task.
+# Copyright (c) 2010-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+# Jay Greguske <jgregusk at redhat.com>
+#
+# To register this script as a service on Windows 2008 (with Cygwin 1.7.5 installed) run:
+# kojiwind --install
+# in a cygwin shell.
+
+from optparse import OptionParser
+from ConfigParser import ConfigParser
+import os
+import subprocess
+import sys
+import tempfile
+import time
+import urlparse
+import xmlrpclib
+import base64
+import hashlib
+import logging
+import traceback
+import threading
+import re
+import glob
+import zipfile
+
+MANAGER_PORT = 7000
+
+KOJIKAMID = True
+
+## INSERT kojikamid dup
+
+class fakemodule(object):
+ pass
+
+#make parts of the above insert accessible as koji.X
+koji = fakemodule()
+koji.GenericError = GenericError
+koji.BuildError = BuildError
+
+
+class WindowsBuild(object):
+
+ LEADING_CHAR = re.compile('^[^A-Za-z_]')
+ VAR_CHARS = re.compile('[^A-Za-z0-9_]')
+
+ def __init__(self, server):
+ """Get task info and setup build directory"""
+ self.logger = logging.getLogger('koji.vm')
+ self.server = server
+ info = server.getTaskInfo()
+ self.source_url = info[0]
+ self.build_tag = info[1]
+ if len(info) > 2:
+ self.task_opts = info[2]
+ else:
+ self.task_opts = {}
+ self.workdir = '/tmp/build'
+ ensuredir(self.workdir)
+ self.buildreq_dir = os.path.join(self.workdir, 'buildreqs')
+ ensuredir(self.buildreq_dir)
+ self.source_dir = None
+ self.spec_dir = None
+ self.patches_dir = None
+ self.buildroot_id = None
+
+ # we initialize these here for clarity, but they are populated in loadConfig()
+ self.name = None
+ self.version = None
+ self.release = None
+ self.epoch = None
+ self.description = None
+ self.platform = None
+ self.preinstalled = []
+ self.buildrequires = []
+ self.provides = []
+ self.shell = None
+ self.execute = []
+ self.postbuild = []
+ self.output = {}
+ self.logs = []
+
+ def checkTools(self):
+ """Is this environment fit to build in, based on the spec file?"""
+ errors = []
+ for entry in self.preinstalled:
+ checkdir = False
+ if entry.startswith('/'):
+ # Cygwin path
+ if entry.endswith('/'):
+ # directory
+ checkdir = True
+ elif entry[1:3] == ':\\':
+ # Windows path
+ if entry.endswith('\\'):
+ # directory
+ checkdir = True
+ else:
+ # Check in the path
+ ret, output = run(['/bin/which', entry], log=False)
+ output = output.strip()
+ if ret:
+ errors.append(output)
+ else:
+ self.logger.info('command %s is available at %s', entry, output)
+ continue
+ if checkdir:
+ if not os.path.isdir(entry):
+ errors.append('directory %s does not exist' % entry)
+ else:
+ self.logger.info('directory %s exists', entry)
+ else:
+ # file
+ if not os.path.isfile(entry):
+ errors.append('file %s does not exist' % entry)
+ else:
+ self.logger.info('file %s exists', entry)
+ if errors:
+ raise BuildError, 'error validating build environment: %s' % \
+ ', '.join(errors)
+
+ def updateClam(self):
+ """update ClamAV virus definitions"""
+ ret, output = run(['/bin/freshclam', '--quiet'])
+ if ret:
+ raise BuildError, 'could not update ClamAV database: %s' % output
+
+ def checkEnv(self):
+ """make the environment is fit for building in"""
+ for tool in ['/bin/freshclam', '/bin/clamscan', '/bin/patch']:
+ if not os.path.isfile(tool):
+ raise BuildError, '%s is missing from the build environment' % tool
+
+ def zipDir(self, rootdir, filename):
+ rootbase = os.path.basename(rootdir)
+ roottrim = len(rootdir) - len(rootbase)
+ zfo = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
+ for dirpath, dirnames, filenames in os.walk(rootdir):
+ for skip in ['CVS', '.svn', '.git']:
+ if skip in dirnames:
+ dirnames.remove(skip)
+ for filename in filenames:
+ filepath = os.path.join(dirpath, filename)
+ zfo.write(filepath, filepath[roottrim:])
+ zfo.close()
+
+ def checkout(self):
+ """Checkout sources, winspec, and patches, and apply patches"""
+ src_scm = SCM(self.source_url)
+ self.source_dir = src_scm.checkout(ensuredir(os.path.join(self.workdir, 'source')))
+ self.zipDir(self.source_dir, os.path.join(self.workdir, 'sources.zip'))
+ if 'winspec' in self.task_opts:
+ spec_scm = SCM(self.task_opts['winspec'])
+ self.spec_dir = spec_scm.checkout(ensuredir(os.path.join(self.workdir, 'spec')))
+ self.zipDir(self.spec_dir, os.path.join(self.workdir, 'spec.zip'))
+ else:
+ self.spec_dir = self.source_dir
+ if 'patches' in self.task_opts:
+ patch_scm = SCM(self.task_opts['patches'])
+ self.patches_dir = patch_scm.checkout(ensuredir(os.path.join(self.workdir, 'patches')))
+ self.zipDir(self.patches_dir, os.path.join(self.workdir, 'patches.zip'))
+ self.applyPatches(self.source_dir, self.patches_dir)
+ self.virusCheck(self.workdir)
+
+ def applyPatches(self, sourcedir, patchdir):
+ """Apply patches in patchdir to files in sourcedir)"""
+ patches = [patch for patch in os.listdir(patchdir) if \
+ os.path.isfile(os.path.join(patchdir, patch)) and \
+ patch.endswith('.patch')]
+ if not patches:
+ raise BuildError, 'no patches found at %s' % patchdir
+ patches.sort()
+ for patch in patches:
+ cmd = ['/bin/patch', '--verbose', '-d', sourcedir, '-p1', '-i', os.path.join(patchdir, patch)]
+ run(cmd, fatal=True)
+
+ def loadConfig(self):
+ """Load build configuration from the spec file."""
+ specfiles = [spec for spec in os.listdir(self.spec_dir) if spec.endswith('.ini')]
+ if len(specfiles) == 0:
+ raise BuildError, 'No .ini file found'
+ elif len(specfiles) > 1:
+ raise BuildError, 'Multiple .ini files found'
+
+ conf = ConfigParser()
+ conf.read(os.path.join(self.spec_dir, specfiles[0]))
+
+ # [naming] section
+ for entry in ('name', 'version', 'release', 'description'):
+ setattr(self, entry, conf.get('naming', entry))
+ if conf.has_option('naming', 'epoch'):
+ self.epoch = conf.get('naming', 'epoch')
+
+ # [building] section
+ self.platform = conf.get('building', 'platform')
+
+ # preinstalled are paths to files or directories that must exist
+ # in the VM for it to execute the build.
+ # If the path ends in / or \ it must be a directory, otherwise it must
+ # be a file.
+ # They may be specified as Cygwin (/cygdrive/c/...) or Windows (C:\...)
+ # absolute paths, or without a path in which case it is searched for
+ # on the PATH.
+ if conf.has_option('building', 'preinstalled'):
+ self.preinstalled.extend([e.strip() for e in conf.get('building', 'preinstalled').split('\n') if e])
+
+ # buildrequires and provides are multi-valued (space-separated)
+ for br in conf.get('building', 'buildrequires').split():
+ # buildrequires is a space-separated list
+ # each item in the list is in the format:
+ # pkgname[:opt1:opt2=val2:...]
+ # the options are put into a dict
+ # if the option has no =val, the value in the dict will be None
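+ # illustrative example: "mypkg:type=rpm:noarch" would yield
+ # ("mypkg", {"type": "rpm", "noarch": None})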
+ if br:
+ br = br.split(':')
+ bropts = {}
+ for opt in br[1:]:
+ if '=' in opt:
+ key, val = opt.split('=', 1)
+ else:
+ key = opt
+ val = None
+ bropts[key] = val
+ self.buildrequires.append((br[0], bropts))
+ for prov in conf.get('building', 'provides').split():
+ if prov:
+ self.provides.append(prov)
+ # optionally specify a shell to use (defaults to bash)
+ # valid values are: cmd, cmd.exe (alias for cmd), and bash
+ if conf.has_option('building', 'shell'):
+ self.shell = conf.get('building', 'shell')
+ else:
+ self.shell = 'bash'
+ # execute is multi-valued (newline-separated)
+ self.execute.extend([e.strip() for e in conf.get('building', 'execute').split('\n') if e])
+
+ # postbuild are files or directories that must exist after the build is
+ # complete, but are not included in the build output
+ # they are specified as paths relative the source directory, and may be
+ # in Unix or Windows format
+ # each entry may contain shell-style globs, and one or more files
+ # matching the glob is considered valid
+ if conf.has_option('building', 'postbuild'):
+ for entry in conf.get('building', 'postbuild').split('\n'):
+ entry = entry.strip()
+ if not entry:
+ continue
+ for var in ('name', 'version', 'release'):
+ entry = entry.replace('$' + var, getattr(self, var))
+ self.postbuild.append(entry)
+
+ # [files] section
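+ # each non-empty 'output' line has the form (as parsed below):
+ # filename:platform1,platform2[:flag1,flag2]
+ # with $name, $version, and $release expanded in the filename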
+ for entry in conf.get('files', 'output').split('\n'):
+ entry = entry.strip()
+ if not entry:
+ continue
+ tokens = entry.split(':')
+ filename = tokens[0]
+ for var in ('name', 'version', 'release'):
+ filename = filename.replace('$' + var, getattr(self, var))
+ metadata = {}
+ metadata['platforms'] = tokens[1].split(',')
+ if len(tokens) > 2:
+ metadata['flags'] = tokens[2].split(',')
+ else:
+ metadata['flags'] = []
+ self.output[filename] = metadata
+ self.logs.extend([e.strip() for e in conf.get('files', 'logs').split('\n') if e])
+
+ def initBuildroot(self):
+ """Create the buildroot object on the hub."""
+ repo_id = self.task_opts.get('repo_id')
+ if not repo_id:
+ raise BuildError, 'repo_id must be specified'
+ self.buildroot_id = self.server.initBuildroot(repo_id, self.platform)
+
+ def expireBuildroot(self):
+ """Set the buildroot object to expired on the hub."""
+ self.server.expireBuildroot(self.buildroot_id)
+
+ def fetchFile(self, basedir, buildinfo, fileinfo, type):
+ """Download the file from buildreq, at filepath, into the basedir"""
+ destpath = os.path.join(basedir, fileinfo['localpath'])
+ ensuredir(os.path.dirname(destpath))
+ destfile = file(destpath, 'w')
+ offset = 0
+ checksum = hashlib.md5()
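+ # fetch the file in 1 MiB chunks; each chunk arrives base64-encoded over XML-RPC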
+ while True:
+ encoded = self.server.getFile(buildinfo, fileinfo, encode_int(offset), 1048576, type)
+ if not encoded:
+ break
+ data = base64.b64decode(encoded)
+ del encoded
+ destfile.write(data)
+ offset += len(data)
+ checksum.update(data)
+ destfile.close()
+ digest = checksum.hexdigest()
+ # rpms don't have an md5sum in the fileinfo, but check it for everything else
+ if ('md5sum' in fileinfo) and (digest != fileinfo['md5sum']):
+ raise BuildError, 'md5 checksum validation failed for %s, %s (computed) != %s (provided)' % \
+ (destpath, digest, fileinfo['md5sum'])
+ self.logger.info('Retrieved %s (%s bytes, md5: %s)', destpath, offset, digest)
+
+ def fetchBuildReqs(self):
+ """Retrieve buildrequires listed in the spec file"""
+ files = []
+ rpms = []
+ for buildreq, brinfo in self.buildrequires:
+ # if no type is specified in the options, default to win
+ brtype = brinfo.get('type', 'win')
+ buildinfo = self.server.getLatestBuild(self.build_tag, buildreq,
+ self.task_opts.get('repo_id'))
+ br_dir = os.path.join(self.buildreq_dir, buildreq, brtype)
+ ensuredir(br_dir)
+ brinfo['dir'] = br_dir
+ brfiles = []
+ brinfo['files'] = brfiles
+ buildfiles = self.server.getFileList(buildinfo['id'], brtype, brinfo)
+ for fileinfo in buildfiles:
+ self.fetchFile(br_dir, buildinfo, fileinfo, brtype)
+ brfiles.append(fileinfo['localpath'])
+ if brtype == 'rpm':
+ rpms.append(fileinfo)
+ else:
+ files.append(fileinfo)
+ self.server.updateBuildrootFiles(self.buildroot_id, files, rpms)
+ self.virusCheck(self.buildreq_dir)
+
+ def build(self):
+ if self.shell in ('cmd', 'cmd.exe'):
+ self.cmdBuild()
+ else:
+ self.bashBuild()
+ # move the zips of the SCM checkouts to their final locations
+ for src in ['sources.zip', 'spec.zip', 'patches.zip']:
+ srcpath = os.path.join(self.workdir, src)
+ if os.path.exists(srcpath):
+ dest = '%s-%s-%s-%s' % (self.name, self.version, self.release, src)
+ destpath = os.path.join(self.source_dir, dest)
+ os.rename(srcpath, destpath)
+ self.output[dest] = {'platforms': ['all'],
+ 'flags': ['src']}
+
+ def varname(self, name):
+ """
+ Convert name to a valid shell variable name.
+ Converts leading characters that aren't letters or underscores
+ to underscores.
+ Converts any other characters that aren't letters, numbers,
+ or underscores to underscores.
+ """
+ name = self.LEADING_CHAR.sub('_', name)
+ name = self.VAR_CHARS.sub('_', name)
+ return name
+
+ def cmdBuild(self):
+ """Do the build: run the execute line(s) with cmd.exe"""
+ tmpfd, tmpname = tempfile.mkstemp(prefix='koji-tmp', suffix='.bat', dir='/cygdrive/c/Windows/Temp')
+ script = os.fdopen(tmpfd, 'w')
+ for attr in ['source_dir', 'spec_dir', 'patches_dir']:
+ val = getattr(self, attr)
+ if val:
+ ret, output = run(['/bin/cygpath', '-wa', val], log=False, fatal=True)
+ script.write('set %s=%s\r\n' % (attr, output.strip()))
+ for buildreq, brinfo in self.buildrequires:
+ buildreq = self.varname(buildreq)
+ ret, output = run(['/bin/cygpath', '-wa', brinfo['dir']], log=False, fatal=True)
+ br_dir = output.strip()
+ files = ' '.join(brinfo['files'])
+ files = files.replace('/', '\\')
+ if brinfo.get('type'):
+ # if the spec file qualifies the buildreq with a type,
+ # the env. var is named buildreq_type_{dir,files}
+ script.write('set %s_%s_dir=%s\r\n' % (buildreq, brinfo['type'], br_dir))
+ script.write('set %s_%s_files=%s\r\n' % (buildreq, brinfo['type'], files))
+ else:
+ # otherwise it's just buildreq_{dir,files}
+ script.write('set %s_dir=%s\r\n' % (buildreq, br_dir))
+ script.write('set %s_files=%s\r\n' % (buildreq, files))
+ script.write('\r\n')
+ script.write('set name=%s\r\n' % self.name)
+ script.write('set version=%s\r\n' % self.version)
+ script.write('set release=%s\r\n' % self.release)
+ for cmd in self.execute:
+ script.write(cmd)
+ script.write('\r\n')
+ script.close()
+ cmd = ['cmd.exe', '/C', 'C:\\Windows\\Temp\\' + os.path.basename(tmpname)]
+ ret, output = run(cmd, chdir=self.source_dir)
+ if ret:
+ raise BuildError, 'build command failed, see build.log for details'
+
+ def bashBuild(self):
+ """Do the build: run the execute line(s) with bash"""
+ tmpfd, tmpname = tempfile.mkstemp(prefix='koji-tmp.', dir='/tmp')
+ script = os.fdopen(tmpfd, 'w')
+ script.write("export source_dir='%s'\n" % self.source_dir)
+ script.write("export spec_dir='%s'\n" % self.spec_dir)
+ if self.patches_dir:
+ script.write("export patches_dir='%s'\n" % self.patches_dir)
+ for buildreq, brinfo in self.buildrequires:
+ buildreq = self.varname(buildreq)
+ if brinfo.get('type'):
+ script.write("export %s_%s_dir='%s'\n" % (buildreq, brinfo['type'], brinfo['dir']))
+ script.write("export %s_%s_files='" % (buildreq, brinfo['type']))
+ else:
+ script.write("export %s_dir='%s'\n" % (buildreq, brinfo['dir']))
+ script.write("export %s_files='" % buildreq)
+ for filename in brinfo['files']:
+ script.write(filename)
+ script.write('\n')
+ script.write("'\n\n")
+ script.write('export name=%s\n' % self.name)
+ script.write('export version=%s\n' % self.version)
+ script.write('export release=%s\n' % self.release)
+ for cmd in self.execute:
+ script.write(cmd)
+ script.write('\n')
+ script.close()
+ cmd = ['/bin/bash', '-e', '-x', tmpname]
+ ret, output = run(cmd, chdir=self.source_dir)
+ if ret:
+ raise BuildError, 'build command failed, see build.log for details'
+
+ def checkBuild(self):
+ """Verify that the build completed successfully."""
+ errors = []
+ for entry in self.postbuild:
+ relpath = entry
+ if '\\' in relpath:
+ relpath = relpath.replace('\\', '/')
+ fullpath = os.path.join(self.source_dir, relpath)
+ results = glob.glob(fullpath)
+ if fullpath.endswith('/'):
+ for result in results:
+ if os.path.isdir(result):
+ self.logger.info('found directory %s at %s', entry, result)
+ break
+ else:
+ errors.append('directory %s does not exist' % entry)
+ else:
+ for result in results:
+ if os.path.isfile(result):
+ self.logger.info('found file %s at %s', entry, result)
+ break
+ else:
+ errors.append('file %s does not exist' % entry)
+ self.virusCheck(self.workdir)
+ if errors:
+ raise BuildError, 'error validating build output: %s' % \
+ ', '.join(errors)
+
+ def virusCheck(self, path):
+ """ensure a path is virus free with ClamAV. path should be absolute"""
+ if not path.startswith('/'):
+ raise BuildError, 'Invalid path to scan for viruses: ' + path
+ run(['/bin/clamscan', '--quiet', '--recursive', path], fatal=True)
+
+ def gatherResults(self):
+ """Gather information about the output from the build, return it"""
+ return {'name': self.name, 'version': self.version, 'release': self.release,
+ 'epoch': self.epoch,
+ 'description': self.description, 'platform': self.platform,
+ 'provides': self.provides,
+ 'output': self.output, 'logs': self.logs,
+ 'buildroot_id': self.buildroot_id}
+
+ def run(self):
+ """Run the entire build process"""
+ self.checkEnv()
+ self.updateClam()
+ self.checkout()
+ self.loadConfig()
+ self.initBuildroot()
+ self.checkTools()
+ self.fetchBuildReqs()
+ self.build()
+ self.checkBuild()
+ self.expireBuildroot()
+ return self.gatherResults()
+
+def run(cmd, chdir=None, fatal=False, log=True):
+ global logfd
+ output = ''
+ olddir = None
+ if chdir:
+ olddir = os.getcwd()
+ os.chdir(chdir)
+ if log:
+ logger = logging.getLogger('koji.vm')
+ logger.info('$ %s', ' '.join(cmd))
+ proc = subprocess.Popen(cmd, stdout=logfd, stderr=subprocess.STDOUT,
+ close_fds=True)
+ ret = proc.wait()
+ else:
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ close_fds=True)
+ output, dummy = proc.communicate()
+ ret = proc.returncode
+ if olddir:
+ os.chdir(olddir)
+ if ret and fatal:
+ msg = 'error running: %s, return code was %s' % (' '.join(cmd), ret)
+ if log:
+ msg += ', see %s for details' % (os.path.basename(logfd.name))
+ else:
+ msg += ', output: %s' % output
+ raise BuildError, msg
+ return ret, output
+
+def find_net_info():
+ """
+ Find the network gateway configured for this VM.
+ """
+ ret, output = run(['ipconfig', '/all'], log=False)
+ if ret:
+ raise RuntimeError, 'error running ipconfig, output was: %s' % output
+ macaddr = None
+ gateway = None
+ for line in output.splitlines():
+ line = line.strip()
+ # take the first values we find
+ if line.startswith('Physical Address'):
+ if not macaddr:
+ macaddr = line.split()[-1]
+ # format it to be consistent with the libvirt MAC address
+ macaddr = macaddr.replace('-', ':').lower()
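+ # e.g. "00-15-5D-01-02-03" becomes "00:15:5d:01:02:03"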
+ elif line.startswith('Default Gateway'):
+ if not gateway:
+ gateway = line.split()[-1]
+
+ # check that we have valid values
+ if macaddr and len(macaddr) != 17:
+ macaddr = None
+ if gateway and (len(gateway) < 7 or len(gateway) > 15):
+ gateway = None
+ return macaddr, gateway
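+# A hypothetical example of the lines find_net_info() matches (output from
+# `ipconfig /all` on an English-locale Windows guest; other locales will not
+# match these prefixes):
+#   Physical Address. . . . . . . . . : 52-54-00-12-34-56
+#   Default Gateway . . . . . . . . . : 192.168.122.1
+# line.split()[-1] takes the trailing token, so after normalization the MAC
+# becomes '52:54:00:12:34:56' (17 chars) and the gateway '192.168.122.1'.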
+
+def upload_file(server, prefix, path):
+ """upload a single file to the vmd"""
+ logger = logging.getLogger('koji.vm')
+ destpath = os.path.join(prefix, path)
+ fobj = file(destpath, 'r')
+ offset = 0
+ sum = hashlib.md5()
+ while True:
+ data = fobj.read(131072)
+ if not data:
+ break
+ encoded = base64.b64encode(data)
+ server.upload(path, encode_int(offset), encoded)
+ offset += len(data)
+ sum.update(data)
+ fobj.close()
+ digest = sum.hexdigest()
+ server.verifyChecksum(path, digest, 'md5')
+ logger.info('Uploaded %s (%s bytes, md5: %s)', destpath, offset, digest)
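+# upload_file() pushes the file in 128 KiB chunks, base64-encoded because the
+# transport is XML-RPC, and the final verifyChecksum() call asks the server
+# side (VMExecTask.verifyChecksum in kojivmd) to confirm the reassembled file
+# matches the md5 digest computed here.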
+
+def get_mgmt_server():
+ """Get a ServerProxy object we can use to retrieve task info"""
+ logger = logging.getLogger('koji.vm')
+ macaddr, gateway = find_net_info()
+ while not (macaddr and gateway):
+ # wait for the network connection to come up and get an address
+ time.sleep(5)
+ macaddr, gateway = find_net_info()
+ logger.debug('found MAC address %s, connecting to %s:%s',
+ macaddr, gateway, MANAGER_PORT)
+ server = xmlrpclib.ServerProxy('http://%s:%s/' %
+ (gateway, MANAGER_PORT), allow_none=True)
+ # we would set a timeout on the socket here, but that is apparently not
+ # supported by python/cygwin/Windows
+ task_port = server.getPort(macaddr)
+ logger.debug('found task-specific port %s', task_port)
+ return xmlrpclib.ServerProxy('http://%s:%s/' % (gateway, task_port), allow_none=True)
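+# Handshake sketch, assuming MANAGER_PORT matches the manager's portbase
+# (7000 by default on the host side): the guest daemon first contacts the
+# VMTaskManager listening on the gateway, calls getPort() with its own MAC
+# address, then reconnects to the task-specific server that VMExecTask
+# registered on portbase + random(1..100).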
+
+def get_options():
+ """handle usage and parse options"""
+ usage = """%prog [options]
+ Run Koji tasks assigned to a VM.
+ Run without any arguments to start this daemon.
+ """
+ parser = OptionParser(usage=usage)
+ parser.add_option('-d', '--debug', action='store_true', help='Log debug statements')
+ parser.add_option('-i', '--install', action='store_true', help='Install this daemon as a service', default=False)
+ parser.add_option('-u', '--uninstall', action='store_true', help='Uninstall this daemon if it was installed previously as a service', default=False)
+ (options, args) = parser.parse_args()
+ return options
+
+def setup_logging(opts):
+ global logfile, logfd
+ logger = logging.getLogger('koji.vm')
+ level = logging.INFO
+ if opts.debug:
+ level = logging.DEBUG
+ logger.setLevel(level)
+ logfd = file(logfile, 'w')
+ handler = logging.StreamHandler(logfd)
+ handler.setLevel(level)
+ handler.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s'))
+ logger.addHandler(handler)
+ return handler
+
+def log_local(msg):
+ tb = ''.join(traceback.format_exception(*sys.exc_info()))
+ sys.stderr.write('%s: %s\n' % (time.ctime(), msg))
+ sys.stderr.write(tb)
+
+def stream_logs(server, handler, builds):
+ """Stream logs incrementally to the server.
+ The global logfile will always be streamed.
+ The logfiles associated with any builds
+ will also be streamed."""
+ global logfile
+ logs = {logfile: (os.path.basename(logfile), None)}
+ while handler.active:
+ for build in builds:
+ for relpath in build.logs:
+ logpath = os.path.join(build.source_dir, relpath)
+ if logpath not in logs:
+ logs[logpath] = (relpath, None)
+ for log, (relpath, fd) in logs.iteritems():
+ if not fd:
+ if os.path.isfile(log):
+ try:
+ fd = file(log, 'r')
+ logs[log] = (relpath, fd)
+ except:
+ log_local('Error opening %s' % log)
+ continue
+ else:
+ continue
+ offset = fd.tell()
+ contents = fd.read(65536)
+ if contents:
+ size = len(contents)
+ data = base64.b64encode(contents)
+ digest = hashlib.md5(contents).hexdigest()
+ del contents
+ try:
+ server.uploadDirect(relpath, offset, size, digest, data)
+ except:
+ log_local('error uploading %s' % relpath)
+ time.sleep(1)
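+# Note: a chunk that fails in uploadDirect() is only logged, not retried
+# (fd.tell() has already advanced past it), which is why main() re-uploads
+# the complete log files once the build finishes.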
+
+def fail(server, handler):
+ """do the right thing when a build fails"""
+ global logfile, logfd
+ logging.getLogger('koji.vm').error('error running build', exc_info=True)
+ tb = ''.join(traceback.format_exception(*sys.exc_info()))
+ handler.active = False
+ if server is not None:
+ try:
+ logfd.flush()
+ upload_file(server, os.path.dirname(logfile),
+ os.path.basename(logfile))
+ except:
+ log_local('error calling upload_file()')
+ while True:
+ try:
+ # this is the very last thing we do, keep trying as long as we can
+ server.failTask(tb)
+ break
+ except:
+ log_local('error calling server.failTask()')
+ sys.exit(1)
+
+
+logfile = '/tmp/build.log'
+logfd = None
+
+def main():
+ prog = os.path.basename(sys.argv[0])
+ opts = get_options()
+ if opts.install:
+ ret, output = run(['/bin/cygrunsrv', '--install', prog,
+ '--path', sys.executable, '--args', os.path.abspath(prog),
+ '--type', 'auto', '--dep', 'Dhcp',
+ '--disp', 'Koji Windows Daemon',
+ '--desc', 'Runs Koji tasks assigned to a VM'],
+ log=False)
+ if ret:
+ print 'Error installing %s service, output was: %s' % (prog, output)
+ sys.exit(1)
+ else:
+ print 'Successfully installed the %s service' % prog
+ sys.exit(0)
+ elif opts.uninstall:
+ ret, output = run(['/bin/cygrunsrv', '--remove', prog], log=False)
+ if ret:
+ print 'Error removing the %s service, output was: %s' % (prog, output)
+ sys.exit(1)
+ else:
+ print 'Successfully removed the %s service' % prog
+ sys.exit(0)
+
+ handler = setup_logging(opts)
+ handler.active = True
+ server = None
+ try:
+ server = get_mgmt_server()
+
+ builds = []
+ thread = threading.Thread(target=stream_logs,
+ args=(server, handler, builds))
+ thread.daemon = True
+ thread.start()
+
+ # xmlrpclib is not thread-safe, create a new ServerProxy
+ # instance so we're not sharing with the stream_logs thread
+ server = get_mgmt_server()
+
+ build = WindowsBuild(server)
+ builds.append(build)
+ results = build.run()
+
+ for filename in results['output'].keys():
+ upload_file(server, build.source_dir, filename)
+
+ handler.active = False
+ thread.join()
+
+ for filename in results['logs']:
+ # reupload the log files to make sure the thread
+ # didn't miss anything
+ upload_file(server, build.source_dir, filename)
+
+ upload_file(server, os.path.dirname(logfile),
+ os.path.basename(logfile))
+ results['logs'].append(os.path.basename(logfile))
+
+ server.closeTask(results)
+ except:
+ fail(server, handler)
+ sys.exit(0)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/vm/kojivmd b/vm/kojivmd
new file mode 100755
index 0000000..cf44f0c
--- /dev/null
+++ b/vm/kojivmd
@@ -0,0 +1,1119 @@
+#!/usr/bin/python
+
+# Koji virtual machine management daemon
+# Copyright (c) 2010-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+
+import koji
+import koji.util
+from koji.daemon import SCM, TaskManager
+from koji.tasks import ServerExit, ServerRestart, BaseTaskHandler, MultiPlatformTask
+from koji.tasks import RestartTask, RestartVerifyTask
+import sys
+import logging
+import os
+import xmlrpclib
+import signal
+import time
+import subprocess
+import libvirt
+import libxml2
+import random
+import socket
+import SimpleXMLRPCServer
+import threading
+import base64
+import pwd
+import urlgrabber
+import fnmatch
+from ConfigParser import ConfigParser
+from optparse import OptionParser
+try:
+ import krbV
+except ImportError:
+ pass
+
+
+# Register libvirt handler
+def libvirt_callback(ignore, err):
+ if err[3] != libvirt.VIR_ERR_ERROR:
+ # Don't log libvirt errors: global error handler will do that
+ logging.warn("Non-error from libvirt: '%s'", err[2])
+libvirt.registerErrorHandler(f=libvirt_callback, ctx=None)
+
+
+def get_options():
+ """process options from command line and config file"""
+ # parse command line args
+ parser = OptionParser()
+ parser.add_option("-c", "--config", dest="configFile",
+ help="use alternate configuration file", metavar="FILE",
+ default="/etc/kojivmd/kojivmd.conf")
+ parser.add_option("--user", help="specify user")
+ parser.add_option("--password", help="specify password")
+ parser.add_option("-f", "--fg", dest="daemon",
+ action="store_false", default=True,
+ help="run in foreground")
+ parser.add_option("--force-lock", action="store_true", default=False,
+ help="force lock for exclusive session")
+ parser.add_option("-v", "--verbose", action="store_true", default=False,
+ help="show verbose output")
+ parser.add_option("-d", "--debug", action="store_true", default=False,
+ help="show debug output")
+ parser.add_option("--debug-task", action="store_true", default=False,
+ help="enable debug output for tasks")
+ parser.add_option("--debug-xmlrpc", action="store_true", default=False,
+ help="show xmlrpc debug output")
+ parser.add_option("--skip-main", action="store_true", default=False,
+ help="don't actually run main")
+ parser.add_option("--maxjobs", type='int', help="Specify maxjobs")
+ parser.add_option("--sleeptime", type='int', help="Specify the polling interval")
+ parser.add_option("--admin-emails", help="Address(es) to send error notices to")
+ parser.add_option("--workdir", help="Specify workdir")
+ parser.add_option("--pluginpath", help="Specify plugin search path")
+ parser.add_option("--plugin", action="append", help="Load specified plugin")
+ parser.add_option("-s", "--server", help="url of XMLRPC server")
+ (options, args) = parser.parse_args()
+
+ if args:
+ parser.error("incorrect number of arguments")
+ #not reached
+ assert False
+
+ # load local config
+ config = ConfigParser()
+ config.read(options.configFile)
+ for x in config.sections():
+ if x != 'kojivmd':
+ quit('invalid section found in config file: %s' % x)
+ defaults = {'sleeptime': 15,
+ 'maxjobs': 5,
+ 'minspace': 8192,
+ 'minmem': 4096,
+ 'vmuser': 'qemu',
+ 'admin_emails': None,
+ 'workdir': '/tmp/koji',
+ 'topurl': '',
+ 'imagedir': '/var/lib/libvirt/images',
+ 'pluginpath': '/usr/lib/koji-vm-plugins',
+ 'privaddr': '192.168.122.1',
+ 'portbase': 7000,
+ 'smtphost': 'example.com',
+ 'from_addr': 'Koji Build System <buildsys at example.com>',
+ 'krb_principal': None,
+ 'host_principal_format': 'compile/%s at EXAMPLE.COM',
+ 'keytab': '/etc/kojivmd/kojivmd.keytab',
+ 'ccache': '/var/tmp/kojivmd.ccache',
+ 'krbservice': 'host',
+ 'server': None,
+ 'user': None,
+ 'password': None,
+ 'retry_interval': 60,
+ 'max_retries': 120,
+ 'offline_retry': True,
+ 'offline_retry_interval': 120,
+ 'allowed_scms': '',
+ 'cert': '/etc/kojivmd/client.crt',
+ 'ca': '/etc/kojivmd/clientca.crt',
+ 'serverca': '/etc/kojivmd/serverca.crt'}
+ if config.has_section('kojivmd'):
+ for name, value in config.items('kojivmd'):
+ if name in ['sleeptime', 'maxjobs', 'minspace', 'minmem',
+ 'retry_interval', 'max_retries', 'offline_retry_interval',
+ 'portbase']:
+ try:
+ defaults[name] = int(value)
+ except ValueError:
+ quit("value for %s option must be a valid integer" % name)
+ elif name in ['offline_retry']:
+ defaults[name] = config.getboolean('kojivmd', name)
+ elif name in ['plugin', 'plugins']:
+ defaults['plugin'] = value.split()
+ elif name in defaults.keys():
+ defaults[name] = value
+ else:
+ quit("unknown config option: %s" % name)
+ for name, value in defaults.items():
+ if getattr(options, name, None) is None:
+ setattr(options, name, value)
+
+ #make sure workdir exists
+ if not os.path.exists(options.workdir):
+ koji.ensuredir(options.workdir)
+
+ if not options.server:
+ parser.error("--server argument required")
+
+ return options
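+# Option precedence, as implemented above: command-line flags win, then
+# values from the [kojivmd] config section, then the hard-coded defaults
+# dict; a flag left at its None default is filled in from the latter two.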
+
+def quit(msg=None, code=1):
+ if msg:
+ logging.getLogger("koji.vm").error(msg)
+ sys.stderr.write('%s\n' % msg)
+ sys.stderr.flush()
+ sys.exit(code)
+
+def main(options, session):
+ logger = logging.getLogger("koji.vm")
+ logger.info('Starting up')
+ tm = VMTaskManager(options, session)
+ tm.findHandlers(globals())
+ if options.plugin:
+ #load plugins
+ pt = koji.plugin.PluginTracker(path=options.pluginpath.split(':'))
+ for name in options.plugin:
+ logger.info('Loading plugin: %s', name)
+ tm.scanPlugin(pt.load(name))
+ def shutdown(*args):
+ raise SystemExit
+ def restart(*args):
+ logger.warn("Initiating graceful restart")
+ tm.restart_pending = True
+ signal.signal(signal.SIGTERM,shutdown)
+ signal.signal(signal.SIGUSR1,restart)
+ taken = False
+ tm.cleanupAllVMs()
+ while True:
+ try:
+ tm.updateBuildroots(nolocal=True)
+ tm.updateTasks()
+ taken = tm.getNextTask()
+ tm.cleanupExpiredVMs()
+ except (SystemExit,ServerExit,KeyboardInterrupt):
+ logger.warn("Exiting")
+ break
+ except ServerRestart:
+ logger.warn("Restarting")
+ os.execv(sys.argv[0], sys.argv)
+ except koji.AuthExpired:
+ logger.error('Session expired')
+ break
+ except koji.RetryError:
+ raise
+ except:
+ # XXX - this is a little extreme
+ # log the exception and continue
+ logger.error('Error in main loop', exc_info=True)
+ try:
+ if not taken:
+ # Only sleep if we didn't take a task, otherwise retry immediately.
+ # The load-balancing code in getNextTask() will prevent a single builder
+ # from getting overloaded.
+ time.sleep(options.sleeptime)
+ except (SystemExit,KeyboardInterrupt):
+ logger.warn("Exiting")
+ break
+ logger.warn("Shutting down, please wait...")
+ tm.shutdown()
+ session.logout()
+ sys.exit(0)
+
+
+####################
+# Tasks for handling VM lifecycle
+####################
+
+class DaemonXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer):
+ allow_reuse_address = True
+
+ def __init__(self, addr, port):
+ if sys.version_info[:2] <= (2, 4):
+ SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, (addr, port), logRequests=False)
+ else:
+ SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, (addr, port), logRequests=False,
+ allow_none=True)
+ self.logger = logging.getLogger('koji.vm.DaemonXMLRPCServer')
+ self.socket.settimeout(5)
+ self.active = True
+
+ def server_close(self):
+ self.active = False
+ SimpleXMLRPCServer.SimpleXMLRPCServer.server_close(self)
+
+ def handle_while_active(self):
+ while self.active:
+ try:
+ conn, (ipaddr, port) = self.get_request()
+ self.logger.debug('request from %s:%s', ipaddr, port)
+ if self.verify_request(conn, (ipaddr, port)):
+ try:
+ self.process_request(conn, (ipaddr, port))
+ finally:
+ self.close_request(conn)
+ except socket.timeout:
+ pass
+ except:
+ self.logger.error('Error handling requests', exc_info=True)
+
+ if sys.version_info[:2] <= (2, 4):
+ # Copy and paste from SimpleXMLRPCServer, with the addition of passing
+ # allow_none=True to xmlrpclib.dumps()
+ def _marshaled_dispatch(self, data, dispatch_method = None):
+ params, method = xmlrpclib.loads(data)
+ try:
+ if dispatch_method is not None:
+ response = dispatch_method(method, params)
+ else:
+ response = self._dispatch(method, params)
+ response = (response,)
+ response = xmlrpclib.dumps(response, methodresponse=1, allow_none=True)
+ except xmlrpclib.Fault, fault:
+ response = xmlrpclib.dumps(fault)
+ except:
+ # report exception back to server
+ response = xmlrpclib.dumps(
+ xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
+ )
+ return response
+
+
+class WinBuildTask(MultiPlatformTask):
+ """
+ Spawns a vmExec task to run a build, and imports the output.
+ """
+ Methods = ['winbuild']
+ _taskWeight = 0.2
+
+ def handler(self, name, source_url, target, opts=None):
+ if not opts:
+ opts = {}
+
+ subopts = koji.util.dslice(opts, ['winspec', 'patches'],
+ strict=False)
+ # winspec and patches options are urls
+ # verify the urls before passing them to the VM
+ for url in [source_url] + subopts.values():
+ scm = SCM(url)
+ scm.assert_allowed(self.options.allowed_scms)
+
+ task_info = self.session.getTaskInfo(self.id)
+ target_info = self.session.getBuildTarget(target)
+ if not target_info:
+ raise koji.BuildError, 'unknown build target: %s' % target
+ dest_tag = self.session.getTag(target_info['dest_tag'], strict=True)
+ build_tag = self.session.getTag(target_info['build_tag'], strict=True)
+ repo_id = opts.get('repo_id')
+ if repo_id:
+ repo_info = self.session.repoInfo(repo_id)
+ if not repo_info:
+ raise koji.BuildError, 'invalid repo ID: %s' % repo_id
+ event_id = repo_info['create_event']
+ policy_data = {
+ 'user_id' : task_info['owner'],
+ 'source' : source_url,
+ 'task_id' : self.id,
+ 'build_tag' : build_tag['id'],
+ 'skip_tag' : bool(opts.get('skip_tag')),
+ 'target': target_info['id']
+ }
+ if not opts.get('skip_tag'):
+ policy_data['tag'] = dest_tag['id']
+ self.session.host.assertPolicy('build_from_repo_id', policy_data)
+ else:
+ repo_info = self.getRepo(build_tag['id'])
+ repo_id = repo_info['id']
+ event_id = None
+
+ subopts['repo_id'] = repo_id
+
+ task_opts = koji.util.dslice(opts, ['timeout', 'cpus', 'mem', 'static_mac'], strict=False)
+ task_id = self.session.host.subtask(method='vmExec',
+ arglist=[name, [source_url, build_tag['name'], subopts], task_opts],
+ label=name[:255],
+ parent=self.id)
+ results = self.wait(task_id)[task_id]
+ results['task_id'] = task_id
+
+ build_info = None
+ if not opts.get('scratch'):
+ build_info = koji.util.dslice(results, ['name', 'version', 'release', 'epoch'])
+ build_info['package_name'] = build_info['name']
+ pkg_cfg = self.session.getPackageConfig(dest_tag['id'], build_info['name'], event=event_id)
+ if not opts.get('skip_tag'):
+ # Make sure package is on the list for this tag
+ if pkg_cfg is None:
+ raise koji.BuildError, "package %s not in list for tag %s" \
+ % (build_info['name'], dest_tag['name'])
+ elif pkg_cfg['blocked']:
+ raise koji.BuildError, "package %s is blocked for tag %s" \
+ % (build_info['name'], dest_tag['name'])
+
+ build_info = self.session.host.initWinBuild(self.id, build_info,
+ koji.util.dslice(results, ['platform']))
+ build_id = build_info['id']
+
+ try:
+ rpm_results = None
+ spec_url = opts.get('specfile')
+ if spec_url:
+ rpm_results = self.buildWrapperRPM(spec_url, task_id, target_info, build_info, repo_id,
+ channel='default')
+
+ if opts.get('scratch'):
+ self.session.host.moveWinBuildToScratch(self.id, results, rpm_results)
+ else:
+ self.session.host.completeWinBuild(self.id, build_id, results, rpm_results)
+ except (SystemExit, ServerExit, KeyboardInterrupt):
+ # we do not trap these
+ raise
+ except:
+ if not opts.get('scratch'):
+ # scratch builds do not get imported
+ self.session.host.failBuild(self.id, build_id)
+ # reraise the exception
+ raise
+
+ if not opts.get('scratch') and not opts.get('skip_tag'):
+ tag_task_id = self.session.host.subtask(method='tagBuild',
+ arglist=[dest_tag['id'], build_id],
+ label='tag',
+ channel='default',
+ parent=self.id)
+ self.wait(tag_task_id)
+
+class VMExecTask(BaseTaskHandler):
+ """
+ Handles the startup, state-tracking, and shutdown of a VM
+ for the purposes of executing a single task.
+ """
+
+ Methods = ['vmExec']
+ _taskWeight = 3.0
+ CLONE_PREFIX = 'koji-clone-'
+ QCOW2_EXT = '.qcow2'
+
+ def __init__(self, *args, **kw):
+ super(VMExecTask, self).__init__(*args, **kw)
+ self.task_manager = xmlrpclib.ServerProxy('http://%s:%s/' % (self.options.privaddr, self.options.portbase),
+ allow_none=True)
+ self.port = None
+ self.server = None
+ self.task_info = None
+ self.buildreq_dir = os.path.join(self.workdir, 'buildreqs')
+ koji.ensuredir(self.buildreq_dir)
+ self.output_dir = os.path.join(self.workdir, 'output')
+ koji.ensuredir(self.output_dir)
+ self.output = None
+ self.success = None
+
+ def mkqcow2(self, clone_name, source_disk, disk_num):
+ new_name = clone_name + '-disk-' + str(disk_num) + self.QCOW2_EXT
+ new_path = os.path.join(self.options.imagedir, new_name)
+ cmd = ['/usr/bin/qemu-img', 'create', '-f', 'qcow2', '-o', 'backing_file=%s' % source_disk, new_path]
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, close_fds=True)
+ output, dummy = proc.communicate()
+ ret = proc.wait()
+ if ret:
+ raise koji.BuildError, 'unable to create qcow2 image, "%s" returned %s; output was: %s' % \
+ (' '.join(cmd), ret, output)
+ vm_user = pwd.getpwnam(self.options.vmuser)
+ os.chown(new_path, vm_user.pw_uid, vm_user.pw_gid)
+ return new_path
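+# The clone's disks are thin copy-on-write overlays; roughly equivalent to
+# running (paths hypothetical):
+#   qemu-img create -f qcow2 -o backing_file=/var/lib/libvirt/images/win7.img \
+#       /var/lib/libvirt/images/koji-clone-1234-win7-disk-0.qcow2
+# Writes land in the overlay, so the pristine base image is never modified.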
+
+ def updateXML(self, xml, opts):
+ """Update the VM xml to reflect the task options"""
+ doc = libxml2.parseDoc(xml)
+ ctx = doc.xpathNewContext()
+ if opts.get('cpus'):
+ cpus = opts['cpus']
+ cpu_node = ctx.xpathEval('/domain/vcpu')[0]
+ if str(cpus) != cpu_node.getContent():
+ cpu_node.setContent(str(cpus))
+ if opts.get('mem'):
+ mem = opts['mem']
+ # mem is in mbytes, libvirt expects kbytes
+ mem = mem * 1024
+ mem_node = ctx.xpathEval('/domain/memory')[0]
+ if mem > int(mem_node.getContent()):
+ mem_node.setContent(str(mem))
+ curr_mem_node = ctx.xpathEval('/domain/currentMemory')[0]
+ if str(mem) != curr_mem_node.getContent():
+ curr_mem_node.setContent(str(mem))
+ fixed_xml = str(doc)
+ ctx.xpathFreeContext()
+ doc.freeDoc()
+ return fixed_xml
+
+ def clone(self, conn, name, opts):
+ """
+ Clone the VM named "name" and return the name of the cloned VM.
+ All disks will be qcow2 images backed by the storage of the original
+ VM. The original VM must be shut down, or this will raise an error.
+ """
+ clone_name = self.CLONE_PREFIX + str(self.id) + '-' + name
+ clone_name = clone_name[:50]
+ orig_vm = conn.lookupByName(name)
+ orig_paths = self.guestDisks(orig_vm)
+
+ cmd = ["virt-clone", "--original", name, "--name", clone_name,
+ "--preserve-data"]
+
+ for idx, orig_disk in enumerate(orig_paths):
+ new_disk = self.mkqcow2(clone_name, orig_disk, idx)
+ cmd += ["--file", new_disk]
+
+ if opts.get('static_mac'):
+ orig_mac = self.macAddr(orig_vm)
+ cmd += ["--mac", orig_mac]
+
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, close_fds=True)
+ output, dummy = proc.communicate()
+ ret = proc.wait()
+ if ret:
+ raise koji.BuildError('unable to clone VM: '
+ '"%s" returned %s; output was: %s' %
+ (' '.join(cmd), ret, output))
+
+ # Set the cpus and mem parameters directly in the XML.
+ newvm = conn.lookupByName(clone_name)
+ clone_xml = self.updateXML(newvm.XMLDesc(0), opts)
+ conn.defineXML(clone_xml)
+
+ return clone_name
+
+ def guestDisks(self, vm):
+ """
+ Parse cloneable disks out of the original VM XML
+ """
+ doc = libxml2.parseDoc(vm.XMLDesc(0))
+ ctx = doc.xpathNewContext()
+
+ ret = []
+ nodelist = ctx.xpathEval('/domain/devices/disk[@device="disk" and @type="file"]/source')
+ for node in nodelist:
+ for prop in node.properties:
+ if prop.name in ["file", "dev"]:
+ ret.append(node.prop(prop.name))
+ break
+
+ ctx.xpathFreeContext()
+ doc.freeDoc()
+
+ return ret
+
+ def macAddr(self, vm):
+ """
+ Return the MAC address of the first network interface configured for the given VM.
+ """
+ doc = libxml2.parseDoc(vm.XMLDesc(0))
+ ctx = doc.xpathNewContext()
+ nodelist = ctx.xpathEval('/domain/devices/interface[@type="network"]/mac')
+ if not nodelist:
+ raise koji.BuildError, 'no network interfaces configured for %s' % vm.name()
+ addr = nodelist[0].prop('address')
+ ctx.xpathFreeContext()
+ doc.freeDoc()
+ return addr
+
+ def getTaskInfo(self):
+ """
+ Get the command-line to run in the VM.
+ """
+ return self.task_info
+
+ def initBuildroot(self, repo_id, platform):
+ """
+ Create the buildroot object on the hub.
+ """
+ # we're using platform as the arch, which is currently limited to
+ # 16 characters by the database schema
+ buildroot_id = self.session.host.newBuildRoot(repo_id, platform[:16], task_id=self.id)
+ # a VM doesn't require any additional initialization, so move it from INIT to BUILDING
+ self.session.host.setBuildRootState(buildroot_id, 'BUILDING', task_id=self.id)
+ return buildroot_id
+
+ def updateBuildrootFiles(self, buildroot_id, files, rpms):
+ """
+ Update the list of files that were downloaded into the build environment.
+ """
+ if files:
+ self.session.host.updateBuildrootArchives(buildroot_id, self.id,
+ files, project=True)
+ if rpms:
+ self.session.host.updateBuildRootList(buildroot_id, rpms, task_id=self.id)
+
+ def expireBuildroot(self, buildroot_id):
+ """
+ Set the buildroot to the expired state.
+ """
+ return self.session.host.setBuildRootState(buildroot_id, 'EXPIRED', task_id=self.id)
+
+ def getLatestBuild(self, tag, package, repo_id):
+ """
+ Get information about the latest build of package "package" in tag "tag".
+ """
+ repo_info = self.session.repoInfo(repo_id, strict=True)
+ builds = self.session.getLatestBuilds(tag, package=package,
+ event=repo_info['create_event'])
+ if not builds:
+ raise koji.BuildError, 'no build of package %s in tag %s' % (package, tag)
+ build = builds[0]
+ maven_build = self.session.getMavenBuild(build['id'])
+ if maven_build:
+ del maven_build['build_id']
+ build.update(maven_build)
+ win_build = self.session.getWinBuild(build['id'])
+ if win_build:
+ del win_build['build_id']
+ build.update(win_build)
+ return build
+
+ def getFileList(self, buildID, type, typeopts):
+ """
+ Get the list of files of "type" associated with the build "buildID".
+ typeopts is a dict that is used to filter the file list.
+ typeopts is checked for:
+ patterns: comma-separated list of path/filename patterns (as used by fnmatch)
+ to filter the results with
+ If type is 'rpm', typeopts is checked for:
+ arches: comma-separated list of arches to include in output
+ If type is 'maven', typeopts is checked for:
+ group_ids: Maven group IDs to include in the output
+ artifact_ids: Maven artifact IDs to include in the output
+ versions: Maven versions to include in the output
+ If type is 'win', typeopts is checked for:
+ platforms: comma-separated list of platforms
+ flags: comma-separated list of flags
+ """
+ if not typeopts:
+ typeopts = {}
+ if type == 'rpm':
+ arches = None
+ if typeopts.get('arches'):
+ arches = typeopts['arches'].split(',')
+ files = self.session.listRPMs(buildID=buildID, arches=arches)
+ else:
+ files = self.session.listArchives(buildID=buildID, type=type)
+ for fileinfo in files:
+ if type == 'rpm':
+ filepath = koji.pathinfo.rpm(fileinfo)
+ elif type == 'maven':
+ filepath = koji.pathinfo.mavenfile(fileinfo)
+ elif type == 'win':
+ filepath = koji.pathinfo.winfile(fileinfo)
+ else:
+ # XXX support other file types when available
+ filepath = fileinfo['filename']
+ fileinfo['localpath'] = filepath
+ if typeopts.get('patterns'):
+ to_filter = files
+ files = []
+ patterns = typeopts['patterns'].split(',')
+ for fileinfo in to_filter:
+ for pattern in patterns:
+ if fnmatch.fnmatch(fileinfo['localpath'], pattern):
+ files.append(fileinfo)
+ break
+ if type == 'maven':
+ if typeopts.get('group_ids'):
+ group_ids = typeopts['group_ids'].split(',')
+ files = [f for f in files if f['group_id'] in group_ids]
+ if typeopts.get('artifact_ids'):
+ artifact_ids = typeopts['artifact_ids'].split(',')
+ files = [f for f in files if f['artifact_id'] in artifact_ids]
+ if typeopts.get('versions'):
+ versions = typeopts['versions'].split(',')
+ files = [f for f in files if f['version'] in versions]
+ if type == 'win':
+ if typeopts.get('platforms'):
+ platforms = typeopts['platforms'].split(',')
+ files = [f for f in files if set(f['platforms'].split()).intersection(platforms)]
+ if typeopts.get('flags'):
+ flags = typeopts['flags'].split(',')
+ files = [f for f in files if set(f['flags'].split()).intersection(flags)]
+ return files
+
+ def localCache(self, buildinfo, fileinfo, type):
+ """
+ Access a file in the local cache. If the file does not exist, it's downloaded
+ from the server. Returns an open file object.
+ """
+ # fileinfo['localpath'] is set by getFileList()
+ localpath = os.path.join(self.buildreq_dir, buildinfo['name'], type, fileinfo['localpath'])
+ if not os.path.isfile(localpath):
+ remote_pi = koji.PathInfo(self.options.topurl)
+ if type == 'rpm':
+ remote_url = remote_pi.build(buildinfo) + '/' + \
+ fileinfo['localpath']
+ elif type == 'maven':
+ remote_url = remote_pi.mavenbuild(buildinfo) + '/' + \
+ fileinfo['localpath']
+ elif type == 'win':
+ remote_url = remote_pi.winbuild(buildinfo) + '/' + \
+ fileinfo['localpath']
+ else:
+ raise koji.BuildError, 'unsupported file type: %s' % type
+ koji.ensuredir(os.path.dirname(localpath))
+ urlgrabber.urlgrab(remote_url, filename=localpath)
+
+ return file(localpath, 'r')
+
+ def getFile(self, buildinfo, archiveinfo, offset, length, type):
+ """
+ Get the contents of the file indicated by archiveinfo, returning a maximum of
+ "length" bytes starting at "offset". Contents are returned base64-encoded.
+ """
+ offset = int(offset)
+ length = int(length)
+ fileobj = self.localCache(buildinfo, archiveinfo, type)
+ try:
+ fileobj.seek(offset)
+ data = fileobj.read(length)
+ encoded = base64.b64encode(data)
+ del data
+ return encoded
+ finally:
+ fileobj.close()
+
+ def upload(self, path, offset, contents):
+ local_path = os.path.abspath(os.path.join(self.output_dir, path))
+ if not local_path.startswith(self.output_dir):
+ raise koji.BuildError, 'invalid upload path: %s' % path
+ koji.ensuredir(os.path.dirname(local_path))
+ # accept offset as a str to avoid XML-RPC's 32-bit integer limit on large files
+ offset = int(offset)
+ if offset == 0:
+ if os.path.exists(local_path):
+ raise koji.BuildError, 'cannot overwrite %s' % local_path
+ fobj = file(local_path, 'w')
+ else:
+ if not os.path.isfile(local_path):
+ raise koji.BuildError, '%s does not exist' % local_path
+ size = os.path.getsize(local_path)
+ if offset != size:
+ raise koji.BuildError, 'cannot write to %s at offset %s, size is %s' % \
+ (local_path, offset, size)
+ fobj = file(local_path, 'r+')
+ fobj.seek(offset)
+ data = base64.b64decode(contents)
+ fobj.write(data)
+ fobj.close()
+ return len(data)
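+# A minimal sketch of the guest side of this protocol (values hypothetical),
+# matching upload_file() in kojikamid:
+#   server.upload('output/foo.zip', '0', base64.b64encode(chunk1))
+#   server.upload('output/foo.zip', str(len(chunk1)), base64.b64encode(chunk2))
+# Each call appends at the stated offset; any other offset is rejected above.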
+
+ def uploadDirect(self, filepath, offset, size, md5sum, data):
+ """
+ Upload contents directly to the server.
+ """
+ remotepath = os.path.dirname(os.path.join(self.getUploadDir(), filepath))
+ filename = os.path.basename(filepath)
+ self.session.uploadFile(remotepath, filename, koji.encode_int(size),
+ md5sum, koji.encode_int(offset), data)
+
+ def verifyChecksum(self, path, checksum, algo='sha1'):
+ local_path = os.path.abspath(os.path.join(self.output_dir, path))
+ if not local_path.startswith(self.output_dir):
+ raise koji.BuildError, 'invalid path: %s' % path
+ if not os.path.isfile(local_path):
+ raise koji.BuildError, '%s does not exist' % local_path
+
+ if algo == 'sha1':
+ sum = koji.util.sha1_constructor()
+ elif algo == 'md5':
+ sum = koji.util.md5_constructor()
+ else:
+ raise koji.BuildError, 'unsupported checksum algorithm: %s' % algo
+
+ fobj = file(local_path, 'r')
+ while True:
+ data = fobj.read(1048576)
+ if not data:
+ break
+ sum.update(data)
+ fobj.close()
+ if sum.hexdigest() == checksum:
+ return True
+ else:
+ raise koji.BuildError, '%s checksum validation failed for %s, %s (computed) != %s (provided)' % \
+ (algo, local_path, sum.hexdigest(), checksum)
+
+ def closeTask(self, output):
+ self.output = output
+ self.success = True
+ return True
+
+ def failTask(self, output):
+ self.output = output
+ self.success = False
+ return True
+
+ def setupTaskServer(self):
+ """
+ Set up the task-specific XML-RPC server to listen for requests from
+ the VM.
+ """
+ self.server = DaemonXMLRPCServer(self.options.privaddr, self.port)
+ self.server.register_function(self.getTaskInfo)
+ self.server.register_function(self.closeTask)
+ self.server.register_function(self.failTask)
+ self.server.register_function(self.initBuildroot)
+ self.server.register_function(self.updateBuildrootFiles)
+ self.server.register_function(self.expireBuildroot)
+ self.server.register_function(self.getLatestBuild)
+ self.server.register_function(self.getFileList)
+ self.server.register_function(self.getFile)
+ self.server.register_function(self.upload)
+ self.server.register_function(self.uploadDirect)
+ self.server.register_function(self.verifyChecksum)
+ thr = threading.Thread(name='task_%s_thread' % self.id,
+ target=self.server.handle_while_active)
+ thr.setDaemon(True)
+ thr.start()
+
+ def handler(self, name, task_info, opts=None):
+ """
+ Clone the VM named "name", and provide the data in "task_info" to it.
+ Available options:
+ - timeout (int): number of minutes to let the VM run before
+ destroying it and failing the task, default: 1440
+ """
+ if not opts:
+ opts = {}
+ timeout = opts.get('timeout', 1440)
+
+ self.task_info = task_info
+
+ conn = libvirt.open(None)
+ clone_name = self.clone(conn, name, opts)
+ self.logger.debug('Cloned VM %s to %s', name, clone_name)
+ try:
+ vm = conn.lookupByName(clone_name)
+ macaddr = self.macAddr(vm)
+ registered = False
+ while not registered:
+ # loop in case the port is already taken
+ self.port = self.options.portbase + random.randint(1, 100)
+ registered = self.task_manager.registerVM(macaddr, clone_name, self.id, self.port)
+ self.setupTaskServer()
+ vm.create()
+ self.logger.info('Started VM %s', clone_name)
+ except libvirt.libvirtError, e:
+ self.logger.error('error starting VM %s', clone_name, exc_info=True)
+ raise koji.PreBuildError, 'error starting VM %s, error was: %s' % \
+ (clone_name, e)
+
+ start = time.time()
+ while True:
+ time.sleep(15)
+ info = vm.info()
+ if info[0] in (libvirt.VIR_DOMAIN_CRASHED, libvirt.VIR_DOMAIN_SHUTOFF):
+ self.logger.warn('VM %s crashed or shut down unexpectedly', clone_name)
+ self.server.server_close()
+ raise koji.BuildError, 'VM %s crashed or shut down unexpectedly' % clone_name
+ if self.success is None:
+ # task is still running
+ # make sure it hasn't exceeded the timeout
+ mins = (time.time() - start) / 60
+ if mins > timeout:
+ vm.destroy()
+ self.server.server_close()
+ raise koji.BuildError, 'Task did not complete after %.2f minutes, VM %s has been destroyed' % \
+ (mins, clone_name)
+ else:
+ vm.destroy()
+ self.server.server_close()
+ self.uploadTree(self.output_dir)
+ if self.success:
+ return self.output
+ else:
+ raise koji.BuildError, self.output
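+# The task result arrives out of band: the guest daemon calls closeTask() or
+# failTask() over XML-RPC, which merely records success and output, while the
+# loop above only polls libvirt for the domain state and enforces the timeout.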
+
+class VMTaskManager(TaskManager):
+ def __init__(self, options, session):
+ super(VMTaskManager, self).__init__(options, session)
+ self.libvirt_conn = libvirt.open(None)
+ self.macaddrs = {}
+ self.macaddr_lock = threading.Lock()
+ self.expired_vms = {}
+ self.setupServer()
+
+ def registerVM(self, macaddr, vm_name, task_id, port):
+ """
+ Register a VM instance with the task manager.
+ """
+ self.macaddr_lock.acquire()
+ try:
+ macaddr = macaddr.lower()
+ ports = [d[2] for d in self.macaddrs.values()]
+ if port in ports:
+ return False
+ if macaddr in self.macaddrs:
+ raise koji.PreBuildError, 'duplicate MAC address: %s' % macaddr
+ self.macaddrs[macaddr] = (vm_name, task_id, port)
+ self.logger.info('registered MAC address %s for VM %s (task ID %s, port %s)', macaddr, vm_name, task_id, port)
+ return True
+ finally:
+ self.macaddr_lock.release()
+
+ def getPort(self, macaddr):
+ """
+ Get the port that the task server for the VM with the given MAC address is listening on.
+ """
+ self.macaddr_lock.acquire()
+ try:
+ macaddr = macaddr.lower()
+ data = self.macaddrs.get(macaddr)
+ if data:
+ return data[2]
+ else:
+ raise koji.PreBuildError, 'unknown MAC address: %s' % macaddr
+ finally:
+ self.macaddr_lock.release()
+
+ def setupServer(self):
+ self.server = DaemonXMLRPCServer(self.options.privaddr, self.options.portbase)
+ self.server.register_function(self.registerVM)
+ self.server.register_function(self.getPort)
+ thr = threading.Thread(name='manager_thread', target=self.server.handle_while_active)
+ thr.setDaemon(True)
+ thr.start()
+
+ def getCloneDisks(self, vm):
+ doc = libxml2.parseDoc(vm.XMLDesc(0))
+ ctx = doc.xpathNewContext()
+ nodelist = ctx.xpathEval('/domain/devices/disk[@device="disk" and @type="file"]/source')
+ disks = []
+ for node in nodelist:
+ disk = node.prop('file')
+ if os.path.basename(disk).startswith(VMExecTask.CLONE_PREFIX) and \
+ disk.endswith(VMExecTask.QCOW2_EXT):
+ disks.append(disk)
+ ctx.xpathFreeContext()
+ doc.freeDoc()
+ return disks
+
+ def checkDisk(self):
+ if not os.path.exists(self.options.imagedir):
+ self.logger.error('No such directory: %s', self.options.imagedir)
+ raise IOError, 'No such directory: %s' % self.options.imagedir
+ fs_stat = os.statvfs(self.options.imagedir)
+ available = fs_stat.f_bavail * fs_stat.f_bsize
+ availableMB = available / 1024 / 1024
+ self.logger.debug('disk space available in %s: %i MB', self.options.imagedir, availableMB)
+ if availableMB < self.options.minspace:
+ self.status = 'Insufficient disk space: %i MB, %i MB required' % (availableMB, self.options.minspace)
+ self.logger.warn(self.status)
+ return False
+ return True
+
+ def checkMem(self):
+ phys_mem = os.sysconf('SC_PHYS_PAGES') * os.sysconf('SC_PAGE_SIZE') / 1024
+ vm_mem = 0
+ for vm_id in self.libvirt_conn.listDomainsID():
+ vm = self.libvirt_conn.lookupByID(vm_id)
+ info = vm.info()
+ # info[1] is the max. memory allocatable to the VM, and info[2] is the amount of
+ # memory currently used by the VM (in kbytes). We're interested in the latter.
+ vm_mem += info[2]
+ avail_mem = phys_mem - vm_mem
+ # options.minmem is listed in mbytes
+ min_mem = self.options.minmem * 1024
+ self.logger.debug('physical mem: %sk, allocated mem: %sk, available mem: %sk',
+ phys_mem, vm_mem, avail_mem)
+ if avail_mem < min_mem:
+ self.status = 'Insufficient memory: %sk allocated, %sk available, %sk required' % \
+ (vm_mem, avail_mem, min_mem)
+ self.logger.warn(self.status)
+ return False
+ return True
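+# Worked example with hypothetical numbers: 16 GiB physical memory
+# (16777216 kB), two VMs using 2097152 kB each, and the default minmem of
+# 4096 MB (4194304 kB) leave avail_mem = 16777216 - 4194304 = 12582912 kB,
+# which is above the threshold, so another task can be accepted.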
+
+ def checkSpace(self):
+ """See if we have enough space to accept another job"""
+ return self.checkDisk() and self.checkMem()
+
+ def checkRelAvail(self, bin_avail, avail):
+ """
+ Always return True, since we may be the only daemon with access
+ to the VM required to process this task.
+ """
+ return True
+
+ def takeTask(self, task):
+ """
+ Verify that this builder can handle the task before claiming it.
+ """
+ if task['method'] == 'vmExec':
+ task_info = self.session.getTaskInfo(task['id'], request=True)
+ vm_name = task_info['request'][0]
+ try:
+ vm = self.libvirt_conn.lookupByName(vm_name)
+ except libvirt.libvirtError:
+ # if this builder does not have the requested VM,
+ # we can't handle the task
+ self.logger.debug('VM %s not available, ignoring task %i', vm_name, task['id'])
+ return False
+ return super(VMTaskManager, self).takeTask(task)
+
+ def cleanupVM(self, vm_name):
+ """
+ Clean up a single VM with the given name.
+ """
+ try:
+ vm = self.libvirt_conn.lookupByName(vm_name)
+ except libvirt.libvirtError:
+ # if we can't find the VM by name, it has probably been cleaned up manually
+ self.logger.warn("Can't find %s, assuming it has already been cleaned up", vm_name)
+ return True
+ info = vm.info()
+ if info[0] not in (libvirt.VIR_DOMAIN_SHUTOFF, libvirt.VIR_DOMAIN_CRASHED):
+ vm.destroy()
+ self.logger.info('Shut down VM %s', vm_name)
+ disks = self.getCloneDisks(vm)
+ for disk in disks:
+ try:
+ if os.path.isfile(disk):
+ os.unlink(disk)
+ self.logger.debug('Removed disk file %s for VM %s', disk, vm_name)
+ except:
+ self.logger.error('Error removing disk file %s for VM %s', disk, vm_name,
+ exc_info=True)
+ return False
+ else:
+ # Removed all the disks successfully, so undefine the VM
+ vm.undefine()
+ self.logger.info('Cleaned up VM %s', vm_name)
+ return True
+
+ def cleanupAllVMs(self):
+ """
+ Shut down and clean up all cloned Koji VMs.
+ Only called once at daemon startup, so we start with a clean slate.
+ """
+ vms = self.libvirt_conn.listDefinedDomains() + self.libvirt_conn.listDomainsID()
+ for vm_name in vms:
+ if type(vm_name) == int:
+ vm_name = self.libvirt_conn.lookupByID(vm_name).name()
+ if vm_name.startswith(VMExecTask.CLONE_PREFIX):
+ self.cleanupVM(vm_name)
+
+ def cleanupExpiredVMs(self):
+ for vm_name, task in self.expired_vms.items():
+ if task['state'] == koji.TASK_STATES['FAILED']:
+ if time.time() - task['completion_ts'] < 3600 * 4:
+ # task failed, so we'll keep the VM image around for 4 hours
+ # for debugging purposes
+ continue
+ ret = self.cleanupVM(vm_name)
+ if ret:
+ # successfully cleaned up the VM, so remove it from the expired list
+ del self.expired_vms[vm_name]
+
+ def cleanupTask(self, task_id, wait=True):
+ ret = super(VMTaskManager, self).cleanupTask(task_id, wait)
+ self.macaddr_lock.acquire()
+ try:
+ if ret:
+ for macaddr, (vm_name, id, port) in self.macaddrs.items():
+ if task_id == id:
+ self.expired_vms[vm_name] = self.session.getTaskInfo(task_id)
+ del self.macaddrs[macaddr]
+ self.logger.info('unregistered MAC address %s', macaddr)
+ break
+ return ret
+ finally:
+ self.macaddr_lock.release()
+
+ def shutdown(self):
+ self.server.server_close()
+ self.libvirt_conn.close()
+ super(VMTaskManager, self).shutdown()
+
+
+####################
+# Boilerplate startup code
+####################
+
+if __name__ == "__main__":
+ koji.add_file_logger("koji", "/var/log/kojivmd.log")
+ #note we're setting logging params for all of koji*
+ options = get_options()
+ if options.debug:
+ logging.getLogger("koji").setLevel(logging.DEBUG)
+ elif options.verbose:
+ logging.getLogger("koji").setLevel(logging.INFO)
+ else:
+ logging.getLogger("koji").setLevel(logging.WARN)
+ if options.debug_task:
+ logging.getLogger("koji.build.BaseTaskHandler").setLevel(logging.DEBUG)
+ if options.admin_emails:
+ koji.add_mail_logger("koji", options.admin_emails)
+
+ #build session options
+ session_opts = {}
+ for k in ('user', 'password', 'krbservice', 'debug_xmlrpc', 'debug',
+ 'retry_interval', 'max_retries', 'offline_retry', 'offline_retry_interval'):
+ v = getattr(options, k, None)
+ if v is not None:
+ session_opts[k] = v
+ #start a session and login
+ session = koji.ClientSession(options.server, session_opts)
+ if os.path.isfile(options.cert):
+ try:
+ # authenticate using SSL client certificates
+ session.ssl_login(options.cert, options.ca,
+ options.serverca)
+ except koji.AuthError, e:
+ quit("Error: Unable to log in: %s" % e)
+ except xmlrpclib.ProtocolError:
+ quit("Error: Unable to connect to server %s" % (options.server))
+ elif options.user:
+ try:
+ # authenticate using user/password
+ session.login()
+ except koji.AuthError:
+ quit("Error: Unable to log in. Bad credentials?")
+ except xmlrpclib.ProtocolError:
+ quit("Error: Unable to connect to server %s" % (options.server))
+ elif sys.modules.has_key('krbV'):
+ krb_principal = options.krb_principal
+ if krb_principal is None:
+ krb_principal = options.host_principal_format % socket.getfqdn()
+ try:
+ session.krb_login(principal=krb_principal,
+ keytab=options.keytab,
+ ccache=options.ccache)
+ except krbV.Krb5Error, e:
+ quit("Kerberos authentication failed: '%s' (%s)" % (e.args[1], e.args[0]))
+ except socket.error, e:
+ quit("Could not connect to Kerberos authentication service: '%s'" % e.args[1])
+ else:
+ quit("No username/password supplied and Kerberos missing or not configured")
+ #make session exclusive
+ try:
+ session.exclusiveSession(force=options.force_lock)
+ except koji.AuthLockError:
+ quit("Error: Unable to get lock. Try using --force-lock")
+ if not session.logged_in:
+ quit("Error: Unknown login error")
+ #make sure it works
+ try:
+ ret = session.echo("OK")
+ except xmlrpclib.ProtocolError:
+ quit("Error: Unable to connect to server %s" % (options.server))
+ if ret != ["OK"]:
+ quit("Error: incorrect server response: %r" % (ret))
+
+ # run main
+ if options.daemon:
+ #detach
+ koji.daemonize()
+ main(options, session)
+ elif not options.skip_main:
+ koji.add_stderr_logger("koji")
+ main(options, session)
diff --git a/vm/kojivmd.conf b/vm/kojivmd.conf
new file mode 100644
index 0000000..82c61f2
--- /dev/null
+++ b/vm/kojivmd.conf
@@ -0,0 +1,57 @@
+[kojivmd]
+; The number of seconds to sleep between tasks
+; sleeptime=15
+
+; The maximum number of jobs that kojivmd will handle at a time
+; maxjobs=5
+
+; Minimum amount of memory (in MBs) not allocated to a VM for kojivmd to take a new task
+; minmem=4096
+
+; The user the VM/emulator runs as (cloned disk images will be readable and writable by this user)
+; vmuser=qemu
+
+; The directory root for temporary storage
+; workdir=/tmp/koji
+
+; The URL where the Koji root directory (/mnt/koji) can be accessed
+topurl=http://koji.example.com/kojiroot
+
+; The URL for the xmlrpc server
+server=http://hub.example.com/kojihub
+
+; A space-separated list of hostname:repository[:use_common] tuples that kojivmd is authorized to check out from (no quotes).
+; Wildcards (as supported by fnmatch) are allowed.
+; If use_common is specified and is one of "false", "no", "off", or "0" (without quotes), then kojivmd will not attempt to check out
+; a common/ dir when checking out sources from the source control system. Otherwise, it will attempt to check out a common/
+; dir, and will raise an exception if it cannot.
+allowed_scms=scm.example.com:/cvs/example git.example.org:/example svn.example.org:/users/*:no
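+; For example, the "svn.example.org:/users/*:no" entry above authorizes any
+; repository under /users/ on svn.example.org and disables the common/
+; directory checkout for sources fetched from it.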
+
+; The mail host to use for sending email notifications
+smtphost=example.com
+
+; The From address used when sending email notifications
+from_addr=Koji Build System <buildsys at example.com>
+
+;configuration for Kerberos authentication
+
+;the format of the principal used by the build hosts
+;%s will be replaced by the FQDN of the host
+;host_principal_format = compile/%s at EXAMPLE.COM
+
+;location of the keytab
+;keytab = /etc/kojivmd/kojivmd.keytab
+
+;the service name of the principal being used by the hub
+;krbservice = host
+
+;configuration for SSL authentication
+
+;client certificate
+;cert = /etc/kojivmd/client.crt
+
+;certificate of the CA that issued the client certificate
+;ca = /etc/kojivmd/clientca.crt
+
+;certificate of the CA that issued the HTTP server certificate
+;serverca = /etc/kojivmd/serverca.crt
diff --git a/vm/kojivmd.init b/vm/kojivmd.init
new file mode 100755
index 0000000..b0a6408
--- /dev/null
+++ b/vm/kojivmd.init
@@ -0,0 +1,93 @@
+#! /bin/sh
+#
+# kojivmd Start/Stop kojivmd
+#
+# chkconfig: - 99 99
+# description: kojivmd server
+# processname: kojivmd
+
+# Source function library.
+. /etc/init.d/functions
+
+# Check that we're a privileged user
+[ `id -u` = 0 ] || exit 1
+
+[ -f /etc/sysconfig/kojivmd ] && . /etc/sysconfig/kojivmd
+
+prog="kojivmd"
+
+# Check that networking is up.
+if [ "$NETWORKING" = "no" ]
+then
+ exit 0
+fi
+
+[ -f /usr/sbin/kojivmd ] || exit 1
+
+RETVAL=0
+
+start() {
+ echo -n $"Starting $prog: "
+ cd /
+ ARGS=""
+ [ "$FORCE_LOCK" == "Y" ] && ARGS="$ARGS --force-lock"
+ [ "$KOJIVMD_DEBUG" == "Y" ] && ARGS="$ARGS --debug"
+ [ "$KOJIVMD_VERBOSE" == "Y" ] && ARGS="$ARGS --verbose"
+ daemon /usr/sbin/kojivmd $ARGS
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && touch /var/lock/subsys/kojivmd
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Stopping $prog: "
+ killproc kojivmd
+ RETVAL=$?
+ echo
+ [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/kojivmd
+ return $RETVAL
+}
+
+restart() {
+ stop
+ start
+}
+
+graceful() {
+ #SIGUSR1 initiates a graceful restart
+ pid=$(pidofproc kojivmd)
+ if test -z "$pid"
+ then
+ echo $"$prog not running"
+ else
+ kill -10 $pid
+ fi
+}
+
+# See how we were called.
+case "$1" in
+ start)
+ start
+ ;;
+ stop)
+ stop
+ ;;
+ status)
+ status $prog
+ ;;
+ restart|reload|force-reload)
+ restart
+ ;;
+ condrestart|try-restart)
+ [ -f /var/lock/subsys/kojivmd ] && restart || :
+ ;;
+ graceful)
+ graceful
+ ;;
+ *)
+ echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|graceful}"
+ exit 1
+esac
+
+exit $?
diff --git a/vm/kojivmd.service b/vm/kojivmd.service
new file mode 100644
index 0000000..a417fdb
--- /dev/null
+++ b/vm/kojivmd.service
@@ -0,0 +1,14 @@
+[Unit]
+Description=Koji vm build server
+Documentation=https://fedoraproject.org/wiki/Koji/ServerHowTo
+
+After=network.target
+
+[Service]
+ExecStart=/usr/sbin/kojivmd \
+ --fg \
+ --force-lock \
+ --verbose
+
+[Install]
+WantedBy=multi-user.target
diff --git a/vm/kojivmd.sysconfig b/vm/kojivmd.sysconfig
new file mode 100644
index 0000000..1d7af4d
--- /dev/null
+++ b/vm/kojivmd.sysconfig
@@ -0,0 +1,3 @@
+FORCE_LOCK=Y
+KOJIVMD_DEBUG=N
+KOJIVMD_VERBOSE=Y
diff --git a/www/Makefile b/www/Makefile
new file mode 100644
index 0000000..25a89dd
--- /dev/null
+++ b/www/Makefile
@@ -0,0 +1,20 @@
+SUBDIRS = kojiweb conf lib static
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/usr/share/koji-web
+
+ for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR) \
+ -C $$d install; [ $$? = 0 ] || exit 1; done
diff --git a/www/conf/Makefile b/www/conf/Makefile
new file mode 100644
index 0000000..f87e381
--- /dev/null
+++ b/www/conf/Makefile
@@ -0,0 +1,20 @@
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/etc/httpd/conf.d
+ install -p -m 644 kojiweb.conf $(DESTDIR)/etc/httpd/conf.d/kojiweb.conf
+
+ mkdir -p $(DESTDIR)/etc/kojiweb
+ install -p -m 644 web.conf $(DESTDIR)/etc/kojiweb/web.conf
+ mkdir -p $(DESTDIR)/etc/kojiweb/web.conf.d
diff --git a/www/conf/kojiweb.conf b/www/conf/kojiweb.conf
new file mode 100644
index 0000000..3173ba2
--- /dev/null
+++ b/www/conf/kojiweb.conf
@@ -0,0 +1,62 @@
+#We use wsgi by default
+Alias /koji "/usr/share/koji-web/scripts/wsgi_publisher.py"
+#(configuration goes in /etc/kojiweb/web.conf)
+
+<Directory "/usr/share/koji-web/scripts/">
+ Options ExecCGI
+ SetHandler wsgi-script
+ Require all granted
+ #If you have httpd <= 2.2, you'll want the following two lines instead
+ #of the one above:
+ #Order allow,deny
+ #Allow from all
+</Directory>
+
+# Support for mod_python is DEPRECATED. If you still need mod_python support,
+# then use the following directory settings instead:
+#
+# <Directory "/usr/share/koji-web/scripts/">
+# # Config for the publisher handler
+# SetHandler mod_python
+# # Use kojiweb's publisher (provides wsgi compat layer)
+# # mod_python's publisher is no longer supported
+# PythonHandler wsgi_publisher
+# PythonOption koji.web.ConfigFile /etc/kojiweb/web.conf
+# PythonAutoReload Off
+# # Configuration via PythonOptions is DEPRECATED. Use /etc/kojiweb/web.conf
+# Order allow,deny
+# Allow from all
+# </Directory>
+
+# uncomment this to enable authentication via Kerberos
+# <Location /koji/login>
+# AuthType Kerberos
+# AuthName "Koji Web UI"
+# KrbMethodNegotiate on
+# KrbMethodK5Passwd off
+# KrbServiceName HTTP
+# KrbAuthRealm EXAMPLE.COM
+# Krb5Keytab /etc/httpd.keytab
+# KrbSaveCredentials off
+# Require valid-user
+# ErrorDocument 401 /koji-static/errors/unauthorized.html
+# </Location>
+
+# uncomment this to enable authentication via SSL client certificates
+# <Location /koji/login>
+# SSLVerifyClient require
+# SSLVerifyDepth 10
+# SSLOptions +StdEnvVars
+# </Location>
+
+Alias /koji-static/ "/usr/share/koji-web/static/"
+
+<Directory "/usr/share/koji-web/static/">
+ Options None
+ AllowOverride None
+ Require all granted
+ #If you have httpd <= 2.2, you'll want the following two lines instead
+ #of the one above:
+ #Order allow,deny
+ #Allow from all
+</Directory>
diff --git a/www/conf/web.conf b/www/conf/web.conf
new file mode 100644
index 0000000..38f0b61
--- /dev/null
+++ b/www/conf/web.conf
@@ -0,0 +1,31 @@
+[web]
+SiteName = koji
+#KojiTheme = mytheme
+
+# Key urls
+KojiHubURL = http://hub.example.com/kojihub
+KojiFilesURL = http://server.example.com/kojifiles
+
+# Kerberos authentication options
+# WebPrincipal = koji/web at EXAMPLE.COM
+# WebKeytab = /etc/httpd.keytab
+# WebCCache = /var/tmp/kojiweb.ccache
+# The service name of the principal being used by the hub
+# KrbService = host
+
+# SSL authentication options
+# WebCert = /etc/kojiweb/kojiweb.crt
+# ClientCA = /etc/kojiweb/clientca.crt
+# KojiHubCA = /etc/kojiweb/kojihubca.crt
+
+LoginTimeout = 72
+
+# This must be changed and uncommented before deployment
+# Secret = CHANGE_ME
+
+LibPath = /usr/share/koji-web/lib
+
+# If set to True, then the footer will be included literally.
+# If False, then the footer will be included as another Kid Template.
+# Defaults to True
+LiteralFooter = True
diff --git a/www/docs/negotiate/index.html b/www/docs/negotiate/index.html
new file mode 100644
index 0000000..ed6122e
--- /dev/null
+++ b/www/docs/negotiate/index.html
@@ -0,0 +1,78 @@
+<html>
+ <head>
+ <title>Configuring Firefox (and Mozilla) for Negotiate Authentication</title>
+ </head>
+ <body>
+ <h3>Configuring Firefox (and Mozilla) for Negotiate Authentication</h3>
+ <p>
+ Before Firefox and Mozilla can authenticate to a server using "Negotiate"
+ authentication, a couple of configuration changes must be made.
+ </p>
+ <p>
+ Type <strong>about:config</strong> into the location bar, to bring
+ up the configuration page. Type <strong>negotiate</strong> into the <em>Filter:</em> box, to restrict
+ the listing to the configuration options we're interested in.
+ <br/>
+ Change <strong>network.negotiate-auth.trusted-uris</strong> to the domain you want to authenticate against,
+ e.g. <code>.example.com</code>. You can leave <strong>network.negotiate-auth.delegation-uris</strong>
+ blank, as it enables Kerberos ticket passing, which is not required. If you do not see those two config
+ options listed, your version of Firefox or Mozilla may be too old to support Negotiate authentication, and
+ you should consider upgrading.
+ <br/>
+ <strong>FC5 Update:</strong> Firefox and Mozilla on FC5 are attempting to load a library by its unversioned name, which is
+ not installed by default. A fix has been checked in upstream, but in the meantime, the workaround is to set
+ <strong>network.negotiate-auth.gsslib</strong> to <code>libgssapi_krb5.so.2</code>.
+ <br/>
+ <strong>FC5 Update Update:</strong> If you are using the most recent Firefox or Mozilla, this workaround is
+ no longer necessary.
+ </p>
+ <p>
+ Now, make sure you have Kerberos tickets. Typing <em>kinit</em> in a shell should allow you to
+ retrieve Kerberos tickets. <em>klist</em> will show you what tickets you have.
+ <br/>
+ </p>
+ <p>
+ Now, if you visit a Kerberos-authenticated website in the .example.com domain, you should be logged in
+ automatically, without having to type in your password.
+ </p>
+ <p>
+ <h4>Troubleshooting</h4>
+ If you have followed the configuration steps and Negotiate authentication is not working, you can
+ turn on verbose logging of the authentication process, and potentially find the cause of the problem.
+ Exit Firefox or Mozilla. In a shell, type the following commands:
+ <pre>
+export NSPR_LOG_MODULES=negotiateauth:5
+export NSPR_LOG_FILE=/tmp/moz.log
+ </pre>
+ Then restart Firefox or Mozilla from that shell, and visit the website you were unable to authenticate
+ to earlier. Information will be logged to <em>/tmp/moz.log</em>, which may give a clue to the problem.
+ For example:
+ <pre>
+-1208550944[90039d0]: entering nsNegotiateAuth::GetNextToken()
+-1208550944[90039d0]: gss_init_sec_context() failed: Miscellaneous failure
+No credentials cache found
+
+ </pre>
+ means that you do not have Kerberos tickets, and need to run <em>kinit</em>.
+ <br/>
+ <br/>
+ If you are able to <em>kinit</em> successfully from your machine but you are unable to authenticate, and you see
+ something like this in your log:
+ <pre>
+-1208994096[8d683d8]: entering nsAuthGSSAPI::GetNextToken()
+-1208994096[8d683d8]: gss_init_sec_context() failed: Miscellaneous failure
+Server not found in Kerberos database
+ </pre>
+ it generally indicates a Kerberos configuration problem. Make sure you have the following in the
+ <code>[domain_realm]</code> section of <em>/etc/krb5.conf</em>:
+ <pre>
+ .example.com = EXAMPLE.COM
+ example.com = EXAMPLE.COM
+ </pre>
+ If nothing is showing up in the log it's possible that you're behind a proxy, and that proxy is stripping off
+ the HTTP headers required for Negotiate authentication. As a workaround, you can try to connect to the
+ server via <code>https</code> instead, which will allow the request to pass through unmodified. Then proceed to
+ debug using the log, as described above.
+ </p>
+ </body>
+</html>
diff --git a/www/kojiweb/Makefile b/www/kojiweb/Makefile
new file mode 100644
index 0000000..4feda33
--- /dev/null
+++ b/www/kojiweb/Makefile
@@ -0,0 +1,24 @@
+SUBDIRS = includes
+
+SERVERDIR = /usr/share/koji-web/scripts
+FILES = $(wildcard *.py *.chtml)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
+
+ for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(SERVERDIR) \
+ -C $$d install; [ $$? = 0 ] || exit 1; done
diff --git a/www/kojiweb/archiveinfo.chtml b/www/kojiweb/archiveinfo.chtml
new file mode 100644
index 0000000..ffda9cf
--- /dev/null
+++ b/www/kojiweb/archiveinfo.chtml
@@ -0,0 +1,147 @@
+#import koji
+#from kojiweb import util
+#import urllib
+
+#attr _PASSTHROUGH = ['archiveID', 'fileOrder', 'fileStart', 'buildrootOrder', 'buildrootStart']
+
+#include "includes/header.chtml"
+ <h4>Information for archive <a href="archiveinfo?archiveID=$archive.id">$archive.filename</a></h4>
+
+ <table>
+ <tr>
+ <th>ID</th><td>$archive.id</td>
+ </tr>
+ <tr>
+ #if $wininfo
+ <th>File Name</th><td>$koji.pathinfo.winfile($archive)</td>
+ #else
+ <th>File Name</th><td>$archive.filename</td>
+ #end if
+ </tr>
+ <tr>
+ <th>File Type</th><td>$archive_type.description</td>
+ </tr>
+ <tr>
+ <th>Build</th><td><a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a></td>
+ </tr>
+ #if $maveninfo
+ <tr>
+ <th>Maven groupId</th><td>$archive.group_id</td>
+ </tr>
+ <tr>
+ <th>Maven artifactId</th><td>$archive.artifact_id</td>
+ </tr>
+ <tr>
+ <th>Maven version</th><td>$archive.version</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Size</th><td>$archive.size</td>
+ </tr>
+ <tr>
+ <th>Checksum</th><td>$archive.checksum</td>
+ </tr>
+ #if $wininfo
+ <tr>
+ <th>Platforms</th><td>$archive.platforms</td>
+ </tr>
+ <tr>
+ <th>Flags</th><td>$archive.flags</td>
+ </tr>
+ #end if
+ #if $builtInRoot
+ <tr>
+ <th>Buildroot</th><td><a href="buildrootinfo?buildrootID=$builtInRoot.id">$builtInRoot.tag_name-$builtInRoot.id-$builtInRoot.repo_id</a></td>
+ </tr>
+ #end if
+ #if $files
+ <tr>
+ <th id="filelist">Files</th>
+ <td class="container">
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($filePages) > 1
+ <form class="pageJump">
+ Page:
+ <select onchange="javascript: window.location = 'archiveinfo?fileStart=' + this.value * $fileRange + '$util.passthrough_except($self, 'fileStart')#filelist';">
+ #for $pageNum in $filePages
+ <option value="$pageNum"#if $pageNum == $fileCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $fileStart > 0
+ <a href="archiveinfo?fileStart=#echo $fileStart - $fileRange#$util.passthrough_except($self, 'fileStart')#filelist"><<<</a>
+ #end if
+ <strong>#echo $fileStart + 1 # through #echo $fileStart + $fileCount # of $totalFiles</strong>
+ #if $fileStart + $fileCount < $totalFiles
+ <a href="archiveinfo?fileStart=#echo $fileStart + $fileRange#$util.passthrough_except($self, 'fileStart')#filelist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="archiveinfo?fileOrder=$util.toggleOrder($self, 'name', 'fileOrder')$util.passthrough_except($self, 'fileOrder', 'fileStart')#filelist">Name</a> $util.sortImage($self, 'name', 'fileOrder')</th>
+ <th><a href="archiveinfo?fileOrder=$util.toggleOrder($self, 'size', 'fileOrder')$util.passthrough_except($self, 'fileOrder', 'fileStart')#filelist">Size</a> $util.sortImage($self, 'size', 'fileOrder')</th>
+ </tr>
+ #for $file in $files
+ <tr class="$util.rowToggle($self)">
+ <td><a href="fileinfo?archiveID=$archive.id&filename=$urllib.quote($file.name)">$file.name</a></td><td>$file.size</td>
+ </tr>
+ #end for
+ </table>
+ </td>
+ </tr>
+ #end if
+ <tr>
+ <th id="buildrootlist">Component of</th>
+ <td class="container">
+ #if $len($buildroots) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($buildrootPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'archiveinfo?buildrootStart=' + this.value * $buildrootRange + '$util.passthrough_except($self, 'buildrootStart')#buildrootlist';">
+ #for $pageNum in $buildrootPages
+ <option value="$pageNum"#if $pageNum == $buildrootCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $buildrootStart > 0
+ <a href="archiveinfo?buildrootStart=#echo $buildrootStart - $buildrootRange #$util.passthrough_except($self, 'buildrootStart')#buildrootlist"><<<</a>
+ #end if
+ <strong>#echo $buildrootStart + 1 # through #echo $buildrootStart + $buildrootCount # of $totalBuildroots</strong>
+ #if $buildrootStart + $buildrootCount < $totalBuildroots
+ <a href="archiveinfo?buildrootStart=#echo $buildrootStart + $buildrootRange#$util.passthrough_except($self, 'buildrootStart')#buildrootlist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="archiveinfo?buildrootOrder=$util.toggleOrder($self, 'id', 'buildrootOrder')$util.passthrough_except($self, 'buildrootOrder', 'buildrootStart')#buildrootlist">Buildroot</a> $util.sortImage($self, 'id', 'buildrootOrder')</th>
+ <th><a href="archiveinfo?buildrootOrder=$util.toggleOrder($self, 'create_event_time', 'buildrootOrder')$util.passthrough_except($self, 'buildrootOrder', 'buildrootStart')#buildrootlist">Created</a> $util.sortImage($self, 'create_event_time', 'buildrootOrder')</th>
+ <th><a href="archiveinfo?buildrootOrder=$util.toggleOrder($self, 'state', 'buildrootOrder')$util.passthrough_except($self, 'buildrootOrder', 'buildrootStart')#buildrootlist">State</a> $util.sortImage($self, 'state', 'buildrootOrder')</th>
+ </tr>
+ #for $buildroot in $buildroots
+ <tr class="$util.rowToggle($self)">
+ <td><a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></td>
+ <td>$util.formatTime($buildroot.create_event_time)</td>
+ <td>$util.imageTag($util.brStateName($buildroot.state))</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No buildroots
+ #end if
+ </td>
+ </tr>
+ #if 'rootid' in $archive and $archive.rootid
+ <tr>
+ <th colspan="2"><a href="rpmlist?imageID=$archive.id&type=image" title="RPMs that were installed in the image">Installed RPMs</a></th>
+ </tr>
+ #end if
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/archivelist.chtml b/www/kojiweb/archivelist.chtml
new file mode 100644
index 0000000..4696e4e
--- /dev/null
+++ b/www/kojiweb/archivelist.chtml
@@ -0,0 +1,83 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ #if $type == 'component'
+ <h4>Component Archives of buildroot <a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></h4>
+ #else
+ <h4>Archives built in buildroot <a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></h4>
+ #end if
+
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="#if $type == 'component' then '3' else '2'#">
+ #if $len($archivePages) > 1
+ <form class="pageJump">
+ Page:
+ <select onchange="javascript: window.location = 'archivelist?buildrootID=$buildroot.id&start=' + this.value * $archiveRange + '$util.passthrough($self, 'order', 'type')';">
+ #for $pageNum in $archivePages
+ <option value="$pageNum"#if $pageNum == $archiveCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $archiveStart > 0
+ <a href="archivelist?buildrootID=$buildroot.id&start=#echo $archiveStart - $archiveRange #$util.passthrough($self, 'order', 'type')"><<<</a>
+ #end if
+ #if $totalArchives != 0
+ <strong>Archives #echo $archiveStart + 1 # through #echo $archiveStart + $archiveCount # of $totalArchives</strong>
+ #end if
+ #if $archiveStart + $archiveCount < $totalArchives
+ <a href="archivelist?buildrootID=$buildroot.id&start=#echo $archiveStart + $archiveRange#$util.passthrough($self, 'order', 'type')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="archivelist?buildrootID=$buildroot.id&order=$util.toggleOrder($self, 'filename')$util.passthrough($self, 'type')">Filename</a> $util.sortImage($self, 'filename')</th>
+ <th><a href="archivelist?buildrootID=$buildroot.id&order=$util.toggleOrder($self, 'type_name')$util.passthrough($self, 'type')">Type</a> $util.sortImage($self, 'type_name')</th>
+ #if $type == 'component'
+ <th><a href="archivelist?buildrootID=$buildroot.id&order=$util.toggleOrder($self, 'project')$util.passthrough($self, 'type')">Build Dependency?</a> $util.sortImage($self, 'project')</th>
+ #end if
+ </tr>
+ #if $len($archives) > 0
+ #for $archive in $archives
+ <tr class="$util.rowToggle($self)">
+ <td><a href="archiveinfo?archiveID=$archive.id">$archive.filename</a></td>
+ <td>$archive.type_name</td>
+ #if $type == 'component'
+ #set $project = $archive.project and 'yes' or 'no'
+ <td class="$project">$util.imageTag($project)</td>
+ #end if
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="#if $type == 'component' then '3' else '2'#">No Archives</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="#if $type == 'component' then '3' else '2'#">
+ #if $len($archivePages) > 1
+ <form class="pageJump">
+ Page:
+ <select onchange="javascript: window.location = 'archivelist?buildrootID=$buildroot.id&start=' + this.value * $archiveRange + '$util.passthrough($self, 'order', 'type')';">
+ #for $pageNum in $archivePages
+ <option value="$pageNum"#if $pageNum == $archiveCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $archiveStart > 0
+ <a href="archivelist?buildrootID=$buildroot.id&start=#echo $archiveStart - $archiveRange #$util.passthrough($self, 'order', 'type')"><<<</a>
+ #end if
+ #if $totalArchives != 0
+ <strong>Archives #echo $archiveStart + 1 # through #echo $archiveStart + $archiveCount # of $totalArchives</strong>
+ #end if
+ #if $archiveStart + $archiveCount < $totalArchives
+ <a href="archivelist?buildrootID=$buildroot.id&start=#echo $archiveStart + $archiveRange#$util.passthrough($self, 'order', 'type')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildinfo.chtml b/www/kojiweb/buildinfo.chtml
new file mode 100644
index 0000000..07c62fd
--- /dev/null
+++ b/www/kojiweb/buildinfo.chtml
@@ -0,0 +1,195 @@
+#import koji
+#import koji.util
+#from kojiweb import util
+
+#include "includes/header.chtml"
+#set $nvrpath = $pathinfo.build($build)
+
+ <h4>Information for build <a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a></h4>
+
+ <table>
+ <tr>
+ <th>ID</th><td>$build.id</td>
+ </tr>
+ <tr>
+ <th>Package Name</th><td><a href="packageinfo?packageID=$build.package_id">$build.package_name</a></td>
+ </tr>
+ <tr>
+ <th>Version</th><td>$build.version</td>
+ </tr>
+ <tr>
+ <th>Release</th><td>$build.release</td>
+ </tr>
+ <tr>
+ <th>Epoch</th><td>$build.epoch</td>
+ </tr>
+ #if $mavenbuild
+ <tr>
+ <th>Maven groupId</th><td>$mavenbuild.group_id</td>
+ </tr>
+ <tr>
+ <th>Maven artifactId</th><td>$mavenbuild.artifact_id</td>
+ </tr>
+ <tr>
+ <th>Maven version</th><td>$mavenbuild.version</td>
+ </tr>
+ #end if
+ #if $summary
+ <tr>
+ <th>Summary</th><td class="rpmheader">$util.escapeHTML($summary)</td>
+ </tr>
+ #end if
+ #if $description
+ <tr>
+ <th>Description</th><td class="rpmheader">$util.escapeHTML($description)</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Built by</th><td class="user-$build.owner_name"><a href="userinfo?userID=$build.owner_id">$build.owner_name</a></td>
+ </tr>
+ <tr>
+ #set $stateName = $util.stateName($build.state)
+ <th>State</th>
+ <td class="$stateName">$stateName
+ #if $build.state == $koji.BUILD_STATES.BUILDING
+ #if $currentUser and ('admin' in $perms or $build.owner_id == $currentUser.id)
+ <span class="adminLink">(<a href="cancelbuild?buildID=$build.id$util.authToken($self)">cancel</a>)</span>
+ #end if
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Started</th><td>$util.formatTimeLong($start_time)</td>
+ </tr>
+ #if $build.state == $koji.BUILD_STATES.BUILDING
+ #if $estCompletion
+ <tr>
+ <th>Est. Completion</th><td>$util.formatTimeLong($estCompletion)</td>
+ </tr>
+ #end if
+ #else
+ <tr>
+ <th>Completed</th><td>$util.formatTimeLong($build.completion_time)</td>
+ </tr>
+ #end if
+ #if $task
+ <tr>
+ <th>Task</th><td><a href="taskinfo?taskID=$task.id" class="task$util.taskState($task.state)">$koji.taskLabel($task)</a></td>
+ </tr>
+ #end if
+ <tr>
+ <th>Tags</th>
+ <td class="container">
+ #if $len($tags) > 0
+ <table class="nested">
+ #for $tag in $tags
+ <tr>
+ <td><a href="taginfo?tagID=$tag.id">$tag.name</a></td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No tags
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>RPMs</th>
+ <td class="container">
+ #if $len($rpmsByArch) > 0
+ <table class="nested">
+ #if $rpmsByArch.has_key('src')
+ <tr><th>src</th><th></th></tr>
+ #for $rpm in $rpmsByArch['src']
+ #set $rpmfile = '%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % $rpm
+ #set $rpmpath = $pathinfo.rpm($rpm)
+ <tr>
+ <td></td>
+ <td>$rpmfile (<a href="rpminfo?rpmID=$rpm.id">info</a>) (<a href="$nvrpath/$rpmpath">download</a>)</td>
+ </tr>
+ #end for
+ #end if
+ #set $arches = $rpmsByArch.keys()
+ #silent $arches.sort()
+ #for $arch in $arches
+ #if $arch == 'src'
+ #silent continue
+ #end if
+ <tr>
+ <th>$arch</th>
+ <td>
+ #if $task
+ #if $arch == 'noarch'
+ (<a href="$nvrpath/data/logs/$noarch_log_dest/">build logs</a>)
+ #else
+ (<a href="$nvrpath/data/logs/$arch/">build logs</a>)
+ #end if
+ #end if
+ </td>
+ </tr>
+ #for $rpm in $rpmsByArch[$arch] + $debuginfoByArch.get($arch, [])
+ <tr>
+ #set $rpmfile = '%(name)s-%(version)s-%(release)s.%(arch)s.rpm' % $rpm
+ #set $rpmpath = $pathinfo.rpm($rpm)
+ <td></td>
+ <td>
+ $rpmfile (<a href="rpminfo?rpmID=$rpm.id">info</a>) (<a href="$nvrpath/$rpmpath">download</a>)
+ </td>
+ </tr>
+ #end for
+ #end for
+ </table>
+ #else
+ No RPMs
+ #end if
+ </td>
+ </tr>
+ #if $archives
+ <tr>
+ <th>Archives</th>
+ <td class="container">
+ <table class="nested">
+ #set $exts = $archivesByExt.keys()
+ #for ext in $exts
+ <tr>
+ <th>$ext</th>
+ <td>
+ #if $task and $ext == $exts[0]
+ #if $mavenbuild
+ (<a href="$nvrpath/data/logs/maven/">build logs</a>)
+ #elif $winbuild
+ (<a href="$nvrpath/data/logs/win/">build logs</a>)
+ #elif $imagebuild
+ (<a href="$nvrpath/data/logs/image">build logs</a>)
+ #end if
+ #end if
+ </td>
+ </tr>
+ #for $archive in $archivesByExt[$ext]
+ <tr>
+ <td/>
+ <td>
+ #if $mavenbuild
+ $archive.filename (<a href="archiveinfo?archiveID=$archive.id">info</a>) (<a href="$pathinfo.mavenbuild($build)/$pathinfo.mavenfile($archive)">download</a>)
+ #elif $winbuild
+ $pathinfo.winfile($archive) (<a href="archiveinfo?archiveID=$archive.id">info</a>) (<a href="$pathinfo.winbuild($build)/$pathinfo.winfile($archive)">download</a>)
+ #elif $imagebuild
+ $archive.filename (<a href="archiveinfo?archiveID=$archive.id">info</a>) (<a href="$pathinfo.imagebuild($build)/$archive.filename">download</a>)
+ #end if
+ </td>
+ </tr>
+ #end for
+ #end for
+ </table>
+ </td>
+ </tr>
+ #end if
+ #if $changelog
+ <tr>
+ <th>Changelog</th>
+ <td class="changelog">$util.escapeHTML($koji.util.formatChangelog($changelog))</td>
+ </tr>
+ #end if
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildrootinfo.chtml b/www/kojiweb/buildrootinfo.chtml
new file mode 100644
index 0000000..987e787
--- /dev/null
+++ b/www/kojiweb/buildrootinfo.chtml
@@ -0,0 +1,56 @@
+#import koji
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for buildroot <a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></h4>
+
+ <table>
+ <tr>
+ <th>Host</th><td><a href="hostinfo?hostID=$buildroot.host_id">$buildroot.host_name</a></td>
+ </tr>
+ <tr>
+ <th>Arch</th><td>$buildroot.arch</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$buildroot.id</td>
+ </tr>
+ <tr>
+ <th>Task</th><td><a href="taskinfo?taskID=$task.id" class="task$util.taskState($task.state)">$koji.taskLabel($task)</a></td>
+ </tr>
+ <tr>
+ <th>State</th><td>$util.imageTag($util.brStateName($buildroot.state))</td>
+ </tr>
+ <tr>
+ <th>Created</th><td>$util.formatTimeLong($buildroot.create_event_time)</td>
+ </tr>
+ <tr>
+ <th>Retired</th><td>$util.formatTimeLong($buildroot.retire_event_time)</td>
+ </tr>
+ <tr>
+ <th>Repo ID</th><td>$buildroot.repo_id</td>
+ </tr>
+ <tr>
+ <th>Repo Tag</th><td><a href="taginfo?tagID=$buildroot.tag_id">$buildroot.tag_name</a></td>
+ </tr>
+ <tr>
+ <th>Repo State</th><td>$util.imageTag($util.repoStateName($buildroot.repo_state))</td>
+ </tr>
+ <tr>
+ <th>Repo Created</th><td>$util.formatTimeLong($buildroot.repo_create_event_time)</td>
+ </tr>
+ <tr>
+ <th colspan="2"><a href="rpmlist?buildrootID=$buildroot.id&type=component" title="RPMs that are installed into this buildroot when building packages">Component RPMs</a></th>
+ </tr>
+ <tr>
+ <th colspan="2"><a href="rpmlist?buildrootID=$buildroot.id&type=built" title="RPMs that have been built in this buildroot">Built RPMs</a></th>
+ </tr>
+ <tr>
+ <th colspan="2"><a href="archivelist?buildrootID=$buildroot.id&type=component" title="Archives that are installed into this buildroot when building packages">Component Archives</a></th>
+ </tr>
+ <tr>
+ <th colspan="2"><a href="archivelist?buildrootID=$buildroot.id&type=built" title="Archives that have been built in this buildroot">Built Archives</a></th>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/builds.chtml b/www/kojiweb/builds.chtml
new file mode 100644
index 0000000..337c466
--- /dev/null
+++ b/www/kojiweb/builds.chtml
@@ -0,0 +1,173 @@
+#import koji
+#from kojiweb import util
+
+#attr _PASSTHROUGH = ['userID', 'tagID', 'packageID', 'order', 'prefix', 'state', 'inherited', 'latest', 'type']
+
+#include "includes/header.chtml"
+
+ <h4>#if $latest then 'Latest ' else ''##if $state != None then $util.stateName($state).capitalize() + ' ' else ''##if $type == 'maven' then 'Maven ' else ''#Builds#if $package then ' of <a href="packageinfo?packageID=%i">%s</a>' % ($package.id, $package.name) else ''##if $prefix then ' starting with "%s"' % $prefix else ''##if $user then ' by <a href="userinfo?userID=%i">%s</a>' % ($user.id, $user.name) else ''##if $tag then ' in tag <a href="taginfo?tagID=%i">%s</a>' % ($tag.id, $tag.name) else ''#</h4>
+
+ <table class="data-list">
+ <tr>
+ <td colspan="#if $tag then '6' else '5'#">
+ <table class="nested">
+ <tr><td>
+ #if $tag
+ <strong>Latest</strong>:
+ </td><td>
+ <select name="latest" class="filterlist" onchange="javascript: window.location = 'builds?latest=' + this.value + '$util.passthrough_except($self, 'latest')';">
+ <option value="1" #if $latest then 'selected="selected"' else ''#>yes</option>
+ <option value="0" #if not $latest then 'selected="selected"' else ''#>no</option>
+ </select>
+ #else
+ <strong>State</strong>:
+ </td><td>
+ <select name="state" class="filterlist" onchange="javascript: window.location = 'builds?state=' + this.value + '$util.passthrough_except($self, 'state')';">
+ <option value="all">all</option>
+ #for $stateOpt in ['BUILDING', 'COMPLETE', 'FAILED', 'CANCELED']
+ <option value="$koji.BUILD_STATES[$stateOpt]" #if $state == $koji.BUILD_STATES[$stateOpt] then 'selected="selected"' else ''#>$stateOpt.lower()</option>
+ #end for
+ </select>
+ #end if
+ </td><td>
+ <strong>Built by</strong>:
+ </td><td>
+ <select name="userID" class="filterlist" onchange="javascript: window.location = 'builds?userID=' + this.value + '$util.passthrough_except($self, 'userID')';">
+ <option value="" #if not $user then 'selected="selected"' else ''#>everyone</option>
+ #if $loggedInUser
+ <option value="$loggedInUser.name">me</option>
+ #end if
+ #for $userOption in $users
+ <option value="$userOption.name" #if $userOption.name == ($user and $user.name or None) then 'selected="selected"' else ''#>$userOption.name</option>
+ #end for
+ </select>
+ </td></tr>
+ #if $tag or $mavenEnabled or $winEnabled
+ <tr>
+ #if $mavenEnabled or $winEnabled
+ <td>
+ <strong>Type</strong>:
+ </td>
+ <td>
+ <select name="type" class="filterlist" onchange="javascript: window.location='builds?type=' + this.value + '$util.passthrough_except($self, 'type')';">
+ <option value="all" #if not $type then 'selected="selected"' else ''#>all</option>
+ #if $mavenEnabled
+ <option value="maven" #if $type == 'maven' then 'selected="selected"' else ''#>Maven</option>
+ #end if
+ #if $winEnabled
+ <option value="win" #if $type == 'win' then 'selected="selected"' else ''#>Windows</option>
+ #end if
+ <option value="image" #if $type == 'image' then 'selected="selected"' else ''#>Image</option>
+ </select>
+ </td>
+ #end if
+ #if $tag
+ <td>
+ <strong>Inherited</strong>:
+ </td><td>
+ <select name="inherited" class="filterlist" onchange="javascript: window.location = 'builds?inherited=' + this.value + '$util.passthrough_except($self, 'inherited')';">
+ <option value="1" #if $inherited then 'selected="selected"' else ''#>yes</option>
+ <option value="0" #if not $inherited then 'selected="selected"' else ''#>no</option>
+ </select>
+ </td>
+ #end if
+ </tr>
+ #end if
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <td class="charlist" colspan="#if $tag then '6' else '5'#">
+ #for $char in $chars
+ #if $prefix == $char
+ <strong>$char</strong>
+ #else
+ <a href="builds?prefix=$char$util.passthrough_except($self, 'prefix')">$char</a>
+ #end if
+ |
+ #end for
+ #if $prefix
+ <a href="builds?${util.passthrough_except($self, 'prefix')[1:]}">all</a>
+ #else
+ <strong>all</strong>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="#if $tag then '6' else '5'#">
+ #if $len($buildPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'builds?start=' + this.value * $buildRange + '$util.passthrough_except($self)';">
+ #for $pageNum in $buildPages
+ <option value="$pageNum"#if $pageNum == $buildCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $buildStart > 0
+ <a href="builds?start=#echo $buildStart - $buildRange #$util.passthrough_except($self)"><<<</a>
+ #end if
+ #if $totalBuilds != 0
+ <strong>Builds #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds</strong>
+ #end if
+ #if $buildStart + $buildCount < $totalBuilds
+ <a href="builds?start=#echo $buildStart + $buildRange#$util.passthrough_except($self)">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="builds?order=$util.toggleOrder($self, 'build_id')$util.passthrough_except($self, 'order')">ID</a> $util.sortImage($self, 'build_id')</th>
+ <th><a href="builds?order=$util.toggleOrder($self, 'nvr')$util.passthrough_except($self, 'order')">NVR</a> $util.sortImage($self, 'nvr')</th>
+ #if $tag
+ <th><a href="builds?order=$util.toggleOrder($self, 'tag_name')$util.passthrough_except($self, 'order')">Tag</a> $util.sortImage($self, 'tag_name')</th>
+ #end if
+ <th><a href="builds?order=$util.toggleOrder($self, 'owner_name')$util.passthrough_except($self, 'order')">Built by</a> $util.sortImage($self, 'owner_name')</th>
+ <th><a href="builds?order=$util.toggleOrder($self, 'completion_time')$util.passthrough_except($self, 'order')">Finished</a> $util.sortImage($self, 'completion_time')</th>
+ <th><a href="builds?order=$util.toggleOrder($self, 'state')$util.passthrough_except($self, 'order')">State</a> $util.sortImage($self, 'state')</th>
+ </tr>
+ #if $len($builds) > 0
+ #for $build in $builds
+ <tr class="$util.rowToggle($self)">
+ <td>$build.build_id</td>
+ <td><a href="buildinfo?buildID=$build.build_id">$koji.buildLabel($build)</a></td>
+ #if $tag
+ <td><a href="taginfo?tagID=$build.tag_id">$build.tag_name</a></td>
+ #end if
+ <td class="user-$build.owner_name"><a href="userinfo?userID=$build.owner_id">$build.owner_name</a></td>
+ <td>$util.formatTime($build.completion_time)</td>
+ #set $stateName = $util.stateName($build.state)
+ <td class="$stateName">$util.stateImage($build.state)</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="#if $tag then '6' else '5'#">No builds</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="#if $tag then '6' else '5'#">
+ #if $len($buildPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'builds?start=' + this.value * $buildRange + '$util.passthrough_except($self)';">
+ #for $pageNum in $buildPages
+ <option value="$pageNum"#if $pageNum == $buildCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $buildStart > 0
+ <a href="builds?start=#echo $buildStart - $buildRange #$util.passthrough_except($self)"><<<</a>
+ #end if
+ #if $totalBuilds != 0
+ <strong>Builds #echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds</strong>
+ #end if
+ #if $buildStart + $buildCount < $totalBuilds
+ <a href="builds?start=#echo $buildStart + $buildRange#$util.passthrough_except($self)">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildsbystatus.chtml b/www/kojiweb/buildsbystatus.chtml
new file mode 100644
index 0000000..be7b6cb
--- /dev/null
+++ b/www/kojiweb/buildsbystatus.chtml
@@ -0,0 +1,55 @@
+#from kojiweb import util
+
+#def printOption(value, label=None)
+#if not $label
+#set $label = $value
+#end if
+<option value="$value"#if $value == $days then ' selected="selected"' else ''#>$label</option>
+#end def
+
+#include "includes/header.chtml"
+
+ <h4>Succeeded/Failed/Canceled Builds#if $days != -1 then ' in the last %i days' % $days else ''#</h4>
+ <table class="data-list">
+ <tr style="text-align: left">
+ <td colspan="3">
+ <form action="">
+ Show last
+ <select onchange="javascript: window.location = 'buildsbystatus?days=' + this.value;">
+ $printOption(1)
+ $printOption(3)
+ $printOption(5)
+ $printOption(7)
+ $printOption(14)
+ $printOption(30)
+ $printOption(60)
+ $printOption(90)
+ $printOption(120)
+ $printOption(-1, 'all')
+ </select> days
+ </form>
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th>Type</th>
+ <th>Builds</th>
+ <th> </th>
+ </tr>
+ <tr class="row-odd taskclosed">
+ <td>Succeeded</td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $numSucceeded#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$numSucceeded</td>
+ </tr>
+ <tr class="row-even taskfailed">
+ <td>Failed</td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $numFailed#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$numFailed</td>
+ </tr>
+ <tr class="row-odd taskcanceled">
+ <td>Canceled</td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $numCanceled#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$numCanceled</td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildsbytarget.chtml b/www/kojiweb/buildsbytarget.chtml
new file mode 100644
index 0000000..97a7e7b
--- /dev/null
+++ b/www/kojiweb/buildsbytarget.chtml
@@ -0,0 +1,99 @@
+#from kojiweb import util
+
+#def printOption(value, label=None)
+#if not $label
+#set $label = $value
+#end if
+<option value="$value"#if $value == $days then ' selected="selected"' else ''#>$label</option>
+#end def
+
+#include "includes/header.chtml"
+
+ <h4>Builds by Target#if $days != -1 then ' in the last %i days' % $days else ''#</h4>
+ <table class="data-list">
+ <tr style="text-align: left">
+ <td colspan="3">
+ <form action="">
+ Show last
+ <select onchange="javascript: window.location = 'buildsbytarget?days=' + this.value + '$util.passthrough($self, 'order')';">
+ $printOption(1)
+ $printOption(3)
+ $printOption(5)
+ $printOption(7)
+ $printOption(14)
+ $printOption(30)
+ $printOption(60)
+ $printOption(90)
+ $printOption(120)
+ $printOption(-1, 'all')
+ </select> days
+ </form>
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($targetPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'buildsbytarget?start=' + this.value * $targetRange + '$util.passthrough($self, 'days', 'order')';">
+ #for $pageNum in $targetPages
+ <option value="$pageNum"#if $pageNum == $targetCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $targetStart > 0
+ <a href="buildsbytarget?start=#echo $targetStart - $targetRange #$util.passthrough($self, 'days', 'order')"><<<</a>
+ #end if
+ #if $totalTargets != 0
+ <strong>Build Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets</strong>
+ #end if
+ #if $targetStart + $targetCount < $totalTargets
+ <a href="buildsbytarget?start=#echo $targetStart + $targetRange#$util.passthrough($self, 'days', 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="buildsbytarget?order=$util.toggleOrder($self, 'name')$util.passthrough($self, 'days')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="buildsbytarget?order=$util.toggleOrder($self, 'builds')$util.passthrough($self, 'days')">Builds</a> $util.sortImage($self, 'builds')</th>
+ <th> </th>
+ </tr>
+ #if $len($targets) > 0
+ #for $target in $targets
+ <tr class="$util.rowToggle($self)">
+ <td><a href="buildtargetinfo?name=$target.name">$target.name</a></td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $target.builds#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$target.builds</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="3">No builds</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($targetPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'buildsbytarget?start=' + this.value * $targetRange + '$util.passthrough($self, 'days', 'order')';">
+ #for $pageNum in $targetPages
+ <option value="$pageNum"#if $pageNum == $targetCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $targetStart > 0
+ <a href="buildsbytarget?start=#echo $targetStart - $targetRange #$util.passthrough($self, 'days', 'order')"><<<</a>
+ #end if
+ #if $totalTargets != 0
+ <strong>Build Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets</strong>
+ #end if
+ #if $targetStart + $targetCount < $totalTargets
+ <a href="buildsbytarget?start=#echo $targetStart + $targetRange#$util.passthrough($self, 'days', 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildsbyuser.chtml b/www/kojiweb/buildsbyuser.chtml
new file mode 100644
index 0000000..002f645
--- /dev/null
+++ b/www/kojiweb/buildsbyuser.chtml
@@ -0,0 +1,73 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Builds by User</h4>
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($userBuildPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'buildsbyuser?start=' + this.value * $userBuildRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $userBuildPages
+ <option value="$pageNum"#if $pageNum == $userBuildCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userBuildStart > 0
+ <a href="buildsbyuser?start=#echo $userBuildStart - $userBuildRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalUserBuilds != 0
+ <strong>Users #echo $userBuildStart + 1 # through #echo $userBuildStart + $userBuildCount # of $totalUserBuilds</strong>
+ #end if
+ #if $userBuildStart + $userBuildCount < $totalUserBuilds
+ <a href="buildsbyuser?start=#echo $userBuildStart + $userBuildRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="buildsbyuser?order=$util.toggleOrder($self, 'name')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="buildsbyuser?order=$util.toggleOrder($self, 'builds')">Builds</a> $util.sortImage($self, 'builds')</th>
+ <th> </th>
+ </tr>
+ #if $len($userBuilds) > 0
+ #for $userBuild in $userBuilds
+ <tr class="$util.rowToggle($self)">
+ <td><a href="userinfo?userID=$userBuild.id">$userBuild.name</a></td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $userBuild.builds#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$userBuild.builds</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="3">No users</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($userBuildPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'buildsbyuser?start=' + this.value * $userBuildRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $userBuildPages
+ <option value="$pageNum"#if $pageNum == $userBuildCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userBuildStart > 0
+ <a href="buildsbyuser?start=#echo $userBuildStart - $userBuildRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalUserBuilds != 0
+ <strong>Users #echo $userBuildStart + 1 # through #echo $userBuildStart + $userBuildCount # of $totalUserBuilds</strong>
+ #end if
+ #if $userBuildStart + $userBuildCount < $totalUserBuilds
+ <a href="buildsbyuser?start=#echo $userBuildStart + $userBuildRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildtargetedit.chtml b/www/kojiweb/buildtargetedit.chtml
new file mode 100644
index 0000000..647a87c
--- /dev/null
+++ b/www/kojiweb/buildtargetedit.chtml
@@ -0,0 +1,63 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ #if $target
+ <h4>Edit target $target.name</h4>
+ #else
+ <h4>Create build target</h4>
+ #end if
+
+ <form action="#if $target then 'buildtargetedit' else 'buildtargetcreate'#">
+ $util.authToken($self, form=True)
+ #if $target
+ <input type="hidden" name="targetID" value="$target.id"/>
+ #end if
+ <table>
+ <tr>
+ <th>Name</th>
+ <td>
+ <input type="text" name="name" size="50" value="#if $target then $target.name else ''#"/>
+ </td>
+ </tr>
+ #if $target
+ <tr>
+ <th>ID</th><td>$target.id</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Build Tag</th>
+ <td>
+ <select name="buildTag">
+ <option value="">select tag</option>
+ #for $tag in $tags
+ <option value="$tag.id"#if $target and $target.build_tag == $tag.id then ' selected="selected"' else ''#>$tag.name</option>
+ #end for
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>Destination Tag</th>
+ <td>
+ <select name="destTag">
+ <option value="">select tag</option>
+ #for $tag in $tags
+ <option value="$tag.id"#if $target and $target.dest_tag == $tag.id then ' selected="selected"' else ''#>$tag.name</option>
+ #end for
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ #if $target
+ <button type="submit" name="save" value="Save">Save</button>
+ #else
+ <button type="submit" name="add" value="Add">Add</button>
+ #end if
+ </td>
+ <td><button type="submit" name="cancel" value="Cancel">Cancel</button></td>
+ </tr>
+ </table>
+ </form>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildtargetinfo.chtml b/www/kojiweb/buildtargetinfo.chtml
new file mode 100644
index 0000000..42a51f6
--- /dev/null
+++ b/www/kojiweb/buildtargetinfo.chtml
@@ -0,0 +1,30 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for target <a href="buildtargetinfo?targetID=$target.id">$target.name</a></h4>
+
+ <table>
+ <tr>
+ <th>Name</th><td>$target.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$target.id</td>
+ </tr>
+ <tr>
+ <th>Build Tag</th><td><a href="taginfo?tagID=$buildTag.id">$buildTag.name</a></td>
+ </tr>
+ <tr>
+ <th>Destination Tag</th><td><a href="taginfo?tagID=$destTag.id">$destTag.name</a></td>
+ </tr>
+ #if 'admin' in $perms
+ <tr>
+ <td colspan="2"><a href="buildtargetedit?targetID=$target.id$util.authToken($self)">Edit</a></td>
+ </tr>
+ <tr>
+ <td colspan="2"><a href="buildtargetdelete?targetID=$target.id$util.authToken($self)">Delete</a></td>
+ </tr>
+ #end if
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/buildtargets.chtml b/www/kojiweb/buildtargets.chtml
new file mode 100644
index 0000000..ad02cbd
--- /dev/null
+++ b/www/kojiweb/buildtargets.chtml
@@ -0,0 +1,76 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Build Targets</h4>
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($targetPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'buildtargets?start=' + this.value * $targetRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $targetPages
+ <option value="$pageNum"#if $pageNum == $targetCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $targetStart > 0
+ <a href="buildtargets?start=#echo $targetStart - $targetRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalTargets != 0
+ <strong>Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets</strong>
+ #end if
+ #if $targetStart + $targetCount < $totalTargets
+ <a href="buildtargets?start=#echo $targetStart + $targetRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="buildtargets?order=$util.toggleOrder($self, 'id')">ID</a> $util.sortImage($self, 'id')</th>
+ <th><a href="buildtargets?order=$util.toggleOrder($self, 'name')">Name</a> $util.sortImage($self, 'name')</th>
+ </tr>
+ #if $len($targets) > 0
+ #for $target in $targets
+ <tr class="$util.rowToggle($self)">
+ <td>$target.id</td>
+ <td><a href="buildtargetinfo?targetID=$target.id">$target.name</a></td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="2">No build targets</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($targetPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'buildtargets?start=' + this.value * $targetRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $targetPages
+ <option value="$pageNum"#if $pageNum == $targetCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $targetStart > 0
+ <a href="buildtargets?start=#echo $targetStart - $targetRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalTargets != 0
+ <strong>Targets #echo $targetStart + 1 # through #echo $targetStart + $targetCount # of $totalTargets</strong>
+ #end if
+ #if $targetStart + $targetCount < $totalTargets
+ <a href="buildtargets?start=#echo $targetStart + $targetRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+ #if 'admin' in $perms
+ <br/>
+ <a href="buildtargetcreate$util.authToken($self, first=True)">Create new Build Target</a>
+ #end if
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/channelinfo.chtml b/www/kojiweb/channelinfo.chtml
new file mode 100644
index 0000000..31dcdc5
--- /dev/null
+++ b/www/kojiweb/channelinfo.chtml
@@ -0,0 +1,31 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for channel <a href="channelinfo?channelID=$channel.id">$channel.name</a></h4>
+
+ <table>
+ <tr>
+ <th>Name</th><td>$channel.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$channel.id</td>
+ </tr>
+ <tr>
+ <th>Active Tasks</th><td><a href="tasks?view=flat&channelID=$channel.id">$taskCount</a></td>
+ </tr>
+ <tr>
+ <th>Hosts</th>
+ <td>
+ #if $len($hosts) > 0
+ #for $host in $hosts
+ <a href="hostinfo?hostID=$host.id">$host.name</a><br/>
+ #end for
+ #else
+ No hosts
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/error.chtml b/www/kojiweb/error.chtml
new file mode 100644
index 0000000..5113805
--- /dev/null
+++ b/www/kojiweb/error.chtml
@@ -0,0 +1,30 @@
+
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Error</h4>
+
+<div>
+$util.escapeHTML($explanation)
+</div>
+
+#if $debug_level >= 1
+<div>
+#else
+<div style="visibility: hidden">
+#end if
+$util.escapeHTML($tb_short)
+</div>
+
+#if $debug_level >= 2
+<div>
+#else
+<div style="visibility: hidden">
+#end if
+<pre>
+#echo $util.escapeHTML($tb_long)
+</pre>
+</div>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/externalrepoinfo.chtml b/www/kojiweb/externalrepoinfo.chtml
new file mode 100644
index 0000000..140331a
--- /dev/null
+++ b/www/kojiweb/externalrepoinfo.chtml
@@ -0,0 +1,31 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for external repo <a href="externalrepoinfo?extrepoID=$extRepo.id">$extRepo.name</a></h4>
+
+ <table>
+ <tr>
+ <th>Name</th><td>$extRepo.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$extRepo.id</td>
+ </tr>
+ <tr>
+ <th>URL</th><td><a href="$extRepo.url">$extRepo.url</a></td>
+ </tr>
+ <tr>
+ <th>Tags using this external repo</th>
+ <td>
+ #if $len($repoTags)
+ #for $tag in $repoTags
+ <a href="taginfo?tagID=$tag.tag_id">$tag.tag_name</a><br/>
+ #end for
+ #else
+ No tags
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/fileinfo.chtml b/www/kojiweb/fileinfo.chtml
new file mode 100644
index 0000000..7d2e1f7
--- /dev/null
+++ b/www/kojiweb/fileinfo.chtml
@@ -0,0 +1,64 @@
+#from kojiweb import util
+#import urllib
+#import datetime
+
+#include "includes/header.chtml"
+ #if $rpm
+ <h4>Information for file <a href="fileinfo?rpmID=$rpm.id&filename=$urllib.quote($file.name)">$file.name</a></h4>
+ #elif $archive
+ <h4>Information for file <a href="fileinfo?archiveID=$archive.id&filename=$urllib.quote($file.name)">$file.name</a></h4>
+ #end if
+
+ <table>
+ <tr>
+ <th>Name</th><td>$file.name</td>
+ </tr>
+ #if $rpm
+ <tr>
+ <th>Digest ($file.digest_algo)</th><td>$file.digest</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Size</th><td>$file.size</td>
+ </tr>
+ #if $file.has_key('mtime') and $file.mtime
+ <tr>
+ <th>Modification time</th><td>$util.formatTimeLong($datetime.datetime.fromtimestamp($file.mtime))</td>
+ </tr>
+ #end if
+ #if $file.has_key('user') and $file.user
+ <tr>
+ <th>User</th><td>$file.user</td>
+ </tr>
+ #end if
+ #if $file.has_key('group') and $file.group
+ <tr>
+ <th>Group</th><td>$file.group</td>
+ </tr>
+ #end if
+ #if $file.has_key('mode') and $file.mode
+ <tr>
+ <th>Mode</th><td class="rpmheader">$util.formatMode($file.mode)</td>
+ </tr>
+ #end if
+ #if $rpm
+ <tr>
+ <th>Flags</th>
+ <td>
+ #for flag in $util.formatFileFlags($file.flags)
+ $flag<br/>
+ #end for
+ </td>
+ </tr>
+ <tr>
+ #set $epoch = ($rpm.epoch != None and $str($rpm.epoch) + ':' or '')
+ <th>RPM</th><td><a href="rpminfo?rpmID=$rpm.id">$rpm.name-$epoch$rpm.version-$rpm.release.${rpm.arch}.rpm</a></td>
+ </tr>
+ #elif $archive
+ <tr>
+ <th>Archive</th><td><a href="archiveinfo?archiveID=$archive.id">$archive.filename</a></td>
+ </tr>
+ #end if
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/hostedit.chtml b/www/kojiweb/hostedit.chtml
new file mode 100644
index 0000000..00a64c9
--- /dev/null
+++ b/www/kojiweb/hostedit.chtml
@@ -0,0 +1,57 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Edit host $host.name</h4>
+
+ <form action="hostedit">
+ $util.authToken($self, form=True)
+ <table>
+ <tr>
+ <th>Name</th>
+ <td>$host.name</td>
+ </tr>
+ <tr>
+ <th>ID</th>
+ <td>
+ $host.id
+ <input type="hidden" name="hostID" value="$host.id"/>
+ </td>
+ </tr>
+ <tr>
+ <th>Arches</th>
+ <td><input type="text" name="arches" value="$host.arches"/></td>
+ </tr>
+ <tr>
+ <th>Capacity</th>
+ <td><input type="text" name="capacity" value="$host.capacity"/></td>
+ </tr>
+ <tr>
+ <th>Description</th>
+ <td><textarea name="description" rows="6" cols="50">$util.escapeHTML($host.description)</textarea></td>
+ </tr>
+ <tr>
+ <th>Comment</th>
+ <td><textarea name="comment" rows="2" cols="50">$util.escapeHTML($host.comment)</textarea></td>
+ </tr>
+ <tr>
+ <th>Enabled?</th>
+ <td><input type="checkbox" name="enabled" value="yes" #if $host.enabled then 'checked="checked"' else ''#/></td>
+ </tr>
+ <tr>
+ <th>Channels</th>
+ <td>
+ <select name="channels" multiple="multiple">
+ #for $channel in $allChannels
+ <option value="$channel.name" #if $channel in $hostChannels then 'selected="selected"' else ''#>$channel.name</option>
+ #end for
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <td><button type="submit" name="save" value="Save">Save</button></td>
+ <td><button type="submit" name="cancel" value="Cancel">Cancel</button></td>
+ </tr>
+ </table>
+ </form>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/hostinfo.chtml b/www/kojiweb/hostinfo.chtml
new file mode 100644
index 0000000..c7674d9
--- /dev/null
+++ b/www/kojiweb/hostinfo.chtml
@@ -0,0 +1,91 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for host <a href="hostinfo?hostID=$host.id">$host.name</a></h4>
+
+ <table>
+ <tr>
+ <th>Name</th><td>$host.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$host.id</td>
+ </tr>
+ <tr>
+ <th>Arches</th><td>$host.arches</td>
+ </tr>
+ <tr>
+ <th>Capacity</th><td>$host.capacity</td>
+ </tr>
+ <tr>
+ <th>Task Load</th><td><a href="tasks?hostID=$host.id">#echo '%.2f' % $host.task_load#</a></td>
+ </tr>
+ <tr>
+ <th>Description</th><td class="usertext">$util.escapeHTML($host.description)</td>
+ </tr>
+ <tr>
+ <th>Comment</th><td class="usertext">$util.escapeHTML($host.comment)</td>
+ </tr>
+ <tr>
+ #set $enabled = $host.enabled and 'yes' or 'no'
+ <th>Enabled?</th>
+ <td class="$enabled">
+ $util.imageTag($enabled)
+ #if 'admin' in $perms
+ #if $host.enabled
+ <span class="adminLink">(<a href="disablehost?hostID=$host.id$util.authToken($self)">disable</a>)</span>
+ #else
+ <span class="adminLink">(<a href="enablehost?hostID=$host.id$util.authToken($self)">enable</a>)</span>
+ #end if
+ #end if
+ </td>
+ </tr>
+ <tr>
+ #set $ready = $host.ready and 'yes' or 'no'
+ <th>Ready?</th><td class="$ready">$util.imageTag($ready)</td>
+ </tr>
+ <tr>
+ <th>Last Update</th><td>$util.formatTime($lastUpdate)</td>
+ </tr>
+ <tr>
+ <th>Channels</th>
+ <td>
+ #for $channel in $channels
+ <a href="channelinfo?channelID=$channel.id">$channel.name</a><br/>
+ #end for
+ #if not $channels
+ No channels
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Active Buildroots</th>
+ #if $buildroots
+ <td class="container">
+ <table class="nested data-list">
+ <tr class="list-header">
+ <th>Buildroot</th><th>Created</th><th>State</th>
+ </tr>
+ #for $buildroot in $buildroots
+ <tr class="$util.rowToggle($self)">
+ <td><a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></td>
+ <td>$util.formatTime($buildroot.create_event_time)</td>
+ <td>$util.imageTag($util.brStateName($buildroot.state))</td>
+ </tr>
+ #end for
+ </table>
+ </td>
+ #else
+ <td>
+ No buildroots
+ </td>
+ #end if
+ </tr>
+ #if 'admin' in $perms
+ <tr>
+ <td colspan="2"><a href="hostedit?hostID=$host.id$util.authToken($self)">Edit host</a></td>
+ </tr>
+ #end if
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/hosts.chtml b/www/kojiweb/hosts.chtml
new file mode 100644
index 0000000..0750f77
--- /dev/null
+++ b/www/kojiweb/hosts.chtml
@@ -0,0 +1,96 @@
+#from kojiweb import util
+
+#attr _PASSTHROUGH = ['state', 'order']
+
+#include "includes/header.chtml"
+
+ <h4>Hosts</h4>
+ <table class="data-list">
+ <tr>
+ <td colspan="6">
+ <table class="nested">
+ <tr><td>
+ <strong>State</strong>:
+ </td><td>
+ <select name="state" class="filterlist" onchange="javascript: window.location = 'hosts?state=' + this.value + '$util.passthrough_except($self, 'state')';">
+ <option value="enabled" #if $state == 'enabled' then 'selected="selected"' else ''#>enabled</option>
+ <option value="disabled" #if $state == 'disabled' then 'selected="selected"' else ''#>disabled</option>
+ <option value="all" #if $state == 'all' then 'selected="selected"' else ''#>all</option>
+ </select>
+ </td></tr>
+ </table>
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="6">
+ #if $len($hostPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'hosts?start=' + this.value * $hostRange + '$util.passthrough_except($self)';">
+ #for $pageNum in $hostPages
+ <option value="$pageNum"#if $pageNum == $hostCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $hostStart > 0
+ <a href="hosts?start=#echo $hostStart - $hostRange #$util.passthrough_except($self)"><<<</a>
+ #end if
+ #if $totalHosts != 0
+ <strong>Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts</strong>
+ #end if
+ #if $hostStart + $hostCount < $totalHosts
+ <a href="hosts?start=#echo $hostStart + $hostRange#$util.passthrough_except($self)">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="hosts?order=$util.toggleOrder($self, 'id')$util.passthrough_except($self, 'order')">ID</a> $util.sortImage($self, 'id')</th>
+ <th><a href="hosts?order=$util.toggleOrder($self, 'name')$util.passthrough_except($self, 'order')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="hosts?order=$util.toggleOrder($self, 'arches')$util.passthrough_except($self, 'order')">Arches</a> $util.sortImage($self, 'arches')</th>
+ <th><a href="hosts?order=$util.toggleOrder($self, 'enabled')$util.passthrough_except($self, 'order')">Enabled?</a> $util.sortImage($self, 'enabled')</th>
+ <th><a href="hosts?order=$util.toggleOrder($self, 'ready')$util.passthrough_except($self, 'order')">Ready?</a> $util.sortImage($self, 'ready')</th>
+ <th><a href="hosts?order=$util.toggleOrder($self, 'last_update')$util.passthrough_except($self, 'order')">Last Update</a> $util.sortImage($self, 'last_update')</th>
+ </tr>
+ #if $len($hosts) > 0
+ #for $host in $hosts
+ <tr class="$util.rowToggle($self)">
+ <td>$host.id</td>
+ <td><a href="hostinfo?hostID=$host.id">$host.name</a></td>
+ <td>$host.arches</td>
+ <td class="$str($bool($host.enabled)).lower()">#if $host.enabled then $util.imageTag('yes') else $util.imageTag('no')#</td>
+ <td class="$str($bool($host.ready)).lower()">#if $host.ready then $util.imageTag('yes') else $util.imageTag('no')#</td>
+ <td>$util.formatTime($host.last_update)</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="6">No hosts</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="6">
+ #if $len($hostPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'hosts?start=' + this.value * $hostRange + '$util.passthrough_except($self)';">
+ #for $pageNum in $hostPages
+ <option value="$pageNum"#if $pageNum == $hostCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $hostStart > 0
+ <a href="hosts?start=#echo $hostStart - $hostRange #$util.passthrough_except($self)"><<<</a>
+ #end if
+ #if $totalHosts != 0
+ <strong>Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts</strong>
+ #end if
+ #if $hostStart + $hostCount < $totalHosts
+ <a href="hosts?start=#echo $hostStart + $hostRange#$util.passthrough_except($self)">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/imageinfo.chtml b/www/kojiweb/imageinfo.chtml
new file mode 100644
index 0000000..69a9e50
--- /dev/null
+++ b/www/kojiweb/imageinfo.chtml
@@ -0,0 +1,60 @@
+#import koji
+#import koji.util
+#from os.path import basename
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+<h4>Information for image <a href="imageinfo?imageID=$image.id">$image.filename</a></h4>
+
+<table>
+ <tr>
+ <th>ID</th><td>$image.id</td>
+ </tr>
+ <tr>
+ <th>File Name</th><td>$image.filename</td>
+ </tr>
+ <tr>
+ <th>File Size</th><td>$image.filesize</td>
+ </tr>
+ <tr>
+ <th>Arch</th><td>$image.arch</td>
+ </tr>
+ <tr>
+ <th>Media Type</th><td>$image.mediatype</td>
+ </tr>
+ <tr>
+ #if $len($image.hash) == 32
+ <th>Digest (md5)</th><td>$image.hash</td>
+ #elif $len($image.hash) == 40
+ <th>Digest (sha1)</th><td>$image.hash</td>
+ #elif $len($image.hash) == 64
+ <th>Digest (sha256)</th><td>$image.hash</td>
+ #elif $len($image.hash) == 96
+ <th>Digest (sha384)</th><td>$image.hash</td>
+ #elif $len($image.hash) == 128
+ <th>Digest (sha512)</th><td>$image.hash</td>
+ #else
+ <th>Hash </th><td>$image.hash</td>
+ #end if
+ </tr>
+ <tr>
+ <th>Task</th><td><a href="taskinfo?taskID=$task.id" class="task$util.taskState($task.state)">$koji.taskLabel($task)</a></td>
+ </tr>
+ <tr>
+ <th>Buildroot</th><td><a href="buildrootinfo?buildrootID=$buildroot.id">/var/lib/mock/$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></td>
+ </tr>
+ <tr>
+ <th colspan="2"><a href="rpmlist?imageID=$image.id&type=image" title="RPMs that were installed into the LiveCD">Included RPMs</a></th>
+ </tr>
+ <tr>
+ <th colspan="2"><a href="$imageBase/$image.filename">Download Image</a> (<a href="$imageBase/data/logs/$image.arch/">build logs</a>)</th>
+ </tr>
+ #if $image.get('xmlfile', None)
+ <tr>
+ <th colspan="2"><a href="$imageBase/$image.xmlfile">Download XML Description</a></th>
+ </tr>
+ #end if
+</table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/includes/Makefile b/www/kojiweb/includes/Makefile
new file mode 100644
index 0000000..ab25127
--- /dev/null
+++ b/www/kojiweb/includes/Makefile
@@ -0,0 +1,18 @@
+SERVERDIR = /includes
+FILES = $(wildcard *.chtml)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
diff --git a/www/kojiweb/includes/footer.chtml b/www/kojiweb/includes/footer.chtml
new file mode 100644
index 0000000..8e22022
--- /dev/null
+++ b/www/kojiweb/includes/footer.chtml
@@ -0,0 +1,23 @@
+#from kojiweb import util
+ </div>
+
+ <p id="footer">
+ Copyright © 2006-2014 Red Hat, Inc.
+ <a href="https://fedorahosted.org/koji/"><img src="$util.themePath('images/powered-by-koji.png')" alt="Powered By Koji" id="PoweredByKojiLogo"/></a>
+ </p>
+
+#set $localfooterpath=$util.themePath("extra-footer.html", local=True)
+#if os.path.exists($localfooterpath)
+#if $literalFooter
+#set $localfooter="".join(open($localfooterpath).readlines())
+#$localfooter
+#else
+#include $localfooterpath
+#end if
+#end if
+
+ </div>
+ </div>
+
+ </body>
+</html>
diff --git a/www/kojiweb/includes/header.chtml b/www/kojiweb/includes/header.chtml
new file mode 100644
index 0000000..ff04063
--- /dev/null
+++ b/www/kojiweb/includes/header.chtml
@@ -0,0 +1,103 @@
+#encoding utf-8
+#import koji
+#from kojiweb import util
+#import random
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+
+#def greeting()
+#set $greetings = ('hello', 'hi', 'yo', "what's up", "g'day", 'back to work',
+ 'bonjour',
+ 'hallo',
+ 'ciao',
+ 'hola',
+ u'olá',
+ u'dobrý den',
+ u'zdravstvuite',
+ u'góðan daginn',
+ 'hej',
+ 'tervehdys',
+ u'grüezi',
+ u'céad míle fáilte',
+ u'hylô',
+ u'bună ziua',
+ u'jó napot',
+ 'dobre dan',
+ u'你好',
+ u'こんにちは',
+ u'नमस्कार',
+ u'안녕하세요')
+#echo $random.choice($greetings)##slurp
+#end def
+
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+ <title>$title | $siteName</title>
+ <link rel="shortcut icon" href="$util.themePath('images/koji.ico')"/>
+ <link rel="stylesheet" type="text/css" media="screen" title="Koji Style" href="$util.themePath('koji.css')"/>
+ <link rel="alternate stylesheet" type="text/css" media="screen" title="Debug" href="$util.themePath('debug.css')"/>
+ <link rel="alternate" type="application/rss+xml" title="Koji: recent builds" href="/koji/recentbuilds"/>
+ </head>
+ <body id="$pageID">
+
+ <div id="wrap">
+ <div id="innerwrap">
+
+ <!-- HEADER -->
+ <div id="header">
+ <img src="$util.themePath('images/koji.png')" alt="Koji Logo" id="kojiLogo"/>
+#set $localnavpath=$util.themePath("extra-nav.html", local=True)
+#if os.path.exists($localnavpath)
+#set $localnav="".join(open($localnavpath).readlines())
+$localnav
+#end if
+ <form action="search" id="headerSearch">
+ <input type="hidden" name="match" value="glob"/>
+ <select name="type">
+ <option value="package">Packages</option>
+ <option value="build">Builds</option>
+ <option value="tag">Tags</option>
+ <option value="target">Build Targets</option>
+ <option value="user">Users</option>
+ <option value="host">Hosts</option>
+ <option value="rpm">RPMs</option>
+ #if $mavenEnabled
+ <option value="maven">Maven Artifacts</option>
+ #end if
+ #if $winEnabled
+ <option value="win">Windows Artifacts</option>
+ #end if
+ </select>
+ <input type="text" name="terms"/>
+ <input type="submit" value="Search"/>
+ </form>
+ </div><!-- end header -->
+
+ <!-- MAIN NAVIGATION -->
+ <div id="mainNav">
+ <h4 class="hide">Main Site Links:</h4>
+ <ul>
+ <li id="summaryTab"><a href="index">Summary</a></li>
+ <li id="packagesTab"><a href="packages">Packages</a></li>
+ <li id="buildsTab"><a href="builds">Builds</a></li>
+ <li id="tasksTab"><a href="tasks">Tasks</a></li>
+ <li id="tagsTab"><a href="tags">Tags</a></li>
+ <li id="buildtargetsTab"><a href="buildtargets">Build Targets</a></li>
+ <li id="usersTab"><a href="users">Users</a></li>
+ <li id="hostsTab"><a href="hosts">Hosts</a></li>
+ <li id="reportsTab"><a href="reports">Reports</a></li>
+ <li id="searchTab"><a href="search">Search</a></li>
+ </ul>
+ </div><!-- end mainNav -->
+
+ <span id="loginInfo">
+ $koji.formatTimeLong($currentDate) |
+ #if $currentUser
+ $greeting(), <a href="userinfo?userID=$currentUser.id">$currentUser.name</a> | <a href="logout">logout</a>
+ #else
+ <a href="login">login</a>
+ #end if
+ </span>
+
+ <div id="content">
diff --git a/www/kojiweb/index.chtml b/www/kojiweb/index.chtml
new file mode 100644
index 0000000..738256c
--- /dev/null
+++ b/www/kojiweb/index.chtml
@@ -0,0 +1,161 @@
+#import koji
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <div class="pageHeader">$welcomeMessage</div>
+
+ <div class="dataHeader noPaginate" id="buildlist">#if $user then 'Your ' else ''#Recent Builds</div>
+ <table class="data-list">
+ <tr class="list-header">
+ <th>ID $util.sortImage($self, 'id')</th>
+ <th>NVR</th>
+ #if not $user
+ <th>Built by</th>
+ #end if
+ <th>Finished</th>
+ <th>State</th>
+ </tr>
+ #for $build in $builds
+ <tr class="$util.rowToggle($self)">
+ #set $stateName = $util.stateName($build.state)
+ <td>$build.build_id</td>
+ <td><a href="buildinfo?buildID=$build.build_id">$build.nvr</a></td>
+ #if not $user
+ <td class="user-$build.owner_name"><a href="userinfo?userID=$build.owner_id">$build.owner_name</a></td>
+ #end if
+ <td>$util.formatTime($build.completion_time)</td>
+ <td class="$stateName">$util.stateImage($build.state)</td>
+ </tr>
+ #end for
+ #if not $builds
+ <tr class="row-odd">
+ <td colspan="3">No builds</td>
+ </tr>
+ #end if
+ </table>
+
+ <br/>
+
+ <div class="dataHeader noPaginate" id="tasklist">#if $user then 'Your ' else ''#Recent Tasks</div>
+ <table class="data-list">
+ <tr class="list-header">
+ <th>ID $util.sortImage($self, 'id')</th>
+ <th>Type</th>
+ #if not $user
+ <th>Owner</th>
+ #end if
+ <th>Arch</th>
+ <th>Finished</th>
+ <th>State</th>
+ </tr>
+ #for $task in $tasks
+ #set $scratch = $util.taskScratchClass($task)
+ <tr class="$util.rowToggle($self) $scratch">
+ #set $state = $util.taskState($task.state)
+ <td>$task.id</td>
+ <td><a href="taskinfo?taskID=$task.id" class="task$state" title="$state">$koji.taskLabel($task)</a></td>
+ #if not $user
+ <td class="user-$task.owner_name">
+ #if $task.owner_type == $koji.USERTYPES['HOST']
+ <a href="hostinfo?userID=$task.owner">$task.owner_name</a>
+ #else
+ <a href="userinfo?userID=$task.owner">$task.owner_name</a>
+ #end if
+ </td>
+ #end if
+ <td>$task.arch</td>
+ <td>$util.formatTime($task.completion_time)</td>
+ <td class="task$state">$util.imageTag($state)</td>
+ </tr>
+ #end for
+ #if not $tasks
+ <tr class="row-odd">
+ <td colspan="5">No tasks</td>
+ </tr>
+ #end if
+ </table>
+
+ #if $user
+ <br/>
+
+ <div class="dataHeader" id="packagelist">Your Packages</div>
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($packagePages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'index?packageStart=' + this.value * $packageRange + '$util.passthrough($self, 'packageOrder', 'buildOrder', 'buildStart', 'taskOrder', 'taskStart')#packagelist';">
+ #for $pageNum in $packagePages
+ <option value="$pageNum"#if $pageNum == $packageCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $packageStart > 0
+ <a href="index?packageStart=#echo $packageStart - $packageRange #$util.passthrough($self, 'packageOrder', 'buildOrder', 'buildStart', 'taskOrder', 'taskStart')#packagelist"><<<</a>
+ #end if
+ #if $totalPackages != 0
+ <strong>Package #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages</strong>
+ #end if
+ #if $packageStart + $packageCount < $totalPackages
+ <a href="index?packageStart=#echo $packageStart + $packageRange#$util.passthrough($self, 'packageOrder', 'buildOrder', 'buildStart', 'taskOrder', 'taskStart')#packagelist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="index?packageOrder=$util.toggleOrder($self, 'package_name', 'packageOrder')$util.passthrough($self, 'buildOrder', 'buildStart', 'taskOrder', 'taskStart')#packagelist">Name</a> $util.sortImage($self, 'package_name', 'packageOrder')</th>
+ <th><a href="index?packageOrder=$util.toggleOrder($self, 'tag_name', 'packageOrder')$util.passthrough($self, 'buildOrder', 'buildStart', 'taskOrder', 'taskStart')#packagelist">Tag</a> $util.sortImage($self, 'tag_name', 'packageOrder')</th>
+ <th><a href="index?packageOrder=$util.toggleOrder($self, 'blocked', 'packageOrder')$util.passthrough($self, 'buildOrder', 'buildStart', 'taskOrder', 'taskStart')#packagelist">Included?</a> $util.sortImage($self, 'blocked', 'packageOrder')</th>
+ </tr>
+ #for $package in $packages
+ <tr class="$util.rowToggle($self)">
+ <td><a href="packageinfo?packageID=$package.package_id">$package.package_name</a></td>
+ <td><a href="taginfo?tagID=$package.tag_id">$package.tag_name</a></td>
+ #set $included = $package.blocked and 'no' or 'yes'
+ <td>$util.imageTag($included)</td>
+ </tr>
+ #end for
+ #if $totalPackages == 0
+ <tr class="row-odd">
+ <td colspan="3">No packages</td>
+ </tr>
+ #end if
+ </table>
+
+ <br/>
+
+ <div class="dataHeader" id="notificationlist">Your Notifications</div>
+ <table class="data-list">
+ <tr>
+ <td colspan="5"></td>
+ </tr>
+ <tr class="list-header">
+ <th>Package</th>
+ <th>Tag</th>
+ <th>Type</th>
+ <th></th>
+ <th></th>
+ </tr>
+ #for $notif in $notifs
+ <tr class="$util.rowToggle($self)">
+ <td>#if $notif.package then $notif.package.name else 'all'#</td>
+ <td>#if $notif.tag then $notif.tag.name else 'all'#</td>
+ <td>#if $notif.success_only then 'success only' else 'all'#</td>
+ <td><a href="notificationedit?notificationID=$notif.id$util.authToken($self)">edit</a></td>
+ <td><a href="notificationdelete?notificationID=$notif.id$util.authToken($self)">delete</a></td>
+ </tr>
+ #end for
+ #if $len($notifs) == 0
+ <tr class="row-odd">
+ <td colspan="5">No notifications</td>
+ </tr>
+ #end if
+ </table>
+
+ <br/>
+ <a href="notificationcreate$util.authToken($self, first=True)">Add a Notification</a>
+ #end if
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/index.py b/www/kojiweb/index.py
new file mode 100644
index 0000000..4be6131
--- /dev/null
+++ b/www/kojiweb/index.py
@@ -0,0 +1,2241 @@
+# core web interface handlers for koji
+#
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+# Mike McLean <mikem at redhat.com>
+
+import os
+import os.path
+import re
+import sys
+import mimetypes
+import Cookie
+import Cheetah.Filters
+import Cheetah.Template
+import datetime
+import logging
+import time
+import koji
+import kojiweb.util
+from koji.server import ServerRedirect
+from kojiweb.util import _initValues
+from kojiweb.util import _genHTML
+from kojiweb.util import _getValidTokens
+from koji.util import sha1_constructor
+
+# Convenience definition of a commonly-used sort function
+_sortbyname = kojiweb.util.sortByKeyFunc('name')
+
+#loggers
+authlogger = logging.getLogger('koji.auth')
+
+def _setUserCookie(environ, user):
+ options = environ['koji.options']
+ # include the current time in the cookie so we can verify that
+ # someone is not using an expired cookie
+ value = user + ':' + str(int(time.time()))
+ if not options['Secret'].value:
+ raise koji.AuthError, 'Unable to authenticate, server secret not configured'
+ shasum = sha1_constructor(value)
+ shasum.update(options['Secret'].value)
+ value = "%s:%s" % (shasum.hexdigest(), value)
+ cookies = Cookie.SimpleCookie()
+ cookies['user'] = value
+ c = cookies['user'] #morsel instance
+ c['secure'] = True
+ c['path'] = os.path.dirname(environ['SCRIPT_NAME'])
+ # the Cookie module treats integer expire times as relative seconds
+ c['expires'] = int(options['LoginTimeout']) * 60 * 60
+ out = c.OutputString()
+ out += '; HttpOnly'
+ environ['koji.headers'].append(['Set-Cookie', out])
+ environ['koji.headers'].append(['Cache-Control', 'no-cache="set-cookie"'])
+
+def _clearUserCookie(environ):
+ cookies = Cookie.SimpleCookie()
+ cookies['user'] = ''
+ c = cookies['user'] #morsel instance
+ c['path'] = os.path.dirname(environ['SCRIPT_NAME'])
+ c['expires'] = 0
+ out = c.OutputString()
+ environ['koji.headers'].append(['Set-Cookie', out])
+
+def _getUserCookie(environ):
+ options = environ['koji.options']
+ cookies = Cookie.SimpleCookie(environ.get('HTTP_COOKIE',''))
+ if 'user' not in cookies:
+ return None
+ value = cookies['user'].value
+ parts = value.split(":", 1)
+ if len(parts) != 2:
+ authlogger.warn('malformed user cookie: %s' % value)
+ return None
+ sig, value = parts
+ if not options['Secret'].value:
+ raise koji.AuthError, 'Unable to authenticate, server secret not configured'
+ shasum = sha1_constructor(value)
+ shasum.update(options['Secret'].value)
+ if shasum.hexdigest() != sig:
+ authlogger.warn('invalid user cookie: %s:%s', sig, value)
+ return None
+ parts = value.split(":", 1)
+ if len(parts) != 2:
+ authlogger.warn('invalid signed user cookie: %s:%s', sig, value)
+ # no embedded timestamp
+ return None
+ user, timestamp = parts
+ try:
+ timestamp = float(timestamp)
+ except ValueError:
+ authlogger.warn('invalid time in signed user cookie: %s:%s', sig, value)
+ return None
+ if (time.time() - timestamp) > (int(options['LoginTimeout']) * 60 * 60):
+ authlogger.info('expired user cookie: %s', value)
+ return None
+ # Otherwise, cookie is valid and current
+ return user
+
+def _krbLogin(environ, session, principal):
+ options = environ['koji.options']
+ wprinc = options['WebPrincipal']
+ keytab = options['WebKeytab']
+ ccache = options['WebCCache']
+ return session.krb_login(principal=wprinc, keytab=keytab,
+ ccache=ccache, proxyuser=principal)
+
+def _sslLogin(environ, session, username):
+ options = environ['koji.options']
+ client_cert = options['WebCert']
+ client_ca = options['ClientCA']
+ server_ca = options['KojiHubCA']
+
+ return session.ssl_login(client_cert, client_ca, server_ca,
+ proxyuser=username)
+
+def _assertLogin(environ):
+ session = environ['koji.session']
+ options = environ['koji.options']
+ if 'koji.currentLogin' not in environ or 'koji.currentUser' not in environ:
+ raise StandardError, '_getServer() must be called before _assertLogin()'
+ elif environ['koji.currentLogin'] and environ['koji.currentUser']:
+ if options['WebCert']:
+ if not _sslLogin(environ, session, environ['koji.currentLogin']):
+ raise koji.AuthError, 'could not login %s via SSL' % environ['koji.currentLogin']
+ elif options['WebPrincipal']:
+ if not _krbLogin(environ, environ['koji.session'], environ['koji.currentLogin']):
+ raise koji.AuthError, 'could not login using principal: %s' % environ['koji.currentLogin']
+ else:
+ raise koji.AuthError, 'KojiWeb is incorrectly configured for authentication, contact the system administrator'
+
+ # verify a valid authToken was passed in to avoid CSRF
+ authToken = environ['koji.form'].getfirst('a', '')
+ validTokens = _getValidTokens(environ)
+ if authToken and authToken in validTokens:
+ # we have a token and it's valid
+ pass
+ else:
+ # their authToken is likely expired
+ # send them back to the page that brought them here so they
+ # can re-click the link with a valid authToken
+ _redirectBack(environ, page=None, forceSSL=(_getBaseURL(environ).startswith('https://')))
+ assert False
+ else:
+ _redirect(environ, 'login')
+ assert False
+
+def _getServer(environ):
+ opts = environ['koji.options']
+ session = koji.ClientSession(opts['KojiHubURL'],
+ opts={'krbservice': opts['KrbService']})
+
+ environ['koji.currentLogin'] = _getUserCookie(environ)
+ if environ['koji.currentLogin']:
+ environ['koji.currentUser'] = session.getUser(environ['koji.currentLogin'])
+ if not environ['koji.currentUser']:
+ raise koji.AuthError, 'could not get user for principal: %s' % environ['koji.currentLogin']
+ _setUserCookie(environ, environ['koji.currentLogin'])
+ else:
+ environ['koji.currentUser'] = None
+
+ environ['koji.session'] = session
+ return session
+
+def _construct_url(environ, page):
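+ """Build an absolute URL for `page` from the WSGI environment, omitting
+ the port when it is the scheme default (80 for http, 443 for https)."""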
+ port = environ['SERVER_PORT']
+ host = environ['SERVER_NAME']
+ url_scheme = environ['wsgi.url_scheme']
+ if (url_scheme == 'https' and port == '443') or \
+ (url_scheme == 'http' and port == '80'):
+ return "%s://%s%s" % (url_scheme, host, page)
+ return "%s://%s:%s%s" % (url_scheme, host, port, page)
+
+def _getBaseURL(environ):
+ base = environ['SCRIPT_NAME']
+ return _construct_url(environ, base)
+
+def _redirect(environ, location):
+ environ['koji.redirect'] = location
+ raise ServerRedirect
+
+def _redirectBack(environ, page, forceSSL):
+ if page:
+ # We'll work with the page we were given
+ pass
+ elif 'HTTP_REFERER' in environ:
+ page = environ['HTTP_REFERER']
+ else:
+ page = 'index'
+
+ # Modify the scheme if necessary
+ if page.startswith('http'):
+ pass
+ elif page.startswith('/'):
+ page = _construct_url(environ, page)
+ else:
+ page = _getBaseURL(environ) + '/' + page
+ if forceSSL:
+ page = page.replace('http:', 'https:')
+ else:
+ page = page.replace('https:', 'http:')
+
+ # and redirect to the page
+ _redirect(environ, page)
+
+def login(environ, page=None):
+ session = _getServer(environ)
+ options = environ['koji.options']
+
+ # try SSL first, fall back to Kerberos
+ if options['WebCert']:
+ if environ['wsgi.url_scheme'] != 'https':
+ dest = 'login'
+ if page:
+ dest = dest + '?page=' + page
+ _redirectBack(environ, dest, forceSSL=True)
+ return
+
+ if environ.get('SSL_CLIENT_VERIFY') != 'SUCCESS':
+ raise koji.AuthError, 'could not verify client: %s' % environ.get('SSL_CLIENT_VERIFY')
+
+ # use the subject's common name as their username
+ username = environ.get('SSL_CLIENT_S_DN_CN')
+ if not username:
+ raise koji.AuthError, 'unable to get user information from client certificate'
+
+ if not _sslLogin(environ, session, username):
+ raise koji.AuthError, 'could not login %s using SSL certificates' % username
+
+ authlogger.info('Successful SSL authentication by %s', username)
+
+ elif options['WebPrincipal']:
+ principal = environ.get('REMOTE_USER')
+ if not principal:
+ raise koji.AuthError, 'configuration error: mod_auth_kerb should have performed authentication before presenting this page'
+
+ if not _krbLogin(environ, session, principal):
+ raise koji.AuthError, 'could not login using principal: %s' % principal
+
+ username = principal
+ authlogger.info('Successful Kerberos authentication by %s', username)
+ else:
+ raise koji.AuthError, 'KojiWeb is incorrectly configured for authentication, contact the system administrator'
+
+ _setUserCookie(environ, username)
+ # To protect the session cookie, we must forceSSL
+ _redirectBack(environ, page, forceSSL=True)
+
+def logout(environ, page=None):
+ user = _getUserCookie(environ)
+ _clearUserCookie(environ)
+ if user:
+ authlogger.info('Logout by %s', user)
+
+ _redirectBack(environ, page, forceSSL=False)
+
+def index(environ, packageOrder='package_name', packageStart=None):
+ values = _initValues(environ)
+ server = _getServer(environ)
+
+ user = environ['koji.currentUser']
+
+ values['builds'] = server.listBuilds(userID=(user and user['id'] or None), queryOpts={'order': '-build_id', 'limit': 10})
+
+ taskOpts = {'parent': None, 'decode': True}
+ if user:
+ taskOpts['owner'] = user['id']
+ values['tasks'] = server.listTasks(opts=taskOpts, queryOpts={'order': '-id', 'limit': 10})
+
+ values['order'] = '-id'
+
+ if user:
+ packages = kojiweb.util.paginateResults(server, values, 'listPackages', kw={'userID': user['id'], 'with_dups': True},
+ start=packageStart, dataName='packages', prefix='package', order=packageOrder, pageSize=10)
+
+ notifs = server.getBuildNotifications(user['id'])
+ notifs.sort(kojiweb.util.sortByKeyFunc('id'))
+ # XXX Make this a multicall
+ for notif in notifs:
+ notif['package'] = None
+ if notif['package_id']:
+ notif['package'] = server.getPackage(notif['package_id'])
+
+ notif['tag'] = None
+ if notif['tag_id']:
+ notif['tag'] = server.getTag(notif['tag_id'])
+ values['notifs'] = notifs
+
+ values['user'] = user
+ values['welcomeMessage'] = environ['koji.options']['KojiGreeting']
+
+ return _genHTML(environ, 'index.chtml')
+
+def notificationedit(environ, notificationID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ notificationID = int(notificationID)
+ notification = server.getBuildNotification(notificationID)
+ if notification == None:
+ raise koji.GenericError, 'no notification with ID: %i' % notificationID
+
+ form = environ['koji.form']
+
+ if form.has_key('save'):
+ package_id = form.getfirst('package')
+ if package_id == 'all':
+ package_id = None
+ else:
+ package_id = int(package_id)
+
+ tag_id = form.getfirst('tag')
+ if tag_id == 'all':
+ tag_id = None
+ else:
+ tag_id = int(tag_id)
+
+ if form.has_key('success_only'):
+ success_only = True
+ else:
+ success_only = False
+
+ server.updateNotification(notification['id'], package_id, tag_id, success_only)
+
+ _redirect(environ, 'index')
+ elif form.has_key('cancel'):
+ _redirect(environ, 'index')
+ else:
+ values = _initValues(environ, 'Edit Notification')
+
+ values['notif'] = notification
+ packages = server.listPackages()
+ packages.sort(kojiweb.util.sortByKeyFunc('package_name'))
+ values['packages'] = packages
+ tags = server.listTags(queryOpts={'order': 'name'})
+ values['tags'] = tags
+
+ return _genHTML(environ, 'notificationedit.chtml')
+
+def notificationcreate(environ):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ form = environ['koji.form']
+
+ if form.has_key('add'):
+ user = environ['koji.currentUser']
+ if not user:
+ raise koji.GenericError, 'not logged-in'
+
+ package_id = form.getfirst('package')
+ if package_id == 'all':
+ package_id = None
+ else:
+ package_id = int(package_id)
+
+ tag_id = form.getfirst('tag')
+ if tag_id == 'all':
+ tag_id = None
+ else:
+ tag_id = int(tag_id)
+
+ if form.has_key('success_only'):
+ success_only = True
+ else:
+ success_only = False
+
+ server.createNotification(user['id'], package_id, tag_id, success_only)
+
+ _redirect(environ, 'index')
+ elif form.has_key('cancel'):
+ _redirect(environ, 'index')
+ else:
+ values = _initValues(environ, 'Edit Notification')
+
+ values['notif'] = None
+ packages = server.listPackages()
+ packages.sort(kojiweb.util.sortByKeyFunc('package_name'))
+ values['packages'] = packages
+ tags = server.listTags(queryOpts={'order': 'name'})
+ values['tags'] = tags
+
+ return _genHTML(environ, 'notificationedit.chtml')
+
+def notificationdelete(environ, notificationID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ notificationID = int(notificationID)
+ notification = server.getBuildNotification(notificationID)
+ if not notification:
+ raise koji.GenericError, 'no notification with ID: %i' % notificationID
+
+ server.deleteNotification(notification['id'])
+
+ _redirect(environ, 'index')
+
+
+# All Tasks
+_TASKS = ['build',
+ 'buildSRPMFromSCM',
+ 'buildArch',
+ 'chainbuild',
+ 'maven',
+ 'buildMaven',
+ 'chainmaven',
+ 'wrapperRPM',
+ 'winbuild',
+ 'vmExec',
+ 'waitrepo',
+ 'tagBuild',
+ 'newRepo',
+ 'createrepo',
+ 'buildNotification',
+ 'tagNotification',
+ 'dependantTask',
+ 'livecd',
+ 'createLiveCD',
+ 'appliance',
+ 'createAppliance',
+ 'image',
+ 'createImage']
+# Tasks that can exist without a parent
+_TOPLEVEL_TASKS = ['build', 'buildNotification', 'chainbuild', 'maven', 'chainmaven', 'wrapperRPM', 'winbuild', 'newRepo', 'tagBuild', 'tagNotification', 'waitrepo', 'livecd', 'appliance', 'image']
+# Tasks that can have children
+_PARENT_TASKS = ['build', 'chainbuild', 'maven', 'chainmaven', 'winbuild', 'newRepo', 'wrapperRPM', 'livecd', 'appliance', 'image']
+
+def tasks(environ, owner=None, state='active', view='tree', method='all', hostID=None, channelID=None, start=None, order='-id'):
+ values = _initValues(environ, 'Tasks', 'tasks')
+ server = _getServer(environ)
+
+ opts = {'decode': True}
+ if owner:
+ if owner.isdigit():
+ owner = int(owner)
+ ownerObj = server.getUser(owner, strict=True)
+ opts['owner'] = ownerObj['id']
+ values['owner'] = ownerObj['name']
+ values['ownerObj'] = ownerObj
+ else:
+ values['owner'] = None
+ values['ownerObj'] = None
+
+ values['users'] = server.listUsers(queryOpts={'order': 'name'})
+
+ if method in _TASKS:
+ opts['method'] = method
+ else:
+ method = 'all'
+ values['method'] = method
+ values['alltasks'] = _TASKS
+
+ treeEnabled = True
+ if hostID or (method not in ['all'] + _PARENT_TASKS):
+ # force flat view if we're filtering by a hostID or a task that never has children
+ if view == 'tree':
+ view = 'flat'
+ # don't let them choose tree view either
+ treeEnabled = False
+ values['treeEnabled'] = treeEnabled
+
+ toplevelEnabled = True
+ if method not in ['all'] + _TOPLEVEL_TASKS:
+ # force flat view if we're viewing a task that is never a top-level task
+ if view == 'toplevel':
+ view = 'flat'
+ toplevelEnabled = False
+ values['toplevelEnabled'] = toplevelEnabled
+
+ values['view'] = view
+
+ if view == 'tree':
+ treeDisplay = True
+ else:
+ treeDisplay = False
+ values['treeDisplay'] = treeDisplay
+
+ if view in ('tree', 'toplevel'):
+ opts['parent'] = None
+
+ if state == 'active':
+ opts['state'] = [koji.TASK_STATES['FREE'], koji.TASK_STATES['OPEN'], koji.TASK_STATES['ASSIGNED']]
+ elif state == 'all':
+ pass
+ else:
+ # Assume they've passed in a state name
+ opts['state'] = [koji.TASK_STATES[state.upper()]]
+ values['state'] = state
+
+ if hostID:
+ hostID = int(hostID)
+ host = server.getHost(hostID, strict=True)
+ opts['host_id'] = host['id']
+ values['host'] = host
+ values['hostID'] = host['id']
+ else:
+ values['host'] = None
+ values['hostID'] = None
+
+ if channelID:
+ try:
+ channelID = int(channelID)
+ except ValueError:
+ pass
+ channel = server.getChannel(channelID, strict=True)
+ opts['channel_id'] = channel['id']
+ values['channel'] = channel
+ values['channelID'] = channel['id']
+ else:
+ values['channel'] = None
+ values['channelID'] = None
+
+ loggedInUser = environ['koji.currentUser']
+ values['loggedInUser'] = loggedInUser
+
+ values['order'] = order
+
+ tasks = kojiweb.util.paginateMethod(server, values, 'listTasks', kw={'opts': opts},
+ start=start, dataName='tasks', prefix='task', order=order)
+
+ if view == 'tree':
+ server.multicall = True
+ for task in tasks:
+ server.getTaskDescendents(task['id'], request=True)
+ descendentList = server.multiCall()
+ for task, [descendents] in zip(tasks, descendentList):
+ task['descendents'] = descendents
+
+ return _genHTML(environ, 'tasks.chtml')
+
+def taskinfo(environ, taskID):
+ server = _getServer(environ)
+ values = _initValues(environ, 'Task Info', 'tasks')
+
+ taskID = int(taskID)
+ task = server.getTaskInfo(taskID, request=True)
+ if not task:
+ raise koji.GenericError, 'invalid task ID: %s' % taskID
+
+ values['title'] = koji.taskLabel(task) + ' | Task Info'
+
+ values['task'] = task
+ params = task['request']
+ values['params'] = params
+
+ if task['channel_id']:
+ channel = server.getChannel(task['channel_id'])
+ values['channelName'] = channel['name']
+ else:
+ values['channelName'] = None
+ if task['host_id']:
+ host = server.getHost(task['host_id'])
+ values['hostName'] = host['name']
+ else:
+ values['hostName'] = None
+ if task['owner']:
+ owner = server.getUser(task['owner'])
+ values['owner'] = owner
+ else:
+ values['owner'] = None
+ if task['parent']:
+ parent = server.getTaskInfo(task['parent'], request=True)
+ values['parent'] = parent
+ else:
+ values['parent'] = None
+
+ descendents = server.getTaskDescendents(task['id'], request=True)
+ values['descendents'] = descendents
+
+ builds = server.listBuilds(taskID=task['id'])
+ if builds:
+ taskBuild = builds[0]
+ else:
+ taskBuild = None
+ values['taskBuild'] = taskBuild
+
+ values['estCompletion'] = None
+ if taskBuild and taskBuild['state'] == koji.BUILD_STATES['BUILDING']:
+ avgDuration = server.getAverageBuildDuration(taskBuild['package_id'])
+ if avgDuration != None:
+ avgDelta = datetime.timedelta(seconds=avgDuration)
+ startTime = datetime.datetime.fromtimestamp(taskBuild['creation_ts'])
+ values['estCompletion'] = startTime + avgDelta
+
+ buildroots = server.listBuildroots(taskID=task['id'])
+ values['buildroots'] = buildroots
+
+ if task['method'] == 'buildArch':
+ buildTag = server.getTag(params[1])
+ values['buildTag'] = buildTag
+ elif task['method'] == 'buildMaven':
+ buildTag = params[1]
+ values['buildTag'] = buildTag
+ elif task['method'] == 'buildSRPMFromSCM':
+ if len(params) > 1:
+ buildTag = server.getTag(params[1])
+ values['buildTag'] = buildTag
+ elif task['method'] == 'tagBuild':
+ destTag = server.getTag(params[0])
+ build = server.getBuild(params[1])
+ values['destTag'] = destTag
+ values['build'] = build
+ elif task['method'] == 'newRepo':
+ tag = server.getTag(params[0])
+ values['tag'] = tag
+ elif task['method'] == 'tagNotification':
+ destTag = None
+ if params[2]:
+ destTag = server.getTag(params[2])
+ srcTag = None
+ if params[3]:
+ srcTag = server.getTag(params[3])
+ build = server.getBuild(params[4])
+ user = server.getUser(params[5])
+ values['destTag'] = destTag
+ values['srcTag'] = srcTag
+ values['build'] = build
+ values['user'] = user
+ elif task['method'] == 'dependantTask':
+ deps = [server.getTaskInfo(depID, request=True) for depID in params[0]]
+ values['deps'] = deps
+ elif task['method'] == 'wrapperRPM':
+ buildTarget = params[1]
+ values['buildTarget'] = buildTarget
+ if params[3]:
+ wrapTask = server.getTaskInfo(params[3]['id'], request=True)
+ values['wrapTask'] = wrapTask
+ elif task['method'] == 'restartVerify':
+ values['rtask'] = server.getTaskInfo(params[0], request=True)
+
+ if task['state'] in (koji.TASK_STATES['CLOSED'], koji.TASK_STATES['FAILED']):
+ try:
+ result = server.getTaskResult(task['id'])
+ values['result'] = result
+ values['excClass'] = None
+ except:
+ excClass, exc = sys.exc_info()[:2]
+ values['result'] = exc
+ values['excClass'] = excClass
+ # clear the exception, since we're just using
+ # it for display purposes
+ sys.exc_clear()
+ else:
+ values['result'] = None
+ values['excClass'] = None
+
+ output = server.listTaskOutput(task['id'])
+ output.sort(_sortByExtAndName)
+ values['output'] = output
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+
+ topurl = environ['koji.options']['KojiFilesURL']
+ values['pathinfo'] = koji.PathInfo(topdir=topurl)
+
+ return _genHTML(environ, 'taskinfo.chtml')
+
+def taskstatus(environ, taskID):
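+ """Return a plain-text status summary for a task: the first line is
+ "<task id>:<state name>", followed by one "<filename>:<bytes>" line per
+ output file."""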
+ server = _getServer(environ)
+
+ taskID = int(taskID)
+ task = server.getTaskInfo(taskID)
+ if not task:
+ return ''
+ files = server.listTaskOutput(taskID, stat=True)
+ output = '%i:%s\n' % (task['id'], koji.TASK_STATES[task['state']])
+ for filename, file_stats in files.items():
+ output += '%s:%s\n' % (filename, file_stats['st_size'])
+
+ return output
+
+def resubmittask(environ, taskID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ taskID = int(taskID)
+ newTaskID = server.resubmitTask(taskID)
+ _redirect(environ, 'taskinfo?taskID=%i' % newTaskID)
+
+def canceltask(environ, taskID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ taskID = int(taskID)
+ server.cancelTask(taskID)
+ _redirect(environ, 'taskinfo?taskID=%i' % taskID)
+
+def _sortByExtAndName(a, b):
+ """Sort two filenames, first by extension, and then by name."""
+ aRoot, aExt = os.path.splitext(a)
+ bRoot, bExt = os.path.splitext(b)
+ return cmp(aExt, bExt) or cmp(aRoot, bRoot)
+
+def getfile(environ, taskID, name, offset=None, size=None):
+ server = _getServer(environ)
+ taskID = int(taskID)
+
+ output = server.listTaskOutput(taskID, stat=True)
+ file_info = output.get(name)
+ if not file_info:
+ raise koji.GenericError, 'no file "%s" output by task %i' % (name, taskID)
+
+ mime_guess = mimetypes.guess_type(name, strict=False)[0]
+ if mime_guess:
+ ctype = mime_guess
+ else:
+ if name.endswith('.log') or name.endswith('.ks'):
+ ctype = 'text/plain'
+ else:
+ ctype = 'application/octet-stream'
+ if ctype != 'text/plain':
+ environ['koji.headers'].append(['Content-Disposition', 'attachment; filename=%s' % name])  # header entries are [name, value] pairs, as below
+ environ['koji.headers'].append(['Content-Type', ctype])
+
+ file_size = int(file_info['st_size'])
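+ # Normalize the requested byte range: a negative offset counts back from
+ # the end of the file (tail-style reads), a missing or negative size means
+ # "through end of file", and the result is clamped to the file's bounds.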
+ if offset is None:
+ offset = 0
+ else:
+ offset = int(offset)
+ if size is None:
+ size = file_size
+ else:
+ size = int(size)
+ if size < 0:
+ size = file_size
+ if offset < 0:
+ # seeking relative to the end of the file
+ if offset < -file_size:
+ offset = -file_size
+ if size > -offset:
+ size = -offset
+ else:
+ if size > (file_size - offset):
+ size = file_size - offset
+
+ #environ['koji.headers'].append(['Content-Length', str(size)])
+ return _chunk_file(server, environ, taskID, name, offset, size)
+
+
+def _chunk_file(server, environ, taskID, name, offset, size):
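+ """Stream up to `size` bytes of a task output file in 1MiB chunks.
+
+ Yielding chunks lets the WSGI layer send large logs without buffering
+ the whole file in memory; iteration stops early if the hub returns an
+ empty chunk."""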
+ remaining = size
+ encode_int = koji.encode_int
+ while True:
+ if remaining <= 0:
+ break
+ chunk_size = 1048576
+ if remaining < chunk_size:
+ chunk_size = remaining
+ content = server.downloadTaskOutput(taskID, name, offset=encode_int(offset), size=chunk_size)
+ if not content:
+ break
+ yield content
+ content_length = len(content)
+ offset += content_length
+ remaining -= content_length
+
+def tags(environ, start=None, order=None, childID=None):
+ values = _initValues(environ, 'Tags', 'tags')
+ server = _getServer(environ)
+
+ if order == None:
+ order = 'name'
+ values['order'] = order
+
+ tags = kojiweb.util.paginateMethod(server, values, 'listTags', kw=None,
+ start=start, dataName='tags', prefix='tag', order=order)
+
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+
+ values['childID'] = childID
+
+ return _genHTML(environ, 'tags.chtml')
+
+_PREFIX_CHARS = [chr(char) for char in range(48, 58) + range(97, 123)]
+
+def packages(environ, tagID=None, userID=None, order='package_name', start=None, prefix=None, inherited='1'):
+ values = _initValues(environ, 'Packages', 'packages')
+ server = _getServer(environ)
+ tag = None
+ if tagID != None:
+ if tagID.isdigit():
+ tagID = int(tagID)
+ tag = server.getTag(tagID, strict=True)
+ values['tagID'] = tagID
+ values['tag'] = tag
+ user = None
+ if userID != None:
+ if userID.isdigit():
+ userID = int(userID)
+ user = server.getUser(userID, strict=True)
+ values['userID'] = userID
+ values['user'] = user
+ values['order'] = order
+ if prefix:
+ prefix = prefix.lower()[0]
+ if prefix not in _PREFIX_CHARS:
+ prefix = None
+ values['prefix'] = prefix
+ inherited = int(inherited)
+ values['inherited'] = inherited
+
+ packages = kojiweb.util.paginateResults(server, values, 'listPackages',
+ kw={'tagID': tagID, 'userID': userID, 'prefix': prefix, 'inherited': bool(inherited)},
+ start=start, dataName='packages', prefix='package', order=order)
+
+ values['chars'] = _PREFIX_CHARS
+
+ return _genHTML(environ, 'packages.chtml')
+
+def packageinfo(environ, packageID, tagOrder='name', tagStart=None, buildOrder='-completion_time', buildStart=None):
+ values = _initValues(environ, 'Package Info', 'packages')
+ server = _getServer(environ)
+
+ if packageID.isdigit():
+ packageID = int(packageID)
+ package = server.getPackage(packageID)
+ if package == None:
+ raise koji.GenericError, 'invalid package ID: %s' % packageID
+
+ values['title'] = package['name'] + ' | Package Info'
+
+ values['package'] = package
+ values['packageID'] = package['id']
+
+ tags = kojiweb.util.paginateMethod(server, values, 'listTags', kw={'package': package['id']},
+ start=tagStart, dataName='tags', prefix='tag', order=tagOrder)
+ builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'packageID': package['id']},
+ start=buildStart, dataName='builds', prefix='build', order=buildOrder)
+
+ return _genHTML(environ, 'packageinfo.chtml')
+
+def taginfo(environ, tagID, all='0', packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None, childID=None):
+ values = _initValues(environ, 'Tag Info', 'tags')
+ server = _getServer(environ)
+
+ if tagID.isdigit():
+ tagID = int(tagID)
+ tag = server.getTag(tagID, strict=True)
+
+ values['title'] = tag['name'] + ' | Tag Info'
+
+ all = int(all)
+
+ numPackages = server.count('listPackages', tagID=tag['id'], inherited=True)
+ numBuilds = server.count('listTagged', tag=tag['id'], inherit=True)
+ values['numPackages'] = numPackages
+ values['numBuilds'] = numBuilds
+
+ inheritance = server.getFullInheritance(tag['id'])
+ tagsByChild = {}
+ for parent in inheritance:
+ child_id = parent['child_id']
+ if not tagsByChild.has_key(child_id):
+ tagsByChild[child_id] = []
+ tagsByChild[child_id].append(child_id)
+
+ srcTargets = server.getBuildTargets(buildTagID=tag['id'])
+ srcTargets.sort(_sortbyname)
+ destTargets = server.getBuildTargets(destTagID=tag['id'])
+ destTargets.sort(_sortbyname)
+
+ values['tag'] = tag
+ values['tagID'] = tag['id']
+ values['inheritance'] = inheritance
+ values['tagsByChild'] = tagsByChild
+ values['srcTargets'] = srcTargets
+ values['destTargets'] = destTargets
+ values['all'] = all
+ values['repo'] = server.getRepo(tag['id'], state=koji.REPO_READY)
+ values['external_repos'] = server.getExternalRepoList(tag['id'])
+
+ child = None
+ if childID != None:
+ child = server.getTag(int(childID), strict=True)
+ values['child'] = child
+
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+ permList = server.getAllPerms()
+ allPerms = dict([(perm['id'], perm['name']) for perm in permList])
+ values['allPerms'] = allPerms
+
+ return _genHTML(environ, 'taginfo.chtml')
+
+def tagcreate(environ):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ mavenEnabled = server.mavenEnabled()
+
+ form = environ['koji.form']
+
+ if form.has_key('add'):
+ params = {}
+ name = form['name'].value
+ params['arches'] = form['arches'].value
+ params['locked'] = bool(form.has_key('locked'))
+ permission = form['permission'].value
+ if permission != 'none':
+ params['perm'] = int(permission)
+ if mavenEnabled:
+ params['maven_support'] = bool(form.has_key('maven_support'))
+ params['maven_include_all'] = bool(form.has_key('maven_include_all'))
+
+ tagID = server.createTag(name, **params)
+
+ _redirect(environ, 'taginfo?tagID=%i' % tagID)
+ elif form.has_key('cancel'):
+ _redirect(environ, 'tags')
+ else:
+ values = _initValues(environ, 'Add Tag', 'tags')
+
+ values['mavenEnabled'] = mavenEnabled
+
+ values['tag'] = None
+ values['permissions'] = server.getAllPerms()
+
+ return _genHTML(environ, 'tagedit.chtml')
+
+def tagedit(environ, tagID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ mavenEnabled = server.mavenEnabled()
+
+ tagID = int(tagID)
+ tag = server.getTag(tagID)
+ if tag == None:
+ raise koji.GenericError, 'no tag with ID: %i' % tagID
+
+ form = environ['koji.form']
+
+ if form.has_key('save'):
+ params = {}
+ params['name'] = form['name'].value
+ params['arches'] = form['arches'].value
+ params['locked'] = bool(form.has_key('locked'))
+ permission = form['permission'].value
+ if permission == 'none':
+ params['perm'] = None
+ else:
+ params['perm'] = int(permission)
+ if mavenEnabled:
+ params['maven_support'] = bool(form.has_key('maven_support'))
+ params['maven_include_all'] = bool(form.has_key('maven_include_all'))
+
+ server.editTag2(tag['id'], **params)
+
+ _redirect(environ, 'taginfo?tagID=%i' % tag['id'])
+ elif form.has_key('cancel'):
+ _redirect(environ, 'taginfo?tagID=%i' % tag['id'])
+ else:
+ values = _initValues(environ, 'Edit Tag', 'tags')
+
+ values['mavenEnabled'] = mavenEnabled
+
+ values['tag'] = tag
+ values['permissions'] = server.getAllPerms()
+
+ return _genHTML(environ, 'tagedit.chtml')
+
+def tagdelete(environ, tagID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ tagID = int(tagID)
+ tag = server.getTag(tagID)
+ if tag == None:
+ raise koji.GenericError, 'no tag with ID: %i' % tagID
+
+ server.deleteTag(tag['id'])
+
+ _redirect(environ, 'tags')
+
+def tagparent(environ, tagID, parentID, action):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ tag = server.getTag(int(tagID), strict=True)
+ parent = server.getTag(int(parentID), strict=True)
+
+ if action in ('add', 'edit'):
+ form = environ['koji.form']
+
+ if form.has_key('add') or form.has_key('save'):
+ newDatum = {}
+ newDatum['parent_id'] = parent['id']
+ newDatum['priority'] = int(form.getfirst('priority'))
+ maxdepth = form.getfirst('maxdepth')
+ maxdepth = len(maxdepth) > 0 and int(maxdepth) or None
+ newDatum['maxdepth'] = maxdepth
+ newDatum['intransitive'] = bool(form.has_key('intransitive'))
+ newDatum['noconfig'] = bool(form.has_key('noconfig'))
+ newDatum['pkg_filter'] = form.getfirst('pkg_filter')
+
+ data = server.getInheritanceData(tag['id'])
+ data.append(newDatum)
+
+ server.setInheritanceData(tag['id'], data)
+ elif form.has_key('cancel'):
+ pass
+ else:
+ values = _initValues(environ, action.capitalize() + ' Parent Tag', 'tags')
+ values['tag'] = tag
+ values['parent'] = parent
+
+ inheritanceData = server.getInheritanceData(tag['id'])
+ maxPriority = 0
+ for datum in inheritanceData:
+ if datum['priority'] > maxPriority:
+ maxPriority = datum['priority']
+ values['maxPriority'] = maxPriority
+ inheritanceData = [datum for datum in inheritanceData \
+ if datum['parent_id'] == parent['id']]
+ if len(inheritanceData) == 0:
+ values['inheritanceData'] = None
+ elif len(inheritanceData) == 1:
+ values['inheritanceData'] = inheritanceData[0]
+ else:
+ raise koji.GenericError, 'tag %i has tag %i listed as a parent more than once' % (tag['id'], parent['id'])
+
+ return _genHTML(environ, 'tagparent.chtml')
+ elif action == 'remove':
+ data = server.getInheritanceData(tag['id'])
+ for datum in data:
+ if datum['parent_id'] == parent['id']:
+ datum['delete link'] = True
+ break
+ else:
+ raise koji.GenericError, 'tag %i is not a parent of tag %i' % (parent['id'], tag['id'])
+
+ server.setInheritanceData(tag['id'], data)
+ else:
+ raise koji.GenericError, 'unknown action: %s' % action
+
+ _redirect(environ, 'taginfo?tagID=%i' % tag['id'])
+
+def externalrepoinfo(environ, extrepoID):
+ values = _initValues(environ, 'External Repo Info', 'tags')
+ server = _getServer(environ)
+
+ if extrepoID.isdigit():
+ extrepoID = int(extrepoID)
+ extRepo = server.getExternalRepo(extrepoID, strict=True)
+ repoTags = server.getTagExternalRepos(repo_info=extRepo['id'])
+
+ values['title'] = extRepo['name'] + ' | External Repo Info'
+ values['extRepo'] = extRepo
+ values['repoTags'] = repoTags
+
+ return _genHTML(environ, 'externalrepoinfo.chtml')
+
+def buildinfo(environ, buildID):
+ values = _initValues(environ, 'Build Info', 'builds')
+ server = _getServer(environ)
+
+ buildID = int(buildID)
+
+ build = server.getBuild(buildID)
+
+ values['title'] = koji.buildLabel(build) + ' | Build Info'
+
+ tags = server.listTags(build['id'])
+ tags.sort(_sortbyname)
+ rpms = server.listBuildRPMs(build['id'])
+ rpms.sort(_sortbyname)
+ mavenbuild = server.getMavenBuild(buildID)
+ winbuild = server.getWinBuild(buildID)
+ imagebuild = server.getImageBuild(buildID)
+ if mavenbuild:
+ archivetype = 'maven'
+ elif winbuild:
+ archivetype = 'win'
+ elif imagebuild:
+ archivetype = 'image'
+ else:
+ archivetype = None
+ archives = server.listArchives(build['id'], type=archivetype, queryOpts={'order': 'filename'})
+ archivesByExt = {}
+ for archive in archives:
+ archivesByExt.setdefault(os.path.splitext(archive['filename'])[1][1:], []).append(archive)
+
+ rpmsByArch = {}
+ debuginfoByArch = {}
+ for rpm in rpms:
+ if koji.is_debuginfo(rpm['name']):
+ debuginfoByArch.setdefault(rpm['arch'], []).append(rpm)
+ else:
+ rpmsByArch.setdefault(rpm['arch'], []).append(rpm)
+
+ if rpmsByArch.has_key('src'):
+ srpm = rpmsByArch['src'][0]
+ headers = server.getRPMHeaders(srpm['id'], headers=['summary', 'description'])
+ values['summary'] = koji.fixEncoding(headers.get('summary'))
+ values['description'] = koji.fixEncoding(headers.get('description'))
+ values['changelog'] = server.getChangelogEntries(build['id'])
+
+ noarch_log_dest = 'noarch'
+ if build['task_id']:
+ task = server.getTaskInfo(build['task_id'], request=True)
+ if rpmsByArch.has_key('noarch') and \
+ [a for a in rpmsByArch.keys() if a not in ('noarch', 'src')]:
+ # This build has noarch and other-arch packages, indicating either
+ # noarch in extra-arches (kernel) or noarch subpackages.
+ # Point the log link to the arch of the buildArch task that the first
+ # noarch package came from. This will be correct in both the
+ # extra-arches case (noarch) and the subpackage case (one of the other
+ # arches). If noarch extra-arches and noarch subpackages are mixed in the
+ # same build, this will become incorrect.
+ noarch_rpm = rpmsByArch['noarch'][0]
+ if noarch_rpm['buildroot_id']:
+ noarch_buildroot = server.getBuildroot(noarch_rpm['buildroot_id'])
+ if noarch_buildroot:
+ noarch_task = server.getTaskInfo(noarch_buildroot['task_id'], request=True)
+ if noarch_task:
+ noarch_log_dest = noarch_task['request'][2]
+
+ # get the summary, description, and changelogs from the built srpm
+ # if the build is not yet complete
+ if build['state'] != koji.BUILD_STATES['COMPLETE']:
+ srpm_tasks = server.listTasks(opts={'parent': task['id'], 'method': 'buildSRPMFromSCM'})
+ if srpm_tasks:
+ srpm_task = srpm_tasks[0]
+ if srpm_task['state'] == koji.TASK_STATES['CLOSED']:
+ srpm_path = None
+ for output in server.listTaskOutput(srpm_task['id']):
+ if output.endswith('.src.rpm'):
+ srpm_path = output
+ break
+ if srpm_path:
+ srpm_headers = server.getRPMHeaders(taskID=srpm_task['id'], filepath=srpm_path,
+ headers=['summary', 'description'])
+ if srpm_headers:
+ values['summary'] = koji.fixEncoding(srpm_headers['summary'])
+ values['description'] = koji.fixEncoding(srpm_headers['description'])
+ changelog = server.getChangelogEntries(taskID=srpm_task['id'], filepath=srpm_path)
+ if changelog:
+ values['changelog'] = changelog
+ else:
+ task = None
+
+ values['build'] = build
+ values['tags'] = tags
+ values['rpmsByArch'] = rpmsByArch
+ values['debuginfoByArch'] = debuginfoByArch
+ values['task'] = task
+ values['mavenbuild'] = mavenbuild
+ values['winbuild'] = winbuild
+ values['imagebuild'] = imagebuild
+ values['archives'] = archives
+ values['archivesByExt'] = archivesByExt
+
+ values['noarch_log_dest'] = noarch_log_dest
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+ for field in ['summary', 'description', 'changelog']:
+ if not values.has_key(field):
+ values[field] = None
+
+ values['start_time'] = build['creation_time']
+ # the build start time is not accurate for maven and win builds, get it from the
+ # task start time instead
+ if mavenbuild or winbuild:
+ if task:
+ values['start_time'] = task['start_time']
+ if build['state'] == koji.BUILD_STATES['BUILDING']:
+ avgDuration = server.getAverageBuildDuration(build['package_id'])
+ if avgDuration != None:
+ avgDelta = datetime.timedelta(seconds=avgDuration)
+ startTime = datetime.datetime.fromtimestamp(build['creation_ts'])
+ values['estCompletion'] = startTime + avgDelta
+ else:
+ values['estCompletion'] = None
+
+ topurl = environ['koji.options']['KojiFilesURL']
+ values['pathinfo'] = koji.PathInfo(topdir=topurl)
+ return _genHTML(environ, 'buildinfo.chtml')
+
+def builds(environ, userID=None, tagID=None, packageID=None, state=None, order='-build_id', start=None, prefix=None, inherited='1', latest='1', type=None):
+ values = _initValues(environ, 'Builds', 'builds')
+ server = _getServer(environ)
+
+ user = None
+ if userID:
+ if userID.isdigit():
+ userID = int(userID)
+ user = server.getUser(userID, strict=True)
+ values['userID'] = userID
+ values['user'] = user
+
+ loggedInUser = environ['koji.currentUser']
+ values['loggedInUser'] = loggedInUser
+
+ values['users'] = server.listUsers(queryOpts={'order': 'name'})
+
+ tag = None
+ if tagID:
+ if tagID.isdigit():
+ tagID = int(tagID)
+ tag = server.getTag(tagID, strict=True)
+ values['tagID'] = tagID
+ values['tag'] = tag
+
+ package = None
+ if packageID:
+ if packageID.isdigit():
+ packageID = int(packageID)
+ package = server.getPackage(packageID, strict=True)
+ values['packageID'] = packageID
+ values['package'] = package
+
+ if state == 'all':
+ state = None
+ elif state != None:
+ state = int(state)
+ values['state'] = state
+
+ if prefix:
+ prefix = prefix.lower()[0]
+ if prefix not in _PREFIX_CHARS:
+ prefix = None
+ values['prefix'] = prefix
+
+ values['order'] = order
+ if type in ('maven', 'win', 'image'):
+ pass
+ elif type == 'all':
+ type = None
+ else:
+ type = None
+ values['type'] = type
+
+ if tag:
+ inherited = int(inherited)
+ values['inherited'] = inherited
+ latest = int(latest)
+ values['latest'] = latest
+ else:
+ values['inherited'] = None
+ values['latest'] = None
+
+ if tag:
+ # don't need to consider 'state' here, since only completed builds would be tagged
+ builds = kojiweb.util.paginateResults(server, values, 'listTagged', kw={'tag': tag['id'], 'package': (package and package['name'] or None),
+ 'owner': (user and user['name'] or None),
+ 'type': type,
+ 'inherit': bool(inherited), 'latest': bool(latest), 'prefix': prefix},
+ start=start, dataName='builds', prefix='build', order=order)
+ else:
+ builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': (user and user['id'] or None), 'packageID': (package and package['id'] or None),
+ 'type': type,
+ 'state': state, 'prefix': prefix},
+ start=start, dataName='builds', prefix='build', order=order)
+
+ values['chars'] = _PREFIX_CHARS
+
+ return _genHTML(environ, 'builds.chtml')
+
+def users(environ, order='name', start=None, prefix=None):
+ values = _initValues(environ, 'Users', 'users')
+ server = _getServer(environ)
+
+ if prefix:
+ prefix = prefix.lower()[0]
+ if prefix not in _PREFIX_CHARS:
+ prefix = None
+ values['prefix'] = prefix
+
+ values['order'] = order
+
+ users = kojiweb.util.paginateMethod(server, values, 'listUsers', kw={'prefix': prefix},
+ start=start, dataName='users', prefix='user', order=order)
+
+ values['chars'] = _PREFIX_CHARS
+
+ return _genHTML(environ, 'users.chtml')
+
+def userinfo(environ, userID, packageOrder='package_name', packageStart=None, buildOrder='-completion_time', buildStart=None):
+ values = _initValues(environ, 'User Info', 'users')
+ server = _getServer(environ)
+
+ if userID.isdigit():
+ userID = int(userID)
+ user = server.getUser(userID, strict=True)
+
+ values['title'] = user['name'] + ' | User Info'
+
+ values['user'] = user
+ values['userID'] = userID
+ values['taskCount'] = server.listTasks(opts={'owner': user['id'], 'parent': None}, queryOpts={'countOnly': True})
+
+ packages = kojiweb.util.paginateResults(server, values, 'listPackages', kw={'userID': user['id'], 'with_dups': True},
+ start=packageStart, dataName='packages', prefix='package', order=packageOrder, pageSize=10)
+
+ builds = kojiweb.util.paginateMethod(server, values, 'listBuilds', kw={'userID': user['id']},
+ start=buildStart, dataName='builds', prefix='build', order=buildOrder, pageSize=10)
+
+ return _genHTML(environ, 'userinfo.chtml')
+
+def rpminfo(environ, rpmID, fileOrder='name', fileStart=None, buildrootOrder='-id', buildrootStart=None):
+ values = _initValues(environ, 'RPM Info', 'builds')
+ server = _getServer(environ)
+
+ rpmID = int(rpmID)
+ rpm = server.getRPM(rpmID)
+
+ values['title'] = '%(name)s-%%s%(version)s-%(release)s.%(arch)s.rpm' % rpm + ' | RPM Info'
+ epochStr = ''
+ if rpm['epoch'] != None:
+ epochStr = '%s:' % rpm['epoch']
+ values['title'] = values['title'] % epochStr
+
+ build = None
+ if rpm['build_id'] != None:
+ build = server.getBuild(rpm['build_id'])
+ builtInRoot = None
+ if rpm['buildroot_id'] != None:
+ builtInRoot = server.getBuildroot(rpm['buildroot_id'])
+ if rpm['external_repo_id'] == 0:
+ values['requires'] = server.getRPMDeps(rpm['id'], koji.DEP_REQUIRE)
+ values['requires'].sort(_sortbyname)
+ values['provides'] = server.getRPMDeps(rpm['id'], koji.DEP_PROVIDE)
+ values['provides'].sort(_sortbyname)
+ values['obsoletes'] = server.getRPMDeps(rpm['id'], koji.DEP_OBSOLETE)
+ values['obsoletes'].sort(_sortbyname)
+ values['conflicts'] = server.getRPMDeps(rpm['id'], koji.DEP_CONFLICT)
+ values['conflicts'].sort(_sortbyname)
+ headers = server.getRPMHeaders(rpm['id'], headers=['summary', 'description'])
+ values['summary'] = koji.fixEncoding(headers.get('summary'))
+ values['description'] = koji.fixEncoding(headers.get('description'))
+ buildroots = kojiweb.util.paginateMethod(server, values, 'listBuildroots', kw={'rpmID': rpm['id']},
+ start=buildrootStart, dataName='buildroots', prefix='buildroot',
+ order=buildrootOrder)
+
+ values['rpmID'] = rpmID
+ values['rpm'] = rpm
+ values['build'] = build
+ values['builtInRoot'] = builtInRoot
+ values['buildroots'] = buildroots
+
+ files = kojiweb.util.paginateMethod(server, values, 'listRPMFiles', args=[rpm['id']],
+ start=fileStart, dataName='files', prefix='file', order=fileOrder)
+
+ return _genHTML(environ, 'rpminfo.chtml')
+
+def archiveinfo(environ, archiveID, fileOrder='name', fileStart=None, buildrootOrder='-id', buildrootStart=None):
+ values = _initValues(environ, 'Archive Info', 'builds')
+ server = _getServer(environ)
+
+ archiveID = int(archiveID)
+ archive = server.getArchive(archiveID)
+ archive_type = server.getArchiveType(type_id=archive['type_id'])
+ build = server.getBuild(archive['build_id'])
+ maveninfo = False
+ if 'group_id' in archive:
+ maveninfo = True
+ wininfo = False
+ if 'relpath' in archive:
+ wininfo = True
+ builtInRoot = None
+ if archive['buildroot_id'] != None:
+ builtInRoot = server.getBuildroot(archive['buildroot_id'])
+ files = kojiweb.util.paginateMethod(server, values, 'listArchiveFiles', args=[archive['id']],
+ start=fileStart, dataName='files', prefix='file', order=fileOrder)
+ buildroots = kojiweb.util.paginateMethod(server, values, 'listBuildroots', kw={'archiveID': archive['id']},
+ start=buildrootStart, dataName='buildroots', prefix='buildroot',
+ order=buildrootOrder)
+
+ values['title'] = archive['filename'] + ' | Archive Info'
+
+ values['archiveID'] = archive['id']
+ values['archive'] = archive
+ values['archive_type'] = archive_type
+ values['build'] = build
+ values['maveninfo'] = maveninfo
+ values['wininfo'] = wininfo
+ values['builtInRoot'] = builtInRoot
+ values['buildroots'] = buildroots
+
+ return _genHTML(environ, 'archiveinfo.chtml')
+
+def fileinfo(environ, filename, rpmID=None, archiveID=None):
+ values = _initValues(environ, 'File Info', 'builds')
+ server = _getServer(environ)
+
+ values['rpm'] = None
+ values['archive'] = None
+
+ if rpmID:
+ rpmID = int(rpmID)
+ rpm = server.getRPM(rpmID)
+ if not rpm:
+ raise koji.GenericError, 'invalid RPM ID: %i' % rpmID
+ file = server.getRPMFile(rpm['id'], filename)
+ if not file:
+ raise koji.GenericError, 'no file %s in RPM %i' % (filename, rpmID)
+ values['rpm'] = rpm
+ elif archiveID:
+ archiveID = int(archiveID)
+ archive = server.getArchive(archiveID)
+ if not archive:
+ raise koji.GenericError, 'invalid archive ID: %i' % archiveID
+ file = server.getArchiveFile(archive['id'], filename)
+ if not file:
+ raise koji.GenericError, 'no file %s in archive %i' % (filename, archiveID)
+ values['archive'] = archive
+ else:
+ raise koji.GenericError, 'either rpmID or archiveID must be specified'
+
+ values['title'] = file['name'] + ' | File Info'
+
+ values['file'] = file
+
+ return _genHTML(environ, 'fileinfo.chtml')
+
+def cancelbuild(environ, buildID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ buildID = int(buildID)
+ build = server.getBuild(buildID)
+ if build == None:
+ raise koji.GenericError, 'unknown build ID: %i' % buildID
+
+ result = server.cancelBuild(build['id'])
+ if not result:
+ raise koji.GenericError, 'unable to cancel build'
+
+ _redirect(environ, 'buildinfo?buildID=%i' % build['id'])
+
+def hosts(environ, state='enabled', start=None, order='name'):
+ values = _initValues(environ, 'Hosts', 'hosts')
+ server = _getServer(environ)
+
+ values['order'] = order
+
+ args = {}
+
+ if state == 'enabled':
+ args['enabled'] = True
+ elif state == 'disabled':
+ args['enabled'] = False
+ else:
+ state = 'all'
+ values['state'] = state
+
+ hosts = server.listHosts(**args)
+
+ server.multicall = True
+ for host in hosts:
+ server.getLastHostUpdate(host['id'])
+ updates = server.multiCall()
+ for host, [lastUpdate] in zip(hosts, updates):
+ host['last_update'] = lastUpdate
+
+ # Paginate after retrieving last update info so we can sort on it
+ kojiweb.util.paginateList(values, hosts, start, 'hosts', 'host', order)
+
+ return _genHTML(environ, 'hosts.chtml')
+
+def hostinfo(environ, hostID=None, userID=None):
+ values = _initValues(environ, 'Host Info', 'hosts')
+ server = _getServer(environ)
+
+ if hostID:
+ if hostID.isdigit():
+ hostID = int(hostID)
+ host = server.getHost(hostID)
+ if host == None:
+ raise koji.GenericError, 'invalid host ID: %s' % hostID
+ elif userID:
+ userID = int(userID)
+ hosts = server.listHosts(userID=userID)
+ host = None
+ if hosts:
+ host = hosts[0]
+ if host == None:
+ raise koji.GenericError, 'no host for user ID: %s' % userID
+ else:
+ raise koji.GenericError, 'hostID or userID must be provided'
+
+ values['title'] = host['name'] + ' | Host Info'
+
+ channels = server.listChannels(host['id'])
+ channels.sort(_sortbyname)
+ buildroots = server.listBuildroots(hostID=host['id'],
+ state=[state[1] for state in koji.BR_STATES.items() if state[0] != 'EXPIRED'])
+ buildroots.sort(kojiweb.util.sortByKeyFunc('-create_event_time'))
+
+ values['host'] = host
+ values['channels'] = channels
+ values['buildroots'] = buildroots
+ values['lastUpdate'] = server.getLastHostUpdate(host['id'])
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+
+ return _genHTML(environ, 'hostinfo.chtml')
+
+def hostedit(environ, hostID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ hostID = int(hostID)
+ host = server.getHost(hostID)
+ if host == None:
+ raise koji.GenericError, 'no host with ID: %i' % hostID
+
+ form = environ['koji.form']
+
+ if form.has_key('save'):
+ arches = form['arches'].value
+ capacity = float(form['capacity'].value)
+ description = form['description'].value
+ comment = form['comment'].value
+ enabled = bool(form.has_key('enabled'))
+ channels = form.getlist('channels')
+
+ server.editHost(host['id'], arches=arches, capacity=capacity,
+ description=description, comment=comment)
+ if enabled != host['enabled']:
+ if enabled:
+ server.enableHost(host['name'])
+ else:
+ server.disableHost(host['name'])
+
+ hostChannels = [c['name'] for c in server.listChannels(hostID=host['id'])]
+ for channel in hostChannels:
+ if channel not in channels:
+ server.removeHostFromChannel(host['name'], channel)
+ for channel in channels:
+ if channel not in hostChannels:
+ server.addHostToChannel(host['name'], channel)
+
+ _redirect(environ, 'hostinfo?hostID=%i' % host['id'])
+ elif form.has_key('cancel'):
+ _redirect(environ, 'hostinfo?hostID=%i' % host['id'])
+ else:
+ values = _initValues(environ, 'Edit Host', 'hosts')
+
+ values['host'] = host
+ allChannels = server.listChannels()
+ allChannels.sort(_sortbyname)
+ values['allChannels'] = allChannels
+ values['hostChannels'] = server.listChannels(hostID=host['id'])
+
+ return _genHTML(environ, 'hostedit.chtml')
+
+def disablehost(environ, hostID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ hostID = int(hostID)
+ host = server.getHost(hostID, strict=True)
+ server.disableHost(host['name'])
+
+ _redirect(environ, 'hostinfo?hostID=%i' % host['id'])
+
+def enablehost(environ, hostID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ hostID = int(hostID)
+ host = server.getHost(hostID, strict=True)
+ server.enableHost(host['name'])
+
+ _redirect(environ, 'hostinfo?hostID=%i' % host['id'])
+
+def channelinfo(environ, channelID):
+ values = _initValues(environ, 'Channel Info', 'hosts')
+ server = _getServer(environ)
+
+ channelID = int(channelID)
+ channel = server.getChannel(channelID)
+ if channel == None:
+ raise koji.GenericError, 'invalid channel ID: %i' % channelID
+
+ values['title'] = channel['name'] + ' | Channel Info'
+
+ states = [koji.TASK_STATES[s] for s in ('FREE', 'OPEN', 'ASSIGNED')]
+ values['taskCount'] = \
+ server.listTasks(opts={'channel_id': channelID, 'state': states},
+ queryOpts={'countOnly': True})
+
+ hosts = server.listHosts(channelID=channelID)
+ hosts.sort(_sortbyname)
+
+ values['channel'] = channel
+ values['hosts'] = hosts
+
+ return _genHTML(environ, 'channelinfo.chtml')
+
+def buildrootinfo(environ, buildrootID, builtStart=None, builtOrder=None, componentStart=None, componentOrder=None):
+ values = _initValues(environ, 'Buildroot Info', 'hosts')
+ server = _getServer(environ)
+
+ buildrootID = int(buildrootID)
+ buildroot = server.getBuildroot(buildrootID)
+
+ if buildroot == None:
+ raise koji.GenericError, 'unknown buildroot ID: %i' % buildrootID
+
+ values['title'] = '%(tag_name)s-%(id)i-%(repo_id)i' % buildroot + ' | Buildroot Info'
+
+ task = server.getTaskInfo(buildroot['task_id'], request=True)
+
+ values['buildroot'] = buildroot
+ values['task'] = task
+
+ return _genHTML(environ, 'buildrootinfo.chtml')
+
+def rpmlist(environ, type, buildrootID=None, imageID=None, start=None, order='nvr'):
+ """
+ rpmlist requires a buildrootID OR an imageID to be passed in. From one
+ of these values it will paginate a list of rpms included in the
+ corresponding object. (buildroot or image)
+ """
+
+ values = _initValues(environ, 'RPM List', 'hosts')
+ server = _getServer(environ)
+
+ if buildrootID != None:
+ buildrootID = int(buildrootID)
+ buildroot = server.getBuildroot(buildrootID)
+ values['buildroot'] = buildroot
+ if buildroot == None:
+ raise koji.GenericError, 'unknown buildroot ID: %i' % buildrootID
+
+ rpms = None
+ if type == 'component':
+ rpms = kojiweb.util.paginateMethod(server, values, 'listRPMs',
+ kw={'componentBuildrootID': buildroot['id']},
+ start=start, dataName='rpms', prefix='rpm', order=order)
+ elif type == 'built':
+ rpms = kojiweb.util.paginateMethod(server, values, 'listRPMs',
+ kw={'buildrootID': buildroot['id']},
+ start=start, dataName='rpms', prefix='rpm', order=order)
+ else:
+ raise koji.GenericError, 'unrecognized type of rpmlist'
+
+ elif imageID != None:
+ imageID = int(imageID)
+ values['image'] = server.getArchive(imageID)
+ # If/When future image types are supported, add elifs here if needed.
+ if type == 'image':
+ rpms = kojiweb.util.paginateMethod(server, values, 'listRPMs',
+ kw={'imageID': imageID}, \
+ start=start, dataName='rpms', prefix='rpm', order=order)
+ else:
+ raise koji.GenericError, 'unrecognized type of image rpmlist'
+
+ else:
+ # It is an error if neither buildrootID nor imageID is provided.
+ raise koji.GenericError, 'Both buildrootID and imageID are None'
+
+ values['type'] = type
+ values['order'] = order
+
+ return _genHTML(environ, 'rpmlist.chtml')
+
+def archivelist(environ, buildrootID, type, start=None, order='filename'):
+ values = _initValues(environ, 'Archive List', 'hosts')
+ server = _getServer(environ)
+
+ buildrootID = int(buildrootID)
+ buildroot = server.getBuildroot(buildrootID)
+ if buildroot == None:
+ raise koji.GenericError, 'unknown buildroot ID: %i' % buildrootID
+
+ archives = None
+ if type == 'component':
+ archives = kojiweb.util.paginateMethod(server, values, 'listArchives', kw={'componentBuildrootID': buildroot['id']},
+ start=start, dataName='archives', prefix='archive', order=order)
+ elif type == 'built':
+ archives = kojiweb.util.paginateMethod(server, values, 'listArchives', kw={'buildrootID': buildroot['id']},
+ start=start, dataName='archives', prefix='archive', order=order)
+ else:
+ raise koji.GenericError, 'invalid type: %s' % type
+
+ values['buildroot'] = buildroot
+ values['type'] = type
+
+ values['order'] = order
+
+ return _genHTML(environ, 'archivelist.chtml')
+
+def buildtargets(environ, start=None, order='name'):
+ values = _initValues(environ, 'Build Targets', 'buildtargets')
+ server = _getServer(environ)
+
+ targets = kojiweb.util.paginateMethod(server, values, 'getBuildTargets',
+ start=start, dataName='targets', prefix='target', order=order)
+
+ values['order'] = order
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+
+ return _genHTML(environ, 'buildtargets.chtml')
+
+def buildtargetinfo(environ, targetID=None, name=None):
+ values = _initValues(environ, 'Build Target Info', 'buildtargets')
+ server = _getServer(environ)
+
+ target = None
+ if targetID != None:
+ targetID = int(targetID)
+ target = server.getBuildTarget(targetID)
+ elif name != None:
+ target = server.getBuildTarget(name)
+
+ if target == None:
+ raise koji.GenericError, 'invalid build target: %s' % (targetID or name)
+
+ values['title'] = target['name'] + ' | Build Target Info'
+
+ buildTag = server.getTag(target['build_tag'])
+ destTag = server.getTag(target['dest_tag'])
+
+ values['target'] = target
+ values['buildTag'] = buildTag
+ values['destTag'] = destTag
+ if environ['koji.currentUser']:
+ values['perms'] = server.getUserPerms(environ['koji.currentUser']['id'])
+ else:
+ values['perms'] = []
+
+ return _genHTML(environ, 'buildtargetinfo.chtml')
+
+def buildtargetedit(environ, targetID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ targetID = int(targetID)
+
+ target = server.getBuildTarget(targetID)
+ if target == None:
+ raise koji.GenericError, 'invalid build target: %s' % targetID
+
+ form = environ['koji.form']
+
+ if form.has_key('save'):
+ name = form.getfirst('name')
+ buildTagID = int(form.getfirst('buildTag'))
+ buildTag = server.getTag(buildTagID)
+ if buildTag == None:
+ raise koji.GenericError, 'invalid tag ID: %i' % buildTagID
+
+ destTagID = int(form.getfirst('destTag'))
+ destTag = server.getTag(destTagID)
+ if destTag == None:
+ raise koji.GenericError, 'invalid tag ID: %i' % destTagID
+
+ server.editBuildTarget(target['id'], name, buildTag['id'], destTag['id'])
+
+ _redirect(environ, 'buildtargetinfo?targetID=%i' % target['id'])
+ elif form.has_key('cancel'):
+ _redirect(environ, 'buildtargetinfo?targetID=%i' % target['id'])
+ else:
+ values = _initValues(environ, 'Edit Build Target', 'buildtargets')
+ tags = server.listTags()
+ tags.sort(_sortbyname)
+
+ values['target'] = target
+ values['tags'] = tags
+
+ return _genHTML(environ, 'buildtargetedit.chtml')
+
+def buildtargetcreate(environ):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ form = environ['koji.form']
+
+ if form.has_key('add'):
+ # Use getfirst() to get plain str values, since xmlrpclib doesn't
+ # know how to marshal the StringField objects returned by mod_python
+ name = form.getfirst('name')
+ buildTagID = int(form.getfirst('buildTag'))
+ destTagID = int(form.getfirst('destTag'))
+
+ server.createBuildTarget(name, buildTagID, destTagID)
+ target = server.getBuildTarget(name)
+
+ if target == None:
+ raise koji.GenericError, 'error creating build target "%s"' % name
+
+ _redirect(environ, 'buildtargetinfo?targetID=%i' % target['id'])
+ elif form.has_key('cancel'):
+ _redirect(environ, 'buildtargets')
+ else:
+ values = _initValues(environ, 'Add Build Target', 'buildtargets')
+
+ tags = server.listTags()
+ tags.sort(_sortbyname)
+
+ values['target'] = None
+ values['tags'] = tags
+
+ return _genHTML(environ, 'buildtargetedit.chtml')
+
+def buildtargetdelete(environ, targetID):
+ server = _getServer(environ)
+ _assertLogin(environ)
+
+ targetID = int(targetID)
+
+ target = server.getBuildTarget(targetID)
+ if target == None:
+ raise koji.GenericError, 'invalid build target: %i' % targetID
+
+ server.deleteBuildTarget(target['id'])
+
+ _redirect(environ, 'buildtargets')
+
+def reports(environ):
+ server = _getServer(environ)
+ values = _initValues(environ, 'Reports', 'reports')
+ return _genHTML(environ, 'reports.chtml')
+
+def buildsbyuser(environ, start=None, order='-builds'):
+ values = _initValues(environ, 'Builds by User', 'reports')
+ server = _getServer(environ)
+
+ maxBuilds = 1
+ users = server.listUsers()
+
+ server.multicall = True
+ for user in users:
+ server.listBuilds(userID=user['id'], queryOpts={'countOnly': True})
+ buildCounts = server.multiCall()
+
+ for user, [numBuilds] in zip(users, buildCounts):
+ user['builds'] = numBuilds
+ if numBuilds > maxBuilds:
+ maxBuilds = numBuilds
+
+ values['order'] = order
+
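+ # scale the bar graph so the busiest user spans the full graph width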
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxBuilds'] = maxBuilds
+ values['increment'] = graphWidth / maxBuilds
+ kojiweb.util.paginateList(values, users, start, 'userBuilds', 'userBuild', order)
+
+ return _genHTML(environ, 'buildsbyuser.chtml')
+
+def rpmsbyhost(environ, start=None, order=None, hostArch=None, rpmArch=None):
+ values = _initValues(environ, 'RPMs by Host', 'reports')
+ server = _getServer(environ)
+
+ maxRPMs = 1
+ hostArchFilter = hostArch
+ if hostArchFilter == 'ix86':
+ hostArchFilter = ['i386', 'i486', 'i586', 'i686']
+ hosts = server.listHosts(arches=hostArchFilter)
+ rpmArchFilter = rpmArch
+ if rpmArchFilter == 'ix86':
+ rpmArchFilter = ['i386', 'i486', 'i586', 'i686']
+
+ server.multicall = True
+ for host in hosts:
+ server.listRPMs(hostID=host['id'], arches=rpmArchFilter, queryOpts={'countOnly': True})
+ rpmCounts = server.multiCall()
+
+ for host, [numRPMs] in zip(hosts, rpmCounts):
+ host['rpms'] = numRPMs
+ if numRPMs > maxRPMs:
+ maxRPMs = numRPMs
+
+ values['hostArch'] = hostArch
+ hostArchList = server.getAllArches()
+ hostArchList.sort()
+ values['hostArchList'] = hostArchList
+ values['rpmArch'] = rpmArch
+ values['rpmArchList'] = hostArchList + ['noarch', 'src']
+
+ if order == None:
+ order = '-rpms'
+ values['order'] = order
+
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxRPMs'] = maxRPMs
+ values['increment'] = graphWidth / maxRPMs
+ kojiweb.util.paginateList(values, hosts, start, 'hosts', 'host', order)
+
+ return _genHTML(environ, 'rpmsbyhost.chtml')
+
+def packagesbyuser(environ, start=None, order=None):
+ values = _initValues(environ, 'Packages by User', 'reports')
+ server = _getServer(environ)
+
+ maxPackages = 1
+ users = server.listUsers()
+
+ server.multicall = True
+ for user in users:
+ server.count('listPackages', userID=user['id'], with_dups=True)
+ packageCounts = server.multiCall()
+
+ for user, [numPackages] in zip(users, packageCounts):
+ user['packages'] = numPackages
+ if numPackages > maxPackages:
+ maxPackages = numPackages
+
+ if order == None:
+ order = '-packages'
+ values['order'] = order
+
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxPackages'] = maxPackages
+ values['increment'] = graphWidth / maxPackages
+ kojiweb.util.paginateList(values, users, start, 'users', 'user', order)
+
+ return _genHTML(environ, 'packagesbyuser.chtml')
+
+def tasksbyhost(environ, start=None, order='-tasks', hostArch=None):
+ values = _initValues(environ, 'Tasks by Host', 'reports')
+ server = _getServer(environ)
+
+ maxTasks = 1
+
+ hostArchFilter = hostArch
+ if hostArchFilter == 'ix86':
+ hostArchFilter = ['i386', 'i486', 'i586', 'i686']
+
+ hosts = server.listHosts(arches=hostArchFilter)
+
+ server.multicall = True
+ for host in hosts:
+ server.listTasks(opts={'host_id': host['id']}, queryOpts={'countOnly': True})
+ taskCounts = server.multiCall()
+
+ for host, [numTasks] in zip(hosts, taskCounts):
+ host['tasks'] = numTasks
+ if numTasks > maxTasks:
+ maxTasks = numTasks
+
+ values['hostArch'] = hostArch
+ hostArchList = server.getAllArches()
+ hostArchList.sort()
+ values['hostArchList'] = hostArchList
+
+ values['order'] = order
+
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxTasks'] = maxTasks
+ values['increment'] = graphWidth / maxTasks
+ kojiweb.util.paginateList(values, hosts, start, 'hosts', 'host', order)
+
+ return _genHTML(environ, 'tasksbyhost.chtml')
+
+def tasksbyuser(environ, start=None, order='-tasks'):
+ values = _initValues(environ, 'Tasks by User', 'reports')
+ server = _getServer(environ)
+
+ maxTasks = 1
+
+ users = server.listUsers()
+
+ server.multicall = True
+ for user in users:
+ server.listTasks(opts={'owner': user['id']}, queryOpts={'countOnly': True})
+ taskCounts = server.multiCall()
+
+ for user, [numTasks] in zip(users, taskCounts):
+ user['tasks'] = numTasks
+ if numTasks > maxTasks:
+ maxTasks = numTasks
+
+ values['order'] = order
+
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxTasks'] = maxTasks
+ values['increment'] = graphWidth / maxTasks
+ kojiweb.util.paginateList(values, users, start, 'users', 'user', order)
+
+ return _genHTML(environ, 'tasksbyuser.chtml')
+
+def buildsbystatus(environ, days='7'):
+ values = _initValues(environ, 'Builds by Status', 'reports')
+ server = _getServer(environ)
+
+ days = int(days)
+ if days != -1:
+ seconds = 60 * 60 * 24 * days
+ dateAfter = time.time() - seconds
+ else:
+ dateAfter = None
+ values['days'] = days
+
+ server.multicall = True
+ # use taskID=-1 to filter out builds with a null task_id (imported rather than built in koji)
+ server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['COMPLETE'], taskID=-1, queryOpts={'countOnly': True})
+ server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['FAILED'], taskID=-1, queryOpts={'countOnly': True})
+ server.listBuilds(completeAfter=dateAfter, state=koji.BUILD_STATES['CANCELED'], taskID=-1, queryOpts={'countOnly': True})
+ [[numSucceeded], [numFailed], [numCanceled]] = server.multiCall()
+
+ values['numSucceeded'] = numSucceeded
+ values['numFailed'] = numFailed
+ values['numCanceled'] = numCanceled
+
+ maxBuilds = 1
+ for value in (numSucceeded, numFailed, numCanceled):
+ if value > maxBuilds:
+ maxBuilds = value
+
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxBuilds'] = maxBuilds
+ values['increment'] = graphWidth / maxBuilds
+
+ return _genHTML(environ, 'buildsbystatus.chtml')
+
+def buildsbytarget(environ, days='7', start=None, order='-builds'):
+ values = _initValues(environ, 'Builds by Target', 'reports')
+ server = _getServer(environ)
+
+ days = int(days)
+ if days != -1:
+ seconds = 60 * 60 * 24 * days
+ dateAfter = time.time() - seconds
+ else:
+ dateAfter = None
+ values['days'] = days
+
+ targets = {}
+ maxBuilds = 1
+
+ tasks = server.listTasks(opts={'method': 'build', 'completeAfter': dateAfter, 'decode': True})
+
+ for task in tasks:
+ targetName = task['request'][1]
+ target = targets.get(targetName)
+ if not target:
+ target = {'name': targetName}
+ targets[targetName] = target
+ builds = target.get('builds', 0) + 1
+ target['builds'] = builds
+ if builds > maxBuilds:
+ maxBuilds = builds
+
+ kojiweb.util.paginateList(values, targets.values(), start, 'targets', 'target', order)
+
+ values['order'] = order
+
+ graphWidth = 400.0
+ values['graphWidth'] = graphWidth
+ values['maxBuilds'] = maxBuilds
+ values['increment'] = graphWidth / maxBuilds
+
+ return _genHTML(environ, 'buildsbytarget.chtml')
+
+def recentbuilds(environ, user=None, tag=None, package=None):
+ values = _initValues(environ, 'Recent Build RSS')
+ server = _getServer(environ)
+
+ tagObj = None
+ if tag != None:
+ if tag.isdigit():
+ tag = int(tag)
+ tagObj = server.getTag(tag)
+
+ userObj = None
+ if user != None:
+ if user.isdigit():
+ user = int(user)
+ userObj = server.getUser(user)
+
+ packageObj = None
+ if package:
+ if package.isdigit():
+ package = int(package)
+ packageObj = server.getPackage(package)
+
+ if tagObj != None:
+ builds = server.listTagged(tagObj['id'], inherit=True, package=(packageObj and packageObj['name'] or None),
+ owner=(userObj and userObj['name'] or None))
+ builds.sort(kojiweb.util.sortByKeyFunc('-completion_time', noneGreatest=True))
+ builds = builds[:20]
+ else:
+ kwargs = {}
+ if userObj:
+ kwargs['userID'] = userObj['id']
+ if packageObj:
+ kwargs['packageID'] = packageObj['id']
+ builds = server.listBuilds(queryOpts={'order': '-completion_time', 'limit': 20}, **kwargs)
+
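+ # fetch task info for all builds in one multicall; echo(None) is queued
+ # for imported builds (no task) so results stay aligned with the builds list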
+ server.multicall = True
+ for build in builds:
+ if build['task_id']:
+ server.getTaskInfo(build['task_id'], request=True)
+ else:
+ server.echo(None)
+ tasks = server.multiCall()
+
+ server.multicall = True
+ queryOpts = {'limit': 3}
+ for build in builds:
+ if build['state'] == koji.BUILD_STATES['COMPLETE']:
+ server.getChangelogEntries(build['build_id'], queryOpts=queryOpts)
+ else:
+ server.echo(None)
+ clogs = server.multiCall()
+
+ for i in range(len(builds)):
+ task = tasks[i][0]
+ if isinstance(task, list):
+ # this is the output of server.echo(None) above
+ task = None
+ builds[i]['task'] = task
+ builds[i]['changelog'] = clogs[i][0]
+
+ values['tag'] = tagObj
+ values['user'] = userObj
+ values['package'] = packageObj
+ values['builds'] = builds
+ values['weburl'] = _getBaseURL(environ)
+
+ environ['koji.headers'].append(['Content-Type', 'text/xml'])
+ return _genHTML(environ, 'recentbuilds.chtml')
+
+_infoURLs = {'package': 'packageinfo?packageID=%(id)i',
+ 'build': 'buildinfo?buildID=%(id)i',
+ 'tag': 'taginfo?tagID=%(id)i',
+ 'target': 'buildtargetinfo?targetID=%(id)i',
+ 'user': 'userinfo?userID=%(id)i',
+ 'host': 'hostinfo?hostID=%(id)i',
+ 'rpm': 'rpminfo?rpmID=%(id)i',
+ 'maven': 'archiveinfo?archiveID=%(id)i',
+ 'win': 'archiveinfo?archiveID=%(id)i'}
+
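+# whitelist of characters accepted in search terms; anything outside this
+# set is rejected before the terms are passed to the hub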
+_VALID_SEARCH_CHARS = r"""a-zA-Z0-9"""
+_VALID_SEARCH_SYMS = r""" @.,_/\()%+-*?|[]^$"""
+_VALID_SEARCH_RE = re.compile('^[' + _VALID_SEARCH_CHARS + re.escape(_VALID_SEARCH_SYMS) + ']+$')
+
+def search(environ, start=None, order='name'):
+ values = _initValues(environ, 'Search', 'search')
+ server = _getServer(environ)
+ values['error'] = None
+
+ form = environ['koji.form']
+ if form.has_key('terms') and form['terms']:
+ terms = form['terms'].value
+ terms = terms.strip()
+ type = form['type'].value
+ match = form['match'].value
+ values['terms'] = terms
+ values['type'] = type
+ values['match'] = match
+
+ if not _VALID_SEARCH_RE.match(terms):
+ values['error'] = 'Invalid search terms<br/>' + \
+ 'Search terms may contain only these characters: ' + \
+ _VALID_SEARCH_CHARS + _VALID_SEARCH_SYMS
+ return _genHTML(environ, 'search.chtml')
+
+ if match == 'regexp':
+ try:
+ re.compile(terms)
+ except Exception:
+ values['error'] = 'Invalid regular expression'
+ return _genHTML(environ, 'search.chtml')
+
+ infoURL = _infoURLs.get(type)
+ if not infoURL:
+ raise koji.GenericError, 'unknown search type: %s' % type
+ values['infoURL'] = infoURL
+ values['order'] = order
+
+ results = kojiweb.util.paginateMethod(server, values, 'search', args=(terms, type, match),
+ start=start, dataName='results', prefix='result', order=order)
+ if not start and len(results) == 1:
+ # if we found exactly one result, skip the result list and redirect to the info page
+ # (you're feeling lucky)
+ _redirect(environ, infoURL % results[0])
+ else:
+ if type == 'maven':
+ typeLabel = 'Maven artifacts'
+ elif type == 'win':
+ typeLabel = 'Windows artifacts'
+ else:
+ typeLabel = '%ss' % type
+ values['typeLabel'] = typeLabel
+ return _genHTML(environ, 'searchresults.chtml')
+ else:
+ return _genHTML(environ, 'search.chtml')
+
+def watchlogs(environ, taskID):
+ values = _initValues(environ)
+ if isinstance(taskID, list):
+ values['tasks'] = ', '.join(taskID)
+ else:
+ values['tasks'] = taskID
+
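+ # static page shell; /koji-static/js/watchlogs.js is expected to poll the
+ # task logs and stream them into the <pre id="logs"> element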
+ html = """
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html>
+ <head>
+ <script type="text/javascript" src="/koji-static/js/watchlogs.js"></script>
+ <title>Logs for task %(tasks)s | %(siteName)s</title>
+ </head>
+ <body onload="watchLogs('logs')">
+ <pre id="logs">
+<span>Loading logs for task %(tasks)s...</span>
+ </pre>
+ </body>
+</html>
+""" % values
+ return html
diff --git a/www/kojiweb/notificationedit.chtml b/www/kojiweb/notificationedit.chtml
new file mode 100644
index 0000000..12aaf3d
--- /dev/null
+++ b/www/kojiweb/notificationedit.chtml
@@ -0,0 +1,56 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ #if $notif
+ <h4>Edit notification</h4>
+ #else
+ <h4>Create notification</h4>
+ #end if
+
+ <form action="#if $notif then 'notificationedit' else 'notificationcreate'#">
+ $util.authToken($self, form=True)
+ #if $notif
+ <input type="hidden" name="notificationID" value="$notif.id"/>
+ #end if
+ <table>
+ <tr>
+ <th>Package</th>
+ <td>
+ <select name="package">
+ <option value="all"#if $notif and not $notif.package_id then ' selected="selected"' else ''#>all</option>
+ #for $package in $packages
+ <option value="$package.package_id"#if $notif and $notif.package_id == $package.package_id then ' selected="selected"' else ''#>$package.package_name</option>
+ #end for
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>Tag</th>
+ <td>
+ <select name="tag">
+ <option value="all"#if $notif and not $notif.tag_id then ' selected="selected"' else ''#>all</option>
+ #for $tag in $tags
+ <option value="$tag.id"#if $notif and $notif.tag_id == $tag.id then ' selected="selected"' else ''#>$tag.name</option>
+ #end for
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th>Success Only?</th>
+ <td><input type="checkbox" name="success_only" value="yes"#if $notif and $notif.success_only then ' checked="checked"' else ''#/></td>
+ </tr>
+ <tr>
+ <td>
+ #if $notif
+ <button type="submit" name="save" value="Save">Save</button>
+ #else
+ <button type="submit" name="add" value="Add">Add</button>
+ #end if
+ </td>
+ <td><button type="submit" name="cancel" value="Cancel">Cancel</button></td>
+ </tr>
+ </table>
+ </form>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/packageinfo.chtml b/www/kojiweb/packageinfo.chtml
new file mode 100644
index 0000000..313bc88
--- /dev/null
+++ b/www/kojiweb/packageinfo.chtml
@@ -0,0 +1,113 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for package <a href="packageinfo?packageID=$package.id">$package.name</a></h4>
+
+ <table>
+ <tr>
+ <th>Name</th><td>$package.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$package.id</td>
+ </tr>
+
+ <tr>
+ <th id="buildlist">Builds</th>
+ <td class="container">
+ #if $len($builds) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="4">
+ #if $len($buildPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'packageinfo?buildStart=' + this.value * $buildRange + '$util.passthrough($self, 'packageID', 'buildOrder', 'tagOrder', 'tagStart')#buildlist';">
+ #for $pageNum in $buildPages
+ <option value="$pageNum"#if $pageNum == $buildCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $buildStart > 0
+ <a href="packageinfo?buildStart=#echo $buildStart - $buildRange#$util.passthrough($self, 'packageID', 'buildOrder', 'tagOrder', 'tagStart')#buildlist"><<<</a>
+ #end if
+ <strong>#echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds</strong>
+ #if $buildStart + $buildCount < $totalBuilds
+ <a href="packageinfo?buildStart=#echo $buildStart + $buildRange#$util.passthrough($self, 'packageID', 'buildOrder', 'tagOrder', 'tagStart')#buildlist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="packageinfo?buildOrder=$util.toggleOrder($self, 'nvr', 'buildOrder')$util.passthrough($self, 'packageID', 'tagOrder', 'tagStart')#buildlist">NVR</a> $util.sortImage($self, 'nvr', 'buildOrder')</th>
+ <th><a href="packageinfo?buildOrder=$util.toggleOrder($self, 'owner_name', 'buildOrder')$util.passthrough($self, 'packageID', 'tagOrder', 'tagStart')#buildlist">Built by</a> $util.sortImage($self, 'owner_name', 'buildOrder')</th>
+ <th><a href="packageinfo?buildOrder=$util.toggleOrder($self, 'completion_time', 'buildOrder')$util.passthrough($self, 'packageID', 'tagOrder', 'tagStart')#buildlist">Finished</a> $util.sortImage($self, 'completion_time', 'buildOrder')</th>
+ <th><a href="packageinfo?buildOrder=$util.toggleOrder($self, 'state', 'buildOrder')$util.passthrough($self, 'packageID', 'tagOrder', 'tagStart')#buildlist">State</a> $util.sortImage($self, 'state', 'buildOrder')</th>
+ </tr>
+ #for $build in $builds
+ <tr class="$util.rowToggle($self)">
+ <td><a href="buildinfo?buildID=$build.build_id">$build.nvr</a></td>
+ <td class="user-$build.owner_name"><a href="userinfo?userID=$build.owner_id">$build.owner_name</a></td>
+ <td>$util.formatTime($build.completion_time)</td>
+ #set $stateName = $util.stateName($build.state)
+ <td class="$stateName">$util.stateImage($build.state)</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No builds
+ #end if
+ </td>
+ </tr>
+
+ <tr>
+ <th id="taglist">Tags</th>
+ <td class="container">
+ #if $len($tags) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="4">
+ #if $len($tagPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'packageinfo?tagStart=' + this.value * $tagRange + '$util.passthrough($self, 'packageID', 'tagOrder', 'buildOrder', 'buildStart')#taglist';">
+ #for $pageNum in $tagPages
+ <option value="$pageNum"#if $pageNum == $tagCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $tagStart > 0
+ <a href="packageinfo?tagStart=#echo $tagStart - $tagRange#$util.passthrough($self, 'packageID', 'tagOrder', 'buildOrder', 'buildStart')#taglist"><<<</a>
+ #end if
+ <strong>#echo $tagStart + 1 # through #echo $tagStart + $tagCount # of $totalTags</strong>
+ #if $tagStart + $tagCount < $totalTags
+ <a href="packageinfo?tagStart=#echo $tagStart + $tagRange#$util.passthrough($self, 'packageID', 'tagOrder', 'buildOrder', 'buildStart')#taglist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="packageinfo?tagOrder=$util.toggleOrder($self, 'name', 'tagOrder')$util.passthrough($self, 'packageID', 'buildOrder', 'buildStart')#taglist">Name</a> $util.sortImage($self, 'name', 'tagOrder')</th>
+ <th><a href="packageinfo?tagOrder=$util.toggleOrder($self, 'owner_name', 'tagOrder')$util.passthrough($self, 'packageID', 'buildOrder', 'buildStart')#taglist">Owner</a> $util.sortImage($self, 'owner_name', 'tagOrder')</th>
+ <th><a href="packageinfo?tagOrder=$util.toggleOrder($self, 'blocked', 'tagOrder')$util.passthrough($self, 'packageID', 'buildOrder', 'buildStart')#taglist">Included?</a> $util.sortImage($self, 'blocked', 'tagOrder')</th>
+ <th><a href="packageinfo?tagOrder=$util.toggleOrder($self, 'extra_arches', 'tagOrder')$util.passthrough($self, 'packageID', 'buildOrder', 'buildStart')#taglist">Extra Arches</a> $util.sortImage($self, 'extra_arches', 'tagOrder')</th>
+ </tr>
+ #for $tag in $tags
+ <tr class="$util.rowToggle($self)">
+ <td><a href="taginfo?tagID=$tag.id">$tag.name</a></td>
+ <td><a href="userinfo?userID=$tag.owner_id">$tag.owner_name</a></td>
+ #set $included = $tag.blocked and 'no' or 'yes'
+ <td>$util.imageTag($included)</td>
+ <td>$tag.extra_arches</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No tags
+ #end if
+ </td>
+ </tr>
+
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/packages.chtml b/www/kojiweb/packages.chtml
new file mode 100644
index 0000000..08cae84
--- /dev/null
+++ b/www/kojiweb/packages.chtml
@@ -0,0 +1,116 @@
+#from kojiweb import util
+
+#attr _PASSTHROUGH = ['userID', 'tagID', 'order', 'prefix', 'inherited']
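+## _PASSTHROUGH names the query args that $util.passthrough()/passthrough_except()
+## re-append to links so paging and sorting state survives navigation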
+
+#include "includes/header.chtml"
+
+ <h4>Packages#if $prefix then ' starting with "%s"' % $prefix else ''##if $tag then ' in tag <a href="taginfo?tagID=%i">%s</a>' % ($tag.id, $tag.name) else ''##if $user then ' owned by <a href="userinfo?userID=%i">%s</a>' % ($user.id, $user.name) else ''#</h4>
+
+ <table class="data-list">
+ #if $tag
+ <tr>
+ <td colspan="5">
+ <table class="nested">
+ <tr><td>
+ <strong>Inherited</strong>:
+ </td><td>
+ <select name="inherited" class="filterlist" onchange="javascript: window.location = 'packages?inherited=' + this.value + '$util.passthrough_except($self, 'inherited')';">
+ <option value="1" #if $inherited then 'selected="selected"' else ''#>yes</option>
+ <option value="0" #if not $inherited then 'selected="selected"' else ''#>no</option>
+ </select>
+ </td></tr>
+ </table>
+ </td>
+ </tr>
+ #end if
+ <tr>
+ <td class="charlist" colspan="#if $tag or $user then '5' else '2'#">
+ #for $char in $chars
+ #if $prefix == $char
+ <strong>$char</strong>
+ #else
+ <a href="packages?prefix=$char$util.passthrough($self, 'userID', 'tagID', 'order', 'inherited')">$char</a>
+ #end if
+ |
+ #end for
+ #if $prefix
+ <a href="packages?${util.passthrough($self, 'userID', 'tagID', 'order', 'inherited')[1:]}">all</a>
+ #else
+ <strong>all</strong>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="#if $tag or $user then '5' else '2'#">
+ #if $len($packagePages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'packages?start=' + this.value * $packageRange + '$util.passthrough($self, 'userID', 'tagID', 'order', 'prefix', 'inherited')';">
+ #for $pageNum in $packagePages
+ <option value="$pageNum"#if $pageNum == $packageCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $packageStart > 0
+ <a href="packages?start=#echo $packageStart - $packageRange #$util.passthrough($self, 'userID', 'tagID', 'order', 'prefix', 'inherited')"><<<</a>
+ #end if
+ #if $totalPackages != 0
+ <strong>Packages #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages</strong>
+ #end if
+ #if $packageStart + $packageCount < $totalPackages
+ <a href="packages?start=#echo $packageStart + $packageRange#$util.passthrough($self, 'userID', 'tagID', 'order', 'prefix', 'inherited')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="packages?order=$util.toggleOrder($self, 'package_id')$util.passthrough($self, 'userID', 'tagID', 'prefix', 'inherited')">ID</a> $util.sortImage($self, 'package_id')</th>
+ <th><a href="packages?order=$util.toggleOrder($self, 'package_name')$util.passthrough($self, 'userID', 'tagID', 'prefix', 'inherited')">Name</a> $util.sortImage($self, 'package_name')</th>
+ #if $tag or $user
+ <th><a href="packages?order=$util.toggleOrder($self, 'tag_name')$util.passthrough($self, 'userID', 'tagID', 'prefix', 'inherited')">Tag</a> $util.sortImage($self, 'tag_name')</th>
+ <th><a href="packages?order=$util.toggleOrder($self, 'owner_name')$util.passthrough($self, 'userID', 'tagID', 'prefix', 'inherited')">Owner</a> $util.sortImage($self, 'owner_name')</th>
+ <th><a href="packages?order=$util.toggleOrder($self, 'blocked')$util.passthrough($self, 'userID', 'tagID', 'prefix', 'inherited')">Included?</a> $util.sortImage($self, 'blocked')</th>
+ #end if
+ </tr>
+ #if $len($packages) > 0
+ #for $package in $packages
+ <tr class="$util.rowToggle($self)">
+ <td>$package.package_id</td>
+ <td><a href="packageinfo?packageID=$package.package_id">$package.package_name</a></td>
+ #if $tag or $user
+ <td><a href="taginfo?tagID=$package.tag_id">$package.tag_name</a></td>
+ <td class="user-$package.owner_name"><a href="userinfo?userID=$package.owner_id">$package.owner_name</a></td>
+ <td class="$str(not $package.blocked).lower()">#if $package.blocked then $util.imageTag('no') else $util.imageTag('yes')#</td>
+ #end if
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="#if $tag or $user then '5' else '2'#">No packages</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="#if $tag or $user then '5' else '2'#">
+ #if $len($packagePages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'packages?start=' + this.value * $packageRange + '$util.passthrough($self, 'userID', 'tagID', 'order', 'prefix', 'inherited')';">
+ #for $pageNum in $packagePages
+ <option value="$pageNum"#if $pageNum == $packageCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $packageStart > 0
+ <a href="packages?start=#echo $packageStart - $packageRange #$util.passthrough($self, 'userID', 'tagID', 'order', 'prefix', 'inherited')"><<<</a>
+ #end if
+ #if $totalPackages != 0
+ <strong>Packages #echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages</strong>
+ #end if
+ #if $packageStart + $packageCount < $totalPackages
+ <a href="packages?start=#echo $packageStart + $packageRange#$util.passthrough($self, 'userID', 'tagID', 'order', 'prefix', 'inherited')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/packagesbyuser.chtml b/www/kojiweb/packagesbyuser.chtml
new file mode 100644
index 0000000..c4e5713
--- /dev/null
+++ b/www/kojiweb/packagesbyuser.chtml
@@ -0,0 +1,73 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Packages by User</h4>
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($userPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'packagesbyuser?start=' + this.value * $userRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $userPages
+ <option value="$pageNum"#if $pageNum == $userCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userStart > 0
+ <a href="packagesbyuser?start=#echo $userStart - $userRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalUsers != 0
+ <strong>Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers</strong>
+ #end if
+ #if $userStart + $userCount < $totalUsers
+ <a href="packagesbyuser?start=#echo $userStart + $userRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="packagesbyuser?order=$util.toggleOrder($self, 'name')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="packagesbyuser?order=$util.toggleOrder($self, 'packages')">Packages</a> $util.sortImage($self, 'packages')</th>
+ <th> </th>
+ </tr>
+ #if $len($users) > 0
+ #for $user in $users
+ <tr class="$util.rowToggle($self)">
+ <td><a href="userinfo?userID=$user.id">$user.name</a></td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $user.packages#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$user.packages</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="3">No users</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($userPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'packagesbyuser?start=' + this.value * $userRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $userPages
+ <option value="$pageNum"#if $pageNum == $userCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userStart > 0
+ <a href="packagesbyuser?start=#echo $userStart - $userRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalUsers != 0
+ <strong>Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers</strong>
+ #end if
+ #if $userStart + $userCount < $totalUsers
+ <a href="packagesbyuser?start=#echo $userStart + $userRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/recentbuilds.chtml b/www/kojiweb/recentbuilds.chtml
new file mode 100644
index 0000000..f1688e7
--- /dev/null
+++ b/www/kojiweb/recentbuilds.chtml
@@ -0,0 +1,54 @@
+#import koji
+#import koji.util
+#from kojiweb import util
+
+#def linkURL()
+ #set $query = []
+ #if $tag
+ #silent $query.append('tagID=%i' % $tag.id)
+ #end if
+ #if $user
+ #silent $query.append('userID=%i' % $user.id)
+ #end if
+ #if $package
+ #silent $query.append('packageID=%i' % $package.id)
+ #end if
+ #if $query
+ #echo '%s/%s?%s' % ($weburl, 'builds', '&'.join($query))
+ #else
+ #echo '%s/%s' % ($weburl, 'builds')
+ #end if
+#end def
+
+<rss version="2.0">
+ <channel>
+ <title>$siteName: recent builds#if $package then ' of package ' + $package.name else ''##if $tag then ' into tag ' + $tag.name else ''##if $user then ' by user ' + $user.name else ''#</title>
+ <link>$linkURL()</link>
+ <description>
+ A list of the most recent builds
+ #if $package
+ of package $package.name
+ #end if
+ #if $tag
+ into tag $tag.name
+ #end if
+ #if $user
+ by user $user.name
+ #end if
+ in the $siteName Build System. The list is sorted in reverse chronological order by build completion time.
+ </description>
+ <pubDate>$util.formatTimeRSS($currentDate)</pubDate>
+ #for $build in $builds
+ <item>
+ <title>$koji.BUILD_STATES[$build.state].lower(): $koji.buildLabel($build)#if $build.task then ', target: ' + $build.task.request[1] else ''#</title>
+ <link>$weburl/buildinfo?buildID=$build.build_id</link>
+ #if $build.completion_time
+ <pubDate>$util.formatTimeRSS($build.completion_time)</pubDate>
+ #end if
+ #if $build.state == $koji.BUILD_STATES['COMPLETE'] and $build.changelog
+ <description><pre>$util.escapeHTML($koji.util.formatChangelog($build.changelog))</pre></description>
+ #end if
+ </item>
+ #end for
+ </channel>
+</rss>
diff --git a/www/kojiweb/reports.chtml b/www/kojiweb/reports.chtml
new file mode 100644
index 0000000..67184f9
--- /dev/null
+++ b/www/kojiweb/reports.chtml
@@ -0,0 +1,17 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Reports</h4>
+
+ <ul>
+ <li><a href="packagesbyuser">Number of packages owned by each user</a></li>
+ <li><a href="buildsbyuser">Number of builds submitted by each user</a></li>
+ <li><a href="rpmsbyhost">RPMs built by each host</a></li>
+ <li><a href="tasksbyuser">Tasks submitted by each user</a></li>
+ <li><a href="tasksbyhost">Tasks completed by each host</a></li>
+ <li><a href="buildsbystatus">Succeeded/failed/canceled builds</a></li>
+ <li><a href="buildsbytarget">Number of builds in each target</a></li>
+ </ul>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/rpminfo.chtml b/www/kojiweb/rpminfo.chtml
new file mode 100644
index 0000000..83e4e4d
--- /dev/null
+++ b/www/kojiweb/rpminfo.chtml
@@ -0,0 +1,224 @@
+#import koji
+#from kojiweb import util
+#import time
+#import urllib
+
+#attr _PASSTHROUGH = ['rpmID', 'fileOrder', 'fileStart', 'buildrootOrder', 'buildrootStart']
+
+#include "includes/header.chtml"
+ #set $epoch = ($rpm.epoch != None and $str($rpm.epoch) + ':' or '')
+ <h4>Information for RPM <a href="rpminfo?rpmID=$rpm.id">$rpm.name-$epoch$rpm.version-$rpm.release.${rpm.arch}.rpm</a></h4>
+
+ <table>
+ <tr>
+ <th>ID</th><td>$rpm.id</td>
+ </tr>
+ <tr>
+ #if $build
+ <th>Name</th><td><a href="packageinfo?packageID=$build.package_id">$rpm.name</a></td>
+ #else
+ <th>Name</th><td>$rpm.name</td>
+ #end if
+ </tr>
+ <tr>
+ #if $build
+ <th>Version</th><td><a href="buildinfo?buildID=$build.id">$rpm.version</a></td>
+ #else
+ <th>Version</th><td>$rpm.version</td>
+ #end if
+ </tr>
+ <tr>
+ <th>Release</th><td>$rpm.release</td>
+ </tr>
+ <tr>
+ <th>Epoch</th><td>$rpm.epoch</td>
+ </tr>
+ <tr>
+ <th>Arch</th><td>$rpm.arch</td>
+ </tr>
+ #if $rpm.external_repo_id == 0
+ <tr>
+ <th>Summary</th><td class="rpmheader">$util.escapeHTML($summary)</td>
+ </tr>
+ <tr>
+ <th>Description</th><td class="rpmheader">$util.escapeHTML($description)</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Build Time</th><td>$time.strftime('%Y-%m-%d %H:%M:%S', $time.gmtime($rpm.buildtime)) GMT</td>
+ </tr>
+ #if $build and $build.state == $koji.BUILD_STATES.DELETED
+ <tr>
+ <th>State</th><td class="deleted">deleted</td>
+ </tr>
+ #end if
+ #if $rpm.external_repo_id
+ <tr>
+ <th>External Repository</th><td><a href="externalrepoinfo?extrepoID=$rpm.external_repo_id">$rpm.external_repo_name</a></td>
+ </tr>
+ #end if
+ <tr>
+ <th>Size</th><td>$rpm.size</td>
+ </tr>
+ <tr>
+ <th>Payload Hash</th><td>$rpm.payloadhash</td>
+ </tr>
+ #if $builtInRoot
+ <tr>
+ <th>Buildroot</th><td><a href="buildrootinfo?buildrootID=$builtInRoot.id">$builtInRoot.tag_name-$builtInRoot.id-$builtInRoot.repo_id</a></td>
+ </tr>
+ #end if
+ #if $rpm.external_repo_id == 0
+ <tr>
+ <th>Provides</th>
+ <td class="container">
+ #if $len($provides) > 0
+ <table class="nested">
+ #for $dep in $provides
+ <tr>
+ <td>$util.escapeHTML($util.formatDep($dep.name, $dep.version, $dep.flags))</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No Provides
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Requires</th>
+ <td class="container">
+ #if $len($requires) > 0
+ <table class="nested">
+ #for $dep in $requires
+ <tr>
+ <td>$util.escapeHTML($util.formatDep($dep.name, $dep.version, $dep.flags))</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No Requires
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Obsoletes</th>
+ <td class="container">
+ #if $len($obsoletes) > 0
+ <table class="nested">
+ #for $dep in $obsoletes
+ <tr>
+ <td>$util.escapeHTML($util.formatDep($dep.name, $dep.version, $dep.flags))</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No Obsoletes
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Conflicts</th>
+ <td class="container">
+ #if $len($conflicts) > 0
+ <table class="nested">
+ #for $dep in $conflicts
+ <tr>
+ <td>$util.escapeHTML($util.formatDep($dep.name, $dep.version, $dep.flags))</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No Conflicts
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th id="filelist">Files</th>
+ <td class="container">
+ #if $len($files) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($filePages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'rpminfo?fileStart=' + this.value * $fileRange + '$util.passthrough_except($self, 'fileStart')#filelist';">
+ #for $pageNum in $filePages
+ <option value="$pageNum"#if $pageNum == $fileCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $fileStart > 0
+ <a href="rpminfo?fileStart=#echo $fileStart - $fileRange #$util.passthrough_except($self, 'fileStart')#filelist"><<<</a>
+ #end if
+ <strong>#echo $fileStart + 1 # through #echo $fileStart + $fileCount # of $totalFiles</strong>
+ #if $fileStart + $fileCount < $totalFiles
+ <a href="rpminfo?fileStart=#echo $fileStart + $fileRange#$util.passthrough_except($self, 'fileStart')#filelist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="rpminfo?fileOrder=$util.toggleOrder($self, 'name', 'fileOrder')$util.passthrough_except($self, 'fileOrder', 'fileStart')#filelist">Name</a> $util.sortImage($self, 'name', 'fileOrder')</th>
+ <th><a href="rpminfo?fileOrder=$util.toggleOrder($self, 'size', 'fileOrder')$util.passthrough_except($self, 'fileOrder', 'fileStart')#filelist">Size</a> $util.sortImage($self, 'size', 'fileOrder')</th>
+ </tr>
+ #for $file in $files
+ <tr class="$util.rowToggle($self)">
+ <td><a href="fileinfo?rpmID=$rpm.id&filename=$urllib.quote($file.name)">$util.escapeHTML($file.name)</a></td><td>$file.size</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No Files
+ #end if
+ </td>
+ </tr>
+ #end if
+ <tr>
+ <th id="buildrootlist">Component of</th>
+ <td class="container">
+ #if $len($buildroots) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($buildrootPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'rpminfo?buildrootStart=' + this.value * $buildrootRange + '$util.passthrough_except($self, 'buildrootStart')#buildrootlist';">
+ #for $pageNum in $buildrootPages
+ <option value="$pageNum"#if $pageNum == $buildrootCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $buildrootStart > 0
+ <a href="rpminfo?buildrootStart=#echo $buildrootStart - $buildrootRange #$util.passthrough_except($self, 'buildrootStart')#buildrootlist"><<<</a>
+ #end if
+ <strong>#echo $buildrootStart + 1 # through #echo $buildrootStart + $buildrootCount # of $totalBuildroots</strong>
+ #if $buildrootStart + $buildrootCount < $totalBuildroots
+ <a href="rpminfo?buildrootStart=#echo $buildrootStart + $buildrootRange#$util.passthrough_except($self, 'buildrootStart')#buildrootlist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="rpminfo?buildrootOrder=$util.toggleOrder($self, 'id', 'buildrootOrder')$util.passthrough_except($self, 'buildrootOrder', 'buildrootStart')#buildrootlist">Buildroot</a> $util.sortImage($self, 'id', 'buildrootOrder')</th>
+ <th><a href="rpminfo?buildrootOrder=$util.toggleOrder($self, 'create_event_time', 'buildrootOrder')$util.passthrough_except($self, 'buildrootOrder', 'buildrootStart')#buildrootlist">Created</a> $util.sortImage($self, 'create_event_time', 'buildrootOrder')</th>
+ <th><a href="rpminfo?buildrootOrder=$util.toggleOrder($self, 'state', 'buildrootOrder')$util.passthrough_except($self, 'buildrootOrder', 'buildrootStart')#buildrootlist">State</a> $util.sortImage($self, 'state', 'buildrootOrder')</th>
+ </tr>
+ #for $buildroot in $buildroots
+ <tr class="$util.rowToggle($self)">
+ <td><a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></td>
+ <td>$util.formatTime($buildroot.create_event_time)</td>
+ <td>$util.imageTag($util.brStateName($buildroot.state))</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No Buildroots
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/rpmlist.chtml b/www/kojiweb/rpmlist.chtml
new file mode 100644
index 0000000..71f4b32
--- /dev/null
+++ b/www/kojiweb/rpmlist.chtml
@@ -0,0 +1,112 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
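+## getID() emits the query argument for the current list type; the #slurp
+## directives strip trailing newlines so the fragment stays on one line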
+#def getID()
+ #if $type == 'image'
+imageID=$image.id #slurp
+ #else
+buildrootID=$buildroot.id #slurp
+ #end if
+#end def
+
+#def getColspan()
+ #if $type == 'component'
+colspan="3" #slurp
+ #elif $type == 'image'
+colspan="2" #slurp
+ #else
+ #pass
+ #end if
+#end def
+
+ #if $type == 'component'
+ <h4>Component RPMs of buildroot <a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></h4>
+ #elif $type == 'image'
+ <h4>RPMs installed in <a href="archiveinfo?archiveID=$image.id">$image.filename</a></h4>
+ #else
+ <h4>RPMs built in buildroot <a href="buildrootinfo?buildrootID=$buildroot.id">$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a></h4>
+ #end if
+
+ <table class="data-list">
+ <tr>
+ <td class="paginate" $getColspan()>
+ #if $len($rpmPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'rpmlist?$getID()&start=' + this.value * $rpmRange + '$util.passthrough($self, 'order', 'type')';">
+ #for $pageNum in $rpmPages
+ <option value="$pageNum"#if $pageNum == $rpmCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $rpmStart > 0
+ <a href="rpmlist?$getID()&start=#echo $rpmStart - $rpmRange #$util.passthrough($self, 'order', 'type')"><<<</a>
+ #end if
+ #if $totalRpms != 0
+ <strong>RPMs #echo $rpmStart + 1 # through #echo $rpmStart + $rpmCount # of $totalRpms</strong>
+ #end if
+ #if $rpmStart + $rpmCount < $totalRpms
+ <a href="rpmlist?$getID()&start=#echo $rpmStart + $rpmRange#$util.passthrough($self, 'order', 'type')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="rpmlist?$getID()&order=$util.toggleOrder($self, 'nvr')$util.passthrough($self, 'type')">NVR</a> $util.sortImage($self, 'nvr')</th>
+ #if $type in ['component', 'image']
+ <th><a href="rpmlist?$getID()&order=$util.toggleOrder($self, 'external_repo_name')$util.passthrough($self, 'type')">Origin</a> $util.sortImage($self, 'external_repo_name')</th>
+ #end if
+ #if $type == 'component'
+ <th><a href="rpmlist?$getID()&order=$util.toggleOrder($self, 'is_update')$util.passthrough($self, 'type')">Update?</a> $util.sortImage($self, 'is_update')</th>
+ #end if
+ </tr>
+ #if $len($rpms) > 0
+ #for $rpm in $rpms
+ <tr class="$util.rowToggle($self)">
+ #set $epoch = ($rpm.epoch != None and $str($rpm.epoch) + ':' or '')
+ <td><a href="rpminfo?rpmID=$rpm.id">$rpm.name-$epoch$rpm.version-$rpm.release.${rpm.arch}.rpm</a></td>
+ #if $type in ['component', 'image']
+ #if $rpm.external_repo_id == 0
+ <td>internal</td>
+ #else
+ <td><a href="externalrepoinfo?extrepoID=$rpm.external_repo_id">$rpm.external_repo_name</a></td>
+ #end if
+ #end if
+ #if $type == 'component'
+ #set $update = $rpm.is_update and 'yes' or 'no'
+ <td class="$update">$util.imageTag($update)</td>
+ #end if
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td $getColspan()>No RPMs</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" $getColspan()>
+ #if $len($rpmPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'rpmlist?$getID()&start=' + this.value * $rpmRange + '$util.passthrough($self, 'order', 'type')';">
+ #for $pageNum in $rpmPages
+ <option value="$pageNum"#if $pageNum == $rpmCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $rpmStart > 0
+ <a href="rpmlist?$getID()&start=#echo $rpmStart - $rpmRange #$util.passthrough($self, 'order', 'type')"><<<</a>
+ #end if
+ #if $totalRpms != 0
+ <strong>RPMs #echo $rpmStart + 1 # through #echo $rpmStart + $rpmCount # of $totalRpms</strong>
+ #end if
+ #if $rpmStart + $rpmCount < $totalRpms
+ <a href="rpmlist?$getID()&start=#echo $rpmStart + $rpmRange#$util.passthrough($self, 'order', 'type')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/rpmsbyhost.chtml b/www/kojiweb/rpmsbyhost.chtml
new file mode 100644
index 0000000..2c63368
--- /dev/null
+++ b/www/kojiweb/rpmsbyhost.chtml
@@ -0,0 +1,105 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>#if $rpmArch then $rpmArch + ' ' else ''#RPMs by Host#if $hostArch then ' (%s)' % $hostArch else ''#</h4>
+ <table class="data-list">
+ <tr>
+ <td class="archlist" colspan="3">
+ <strong>Host arch:</strong> #for $arch in $hostArchList
+ #if $arch == $hostArch
+ <strong>$arch</strong> |
+ #else
+ <a href="rpmsbyhost?hostArch=$arch$util.passthrough($self, 'order', 'rpmArch')">$arch</a> |
+ #end if
+ #end for
+ #if $hostArch
+ <a href="rpmsbyhost?${util.passthrough($self, 'order', 'rpmArch')[1:]}">all</a>
+ #else
+ <strong>all</strong>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <td class="archlist" colspan="3">
+ <strong>RPM arch:</strong> #for $arch in $rpmArchList
+ #if $arch == $rpmArch
+ <strong>$arch</strong> |
+ #else
+ <a href="rpmsbyhost?rpmArch=$arch$util.passthrough($self, 'order', 'hostArch')">$arch</a> |
+ #end if
+ #end for
+ #if $rpmArch
+ <a href="rpmsbyhost?${util.passthrough($self, 'order', 'hostArch')[1:]}">all</a>
+ #else
+ <strong>all</strong>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($hostPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'rpmsbyhost?start=' + this.value * $hostRange + '$util.passthrough($self, 'order', 'hostArch', 'rpmArch')';">
+ #for $pageNum in $hostPages
+ <option value="$pageNum"#if $pageNum == $hostCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $hostStart > 0
+ <a href="rpmsbyhost?start=#echo $hostStart - $hostRange #$util.passthrough($self, 'order', 'hostArch', 'rpmArch')"><<<</a>
+ #end if
+ #if $totalHosts != 0
+ <strong>Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts</strong>
+ #end if
+ #if $hostStart + $hostCount < $totalHosts
+ <a href="rpmsbyhost?start=#echo $hostStart + $hostRange#$util.passthrough($self, 'order', 'hostArch', 'rpmArch')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="rpmsbyhost?order=$util.toggleOrder($self, 'name')$util.passthrough($self, 'hostArch', 'rpmArch')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="rpmsbyhost?order=$util.toggleOrder($self, 'rpms')$util.passthrough($self, 'hostArch', 'rpmArch')">RPMs</a> $util.sortImage($self, 'rpms')</th>
+ <th> </th>
+ </tr>
+ #if $len($hosts) > 0
+ #for $host in $hosts
+ <tr class="$util.rowToggle($self)">
+ <td><a href="hostinfo?hostID=$host.id">$host.name</a></td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $host.rpms#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$host.rpms</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="3">No hosts</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($hostPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'rpmsbyhost?start=' + this.value * $hostRange + '$util.passthrough($self, 'order', 'hostArch', 'rpmArch')';">
+ #for $pageNum in $hostPages
+ <option value="$pageNum"#if $pageNum == $hostCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $hostStart > 0
+ <a href="rpmsbyhost?start=#echo $hostStart - $hostRange #$util.passthrough($self, 'order', 'hostArch', 'rpmArch')"><<<</a>
+ #end if
+ #if $totalHosts != 0
+ <strong>Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts</strong>
+ #end if
+ #if $hostStart + $hostCount < $totalHosts
+ <a href="rpmsbyhost?start=#echo $hostStart + $hostRange#$util.passthrough($self, 'order', 'hostArch', 'rpmArch')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/search.chtml b/www/kojiweb/search.chtml
new file mode 100644
index 0000000..8749dad
--- /dev/null
+++ b/www/kojiweb/search.chtml
@@ -0,0 +1,48 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Search</h4>
+
+ <form action="search">
+ <table>
+ #if $error
+ <tr><td colspan="3" class="error">$error</td></tr>
+ #end if
+ <tr>
+ <th>Search</th>
+ <td><input type="text" name="terms"/></td>
+ <td>
+ <select name="type">
+ <option value="package">Packages</option>
+ <option value="build">Builds</option>
+ <option value="tag">Tags</option>
+ <option value="target">Build Targets</option>
+ <option value="user">Users</option>
+ <option value="host">Hosts</option>
+ <option value="rpm">RPMs</option>
+ #if $mavenEnabled
+ <option value="maven">Maven Artifacts</option>
+ #end if
+ #if $winEnabled
+ <option value="win">Windows Artifacts</option>
+ #end if
+ </select>
+ </td>
+ </tr>
+ <tr>
+ <th> </th>
+ <td colspan="2">
+ <input type="radio" name="match" value="glob" id="radioglob" checked="checked"/><abbr title="? will match any single character, * will match any sequence of zero or more characters" id="abbrglob">glob</abbr>
+ <input type="radio" name="match" value="regexp" id="radioregexp"/><abbr title="full POSIX regular expressions" id="abbrregexp">regexp</abbr>
+ <input type="radio" name="match" value="exact" id="radioexact"/><abbr title="exact matches only" id="abbrexact">exact</abbr>
+ </td>
+ </tr>
+ <tr>
+ <th> </th>
+ <td colspan="2"><input type="submit" value="Search"/></td>
+ </tr>
+ </table>
+ </form>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/searchresults.chtml b/www/kojiweb/searchresults.chtml
new file mode 100644
index 0000000..e0c4ece
--- /dev/null
+++ b/www/kojiweb/searchresults.chtml
@@ -0,0 +1,75 @@
+#from kojiweb import util
+#import urllib
+
+#include "includes/header.chtml"
+
+ <h4>Search Results for $typeLabel matching "$terms"</h4>
+
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($resultPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'search?start=' + this.value * $resultRange + '$util.passthrough($self, 'order', 'terms', 'type', 'match')';">
+ #for $pageNum in $resultPages
+ <option value="$pageNum"#if $pageNum == $resultCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $resultStart > 0
+ <a href="search?start=#echo $resultStart - $resultRange #$util.passthrough($self, 'order', 'terms', 'type', 'match')"><<<</a>
+ #end if
+ #if $totalResults != 0
+ <strong>Results #echo $resultStart + 1 # through #echo $resultStart + $resultCount # of $totalResults</strong>
+ #end if
+ #if $resultStart + $resultCount < $totalResults
+ <a href="search?start=#echo $resultStart + $resultRange#$util.passthrough($self, 'order', 'terms', 'type', 'match')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="search?order=$util.toggleOrder($self, 'id')$util.passthrough($self, 'terms', 'type', 'match')">ID</a> $util.sortImage($self, 'id')</th>
+ <th><a href="search?order=$util.toggleOrder($self, 'name')$util.passthrough($self, 'terms', 'type', 'match')">Name</a> $util.sortImage($self, 'name')</th>
+ </tr>
+ #if $len($results) > 0
+ #for $result in $results
+ <tr class="$util.rowToggle($self)">
+ <td>$result.id</td>
+ #set $quoted = $result.copy()
+ #silent $quoted['name'] = $urllib.quote($quoted['name'])
+ <td><a href="${infoURL % $quoted}">$result.name</a></td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="2">No search results</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($resultPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'search?start=' + this.value * $resultRange + '$util.passthrough($self, 'order', 'terms', 'type', 'match')';">
+ #for $pageNum in $resultPages
+ <option value="$pageNum"#if $pageNum == $resultCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $resultStart > 0
+ <a href="search?start=#echo $resultStart - $resultRange #$util.passthrough($self, 'order', 'terms', 'type', 'match')"><<<</a>
+ #end if
+ #if $totalResults != 0
+ <strong>Results #echo $resultStart + 1 # through #echo $resultStart + $resultCount # of $totalResults</strong>
+ #end if
+ #if $resultStart + $resultCount < $totalResults
+ <a href="search?start=#echo $resultStart + $resultRange#$util.passthrough($self, 'order', 'terms', 'type', 'match')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/tagedit.chtml b/www/kojiweb/tagedit.chtml
new file mode 100644
index 0000000..920e3a1
--- /dev/null
+++ b/www/kojiweb/tagedit.chtml
@@ -0,0 +1,65 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ #if $tag
+ <h4>Edit tag $tag.name</h4>
+ #else
+ <h4>Create tag</h4>
+ #end if
+
+ <form action="#if $tag then 'tagedit' else 'tagcreate'#">
+ $util.authToken($self, form=True)
+ <table>
+ <tr>
+ <th>Name</th>
+ <td>
+ <input type="text" name="name" value="#if $tag then $tag.name else ''#"/>
+ #if $tag
+ <input type="hidden" name="tagID" value="$tag.id"/>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Arches</th>
+ <td><input type="text" name="arches" value="#if $tag then $tag.arches else ''#"/></td>
+ </tr>
+ <tr>
+ <th>Locked</th>
+ <td><input type="checkbox" name="locked" value="yes" #if $tag and $tag.locked then 'checked="checked"' else ''#/></td>
+ </tr>
+ <tr>
+ <th>Permission</th>
+ <td>
+ <select name="permission">
+ <option value="none" #if $tag and not $tag.perm_id then 'selected="selected"' else ''#>none</option>
+ #for $permission in $permissions
+ <option value="$permission.id" #if $tag and $tag.perm_id == $permission.id then 'selected="selected"' else ''#>$permission.name</option>
+ #end for
+ </select>
+ </td>
+ </tr>
+ #if $mavenEnabled
+ <tr>
+ <th>Maven Support?</th>
+ <td><input type="checkbox" name="maven_support" value="yes" #if $tag and $tag.maven_support then 'checked="checked"' else ''#>
+ </tr>
+ <tr>
+ <th>Include All Maven Builds?</th>
+ <td><input type="checkbox" name="maven_include_all" value="yes" #if $tag and $tag.maven_include_all then 'checked="checked"' else ''#>
+ </tr>
+ #end if
+ <tr>
+ <td>
+ #if $tag
+ <button type="submit" name="save" value="Save">Save</button>
+ #else
+ <button type="submit" name="add" value="Add">Add</button>
+ #end if
+ </td>
+ <td><button type="submit" name="cancel" value="Cancel">Cancel</button></td>
+ </tr>
+ </table>
+ </form>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/taginfo.chtml b/www/kojiweb/taginfo.chtml
new file mode 100644
index 0000000..1298996
--- /dev/null
+++ b/www/kojiweb/taginfo.chtml
@@ -0,0 +1,170 @@
+#from kojiweb import util
+#import pprint
+
+#include "includes/header.chtml"
+
+ <h4>Information for tag <a href="taginfo?tagID=$tag.id">$tag.name</a></h4>
+
+ <table>
+ #if $child and 'admin' in $perms
+ <tr>
+ <th colspan="2"><a href="tagparent?tagID=$child.id&parentID=$tag.id&action=add$util.authToken($self)">Add $tag.name as parent of $child.name</a></th>
+ </tr>
+ #end if
+ <tr>
+ <th>Name</th><td>$tag.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$tag.id</td>
+ </tr>
+ <tr>
+ <th>Arches</th><td>$tag.arches</td>
+ </tr>
+ <tr>
+ <th>Locked</th><td class="$str(not $tag.locked).lower()">#if $tag.locked then 'yes' else 'no'#</td>
+ </tr>
+ <tr>
+ <th>Permission</th><td>#if $tag.perm_id then $allPerms[$tag.perm_id] else 'none'#</td>
+ </tr>
+ #if $mavenEnabled
+ <tr>
+ <th>Maven Support?</th><td class="$str($tag.maven_support).lower()">#if $tag.maven_support then 'yes' else 'no'#</td>
+ </tr>
+ <tr>
+ <th>Include All Maven Builds?</th><td class="$str($tag.maven_include_all).lower()">#if $tag.maven_include_all then 'yes' else 'no'#</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Inheritance</th>
+ <td class="tree">
+ <span class="root">$tag.name</span>
+ #set $numParents = $len($inheritance)
+ #set $iter = 0
+ #set $maxDepth = 0
+ #set $TRUNC_DEPTH = 7
+ <ul>
+ #for $parent in $inheritance
+ #set $iter += 1
+ #set $nextDepth = ($iter < $numParents and $inheritance[$iter].currdepth or 1)
+ #set $depth = $parent.currdepth
+ #if $depth > $maxDepth
+ #set $maxDepth = $depth
+ #end if
+ #if $depth == $TRUNC_DEPTH and not $all
+ <li><span class="treeBranch"><span class="treeToggle treeLabel">...</span></span></li>
+ <li class="hidden">
+ #else if $len($tagsByChild[$parent.child_id]) > 1
+ <li class="sibling">
+ #else
+ <li>
+ #end if
+ #silent $tagsByChild[$parent.child_id].pop()
+ <span class="treeBranch">
+ <span class="treeLabel">
+ <a href="taginfo?tagID=$parent.parent_id">$parent.name</a>
+ #if $depth == 1 and 'admin' in $perms
+ <span class="treeLink">(<a href="tagparent?tagID=$tag.id&parentID=$parent.parent_id&action=edit$util.authToken($self)">edit</a>) (<a href="tagparent?tagID=$tag.id&parentID=$parent.parent_id&action=remove$util.authToken($self)">remove</a>)</span>
+ #end if
+ </span>
+ </span>
+ #if $nextDepth > $depth
+ <ul>
+ #else
+ </li>
+ #end if
+ #while $nextDepth < $depth
+ </ul>
+ </li>
+ #set $depth -= 1
+ #end while
+ #end for
+ </ul>
+ </td>
+ </tr>
+ #if $maxDepth >= $TRUNC_DEPTH
+ <tr>
+ <td colspan="2">
+ #if $all
+ <a href="taginfo?tagID=$tag.id$util.passthrough($self, 'inherited')">Show abbreviated tree</a>
+ #else
+ <a href="taginfo?tagID=$tag.id$util.passthrough($self, 'inherited')&all=1">Show full tree</a>
+ #end if
+ </td>
+ </tr>
+ #end if
+ #if 'admin' in $perms
+ <tr>
+ <td colspan="2"><a href="tags?childID=$tag.id">Add parent</a></td>
+ </tr>
+ #end if
+ #if $external_repos
+ <tr>
+ <th>External repos</th>
+ <td>
+ #for $external_repo in $external_repos
+ <a href="externalrepoinfo?extrepoID=$external_repo.external_repo_id">$external_repo.external_repo_name</a>
+ #if $external_repo.tag_id != $tag.id
+ <span class="smaller">(inherited from <a href="taginfo?tagID=$external_repo.tag_id">$external_repo.tag_name</a>)</span>
+ #end if
+ <br/>
+ #end for
+ </td>
+ </tr>
+ #end if
+ <tr>
+ <th>Repo created</th><td>#if $repo then $util.formatTimeRSS($repo.creation_time) else ''#</td>
+ </tr>
+ <tr>
+ <th>Packages</th>
+ <td><a href="packages?tagID=$tag.id">$numPackages</a></td>
+ </tr>
+ <tr>
+ <th>Builds</th>
+ <td><a href="builds?tagID=$tag.id">$numBuilds</a></td>
+ </tr>
+ <tr>
+ <th>Targets building from this tag</th>
+ <td>
+ #if $len($srcTargets)
+ #for $target in $srcTargets
+ <a href="buildtargetinfo?name=$target.name">$target.name</a><br/>
+ #end for
+ #else
+ No build targets
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Targets building to this tag</th>
+ <td>
+ #if $len($destTargets)
+ #for $target in $destTargets
+ <a href="buildtargetinfo?name=$target.name">$target.name</a><br/>
+ #end for
+ #else
+ No build targets
+ #end if
+ </td>
+ </tr>
+ #if 'admin' in $perms
+ <tr>
+ <td colspan="2"><a href="tagedit?tagID=$tag.id$util.authToken($self)">Edit tag</a></td>
+ </tr>
+ <tr>
+ <td colspan="2"><a href="tagdelete?tagID=$tag.id$util.authToken($self)">Delete tag</a></td>
+ </tr>
+ #end if
+ #if $tag.get('extra')
+ <tr>
+ <th>Extra options:</th>
+ </tr>
+ #for $key in $tag['extra']
+ <tr>
+ <th>$key</th>
+ <td>$pprint.pformat($tag['extra'][$key])</td>
+ </tr>
+ #end for
+ #end if
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/tagparent.chtml b/www/kojiweb/tagparent.chtml
new file mode 100644
index 0000000..cd391f2
--- /dev/null
+++ b/www/kojiweb/tagparent.chtml
@@ -0,0 +1,72 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ #if $inheritanceData
+ <h4>Edit Parent</h4>
+ #else
+ <h4>Add Parent</h4>
+ #end if
+
+ <form action="tagparent">
+ $util.authToken($self, form=True)
+ <input type="hidden" name="action" value="#if $inheritanceData then 'edit' else 'add'#"/>
+ <table>
+ <tr>
+ <th>Tag Name</th>
+ <td>
+ $tag.name
+ <input type="hidden" name="tagID" value="$tag.id"/>
+ </td>
+ </tr>
+ <tr>
+ <th>Parent Tag Name</th>
+ <td>
+ $parent.name
+ <input type="hidden" name="parentID" value="$parent.id"/>
+ </td>
+ </tr>
+ <tr>
+ <th>Priority</th>
+ <td>
+ <input type="text" name="priority" value="#if $inheritanceData then $inheritanceData.priority else $maxPriority + 1#"/>
+ </td>
+ </tr>
+ <tr>
+ <th>Max Depth</th>
+ <td>
+ <input type="text" name="maxdepth" value="#if $inheritanceData then $inheritanceData.maxdepth else ''#"/>
+ </td>
+ </tr>
+ <tr>
+ <th>Intransitive</th>
+ <td>
+ <input type="checkbox" name="intransitive" value="yes" #if $inheritanceData and $inheritanceData.intransitive then 'checked="checked"' else ''#/>
+ </td>
+ </tr>
+ <tr>
+ <th>Packages Only</th>
+ <td>
+ <input type="checkbox" name="noconfig" value="yes" #if $inheritanceData and $inheritanceData.noconfig then 'checked="checked"' else ''#/>
+ </td>
+ </tr>
+ <tr>
+ <th>Package Filter</th>
+ <td>
+ <input type="text" name="pkg_filter" value="#if $inheritanceData then $inheritanceData.pkg_filter else ''#"/>
+ </td>
+ </tr>
+ <tr>
+ <td>
+ #if $inheritanceData
+ <button type="submit" name="save" value="Save">Save</button>
+ #else
+ <button type="submit" name="add" value="Add">Add</button>
+ #end if
+ </td>
+ <td><button type="submit" name="cancel" value="Cancel">Cancel</button></td>
+ </tr>
+ </table>
+ </form>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/tags.chtml b/www/kojiweb/tags.chtml
new file mode 100644
index 0000000..fe39118
--- /dev/null
+++ b/www/kojiweb/tags.chtml
@@ -0,0 +1,76 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Tags</h4>
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($tagPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tags?start=' + this.value * $tagRange + '$util.passthrough($self, 'userID', 'tagID', 'order', 'childID')';">
+ #for $pageNum in $tagPages
+ <option value="$pageNum"#if $pageNum == $tagCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $tagStart > 0
+ <a href="tags?start=#echo $tagStart - $tagRange #$util.passthrough($self, 'userID', 'tagID', 'order', 'childID')"><<<</a>
+ #end if
+ #if $totalTags != 0
+ <strong>Tags #echo $tagStart + 1 # through #echo $tagStart + $tagCount # of $totalTags</strong>
+ #end if
+ #if $tagStart + $tagCount < $totalTags
+ <a href="tags?start=#echo $tagStart + $tagRange#$util.passthrough($self, 'userID', 'tagID', 'order', 'childID')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="tags?order=$util.toggleOrder($self, 'id')">ID</a> $util.sortImage($self, 'id')</th>
+ <th><a href="tags?order=$util.toggleOrder($self, 'name')">Name</a> $util.sortImage($self, 'name')</th>
+ </tr>
+ #if $len($tags) > 0
+ #for $tag in $tags
+ <tr class="$util.rowToggle($self)">
+ <td>$tag.id</td>
+ <td><a href="taginfo?tagID=$tag.id$util.passthrough($self, 'childID')">$tag.name</a></td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="2">No tags</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="2">
+ #if $len($tagPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tags?start=' + this.value * $tagRange + '$util.passthrough($self, 'userID', 'tagID', 'order', 'childID')';">
+ #for $pageNum in $tagPages
+ <option value="$pageNum"#if $pageNum == $tagCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $tagStart > 0
+ <a href="tags?start=#echo $tagStart - $tagRange #$util.passthrough($self, 'userID', 'tagID', 'order', 'childID')"><<<</a>
+ #end if
+ #if $totalTags != 0
+ <strong>Tags #echo $tagStart + 1 # through #echo $tagStart + $tagCount # of $totalTags</strong>
+ #end if
+ #if $tagStart + $tagCount < $totalTags
+ <a href="tags?start=#echo $tagStart + $tagRange#$util.passthrough($self, 'userID', 'tagID', 'order', 'childID')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+ #if 'admin' in $perms
+ <br/>
+ <a href="tagcreate$util.authToken($self, first=True)">Create new Tag</a>
+ #end if
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/taskinfo.chtml b/www/kojiweb/taskinfo.chtml
new file mode 100644
index 0000000..8db3571
--- /dev/null
+++ b/www/kojiweb/taskinfo.chtml
@@ -0,0 +1,447 @@
+#import koji
+#from kojiweb import util
+#import urllib
+#import cgi
+
+#def printValue($key, $value, $sep=', ')
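+ ## Render a single task parameter: buildroot IDs become links, lists
+ ## and dicts are flattened with $sep, and anything else prints as-is.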
+ #if $key in ('brootid', 'buildroot_id')
+<a href="buildrootinfo?buildrootID=$value">$value</a>
+ #elif $isinstance($value, list)
+$sep.join([$str($val) for $val in $value])
+ #elif $isinstance($value, dict)
+$sep.join(['%s=%s' % (($n == '' and "''" or $n), $v) for $n, $v in $value.items()])
+ #else
+$value
+ #end if
+#end def
+
+#def printProperties($props)
+ #echo ', '.join([$v is not None and '%s=%s' % ($n, $v) or $str($n) for $n, $v in $props.items()])
+#end def
+
+#def printMap($vals, $prefix='')
+ #for $key, $value in $vals.items()
+ #if $key == 'properties'
+ ${prefix}properties = $printProperties($value)<br/>
+ #elif $key != '__starstar'
+ $prefix$key = $printValue($key, $value)<br/>
+ #end if
+ #end for
+#end def
+
+#def printOpts($opts)
+ #if $opts
+ <strong>Options:</strong><br/>
+ $printMap($opts, ' ')
+ #end if
+#end def
+
+#def printChildren($taskID, $childMap)
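+ ## Recursively render a task's descendants as a nested list; $childMap
+ ## is keyed by str(task id), so lookups stringify the ID first.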
+ #set $iter = 0
+ #set $children = $childMap[$str($taskID)]
+ #if $children
+ <ul>
+ #for $child in $children
+ #set $iter += 1
+ #if $iter < $len($children)
+ <li class="sibling">
+ #else
+ <li>
+ #end if
+ #set $childState = $util.taskState($child.state)
+ <span class="treeBranch">
+ <span class="treeLabel">
+ <a href="taskinfo?taskID=$child.id" class="task$childState" title="$childState">$koji.taskLabel($child)</a>
+ </span>
+ </span>
+ $printChildren($child.id, $childMap)
+ </li>
+ #end for
+ </ul>
+ #end if
+#end def
+
+#include "includes/header.chtml"
+
+ <h4>Information for task <a href="taskinfo?taskID=$task.id">$koji.taskLabel($task)</a></h4>
+
+ <table>
+ <tr>
+ <th>ID</th><td>$task.id</td>
+ </tr>
+ <tr>
+ <th>Method</th><td>$task.method</td>
+ </tr>
+ <tr>
+ <th>Parameters</th>
+ <td>
+ #if $task.method == 'buildSRPMFromSCM'
+ <strong>SCM URL:</strong> $params[0]<br/>
+ #if $len($params) > 1
+ <strong>Build Tag:</strong> <a href="taginfo?tagID=$buildTag.name">$buildTag.name</a><br/>
+ #end if
+ #if $len($params) > 2
+ $printOpts($params[2])
+ #end if
+ #elif $task.method == 'buildSRPMFromCVS'
+ <strong>CVS URL:</strong> $params[0]
+ #elif $task.method == 'buildArch'
+ <strong>SRPM:</strong> $params[0]<br/>
+ <strong>Build Tag:</strong> <a href="taginfo?tagID=$buildTag.name">$buildTag.name</a><br/>
+ <strong>Arch:</strong> $params[2]<br/>
+ <strong>Keep SRPM?</strong> #if $params[3] then 'yes' else 'no'#<br/>
+ #if $len($params) > 4
+ $printOpts($params[4])
+ #end if
+ #elif $task.method == 'tagBuild'
+ <strong>Destination Tag:</strong> <a href="taginfo?tagID=$destTag.id">$destTag.name</a><br/>
+ <strong>Build:</strong> <a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a>
+ #elif $task.method == 'buildNotification'
+ #set $build = $params[1]
+ #set $buildTarget = $params[2]
+ <strong>Recipients:</strong> $printValue('', $params[0])<br/>
+ <strong>Build:</strong> <a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a><br/>
+ #if $buildTarget
+ <strong>Build Target:</strong> <a href="buildtargetinfo?targetID=$buildTarget.id">$buildTarget.name</a><br/>
+ #else
+ <strong>Build Target:</strong> (no build target)<br/>
+ #end if
+ <strong>Web URL:</strong> <a href="$params[3]">$params[3]</a>
+ #elif $task.method == 'tagNotification'
+ <strong>Recipients:</strong> $printValue('', $params[0])<br/>
+ <strong>Successful?:</strong> #if $params[1] then 'yes' else 'no'#<br/>
+ #if $destTag
+ <strong>Tagged Into:</strong> <a href="taginfo?tagID=$destTag.id">$destTag.name</a><br/>
+ #end if
+ #if $srcTag
+ <strong>#if $destTag then 'Moved From:' else 'Untagged From:'#</strong> <a href="taginfo?tagID=$srcTag.id">$srcTag.name</a><br/>
+ #end if
+ <strong>Build:</strong> <a href="buildinfo?buildID=$build.id">$koji.buildLabel($build)</a><br/>
+ <strong>#if $destTag then 'Tagged By:' else 'Untagged By:'#</strong> <a href="userinfo?userID=$user.id">$user.name</a><br/>
+ <strong>Ignore Success?:</strong> #if $params[6] then 'yes' else 'no'#<br/>
+ #if $params[7]
+ <strong>Failure Message:</strong> $params[7]
+ #end if
+ #elif $task.method == 'build'
+ <strong>Source:</strong> $params[0]<br/>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[1]">$params[1]</a><br/>
+ $printOpts($params[2])
+ #elif $task.method == 'maven'
+ <strong>SCM URL:</strong> $params[0]<br/>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[1]">$params[1]</a><br/>
+ $printOpts($params[2])
+ #elif $task.method == 'buildMaven'
+ <strong>SCM URL:</strong> $params[0]<br/>
+ <strong>Build Tag:</strong> <a href="taginfo?tagID=$buildTag.id">$buildTag.name</a><br/>
+ #if $len($params) > 2
+ $printOpts($params[2])
+ #end if
+ #elif $task.method == 'wrapperRPM'
+ <strong>Spec File URL:</strong> $params[0]<br/>
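+ ## Note: $buildTarget may actually be a tag dict here; the presence
+ ## of a 'locked' key is what distinguishes a tag from a build target.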
+ #if 'locked' in $buildTarget
+ #set $buildTag = $buildTarget
+ <strong>Build Tag:</strong> <a href="taginfo?tagID=$buildTag.id">$buildTag.name</a><br/>
+ #else
+ <strong>Build Target:</strong> <a href="buildtargetinfo?targetID=$buildTarget.id">$buildTarget.name</a><br/>
+ #end if
+ #if $params[2]
+ <strong>Build:</strong> <a href="buildinfo?buildID=$params[2].id">$koji.buildLabel($params[2])</a><br/>
+ #end if
+ #if $params[3]
+ <strong>Task:</strong> <a href="taskinfo?taskID=$wrapTask.id">$koji.taskLabel($wrapTask)</a><br/>
+ #end if
+ #if $len($params) > 4
+ $printOpts($params[4])
+ #end if
+ #elif $task.method == 'chainmaven'
+ <strong>Builds:</strong><br/>
+ <table>
+ #for $key, $val in $params[0].items()
+ <tr><td><strong>$key:</strong></td><td>$printMap($val)</td></tr>
+ #end for
+ </table>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[1]">$params[1]</a><br/>
+ #if $len($params) > 2
+ $printOpts($params[2])
+ #end if
+ #elif $task.method == 'livecd' or $task.method == 'appliance'
+ <strong>Arch:</strong> $params[2]<br/>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[3]">$params[3]</a><br/>
+ <strong>Kickstart File:</strong> $params[4]<br/>
+ $printOpts($params[5])
+ #elif $task.method == 'image'
+ <strong>Arches:</strong> #echo ', '.join($params[2])#<br/>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[3]">$params[3]</a><br/>
+ <strong>Installation Tree:</strong> $params[4]<br/>
+ $printOpts($params[5])
+ #elif $task.method == 'createLiveCD' or $task.method == 'createAppliance'
+ #if $len($params) > 4:
+ ## new method signature
+ <strong>Arch:</strong> $params[3]<br/>
+ <strong>Kickstart File:</strong> $params[7]<br/>
+ #if $len($params) > 8
+ $printOpts($params[8])
+ #end if
+ #else
+ ## old method signature
+ <strong>Arch:</strong> $params[0]<br/>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[1]">$params[1]</a><br/>
+ <strong>Kickstart File:</strong> $params[2]<br/>
+ #if $len($params) > 3
+ $printOpts($params[3])
+ #end if
+ #end if
+ #elif $task.method == 'createImage'
+ #set $target = $params[4]
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$target.name">$target.name</a><br/>
+ <strong>Install Tree:</strong> $params[7]<br/>
+ $printOpts($params[8])
+ #elif $task.method == 'winbuild'
+ <strong>VM:</strong> $params[0]<br/>
+ <strong>SCM URL:</strong> $params[1]<br/>
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[2]">$params[2]</a><br/>
+ #if $len($params) > 3
+ $printOpts($params[3])
+ #end if
+ #elif $task.method == 'vmExec'
+ <strong>VM:</strong> $params[0]<br/>
+ <strong>Exec Params:</strong><br/>
+ #for $info in $params[1]
+ #if $isinstance($info, dict)
+ $printMap($info, ' ')
+ #else
+ $info<br/>
+ #end if
+ #end for
+ #if $len($params) > 2
+ $printOpts($params[2])
+ #end if
+ #elif $task.method == 'newRepo'
+ <strong>Tag:</strong> <a href="taginfo?tagID=$tag.id">$tag.name</a><br/>
+ #if $len($params) > 1
+ $printOpts($params[1])
+ #end if
+ #elif $task.method == 'prepRepo'
+ <strong>Tag:</strong> <a href="taginfo?tagID=$params[0].id">$params[0].name</a>
+ #elif $task.method == 'createrepo'
+ <strong>Repo ID:</strong> $params[0]<br/>
+ <strong>Arch:</strong> $params[1]<br/>
+ #set $oldrepo = $params[2]
+ #if $oldrepo
+ <strong>Old Repo ID:</strong> $oldrepo.id<br/>
+ <strong>Old Repo Creation:</strong> $koji.formatTimeLong($oldrepo.creation_time)<br/>
+ #end if
+ #if $len($params) > 3
+ <strong>External Repos:</strong> $printValue(None, [ext['external_repo_name'] for ext in $params[3]])<br/>
+ #end if
+ #elif $task.method == 'dependantTask'
+ <strong>Dependent Tasks:</strong><br/>
+ #for $dep in $deps
+ <a href="taskinfo?taskID=$dep.id" class="task$util.taskState($dep.state)">$koji.taskLabel($dep)</a><br/>
+ #end for
+ <strong>Subtasks:</strong><br/>
+ #for $subtask in $params[1]
+ <strong>Method:</strong> $subtask[0]<br/>
+ <strong>Parameters:</strong> #echo ', '.join([$str($subparam) for $subparam in $subtask[1]])#<br/>
+ #if $len($subtask) > 2 and $subtask[2]
+ <strong>Options:</strong><br/>
+ $printMap($subtask[2], ' ')
+ #end if
+ <br/>
+ #end for
+ #elif $task.method == 'chainbuild'
+ <strong>Build Groups:</strong><br/>
+ #set $groupNum = 0
+ #for $urls in $params[0]
+ #set $groupNum += 1
+ <strong>$groupNum</strong>: #echo ', '.join($urls)#<br/>
+ #end for
+ <strong>Build Target:</strong> <a href="buildtargetinfo?name=$params[1]">$params[1]</a><br/>
+ $printOpts($params[2])
+ #elif $task.method == 'waitrepo'
+ <strong>Build Target:</strong> $params[0]<br/>
+ #if $params[1]
+ <strong>Newer Than:</strong> $params[1]<br/>
+ #end if
+ #if $params[2]
+ <strong>NVRs:</strong> $printValue('', $params[2])
+ #end if
+ #elif $task.method == 'restart'
+ <strong>Host:</strong> <a href="hostinfo?hostID=$params[0].id">$params[0].name</a><br/>
+ #elif $task.method == 'restartVerify'
+ <strong>Host:</strong> <a href="hostinfo?hostID=$params[1].id">$params[1].name</a><br/>
+ <strong>Restart Task:</strong>
+ <a href="taskinfo?taskID=$rtask.id" class="task$util.taskState($rtask.state)">$koji.taskLabel($rtask)</a><br/>
+ #else
+ $params
+ #end if
+ </td>
+ </tr>
+ <tr>
+ #set $state = $util.taskState($task.state)
+ <th>State</th>
+ <td class="task$state">$state
+ #if $currentUser and ('admin' in $perms or $task.owner == $currentUser.id)
+ #if $task.state in ($koji.TASK_STATES.FREE, $koji.TASK_STATES.OPEN, $koji.TASK_STATES.ASSIGNED)
+ <span class="adminLink">(<a href="canceltask?taskID=$task.id$util.authToken($self)">cancel</a>)</span>
+ #elif $task.state in ($koji.TASK_STATES.CANCELED, $koji.TASK_STATES.FAILED) and (not $parent)
+ <span class="adminLink">(<a href="resubmittask?taskID=$task.id$util.authToken($self)">resubmit</a>)</span>
+ #end if
+ #end if
+ </td>
+ </tr>
+ #if $taskBuild
+ <tr>
+ <th>Build</th><td><a href="buildinfo?buildID=$taskBuild.build_id">$koji.buildLabel($taskBuild)</a></td>
+ </tr>
+ #end if
+ <tr>
+ <th>Created</th><td>$util.formatTimeLong($task.create_time)</td>
+ </tr>
+ #if $task.start_time
+ <tr>
+ <th>Started</th><td>$util.formatTimeLong($task.start_time)</td>
+ </tr>
+ #end if
+ #if $task.state == $koji.TASK_STATES.OPEN
+ #if $estCompletion
+ <tr>
+ <th>Est. Completion</th><td>$util.formatTimeLong($estCompletion)</td>
+ </tr>
+ #end if
+ #elif $task.completion_time
+ <tr>
+ <th>Completed</th><td>$util.formatTimeLong($task.completion_time)</td>
+ </tr>
+ #end if
+ <tr>
+ <th>Owner</th>
+ <td>
+ #if $owner
+ #if $owner.usertype == $koji.USERTYPES['HOST']
+ <a href="hostinfo?userID=$owner.id">$owner.name</a>
+ #else
+ <a href="userinfo?userID=$owner.id">$owner.name</a>
+ #end if
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Channel</th>
+ <td>
+ #if $task.channel_id
+ <a href="channelinfo?channelID=$task.channel_id">$channelName</a>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Host</th>
+ <td>
+ #if $task.host_id
+ <a href="hostinfo?hostID=$task.host_id">$hostName</a>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Arch</th><td>$task.arch</td>
+ </tr>
+ #if $buildroots
+ <tr>
+ <th>Buildroot#if $len($buildroots) > 1 then 's' else ''#</th>
+ <td>
+ #for $buildroot in $buildroots
+ <a href="buildrootinfo?buildrootID=$buildroot.id">#if $task.method == 'vmExec' then '' else '/var/lib/mock/'#$buildroot.tag_name-$buildroot.id-$buildroot.repo_id</a><br/>
+ #end for
+ </td>
+ </tr>
+ #end if
+ <tr>
+ <th>Parent</th>
+ <td>
+ #if $parent
+ <a href="taskinfo?taskID=$parent.id" class="task$util.taskState($parent.state)">$koji.taskLabel($parent)</a>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th>Descendants</th>
+ <td class="tree">
+ #if $len($descendents[$str($task.id)]) > 0
+ <span class="root">$task.method</span>
+ #end if
+ $printChildren($task.id, $descendents)
+ </td>
+ </tr>
+ <tr>
+ <th>Waiting?</th><td>#if $task.waiting then 'yes' else 'no'#</td>
+ </tr>
+ <tr>
+ <th>Awaited?</th><td>#if $task.awaited then 'yes' else 'no'#</td>
+ </tr>
+ <tr>
+ <th>Priority</th><td>$task.priority</td>
+ </tr>
+ <tr>
+ <th>Weight</th><td>#echo '%.2f' % $task.weight#</td>
+ </tr>
+ <tr>
+ <th>Result</th>
+ <td>
+ <a href="#" collapse" id="toggle-result" style="display: none;">Show results</a>
+ <div id="result">
+ #if $excClass
+ <pre>
+ #if $hasattr($result, 'faultString')
+$cgi.escape($result.faultString.strip())
+ #else
+${excClass.__name__}: $cgi.escape($str($result))
+ #end if
+ </pre>
+ #elif $isinstance($result, dict)
+ $printMap($result)
+ #else
+ $printValue('', $result)
+ #end if
+ </div>
+ </td>
+ </tr>
+ <tr>
+ <th>Output</th>
+ <td>
+ #for $filename in $output
+ <a href="$pathinfo.task($task.id)/$urllib.quote($filename)">$filename</a>
+ #if $filename.endswith('.log')
+ (<a href="getfile?taskID=$task.id&name=$urllib.quote($filename)&offset=-4000">tail</a>)
+ #end if
+ <br/>
+ #end for
+ #if $task.state not in ($koji.TASK_STATES.CLOSED, $koji.TASK_STATES.CANCELED, $koji.TASK_STATES.FAILED) and \
+ $task.method in ('buildSRPMFromSCM', 'buildArch', 'livecd', 'appliance', 'buildMaven', 'wrapperRPM', 'vmExec', 'createrepo')
+ <br/>
+ <a href="watchlogs?taskID=$task.id">Watch logs</a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+ <script type="text/javascript">
+ (function() {
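+ // Progressive enhancement: results start visible and the toggle link
+ // hidden, so the page remains usable when JavaScript is unavailable.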
+ var r = document.getElementById('result'),
+ t = document.getElementById('toggle-result');
+ r.style.display = 'none';
+ t.style.display = 'block';
+ t.text = 'Show result';
+ t.onclick = function(e) {
+ if(r.style.display == 'none') {
+ r.style.display = 'block';
+ t.text = 'Hide result';
+ }
+ else {
+ r.style.display = 'none';
+ t.text = 'Show result';
+ }
+ return false;
+ };
+ })();
+ </script>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/tasks.chtml b/www/kojiweb/tasks.chtml
new file mode 100644
index 0000000..4c1a32f
--- /dev/null
+++ b/www/kojiweb/tasks.chtml
@@ -0,0 +1,190 @@
+#import koji
+#from kojiweb import util
+
+#def printChildren($taskID, $childMap)
+ #set $iter = 0
+ #set $children = $childMap[$str($taskID)]
+ #if $children
+ <ul>
+ #for $child in $children
+ #set $iter += 1
+ #if $iter < $len($children)
+ <li class="sibling">
+ #else
+ <li>
+ #end if
+ #set $childState = $util.taskState($child.state)
+ <span class="treeBranch">
+ <span class="treeLabel">
+ <a href="taskinfo?taskID=$child.id" class="task$childState" title="$childState">$koji.taskLabel($child)</a>
+ </span>
+ </span>
+ $printChildren($child.id, $childMap)
+ </li>
+ #end for
+ </ul>
+ #end if
+#end def
+
+#def headerPrefix($state)
+ #if $state == 'active'
+Active
+ #elif $state == 'all'
+All
+ #else
+#echo $state.capitalize()
+ #end if
+#end def
+
+#attr _PASSTHROUGH = ['owner', 'state', 'view', 'method', 'hostID', 'channelID', 'order']
+
+#include "includes/header.chtml"
+
+ <h4>$headerPrefix($state) #if $view == 'toplevel' then 'toplevel' else ''# #if $method != 'all' then $method else ''# Tasks#if $ownerObj then ' owned by <a href="userinfo?userID=%i">%s</a>' % ($ownerObj.id, $ownerObj.name) else ''##if $host then ' on host <a href="hostinfo?hostID=%i">%s</a>' % ($host.id, $host.name) else ''# #if $channel then ' in channel <a href="channelinfo?channelID=%i">%s</a>' % ($channel.id, $channel.name) else ''#</h4>
+
+ <table class="data-list">
+ <tr>
+ <td colspan="6">
+ <form action="tasks">
+ <table class="nested">
+ <tr><td>
+ <strong>State</strong>:
+ </td><td>
+ <select name="state" class="filterlist" onchange="javascript: window.location = 'tasks?state=' + this.value + '$util.passthrough_except($self, 'state')';">
+ <option value="active" $util.toggleSelected($self, $state, 'active')>active</option>
+ <option value="all" $util.toggleSelected($self, $state, 'all')>all</option>
+ <option value="free" #if $state == 'free' then 'selected="selected"' else ''#>free</option>
+ <option value="open" #if $state == 'open' then 'selected="selected"' else ''#>open</option>
+ <option value="closed" #if $state == 'closed' then 'selected="selected"' else ''#>closed</option>
+ <option value="failed" #if $state == 'failed' then 'selected="selected"' else ''#>failed</option>
+ <option value="canceled" #if $state == 'canceled' then 'selected="selected"' else ''#>canceled</option>
+ <option value="assigned" #if $state == 'assigned' then 'selected="selected"' else ''#>assigned</option>
+ </select>
+ </td><td>
+ <strong>Owner</strong>:
+ </td><td>
+ <select name="owner" class="filterlist" onchange="javascript: window.location = 'tasks?owner=' + this.value + '$util.passthrough_except($self, 'owner')';">
+ <option value="" #if not $owner then 'selected="selected"' else ''#>everyone</option>
+ #if $loggedInUser
+ <option value="$loggedInUser.name">me</option>
+ #end if
+ #for $user in $users
+ <option value="$user.name" #if $user.name == $owner then 'selected="selected"' else ''#>$user.name</option>
+ #end for
+ </select>
+ </td></tr>
+ <tr><td>
+ <strong>Method</strong>:
+ </td><td>
+ <select name="method" class="filterlist" onchange="javascript: window.location = 'tasks?method=' + this.value + '$util.passthrough_except($self, 'method')';">
+ <option value="all" $util.toggleSelected($self, $method, 'all')>all</option>
+ #for $task_type in $alltasks
+ #if $task_type in ('maven', 'buildMaven') and not $mavenEnabled
+ #continue
+ #elif $task_type in ('winbuild', 'vmExec') and not $winEnabled
+ #continue
+ #elif $task_type == 'wrapperRPM' and not ($mavenEnabled or $winEnabled)
+ #continue
+ #else
+ <option value="$task_type" #if $method == $task_type then 'selected="selected"' else ''#>$task_type</option>
+ #end if
+ #end for
+ </select>
+ </td><td>
+ <strong>View</strong>:
+ </td><td>
+ <select name="view" class="filterlist" onchange="javascript: window.location = 'tasks?view=' + this.value + '$util.passthrough_except($self, 'view')';">
+ <option value="tree" $util.toggleSelected($self, $view, 'tree') #if not $treeEnabled then 'disabled="disabled"' else ''#>tree</option>
+ <option value="toplevel" $util.toggleSelected($self, $view, 'toplevel') #if not $toplevelEnabled then 'disabled="disabled"' else ''#>toplevel</option>
+ <option value="flat" $util.toggleSelected($self, $view, 'flat')>flat</option>
+ </select>
+ </td></tr>
+ </table>
+ </form>
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="6">
+ #if $len($taskPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tasks?start=' + this.value * $taskRange + '$util.passthrough_except($self)';">
+ #for $pageNum in $taskPages
+ <option value="$pageNum"#if $pageNum == $taskCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $taskStart > 0
+ <a href="tasks?start=#echo $taskStart - $taskRange #$util.passthrough_except($self)"><<<</a>
+ #end if
+ #if $totalTasks != 0
+ <strong>Tasks #echo $taskStart + 1 # through #echo $taskStart + $taskCount # of $totalTasks</strong>
+ #end if
+ #if $taskStart + $taskCount < $totalTasks
+ <a href="tasks?start=#echo $taskStart + $taskRange#$util.passthrough_except($self)">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="tasks?order=$util.toggleOrder($self, 'id')$util.passthrough_except($self, 'order')">ID</a> $util.sortImage($self, 'id')</th>
+ <th><a href="tasks?order=$util.toggleOrder($self, 'method')$util.passthrough_except($self, 'order')">Type</a> $util.sortImage($self, 'method')</th>
+ <th><a href="tasks?order=$util.toggleOrder($self, 'owner')$util.passthrough_except($self, 'order')">Owner</a> $util.sortImage($self, 'owner')</th>
+ <th><a href="tasks?order=$util.toggleOrder($self, 'arch')$util.passthrough_except($self, 'order')">Arch</a> $util.sortImage($self, 'arch')</th>
+ <th><a href="tasks?order=$util.toggleOrder($self, 'completion_time')$util.passthrough_except($self, 'order')">Finished</a> $util.sortImage($self, 'completion_time')</th>
+ <th><a href="tasks?order=$util.toggleOrder($self, 'state')$util.passthrough_except($self, 'order')">State</a> $util.sortImage($self, 'state')</th>
+ </tr>
+ #if $len($tasks) > 0
+ #for $task in $tasks
+ <tr class="$util.rowToggle($self)">
+ #set $taskState = $util.taskState($task.state)
+ <td>$task.id</td>
+ <td#if $treeDisplay then ' class="tree"' else ''#>
+ #if $treeDisplay then ' ' else ''#<a href="taskinfo?taskID=$task.id" class="task$taskState" title="$taskState">$koji.taskLabel($task)</a>
+ #if $treeDisplay
+ $printChildren($task.id, $task.descendents)
+ #end if
+ </td>
+ <td class="user-$task.owner_name">
+ #if $task.owner_type == $koji.USERTYPES['HOST']
+ <a href="hostinfo?userID=$task.owner">$task.owner_name</a>
+ #else
+ <a href="userinfo?userID=$task.owner">$task.owner_name</a>
+ #end if
+ </td>
+ <td>$task.arch</td>
+ <td>$util.formatTime($task.completion_time)</td>
+ <td class="task$state">$util.imageTag($taskState)</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="6">No tasks</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="6">
+ #if $len($taskPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tasks?start=' + this.value * $taskRange + '$util.passthrough_except($self)';">
+ #for $pageNum in $taskPages
+ <option value="$pageNum"#if $pageNum == $taskCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $taskStart > 0
+ <a href="tasks?start=#echo $taskStart - $taskRange #$util.passthrough_except($self)"><<<</a>
+ #end if
+ #if $totalTasks != 0
+ <strong>Tasks #echo $taskStart + 1 # through #echo $taskStart + $taskCount # of $totalTasks</strong>
+ #end if
+ #if $taskStart + $taskCount < $totalTasks
+ <a href="tasks?start=#echo $taskStart + $taskRange#$util.passthrough_except($self)">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/tasksbyhost.chtml b/www/kojiweb/tasksbyhost.chtml
new file mode 100644
index 0000000..4efa19e
--- /dev/null
+++ b/www/kojiweb/tasksbyhost.chtml
@@ -0,0 +1,89 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Tasks by Host#if $hostArch then ' (%s)' % $hostArch else ''#</h4>
+ <table class="data-list">
+ <tr>
+ <td class="archlist" colspan="3">
+ <strong>Host arch:</strong> #for $arch in $hostArchList
+ #if $arch == $hostArch
+ <strong>$arch</strong> |
+ #else
+ <a href="tasksbyhost?hostArch=$arch$util.passthrough($self, 'order')">$arch</a> |
+ #end if
+ #end for
+ #if $hostArch
+ <a href="tasksbyhost?${util.passthrough($self, 'order')[1:]}">all</a>
+ #else
+ <strong>all</strong>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($hostPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tasksbyhost?start=' + this.value * $hostRange + '$util.passthrough($self, 'order', 'hostArch')';">
+ #for $pageNum in $hostPages
+ <option value="$pageNum"#if $pageNum == $hostCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $hostStart > 0
+ <a href="tasksbyhost?start=#echo $hostStart - $hostRange #$util.passthrough($self, 'order', 'hostArch')"><<<</a>
+ #end if
+ #if $totalHosts != 0
+ <strong>Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts</strong>
+ #end if
+ #if $hostStart + $hostCount < $totalHosts
+ <a href="tasksbyhost?start=#echo $hostStart + $hostRange#$util.passthrough($self, 'order', 'hostArch')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="tasksbyhost?order=$util.toggleOrder($self, 'name')$util.passthrough($self, 'hostArch')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="tasksbyhost?order=$util.toggleOrder($self, 'tasks')$util.passthrough($self, 'hostArch')">Tasks</a> $util.sortImage($self, 'tasks')</th>
+ <th>&nbsp;</th>
+ </tr>
+ #if $len($hosts) > 0
+ #for $host in $hosts
+ <tr class="$util.rowToggle($self)">
+ <td><a href="hostinfo?hostID=$host.id">$host.name</a></td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $host.tasks#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$host.tasks</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="3">No hosts</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($hostPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tasksbyhost?start=' + this.value * $hostRange + '$util.passthrough($self, 'order', 'hostArch')';">
+ #for $pageNum in $hostPages
+ <option value="$pageNum"#if $pageNum == $hostCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $hostStart > 0
+ <a href="tasksbyhost?start=#echo $hostStart - $hostRange #$util.passthrough($self, 'order', 'hostArch')"><<<</a>
+ #end if
+ #if $totalHosts != 0
+ <strong>Hosts #echo $hostStart + 1 # through #echo $hostStart + $hostCount # of $totalHosts</strong>
+ #end if
+ #if $hostStart + $hostCount < $totalHosts
+ <a href="tasksbyhost?start=#echo $hostStart + $hostRange#$util.passthrough($self, 'order', 'hostArch')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/tasksbyuser.chtml b/www/kojiweb/tasksbyuser.chtml
new file mode 100644
index 0000000..843ea18
--- /dev/null
+++ b/www/kojiweb/tasksbyuser.chtml
@@ -0,0 +1,73 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Tasks by User</h4>
+ <table class="data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($userPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tasksbyuser?start=' + this.value * $userRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $userPages
+ <option value="$pageNum"#if $pageNum == $userCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userStart > 0
+ <a href="tasksbyuser?start=#echo $userStart - $userRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalUsers != 0
+ <strong>Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers</strong>
+ #end if
+ #if $userStart + $userCount < $totalUsers
+ <a href="tasksbyuser?start=#echo $userStart + $userRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="tasksbyuser?order=$util.toggleOrder($self, 'name')">Name</a> $util.sortImage($self, 'name')</th>
+ <th><a href="tasksbyuser?order=$util.toggleOrder($self, 'tasks')">Tasks</a> $util.sortImage($self, 'tasks')</th>
+ <th>&nbsp;</th>
+ </tr>
+ #if $len($users) > 0
+ #for $user in $users
+ <tr class="$util.rowToggle($self)">
+ <td><a href="userinfo?userID=$user.id">$user.name</a></td>
+ <td width="#echo $graphWidth + 5#"><img src="$util.themePath('images/1px.gif')" width="#echo $increment * $user.tasks#" height="15" class="graphrow" alt="graph row"/></td>
+ <td>$user.tasks</td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="3">No users</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($userPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'tasksbyuser?start=' + this.value * $userRange + '$util.passthrough($self, 'order')';">
+ #for $pageNum in $userPages
+ <option value="$pageNum"#if $pageNum == $userCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userStart > 0
+ <a href="tasksbyuser?start=#echo $userStart - $userRange #$util.passthrough($self, 'order')"><<<</a>
+ #end if
+ #if $totalUsers != 0
+ <strong>Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers</strong>
+ #end if
+ #if $userStart + $userCount < $totalUsers
+ <a href="tasksbyuser?start=#echo $userStart + $userRange#$util.passthrough($self, 'order')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/userinfo.chtml b/www/kojiweb/userinfo.chtml
new file mode 100644
index 0000000..f7b97fb
--- /dev/null
+++ b/www/kojiweb/userinfo.chtml
@@ -0,0 +1,108 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Information for user <a href="userinfo?userID=$user.id">$user.name</a></h4>
+
+ <table>
+ <tr>
+ <th>Name</th><td>$user.name</td>
+ </tr>
+ <tr>
+ <th>ID</th><td>$user.id</td>
+ </tr>
+ <tr>
+ <th>Tasks</th><td><a href="tasks?owner=$user.name&state=all">$taskCount</a></td>
+ </tr>
+ <tr>
+ <th id="packagelist">Packages</th>
+ <td class="container">
+ #if $len($packages) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($packagePages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'userinfo?packageStart=' + this.value * $packageRange + '$util.passthrough($self, 'userID', 'packageOrder', 'buildOrder', 'buildStart')#packagelist';">
+ #for $pageNum in $packagePages
+ <option value="$pageNum"#if $pageNum == $packageCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $packageStart > 0
+ <a href="userinfo?packageStart=#echo $packageStart - $packageRange #$util.passthrough($self, 'userID', 'packageOrder', 'buildOrder', 'buildStart')#packagelist"><<<</a>
+ #end if
+ <strong>#echo $packageStart + 1 # through #echo $packageStart + $packageCount # of $totalPackages</strong>
+ #if $packageStart + $packageCount < $totalPackages
+ <a href="userinfo?packageStart=#echo $packageStart + $packageRange#$util.passthrough($self, 'userID', 'packageOrder', 'buildOrder', 'buildStart')#packagelist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="userinfo?packageOrder=$util.toggleOrder($self, 'package_name', 'packageOrder')$util.passthrough($self, 'userID', 'buildOrder', 'buildStart')#packagelist">Name</a> $util.sortImage($self, 'package_name', 'packageOrder')</th>
+ <th><a href="userinfo?packageOrder=$util.toggleOrder($self, 'tag_name', 'packageOrder')$util.passthrough($self, 'userID', 'buildOrder', 'buildStart')#packagelist">Tag</a> $util.sortImage($self, 'tag_name', 'packageOrder')</th>
+ <th><a href="userinfo?packageOrder=$util.toggleOrder($self, 'blocked', 'packageOrder')$util.passthrough($self, 'userID', 'buildOrder', 'buildStart')#packagelist">Included?</a> $util.sortImage($self, 'blocked', 'packageOrder')</th>
+ </tr>
+ #for $package in $packages
+ <tr class="$util.rowToggle($self)">
+ <td><a href="packageinfo?packageID=$package.package_id">$package.package_name</a></td>
+ <td><a href="taginfo?tagID=$package.tag_id">$package.tag_name</a></td>
+ <td class="$str(not $package.blocked).lower()">#if $package.blocked then $util.imageTag('no') else $util.imageTag('yes')#</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No packages
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <th id="buildlist">Builds</th>
+ <td class="container">
+ #if $len($builds) > 0
+ <table class="nested data-list">
+ <tr>
+ <td class="paginate" colspan="3">
+ #if $len($buildPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'userinfo?buildStart=' + this.value * $buildRange + '$util.passthrough($self, 'userID', 'buildOrder', 'packageOrder', 'packageStart')#buildlist';">
+ #for $pageNum in $buildPages
+ <option value="$pageNum"#if $pageNum == $buildCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $buildStart > 0
+ <a href="userinfo?buildStart=#echo $buildStart - $buildRange #$util.passthrough($self, 'userID', 'buildOrder', 'packageOrder', 'packageStart')#buildlist"><<<</a>
+ #end if
+ <strong>#echo $buildStart + 1 # through #echo $buildStart + $buildCount # of $totalBuilds</strong>
+ #if $buildStart + $buildCount < $totalBuilds
+ <a href="userinfo?buildStart=#echo $buildStart + $buildRange#$util.passthrough($self, 'userID', 'buildOrder', 'packageOrder', 'packageStart')#buildlist">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="userinfo?buildOrder=$util.toggleOrder($self, 'nvr', 'buildOrder')$util.passthrough($self, 'userID', 'packageOrder', 'packageStart')#buildlist">NVR</a> $util.sortImage($self, 'nvr', 'buildOrder')</th>
+ <th><a href="userinfo?buildOrder=$util.toggleOrder($self, 'completion_time', 'buildOrder')$util.passthrough($self, 'userID', 'packageOrder', 'packageStart')#buildlist">Finished</a> $util.sortImage($self, 'completion_time', 'buildOrder')</th>
+ <th><a href="userinfo?buildOrder=$util.toggleOrder($self, 'state', 'buildOrder')$util.passthrough($self, 'userID', 'packageOrder', 'packageStart')#buildlist">State</a> $util.sortImage($self, 'state', 'buildOrder')</th>
+ </tr>
+ #for $build in $builds
+ <tr class="$util.rowToggle($self)">
+ #set $stateName = $util.stateName($build.state)
+ <td><a href="buildinfo?buildID=$build.build_id">$build.nvr</a></td>
+ <td>$util.formatTime($build.completion_time)</td>
+ <td class="$stateName">$util.stateImage($build.state)</td>
+ </tr>
+ #end for
+ </table>
+ #else
+ No builds
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/users.chtml b/www/kojiweb/users.chtml
new file mode 100644
index 0000000..e8cafec
--- /dev/null
+++ b/www/kojiweb/users.chtml
@@ -0,0 +1,94 @@
+#from kojiweb import util
+
+#include "includes/header.chtml"
+
+ <h4>Users#if $prefix then ' starting with "%s"' % $prefix else ''#</h4>
+ <table class="data-list">
+ <tr>
+ <td class="charlist" colspan="5">
+ #for $char in $chars
+ #if $prefix == $char
+ <strong>$char</strong>
+ #else
+ <a href="users?prefix=$char$util.passthrough($self, 'order')">$char</a>
+ #end if
+ |
+ #end for
+ #if $prefix
+ <a href="users?${util.passthrough($self, 'order')[1:]}">all</a>
+ #else
+ <strong>all</strong>
+ #end if
+ </td>
+ </tr>
+ <tr>
+ <td class="paginate" colspan="5">
+ #if $len($userPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'users?start=' + this.value * $userRange + '$util.passthrough($self, 'order', 'prefix')';">
+ #for $pageNum in $userPages
+ <option value="$pageNum"#if $pageNum == $userCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userStart > 0
+ <a href="users?start=#echo $userStart - $userRange #$util.passthrough($self, 'order', 'prefix')"><<<</a>
+ #end if
+ #if $totalUsers != 0
+ <strong>Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers</strong>
+ #end if
+ #if $userStart + $userCount < $totalUsers
+ <a href="users?start=#echo $userStart + $userRange#$util.passthrough($self, 'order', 'prefix')">>>></a>
+ #end if
+ </td>
+ </tr>
+ <tr class="list-header">
+ <th><a href="users?order=$util.toggleOrder($self, 'id')$util.passthrough($self, 'prefix')">ID</a> $util.sortImage($self, 'id')</th>
+ <th><a href="users?order=$util.toggleOrder($self, 'name')$util.passthrough($self, 'prefix')">Name</a> $util.sortImage($self, 'name')</th>
+ <th>Packages</th>
+ <th>Builds</th>
+ <th>Tasks</th>
+ </tr>
+ #if $len($users) > 0
+ #for $user in $users
+ <tr class="$util.rowToggle($self)">
+ <td>$user.id</td>
+ <td><a href="userinfo?userID=$user.name">$user.name</a></td>
+ <td><a href="packages?userID=$user.name">view</a></td>
+ <td><a href="builds?userID=$user.name">view</a></td>
+ <td><a href="tasks?owner=$user.name">view</a></td>
+ </tr>
+ #end for
+ #else
+ <tr class="row-odd">
+ <td colspan="5">No users</td>
+ </tr>
+ #end if
+ <tr>
+ <td class="paginate" colspan="5">
+ #if $len($userPages) > 1
+ <form class="pageJump" action="">
+ Page:
+ <select onchange="javascript: window.location = 'users?start=' + this.value * $userRange + '$util.passthrough($self, 'order', 'prefix')';">
+ #for $pageNum in $userPages
+ <option value="$pageNum"#if $pageNum == $userCurrentPage then ' selected="selected"' else ''#>#echo $pageNum + 1#</option>
+ #end for
+ </select>
+ </form>
+ #end if
+ #if $userStart > 0
+ <a href="users?start=#echo $userStart - $userRange #$util.passthrough($self, 'order', 'prefix')"><<<</a>
+ #end if
+ #if $totalUsers != 0
+ <strong>Users #echo $userStart + 1 # through #echo $userStart + $userCount # of $totalUsers</strong>
+ #end if
+ #if $userStart + $userCount < $totalUsers
+ <a href="users?start=#echo $userStart + $userRange#$util.passthrough($self, 'order', 'prefix')">>>></a>
+ #end if
+ </td>
+ </tr>
+ </table>
+
+#include "includes/footer.chtml"
diff --git a/www/kojiweb/wsgi_publisher.py b/www/kojiweb/wsgi_publisher.py
new file mode 100644
index 0000000..e790815
--- /dev/null
+++ b/www/kojiweb/wsgi_publisher.py
@@ -0,0 +1,481 @@
+# a vaguely publisher-like dispatcher for wsgi
+#
+# Copyright (c) 2012-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike McLean <mikem at redhat.com>
+
+import cgi
+import inspect
+import koji
+import koji.util
+import logging
+import os.path
+import pprint
+import sys
+import traceback
+
+from ConfigParser import RawConfigParser
+from koji.server import WSGIWrapper, ServerError, ServerRedirect
+from koji.util import dslice
+
+
+class URLNotFound(ServerError):
+ """Used to generate a 404 response"""
+
+
+class Dispatcher(object):
+
+ def __init__(self):
+ #we can't do much setup until we get a request
+ self.firstcall = True
+ self.options = {}
+ self.startup_error = None
+ self.handler_index = {}
+ self.setup_logging1()
+
+ def setup_logging1(self):
+ """Set up basic logging, before options are loaded"""
+ logger = logging.getLogger("koji")
+ logger.setLevel(logging.WARNING)
+ self.log_handler = logging.StreamHandler()
+ # Log to stderr (StreamHandler default).
+ # There seems to be no advantage to using wsgi.errors
+ log_format = '%(msecs)d [%(levelname)s] SETUP p=%(process)s %(name)s: %(message)s'
+ self.log_handler.setFormatter(logging.Formatter(log_format))
+ self.log_handler.setLevel(logging.DEBUG)
+ logger.addHandler(self.log_handler)
+ self.formatter = None
+ self.logger = logging.getLogger("koji.web")
+
+ cfgmap = [
+ #option, type, default
+ ['SiteName', 'string', None],
+ ['KojiHubURL', 'string', 'http://localhost/kojihub'],
+ ['KojiFilesURL', 'string', 'http://localhost/kojifiles'],
+ ['KojiTheme', 'string', None],
+ ['KojiGreeting', 'string', 'Welcome to Koji Web'],
+ ['LiteralFooter', 'boolean', True],
+
+ ['WebPrincipal', 'string', None],
+ ['WebKeytab', 'string', '/etc/httpd.keytab'],
+ ['WebCCache', 'string', '/var/tmp/kojiweb.ccache'],
+ ['KrbService', 'string', 'host'],
+
+ ['WebCert', 'string', None],
+ ['ClientCA', 'string', '/etc/kojiweb/clientca.crt'],
+ ['KojiHubCA', 'string', '/etc/kojiweb/kojihubca.crt'],
+
+ ['PythonDebug', 'boolean', False],
+
+ ['LoginTimeout', 'integer', 72],
+
+ ['Secret', 'string', None],
+
+ ['LibPath', 'string', '/usr/share/koji-web/lib'],
+
+ ['LogLevel', 'string', 'WARNING'],
+ ['LogFormat', 'string', '%(msecs)d [%(levelname)s] m=%(method)s u=%(user_name)s p=%(process)s r=%(remoteaddr)s %(name)s: %(message)s'],
+
+ ['RLIMIT_AS', 'string', None],
+ ['RLIMIT_CORE', 'string', None],
+ ['RLIMIT_CPU', 'string', None],
+ ['RLIMIT_DATA', 'string', None],
+ ['RLIMIT_FSIZE', 'string', None],
+ ['RLIMIT_MEMLOCK', 'string', None],
+ ['RLIMIT_NOFILE', 'string', None],
+ ['RLIMIT_NPROC', 'string', None],
+ ['RLIMIT_OFILE', 'string', None],
+ ['RLIMIT_RSS', 'string', None],
+ ['RLIMIT_STACK', 'string', None],
+ ]
+
+ def load_config(self, environ):
+ """Load configuration options
+
+ Options are read from a config file.
+
+ Backwards compatibility:
+ - if ConfigFile is not set, opts are loaded from http config
+ - if ConfigFile is set, then the http config must not provide Koji options
+ - In a future version we will load the default web config regardless
+ - all PythonOptions (except koji.web.ConfigFile) are now deprecated and
+ support for them will disappear in a future version of Koji
+ """
+ modpy_opts = environ.get('modpy.opts', {})
+ if 'modpy.opts' in environ:
+ cf = modpy_opts.get('koji.web.ConfigFile', None)
+ cfdir = modpy_opts.get('koji.web.ConfigDir', None)
+ # to aid in the transition from PythonOptions to web.conf, we do
+ # not check the config file by default, it must be configured
+ if not cf and not cfdir:
+ self.logger.warn('Warning: configuring Koji via PythonOptions is deprecated. Use web.conf')
+ else:
+ cf = environ.get('koji.web.ConfigFile', '/etc/kojiweb/web.conf')
+ cfdir = environ.get('koji.web.ConfigDir', '/etc/kojiweb/web.conf.d')
+ if cfdir:
+ configs = koji.config_directory_contents(cfdir)
+ else:
+ configs = []
+ if cf and os.path.isfile(cf):
+ configs.append(cf)
+ if configs:
+ config = RawConfigParser()
+ config.read(configs)
+ elif modpy_opts:
+ # presumably we are configured by modpy options
+ config = None
+ else:
+ raise koji.GenericError, "Configuration missing"
+
+ opts = {}
+ for name, dtype, default in self.cfgmap:
+ if config:
+ key = ('web', name)
+ if config.has_option(*key):
+ if dtype == 'integer':
+ opts[name] = config.getint(*key)
+ elif dtype == 'boolean':
+ opts[name] = config.getboolean(*key)
+ else:
+ opts[name] = config.get(*key)
+ else:
+ opts[name] = default
+ else:
+ if modpy_opts.get(name, None) is not None:
+ if dtype == 'integer':
+ opts[name] = int(modpy_opts.get(name))
+ elif dtype == 'boolean':
+ opts[name] = modpy_opts.get(name).lower() in ('yes', 'on', 'true', '1')
+ else:
+ opts[name] = modpy_opts.get(name)
+ else:
+ opts[name] = default
+ if 'modpy.conf' in environ:
+ debug = environ['modpy.conf'].get('PythonDebug', '0').lower()
+ opts['PythonDebug'] = (debug in ['yes', 'on', 'true', '1'])
+ opts['Secret'] = koji.util.HiddenValue(opts['Secret'])
+ self.options = opts
+ return opts
+
+ def setup_logging2(self, environ):
+ """Adjust logging based on configuration options"""
+ opts = self.options
+ #determine log level
+ level = opts['LogLevel']
+ valid_levels = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL')
+ # the config value can be a single level name or a series of
+ # logger:level pairs, processed in the order found
+ default = None
+ for part in level.split():
+ pair = part.split(':', 1)
+ if len(pair) == 2:
+ name, level = pair
+ else:
+ name = 'koji'
+ level = part
+ default = level
+ if level not in valid_levels:
+ raise koji.GenericError, "Invalid log level: %s" % level
+ #all our loggers start with koji
+ if name == '':
+ name = 'koji'
+ default = level
+ elif name.startswith('.'):
+ name = 'koji' + name
+ elif not name.startswith('koji'):
+ name = 'koji.' + name
+ level_code = logging._levelNames[level]
+ logging.getLogger(name).setLevel(level_code)
+ logger = logging.getLogger("koji")
+ # if KojiDebug is set, force main log level to DEBUG
+ if opts.get('KojiDebug'):
+ logger.setLevel(logging.DEBUG)
+ elif default is None:
+ #LogLevel did not configure a default level
+ logger.setLevel(logging.WARNING)
+ self.formatter = HubFormatter(opts['LogFormat'])
+ self.formatter.environ = environ
+ self.log_handler.setFormatter(self.formatter)
+
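As the parsing above implies, LogLevel accepts either a single level name
or several whitespace-separated logger:level pairs; a bare logger name is
prefixed with 'koji.', and a leading dot is relative to the 'koji' logger.
A hypothetical setting:

    LogLevel = WARNING .web:DEBUG hub:INFO

sets the 'koji' logger to WARNING (the default), 'koji.web' to DEBUG, and
'koji.hub' to INFO.
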
+ def find_handlers(self):
+ for name in vars(kojiweb_handlers):
+ if name.startswith('_'):
+ continue
+ try:
+ val = getattr(kojiweb_handlers, name, None)
+ if not inspect.isfunction(val):
+ continue
+ # err on the side of paranoia
+ args = inspect.getargspec(val)
+ if not args[0] or args[0][0] != 'environ':
+ continue
+ except:
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ self.logger.error(tb_str)
+ continue
+ self.handler_index[name] = val
+
+ def prep_handler(self, environ):
+ path_info = environ['PATH_INFO']
+ if not path_info:
+ #empty path info (no trailing slash) breaks our relative urls
+ environ['koji.redirect'] = environ['REQUEST_URI'] + '/'
+ raise ServerRedirect
+ elif path_info == '/':
+ method = 'index'
+ else:
+ method = path_info.lstrip('/').split('/')[0]
+ environ['koji.method'] = method
+ self.logger.info("Method: %s", method)
+ func = self.handler_index.get(method)
+ if not func:
+ raise URLNotFound
+ #parse form args
+ data = {}
+ fs = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ.copy(), keep_blank_values=True)
+ for field in fs.list:
+ if field.filename:
+ val = field
+ else:
+ val = field.value
+ data.setdefault(field.name, []).append(val)
+ # replace singleton lists with single values
+ # XXX - this is a bad practice, but for now we strive to emulate mod_python.publisher
+ for arg in data:
+ val = data[arg]
+ if isinstance(val, list) and len(val) == 1:
+ data[arg] = val[0]
+ environ['koji.form'] = fs
+ args, varargs, varkw, defaults = inspect.getargspec(func)
+ if not varkw:
+ # remove any unexpected args
+ data = dslice(data, args, strict=False)
+ #TODO (warning in header or something?)
+ return func, data
+
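To make the form handling concrete, here is the collapsing step in
isolation, as a runnable Python 2 sketch with a hypothetical GET request
(kojiweb gets the same data from cgi.FieldStorage as above):

    import cgi, io
    environ = {'REQUEST_METHOD': 'GET',
               'QUERY_STRING': 'buildID=123&tag=f21&tag=f22'}
    fs = cgi.FieldStorage(fp=io.BytesIO(b''), environ=environ,
                          keep_blank_values=True)
    data = {}
    for field in fs.list:
        data.setdefault(field.name, []).append(field.value)
    for arg in data:
        if len(data[arg]) == 1:
            data[arg] = data[arg][0]   # singleton lists become scalars
    print(data)   # buildID collapses to '123'; tag stays ['f21', 'f22']
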
+
+ def _setup(self, environ):
+ global kojiweb_handlers
+ global kojiweb
+ options = self.load_config(environ)
+ if 'LibPath' in options and os.path.exists(options['LibPath']):
+ sys.path.insert(0, options['LibPath'])
+ # figure out our location and try to load index.py from same dir
+ scriptsdir = os.path.dirname(environ['SCRIPT_FILENAME'])
+ environ['koji.scriptsdir'] = scriptsdir
+ sys.path.insert(0, scriptsdir)
+ import index as kojiweb_handlers
+ import kojiweb
+ self.find_handlers()
+ self.setup_logging2(environ)
+ koji.util.setup_rlimits(options)
+ # TODO - plugins?
+
+ def setup(self, environ):
+ try:
+ self._setup(environ)
+ except Exception:
+ # fallback value, in case formatting the traceback below fails
+ self.startup_error = "unknown startup_error"
+ etype, e = sys.exc_info()[:2]
+ tb_short = ''.join(traceback.format_exception_only(etype, e))
+ self.startup_error = "startup_error: %s" % tb_short
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ self.logger.error(tb_str)
+
+ def simple_error_page(self, message=None, err=None):
+ result = ["""\
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
+"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html><head><title>Error</title></head>
+<body>
+"""]
+ if message:
+ result.append("<p>%s</p>\n" % message)
+ if err:
+ result.append("<p>%s</p>\n" % err)
+ result.append("</body></html>\n")
+ length = sum([len(x) for x in result])
+ headers = [
+ ('Allow', 'GET, POST, HEAD'),
+ ('Content-Length', str(length)),
+ ('Content-Type', 'text/html'),
+ ]
+ return result, headers
+
+ def error_page(self, environ, message=None, err=True):
+ if err:
+ etype, e = sys.exc_info()[:2]
+ tb_short = ''.join(traceback.format_exception_only(etype, e))
+ tb_long = ''.join(traceback.format_exception(*sys.exc_info()))
+ if isinstance(e, koji.ServerOffline):
+ desc = ('Outage', 'outage')
+ else:
+ desc = ('Error', 'error')
+ else:
+ etype = None
+ e = None
+ tb_short = ''
+ tb_long = ''
+ desc = ('Error', 'error')
+ try:
+ _initValues = kojiweb.util._initValues
+ _genHTML = kojiweb.util._genHTML
+ except (NameError, AttributeError):
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ self.logger.error(tb_str)
+ #fallback to simple error page
+ return self.simple_error_page(message, err=tb_short)
+ values = _initValues(environ, *desc)
+ values['etype'] = etype
+ values['exception'] = e
+ if err:
+ values['explanation'], values['debug_level'] = kojiweb.util.explainError(e)
+ if message:
+ values['explanation'] = message
+ else:
+ values['explanation'] = message or "Unknown error"
+ values['debug_level'] = 0
+ values['tb_short'] = tb_short
+ if int(self.options.get("PythonDebug", 0)):
+ values['tb_long'] = tb_long
+ else:
+ values['tb_long'] = "Full tracebacks disabled"
+ # default these koji values to false so the _genHTML doesn't try to look
+ # them up (which will fail badly if the hub is offline)
+ # FIXME - we need a better fix for this
+ environ['koji.values'].setdefault('mavenEnabled', False)
+ environ['koji.values'].setdefault('winEnabled', False)
+ result = _genHTML(environ, 'error.chtml')
+ headers = [
+ ('Allow', 'GET, POST, HEAD'),
+ ('Content-Length', str(len(result))),
+ ('Content-Type', 'text/html'),
+ ]
+ return [result], headers
+
+ def handle_request(self, environ, start_response):
+ if self.startup_error:
+ status = '200 OK'
+ result, headers = self.error_page(environ, message=self.startup_error)
+ start_response(status, headers)
+ return result
+ if environ['REQUEST_METHOD'] not in ['GET', 'POST', 'HEAD']:
+ status = '405 Method Not Allowed'
+ result, headers = self.error_page(environ, message="Method Not Allowed")
+ start_response(status, headers)
+ return result
+ environ['koji.options'] = self.options
+ try:
+ environ['koji.headers'] = []
+ func, data = self.prep_handler(environ)
+ result = func(environ, **data)
+ status = '200 OK'
+ except ServerRedirect:
+ status = '302 Found'
+ location = environ['koji.redirect']
+ result = '<p>Redirect: <a href="%s">here</a></p>\n' % location
+ environ['koji.headers'].append(['Location', location])
+ except URLNotFound:
+ status = "404 Not Found"
+ msg = "Not found: %s" % environ['REQUEST_URI']
+ result, headers = self.error_page(environ, message=msg, err=False)
+ start_response(status, headers)
+ return result
+ except Exception:
+ tb_str = ''.join(traceback.format_exception(*sys.exc_info()))
+ self.logger.error(tb_str)
+ status = '500 Internal Server Error'
+ result, headers = self.error_page(environ)
+ start_response(status, headers)
+ return result
+ headers = {
+ 'allow' : ('Allow', 'GET, POST, HEAD'),
+ }
+ extra = []
+ for name, value in environ.get('koji.headers', []):
+ key = name.lower()
+ if key == 'set-cookie':
+ extra.append((name, value))
+ else:
+ # last one wins
+ headers[key] = (name, value)
+ if isinstance(result, basestring):
+ headers.setdefault('content-length', ('Content-Length', str(len(result))))
+ headers.setdefault('content-type', ('Content-Type', 'text/html'))
+ headers = headers.values() + extra
+ self.logger.debug("Headers:")
+ self.logger.debug(koji.util.LazyString(pprint.pformat, [headers]))
+ start_response(status, headers)
+ if isinstance(result, basestring):
+ result = [result]
+ return result
+
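The header merge above deduplicates by header name, keeping the last
value, but deliberately lets Set-Cookie repeat, since multiple cookies
need multiple header lines. Distilled into a standalone sketch:

    koji_headers = [('Content-Type', 'text/plain'),
                    ('Content-Type', 'text/html'),
                    ('Set-Cookie', 'a=1'),
                    ('Set-Cookie', 'b=2')]
    headers, extra = {}, []
    for name, value in koji_headers:
        if name.lower() == 'set-cookie':
            extra.append((name, value))
        else:
            headers[name.lower()] = (name, value)   # last one wins
    print(list(headers.values()) + extra)
    # [('Content-Type', 'text/html'), ('Set-Cookie', 'a=1'), ('Set-Cookie', 'b=2')]
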
+ def handler(self, req):
+ """mod_python handler"""
+ wrapper = WSGIWrapper(req)
+ return wrapper.run(self.application)
+
+ def application(self, environ, start_response):
+ """wsgi handler"""
+ if self.formatter:
+ self.formatter.environ = environ
+ if self.firstcall:
+ self.firstcall = False
+ self.setup(environ)
+ try:
+ result = self.handle_request(environ, start_response)
+ finally:
+ if self.formatter:
+ self.formatter.environ = {}
+ session = environ.get('koji.session')
+ if session:
+ session.logout()
+ return result
+
+
+class HubFormatter(logging.Formatter):
+ """Support some koji specific fields in the format string"""
+
+ def format(self, record):
+ # dispatcher should set environ for us
+ environ = self.environ
+ # XXX Can we avoid these data lookups if not needed?
+ record.method = environ.get('koji.method')
+ record.remoteaddr = "%s:%s" % (
+ environ.get('REMOTE_ADDR', '?'),
+ environ.get('REMOTE_PORT', '?'))
+ record.user_name = environ.get('koji.currentLogin')
+ user = environ.get('koji.currentUser')
+ if user:
+ record.user_id = user['id']
+ else:
+ record.user_id = None
+ session = environ.get('koji.session')
+ record.session_id = None
+ if session:
+ record.callnum = session.callnum
+ if session.sinfo:
+ record.session_id = session.sinfo.get('session.id')
+ else:
+ record.callnum = None
+ return logging.Formatter.format(self, record)
+
+
+# provide necessary global handlers for mod_wsgi and mod_python
+dispatcher = Dispatcher()
+handler = dispatcher.handler
+application = dispatcher.application
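Both entry points come from a single module-level Dispatcher: mod_python
calls handler() and mod_wsgi calls application(). Any WSGI server can
drive a callable with that signature; purely as an illustration of the
contract (this is not how kojiweb is deployed, and the real dispatcher
also expects extra environ keys such as SCRIPT_FILENAME):

    # Python 2 sketch of the WSGI calling convention
    from wsgiref.simple_server import make_server

    def app(environ, start_response):
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello from %s\n' % environ['PATH_INFO']]

    make_server('localhost', 8000, app).serve_forever()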
diff --git a/www/lib/Makefile b/www/lib/Makefile
new file mode 100644
index 0000000..a0e01a1
--- /dev/null
+++ b/www/lib/Makefile
@@ -0,0 +1,20 @@
+SUBDIRS = kojiweb
+
+SERVERDIR = /usr/share/koji-web/lib
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(SERVERDIR) \
+ -C $$d install; [ $$? = 0 ] || exit 1; done
diff --git a/www/lib/kojiweb/Makefile b/www/lib/kojiweb/Makefile
new file mode 100644
index 0000000..0cdcbe9
--- /dev/null
+++ b/www/lib/kojiweb/Makefile
@@ -0,0 +1,30 @@
+PYTHON=python
+PACKAGE = $(shell basename `pwd`)
+PYFILES = $(wildcard *.py)
+PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
+PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
+PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
+PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE)
+
+SERVERDIR = /kojiweb
+FILES = $(wildcard *.py *.chtml)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ for p in $(PYFILES) ; do \
+ install -p -m 644 $$p $(DESTDIR)/$(SERVERDIR)/$$p; \
+ done
+ $(PYTHON) -c "import compileall; compileall.compile_dir('$(DESTDIR)/$(SERVERDIR)', 1, '$(PYDIR)', 1)"
diff --git a/www/lib/kojiweb/__init__.py b/www/lib/kojiweb/__init__.py
new file mode 100644
index 0000000..ac095f5
--- /dev/null
+++ b/www/lib/kojiweb/__init__.py
@@ -0,0 +1 @@
+# identify this directory as a python module
diff --git a/www/lib/kojiweb/util.py b/www/lib/kojiweb/util.py
new file mode 100644
index 0000000..8156d79
--- /dev/null
+++ b/www/lib/kojiweb/util.py
@@ -0,0 +1,583 @@
+# utility functions for koji web interface
+#
+# Copyright (c) 2005-2014 Red Hat, Inc.
+#
+# Koji is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation;
+# version 2.1 of the License.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this software; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# Authors:
+# Mike Bonnet <mikeb at redhat.com>
+# Mike McLean <mikem at redhat.com>
+
+import Cheetah.Template
+import datetime
+import koji
+from koji.util import md5_constructor
+import os
+import stat
+import time
+#a bunch of exception classes that explainError needs
+from socket import error as socket_error
+from socket import sslerror as socket_sslerror
+from xmlrpclib import ProtocolError
+from xml.parsers.expat import ExpatError
+
+class NoSuchException(Exception):
+ pass
+
+try:
+ # pyOpenSSL might not be around
+ from OpenSSL.SSL import Error as SSL_Error
+except:
+ SSL_Error = NoSuchException
+
+
+themeInfo = {}
+themeCache = {}
+
+def _initValues(environ, title='Build System Info', pageID='summary'):
+ global themeInfo
+ global themeCache
+ values = {}
+ values['siteName'] = environ['koji.options'].get('SiteName', 'Koji')
+ values['title'] = title
+ values['pageID'] = pageID
+ values['currentDate'] = str(datetime.datetime.now())
+ values['literalFooter'] = environ['koji.options'].get('LiteralFooter', True)
+ themeCache.clear()
+ themeInfo.clear()
+ themeInfo['name'] = environ['koji.options'].get('KojiTheme', None)
+ themeInfo['staticdir'] = environ['koji.options'].get('KojiStaticDir', '/usr/share/koji-web/static')
+
+ environ['koji.values'] = values
+
+ return values
+
+def themePath(path, local=False):
+ global themeInfo
+ global themeCache
+ local = bool(local)
+ if (path, local) in themeCache:
+ return themeCache[path, local]
+ if not themeInfo['name']:
+ if local:
+ ret = os.path.join(themeInfo['staticdir'], path)
+ else:
+ ret = "/koji-static/%s" % path
+ else:
+ themepath = os.path.join(themeInfo['staticdir'], 'themes', themeInfo['name'], path)
+ if os.path.exists(themepath):
+ if local:
+ ret = themepath
+ else:
+ ret = "/koji-static/themes/%s/%s" % (themeInfo['name'], path)
+ else:
+ if local:
+ ret = os.path.join(themeInfo['staticdir'], path)
+ else:
+ ret = "/koji-static/%s" % path
+ themeCache[path, local] = ret
+ return ret
+
+class DecodeUTF8(Cheetah.Filters.Filter):
+ def filter(self, *args, **kw):
+ """Convert all strs to unicode objects"""
+ result = super(DecodeUTF8, self).filter(*args, **kw)
+ if isinstance(result, unicode):
+ pass
+ else:
+ result = result.decode('utf-8', 'replace')
+ return result
+
+# Escape ampersands so the output can be valid XHTML
+class XHTMLFilter(DecodeUTF8):
+ def filter(self, *args, **kw):
+ result = super(XHTMLFilter, self).filter(*args, **kw)
+ result = result.replace('&', '&amp;')
+ result = result.replace('&amp;amp;', '&amp;')
+ result = result.replace('&amp;nbsp;', '&nbsp;')
+ result = result.replace('&amp;lt;', '&lt;')
+ result = result.replace('&amp;gt;', '&gt;')
+ return result
+
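The replace chain escapes every bare '&' and then undoes the damage to
entities the templates emit on purpose, so already-escaped text is not
double-escaped. For instance (sketch):

    s = 'a & b &lt; c'
    s = s.replace('&', '&amp;')
    s = s.replace('&amp;amp;', '&amp;')
    s = s.replace('&amp;nbsp;', '&nbsp;')
    s = s.replace('&amp;lt;', '&lt;')
    s = s.replace('&amp;gt;', '&gt;')
    print(s)   # a &amp; b &lt; c
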
+TEMPLATES = {}
+
+def _genHTML(environ, fileName):
+ reqdir = os.path.dirname(environ['SCRIPT_FILENAME'])
+ if os.getcwd() != reqdir:
+ os.chdir(reqdir)
+
+ if 'koji.currentUser' in environ:
+ environ['koji.values']['currentUser'] = environ['koji.currentUser']
+ else:
+ environ['koji.values']['currentUser'] = None
+ environ['koji.values']['authToken'] = _genToken(environ)
+ if not environ['koji.values'].has_key('mavenEnabled'):
+ if 'koji.session' in environ:
+ environ['koji.values']['mavenEnabled'] = environ['koji.session'].mavenEnabled()
+ else:
+ environ['koji.values']['mavenEnabled'] = False
+ if not environ['koji.values'].has_key('winEnabled'):
+ if 'koji.session' in environ:
+ environ['koji.values']['winEnabled'] = environ['koji.session'].winEnabled()
+ else:
+ environ['koji.values']['winEnabled'] = False
+
+ tmpl_class = TEMPLATES.get(fileName)
+ if not tmpl_class:
+ tmpl_class = Cheetah.Template.Template.compile(file=fileName)
+ TEMPLATES[fileName] = tmpl_class
+ tmpl_inst = tmpl_class(namespaces=[environ['koji.values']], filter=XHTMLFilter)
+ return tmpl_inst.respond().encode('utf-8', 'replace')
+
+def _truncTime():
+ now = datetime.datetime.now()
+ # truncate to the nearest 15 minutes
+ return now.replace(minute=(now.minute / 15 * 15), second=0, microsecond=0)
+
+def _genToken(environ, tstamp=None):
+ if 'koji.currentLogin' in environ and environ['koji.currentLogin']:
+ user = environ['koji.currentLogin']
+ else:
+ return ''
+ if tstamp == None:
+ tstamp = _truncTime()
+ return md5_constructor(user + str(tstamp) + environ['koji.options']['Secret'].value).hexdigest()[-8:]
+
+def _getValidTokens(environ):
+ tokens = []
+ now = _truncTime()
+ for delta in (0, 15, 30):
+ token_time = now - datetime.timedelta(minutes=delta)
+ token = _genToken(environ, token_time)
+ if token:
+ tokens.append(token)
+ return tokens
+
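_genToken and _getValidTokens together implement a time-windowed
anti-CSRF token: the last 8 hex digits of md5(login + truncated timestamp
+ Secret), accepted for the current 15-minute window and the two before
it. A distilled Python 2 sketch (hashlib.md5 stands in for
koji.util.md5_constructor; user and secret are hypothetical):

    import datetime, hashlib

    def trunc_time(now):
        # round down to the nearest 15 minutes, like _truncTime()
        return now.replace(minute=now.minute / 15 * 15,
                           second=0, microsecond=0)

    def gen_token(user, secret, tstamp):
        return hashlib.md5(user + str(tstamp) + secret).hexdigest()[-8:]

    now = trunc_time(datetime.datetime.now())
    token = gen_token('alice', 's3cret', now)
    valid = [gen_token('alice', 's3cret', now - datetime.timedelta(minutes=d))
             for d in (0, 15, 30)]
    assert token in valid
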
+def toggleOrder(template, sortKey, orderVar='order'):
+ """
+ If the template variable named by orderVar currently equals sortKey,
+ return '-sortKey' (reversing the sort); otherwise return sortKey.
+ """
+ if template.getVar(orderVar) == sortKey:
+ return '-' + sortKey
+ else:
+ return sortKey
+
+def toggleSelected(template, var, option):
+ """
+ If the passed in variable var equals the literal value in option,
+ return 'selected="selected"', otherwise return ''.
+ Used for setting the selected option in select boxes.
+ """
+ if var == option:
+ return 'selected="selected"'
+ else:
+ return ''
+
+def sortImage(template, sortKey, orderVar='order'):
+ """
+ Return an HTML img tag (an up or down arrow) suitable for inclusion in
+ the column header of a sortable table, if the current value of orderVar
+ is "sortKey" or "-sortKey"; otherwise return ''.
+ """
+ orderVal = template.getVar(orderVar)
+ if orderVal == sortKey:
+ return '<img src="%s" class="sort" alt="ascending sort"/>' % themePath("images/gray-triangle-up.gif")
+ elif orderVal == '-' + sortKey:
+ return '<img src="%s" class="sort" alt="descending sort"/>' % themePath("images/gray-triangle-down.gif")
+ else:
+ return ''
+
+def passthrough(template, *vars):
+ """
+ Construct a string suitable for use as URL
+ parameters. For each variable name in *vars,
+ if the template has a corresponding non-None value,
+ append that name-value pair to the string. The name-value
+ pairs will be separated by ampersands (&), and prefixed by
+ an ampersand if there are any name-value pairs. If there
+ are no name-value pairs, an empty string will be returned.
+ """
+ result = []
+ for var in vars:
+ value = template.getVar(var, default=None)
+ if value != None:
+ result.append('%s=%s' % (var, value))
+ if result:
+ return '&' + '&'.join(result)
+ else:
+ return ''
+
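For example, with a stand-in template object (values hypothetical):

    class FakeTemplate(object):   # minimal stand-in for a Cheetah template
        def getVar(self, name, default=None):
            return {'order': '-id', 'start': 50}.get(name, default)

    print(passthrough(FakeTemplate(), 'order', 'start', 'tagID'))
    # -> '&order=-id&start=50'  (tagID is unset, so it is skipped)
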
+def passthrough_except(template, *exclude):
+ """
+ Construct a string suitable for use as URL
+ parameters. The template calling this method must have
+ previously used
+ #attr _PASSTHROUGH = ...
+ to define the list of variable names to be passed-through.
+ Any variables names passed in will be excluded from the
+ list of variables in the output string.
+ """
+ passvars = []
+ for var in template._PASSTHROUGH:
+ if not var in exclude:
+ passvars.append(var)
+ return passthrough(template, *passvars)
+
+def sortByKeyFunc(key, noneGreatest=False):
+ """Return a function to sort a list of maps by the given key.
+ If the key starts with '-', sort in reverse order. If noneGreatest
+ is True, None will sort higher than all other values (instead of lower).
+ """
+ if noneGreatest:
+ # Normally None evaluates to be less than every other value
+ # Invert the comparison so it always evaluates to greater
+ cmpFunc = lambda a, b: (a is None or b is None) and -(cmp(a, b)) or cmp(a, b)
+ else:
+ cmpFunc = cmp
+
+ if key.startswith('-'):
+ key = key[1:]
+ sortFunc = lambda a, b: cmpFunc(b[key], a[key])
+ else:
+ sortFunc = lambda a, b: cmpFunc(a[key], b[key])
+
+ return sortFunc
+
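For instance (Python 2 semantics: cmp-style sort functions, None
comparable with strings):

    rows = [{'name': 'b'}, {'name': None}, {'name': 'a'}]
    rows.sort(sortByKeyFunc('name', noneGreatest=True))
    print([r['name'] for r in rows])   # ['a', 'b', None] -- None sorts last
    rows.sort(sortByKeyFunc('-name'))
    print([r['name'] for r in rows])   # ['b', 'a', None] -- descending
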
+def paginateList(values, data, start, dataName, prefix=None, order=None, noneGreatest=False, pageSize=50):
+ """
+ Slice the 'data' list into one page worth. Start at offset
+ 'start' and limit the number of entries per page to pageSize
+ (defaults to 50). 'dataName' is the name under which the
+ list will be added to the value map, and prefix is the name
+ under which a number of list-related metadata variables will
+ be added to the value map.
+ """
+ if order != None:
+ data.sort(sortByKeyFunc(order, noneGreatest))
+
+ totalRows = len(data)
+
+ if start:
+ start = int(start)
+ if not start or start < 0:
+ start = 0
+
+ data = data[start:(start + pageSize)]
+ count = len(data)
+
+ _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order)
+
+ return data
+
+def paginateMethod(server, values, methodName, args=None, kw=None,
+ start=None, dataName=None, prefix=None, order=None, pageSize=50):
+ """Paginate the results of the method with the given name when called with the given args and kws.
+ The method must support the queryOpts keyword parameter, and pagination is done in the database."""
+ if args is None:
+ args = []
+ if kw is None:
+ kw = {}
+ if start:
+ start = int(start)
+ if not start or start < 0:
+ start = 0
+ if not dataName:
+ raise StandardError, 'dataName must be specified'
+
+ kw['queryOpts'] = {'countOnly': True}
+ totalRows = getattr(server, methodName)(*args, **kw)
+
+ kw['queryOpts'] = {'order': order,
+ 'offset': start,
+ 'limit': pageSize}
+ data = getattr(server, methodName)(*args, **kw)
+ count = len(data)
+
+ _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order)
+
+ return data
+
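A typical handler call looks roughly like this (listBuilds is a real hub
method; the argument values are illustrative):

    builds = paginateMethod(server, values, 'listBuilds',
                            kw={'userID': user['id']},
                            start=start, dataName='builds',
                            prefix='build', order='-build_id')

The two server calls above then amount to one countOnly query and one
LIMIT/OFFSET query on the hub side.
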
+def paginateResults(server, values, methodName, args=None, kw=None,
+ start=None, dataName=None, prefix=None, order=None, pageSize=50):
+ """Paginate the results of the method with the given name when called with the given args and kws.
+ This method should only be used when the method does not support the queryOpts parameter (because
+ the logic used to generate the result list prevents filtering/ordering from being done in the database).
+ The method must return a list of maps."""
+ if args is None:
+ args = []
+ if kw is None:
+ kw = {}
+ if start:
+ start = int(start)
+ if not start or start < 0:
+ start = 0
+ if not dataName:
+ raise StandardError, 'dataName must be specified'
+
+ totalRows = server.count(methodName, *args, **kw)
+
+ kw['filterOpts'] = {'order': order,
+ 'offset': start,
+ 'limit': pageSize}
+ data = server.filterResults(methodName, *args, **kw)
+ count = len(data)
+
+ _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order)
+
+ return data
+
+def _populateValues(values, dataName, prefix, data, totalRows, start, count, pageSize, order):
+ """Populate the values list with the data about the list provided."""
+ values[dataName] = data
+ # Don't use capitalize() or title() here; they mangle
+ # mixed-case names
+ values['total' + dataName[0].upper() + dataName[1:]] = totalRows
+ # Possibly prepend a prefix to the numeric parameters, to avoid namespace collisions
+ # when there is more than one list on the same page
+ values[(prefix and prefix + 'Start' or 'start')] = start
+ values[(prefix and prefix + 'Count' or 'count')] = count
+ values[(prefix and prefix + 'Range' or 'range')] = pageSize
+ values[(prefix and prefix + 'Order' or 'order')] = order
+ currentPage = start / pageSize
+ values[(prefix and prefix + 'CurrentPage' or 'currentPage')] = currentPage
+ totalPages = totalRows / pageSize
+ if totalRows % pageSize > 0:
+ totalPages += 1
+ pages = [page for page in range(0, totalPages) if (abs(page - currentPage) < 100 or ((page + 1) % 100 == 0))]
+ values[(prefix and prefix + 'Pages') or 'pages'] = pages
+
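So for dataName='builds' with prefix='build' (a typical combination), the
template namespace ends up with:

    values['builds']        # the current page of data
    values['totalBuilds']   # the full row count
    values['buildStart'], values['buildCount'], values['buildRange'],
    values['buildOrder'], values['buildCurrentPage'], values['buildPages']

Without a prefix the metadata keys are simply 'start', 'count', 'range',
'order', 'currentPage' and 'pages'.
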
+def stateName(stateID):
+ """Convert a numeric build state into a readable name."""
+ return koji.BUILD_STATES[stateID].lower()
+
+def imageTag(name):
+ """Return an img tag that loads an icon with the given name"""
+ return '<img class="stateimg" src="%s" title="%s" alt="%s"/>' \
+ % (themePath("images/%s.png" % name), name, name)
+
+def stateImage(stateID):
+ """Return an IMG tag that loads an icon appropriate for
+ the given state"""
+ name = stateName(stateID)
+ return imageTag(name)
+
+def brStateName(stateID):
+ """Convert a numeric buildroot state into a readable name."""
+ return koji.BR_STATES[stateID].lower()
+
+def repoStateName(stateID):
+ """Convert a numeric repository state into a readable name."""
+ if stateID == koji.REPO_INIT:
+ return 'initializing'
+ elif stateID == koji.REPO_READY:
+ return 'ready'
+ elif stateID == koji.REPO_EXPIRED:
+ return 'expired'
+ elif stateID == koji.REPO_DELETED:
+ return 'deleted'
+ else:
+ return 'unknown'
+
+def taskState(stateID):
+ """Convert a numeric task state into a readable name"""
+ return koji.TASK_STATES[stateID].lower()
+
+formatTime = koji.formatTime
+formatTimeRSS = koji.formatTimeLong
+formatTimeLong = koji.formatTimeLong
+
+def formatDep(name, version, flags):
+ """Format dependency information into
+ a human-readable format. Copied from
+ rpmUtils/miscutils.py:formatRequires()"""
+ s = name
+
+ if flags:
+ if flags & (koji.RPMSENSE_LESS | koji.RPMSENSE_GREATER |
+ koji.RPMSENSE_EQUAL):
+ s = s + " "
+ if flags & koji.RPMSENSE_LESS:
+ s = s + "<"
+ if flags & koji.RPMSENSE_GREATER:
+ s = s + ">"
+ if flags & koji.RPMSENSE_EQUAL:
+ s = s + "="
+ if version:
+ s = "%s %s" %(s, version)
+ return s
+
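For example, with koji's RPMSENSE flag constants (LESS=2, GREATER=4,
EQUAL=8, matching rpm's rpmsenseFlags):

    print(formatDep('glibc', '2.12',
                    koji.RPMSENSE_GREATER | koji.RPMSENSE_EQUAL))
    # -> 'glibc >= 2.12'
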
+def formatMode(mode):
+ """Format a numeric mode into a ls-like string describing the access mode."""
+ if stat.S_ISREG(mode):
+ result = '-'
+ elif stat.S_ISDIR(mode):
+ result = 'd'
+ elif stat.S_ISCHR(mode):
+ result = 'c'
+ elif stat.S_ISBLK(mode):
+ result = 'b'
+ elif stat.S_ISFIFO(mode):
+ result = 'p'
+ elif stat.S_ISLNK(mode):
+ result = 'l'
+ elif stat.S_ISSOCK(mode):
+ result = 's'
+ else:
+ # What is it? Show it like a regular file.
+ result = '-'
+
+ for x in ('USR', 'GRP', 'OTH'):
+ for y in ('R', 'W', 'X'):
+ if mode & getattr(stat, 'S_I' + y + x):
+ result += y.lower()
+ else:
+ result += '-'
+
+ return result
+
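For instance (Python 2 octal literals):

    import stat
    print(formatMode(stat.S_IFREG | 0644))   # -> '-rw-r--r--'
    print(formatMode(stat.S_IFDIR | 0755))   # -> 'drwxr-xr-x'
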
+def rowToggle(template):
+ """If the value of template._rowNum is even, return 'row-even';
+ if it is odd, return 'row-odd'. Increment the value before checking it.
+ If the template does not have that value, set it to 0."""
+ if not hasattr(template, '_rowNum'):
+ template._rowNum = 0
+ template._rowNum += 1
+ if template._rowNum % 2:
+ return 'row-odd'
+ else:
+ return 'row-even'
+
+
+def taskScratchClass(task_object):
+ """ Return a css class indicating whether or not this task is a scratch
+ build.
+ """
+ request = task_object['request']
+ if len(request) >= 3:
+ opts = request[2]
+ if opts.get('scratch'):
+ return "scratch"
+ return ""
+
+
+_fileFlags = {1: 'configuration',
+ 2: 'documentation',
+ 4: 'icon',
+ 8: 'missing ok',
+ 16: "don't replace",
+ 64: 'ghost',
+ 128: 'license',
+ 256: 'readme',
+ 512: 'exclude',
+ 1024: 'unpatched',
+ 2048: 'public key'}
+
+def formatFileFlags(flags):
+ """Format rpm fileflags for display. Returns
+ a list of human-readable strings specifying the
+ flags set in "flags"."""
+ results = []
+ for flag, desc in _fileFlags.items():
+ if flags & flag:
+ results.append(desc)
+ return results
+
+def escapeHTML(value):
+ """Replace special characters to the text can be displayed in
+ an HTML page correctly.
+ < : <
+ > : >
+ & : &
+ """
+ if not value:
+ return value
+
+ value = koji.fixEncoding(value)
+ return value.replace('&', '&amp;').\
+ replace('<', '&lt;').\
+ replace('>', '&gt;')
+
+def authToken(template, first=False, form=False):
+ """Return the current authToken if it exists.
+ If form is True, return it enclosed in a hidden input field.
+ Otherwise, return it in a format suitable for appending to a URL.
+ If first is True, prefix it with ?, otherwise prefix it
+ with &. If no authToken exists, return an empty string."""
+ token = template.getVar('authToken', default=None)
+ if token != None:
+ if form:
+ return '<input type="hidden" name="a" value="%s"/>' % token
+ if first:
+ return '?a=' + token
+ else:
+ return '&a=' + token
+ else:
+ return ''
+
+def explainError(error):
+ """Explain an exception in user-consumable terms
+
+ Some of the explanations are web-centric, which is why this call is not part
+ of the main koji library, at least for now...
+
+ Returns a tuple: (str, level)
+ str = explanation in plain text
+ level = an integer indicating how much traceback data should
+ be shown:
+ 0 - no traceback data
+ 1 - just the exception
+ 2 - full traceback
+ """
+ str = "An exception has occurred"
+ level = 2
+ if isinstance(error, koji.ServerOffline):
+ str = "The server is offline. Please try again later."
+ level = 0
+ elif isinstance(error, koji.ActionNotAllowed):
+ str = """\
+The web interface has tried to do something that your account is not \
+allowed to do. This is most likely a bug in the web interface."""
+ elif isinstance(error, koji.FunctionDeprecated):
+ str = """\
+The web interface has tried to access a deprecated function. This is \
+most likely a bug in the web interface."""
+ elif isinstance(error, koji.RetryError):
+ str = """\
+The web interface is having difficulty communicating with the main \
+server and was unable to retry an operation. Most likely this indicates \
+a network issue, but it could also be a configuration issue."""
+ level = 1
+ elif isinstance(error, koji.GenericError):
+ if getattr(error, 'fromFault', False):
+ str = """\
+An error has occurred on the main server. This could be a software \
+bug, a server configuration issue, or possibly something else."""
+ else:
+ str = """\
+An error has occurred in the web interface code. This could be due to \
+a bug or a configuration issue."""
+ elif isinstance(error, (socket_error, socket_sslerror)):
+ str = """\
+The web interface is having difficulty communicating with the main \
+server. This most likely indicates a network issue."""
+ level = 1
+ elif isinstance(error, (ProtocolError, ExpatError)):
+ str = """\
+The main server returned an invalid response. This could be caused by \
+a network issue or load issues on the server."""
+ level = 1
+ else:
+ str = "An error has occurred while processing your request."
+ return str, level
diff --git a/www/static/Makefile b/www/static/Makefile
new file mode 100644
index 0000000..9ec4398
--- /dev/null
+++ b/www/static/Makefile
@@ -0,0 +1,24 @@
+SUBDIRS = images errors js themes
+
+SERVERDIR = /usr/share/koji-web/static
+FILES = $(wildcard *.css)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+ for d in $(SUBDIRS); do make -s -C $$d clean; done
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
+
+ for d in $(SUBDIRS); do make DESTDIR=$(DESTDIR)/$(SERVERDIR) \
+ -C $$d install; [ $$? = 0 ] || exit 1; done
diff --git a/www/static/debug.css b/www/static/debug.css
new file mode 100644
index 0000000..84cbaf4
--- /dev/null
+++ b/www/static/debug.css
@@ -0,0 +1,9 @@
+/* for debugging purposes */
+
+@import url(koji.css);
+
+* {
+ border: 1px solid black !IMPORTANT;
+ margin: 1px !IMPORTANT;
+ padding: 1px !IMPORTANT;
+}
diff --git a/www/static/errors/Makefile b/www/static/errors/Makefile
new file mode 100644
index 0000000..9da0127
--- /dev/null
+++ b/www/static/errors/Makefile
@@ -0,0 +1,18 @@
+SERVERDIR = /errors
+FILES = $(wildcard *.html)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
diff --git a/www/static/errors/unauthorized.html b/www/static/errors/unauthorized.html
new file mode 100644
index 0000000..18fa367
--- /dev/null
+++ b/www/static/errors/unauthorized.html
@@ -0,0 +1,37 @@
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"
+ "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+ <head>
+ <title>Authentication Failed | Koji</title>
+ <link rel="stylesheet" type="text/css" media="screen" title="Koji Style" href="/koji-static/koji.css"/>
+ <link rel="alternate stylesheet" type="text/css" media="screen" title="Debug" href="/koji-static/debug.css"/>
+ <link rel="alternate" type="application/rss+xml" title="Koji: recent builds" href="/koji/recentbuilds"/>
+
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
+ </head>
+ <body>
+
+ <div id="wrap">
+ <div id="innerwrap">
+
+ <!-- HEADER -->
+ <div id="header">
+ <img src="/koji-static/images/koji.png" alt="Koji Logo" id="kojiLogo"/>
+ </div><!-- end header -->
+
+ <div id="content">
+ <h2>Kerberos Authentication Failed</h2>
+ The Koji Web UI was unable to verify your Kerberos credentials. Please make sure that you have valid
+ Kerberos tickets (obtainable via <strong>kinit</strong>), and that you have
+ configured your browser correctly.
+ </div>
+
+ <p id="footer">
+ Copyright © 2007-2014 Red Hat, Inc.
+ </p>
+
+ </div>
+ </div>
+
+ </body>
+</html>
diff --git a/www/static/images/1px.gif b/www/static/images/1px.gif
new file mode 100644
index 0000000..41ee92f
Binary files /dev/null and b/www/static/images/1px.gif differ
diff --git a/www/static/images/Makefile b/www/static/images/Makefile
new file mode 100644
index 0000000..fd7f2ca
--- /dev/null
+++ b/www/static/images/Makefile
@@ -0,0 +1,18 @@
+SERVERDIR = /images
+FILES = $(wildcard *.gif *.png *.ico)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
diff --git a/www/static/images/assigned.png b/www/static/images/assigned.png
new file mode 100644
index 0000000..9ad3715
Binary files /dev/null and b/www/static/images/assigned.png differ
diff --git a/www/static/images/bkgrnd_greydots.png b/www/static/images/bkgrnd_greydots.png
new file mode 100644
index 0000000..d5e79e8
Binary files /dev/null and b/www/static/images/bkgrnd_greydots.png differ
diff --git a/www/static/images/building.png b/www/static/images/building.png
new file mode 100644
index 0000000..1b4710b
Binary files /dev/null and b/www/static/images/building.png differ
diff --git a/www/static/images/canceled.png b/www/static/images/canceled.png
new file mode 100644
index 0000000..acf2036
Binary files /dev/null and b/www/static/images/canceled.png differ
diff --git a/www/static/images/closed.png b/www/static/images/closed.png
new file mode 100644
index 0000000..8e7a9d7
Binary files /dev/null and b/www/static/images/closed.png differ
diff --git a/www/static/images/complete.png b/www/static/images/complete.png
new file mode 100644
index 0000000..8e7a9d7
Binary files /dev/null and b/www/static/images/complete.png differ
diff --git a/www/static/images/deleted.png b/www/static/images/deleted.png
new file mode 100644
index 0000000..bbdf224
Binary files /dev/null and b/www/static/images/deleted.png differ
diff --git a/www/static/images/expired.png b/www/static/images/expired.png
new file mode 100644
index 0000000..dc7ed60
Binary files /dev/null and b/www/static/images/expired.png differ
diff --git a/www/static/images/failed.png b/www/static/images/failed.png
new file mode 100644
index 0000000..b4b29bf
Binary files /dev/null and b/www/static/images/failed.png differ
diff --git a/www/static/images/free.png b/www/static/images/free.png
new file mode 100644
index 0000000..fa147fe
Binary files /dev/null and b/www/static/images/free.png differ
diff --git a/www/static/images/gray-triangle-down.gif b/www/static/images/gray-triangle-down.gif
new file mode 100644
index 0000000..e5d3dac
Binary files /dev/null and b/www/static/images/gray-triangle-down.gif differ
diff --git a/www/static/images/gray-triangle-up.gif b/www/static/images/gray-triangle-up.gif
new file mode 100644
index 0000000..09149a4
Binary files /dev/null and b/www/static/images/gray-triangle-up.gif differ
diff --git a/www/static/images/init.png b/www/static/images/init.png
new file mode 100644
index 0000000..8177e6b
Binary files /dev/null and b/www/static/images/init.png differ
diff --git a/www/static/images/initializing.png b/www/static/images/initializing.png
new file mode 100644
index 0000000..8177e6b
Binary files /dev/null and b/www/static/images/initializing.png differ
diff --git a/www/static/images/koji.ico b/www/static/images/koji.ico
new file mode 100644
index 0000000..7affb4b
Binary files /dev/null and b/www/static/images/koji.ico differ
diff --git a/www/static/images/koji.png b/www/static/images/koji.png
new file mode 100644
index 0000000..2a7b508
Binary files /dev/null and b/www/static/images/koji.png differ
diff --git a/www/static/images/no.png b/www/static/images/no.png
new file mode 100644
index 0000000..b4b29bf
Binary files /dev/null and b/www/static/images/no.png differ
diff --git a/www/static/images/open.png b/www/static/images/open.png
new file mode 100644
index 0000000..c1e6e96
Binary files /dev/null and b/www/static/images/open.png differ
diff --git a/www/static/images/powered-by-koji.png b/www/static/images/powered-by-koji.png
new file mode 100644
index 0000000..32b52e3
Binary files /dev/null and b/www/static/images/powered-by-koji.png differ
diff --git a/www/static/images/ready.png b/www/static/images/ready.png
new file mode 100644
index 0000000..8e7a9d7
Binary files /dev/null and b/www/static/images/ready.png differ
diff --git a/www/static/images/unknown.png b/www/static/images/unknown.png
new file mode 100644
index 0000000..5b83f4f
Binary files /dev/null and b/www/static/images/unknown.png differ
diff --git a/www/static/images/waiting.png b/www/static/images/waiting.png
new file mode 100644
index 0000000..fa147fe
Binary files /dev/null and b/www/static/images/waiting.png differ
diff --git a/www/static/images/yes.png b/www/static/images/yes.png
new file mode 100644
index 0000000..8e7a9d7
Binary files /dev/null and b/www/static/images/yes.png differ
diff --git a/www/static/js/Makefile b/www/static/js/Makefile
new file mode 100644
index 0000000..8a4627f
--- /dev/null
+++ b/www/static/js/Makefile
@@ -0,0 +1,18 @@
+SERVERDIR = /js
+FILES = $(wildcard *.js)
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
diff --git a/www/static/js/watchlogs.js b/www/static/js/watchlogs.js
new file mode 100644
index 0000000..2f0cdcd
--- /dev/null
+++ b/www/static/js/watchlogs.js
@@ -0,0 +1,194 @@
+var MAX_ERRORS = 5; // errors before we just stop
+var CHUNK_SIZE = 16384;
+
+// General globals
+var baseURL = window.location.href.substring(0, window.location.href.lastIndexOf("/"));
+var logElement = null;
+var headerElement = null;
+var errorCount = 0;
+var tasks = null;
+var offsets = {};
+var lastlog = "";
+
+var tasksToProcess = null;
+var currentTaskID = null;
+var currentInfo = null;
+var currentLogs = null;
+var currentLog = null;
+
+function parseTasklist() {
+ var tasklist = [];
+ var queryStr = unescape(window.location.search.substring(1));
+ var vars = queryStr.split('&');
+ for (var i=0; i<vars.length; i++) {
+ if (vars[i].split('=')[0] == 'taskID') {
+ tasklist.push(parseInt(vars[i].split('=')[1]));
+ }
+ }
+ return tasklist;
+}
+
+function maybeScroll(origHeight) {
+ if ((window.pageYOffset + window.innerHeight) >= origHeight) {
+ // Only scroll the window if we were already at the bottom
+ // of the document
+ window.scroll(window.pageXOffset, document.body.clientHeight);
+ }
+}
+
+function handleStatus(event) {
+ var req = event.target;
+ if (req.readyState != 4) {
+ return;
+ }
+ if (req.status == 200) {
+ if (req.responseText.length > 0) {
+ var lines = req.responseText.split("\n");
+ var line = lines[0];
+ var data = line.split(":");
+ // var taskID = parseInt(data[0]);
+ var state = data[1];
+ var logs = {};
+ for (var i = 1; i < lines.length; i++) {
+ data = lines[i].split(":");
+ var filename = data[0];
+ var filesize = parseInt(data[1]);
+ if (filename.indexOf(".log") != -1) {
+ logs[filename] = filesize;
+ }
+ }
+ } else {
+ // task may not have started yet
+ var state = "UNKNOWN";
+ var logs = {};
+ }
+ currentInfo = {state: state, logs: logs};
+ if (!(state == "FREE" || state == "OPEN" ||
+ state == "ASSIGNED" || state == "UNKNOWN")) {
+ // remove tasks from the task list that are no longer running
+ for (var i = 0; i < tasks.length; i++) {
+ if (tasks[i] == currentTaskID) {
+ tasks.splice(i, 1);
+ break;
+ }
+ }
+ }
+ } else {
+ currentInfo = {state: "UNKNOWN", logs: {}};
+ popupError("Error checking status of task " + currentTaskID + ": " + req.statusText);
+ }
+ currentLogs = [];
+ for (var logname in currentInfo.logs) {
+ currentLogs.push(logname);
+ }
+ processLog();
+}
+
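handleStatus parses the /taskstatus response as plain text: the first
line is taskID:state, each further line is filename:size, and only files
whose names contain ".log" are tracked. A hypothetical response for a
running build task:

    1234:OPEN
    build.log:20480
    root.log:8192
    mock_output.log:512
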
+function getStatus() {
+ if (tasksToProcess.length == 0) {
+ if (errorCount > MAX_ERRORS) {
+ return;
+ } else {
+ if (headerElement != null) {
+ headerElement.appendChild(document.createTextNode("."));
+ }
+ setTimeout(checkTasks, 5000);
+ return;
+ }
+ }
+
+ currentTaskID = tasksToProcess.shift();
+ var req = new XMLHttpRequest();
+ req.open("GET", baseURL + "/taskstatus?taskID=" + currentTaskID, true);
+ req.onreadystatechange = handleStatus;
+ req.send(null);
+}
+
+function checkTasks() {
+ if (tasks.length == 0) {
+ var docHeight = document.body.clientHeight;
+ logElement.appendChild(document.createTextNode("\n==> Task has completed <==\n"));
+ maybeScroll(docHeight);
+ } else {
+ tasksToProcess = [];
+ for (var i = 0; i < tasks.length; i++) {
+ tasksToProcess.push(tasks[i]);
+ }
+ getStatus();
+ }
+}
+
+function processLog() {
+ if (currentLogs.length == 0) {
+ getStatus();
+ return;
+ }
+ currentLog = currentLogs.shift();
+ var taskOffsets = offsets[currentTaskID];
+ if (!(currentLog in taskOffsets)) {
+ taskOffsets[currentLog] = 0;
+ }
+ outputLog();
+}
+
+function outputLog() {
+ var currentOffset = offsets[currentTaskID][currentLog];
+ var currentSize = currentInfo.logs[currentLog];
+ if (currentSize > currentOffset) {
+ var chunkSize = CHUNK_SIZE;
+ if ((currentSize - currentOffset) < chunkSize) {
+ chunkSize = currentSize - currentOffset;
+ }
+ var req = new XMLHttpRequest();
+ req.open("GET", baseURL + "/getfile?taskID=" + currentTaskID + "&name=" + currentLog +
+ "&offset=" + currentOffset + "&size=" + chunkSize, true);
+ req.onreadystatechange = handleLog;
+ req.send(null);
+ if (headerElement != null) {
+ logElement.removeChild(headerElement);
+ headerElement = null;
+ }
+ } else {
+ processLog();
+ }
+}
+
+function handleLog(event) {
+ var req = event.target;
+ if (req.readyState != 4) {
+ return;
+ }
+ if (req.status == 200) {
+ var content = req.responseText;
+ offsets[currentTaskID][currentLog] += content.length;
+ if (content.length > 0) {
+ var docHeight = document.body.clientHeight;
+ var currlog = currentTaskID + ":" + currentLog;
+ if (currlog != lastlog) {
+ logElement.appendChild(document.createTextNode("\n==> " + currlog + " <==\n"));
+ lastlog = currlog;
+ }
+ logElement.appendChild(document.createTextNode(content));
+ maybeScroll(docHeight);
+ }
+ } else {
+ popupError("Error retrieving " + currentLog + " for task " + currentTaskID + ": " + req.statusText);
+ }
+ outputLog();
+}
+
+function popupError(msg) {
+ errorCount++;
+ alert(msg);
+}
+
+function watchLogs(element) {
+ logElement = document.getElementById(element);
+ headerElement = logElement.firstChild;
+ tasks = parseTasklist();
+ for (var i=0; i<tasks.length; i++) {
+ offsets[tasks[i]] = {};
+ }
+
+ setTimeout(checkTasks, 1000);
+}
diff --git a/www/static/koji.css b/www/static/koji.css
new file mode 100644
index 0000000..2f31138
--- /dev/null
+++ b/www/static/koji.css
@@ -0,0 +1,448 @@
+/*
+ Koji styling
+ Copyright (c) 2007-2014 Red Hat, Inc.
+
+ Authors:
+ Mike Bonnet <mikeb at redhat.com>
+ Mike McLean <mikem at redhat.com>
+*/
+
+html {
+ min-width: 800px;
+}
+
+body {
+ margin: 0px;
+ padding: 0px;
+ font-size: small;
+ font-family: "Lucida Grande", "Luxi Sans", "Bitstream Vera Sans", helvetica, verdana, arial, sans-serif;
+ color: #333;
+ background: #fff url(images/bkgrnd_greydots.png) repeat;
+}
+
+a {
+ text-decoration: none;
+}
+
+a:hover {
+ text-decoration: underline;
+}
+
+#wrap {
+ min-width: 750px;
+ margin: 0 25px 10px 25px;
+ padding: 0;
+ text-align: left;
+ background: #fff;
+}
+
+#innerwrap {
+ margin: 0 15px;
+ padding: 8px 0;
+}
+
+#header {
+ width: 100%;
+ height: 40px;
+ clear: left;
+}
+
+#headerSearch {
+ float: right;
+ margin-right: 10px;
+ margin-top: 15px;
+}
+
+#headerSearch input,
+#headerSearch select {
+ font-size: smaller;
+}
+
+#kojiLogo {
+ /* Used only for the koji logo icon */
+ float: left;
+ width: 49px;
+ height: 40px;
+}
+
+div#content {
+ margin: 0 20px;
+ clear: both;
+}
+
+p#footer {
+ padding-top: 40px;
+ margin-left: 15px;
+ line-height: 1.5em;
+ color: #999;
+ font-size: xx-small;
+ clear: both;
+}
+
+p#footer a {
+ text-decoration: none;
+}
+
+#PoweredByKojiLogo {
+ /* Used only for the powered by koji icon */
+ float: right;
+ border: 0px;
+ height: 30px;
+}
+
+.hide {
+ display: none;
+}
+
+.disabled {
+ color: #808080;
+}
+
+#mainNav {
+ width: 100%;
+ background-color: #009;
+ font-weight: bold;
+ font-family: verdana, helvetica, arial, sans-serif;
+ height: 2.1em;
+}
+
+#mainNav ul {
+ padding: 0px;
+ margin: 0px;
+ list-style-type: none;
+}
+
+#mainNav ul li {
+ background-color: #006;
+ color: #fff;
+ display: block;
+ float: left;
+ padding: 0px;
+ margin: 0px;
+ border-style: solid;
+ border-width: 2px;
+ border-color: #009;
+}
+
+#mainNav ul li a {
+ display: block;
+ color: #fff;
+ text-decoration: none;
+ padding: 0.4em 1.5em;
+ font-size: 0.77em;
+ height: 1.5em;
+}
+
+#mainNav ul li:hover {
+ border-color: #ddd;
+}
+
+body#summary #mainNav li#summaryTab a,
+body#tasks #mainNav li#tasksTab a,
+body#tags #mainNav li#tagsTab a,
+body#builds #mainNav li#buildsTab a,
+body#packages #mainNav li#packagesTab a,
+body#users #mainNav li#usersTab a,
+body#hosts #mainNav li#hostsTab a,
+body#buildtargets #mainNav li#buildtargetsTab a,
+body#reports #mainNav li#reportsTab a,
+body#search #mainNav li#searchTab a {
+ background-color: #eee;
+ color: #000;
+}
+
+h4 {
+ color: #fff;
+ background-color: #006;
+ padding: 0.3em;
+ margin: 0px;
+}
+
+h4 a {
+ color: #fff;
+}
+
+table {
+ border-spacing: 0px;
+}
+
+th {
+ font-weight: bold;
+ vertical-align: text-top;
+}
+
+th, td {
+ padding: 5px;
+}
+
+td.building {
+ color: #cc0;
+}
+
+td.complete {
+ color: #0c0;
+}
+
+td.deleted,
+td.failed,
+td.canceled {
+ color: #c00;
+}
+
+td.false {
+ color: #c00;
+}
+
+td.true {
+ color: #0c0;
+}
+
+img.sort {
+ /* used for up/down sort arrows*/
+ vertical-align: baseline;
+ width: 10px;
+ height: 9px;
+}
+
+td.paginate {
+ text-align: center;
+}
+
+form.pageJump {
+ float: right;
+ margin-left: 20px;
+}
+
+form.pageJump select {
+ font-size: smaller;
+}
+
+div.dataHeader {
+ font-weight: bold;
+}
+
+div.noPaginate {
+ margin-bottom: 10px;
+}
+
+div.pageHeader {
+ margin-bottom: 10px;
+ font-weight: bold;
+ font-size: 1.5em;
+}
+
+table.nested {
+ float: left;
+}
+
+td.container {
+ padding: 4px 0px;
+ width: 100%;
+}
+
+table.nested th,
+table.nested td {
+ padding: 2px 4px;
+}
+
+div.toggle {
+ padding: 6px;
+}
+
+td.tree {
+ background-color: #fff;
+}
+
+.tree span.root {
+ font-weight: bold;
+ background-color: #fff;
+}
+
+.tree ul {
+ padding-left: 2em;
+ list-style: none;
+ margin-top: 0em;
+ margin-bottom: 0em;
+}
+
+.tree span.treeBranch {
+ border-bottom: 1px solid #000;
+ border-left: 1px solid #000;
+ font-size: 1.2em;
+}
+
+.tree li.sibling > span.treeBranch {
+ border-left-width: 0em;
+}
+
+.tree li.sibling {
+ border-left: 1px solid #000;
+}
+
+.tree a {
+ text-decoration: none;
+}
+
+.tree span.treeLabel {
+ position: relative;
+ top: 0.6em;
+ margin-left: 1.2em;
+ padding-left: 0.2em;
+ background-color: #fff;
+ font-size: 0.83em;
+}
+
+.tree > ul {
+ padding-bottom: 0.6em;
+}
+
+.hidden {
+ display: none;
+}
+
+.tree span.treeToggle {
+ font-weight: bold;
+}
+
+.tree span.treeLink {
+ font-size: smaller;
+}
+
+.adminLink {
+ color: #000;
+}
+
+img.stateimg {
+ margin-top: -6px;
+ margin-bottom: -6px;
+}
+
+.charlist {
+ text-align: center;
+}
+
+img.graphrow {
+ background-color: #00f;
+ vertical-align: bottom;
+}
+
+table.data-list {
+ width: 100%;
+}
+
+table.data-list td {
+ vertical-align: text-top;
+}
+
+tr.list-header {
+ background-color: #fff;
+}
+
+tr.list-header th {
+ background-color: #ddd;
+}
+
+tr.list-header th:first-child {
+ -moz-border-radius-topleft: 1em;
+ -webkit-border-top-left-radius: 1em;
+}
+
+tr.list-header th:last-child {
+ -moz-border-radius-topright: 1em;
+ -webkit-border-top-right-radius: 1em;
+}
+
+tr.row-odd {
+ background-color: #fff;
+}
+
+tr.row-odd td {
+ border-bottom: 1px solid #eee;
+}
+
+tr.row-even {
+ background-color: #eee;
+}
+
+tr.row-even td {
+ border-bottom: 1px solid #fff;
+}
+
+tr.row-odd td:first-child,
+tr.row-even td:first-child {
+ border-left: 1px solid #eee;
+}
+
+tr.row-odd td:last-child,
+tr.row-even td:last-child {
+ border-right: 1px solid #eee;
+}
+
+tr.row-even td.tree {
+ background-color: #eee;
+}
+
+tr.row-even td.tree span.treeLabel {
+ background-color: #eee;
+}
+
+.taskfree {
+ color: #30c;
+}
+
+.taskopen {
+ color: #f60;
+}
+
+.taskclosed {
+ color: #0c0;
+}
+
+.taskcanceled {
+ color: #c90;
+}
+
+.taskassigned {
+ color: #c0f;
+}
+
+.taskfailed {
+ color: #c00;
+}
+
+a.help {
+ text-decoration: underline;
+}
+
+abbr {
+ cursor: help;
+}
+
+.changelog, .rpmheader, .usertext {
+ font-family: monospace;
+ font-size: medium;
+ white-space: pre;
+}
+
+#headerHelp {
+ float: right;
+ margin: 15px 10px 0 0;
+}
+
+.filterlist {
+ font-size: smaller;
+}
+
+span#loginInfo {
+ float: right;
+ font-weight: bold;
+ margin: 5px;
+}
+
+.smaller {
+ font-size: smaller;
+}
+
+.error {
+ color: red;
+}
diff --git a/www/static/themes/Makefile b/www/static/themes/Makefile
new file mode 100644
index 0000000..79c3560
--- /dev/null
+++ b/www/static/themes/Makefile
@@ -0,0 +1,18 @@
+SERVERDIR = /themes
+FILES = README
+
+_default:
+ @echo "nothing to make. try make install"
+
+clean:
+ rm -f *.o *.so *.pyc *~
+
+install:
+ @if [ "$(DESTDIR)" = "" ]; then \
+ echo " "; \
+ echo "ERROR: A destdir is required"; \
+ exit 1; \
+ fi
+
+ mkdir -p $(DESTDIR)/$(SERVERDIR)
+ install -p -m 644 $(FILES) $(DESTDIR)/$(SERVERDIR)
diff --git a/www/static/themes/README b/www/static/themes/README
new file mode 100644
index 0000000..801a3ba
--- /dev/null
+++ b/www/static/themes/README
@@ -0,0 +1,5 @@
+Place static theme content under this directory
+
+Content under koji-static/themes/$NAME/ will be used instead of the normal
+files under koji-static/ if KojiTheme is set to $NAME. Any absent files
+will fall back to the normal koji-static/ path.
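For example, with KojiTheme = mytheme (a hypothetical theme name),
kojiweb's themePath() resolves images/koji.png to:

    /koji-static/themes/mytheme/images/koji.png   (if the file exists)
    /koji-static/images/koji.png                  (fallback otherwise)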
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/reproducible/koji.git