[SCM] BOINC packaging branch, upstream, updated. upstream/7.0.28+dfsg-2-g1ed3ee8
Steffen Moeller
steffen_moeller at gmx.de
Sun Jul 8 18:06:43 UTC 2012
The following commit has been merged in the upstream branch:
commit 1ed3ee81ebf1fb4ca919ca1f7a014b6b856b8550
Author: Steffen Moeller <steffen_moeller at gmx.de>
Date: Sun Jul 8 20:04:46 2012 +0200
Imported Upstream version 7.0.31+dfsg
diff --git a/checkin_notes b/checkin_notes
index f973948..24d4577 100644
--- a/checkin_notes
+++ b/checkin_notes
@@ -3774,10 +3774,58 @@ Charlie 26 June 2012
HowToBuildBOINC_XCode.rtf
setupforBOINC.sh
-Rom 2 May 2012
+Rom 27 June 2012
- Tag for 7.0.30 release, all platforms
boinc_core_release_7_0_30
/
configure.ac
version.h
+
+Charlie 28 June 2012
+ - Mac client: Update wxMac build script for partial compatibility
+ with OS 10.8 and XCode 4.5. See comments in script for details.
+
+ mac_build/
+ buildWxMac.sh
+
+David 29 June 2012
+ - client: add missing end tag for <pci_info>. Doh!
+
+ lib/
+ coproc.cpp
+
+David 1 July 2012
+ - When the client makes a scheduler RPC without requesting work,
+ and there's a simple reason
+ (e.g. the project is suspended, no-new-tasks, downloads stalled, etc.)
+ show it in the event log.
+ If the reason is more complex, don't try to explain.
+
+ client/
+ work_fetch.cpp,h
+ scheduler_op.cpp
+
+David 2 July 2012
+ - client: in the job scheduler, there's a check to prevent
+ overcommitting the CPUs if an MT job is scheduled.
+ Skip this check for GPU jobs.
+
+ client/
+ cpu_sched.cpp
+
+Rom 2 July 2012
+ - Tag for 7.0.31 release, all platforms
+ boinc_core_release_7_0_31
+
+ /
+ configure.ac
+ version.h
+
+Charlie 3 July 2012
+ - Mac client: Update Xcode project for compatibility with Xcode 4.3.2
+ and Xcode 4.5. (Checked into 7.0.31 tag.)
+
+ mac_build/
+ boinc.xcodeproj/
+ project.pbxproj
diff --git a/client/cpu_sched.cpp b/client/cpu_sched.cpp
index afa9392..6e92db5 100644
--- a/client/cpu_sched.cpp
+++ b/client/cpu_sched.cpp
@@ -1596,18 +1596,20 @@ bool CLIENT_STATE::enforce_run_list(vector<RESULT*>& run_list) {
}
}
- // don't overcommit CPUs if a MT job is scheduled
+ // Don't overcommit CPUs if a MT job is scheduled.
+ // Skip this check for GPU jobs.
//
- if (scheduled_mt || (rp->avp->avg_ncpus > 1)) {
- if (ncpus_used + rp->avp->avg_ncpus > ncpus) {
- if (log_flags.cpu_sched_debug) {
- msg_printf(rp->project, MSG_INFO,
- "[cpu_sched_debug] avoid MT overcommit: skipping %s",
- rp->name
- );
- }
- continue;
+ if (!rp->uses_coprocs()
+ && (scheduled_mt || (rp->avp->avg_ncpus > 1))
+ && (ncpus_used + rp->avp->avg_ncpus > ncpus)
+ ) {
+ if (log_flags.cpu_sched_debug) {
+ msg_printf(rp->project, MSG_INFO,
+ "[cpu_sched_debug] avoid MT overcommit: skipping %s",
+ rp->name
+ );
}
+ continue;
}
double wss = 0;
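
The reshaped condition above can be read as a single predicate: the overcommit guard now applies only to CPU jobs, since GPU jobs are limited by coprocessor availability rather than by the CPU count. A minimal standalone sketch of that predicate follows; JobInfo and avoid_mt_overcommit() are simplified stand-ins for the client's RESULT/APP_VERSION types, not the real interfaces.

    // Sketch only: restates the new condition from the hunk above with
    // simplified stand-in types (uses_coprocs mirrors RESULT::uses_coprocs(),
    // avg_ncpus mirrors APP_VERSION::avg_ncpus).
    struct JobInfo {
        bool uses_coprocs;    // job uses a GPU or other coprocessor
        double avg_ncpus;     // average number of CPUs the job occupies
    };

    // true means scheduling this job would overcommit the CPUs, so skip it
    bool avoid_mt_overcommit(
        const JobInfo& job, bool scheduled_mt, double ncpus_used, int ncpus
    ) {
        if (job.uses_coprocs) return false;                    // GPU jobs: check skipped
        if (!scheduled_mt && job.avg_ncpus <= 1) return false; // no MT job involved
        return ncpus_used + job.avg_ncpus > ncpus;
    }
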
diff --git a/client/scheduler_op.cpp b/client/scheduler_op.cpp
index 71e7fc5..58255d9 100644
--- a/client/scheduler_op.cpp
+++ b/client/scheduler_op.cpp
@@ -229,7 +229,6 @@ static void request_string(char* buf) {
int SCHEDULER_OP::start_rpc(PROJECT* p) {
int retval;
char request_file[1024], reply_file[1024], buf[256];
- const char *trickle_up_msg;
safe_strcpy(scheduler_url, p->get_scheduler_url(url_index, url_random));
if (log_flags.sched_ops) {
@@ -237,28 +236,24 @@ int SCHEDULER_OP::start_rpc(PROJECT* p) {
"Sending scheduler request: %s.", rpc_reason_string(reason)
);
if (p->trickle_up_pending && reason != RPC_REASON_TRICKLE_UP) {
- trickle_up_msg = ", sending trickle-up message";
- } else {
- trickle_up_msg = "";
+ msg_printf(p, MSG_INFO, "Sending trickle-up message");
+ }
+ if (p->nresults_returned) {
+ msg_printf(p, MSG_INFO,
+ "Reporting %d completed tasks", p->nresults_returned
+ );
}
request_string(buf);
if (strlen(buf)) {
- if (p->nresults_returned) {
- msg_printf(p, MSG_INFO,
- "Reporting %d completed tasks, requesting new tasks for %s%s",
- p->nresults_returned, buf, trickle_up_msg
- );
- } else {
- msg_printf(p, MSG_INFO, "Requesting new tasks for %s%s", buf, trickle_up_msg);
- }
+ msg_printf(p, MSG_INFO, "Requesting new tasks for %s", buf);
} else {
- if (p->nresults_returned) {
+ if (p->pwf.cant_fetch_work_reason) {
msg_printf(p, MSG_INFO,
- "Reporting %d completed tasks, not requesting new tasks%s",
- p->nresults_returned, trickle_up_msg
+ "Not requesting tasks: %s",
+ cant_fetch_work_string(p->pwf.cant_fetch_work_reason)
);
} else {
- msg_printf(p, MSG_INFO, "Not reporting or requesting tasks%s", trickle_up_msg);
+ msg_printf(p, MSG_INFO, "Not requesting tasks");
}
}
}
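
Put back together, the new start_rpc() logging replaces the old combined message with one event-log line per fact, falling back to the reason string when no work can be requested. A sketch of the resulting flow, restated as a standalone function: printf stands in for msg_printf(p, MSG_INFO, ...), and the parameters mirror the PROJECT fields used above.

    // Sketch of the new per-line event-log decisions in SCHEDULER_OP::start_rpc().
    #include <cstdio>
    #include <cstring>

    void log_request_summary(
        bool sending_trickle_up,       // trickle_up_pending, and not already a trickle-up RPC
        int nresults_returned,         // completed tasks being reported
        const char* request_buf,       // resource list from request_string(); "" if none
        const char* cant_fetch_reason  // cant_fetch_work_string(...), or NULL if none
    ) {
        if (sending_trickle_up) {
            printf("Sending trickle-up message\n");
        }
        if (nresults_returned) {
            printf("Reporting %d completed tasks\n", nresults_returned);
        }
        if (strlen(request_buf)) {
            printf("Requesting new tasks for %s\n", request_buf);
        } else if (cant_fetch_reason) {
            printf("Not requesting tasks: %s\n", cant_fetch_reason);
        } else {
            printf("Not requesting tasks\n");
        }
    }

For example, a call with nresults_returned = 3, an empty request buffer, and the reason string "scheduler RPC backoff" prints "Reporting 3 completed tasks" followed by "Not requesting tasks: scheduler RPC backoff", matching the intent of the 1 July checkin note.
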
diff --git a/client/work_fetch.cpp b/client/work_fetch.cpp
index 58e8578..cd1043e 100644
--- a/client/work_fetch.cpp
+++ b/client/work_fetch.cpp
@@ -229,7 +229,7 @@ PROJECT* RSC_WORK_FETCH::choose_project_hyst(bool enforce_hyst) {
for (unsigned i=0; i<gstate.projects.size(); i++) {
PROJECT* p = gstate.projects[i];
- if (!p->pwf.can_fetch_work) continue;
+ if (p->pwf.cant_fetch_work_reason) continue;
if (!project_state(p).may_have_work) continue;
// if project has zero resource share,
@@ -294,7 +294,7 @@ PROJECT* RSC_WORK_FETCH::choose_project(int criterion) {
for (unsigned i=0; i<gstate.projects.size(); i++) {
PROJECT* p = gstate.projects[i];
- if (!p->pwf.can_fetch_work) continue;
+ if (p->pwf.cant_fetch_work_reason) continue;
if (!project_state(p).may_have_work) continue;
RSC_PROJECT_WORK_FETCH& rpwf = project_state(p);
if (rpwf.anon_skip) continue;
@@ -452,16 +452,16 @@ void RSC_WORK_FETCH::clear_request() {
/////////////// PROJECT_WORK_FETCH ///////////////
-bool PROJECT_WORK_FETCH::compute_can_fetch_work(PROJECT* p) {
- if (p->non_cpu_intensive) return false;
- if (p->suspended_via_gui) return false;
- if (p->master_url_fetch_pending) return false;
- if (p->min_rpc_time > gstate.now) return false;
- if (p->dont_request_more_work) return false;
- if (p->some_download_stalled()) return false;
- if (p->some_result_suspended()) return false;
- if (p->too_many_uploading_results) return false;
- return true;
+int PROJECT_WORK_FETCH::compute_cant_fetch_work_reason(PROJECT* p) {
+ if (p->non_cpu_intensive) return CANT_FETCH_WORK_NON_CPU_INTENSIVE;
+ if (p->suspended_via_gui) return CANT_FETCH_WORK_SUSPENDED_VIA_GUI;
+ if (p->master_url_fetch_pending) return CANT_FETCH_WORK_MASTER_URL_FETCH_PENDING;
+ if (p->min_rpc_time > gstate.now) return CANT_FETCH_WORK_MIN_RPC_TIME;
+ if (p->dont_request_more_work) return CANT_FETCH_WORK_DONT_REQUEST_MORE_WORK;
+ if (p->some_download_stalled()) return CANT_FETCH_WORK_DOWNLOAD_STALLED;
+ if (p->some_result_suspended()) return CANT_FETCH_WORK_RESULT_SUSPENDED;
+ if (p->too_many_uploading_results) return CANT_FETCH_WORK_TOO_MANY_UPLOADS;
+ return 0;
}
void PROJECT_WORK_FETCH::reset(PROJECT* p) {
@@ -478,7 +478,7 @@ void WORK_FETCH::rr_init() {
}
for (unsigned int i=0; i<gstate.projects.size(); i++) {
PROJECT* p = gstate.projects[i];
- p->pwf.can_fetch_work = p->pwf.compute_can_fetch_work(p);
+ p->pwf.cant_fetch_work_reason = p->pwf.compute_cant_fetch_work_reason(p);
p->pwf.has_runnable_jobs = false;
for (int j=0; j<coprocs.n_rsc; j++) {
p->rsc_pwf[j].rr_init(p, j);
@@ -494,7 +494,7 @@ void RSC_WORK_FETCH::supplement(PROJECT* pp) {
for (unsigned i=0; i<gstate.projects.size(); i++) {
PROJECT* p = gstate.projects[i];
if (p == pp) continue;
- if (!p->pwf.can_fetch_work) continue;
+ if (p->pwf.cant_fetch_work_reason) continue;
if (!project_state(p).may_have_work) continue;
RSC_PROJECT_WORK_FETCH& rpwf = project_state(p);
if (rpwf.anon_skip) continue;
@@ -590,6 +590,13 @@ void WORK_FETCH::compute_work_request(PROJECT* p) {
PROJECT* bestp = choose_project(false);
if (p != bestp) {
+ if (!p->pwf.cant_fetch_work_reason) {
+ if (bestp) {
+ p->pwf.cant_fetch_work_reason = CANT_FETCH_WORK_NOT_HIGHEST_PRIORITY;
+ } else {
+ p->pwf.cant_fetch_work_reason = CANT_FETCH_WORK_DONT_NEED;
+ }
+ }
clear_request();
}
}
@@ -727,7 +734,7 @@ void WORK_FETCH::compute_shares() {
for (i=0; i<gstate.projects.size(); i++) {
p = gstate.projects[i];
if (p->non_cpu_intensive) continue;
- if (!p->pwf.can_fetch_work) continue;
+ if (p->pwf.cant_fetch_work_reason) continue;
for (int j=0; j<coprocs.n_rsc; j++) {
if (p->rsc_pwf[j].may_have_work) {
rsc_work_fetch[j].total_fetchable_share += p->resource_share;
@@ -737,7 +744,7 @@ void WORK_FETCH::compute_shares() {
for (i=0; i<gstate.projects.size(); i++) {
p = gstate.projects[i];
if (p->non_cpu_intensive) continue;
- if (!p->pwf.can_fetch_work) continue;
+ if (p->pwf.cant_fetch_work_reason) continue;
for (int j=0; j<coprocs.n_rsc; j++) {
if (p->rsc_pwf[j].may_have_work) {
p->rsc_pwf[j].fetchable_share = rsc_work_fetch[j].total_fetchable_share?p->resource_share/rsc_work_fetch[j].total_fetchable_share:1;
diff --git a/client/work_fetch.h b/client/work_fetch.h
index 1e32039..204740f 100644
--- a/client/work_fetch.h
+++ b/client/work_fetch.h
@@ -29,6 +29,45 @@ extern bool use_hyst_fetch;
#define RSC_TYPE_ANY -1
#define RSC_TYPE_CPU 0
+// reasons for not being able to fetch work
+//
+#define CANT_FETCH_WORK_NON_CPU_INTENSIVE 1
+#define CANT_FETCH_WORK_SUSPENDED_VIA_GUI 2
+#define CANT_FETCH_WORK_MASTER_URL_FETCH_PENDING 3
+#define CANT_FETCH_WORK_MIN_RPC_TIME 4
+#define CANT_FETCH_WORK_DONT_REQUEST_MORE_WORK 5
+#define CANT_FETCH_WORK_DOWNLOAD_STALLED 6
+#define CANT_FETCH_WORK_RESULT_SUSPENDED 7
+#define CANT_FETCH_WORK_TOO_MANY_UPLOADS 8
+#define CANT_FETCH_WORK_NOT_HIGHEST_PRIORITY 9
+#define CANT_FETCH_WORK_DONT_NEED 10
+
+inline const char* cant_fetch_work_string(int reason) {
+ switch (reason) {
+ case CANT_FETCH_WORK_NON_CPU_INTENSIVE:
+ return "non CPU intensive";
+ case CANT_FETCH_WORK_SUSPENDED_VIA_GUI:
+ return "suspended via Manager";
+ case CANT_FETCH_WORK_MASTER_URL_FETCH_PENDING:
+ return "master URL fetch pending";
+ case CANT_FETCH_WORK_MIN_RPC_TIME:
+ return "scheduler RPC backoff";
+ case CANT_FETCH_WORK_DONT_REQUEST_MORE_WORK:
+ return "\"no new tasks\" requested via Manager";
+ case CANT_FETCH_WORK_DOWNLOAD_STALLED:
+ return "some download is stalled";
+ case CANT_FETCH_WORK_RESULT_SUSPENDED:
+ return "some task is suspended via Manager";
+ case CANT_FETCH_WORK_TOO_MANY_UPLOADS:
+ return "too many uploads in progress";
+ case CANT_FETCH_WORK_NOT_HIGHEST_PRIORITY:
+ return "project is not highest priority";
+ case CANT_FETCH_WORK_DONT_NEED:
+ return "don't need";
+ }
+ return "";
+}
+
struct PROJECT;
struct RESULT;
struct ACTIVE_TASK;
@@ -228,8 +267,8 @@ struct PROJECT_WORK_FETCH {
// temporary copy used during schedule_cpus() and work fetch
double rec_temp_save;
// temporary used during RR simulation
- bool can_fetch_work;
- bool compute_can_fetch_work(PROJECT*);
+ int cant_fetch_work_reason;
+ int compute_cant_fetch_work_reason(PROJECT*);
bool has_runnable_jobs;
PROJECT_WORK_FETCH() {
memset(this, 0, sizeof(*this));
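
The boolean can_fetch_work becomes an int reason code: zero keeps the old meaning of "work fetch allowed", while any non-zero value both suppresses fetching (the "if (p->pwf.cant_fetch_work_reason) continue;" checks above) and maps to a user-facing string. A minimal usage sketch built only on the definitions added in this header; the project name is an illustrative placeholder.

    // Sketch: turning a reason code into an event-log style message,
    // using only the CANT_FETCH_WORK_* codes and cant_fetch_work_string()
    // defined above.
    #include <cstdio>
    // #include "work_fetch.h"   // for the codes and cant_fetch_work_string()

    void show_fetch_state(const char* project, int cant_fetch_work_reason) {
        if (cant_fetch_work_reason) {
            printf("%s: not requesting tasks: %s\n",
                project, cant_fetch_work_string(cant_fetch_work_reason));
        } else {
            printf("%s: work fetch allowed\n", project);
        }
    }

    // show_fetch_state("Example@Home", CANT_FETCH_WORK_MIN_RPC_TIME);
    // prints: Example@Home: not requesting tasks: scheduler RPC backoff
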
diff --git a/configure.ac b/configure.ac
index 43f1e17..c6698b3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,12 +1,12 @@
dnl -*- autoconf -*-
-dnl $Id: configure.ac 25816 2012-06-27 21:34:57Z romw $
+dnl $Id: configure.ac 25834 2012-07-02 17:54:01Z romw $
dnl not sure exactly what the minimum version is (but 2.13 won't work)
AC_PREREQ(2.58)
dnl Set the BOINC version here. You can also use the set-version script.
-AC_INIT(BOINC, 7.0.30)
+AC_INIT(BOINC, 7.0.31)
AC_CONFIG_MACRO_DIR([m4])
LIBBOINC_VERSION=`echo ${PACKAGE_VERSION} | sed 's/\./:/g'`
AC_SUBST([LIBBOINC_VERSION])
@@ -19,8 +19,8 @@ AM_INIT_AUTOMAKE(dist-zip)
AC_CONFIG_SRCDIR(lib/shmem.cpp)
-AC_REVISION([$Revision: 25816 $])
-REV=`echo '$Revision: 25816 $' | awk "{print $2}"`
+AC_REVISION([$Revision: 25834 $])
+REV=`echo '$Revision: 25834 $' | awk "{print $2}"`
RDATE=`date '+%Y.%m.%d'`
if test -d .svn ; then
REV=`svn info | grep Revision | awk '{print $2}'`
diff --git a/lib/coproc.cpp b/lib/coproc.cpp
index 1df7d68..050881d 100644
--- a/lib/coproc.cpp
+++ b/lib/coproc.cpp
@@ -91,7 +91,8 @@ void PCI_INFO::write(MIOFILE& f) {
"<pci_info>\n"
" <bus_id>%d</bus_id>\n"
" <device_id>%d</device_id>\n"
- " <domain_id>%d</domain_id>\n",
+ " <domain_id>%d</domain_id>\n"
+ "</pci_info>\n",
bus_id,
device_id,
domain_id
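
The added format fragment supplies the end tag mentioned in the 29 June checkin note, so the element PCI_INFO::write() emits is now well-formed. With the fix, the written record has the shape below; the integer values are placeholders and the indentation is approximate.

    <pci_info>
       <bus_id>3</bus_id>
       <device_id>0</device_id>
       <domain_id>0</domain_id>
    </pci_info>
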
--
BOINC packaging