Commit 72ff1701 authored by Stanislav Bohm

TEST: Scheduler tests for "follows"

parent e9bd2f8e
@@ -285,18 +285,20 @@ TaskDistribution ComputationState::compute_distribution()
// + 1 because lp_solve indexes from 1 :(
std::vector<double> costs(t_variables + 1, 0.0);
std::vector<loom::Id> tasks(pending_tasks.begin(), pending_tasks.end());
/* [t_0,A] [t_0,B] ... [t_1,A] [t_1,B] ... [m_0,A] [m_0,B] ... [m_2,A] [m_2,B]
* pending tasks t_X,A - X=task_index, A=worker_index
* movable tasks m_X,A - X=task_index, A=worker_index
*/
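/* A possible reading of this layout (a sketch inferred from the indexing
 * below, not part of the original code): the LP variable for pending task X
 * on worker A would be
 *     var(X, A) = 1 + X * n_workers + A,
 * where the + 1 matches lp_solve's 1-based indexing noted above; the
 * movable-object variables m_X,A follow after all task variables. */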
/* Gather all inputs and estimate the max transfer cost */
std::unordered_map<loom::Id, int> inputs;
std::vector<double> n_cpus; // used later as coefficients, so stored directly as double
n_cpus.reserve(tasks.size());
size_t total_size = 0;
for (loom::Id id : tasks) {
const PlanNode &node = get_node(id);
n_cpus.push_back(node.get_n_cpus());
for (loom::Id id2 : node.get_inputs()) {
@@ -313,6 +315,7 @@ TaskDistribution ComputationState::compute_distribution()
}
}
/* Set up coefficients for executing a task */
double task_cost = (total_size + 1) * 2 * TRANSFER_COST_COEF;
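/* Presumably (total_size + 1) * 2 makes task_cost exceed twice the combined
 * size of all inputs, so the reward for executing a task dominates any
 * possible transfer saving and no schedulable task is left unassigned. */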
for (size_t i = 0; i < n_tasks; i++) {
double cpus = n_cpus[i];
@@ -328,23 +331,22 @@ TaskDistribution ComputationState::compute_distribution()
}
}
/* Initialize solver and helper structures */
size_t variables = costs.size() - 1;
Solver solver(variables);
std::vector<int> indices;
indices.reserve(n_workers * n_tasks);
std::vector<double> ones(variables, 1.0);
size_t task_id = 1;
/* Task constraints */
for (loom::Id id : tasks) {
indices.clear();
for (size_t i = 0; i < n_workers; i++) {
indices.push_back(task_id + i);
}
/* Task can be executed only once */
solver.add_constraint_lq(indices, ones, 1);
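/* In LP terms this reads: sum over workers A of x[t,A] <= 1, i.e. each
 * pending task is assigned to at most one worker. */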
const PlanNode &node = get_node(id);
@@ -355,13 +357,9 @@ TaskDistribution ComputationState::compute_distribution()
nonlocals.clear();
collect_requirements_for_node(wc, node, nonlocals);
for (loom::Id id2 : nonlocals) {
/* Which data objects have to be transferred? */
solver.add_constraint_lq(task_id + index, inputs[id2] + index);
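/* This appears to encode x[t,A] <= x[m,A]: placing task t on worker A
 * forces the variable for having nonlocal input m on A, whose cost term
 * charges the transfer. */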
}
}
task_id += n_workers;
}
@@ -374,6 +372,7 @@ TaskDistribution ComputationState::compute_distribution()
for (size_t j = 0; j < n_tasks; j++) {
indices[j] = j * n_workers + index + 1;
}
/* Capacity limit for each worker */
solver.add_constraint_lq(indices, n_cpus, free_cpus);
}
solver.set_objective_fn(costs);
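Taken together, the solver calls above appear to assemble roughly the
following integer program (a sketch using the code's own names, not a
formulation stated in the commit):

    minimize    sum over X,A of costs[X,A] * x[X,A]
    subject to  sum over A of x[t,A] <= 1                          (task runs at most once)
                x[t,A] <= x[m,A] for each nonlocal input m         (placement implies transfer)
                sum over t of n_cpus[t] * x[t,A] <= free_cpus[A]   (worker CPU capacity)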
@@ -107,6 +107,51 @@ static loomplan::Plan make_plan2(Server &server)
return plan;
}
/* Plan3
n0 n1 n2
/ \ / \ / \
/ \ / | / \
n3 n5 n4 n6 <-- Changed order!
\ \ / /
\ X /
\ / \ /
n7 n8
*/
static loomplan::Plan make_plan3(Server &server)
{
loomplan::Plan plan;
add_cpu_request(server, plan, 1);
new_task(plan, 0); // n0
new_task(plan, 0); // n1
new_task(plan, 0); // n2
loomplan::Task *n3 = new_task(plan, 0);
loomplan::Task *n4 = new_task(plan, 0);
loomplan::Task *n5 = new_task(plan, 0);
loomplan::Task *n6 = new_task(plan, 0);
n3->add_input_ids(0);
n4->add_input_ids(1);
n4->add_input_ids(2);
n5->add_input_ids(0);
n5->add_input_ids(1);
n6->add_input_ids(2);
loomplan::Task *n7 = new_task(plan, 0);
loomplan::Task *n8 = new_task(plan, 0);
n7->add_input_ids(3);
n7->add_input_ids(4);
n8->add_input_ids(5);
n8->add_input_ids(6);
return plan;
}
static loomplan::Plan make_big_plan(Server &server, size_t plan_size)
{
loomplan::Plan plan;
@@ -565,3 +610,82 @@ TEST_CASE("Request plan", "[scheduling]") {
REQUIRE((check_uvector(d[w1.get()], {6, 7})));
}
}
TEST_CASE("Plan continution (plan2)", "[scheduling]") {
Server server(NULL, 0);
ComputationState s(server);
s.set_plan(Plan(make_plan2(server), 0, server.get_dictionary()));
SECTION("Stick to gether") {
auto w1 = simple_worker(server, "w1", 2);
s.add_worker(w1.get());
auto w2 = simple_worker(server, "w2", 2);
s.add_worker(w2.get());
s.add_ready_nodes({0, 1});
TaskDistribution d = s.compute_distribution();
dump_dist(d);
REQUIRE((d[w1.get()].size() == 2 || d[w2.get()].size() == 2));
}
SECTION("Stick to gether - 2") {
auto w1 = simple_worker(server, "w1", 2);
s.add_worker(w1.get());
auto w2 = simple_worker(server, "w2", 1);
s.add_worker(w2.get());
s.add_ready_nodes({0, 3, 4});
TaskDistribution d = s.compute_distribution();
dump_dist(d);
REQUIRE(check_uvector(d[w1.get()], {3, 4}));
REQUIRE(check_uvector(d[w2.get()], {0}));
}
}
TEST_CASE("Plan continution (plan3)", "[scheduling]") {
Server server(NULL, 0);
ComputationState s(server);
s.set_plan(Plan(make_plan3(server), 0, server.get_dictionary()));
SECTION("Stick to gether - inpuits dominant ") {
auto w1 = simple_worker(server, "w1", 2);
s.add_worker(w1.get());
auto w2 = simple_worker(server, "w2", 2);
s.add_worker(w2.get());
auto w3 = simple_worker(server, "w3", 0);
s.add_worker(w3.get());
finish(s, 0, 20000000, 0, w3.get());
finish(s, 1, 20000, 0, w3.get());
finish(s, 2, 20000000, 0, w3.get());
s.add_ready_nodes({3, 4, 5, 6});
TaskDistribution d = s.compute_distribution();
dump_dist(d);
REQUIRE((check_uvector(d[w1.get()], {3, 5}) || (check_uvector(d[w1.get()], {4, 6}))));
REQUIRE((check_uvector(d[w2.get()], {3, 5}) || (check_uvector(d[w2.get()], {4, 6}))));
}
SECTION("Stick to gether - follows dominant") {
auto w1 = simple_worker(server, "w1", 2);
s.add_worker(w1.get());
auto w2 = simple_worker(server, "w2", 2);
s.add_worker(w2.get());
auto w3 = simple_worker(server, "w3", 0);
s.add_worker(w3.get());
finish(s, 0, 20000, 0, w3.get());
finish(s, 1, 20000, 0, w3.get());
finish(s, 2, 20000, 0, w3.get());
s.add_ready_nodes({3, 4, 5, 6});
TaskDistribution d = s.compute_distribution();
dump_dist(d);
REQUIRE((check_uvector(d[w1.get()], {3, 4}) || (check_uvector(d[w1.get()], {5, 6}))));
REQUIRE((check_uvector(d[w2.get()], {3, 4}) || (check_uvector(d[w2.get()], {5, 6}))));
}
}
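The two plan3 sections appear to probe opposite sides of one trade-off. With
n0 and n2 producing 20,000,000-byte outputs, avoiding a ~20 MB transfer
dominates, so consumers of the same large object co-locate: n3 with n5 (both
read n0) and n4 with n6 (both read n2). With every output at 20,000 bytes the
transfer term is negligible and the "follows" relation wins, grouping the
inputs of each downstream task instead: n3 with n4 (feeding n7) and n5 with
n6 (feeding n8).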