diff --git a/ortools/sat/BUILD.bazel b/ortools/sat/BUILD.bazel index e67d724437..da3e5820bb 100644 --- a/ortools/sat/BUILD.bazel +++ b/ortools/sat/BUILD.bazel @@ -433,6 +433,7 @@ cc_library( "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_set", "@com_google_absl//absl/log:check", + "@com_google_absl//absl/random:bit_gen_ref", "@com_google_absl//absl/random:distributions", "@com_google_absl//absl/strings", "@com_google_absl//absl/types:span", @@ -450,6 +451,10 @@ cc_test( ":sat_parameters_cc_proto", "//ortools/base:gmock_main", "//ortools/base:parse_test_proto", + "//ortools/util:stats", + "@com_google_absl//absl/container:flat_hash_map", + "@com_google_absl//absl/log", + "@com_google_absl//absl/strings", ], ) @@ -592,6 +597,7 @@ cc_library( ":cp_model_symmetries", ":cp_model_utils", ":cuts", + ":diffn_util", ":feasibility_jump", ":feasibility_pump", ":implied_bounds", @@ -2028,7 +2034,9 @@ cc_library( "//ortools/graph:strongly_connected_components", "//ortools/util:sort", "//ortools/util:strong_integers", + "@com_google_absl//absl/algorithm:container", "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/log:check", "@com_google_absl//absl/types:span", ], @@ -3350,6 +3358,7 @@ cc_library( srcs = ["circuit.cc"], hdrs = ["circuit.h"], deps = [ + ":all_different", ":clause", ":integer", ":model", diff --git a/ortools/sat/all_different.cc b/ortools/sat/all_different.cc index 8c76822a8c..f2bb1abc8a 100644 --- a/ortools/sat/all_different.cc +++ b/ortools/sat/all_different.cc @@ -20,7 +20,9 @@ #include #include +#include "absl/algorithm/container.h" #include "absl/container/btree_map.h" +#include "absl/container/flat_hash_map.h" #include "absl/log/check.h" #include "absl/types/span.h" #include "ortools/base/logging.h" @@ -101,108 +103,104 @@ std::function AllDifferentOnBounds( std::function AllDifferentAC( absl::Span variables) { - return [=, variables = std::vector( - variables.begin(), variables.end())](Model* model) { + return [variables](Model* model) { if (variables.size() < 3) return; - AllDifferentConstraint* constraint = new AllDifferentConstraint( - variables, model->GetOrCreate(), - model->GetOrCreate(), model->GetOrCreate()); + AllDifferentConstraint* constraint = + new AllDifferentConstraint(variables, model); constraint->RegisterWith(model->GetOrCreate()); model->TakeOwnership(constraint); }; } AllDifferentConstraint::AllDifferentConstraint( - std::vector variables, IntegerEncoder* encoder, - Trail* trail, IntegerTrail* integer_trail) + absl::Span variables, Model* model) : num_variables_(variables.size()), - variables_(std::move(variables)), - trail_(trail), - integer_trail_(integer_trail) { + trail_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()) { // Initialize literals cache. - int64_t min_value = std::numeric_limits::max(); - int64_t max_value = std::numeric_limits::min(); - variable_min_value_.resize(num_variables_); - variable_max_value_.resize(num_variables_); - variable_literal_index_.resize(num_variables_); - int num_fixed_variables = 0; + // Note that remap all values appearing here with a dense_index. 
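The constructor relies on remapping the (possibly sparse) domain values to dense indices. A minimal self-contained sketch of that idea, using plain int64_t instead of IntegerValue (the helper name is illustrative, not part of the patch):

```cpp
#include <cstdint>

#include "absl/container/flat_hash_map.h"

// Gives each distinct value the next free dense index the first time it is
// seen, so later per-value state (value_to_variable_, value_visited_, ...) can
// live in plain vectors of size num_values instead of hash maps.
int DenseIndexOf(int64_t value,
                 absl::flat_hash_map<int64_t, int>& dense_indexing,
                 int& num_values) {
  const auto [it, inserted] = dense_indexing.insert({value, num_values});
  if (inserted) ++num_values;
  return it->second;
}
```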
+ num_values_ = 0; + absl::flat_hash_map dense_indexing; + variable_to_possible_values_.resize(num_variables_); + auto* encoder = model->GetOrCreate(); for (int x = 0; x < num_variables_; x++) { - variable_min_value_[x] = integer_trail_->LowerBound(variables_[x]).value(); - variable_max_value_[x] = integer_trail_->UpperBound(variables_[x]).value(); - - // Compute value range of all variables. - min_value = std::min(min_value, variable_min_value_[x]); - max_value = std::max(max_value, variable_max_value_[x]); + const IntegerValue lb = integer_trail_->LowerBound(variables[x]); + const IntegerValue ub = integer_trail_->UpperBound(variables[x]); // FullyEncode does not like 1-value domains, handle this case first. // TODO(user): Prune now, ignore these variables during solving. - if (variable_min_value_[x] == variable_max_value_[x]) { - num_fixed_variables++; - variable_literal_index_[x].push_back(kTrueLiteralIndex); + if (lb == ub) { + const auto [it, inserted] = dense_indexing.insert({lb, num_values_}); + if (inserted) ++num_values_; + + variable_to_possible_values_[x].push_back( + {it->second, encoder->GetTrueLiteral()}); continue; } // Force full encoding if not already done. - if (!encoder->VariableIsFullyEncoded(variables_[x])) { - encoder->FullyEncodeVariable(variables_[x]); + if (!encoder->VariableIsFullyEncoded(variables[x])) { + encoder->FullyEncodeVariable(variables[x]); } // Fill cache with literals, default value is kFalseLiteralIndex. - int64_t size = variable_max_value_[x] - variable_min_value_[x] + 1; - variable_literal_index_[x].resize(size, kFalseLiteralIndex); - for (const auto& entry : encoder->FullDomainEncoding(variables_[x])) { - int64_t value = entry.value.value(); - // Can happen because of initial propagation! - if (value < variable_min_value_[x] || variable_max_value_[x] < value) { - continue; - } - variable_literal_index_[x][value - variable_min_value_[x]] = - entry.literal.Index(); + for (const auto [value, lit] : encoder->FullDomainEncoding(variables[x])) { + const auto [it, inserted] = dense_indexing.insert({value, num_values_}); + if (inserted) ++num_values_; + + variable_to_possible_values_[x].push_back({it->second, lit}); } + + // Not sure it is needed, but lets sort. + absl::c_sort( + variable_to_possible_values_[x], + [](const std::pair& a, const std::pair& b) { + return a.first < b.first; + }); + } + + variable_to_value_.assign(num_variables_, -1); + visiting_.resize(num_variables_); + variable_visited_from_.resize(num_variables_); + component_number_.resize(num_variables_ + num_values_ + 1); +} + +AllDifferentConstraint::AllDifferentConstraint( + int num_nodes, absl::Span tails, absl::Span heads, + absl::Span literals, Model* model) + : num_variables_(num_nodes), + trail_(model->GetOrCreate()), + integer_trail_(model->GetOrCreate()) { + num_values_ = num_nodes; + + // We assume everything is already dense. 
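As an illustration of how circuit data feeds this second constructor, here is a hypothetical arc list for a 3-node circuit; arc a goes from tails[a] to heads[a] and is selected iff the corresponding literal is true, so the head plays the role of an already-dense value:

```cpp
#include <vector>

// Hypothetical 3-node circuit: every node may pick either other node as its
// successor. Passing these spans (plus the parallel vector of arc Literals
// from the real model) to the constructor above enforces that no two nodes
// select the same successor.
const std::vector<int> tails = {0, 0, 1, 1, 2, 2};
const std::vector<int> heads = {1, 2, 0, 2, 0, 1};
```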
+ const int num_arcs = tails.size(); + variable_to_possible_values_.resize(num_variables_); + for (int a = 0; a < num_arcs; ++a) { + variable_to_possible_values_[tails[a]].push_back({heads[a], literals[a]}); } - min_all_values_ = min_value; - num_all_values_ = max_value - min_value + 1; - successor_.resize(num_variables_); variable_to_value_.assign(num_variables_, -1); visiting_.resize(num_variables_); variable_visited_from_.resize(num_variables_); - residual_graph_successors_.resize(num_variables_ + num_all_values_ + 1); - component_number_.resize(num_variables_ + num_all_values_ + 1); + component_number_.resize(num_variables_ + num_values_ + 1); } void AllDifferentConstraint::RegisterWith(GenericLiteralWatcher* watcher) { const int id = watcher->Register(this); watcher->SetPropagatorPriority(id, 2); - for (const auto& literal_indices : variable_literal_index_) { - for (const LiteralIndex li : literal_indices) { + for (int x = 0; x < num_variables_; x++) { + for (const auto [_, lit] : variable_to_possible_values_[x]) { // Watch only unbound literals. - if (li >= 0 && - !trail_->Assignment().VariableIsAssigned(Literal(li).Variable())) { - watcher->WatchLiteral(Literal(li), id); - watcher->WatchLiteral(Literal(li).Negated(), id); + if (!trail_->Assignment().LiteralIsAssigned(lit)) { + watcher->WatchLiteral(lit, id); + watcher->WatchLiteral(lit.Negated(), id); } } } } -LiteralIndex AllDifferentConstraint::VariableLiteralIndexOf(int x, - int64_t value) { - return (value < variable_min_value_[x] || variable_max_value_[x] < value) - ? kFalseLiteralIndex - : variable_literal_index_[x][value - variable_min_value_[x]]; -} - -inline bool AllDifferentConstraint::VariableHasPossibleValue(int x, - int64_t value) { - LiteralIndex li = VariableLiteralIndexOf(x, value); - if (li == kFalseLiteralIndex) return false; - if (li == kTrueLiteralIndex) return true; - DCHECK_GE(li, 0); - return !trail_->Assignment().LiteralIsFalse(Literal(li)); -} - bool AllDifferentConstraint::MakeAugmentingPath(int start) { // Do a BFS and use visiting_ as a queue, with num_visited pointing // at its begin() and num_to_visit its end(). @@ -264,58 +262,38 @@ bool AllDifferentConstraint::MakeAugmentingPath(int start) { bool AllDifferentConstraint::Propagate() { // Copy variable state to graph state. prev_matching_ = variable_to_value_; - value_to_variable_.assign(num_all_values_, -1); + value_to_variable_.assign(num_values_, -1); variable_to_value_.assign(num_variables_, -1); + successor_.clear(); + const auto assignment = AssignmentView(trail_->Assignment()); for (int x = 0; x < num_variables_; x++) { - successor_[x].clear(); - const int64_t min_value = integer_trail_->LowerBound(variables_[x]).value(); - const int64_t max_value = integer_trail_->UpperBound(variables_[x]).value(); - for (int64_t value = min_value; value <= max_value; value++) { - if (VariableHasPossibleValue(x, value)) { - const int offset_value = value - min_all_values_; - // Forward-checking should propagate x != value. - successor_[x].push_back(offset_value); + successor_.Add({}); + for (const auto [value, lit] : variable_to_possible_values_[x]) { + if (assignment.LiteralIsFalse(lit)) continue; + + // Forward-checking should propagate x != value. + successor_.AppendToLastVector(value); + + // Seed with previous matching. 
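For context, the matching seeded here is then completed by MakeAugmentingPath(). A compact sketch of the classical augmenting-path step it corresponds to, written as a recursive Kuhn-style search over a plain adjacency list (illustrative only; the implementation above uses an explicit BFS queue over visiting_):

```cpp
#include <vector>

// adj[x] lists the candidate dense values of variable x; value_to_variable
// holds the current matching, with -1 meaning the value is still free.
bool TryAugment(int x, const std::vector<std::vector<int>>& adj,
                std::vector<bool>& value_visited,
                std::vector<int>& value_to_variable) {
  for (const int value : adj[x]) {
    if (value_visited[value]) continue;
    value_visited[value] = true;
    if (value_to_variable[value] == -1 ||
        TryAugment(value_to_variable[value], adj, value_visited,
                   value_to_variable)) {
      value_to_variable[value] = x;
      return true;
    }
  }
  return false;
}
```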
+ if (prev_matching_[x] == value) { + variable_to_value_[x] = prev_matching_[x]; + value_to_variable_[prev_matching_[x]] = x; } } if (successor_[x].size() == 1) { - const int offset_value = successor_[x][0]; - if (value_to_variable_[offset_value] == -1) { - value_to_variable_[offset_value] = x; - variable_to_value_[x] = offset_value; - } - } - } - - // Because we currently propagates all clauses before entering this - // propagator, we known that this can't happen. - if (DEBUG_MODE) { - for (int x = 0; x < num_variables_; x++) { - for (const int offset_value : successor_[x]) { - if (value_to_variable_[offset_value] != -1 && - value_to_variable_[offset_value] != x) { - LOG(FATAL) << "Should have been propagated by AllDifferentBinary()!"; - } + const int value = successor_[x][0]; + if (value_to_variable_[value] == -1) { + value_to_variable_[value] = x; + variable_to_value_[x] = value; } } } - // Seed with previous matching. - for (int x = 0; x < num_variables_; x++) { - if (variable_to_value_[x] != -1) continue; - const int prev_value = prev_matching_[x]; - if (prev_value == -1 || value_to_variable_[prev_value] != -1) continue; - - if (VariableHasPossibleValue(x, prev_matching_[x] + min_all_values_)) { - variable_to_value_[x] = prev_matching_[x]; - value_to_variable_[prev_matching_[x]] = x; - } - } - // Compute max matching. int x = 0; for (; x < num_variables_; x++) { if (variable_to_value_[x] == -1) { - value_visited_.assign(num_all_values_, false); + value_visited_.assign(num_values_, false); variable_visited_.assign(num_variables_, false); MakeAugmentingPath(x); } @@ -331,12 +309,10 @@ bool AllDifferentConstraint::Propagate() { conflict->clear(); for (int y = 0; y < num_variables_; y++) { if (!variable_visited_[y]) continue; - for (int value = variable_min_value_[y]; value <= variable_max_value_[y]; - value++) { - const LiteralIndex li = VariableLiteralIndexOf(y, value); - if (li >= 0 && !value_visited_[value - min_all_values_]) { - DCHECK(trail_->Assignment().LiteralIsFalse(Literal(li))); - conflict->push_back(Literal(li)); + for (const auto [value, lit] : variable_to_possible_values_[y]) { + if (!value_visited_[value]) { + DCHECK(assignment.LiteralIsFalse(lit)); + conflict->push_back(lit); } } } @@ -345,32 +321,31 @@ bool AllDifferentConstraint::Propagate() { // The current matching is a valid solution, now try to filter values. // Build residual graph, compute its SCCs. 
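The residual graph built below supports the standard Régin-style filtering rule: once the matching is maximum, a non-matching edge x -> value can be removed exactly when x and value fall in different SCCs. A small sketch of that final check (hypothetical helper, mirroring the test performed further down):

```cpp
#include <vector>

// Value nodes are offset by num_variables, matching the node numbering used
// for component_number_ in the propagator.
bool CanRemoveEdge(int var, int value, int num_variables,
                   const std::vector<int>& variable_to_value,
                   const std::vector<int>& component_number) {
  return variable_to_value[var] != value &&
         component_number[var] != component_number[num_variables + value];
}
```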
+ residual_graph_successors_.clear(); for (int x = 0; x < num_variables_; x++) { - residual_graph_successors_[x].clear(); + residual_graph_successors_.Add({}); for (const int succ : successor_[x]) { if (succ != variable_to_value_[x]) { - residual_graph_successors_[x].push_back(num_variables_ + succ); + residual_graph_successors_.AppendToLastVector(num_variables_ + succ); } } } - for (int offset_value = 0; offset_value < num_all_values_; offset_value++) { - residual_graph_successors_[num_variables_ + offset_value].clear(); - if (value_to_variable_[offset_value] != -1) { - residual_graph_successors_[num_variables_ + offset_value].push_back( - value_to_variable_[offset_value]); + + const int dummy_node = num_variables_ + num_values_; + const bool need_dummy = num_variables_ < num_values_; + for (int value = 0; value < num_values_; value++) { + residual_graph_successors_.Add({}); + if (value_to_variable_[value] != -1) { + residual_graph_successors_.AppendToLastVector(value_to_variable_[value]); + } else if (need_dummy) { + residual_graph_successors_.AppendToLastVector(dummy_node); } } - const int dummy_node = num_variables_ + num_all_values_; - residual_graph_successors_[dummy_node].clear(); - if (num_variables_ < num_all_values_) { + if (need_dummy) { + DCHECK_EQ(residual_graph_successors_.size(), dummy_node); + residual_graph_successors_.Add({}); for (int x = 0; x < num_variables_; x++) { - residual_graph_successors_[dummy_node].push_back(x); - } - for (int offset_value = 0; offset_value < num_all_values_; offset_value++) { - if (value_to_variable_[offset_value] == -1) { - residual_graph_successors_[num_variables_ + offset_value].push_back( - dummy_node); - } + residual_graph_successors_.AppendToLastVector(x); } } @@ -394,47 +369,42 @@ bool AllDifferentConstraint::Propagate() { // Remove arcs var -> val where SCC(var) -/->* SCC(val). for (int x = 0; x < num_variables_; x++) { if (successor_[x].size() == 1) continue; - for (const int offset_value : successor_[x]) { - const int value_node = offset_value + num_variables_; - if (variable_to_value_[x] != offset_value && - component_number_[x] != component_number_[value_node] && - VariableHasPossibleValue(x, offset_value + min_all_values_)) { - // We can deduce that x != value. To explain, force x == offset_value, + for (const auto [value, x_lit] : variable_to_possible_values_[x]) { + if (assignment.LiteralIsFalse(x_lit)) continue; + + const int value_node = value + num_variables_; + if (variable_to_value_[x] != value && + component_number_[x] != component_number_[value_node]) { + // We can deduce that x != value. To explain, force x == value, // then find another assignment for the variable matched to - // offset_value. It will fail: explaining why is the same as + // value. It will fail: explaining why is the same as // explaining failure as above, and it is an explanation of x != value. - value_visited_.assign(num_all_values_, false); + value_visited_.assign(num_values_, false); variable_visited_.assign(num_variables_, false); - // Undo x -> old_value and old_variable -> offset_value. - const int old_variable = value_to_variable_[offset_value]; + // Undo x -> old_value and old_variable -> value. 
+ const int old_variable = value_to_variable_[value]; variable_to_value_[old_variable] = -1; const int old_value = variable_to_value_[x]; value_to_variable_[old_value] = -1; - variable_to_value_[x] = offset_value; - value_to_variable_[offset_value] = x; + variable_to_value_[x] = value; + value_to_variable_[value] = x; - value_visited_[offset_value] = true; + value_visited_[value] = true; MakeAugmentingPath(old_variable); DCHECK_EQ(variable_to_value_[old_variable], -1); // No reassignment. std::vector* reason = trail_->GetEmptyVectorToStoreReason(); for (int y = 0; y < num_variables_; y++) { if (!variable_visited_[y]) continue; - for (int value = variable_min_value_[y]; - value <= variable_max_value_[y]; value++) { - const LiteralIndex li = VariableLiteralIndexOf(y, value); - if (li >= 0 && !value_visited_[value - min_all_values_]) { - DCHECK(!VariableHasPossibleValue(y, value)); - reason->push_back(Literal(li)); + for (const auto [value, y_lit] : variable_to_possible_values_[y]) { + if (!value_visited_[value]) { + DCHECK(assignment.LiteralIsFalse(y_lit)); + reason->push_back(y_lit); } } } - const LiteralIndex li = - VariableLiteralIndexOf(x, offset_value + min_all_values_); - DCHECK_NE(li, kTrueLiteralIndex); - DCHECK_NE(li, kFalseLiteralIndex); - return trail_->EnqueueWithStoredReason(Literal(li).Negated()); + return trail_->EnqueueWithStoredReason(x_lit.Negated()); } } } diff --git a/ortools/sat/all_different.h b/ortools/sat/all_different.h index 38976b05ff..836a380b29 100644 --- a/ortools/sat/all_different.h +++ b/ortools/sat/all_different.h @@ -65,9 +65,14 @@ std::function AllDifferentAC( // Implementation of AllDifferentAC(). class AllDifferentConstraint : PropagatorInterface { public: - AllDifferentConstraint(std::vector variables, - IntegerEncoder* encoder, Trail* trail, - IntegerTrail* integer_trail); + AllDifferentConstraint(absl::Span variables, + Model* model); + + // In a circuit, the successor of all node must be "different". + // Thus this propagator can also be used in this context. + AllDifferentConstraint(int num_nodes, absl::Span tails, + absl::Span heads, + absl::Span literals, Model* model); bool Propagate() final; void RegisterWith(GenericLiteralWatcher* watcher); @@ -83,27 +88,23 @@ class AllDifferentConstraint : PropagatorInterface { // or manipulate it to create what-if scenarios without modifying successor_. bool MakeAugmentingPath(int start); - // Accessors to the cache of literals. - inline LiteralIndex VariableLiteralIndexOf(int x, int64_t value); - inline bool VariableHasPossibleValue(int x, int64_t value); - // This caches all literals of the fully encoded variables. // Values of a given variable are 0-indexed using offsets variable_min_value_, // the set of all values is globally offset using offset min_all_values_. // TODO(user): compare this encoding to a sparser hash_map encoding. const int num_variables_; const std::vector variables_; - int64_t min_all_values_; - int64_t num_all_values_; - std::vector variable_min_value_; - std::vector variable_max_value_; - std::vector> variable_literal_index_; + + // Note that we remap all value into [0, num_values_) in a "dense" way. + std::vector>> + variable_to_possible_values_; + int64_t num_values_; // Internal state of MakeAugmentingPath(). // value_to_variable_ and variable_to_value_ represent the current assignment; // -1 means not assigned. Otherwise, // variable_to_value_[var] = value <=> value_to_variable_[value] = var. 
- std::vector> successor_; + CompactVectorVector successor_; std::vector value_visited_; std::vector variable_visited_; std::vector value_to_variable_; @@ -128,7 +129,7 @@ class AllDifferentConstraint : PropagatorInterface { // part in the alternating cycle, and filter with only the SCC decomposition. // When num_variables_ == num_all_values_, the dummy node is useless, // we add it anyway to simplify the code. - std::vector> residual_graph_successors_; + CompactVectorVector residual_graph_successors_; std::vector component_number_; Trail* trail_; diff --git a/ortools/sat/circuit.cc b/ortools/sat/circuit.cc index 402e516b7c..8ae427ce78 100644 --- a/ortools/sat/circuit.cc +++ b/ortools/sat/circuit.cc @@ -24,6 +24,7 @@ #include "absl/types/span.h" #include "ortools/base/logging.h" #include "ortools/graph/strongly_connected_components.h" +#include "ortools/sat/all_different.h" #include "ortools/sat/clause.h" #include "ortools/sat/integer.h" #include "ortools/sat/model.h" @@ -693,6 +694,16 @@ void LoadSubcircuitConstraint(int num_nodes, absl::Span tails, new CircuitPropagator(num_nodes, tails, heads, literals, options, model); constraint->RegisterWith(model->GetOrCreate()); model->TakeOwnership(constraint); + + // TODO(user): Just ignore node zero if multiple_subcircuit_through_zero is + // true. + if (model->GetOrCreate()->use_all_different_for_circuit() && + !multiple_subcircuit_through_zero) { + AllDifferentConstraint* constraint = + new AllDifferentConstraint(num_nodes, tails, heads, literals, model); + constraint->RegisterWith(model->GetOrCreate()); + model->TakeOwnership(constraint); + } } std::function CircuitCovering( diff --git a/ortools/sat/cp_model.proto b/ortools/sat/cp_model.proto index c130d2f1ee..625471ad29 100644 --- a/ortools/sat/cp_model.proto +++ b/ortools/sat/cp_model.proto @@ -562,6 +562,7 @@ message DecisionStrategyProto { SELECT_LOWER_HALF = 2; SELECT_UPPER_HALF = 3; SELECT_MEDIAN_VALUE = 4; + SELECT_RANDOM_HALF = 5; } DomainReductionStrategy domain_reduction_strategy = 3; } diff --git a/ortools/sat/cp_model_checker.cc b/ortools/sat/cp_model_checker.cc index c64d120f8f..a7c8475570 100644 --- a/ortools/sat/cp_model_checker.cc +++ b/ortools/sat/cp_model_checker.cc @@ -921,7 +921,8 @@ std::string ValidateSearchStrategies(const CpModelProto& model) { drs != DecisionStrategyProto::SELECT_MAX_VALUE && drs != DecisionStrategyProto::SELECT_LOWER_HALF && drs != DecisionStrategyProto::SELECT_UPPER_HALF && - drs != DecisionStrategyProto::SELECT_MEDIAN_VALUE) { + drs != DecisionStrategyProto::SELECT_MEDIAN_VALUE && + drs != DecisionStrategyProto::SELECT_RANDOM_HALF) { return absl::StrCat("Unknown or unsupported domain_reduction_strategy: ", drs); } diff --git a/ortools/sat/cp_model_expand.cc b/ortools/sat/cp_model_expand.cc index 140b4586fb..b074a0f96e 100644 --- a/ortools/sat/cp_model_expand.cc +++ b/ortools/sat/cp_model_expand.cc @@ -1437,7 +1437,7 @@ void AddSizeTwoTable( int num_large_clause_added = 0; auto add_support_constraint = [context, &num_clause_added, &num_large_clause_added, &num_implications]( - int lit, const std::vector& support_literals, + int lit, absl::Span support_literals, int max_support_size) { if (support_literals.size() == max_support_size) return; if (support_literals.size() == 1) { diff --git a/ortools/sat/cp_model_presolve.cc b/ortools/sat/cp_model_presolve.cc index 4fe099bc5a..34794cd3cf 100644 --- a/ortools/sat/cp_model_presolve.cc +++ b/ortools/sat/cp_model_presolve.cc @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include 
@@ -5839,6 +5840,147 @@ bool CpModelPresolver::PresolveNoOverlap(ConstraintProto* ct) { return changed; } +bool CpModelPresolver::PresolveNoOverlap2DFramed( + absl::Span fixed_boxes, + absl::Span non_fixed_boxes, ConstraintProto* ct) { + const NoOverlap2DConstraintProto& proto = ct->no_overlap_2d(); + + Rectangle fixed_boxes_bb = fixed_boxes.front(); + for (const Rectangle& box : fixed_boxes) { + fixed_boxes_bb.GrowToInclude(box); + } + std::vector espace_for_single_box = + FindEmptySpaces(fixed_boxes_bb, {fixed_boxes.begin(), fixed_boxes.end()}); + // TODO(user): Find a faster way to see if four boxes are delimiting a + // rectangle. + std::vector empty; + ReduceNumberofBoxesGreedy(&espace_for_single_box, &empty); + ReduceNumberOfBoxesExactMandatory(&espace_for_single_box, &empty); + if (espace_for_single_box.size() != 1) { + // Not a rectangular frame, since the inside is not a rectangle. + return false; + } + const Rectangle framed_region = espace_for_single_box.front(); + for (const RectangleInRange& box : non_fixed_boxes) { + if (!box.bounding_area.IsInsideOf(fixed_boxes_bb)) { + // Something can be outside of the frame. + return false; + } + if (2 * box.bounding_area.SizeX() <= framed_region.SizeX() || + 2 * box.bounding_area.SizeY() <= framed_region.SizeY()) { + // We can fit two boxes in the delimited space between the fixed boxes, so + // we cannot replace it by an at-most-one. + return false; + } + const int x_interval_index = proto.x_intervals(box.box_index); + const int y_interval_index = proto.y_intervals(box.box_index); + if (!context_->working_model->constraints(x_interval_index) + .enforcement_literal() + .empty() && + !context_->working_model->constraints(y_interval_index) + .enforcement_literal() + .empty()) { + if (context_->working_model->constraints(x_interval_index) + .enforcement_literal(0) != + context_->working_model->constraints(y_interval_index) + .enforcement_literal(0)) { + // Two different enforcement literals. + return false; + } + } + } + // All this no_overlap_2d constraint is doing is forcing at most one of + // the non-fixed boxes to be in the `framed_region` rectangle. A + // better representation of this is to simply enforce that the items fit + // that rectangle with linear constraints and add a at-most-one constraint. + std::vector enforcement_literals_for_amo; + bool has_mandatory = false; + for (const RectangleInRange& box : non_fixed_boxes) { + const int box_index = box.box_index; + const int x_interval_index = proto.x_intervals(box_index); + const int y_interval_index = proto.y_intervals(box_index); + const ConstraintProto& x_interval_ct = + context_->working_model->constraints(x_interval_index); + const ConstraintProto& y_interval_ct = + context_->working_model->constraints(y_interval_index); + if (x_interval_ct.enforcement_literal().empty() && + y_interval_ct.enforcement_literal().empty()) { + // Mandatory box, update the domains. 
+ if (has_mandatory) { + return context_->NotifyThatModelIsUnsat( + "Two mandatory boxes in the same space"); + } + has_mandatory = true; + if (!context_->IntersectDomainWith(x_interval_ct.interval().start(), + Domain(framed_region.x_min.value(), + framed_region.x_max.value()))) { + return true; + } + if (!context_->IntersectDomainWith(x_interval_ct.interval().end(), + Domain(framed_region.x_min.value(), + framed_region.x_max.value()))) { + return true; + } + if (!context_->IntersectDomainWith(y_interval_ct.interval().start(), + Domain(framed_region.y_min.value(), + framed_region.y_max.value()))) { + return true; + } + if (!context_->IntersectDomainWith(y_interval_ct.interval().end(), + Domain(framed_region.y_min.value(), + framed_region.y_max.value()))) { + return true; + } + } else { + auto add_linear_constraint = [&](const ConstraintProto& interval_ct, + int enforcement_literal, + IntegerValue min, IntegerValue max) { + // TODO(user): If size is constant add only one linear constraint + // instead of two. + ConstraintProto* linear_start = + context_->working_model->add_constraints(); + ConstraintProto* linear_end = + context_->working_model->add_constraints(); + linear_start->add_enforcement_literal(enforcement_literal); + linear_end->add_enforcement_literal(enforcement_literal); + linear_start->mutable_linear()->add_domain(min.value()); + linear_start->mutable_linear()->add_domain(max.value()); + linear_end->mutable_linear()->add_domain(min.value()); + linear_end->mutable_linear()->add_domain(max.value()); + AddLinearExpressionToLinearConstraint(interval_ct.interval().start(), 1, + linear_start->mutable_linear()); + AddLinearExpressionToLinearConstraint(interval_ct.interval().end(), 1, + linear_end->mutable_linear()); + }; + const int enforcement_literal = + x_interval_ct.enforcement_literal().empty() + ? y_interval_ct.enforcement_literal(0) + : x_interval_ct.enforcement_literal(0); + enforcement_literals_for_amo.push_back(enforcement_literal); + add_linear_constraint(x_interval_ct, enforcement_literal, + framed_region.x_min, framed_region.x_max); + add_linear_constraint(y_interval_ct, enforcement_literal, + framed_region.y_min, framed_region.y_max); + } + } + if (has_mandatory) { + for (const int lit : enforcement_literals_for_amo) { + if (!context_->SetLiteralToFalse(lit)) { + return true; + } + } + } else if (enforcement_literals_for_amo.size() > 1) { + context_->working_model->add_constraints() + ->mutable_at_most_one() + ->mutable_literals() + ->Add(enforcement_literals_for_amo.begin(), + enforcement_literals_for_amo.end()); + } + context_->UpdateRuleStats("no_overlap_2d: at most one rectangle in region"); + context_->UpdateNewConstraintsVariableUsage(); + return RemoveConstraint(ct); +} + bool CpModelPresolver::PresolveNoOverlap2D(int /*c*/, ConstraintProto* ct) { if (context_->ModelIsUnsat()) { return false; @@ -6046,6 +6188,14 @@ bool CpModelPresolver::PresolveNoOverlap2D(int /*c*/, ConstraintProto* ct) { return RemoveConstraint(ct); } } + + if (fixed_boxes.size() == 4 && !non_fixed_boxes.empty() && + !has_potential_zero_sized_interval) { + if (PresolveNoOverlap2DFramed(fixed_boxes, non_fixed_boxes, ct)) { + return true; + } + } + // If the non-fixed boxes are disjoint but connected by fixed boxes, we can // split the constraint and duplicate the fixed boxes. 
To avoid duplicating // too many fixed boxes, we do this after we we applied the presolve reducing diff --git a/ortools/sat/cp_model_presolve.h b/ortools/sat/cp_model_presolve.h index a016a19cd4..04b7f54a49 100644 --- a/ortools/sat/cp_model_presolve.h +++ b/ortools/sat/cp_model_presolve.h @@ -28,6 +28,7 @@ #include "ortools/sat/clause.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_mapping.h" +#include "ortools/sat/diffn_util.h" #include "ortools/sat/presolve_context.h" #include "ortools/sat/presolve_util.h" #include "ortools/sat/sat_base.h" @@ -215,6 +216,14 @@ class CpModelPresolver { void ProcessOneLinearWithAmo(int ct_index, ConstraintProto* ct, ActivityBoundHelper* helper); + // Presolve a no_overlap_2d constraint where all the non-fixed rectangles are + // framed by exactly four fixed rectangles and at most one single box can fit + // inside the frame. This is a rather specific situation, but it is fast to + // check and happens often in LNS problems. + bool PresolveNoOverlap2DFramed( + absl::Span fixed_boxes, + absl::Span non_fixed_boxes, ConstraintProto* ct); + // SetPPC is short for set packing, partitioning and covering constraints. // These are sum of booleans <=, = and >= 1 respectively. // We detect inclusion of these constraint which allows a few simplifications. diff --git a/ortools/sat/cp_model_search.cc b/ortools/sat/cp_model_search.cc index 80dfae8f24..5e16b41795 100644 --- a/ortools/sat/cp_model_search.cc +++ b/ortools/sat/cp_model_search.cc @@ -50,7 +50,8 @@ CpModelView::CpModelView(Model* model) : mapping_(*model->GetOrCreate()), boolean_assignment_(model->GetOrCreate()->Assignment()), integer_trail_(*model->GetOrCreate()), - integer_encoder_(*model->GetOrCreate()) {} + integer_encoder_(*model->GetOrCreate()), + random_(*model->GetOrCreate()) {} int CpModelView::NumVariables() const { return mapping_.NumProtoVariables(); } @@ -135,6 +136,28 @@ BooleanOrIntegerLiteral CpModelView::MedianValue(int var) const { return result; } +BooleanOrIntegerLiteral CpModelView::RandomSplit(int var, int64_t lb, + int64_t ub) const { + DCHECK(!IsFixed(var)); + BooleanOrIntegerLiteral result; + if (mapping_.IsBoolean(var)) { + if (absl::Bernoulli(random_, 0.5)) { + result.boolean_literal_index = mapping_.Literal(var).Index(); + } else { + result.boolean_literal_index = mapping_.Literal(var).NegatedIndex(); + } + } else if (mapping_.IsInteger(var)) { + if (absl::Bernoulli(random_, 0.5)) { + result.integer_literal = IntegerLiteral::LowerOrEqual( + mapping_.Integer(var), IntegerValue(lb + (ub - lb) / 2)); + } else { + result.integer_literal = IntegerLiteral::GreaterOrEqual( + mapping_.Integer(var), IntegerValue(ub - (ub - lb) / 2)); + } + } + return result; +} + // Stores one variable and its strategy value. 
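To make the new strategy concrete: for an integer variable with bounds lb = 0 and ub = 10, RandomSplit() above branches on x <= 0 + (10 - 0) / 2 = 5 or on x >= 10 - (10 - 0) / 2 = 5, each with probability one half. A hypothetical snippet requesting this strategy on a CpModelProto that already defines variables 0..3:

```cpp
#include "ortools/sat/cp_model.pb.h"

void UseRandomHalfStrategy(operations_research::sat::CpModelProto* model_proto) {
  using operations_research::sat::DecisionStrategyProto;
  DecisionStrategyProto* strategy = model_proto->add_search_strategy();
  for (int v = 0; v < 4; ++v) strategy->add_variables(v);
  strategy->set_variable_selection_strategy(DecisionStrategyProto::CHOOSE_FIRST);
  strategy->set_domain_reduction_strategy(
      DecisionStrategyProto::SELECT_RANDOM_HALF);
}
```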
struct VarValue { int ref; @@ -315,6 +338,8 @@ std::function ConstructUserSearchStrategy( return view.GreaterOrEqual(var, ub - (ub - lb) / 2); case DecisionStrategyProto::SELECT_MEDIAN_VALUE: return view.MedianValue(var); + case DecisionStrategyProto::SELECT_RANDOM_HALF: + return view.RandomSplit(var, lb, ub); default: LOG(FATAL) << "Unknown DomainReductionStrategy " << strategy.domain_reduction_strategy(); diff --git a/ortools/sat/cp_model_search.h b/ortools/sat/cp_model_search.h index ad08298271..9c2dc3cfc4 100644 --- a/ortools/sat/cp_model_search.h +++ b/ortools/sat/cp_model_search.h @@ -20,6 +20,7 @@ #include #include "absl/container/flat_hash_map.h" +#include "absl/random/bit_gen_ref.h" #include "absl/strings/string_view.h" #include "absl/types/span.h" #include "ortools/base/stl_util.h" @@ -59,12 +60,14 @@ class CpModelView { BooleanOrIntegerLiteral GreaterOrEqual(int var, int64_t value) const; BooleanOrIntegerLiteral LowerOrEqual(int var, int64_t value) const; BooleanOrIntegerLiteral MedianValue(int var) const; + BooleanOrIntegerLiteral RandomSplit(int var, int64_t lb, int64_t ub) const; private: const CpModelMapping& mapping_; const VariablesAssignment& boolean_assignment_; const IntegerTrail& integer_trail_; const IntegerEncoder& integer_encoder_; + mutable absl::BitGenRef random_; }; // Constructs the search strategy specified in the given CpModelProto. diff --git a/ortools/sat/cp_model_search_test.cc b/ortools/sat/cp_model_search_test.cc index 73bd7f3450..a1a99f7853 100644 --- a/ortools/sat/cp_model_search_test.cc +++ b/ortools/sat/cp_model_search_test.cc @@ -11,16 +11,24 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include +#include +#include #include +#include #include +#include "absl/container/flat_hash_map.h" +#include "absl/strings/str_join.h" #include "gtest/gtest.h" #include "ortools/base/gmock.h" +#include "ortools/base/logging.h" #include "ortools/base/parse_test_proto.h" #include "ortools/sat/cp_model.pb.h" #include "ortools/sat/cp_model_solver.h" #include "ortools/sat/model.h" #include "ortools/sat/sat_parameters.pb.h" +#include "ortools/util/stats.h" namespace operations_research { namespace sat { @@ -293,6 +301,94 @@ TEST(BasicFixedSearchBehaviorTest, MedianTest2) { EXPECT_THAT(response.solution(), testing::ElementsAre(10, 8)); } +TEST(BasicFixedSearchBehaviorTest, RandomHalfTest) { + CpModelProto model_proto = ParseTestProto(R"pb( + variables { domain: [ 0, 1 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + variables { domain: [ 0, 10 ] } + constraints { + linear { + vars: [ 0, 1, 2, 3 ] + coeffs: [ 1, 1, 1, 1 ] + domain: [ 10, 10 ] + } + } + search_strategy { + variables: [ 0, 1, 2, 3 ] + variable_selection_strategy: CHOOSE_FIRST + domain_reduction_strategy: SELECT_RANDOM_HALF + } + )pb"); + + absl::flat_hash_map, int> count_by_solution; + { + SatParameters params; + params.set_enumerate_all_solutions(true); + params.set_num_workers(1); + Model model; + model.Add(NewSatParameters(params)); + model.Add(NewFeasibleSolutionObserver( + [&count_by_solution](const CpSolverResponse& response) { + count_by_solution[std::make_tuple( + response.solution(0), response.solution(1), response.solution(2), + response.solution(3))] = 0; + })); + EXPECT_EQ(SolveCpModel(model_proto, &model).status(), + CpSolverStatus::OPTIMAL); + } + constexpr int kNumExpectedSolutions = 121; + EXPECT_EQ(count_by_solution.size(), kNumExpectedSolutions); + + // Repeatedly solve the model with a different seed and 
count the number of + // times each solution occurs. If each solution is found with equal + // probability, each solution should be found "near" kExpectedMean times. + constexpr int kExpectedMean = 100; + std::mt19937_64 random(12345); + for (int i = 0; i < kNumExpectedSolutions * kExpectedMean; ++i) { + SatParameters params; + params.set_cp_model_presolve(false); + params.set_search_branching(SatParameters::FIXED_SEARCH); + params.set_random_seed(i); + params.set_num_workers(1); + auto search_order = + model_proto.mutable_search_strategy(0)->mutable_variables(); + std::shuffle(search_order->begin(), search_order->end(), random); + const CpSolverResponse response = SolveWithParameters(model_proto, params); + EXPECT_EQ(response.status(), CpSolverStatus::OPTIMAL); + count_by_solution[std::make_tuple( + response.solution(0), response.solution(1), response.solution(2), + response.solution(3))]++; + } + EXPECT_EQ(count_by_solution.size(), kNumExpectedSolutions); + DoubleDistribution counts; + int min_count = std::numeric_limits::max(); + std::tuple min_count_solution; + int max_count = 0; + std::tuple max_count_solution; + for (const auto& [solution, count] : count_by_solution) { + if (count < min_count) { + min_count = count; + min_count_solution = solution; + } + if (count > max_count) { + max_count = count; + max_count_solution = solution; + } + counts.Add(count); + } + const double std_dev = counts.StdDeviation(); + LOG(INFO) << "min_count = " << min_count << " for " + << absl::StrJoin(min_count_solution, ","); + LOG(INFO) << "max_count = " << max_count << " for " + << absl::StrJoin(max_count_solution, ","); + LOG(INFO) << "std_dev / mean = " << std_dev / kExpectedMean; + EXPECT_GE(min_count, kExpectedMean / 10); + // If each solution was really found with equal probability, the coefficient + // of variation would be much lower (about 0.1 for kExpectedMean = 100). + EXPECT_LE(std_dev / kExpectedMean, 0.5); +} + } // namespace } // namespace sat } // namespace operations_research diff --git a/ortools/sat/diffn_util.cc b/ortools/sat/diffn_util.cc index 99f71c470e..0c341497b2 100644 --- a/ortools/sat/diffn_util.cc +++ b/ortools/sat/diffn_util.cc @@ -1610,6 +1610,7 @@ std::vector PavedRegionDifference( std::vector original_region, absl::Span area_to_remove) { std::vector new_area_to_cover; + new_area_to_cover.reserve(original_region.size()); for (const Rectangle& rectangle : area_to_remove) { new_area_to_cover.clear(); for (const Rectangle& r : original_region) { diff --git a/ortools/sat/diffn_util.h b/ortools/sat/diffn_util.h index e921e78bef..361bd79b25 100644 --- a/ortools/sat/diffn_util.h +++ b/ortools/sat/diffn_util.h @@ -66,6 +66,11 @@ struct Rectangle { Rectangle Intersect(const Rectangle& other) const; IntegerValue IntersectArea(const Rectangle& other) const; + bool IsInsideOf(const Rectangle& other) const { + return x_min >= other.x_min && x_max <= other.x_max && + y_min >= other.y_min && y_max <= other.y_max; + } + // Returns `this \ other` as a set of disjoint rectangles of non-empty area. // The resulting vector will have at most four elements. 
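A self-contained sketch of the containment check added above, using plain int64_t bounds instead of IntegerValue (the struct name is illustrative):

```cpp
#include <cstdint>

struct Box {
  int64_t x_min, x_max, y_min, y_max;
  bool IsInsideOf(const Box& other) const {
    return x_min >= other.x_min && x_max <= other.x_max &&
           y_min >= other.y_min && y_max <= other.y_max;
  }
};

// Example: Box{0, 1, 0, 1}.IsInsideOf(Box{0, 10, 0, 10}) is true, while the
// reverse containment does not hold.
```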
absl::InlinedVector RegionDifference( diff --git a/ortools/sat/diffn_util_test.cc b/ortools/sat/diffn_util_test.cc index 7838ca0123..b1e64b5247 100644 --- a/ortools/sat/diffn_util_test.cc +++ b/ortools/sat/diffn_util_test.cc @@ -628,7 +628,7 @@ TEST(GetMinimumOverlapTest, BasicTest) { } IntegerValue RecomputeEnergy(const Rectangle& rectangle, - const std::vector& intervals) { + absl::Span intervals) { IntegerValue ret = 0; for (const RectangleInRange& range : intervals) { const Rectangle min_intersect = range.GetMinimumIntersection(rectangle); diff --git a/ortools/sat/integer_expr.h b/ortools/sat/integer_expr.h index 853b6f8151..6b88b7db50 100644 --- a/ortools/sat/integer_expr.h +++ b/ortools/sat/integer_expr.h @@ -594,10 +594,15 @@ inline void AddWeightedSumGreaterOrEqual( // TODO(user): Delete once Telamon use new function. inline std::function ConditionalWeightedSumLowerOrEqual( - const std::vector& enforcement_literals, - const std::vector& vars, - const std::vector& coefficients, int64_t upper_bound) { - return [=](Model* model) { + absl::Span enforcement_literals, + absl::Span vars, + absl::Span coefficients, int64_t upper_bound) { + return [=, vars = std::vector(vars.begin(), vars.end()), + coefficients = + std::vector(coefficients.begin(), coefficients.end()), + enforcement_literals = + std::vector(enforcement_literals.begin(), + enforcement_literals.end())](Model* model) { AddWeightedSumLowerOrEqual(enforcement_literals, vars, coefficients, upper_bound, model); }; diff --git a/ortools/sat/python/cp_model.py b/ortools/sat/python/cp_model.py index 195535d0d6..22f68886cd 100644 --- a/ortools/sat/python/cp_model.py +++ b/ortools/sat/python/cp_model.py @@ -107,6 +107,8 @@ SELECT_MAX_VALUE = cp_model_pb2.DecisionStrategyProto.SELECT_MAX_VALUE SELECT_LOWER_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_LOWER_HALF SELECT_UPPER_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_UPPER_HALF +SELECT_MEDIAN_VALUE = cp_model_pb2.DecisionStrategyProto.SELECT_MEDIAN_VALUE +SELECT_RANDOM_HALF = cp_model_pb2.DecisionStrategyProto.SELECT_RANDOM_HALF # Search branching AUTOMATIC_SEARCH = sat_parameters_pb2.SatParameters.AUTOMATIC_SEARCH diff --git a/ortools/sat/python/cp_model_helper_test.py b/ortools/sat/python/cp_model_helper_test.py index c73f6cbd2b..b45a67508b 100644 --- a/ortools/sat/python/cp_model_helper_test.py +++ b/ortools/sat/python/cp_model_helper_test.py @@ -66,7 +66,7 @@ def tearDown(self) -> None: super().tearDown() sys.stdout.flush() - def testVariableDomain(self): + def test_variable_domain(self): model_string = """ variables { domain: [ -10, 10 ] } variables { domain: [ -5, -5, 3, 6 ] } @@ -80,7 +80,7 @@ def testVariableDomain(self): self.assertEqual(d0.flattened_intervals(), [-10, 10]) self.assertEqual(d1.flattened_intervals(), [-5, -5, 3, 6]) - def testSimpleSolve(self): + def test_simple_solve(self): model_string = """ variables { domain: -10 domain: 10 } variables { domain: -10 domain: 10 } @@ -121,7 +121,7 @@ def testSimpleSolve(self): self.assertEqual(cp_model_pb2.OPTIMAL, response_wrapper.status()) self.assertEqual(30.0, response_wrapper.objective_value()) - def testSimpleSolveWithCore(self): + def test_simple_solve_with_core(self): model_string = """ variables { domain: -10 domain: 10 } variables { domain: -10 domain: 10 } @@ -165,7 +165,7 @@ def testSimpleSolveWithCore(self): self.assertEqual(cp_model_pb2.OPTIMAL, response_wrapper.status()) self.assertEqual(30.0, response_wrapper.objective_value()) - def testSimpleSolveWithProtoApi(self): + def 
test_simple_solve_with_proto_api(self): model = cp_model_pb2.CpModelProto() x = model.variables.add() x.domain.extend([-10, 10]) @@ -188,7 +188,7 @@ def testSimpleSolveWithProtoApi(self): self.assertEqual(30.0, response_wrapper.objective_value()) self.assertEqual(30.0, response_wrapper.best_objective_bound()) - def testSolutionCallback(self): + def test_solution_callback(self): model_string = """ variables { domain: 0 domain: 5 } variables { domain: 0 domain: 5 } @@ -209,7 +209,7 @@ def testSolutionCallback(self): self.assertEqual(5, callback.solution_count()) self.assertEqual(cp_model_pb2.OPTIMAL, response_wrapper.status()) - def testBestBoundCallback(self): + def test_best_bound_callback(self): model_string = """ variables { domain: 0 domain: 1 } variables { domain: 0 domain: 1 } @@ -238,7 +238,7 @@ def testBestBoundCallback(self): self.assertEqual(2.6, best_bound_callback.best_bound) self.assertEqual(cp_model_pb2.OPTIMAL, response_wrapper.status()) - def testModelStats(self): + def test_model_stats(self): model_string = """ variables { domain: -10 domain: 10 } variables { domain: -10 domain: 10 } @@ -277,7 +277,7 @@ def testModelStats(self): stats = cmh.CpSatHelper.model_stats(model) self.assertTrue(stats) - def testIntLinExpr(self): + def test_int_lin_expr(self): x = TestIntVar(0, "x") self.assertTrue(x.is_integer()) self.assertIsInstance(x, cmh.BaseIntVar) @@ -319,7 +319,7 @@ def testIntLinExpr(self): e11 = cmh.LinearExpr.weighted_sum([x, y, z, 5], [1, 2, 3, -1]) self.assertEqual(str(e11), "(x + 2 * y + 3 * z - 5)") - def testFloatLinExpr(self): + def test_float_lin_expr(self): x = TestIntVar(0, "x") self.assertTrue(x.is_integer()) self.assertIsInstance(x, TestIntVar) diff --git a/ortools/sat/python/cp_model_numbers_test.py b/ortools/sat/python/cp_model_numbers_test.py index 7fbc9e2a2a..1e0b91a0fc 100644 --- a/ortools/sat/python/cp_model_numbers_test.py +++ b/ortools/sat/python/cp_model_numbers_test.py @@ -34,14 +34,14 @@ def test_is_boolean(self): self.assertTrue(cmn.is_boolean(np.bool_(1))) self.assertTrue(cmn.is_boolean(np.bool_(0))) - def testto_capped_int64(self): + def test_to_capped_int64(self): self.assertEqual(cmn.to_capped_int64(cmn.INT_MAX), cmn.INT_MAX) self.assertEqual(cmn.to_capped_int64(cmn.INT_MAX + 1), cmn.INT_MAX) self.assertEqual(cmn.to_capped_int64(cmn.INT_MIN), cmn.INT_MIN) self.assertEqual(cmn.to_capped_int64(cmn.INT_MIN - 1), cmn.INT_MIN) self.assertEqual(cmn.to_capped_int64(15), 15) - def testcapped_subtraction(self): + def test_capped_subtraction(self): self.assertEqual(cmn.capped_subtraction(10, 5), 5) self.assertEqual(cmn.capped_subtraction(cmn.INT_MIN, 5), cmn.INT_MIN) self.assertEqual(cmn.capped_subtraction(cmn.INT_MIN, -5), cmn.INT_MIN) diff --git a/ortools/sat/python/cp_model_test.py b/ortools/sat/python/cp_model_test.py index f7d23701ae..2882236b9a 100644 --- a/ortools/sat/python/cp_model_test.py +++ b/ortools/sat/python/cp_model_test.py @@ -168,7 +168,7 @@ def tearDown(self) -> None: super().tearDown() sys.stdout.flush() - def testCreateIntegerVariable(self) -> None: + def test_create_integer_variable(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") self.assertEqual("x", str(x)) @@ -191,7 +191,7 @@ def testCreateIntegerVariable(self) -> None: cst = model.new_constant(5) self.assertEqual("5", str(cst)) - def testLiteral(self) -> None: + def test_literal(self) -> None: model = cp_model.CpModel() x = model.new_bool_var("x") self.assertEqual("x", str(x)) @@ -212,7 +212,7 @@ def testLiteral(self) -> None: 
self.assertRaises(TypeError, z.negated) self.assertRaises(TypeError, z.__invert__) - def testNegation(self) -> None: + def test_negation(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") b = model.new_bool_var("b") @@ -224,14 +224,14 @@ def testNegation(self) -> None: self.assertEqual(nb.index, -b.index - 1) self.assertRaises(TypeError, x.negated) - def testEqualityOverload(self) -> None: + def test_equality_overload(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(0, 5, "y") self.assertEqual(x, x) self.assertNotEqual(x, y) - def testLinear(self) -> None: + def test_linear(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -242,7 +242,7 @@ def testLinear(self) -> None: self.assertEqual(10, solver.value(x)) self.assertEqual(-5, solver.value(y)) - def testLinearConstraint(self) -> None: + def test_linear_constraint(self) -> None: model = cp_model.CpModel() model.add_linear_constraint(5, 0, 10) model.add_linear_constraint(-1, 0, 10) @@ -252,7 +252,7 @@ def testLinearConstraint(self) -> None: self.assertTrue(model.proto.constraints[1].HasField("bool_or")) self.assertEmpty(model.proto.constraints[1].bool_or.literals) - def testLinearNonEqual(self) -> None: + def test_linear_non_equal(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -263,7 +263,7 @@ def testLinearNonEqual(self) -> None: self.assertEqual(4, ct.linear.domain[2]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[3]) - def testEq(self) -> None: + def test_eq(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") ct = model.add(x == 2).proto @@ -283,7 +283,7 @@ def testGe(self) -> None: self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testGt(self) -> None: + def test_gt(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") ct = model.add(x > 2).proto @@ -293,7 +293,7 @@ def testGt(self) -> None: self.assertEqual(3, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testLe(self) -> None: + def test_le(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") ct = model.add(x <= 2).proto @@ -303,7 +303,7 @@ def testLe(self) -> None: self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(2, ct.linear.domain[1]) - def testLt(self) -> None: + def test_lt(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") ct = model.add(x < 2).proto @@ -313,7 +313,7 @@ def testLt(self) -> None: self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(1, ct.linear.domain[1]) - def testEqVar(self) -> None: + def test_eq_var(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -326,7 +326,7 @@ def testEqVar(self) -> None: self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(2, ct.linear.domain[1]) - def testGeVar(self) -> None: + def test_ge_var(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -340,7 +340,7 @@ def testGeVar(self) -> None: self.assertEqual(1, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testGtVar(self) -> None: + def test_gt_var(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -354,7 
+354,7 @@ def testGtVar(self) -> None: self.assertEqual(2, ct.linear.domain[0]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[1]) - def testLeVar(self) -> None: + def test_le_var(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -368,7 +368,7 @@ def testLeVar(self) -> None: self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(1, ct.linear.domain[1]) - def testLtVar(self) -> None: + def test_lt_var(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -382,7 +382,7 @@ def testLtVar(self) -> None: self.assertEqual(cp_model.INT_MIN, ct.linear.domain[0]) self.assertEqual(0, ct.linear.domain[1]) - def testLinearNonEqualWithConstant(self) -> None: + def test_linear_non_equal_with_constant(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -394,7 +394,7 @@ def testLinearNonEqualWithConstant(self) -> None: self.assertEqual(-1, ct.linear.domain[2]) self.assertEqual(cp_model.INT_MAX, ct.linear.domain[3]) - def testLinearWithEnforcement(self) -> None: + def test_linear_with_enforcement(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -413,14 +413,14 @@ def testLinearWithEnforcement(self) -> None: self.assertEqual(-4, model.proto.constraints[2].enforcement_literal[0]) self.assertEqual(2, model.proto.constraints[2].enforcement_literal[1]) - def testConstraintWithName(self) -> None: + def test_constraint_with_name(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") ct = model.add_linear_constraint(x + 2 * y, 0, 10).with_name("test_constraint") self.assertEqual("test_constraint", ct.name) - def testNaturalApiMinimize(self) -> None: + def test_natural_api_minimize(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -433,7 +433,7 @@ def testNaturalApiMinimize(self) -> None: self.assertEqual(6, solver.value(1 + x)) self.assertEqual(-10.0, solver.objective_value) - def testNaturalApiMaximizeFloat(self) -> None: + def test_natural_api_maximize_float(self) -> None: model = cp_model.CpModel() x = model.new_bool_var("x") y = model.new_int_var(0, 10, "y") @@ -445,7 +445,7 @@ def testNaturalApiMaximizeFloat(self) -> None: self.assertEqual(-10, solver.value(-y)) self.assertEqual(16.1, solver.objective_value) - def testNaturalApiMaximizeComplex(self) -> None: + def test_natural_api_maximize_complex(self) -> None: model = cp_model.CpModel() x1 = model.new_bool_var("x1") x2 = model.new_bool_var("x1") @@ -469,7 +469,7 @@ def testNaturalApiMaximizeComplex(self) -> None: self.assertEqual(5, solver.value(5 * x4.negated())) self.assertEqual(8, solver.objective_value) - def testNaturalApiMaximize(self) -> None: + def test_natural_api_maximize(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -481,7 +481,7 @@ def testNaturalApiMaximize(self) -> None: self.assertEqual(-9, solver.value(y)) self.assertEqual(17, solver.objective_value) - def testMinimizeConstant(self) -> None: + def test_minimize_constant(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") model.add(x >= -1) @@ -490,7 +490,7 @@ def testMinimizeConstant(self) -> None: self.assertEqual("OPTIMAL", solver.status_name(solver.solve(model))) self.assertEqual(10, 
solver.objective_value) - def testMaximizeConstant(self) -> None: + def test_maximize_constant(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") model.add(x >= -1) @@ -499,7 +499,7 @@ def testMaximizeConstant(self) -> None: self.assertEqual("OPTIMAL", solver.status_name(solver.solve(model))) self.assertEqual(5, solver.objective_value) - def testAddTrue(self) -> None: + def test_add_true(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") model.add(3 >= -1) @@ -508,7 +508,7 @@ def testAddTrue(self) -> None: self.assertEqual("OPTIMAL", solver.status_name(solver.solve(model))) self.assertEqual(-10, solver.value(x)) - def testAddFalse(self) -> None: + def test_add_false(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") model.add(3 <= -1) @@ -516,7 +516,7 @@ def testAddFalse(self) -> None: solver = cp_model.CpSolver() self.assertEqual("INFEASIBLE", solver.status_name(solver.solve(model))) - def testSum(self) -> None: + def test_sum(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 2, f"x{i}") for i in range(100)] model.add(sum(x) <= 1) @@ -527,7 +527,7 @@ def testSum(self) -> None: for i in range(100): self.assertEqual(solver.value(x[i]), 1 if i == 99 else 0) - def testSumParsing(self) -> None: + def test_sum_parsing(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 2, f"x{i}") for i in range(5)] s1 = cp_model.LinearExpr.sum(x) @@ -660,7 +660,7 @@ def __str__(self): with self.assertRaises(TypeError): cp_model.LinearExpr.sum(x[0], x[2], FakeNpDTypeB()) - def testWeightedSumParsing(self) -> None: + def test_weighted_sum_parsing(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 2, f"x{i}") for i in range(5)] c = [1, -2, 2, 3, 0.0] @@ -702,7 +702,7 @@ def testWeightedSumParsing(self) -> None: s7 = cp_model.LinearExpr.weighted_sum([2], [1.25]) self.assertEqual(repr(s7), "FloatConstant(2.5)") - def testSumWithApi(self) -> None: + def test_sum_with_api(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 2, f"x{i}") for i in range(100)] self.assertEqual(cp_model.LinearExpr.sum([x[0]]), x[0]) @@ -720,7 +720,7 @@ def testSumWithApi(self) -> None: for i in range(100): self.assertEqual(solver.value(x[i]), 1 if i == 99 else 0) - def testWeightedSum(self) -> None: + def test_weighted_sum(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 2, f"x{i}") for i in range(100)] c = [2] * 100 @@ -745,7 +745,7 @@ def testWeightedSum(self) -> None: with self.assertRaises(ValueError): cp_model.LinearExpr.WeightedSum([x[0]], [1.1, 2.2]) - def testAllDifferent(self) -> None: + def test_all_different(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_all_different(x) @@ -753,14 +753,14 @@ def testAllDifferent(self) -> None: self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].all_diff.exprs, 5) - def testAllDifferentGen(self) -> None: + def test_all_different_gen(self) -> None: model = cp_model.CpModel() model.add_all_different(model.new_int_var(0, 4, f"x{i}") for i in range(5)) self.assertLen(model.proto.variables, 5) self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].all_diff.exprs, 5) - def testAllDifferentList(self) -> None: + def test_all_different_list(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_all_different(x[0], x[1], x[2], x[3], x[4]) @@ -768,7 +768,7 @@ def 
testAllDifferentList(self) -> None: self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].all_diff.exprs, 5) - def testElement(self) -> None: + def test_element(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_element(x[0], [x[1], 2, 4, x[2]], x[4]) @@ -780,7 +780,7 @@ def testElement(self) -> None: with self.assertRaises(ValueError): model.add_element(x[0], [], x[4]) - def testFixedElement(self) -> None: + def test_fixed_element(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(4)] model.add_element(1, [x[0], 2, 4, x[2]], x[3]) @@ -791,7 +791,7 @@ def testFixedElement(self) -> None: self.assertEqual(1, model.proto.constraints[0].linear.coeffs[0]) self.assertEqual([2, 2], model.proto.constraints[0].linear.domain) - def testAffineElement(self) -> None: + def test_affine_element(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_element(x[0] + 1, [2 * x[1] - 2, 2, 4, x[2]], x[4] - 1) @@ -826,7 +826,7 @@ def testCircuit(self) -> None: with self.assertRaises(ValueError): model.add_circuit([]) - def testMultipleCircuit(self) -> None: + def test_multiple_circuit(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] arcs: list[tuple[int, int, cp_model.LiteralT]] = [ @@ -841,7 +841,7 @@ def testMultipleCircuit(self) -> None: with self.assertRaises(ValueError): model.add_multiple_circuit([]) - def testAllowedAssignments(self) -> None: + def test_allowed_assignments(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_allowed_assignments( @@ -862,7 +862,7 @@ def testAllowedAssignments(self) -> None: [(0, 1, 2, 3, 4), (4, 3, 2, 1, 1), (0, 0, 0, 0)], ) - def testForbiddenAssignments(self) -> None: + def test_forbidden_assignments(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_forbidden_assignments( @@ -886,7 +886,7 @@ def testForbiddenAssignments(self) -> None: [(0, 1, 2, 3, 4), (4, 3, 2, 1, 1), (0, 0, 0, 0)], ) - def testAutomaton(self) -> None: + def test_automaton(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] model.add_automaton(x, 0, [2, 3], [(0, 0, 0), (0, 1, 1), (1, 2, 2), (2, 3, 3)]) @@ -922,7 +922,7 @@ def testAutomaton(self) -> None: with self.assertRaises(ValueError): model.add_automaton(x, 0, [2, 3], []) - def testInverse(self) -> None: + def test_inverse(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 4, f"x{i}") for i in range(5)] y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -932,7 +932,7 @@ def testInverse(self) -> None: self.assertLen(model.proto.constraints[0].inverse.f_direct, 5) self.assertLen(model.proto.constraints[0].inverse.f_inverse, 5) - def testMaxEquality(self) -> None: + def test_max_equality(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -943,7 +943,7 @@ def testMaxEquality(self) -> None: self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEquality(self) -> None: + def test_min_equality(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -953,7 +953,7 @@ def 
testMinEquality(self) -> None: self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityList(self) -> None: + def test_min_equality_list(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -963,7 +963,7 @@ def testMinEqualityList(self) -> None: self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityTuple(self) -> None: + def test_min_equality_tuple(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -973,7 +973,7 @@ def testMinEqualityTuple(self) -> None: self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityGenerator(self) -> None: + def test_min_equality_generator(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -983,7 +983,7 @@ def testMinEqualityGenerator(self) -> None: self.assertEqual(0, model.proto.constraints[0].lin_max.target.vars[0]) self.assertEqual(-1, model.proto.constraints[0].lin_max.target.coeffs[0]) - def testMinEqualityWithConstant(self) -> None: + def test_min_equality_with_constant(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 4, "y") @@ -999,7 +999,7 @@ def testMinEqualityWithConstant(self) -> None: self.assertEmpty(lin_max.exprs[1].vars) self.assertEqual(-3, lin_max.exprs[1].offset) - def testAbs(self) -> None: + def test_abs(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(-5, 5, "y") @@ -1025,7 +1025,7 @@ def testAbs(self) -> None: ) self.assertTrue(passed) - def testDivision(self) -> None: + def test_division(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") y = model.new_int_var(0, 50, "y") @@ -1077,7 +1077,7 @@ def testModulo(self) -> None: ) self.assertTrue(passed) - def testMultiplicationEquality(self) -> None: + def test_multiplication_equality(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = [model.new_int_var(0, 4, f"y{i}") for i in range(5)] @@ -1087,7 +1087,7 @@ def testMultiplicationEquality(self) -> None: self.assertLen(model.proto.constraints[0].int_prod.exprs, 5) self.assertEqual(0, model.proto.constraints[0].int_prod.target.vars[0]) - def testImplication(self) -> None: + def test_implication(self) -> None: model = cp_model.CpModel() x = model.new_bool_var("x") y = model.new_bool_var("y") @@ -1099,7 +1099,7 @@ def testImplication(self) -> None: self.assertEqual(x.index, model.proto.constraints[0].enforcement_literal[0]) self.assertEqual(y.index, model.proto.constraints[0].bool_or.literals[0]) - def testBoolOr(self) -> None: + def test_bool_or(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_bool_or(x) @@ -1114,7 +1114,7 @@ def testBoolOr(self) -> None: with self.assertRaises(TypeError): model.add_bool_or([y, False]) - def testBoolOrListOrGet(self) -> None: + def test_bool_or_list_or_get(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_bool_or(x) @@ -1128,7 +1128,7 @@ def testBoolOrListOrGet(self) -> None: 
self.assertLen(model.proto.constraints[2].bool_or.literals, 2) self.assertLen(model.proto.constraints[3].bool_or.literals, 4) - def testAtLeastOne(self) -> None: + def test_at_least_one(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_at_least_one(x) @@ -1141,7 +1141,7 @@ def testAtLeastOne(self) -> None: y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_at_least_one, [y, False]) - def testAtMostOne(self) -> None: + def test_at_most_one(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_at_most_one(x) @@ -1154,7 +1154,7 @@ def testAtMostOne(self) -> None: y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_at_most_one, [y, False]) - def testExactlyOne(self) -> None: + def test_exactly_one(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_exactly_one(x) @@ -1167,7 +1167,7 @@ def testExactlyOne(self) -> None: y = model.new_int_var(0, 4, "y") self.assertRaises(TypeError, model.add_exactly_one, [y, False]) - def testBoolAnd(self) -> None: + def test_bool_and(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_bool_and(x) @@ -1179,7 +1179,7 @@ def testBoolAnd(self) -> None: self.assertEqual(-3, model.proto.constraints[1].bool_and.literals[1]) self.assertEqual(5, model.proto.constraints[1].bool_and.literals[2]) - def testBoolXOr(self) -> None: + def test_bool_x_or(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] model.add_bool_xor(x) @@ -1187,7 +1187,7 @@ def testBoolXOr(self) -> None: self.assertLen(model.proto.constraints, 1) self.assertLen(model.proto.constraints[0].bool_xor.literals, 5) - def testMapDomain(self) -> None: + def test_map_domain(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var(f"x{i}") for i in range(5)] y = model.new_int_var(0, 10, "y") @@ -1195,7 +1195,7 @@ def testMapDomain(self) -> None: self.assertLen(model.proto.variables, 6) self.assertLen(model.proto.constraints, 10) - def testInterval(self) -> None: + def test_interval(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 3, "y") @@ -1211,7 +1211,7 @@ def testInterval(self) -> None: self.assertEqual(size_expr, 2) self.assertEqual(str(end_expr), "(x + 2)") - def testRebuildFromLinearExpressionProto(self) -> None: + def test_rebuild_from_linear_expression_proto(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 1, "y") @@ -1247,12 +1247,12 @@ def testRebuildFromLinearExpressionProto(self) -> None: self.assertEqual(canonical_expr2.coeffs[1], 2) self.assertEqual(canonical_expr2.offset, 2) - def testAbsentInterval(self) -> None: + def test_absent_interval(self) -> None: model = cp_model.CpModel() i = model.new_optional_interval_var(1, 0, 1, False, "") self.assertEqual(0, i.index) - def testOptionalInterval(self) -> None: + def test_optional_interval(self) -> None: model = cp_model.CpModel() b = model.new_bool_var("b") x = model.new_int_var(0, 4, "x") @@ -1272,7 +1272,7 @@ def testOptionalInterval(self) -> None: with self.assertRaises(TypeError): model.new_optional_interval_var(1, 2, 3, b + 1, "x") - def testNoOverlap(self) -> None: + def test_no_overlap(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 3, "y") @@ -1285,7 +1285,7 @@ def testNoOverlap(self) -> None: 
self.assertEqual(0, ct.proto.no_overlap.intervals[0]) self.assertEqual(1, ct.proto.no_overlap.intervals[1]) - def testNoOverlap2D(self) -> None: + def test_no_overlap2d(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 3, "y") @@ -1301,7 +1301,7 @@ def testNoOverlap2D(self) -> None: self.assertEqual(1, ct.proto.no_overlap_2d.y_intervals[0]) self.assertEqual(0, ct.proto.no_overlap_2d.y_intervals[1]) - def testCumulative(self) -> None: + def test_cumulative(self) -> None: model = cp_model.CpModel() intervals = [ model.new_interval_var( @@ -1320,7 +1320,7 @@ def testCumulative(self) -> None: with self.assertRaises(TypeError): model.add_cumulative([intervals[0], 3], [2, 3], 3) - def testGetOrMakeIndexFromConstant(self) -> None: + def test_get_or_make_index_from_constant(self) -> None: model = cp_model.CpModel() self.assertEqual(0, model.get_or_make_index_from_constant(3)) self.assertEqual(0, model.get_or_make_index_from_constant(3)) @@ -1330,7 +1330,7 @@ def testGetOrMakeIndexFromConstant(self) -> None: self.assertEqual(3, model_var.domain[0]) self.assertEqual(3, model_var.domain[1]) - def testStr(self) -> None: + def test_str(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") self.assertEqual(str(x == 2), "x == 2") @@ -1381,7 +1381,7 @@ def testStr(self) -> None: i = model.new_interval_var(x, 2, y, "i") self.assertEqual(str(i), "i") - def testRepr(self) -> None: + def test_repr(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 4, "x") y = model.new_int_var(0, 3, "y") @@ -1429,12 +1429,12 @@ def testDisplayBounds(self) -> None: self.assertEqual("10", cp_model.display_bounds([10, 10])) self.assertEqual("10..15, 20..30", cp_model.display_bounds([10, 15, 20, 30])) - def testShortName(self) -> None: + def test_short_name(self) -> None: model = cp_model.CpModel() model.proto.variables.add(domain=[5, 10]) self.assertEqual("[5..10]", cp_model.short_name(model.proto, 0)) - def testIntegerExpressionErrors(self) -> None: + def test_integer_expression_errors(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") y = model.new_int_var(0, 3, "y") @@ -1453,13 +1453,13 @@ def testIntegerExpressionErrors(self) -> None: self.assertRaises(TypeError, x.__add__, "dummy") self.assertRaises(TypeError, x.__mul__, "dummy") - def testModelErrors(self) -> None: + def test_model_errors(self) -> None: model = cp_model.CpModel() self.assertRaises(TypeError, model.add, "dummy") self.assertRaises(TypeError, model.get_or_make_index, "dummy") self.assertRaises(TypeError, model.minimize, "dummy") - def testSolverErrors(self) -> None: + def test_solver_errors(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") y = model.new_int_var(-10, 10, "y") @@ -1471,7 +1471,7 @@ def testSolverErrors(self) -> None: self.assertRaises(TypeError, solver.value, "not_a_variable") self.assertRaises(TypeError, model.add_bool_or, [x, y]) - def testHasObjectiveMinimize(self) -> None: + def test_has_objective_minimize(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") y = model.new_int_var(-10, 10, "y") @@ -1480,7 +1480,7 @@ def testHasObjectiveMinimize(self) -> None: model.minimize(y) self.assertTrue(model.has_objective()) - def testHasObjectiveMaximize(self) -> None: + def test_has_objective_maximize(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 1, "x") y = model.new_int_var(-10, 10, "y") @@ -1489,7 +1489,7 @@ def testHasObjectiveMaximize(self) -> None: model.maximize(y) 
self.assertTrue(model.has_objective()) - def testSearchForAllSolutions(self) -> None: + def test_search_for_all_solutions(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1502,7 +1502,7 @@ def testSearchForAllSolutions(self) -> None: self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(5, solution_counter.solution_count) - def testSolveWithSolutionCallback(self) -> None: + def test_solve_with_solution_callback(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1515,7 +1515,7 @@ def testSolveWithSolutionCallback(self) -> None: self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(6, solution_sum.sum) - def testBestBoundCallback(self) -> None: + def test_best_bound_callback(self) -> None: model = cp_model.CpModel() x0 = model.new_bool_var("x0") x1 = model.new_bool_var("x1") @@ -1533,7 +1533,7 @@ def testBestBoundCallback(self) -> None: self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(2.6, best_bound_callback.best_bound) - def testValue(self) -> None: + def test_value(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") y = model.new_int_var(0, 10, "y") @@ -1545,7 +1545,7 @@ def testValue(self) -> None: self.assertEqual(solver.value(y), 10) self.assertEqual(solver.value(2), 2) - def testBooleanValue(self) -> None: + def test_boolean_value(self) -> None: model = cp_model.CpModel() x = model.new_bool_var("x") y = model.new_bool_var("y") @@ -1566,7 +1566,7 @@ def testBooleanValue(self) -> None: self.assertEqual(solver.boolean_value(2), True) self.assertEqual(solver.boolean_value(0), False) - def testUnsupportedOperators(self) -> None: + def test_unsupported_operators(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") y = model.new_int_var(0, 10, "y") @@ -1581,7 +1581,7 @@ def testUnsupportedOperators(self) -> None: if x == 2: print("passed2") - def testIsLiteralTrueFalse(self) -> None: + def test_is_literal_true_false(self) -> None: model = cp_model.CpModel() x = model.new_constant(0) self.assertFalse(cp_model.object_is_a_true_literal(x)) @@ -1593,7 +1593,7 @@ def testIsLiteralTrueFalse(self) -> None: self.assertFalse(cp_model.object_is_a_true_literal(False)) self.assertFalse(cp_model.object_is_a_false_literal(True)) - def testSolveMinimizeWithSolutionCallback(self) -> None: + def test_solve_minimize_with_solution_callback(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1606,7 +1606,7 @@ def testSolveMinimizeWithSolutionCallback(self) -> None: self.assertEqual(cp_model.OPTIMAL, status) self.assertEqual(11, solution_obj.obj) - def testSolutionValue(self) -> None: + def test_solution_value(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") b = model.new_bool_var("b") @@ -1625,7 +1625,7 @@ def testSolutionValue(self) -> None: self.assertEqual([3, 5, -4], solution_recorder.int_var_values) self.assertEqual([True, False, True], solution_recorder.bool_var_values) - def testSolutionHinting(self) -> None: + def test_solution_hinting(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1639,7 +1639,7 @@ def testSolutionHinting(self) -> None: self.assertEqual(2, solver.value(x)) self.assertEqual(4, solver.value(y)) - def testSolutionHintingWithBooleans(self) -> None: + def test_solution_hinting_with_booleans(self) -> None: model = cp_model.CpModel() x = 
model.new_bool_var("x") y = model.new_bool_var("y") @@ -1653,7 +1653,7 @@ def testSolutionHintingWithBooleans(self) -> None: self.assertTrue(solver.boolean_value(x)) self.assertFalse(solver.boolean_value(y)) - def testStats(self) -> None: + def test_stats(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1669,7 +1669,7 @@ def testStats(self) -> None: self.assertEqual(solver.num_branches, 0) self.assertGreater(solver.wall_time, 0.0) - def testSearchStrategy(self) -> None: + def test_search_strategy(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1694,7 +1694,7 @@ def testSearchStrategy(self) -> None: ) self.assertEqual(cp_model.SELECT_MAX_VALUE, strategy.domain_reduction_strategy) - def testModelAndResponseStats(self) -> None: + def test_model_and_response_stats(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1706,7 +1706,7 @@ def testModelAndResponseStats(self) -> None: solver.solve(model) self.assertTrue(solver.response_stats()) - def testValidateModel(self) -> None: + def test_validate_model(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 5, "x") y = model.new_int_var(0, 5, "y") @@ -1714,7 +1714,7 @@ def testValidateModel(self) -> None: model.maximize(x + 2 * y) self.assertFalse(model.validate()) - def testValidateModelWithOverflow(self) -> None: + def test_validate_model_with_overflow(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, cp_model.INT_MAX, "x") y = model.new_int_var(0, 10, "y") @@ -1722,7 +1722,7 @@ def testValidateModelWithOverflow(self) -> None: model.maximize(x + 2 * y) self.assertTrue(model.validate()) - def testCopyModel(self) -> None: + def test_copy_model(self) -> None: model = cp_model.CpModel() b = model.new_bool_var("b") x = model.new_int_var(0, 4, "x") @@ -1781,7 +1781,7 @@ def testCopyModel(self) -> None: interval_ct = new_model.proto.constraints[copy_i.index].interval self.assertEqual(12, interval_ct.size.offset) - def testCustomLog(self) -> None: + def test_custom_log(self) -> None: model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") y = model.new_int_var(-10, 10, "y") @@ -1799,14 +1799,14 @@ def testCustomLog(self) -> None: self.assertRegex(log_callback.log, ".*log_to_stdout.*") - def testIssue2762(self) -> None: + def test_issue2762(self) -> None: model = cp_model.CpModel() x = [model.new_bool_var("a"), model.new_bool_var("b")] with self.assertRaises(NotImplementedError): model.add((x[0] != 0) or (x[1] != 0)) - def testModelError(self) -> None: + def test_model_error(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, -2, f"x{i}") for i in range(100)] model.add(sum(x) <= 1) @@ -1815,7 +1815,7 @@ def testModelError(self) -> None: self.assertEqual(cp_model.MODEL_INVALID, solver.solve(model)) self.assertEqual(solver.solution_info(), 'var #0 has no domain(): name: "x0"') - def testIntVarSeries(self) -> None: + def test_int_var_series(self) -> None: df = pd.DataFrame([1, -1, 1], columns=["coeffs"]) model = cp_model.CpModel() x = model.new_int_var_series( @@ -1836,7 +1836,7 @@ def testIntVarSeries(self) -> None: ) _ = z.apply(lambda x: ~x) - def testBoolVarSeries(self) -> None: + def test_bool_var_series(self) -> None: df = pd.DataFrame([1, -1, 1], columns=["coeffs"]) model = cp_model.CpModel() x = model.new_bool_var_series(name="x", index=df.index) @@ -1851,7 +1851,7 @@ def testBoolVarSeries(self) -> None: solution 
= solver.boolean_values(x) self.assertTrue((solution.values == [False, True, False]).all()) - def testFixedSizeIntervalVarSeries(self) -> None: + def test_fixed_size_interval_var_series(self) -> None: df = pd.DataFrame([2, 4, 6], columns=["size"]) model = cp_model.CpModel() starts = model.new_int_var_series( @@ -1876,7 +1876,7 @@ def testFixedSizeIntervalVarSeries(self) -> None: ) self.assertLen(model.proto.constraints, 7) - def testIntervalVarSeries(self) -> None: + def test_interval_var_series(self) -> None: df = pd.DataFrame([2, 4, 6], columns=["size"]) model = cp_model.CpModel() starts = model.new_int_var_series( @@ -1925,7 +1925,7 @@ def testIntervalVarSeries(self) -> None: ) self.assertLen(model.proto.constraints, 13) - def testCompareWithNone(self) -> None: + def test_compare_with_none(self) -> None: model = cp_model.CpModel() x = model.new_int_var(0, 10, "x") self.assertRaises(TypeError, x.__eq__, None) @@ -1935,7 +1935,23 @@ def testCompareWithNone(self) -> None: self.assertRaises(TypeError, x.__gt__, None) self.assertRaises(TypeError, x.__ge__, None) - def testIssue4376SatModel(self) -> None: + def test_small_series(self) -> None: + # OR-Tools issue #4525. + model = cp_model.CpModel() + x = model.new_bool_var("foo") + y = model.new_bool_var("bar") + + s1 = pd.Series([x, y], index=[1, 2]) + self.assertEqual(str(s1.sum()), "(foo + bar)") + s2 = pd.Series([1, -1], index=[1, 2]) + self.assertEqual(str(s1.dot(s2)), "(foo + (-bar))") + + s3 = pd.Series([x], index=[1]) + self.assertIs(s3.sum(), x) + s4 = pd.Series([1], index=[1]) + self.assertIs(s3.dot(s4), x) + + def test_issue4376_sat_model(self) -> None: letters: str = "BCFLMRT" def symbols_from_string(text: str) -> list[int]: @@ -2045,7 +2061,7 @@ def rotate_symbols(symbols: list[int], turns: int) -> list[int]: if status == cp_model.OPTIMAL: self.assertLess(time.time(), solution_callback.last_time + 5.0) - def testIssue4376MinimizeModel(self) -> None: + def test_issue4376_minimize_model(self) -> None: model = cp_model.CpModel() jobs = [ @@ -2146,7 +2162,7 @@ def testIssue4376MinimizeModel(self) -> None: max(best_bound_callback.last_time, solution_callback.last_time) + 9.0, ) - def testIssue4434(self) -> None: + def test_issue4434(self) -> None: model = cp_model.CpModel() i = model.NewIntVar(0, 10, "i") j = model.NewIntVar(0, 10, "j") @@ -2163,7 +2179,7 @@ def testIssue4434(self) -> None: self.assertIsNotNone(expr_ne) self.assertIsNotNone(expr_ge) - def testRaisePythonExceptionInCallback(self) -> None: + def test_raise_python_exception_in_callback(self) -> None: model = cp_model.CpModel() jobs = [ @@ -2258,7 +2274,7 @@ def testRaisePythonExceptionInCallback(self) -> None: with self.assertRaisesRegex(ValueError, msg): solver.solve(model, callback) - def testInPlaceSumModifications(self) -> None: + def test_in_place_sum_modifications(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 10, f"x{i}") for i in range(5)] y = [model.new_int_var(0, 10, f"y{i}") for i in range(5)] @@ -2287,12 +2303,12 @@ def testInPlaceSumModifications(self) -> None: self.assertEqual(e3.double_offset, 1.5) self.assertEqual(e3.num_exprs, 5) - def testLargeSum(self) -> None: + def test_large_sum(self) -> None: model = cp_model.CpModel() x = [model.new_int_var(0, 10, f"x{i}") for i in range(100000)] model.add(sum(x) == 10) - def testSimplification1(self): + def test_simplification1(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = (x * 2) * 2 @@ -2301,7 +2317,7 @@ def testSimplification1(self): self.assertEqual(4, 
prod.coefficient) self.assertEqual(0, prod.offset) - def testSimplification2(self): + def test_simplification2(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = 2 * (x * 2) @@ -2310,7 +2326,7 @@ def testSimplification2(self): self.assertEqual(4, prod.coefficient) self.assertEqual(0, prod.offset) - def testSimplification3(self): + def test_simplification3(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = (2 * x) * 2 @@ -2319,7 +2335,7 @@ def testSimplification3(self): self.assertEqual(4, prod.coefficient) self.assertEqual(0, prod.offset) - def testSimplification4(self): + def test_simplification4(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = 2 * (2 * x) @@ -2328,7 +2344,7 @@ def testSimplification4(self): self.assertEqual(4, prod.coefficient) self.assertEqual(0, prod.offset) - def testSimplification5(self): + def test_simplification5(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = 2 * (x + 1) @@ -2337,7 +2353,7 @@ def testSimplification5(self): self.assertEqual(2, prod.coefficient) self.assertEqual(2, prod.offset) - def testSimplification6(self): + def test_simplification6(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = (x + 1) * 2 @@ -2346,7 +2362,7 @@ def testSimplification6(self): self.assertEqual(2, prod.coefficient) self.assertEqual(2, prod.offset) - def testSimplification7(self): + def test_simplification7(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = 2 * (x - 1) @@ -2355,7 +2371,7 @@ def testSimplification7(self): self.assertEqual(2, prod.coefficient) self.assertEqual(-2, prod.offset) - def testSimplification8(self): + def test_simplification8(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = (x - 1) * 2 @@ -2364,7 +2380,7 @@ def testSimplification8(self): self.assertEqual(2, prod.coefficient) self.assertEqual(-2, prod.offset) - def testSimplification9(self): + def test_simplification9(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = 2 * (1 - x) @@ -2373,7 +2389,7 @@ def testSimplification9(self): self.assertEqual(-2, prod.coefficient) self.assertEqual(2, prod.offset) - def testSimplification10(self): + def test_simplification10(self): model = cp_model.CpModel() x = model.new_int_var(-10, 10, "x") prod = (1 - x) * 2 diff --git a/ortools/sat/sat_parameters.proto b/ortools/sat/sat_parameters.proto index 5fedf35f83..303b961aac 100644 --- a/ortools/sat/sat_parameters.proto +++ b/ortools/sat/sat_parameters.proto @@ -23,7 +23,7 @@ option java_multiple_files = true; // Contains the definitions for all the sat algorithm parameters and their // default values. // -// NEXT TAG: 311 +// NEXT TAG: 312 message SatParameters { // In some context, like in a portfolio of search, it makes sense to name a // given parameters set for logging purpose. @@ -907,6 +907,10 @@ message SatParameters { // as it modifies the reduced_cost, lb_tree_search, and probing workers. optional bool use_dual_scheduling_heuristics = 214 [default = true]; + // Turn on extra propagation for the circuit constraint. + // This can be quite slow. + optional bool use_all_different_for_circuit = 311 [default = false]; + // The search branching will be used to decide how to branch on unfixed nodes. enum SearchBranching { // Try to fix all literals using the underlying SAT solver's heuristics,
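
A minimal usage sketch (not part of the diff above): once the new SatParameters field use_all_different_for_circuit lands, it can be toggled from the Python API through the solver's parameters proto. This assumes the usual generated-proto accessor on CpSolver; the model-building step is only a placeholder.

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    # ... add variables and an add_circuit(...) constraint here ...

    solver = cp_model.CpSolver()
    # Extra propagation for circuit constraints; off by default because it
    # can be quite slow (see the comment added in sat_parameters.proto above).
    solver.parameters.use_all_different_for_circuit = True
    status = solver.solve(model)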