#pragma once #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "drake/common/autodiff.h" #include "drake/common/drake_assert.h" #include "drake/common/drake_copyable.h" #include "drake/common/eigen_types.h" #include "drake/common/fmt.h" #include "drake/common/polynomial.h" #include "drake/common/symbolic/expression.h" #include "drake/common/symbolic/monomial_util.h" #include "drake/common/symbolic/polynomial.h" #include "drake/solvers/binding.h" #include "drake/solvers/constraint.h" #include "drake/solvers/cost.h" #include "drake/solvers/create_constraint.h" #include "drake/solvers/create_cost.h" #include "drake/solvers/decision_variable.h" #include "drake/solvers/function.h" #include "drake/solvers/indeterminate.h" #include "drake/solvers/program_attribute.h" #include "drake/solvers/solver_options.h" namespace drake { namespace solvers { template struct NewVariableNames {}; /** * The type of the names for the newly added variables. * @tparam Size If Size is a fixed non-negative integer, then the type of the * name is std::array. Otherwise the type is * std::vector. */ template struct NewVariableNames { typedef std::array type; }; template <> struct NewVariableNames { typedef std::vector type; }; template struct NewVariableNames : public NewVariableNames< Eigen::Matrix::SizeAtCompileTime> {}; template struct NewSymmetricVariableNames : public NewVariableNames {}; namespace internal { /** * Return un-initialized new variable names. */ template typename std::enable_if_t= 0, typename NewVariableNames::type> CreateNewVariableNames(int) { typename NewVariableNames::type names; return names; } /** * Return un-initialized new variable names. */ template typename std::enable_if_t::type> CreateNewVariableNames(int size) { typename NewVariableNames::type names(size); return names; } /** * Set the names of the newly added variables. 
* @param name The common name of all new variables. * @param rows The number of rows in the new variables. * @param cols The number of columns in the new variables. * @pre The size of @p names is @p rows * @p cols. */ template void SetVariableNames(const std::string& name, int rows, int cols, Derived* names) { DRAKE_DEMAND(static_cast(names->size()) == rows * cols); if (cols == 1) { for (int i = 0; i < rows; ++i) { (*names)[i] = name + "(" + std::to_string(i) + ")"; } } else { for (int j = 0; j < cols; ++j) { for (int i = 0; i < rows; ++i) { (*names)[j * rows + i] = name + "(" + std::to_string(i) + "," + std::to_string(j) + ")"; } } } } /** * Template condition to only catch when Constraints are inadvertently passed * as an argument. If the class is binding-compatible with a Constraint, then * this will provide a static assertion to enable easier debugging of which * type failed. * @tparam F The type to be tested. * @see http://stackoverflow.com/a/13366183/7829525 */ template struct assert_if_is_constraint { static constexpr bool value = is_binding_compatible::value; // Use deferred evaluation static_assert( !value, "You cannot pass a Constraint to create a Cost object from a function. " "Please ensure you are passing a Cost."); }; } // namespace internal /** * MathematicalProgram stores the decision variables, the constraints and costs * of an optimization problem. The user can solve the problem by calling * solvers::Solve() function, and obtain the results of the optimization. * * @ingroup solvers */ class MathematicalProgram { public: /** @name Does not allow copy, move, or assignment. */ /** @{ */ #ifdef DRAKE_DOXYGEN_CXX // Copy constructor is private for use in implementing Clone(). 
MathematicalProgram(const MathematicalProgram&) = delete; #endif MathematicalProgram& operator=(const MathematicalProgram&) = delete; MathematicalProgram(MathematicalProgram&&) = delete; MathematicalProgram& operator=(MathematicalProgram&&) = delete; /** @} */ using VarType = symbolic::Variable::Type; /// The optimal cost is +∞ when the problem is globally infeasible. static constexpr double kGlobalInfeasibleCost = std::numeric_limits::infinity(); /// The optimal cost is -∞ when the problem is unbounded. static constexpr double kUnboundedCost = -std::numeric_limits::infinity(); MathematicalProgram(); virtual ~MathematicalProgram(); /** Clones an optimization program. * The clone will be functionally equivalent to the source program with the * same: * * - decision variables * - constraints * - costs * - solver settings * - initial guess * * Note that this is currently a *shallow* clone. The costs and constraints * are not themselves cloned. * * @retval new_prog. The newly constructed mathematical program. */ [[nodiscard]] std::unique_ptr Clone() const; /** * Returns string representation of this program, listing the decision * variables, costs, and constraints. * * Note that by default, we do not require variables to have unique names. * Providing useful variable names and calling Evaluator::set_description() to * describe the costs and constraints can dramatically improve the readability * of the output. See the tutorial `debug_mathematical_program.ipynb` * for more information. */ [[nodiscard]] std::string to_string() const; /** * Adds continuous variables, appending them to an internal vector of any * existing vars. * The initial guess values for the new variables are set to NaN, to * indicate that an initial guess has not been assigned. * Callers are expected to add costs * and/or constraints to have any effect during optimization. * Callers can also set the initial guess of the decision variables through * SetInitialGuess() or SetInitialGuessForAllVariables(). 
* @param rows The number of rows in the new variables. * @param name The name of the newly added variables * @return The VectorDecisionVariable of size rows x 1, containing the new * vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * auto x = prog.NewContinuousVariables(2, "x"); * @endcode * This adds a 2 x 1 vector containing decision variables into the program. * The names of the variables are "x(0)" and "x(1)". * * The name of the variable is only used for the user in order to ease * readability. */ VectorXDecisionVariable NewContinuousVariables( int rows, const std::string& name = "x") { return NewContinuousVariables(rows, 1, name); } /** * Adds continuous variables, appending them to an internal vector of any * existing vars. * The initial guess values for the new variables are set to NaN, to * indicate that an initial guess has not been assigned. * Callers are expected to add costs * and/or constraints to have any effect during optimization. * Callers can also set the initial guess of the decision variables through * SetInitialGuess() or SetInitialGuessForAllVariables(). * @tparam Rows The number of rows of the new variables, in the compile time. * @tparam Cols The number of columns of the new variables, in the compile * time. * @param rows The number of rows in the new variables. When Rows is not * Eigen::Dynamic, rows is ignored. * @param cols The number of columns in the new variables. When Cols is not * Eigen::Dynamic, cols is ignored. * @param name All variables will share the same name, but different index. * @return The MatrixDecisionVariable of size Rows x Cols, containing the new * vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * auto x = prog.NewContinuousVariables(2, 3, "X"); * auto y = prog.NewContinuousVariables<2, 3>(2, 3, "X"); * @endcode * This adds a 2 x 3 matrix decision variables into the program. 
* * The name of the variable is only used for the user in order to ease * readability. */ template MatrixDecisionVariable NewContinuousVariables( int rows, int cols, const std::string& name = "X") { rows = Rows == Eigen::Dynamic ? rows : Rows; cols = Cols == Eigen::Dynamic ? cols : Cols; auto names = internal::CreateNewVariableNames< Eigen::Matrix::SizeAtCompileTime>(rows * cols); internal::SetVariableNames(name, rows, cols, &names); return NewVariables(VarType::CONTINUOUS, names, rows, cols); } /** * Adds continuous variables, appending them to an internal vector of any * existing vars. * The initial guess values for the new variables are set to NaN, to * indicate that an initial guess has not been assigned. * Callers are expected to add costs * and/or constraints to have any effect during optimization. * Callers can also set the initial guess of the decision variables through * SetInitialGuess() or SetInitialGuessForAllVariables(). * @tparam Rows The number of rows in the new variables. * @tparam Cols The number of columns in the new variables. The default is 1. * @param name All variables will share the same name, but different index. * @return The MatrixDecisionVariable of size rows x cols, containing the new * vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * auto x = prog.NewContinuousVariables<2, 3>("X"); * @endcode * This adds a 2 x 3 matrix decision variables into the program. * * The name of the variable is only used for the user in order to ease * readability. */ template MatrixDecisionVariable NewContinuousVariables( const std::string& name = "X") { return NewContinuousVariables(Rows, Cols, name); } /** * Adds binary variables, appending them to an internal vector of any * existing vars. * The initial guess values for the new variables are set to NaN, to * indicate that an initial guess has not been assigned. * Callers are expected to add costs * and/or constraints to have any effect during optimization. 
* Callers can also set the initial guess of the decision variables through * SetInitialGuess() or SetInitialGuessForAllVariables(). * @tparam Rows The number of rows in the new variables. * @tparam Cols The number of columns in the new variables. * @param rows The number of rows in the new variables. * @param cols The number of columns in the new variables. * @param name The commonly shared name of the new variables. * @return The MatrixDecisionVariable of size rows x cols, containing the new * vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * auto b = prog.NewBinaryVariables(2, 3, "b"); * @endcode * This adds a 2 x 3 matrix decision variables into the program. * * The name of the variable is only used for the user in order to ease * readability. */ template MatrixDecisionVariable NewBinaryVariables( int rows, int cols, const std::string& name) { rows = Rows == Eigen::Dynamic ? rows : Rows; cols = Cols == Eigen::Dynamic ? cols : Cols; auto names = internal::CreateNewVariableNames< Eigen::Matrix::SizeAtCompileTime>(rows * cols); internal::SetVariableNames(name, rows, cols, &names); return NewVariables(VarType::BINARY, names, rows, cols); } /** * Adds a matrix of binary variables into the optimization program. * @tparam Rows The number of rows in the newly added binary variables. * @tparam Cols The number of columns in the new variables. The default is 1. * @param name Each newly added binary variable will share the same name. The * default name is "b". * @return A matrix containing the newly added variables. */ template MatrixDecisionVariable NewBinaryVariables( const std::string& name = "b") { return NewBinaryVariables(Rows, Cols, name); } /** * Adds binary variables to this MathematicalProgram. The new variables are * viewed as a column vector, with size @p rows x 1. 
* @see NewBinaryVariables(int rows, int cols, const * std::vector& names); */ VectorXDecisionVariable NewBinaryVariables(int rows, const std::string& name = "b") { return NewBinaryVariables(rows, 1, name); } /** * Adds a runtime sized symmetric matrix as decision variables to * this MathematicalProgram. * The optimization will only use the stacked columns of the * lower triangular part of the symmetric matrix as decision variables. * @param rows The number of rows in the symmetric matrix. * @param name The name of the matrix. It is only used the for user to * understand the optimization program. The default name is "Symmetric", and * each variable will be named as *
   * Symmetric(0, 0)     Symmetric(1, 0)     ... Symmetric(rows-1, 0)
   * Symmetric(1, 0)     Symmetric(1, 1)     ... Symmetric(rows-1, 1)
   *            ...
   * Symmetric(rows-1,0) Symmetric(rows-1,1) ... Symmetric(rows-1, rows-1)
   * </pre>
   * Notice that the (i,j)'th entry and (j,i)'th entry has the same name.
   * @return The newly added decision variables.
   */
  MatrixXDecisionVariable NewSymmetricContinuousVariables(
      int rows, const std::string& name = "Symmetric");

  /**
   * Adds a static sized symmetric matrix as decision variables to
   * this MathematicalProgram.
   * The optimization will only use the stacked columns of the
   * lower triangular part of the symmetric matrix as decision variables.
   * @tparam rows The number of rows in the symmetric matrix.
   * @param name The name of the matrix. It is only used the for user to
   * understand the optimization program. The default name is "Symmetric", and
   * each variable will be named as
   * <pre>
   * Symmetric(0, 0)     Symmetric(1, 0)     ... Symmetric(rows-1, 0)
   * Symmetric(1, 0)     Symmetric(1, 1)     ... Symmetric(rows-1, 1)
   *            ...
   * Symmetric(rows-1,0) Symmetric(rows-1,1) ... Symmetric(rows-1, rows-1)
   * 
* Notice that the (i,j)'th entry and (j,i)'th entry has the same name. * @return The newly added decision variables. */ template MatrixDecisionVariable NewSymmetricContinuousVariables( const std::string& name = "Symmetric") { typename NewSymmetricVariableNames::type names; int var_count = 0; for (int j = 0; j < static_cast(rows); ++j) { for (int i = j; i < static_cast(rows); ++i) { names[var_count] = name + "(" + std::to_string(i) + "," + std::to_string(j) + ")"; ++var_count; } } return NewSymmetricVariables(VarType::CONTINUOUS, names); } /** Appends new variables to the end of the existing variables. * @param decision_variables The newly added decision_variables. * @pre `decision_variables` should not intersect with the existing * indeterminates in the optimization program. * @pre Each entry in `decision_variables` should not be a dummy variable. * @throws std::exception if the preconditions are not satisfied. */ void AddDecisionVariables( const Eigen::Ref& decision_variables); /** * Returns a free polynomial in a monomial basis over @p indeterminates of a * given @p degree. It uses @p coeff_name to make new decision variables and * use them as coefficients. For example, `NewFreePolynomial({x₀, x₁}, 2)` * returns a₀x₁² + a₁x₀x₁ + a₂x₀² + a₃x₁ + a₄x₀ + a₅. */ symbolic::Polynomial NewFreePolynomial( const symbolic::Variables& indeterminates, int degree, const std::string& coeff_name = "a"); /** * Returns a free polynomial that only contains even degree monomials. A * monomial is even degree if its total degree (sum of all variables' degree) * is even. For example, xy is an even degree monomial (degree 2) while x²y is * not (degree 3). * @param indeterminates The monomial basis is over these indeterminates. * @param degree The highest degree of the polynomial. * @param coeff_name The coefficients of the polynomial are decision variables * with this name as a base. The variable name would be "a1", "a2", etc. 
*/ symbolic::Polynomial NewEvenDegreeFreePolynomial( const symbolic::Variables& indeterminates, int degree, const std::string& coeff_name = "a"); /** * Returns a free polynomial that only contains odd degree monomials. A * monomial is odd degree if its total degree (sum of all variables' degree) * is even. For example, xy is not an odd degree monomial (degree 2) while x²y * is (degree 3). * @param indeterminates The monomial basis is over these indeterminates. * @param degree The highest degree of the polynomial. * @param coeff_name The coefficients of the polynomial are decision variables * with this name as a base. The variable name would be "a1", "a2", etc. */ symbolic::Polynomial NewOddDegreeFreePolynomial( const symbolic::Variables& indeterminates, int degree, const std::string& coeff_name = "a"); /** * Types of non-negative polynomial that can be found through conic * optimization. We currently support SOS, SDSOS and DSOS. For more * information about these polynomial types, please refer to * "DSOS and SDSOS Optimization: More Tractable * Alternatives to Sum of Squares and Semidefinite Optimization" by Amir Ali * Ahmadi and Anirudha Majumdar, with arXiv link * https://arxiv.org/abs/1706.02586 */ enum class NonnegativePolynomial { // We reserve the 0 value as a tactic for identifying uninitialized enums. kSos = 1, ///< A sum-of-squares polynomial. kSdsos, ///< A scaled-diagonally dominant sum-of-squares polynomial. kDsos, ///< A diagonally dominant sum-of-squares polynomial. }; /** Returns a pair of a SOS polynomial p = mᵀQm and the Gramian matrix Q, * where m is the @p monomial basis. * For example, `NewSosPolynomial(Vector2{x,y})` returns a * polynomial * p = Q₍₀,₀₎x² + 2Q₍₁,₀₎xy + Q₍₁,₁₎y² * and Q. * Depending on the type of the polynomial, we will impose different * constraint on the polynomial. * - if type = kSos, we impose the polynomial being SOS. * - if type = kSdsos, we impose the polynomial being SDSOS. 
* - if type = kDsos, we impose the polynomial being DSOS. * @param gram_name The name of the gram matrix for print out. * @note Q is a symmetric monomial_basis.rows() x monomial_basis.rows() * matrix. */ std::pair NewSosPolynomial( const Eigen::Ref>& monomial_basis, NonnegativePolynomial type = NonnegativePolynomial::kSos, const std::string& gram_name = "S"); /** * Overloads NewSosPolynomial, except the Gramian matrix Q is an * input instead of an output. */ symbolic::Polynomial NewSosPolynomial( const Eigen::Ref>& gramian, const Eigen::Ref>& monomial_basis, NonnegativePolynomial type = NonnegativePolynomial::kSos); /** * Overloads NewSosPolynomial. * Returns a pair of a SOS polynomial p = m(x)ᵀQm(x) of degree @p degree * and the Gramian matrix Q that should be PSD, where m(x) is the * result of calling `MonomialBasis(indeterminates, degree/2)`. For example, * `NewSosPolynomial({x}, 4)` returns a pair of a polynomial * p = Q₍₀,₀₎x⁴ + 2Q₍₁,₀₎ x³ + (2Q₍₂,₀₎ + Q₍₁,₁₎)x² + 2Q₍₂,₁₎x + Q₍₂,₂₎ * and Q. * @param type Depending on the type of the polynomial, we will impose * different constraint on the polynomial. * - if type = kSos, we impose the polynomial being SOS. * - if type = kSdsos, we impose the polynomial being SDSOS. * - if type = kDsos, we impose the polynomial being DSOS. * @param gram_name The name of the gram matrix for print out. * * @throws std::exception if @p degree is not a positive even integer. * @see MonomialBasis. */ std::pair NewSosPolynomial( const symbolic::Variables& indeterminates, int degree, NonnegativePolynomial type = NonnegativePolynomial::kSos, const std::string& gram_name = "S"); /** * @anchor even_degree_nonnegative_polynomial * @name Creating even-degree nonnegative polynomials * Creates a nonnegative polynomial p = m(x)ᵀQm(x) of a given degree, and p * only contains even-degree monomials. If we partition the monomials m(x) to * odd degree monomials m_o(x) and even degree monomials m_e(x), then we * can write p(x) as *
   * ⌈m_o(x)⌉ᵀ * ⌈Q_oo Q_oeᵀ⌉ * ⌈m_o(x)⌉
   * ⌊m_e(x)⌋    ⌊Q_oe Q_ee ⌋   ⌊m_e(x)⌋
   * 
* Since p(x) doesn't contain any odd degree monomials, and p(x) contains * terms m_e(x)ᵀ*Q_oe * m_o(x) which has odd degree, we know that the * off-diagonal block Q_oe has to be zero. So the constraint that Q is psd * can be simplified as Q_oo and Q_ee has to be psd. Since both Q_oo and * Q_ee have smaller size than Q, these PSD constraints are easier to solve * than requiring Q to be PSD. * One use case for even-degree polynomial, is for polynomials that are even * functions, namely p(x) = p(-x). * @param indeterminates The set of indeterminates x * @param degree The total degree of the polynomial p. @pre This must be an * even number. * @return (p(x), Q_oo, Q_ee). p(x) is the newly added non-negative * polynomial. p(x) = m_o(x)ᵀ*Q_oo*m_o(x) + m_e(x)ᵀ*Q_ee*m_e(x) where m_o(x) * and m_e(x) contain all the even/odd monomials of x respectively. * The returned non-negative polynomial can be of different types, including * Sum-of-squares (SOS), diagonally-dominant-sum-of-squares (dsos), and * scaled-diagonally-dominant-sum-of-squares (sdsos). */ //@{ /** * See @ref even_degree_nonnegative_polynomial for more details. * Variant that produces different non-negative polynomials depending on @p * type. * @param type The returned polynomial p(x) can be either SOS, SDSOS or DSOS, * depending on @p type. */ std::tuple NewEvenDegreeNonnegativePolynomial(const symbolic::Variables& indeterminates, int degree, NonnegativePolynomial type); /** * See @ref even_degree_nonnegative_polynomial for more details. * Variant that produces a SOS polynomial. */ std::tuple NewEvenDegreeSosPolynomial(const symbolic::Variables& indeterminates, int degree); /** * see @ref even_degree_nonnegative_polynomial for details. * Variant that produces an SDSOS polynomial. */ std::tuple NewEvenDegreeSdsosPolynomial(const symbolic::Variables& indeterminates, int degree); /** * see @ref even_degree_nonnegative_polynomial for details. * Variant that produces a DSOS polynomial. 
* Same as NewEvenDegreeSosPolynomial, except the returned polynomial is * diagonally dominant sum of squares (dsos). */ std::tuple NewEvenDegreeDsosPolynomial(const symbolic::Variables& indeterminates, int degree); //@} /** * Creates a symbolic polynomial from the given expression `e`. It uses this * MathematicalProgram's `indeterminates()` in constructing the polynomial. * * This method helps a user create a polynomial with the right set of * indeterminates which are declared in this MathematicalProgram. We recommend * users to use this method over an explicit call to Polynomial constructors * to avoid a possible mismatch between this MathematicalProgram's * indeterminates and the user-specified indeterminates (or unspecified, which * then includes all symbolic variables in the expression `e`). Consider the * following example. * * e = ax + bx + c * * MP.indeterminates() = {x} * MP.decision_variables() = {a, b} * * - `MP.MakePolynomial(e)` create a polynomial, `(a + b)x + c`. Here only * `x` is an indeterminate of this polynomial. * * - In contrast, `symbolic::Polynomial(e)` returns `ax + bx + c` where all * variables `{a, b, x}` are indeterminates. Note that this is problematic * as its indeterminates, `{a, b, x}` and the MathematicalProgram's decision * variables, `{a, b}` overlap. * * @note This function does not require that the decision variables in `e` is * a subset of the decision variables in MathematicalProgram. */ [[nodiscard]] symbolic::Polynomial MakePolynomial( const symbolic::Expression& e) const; /** * Reparses the polynomial `p` using this MathematicalProgram's * indeterminates. */ void Reparse(symbolic::Polynomial* p) const; /** * Adds indeterminates, appending them to an internal vector of any * existing indeterminates. * @tparam rows The number of rows in the new indeterminates. * @tparam cols The number of columns in the new indeterminates. * @param names A vector of strings containing the name for each variable. 
* @return The MatrixIndeterminate of size rows x cols, containing the * new vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * std::array names = {"x1", "x2", "x3", "x4", "x5", "x6"}; * auto x = prog.NewIndeterminates<2, 3>(names); * @endcode * This adds a 2 x 3 matrix indeterminates into the program. * * The name of the indeterminates is only used for the user in order to ease * readability. * * @exclude_from_pydrake_mkdoc{Overloads that require explicit template * arguments (rows, cols) are not bound in pydrake.} */ template MatrixIndeterminate NewIndeterminates( const std::array& names) { MatrixIndeterminate indeterminates_matrix; NewIndeterminates_impl(names, indeterminates_matrix); return indeterminates_matrix; } /** * Adds indeterminates, appending them to an internal vector of any * existing indeterminates. * @tparam rows The number of rows in the new indeterminates. * @tparam cols The number of columns in the new indeterminates. * @param names A vector of strings containing the name for each variable. * @return The MatrixIndeterminate of size rows x cols, containing the * new vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * std::array names = {"x1", "x2"}; * auto x = prog.NewIndeterminates<2>(names); * @endcode * This adds a 2 vector indeterminates into the program. * * The name of the indeterminates is only used for the user in order to ease * readability. * * @exclude_from_pydrake_mkdoc{Overloads that require explicit template * arguments (rows) are not bound in pydrake.} */ template VectorIndeterminate NewIndeterminates( const std::array& names) { return NewIndeterminates(names); } /** * Adds indeterminates, appending them to an internal vector of any * existing indeterminates. * @tparam rows The number of rows in the new indeterminates. * @tparam cols The number of columns in the new indeterminates. * @param names A vector of strings containing the name for each variable. 
* @return The MatrixIndeterminate of size rows x cols, containing the * new vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * auto x = prog.NewIndeterminates<2, 3>("X"); * @endcode * This adds a 2 x 3 matrix indeterminates into the program. * * The name of the indeterminates is only used for the user in order to ease * readability. * * @exclude_from_pydrake_mkdoc{Overloads that require explicit template * arguments (rows, cols) are not bound in pydrake.} */ template MatrixIndeterminate NewIndeterminates( const std::string& name = "X") { std::array names; for (int j = 0; j < cols; ++j) { for (int i = 0; i < rows; ++i) { names[j * rows + i] = name + "(" + std::to_string(i) + "," + std::to_string(j) + ")"; } } return NewIndeterminates(names); } /** * Adds indeterminates to the program. * The name for all newly added indeterminates are set to @p name. The default * name is "x" * @see NewIndeterminates(const std::array& names) * * @exclude_from_pydrake_mkdoc{Overloads that require explicit template * arguments (rows) are not bound in pydrake.} */ template VectorIndeterminate NewIndeterminates(const std::string& name = "x") { std::array names; int offset = (name.compare("x") == 0) ? num_vars() : 0; for (int i = 0; i < rows; ++i) { names[i] = name + "(" + std::to_string(offset + i) + ")"; } return NewIndeterminates(names); } /** * Adds indeterminates to this MathematicalProgram. * @see NewIndeterminates(int rows, int cols, const * std::vector& names); * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ VectorXIndeterminate NewIndeterminates(int rows, const std::vector& names); /** * Adds indeterminates to this MathematicalProgram, with default name * "x". * @see NewIndeterminates(int rows, int cols, const * std::vector& names); */ VectorXIndeterminate NewIndeterminates(int rows, const std::string& name = "x"); /** * Adds indeterminates, appending them to an internal vector of any * existing vars. 
* @param rows The number of rows in the new indeterminates. * @param cols The number of columns in the new indeterminates. * @param names A vector of strings containing the name for each variable. * @return The MatrixIndeterminate of size rows x cols, containing the * new vars (not all the vars stored). * * Example: * @code{.cc} * MathematicalProgram prog; * auto x = prog.NewIndeterminates(2, 3, {"x1", "x2", "x3", "x4", * "x5", "x6"}); * @endcode * This adds a 2 x 3 matrix indeterminates into the program. * * The name of the variable is only used for the user in order to ease * readability. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ MatrixXIndeterminate NewIndeterminates(int rows, int cols, const std::vector& names); /** * Adds indeterminates to this MathematicalProgram, with default name * "X". The new variables are returned and viewed as a matrix, with size * @p rows x @p cols. * @see NewIndeterminates(int rows, int cols, const * std::vector& names); */ MatrixXIndeterminate NewIndeterminates(int rows, int cols, const std::string& name = "X"); /** Adds indeterminate. * This method appends an indeterminate to the end of the program's old * indeterminates, if `new_indeterminate` is not already in the program's old * indeterminates. * @param new_indeterminate The indeterminate to be appended to the * program's old indeterminates. * @return indeterminate_index The index of the added indeterminate in the * program's indeterminates. i.e. prog.indeterminates()(indeterminate_index) = * new_indeterminate. * @pre `new_indeterminate` should not intersect with the program's * decision variables. * @pre new_indeterminate should not be dummy. * @pre new_indeterminate should be of CONTINUOUS type. */ int AddIndeterminate(const symbolic::Variable& new_indeterminate); /** Adds indeterminates. * This method appends some indeterminates to the end of the program's old * indeterminates. 
* @param new_indeterminates The indeterminates to be appended to the * program's old indeterminates. * @pre `new_indeterminates` should not intersect with the program's old * decision variables. * @pre Each entry in new_indeterminates should not be dummy. * @pre Each entry in new_indeterminates should be of CONTINUOUS type. */ void AddIndeterminates( const Eigen::Ref& new_indeterminates); /** Adds indeterminates. * This method appends some indeterminates to the end of the program's old * indeterminates. * @param new_indeterminates The indeterminates to be appended to the * program's old indeterminates. * @pre `new_indeterminates` should not intersect with the program's old * decision variables. * @pre Each entry in new_indeterminates should not be dummy. * @pre Each entry in new_indeterminates should be of CONTINUOUS type. */ void AddIndeterminates( const symbolic::Variables& new_indeterminates); /** * Adds a callback method to visualize intermediate results of the * optimization. * * @note Just like other costs/constraints, not all solvers support callbacks. * Adding a callback here will force MathematicalProgram::Solve to select a * solver that support callbacks. For instance, adding a visualization * callback to a quadratic programming problem may result in using a nonlinear * programming solver as the default solver. * * @param callback a std::function that accepts an Eigen::Vector of doubles * representing the bound decision variables. * @param vars the decision variables that should be passed to the callback. */ Binding AddVisualizationCallback( const VisualizationCallback::CallbackFunction& callback, const Eigen::Ref& vars); /** * Adds a callback method to visualize intermediate results of the * optimization. * * @note Just like other costs/constraints, not all solvers support callbacks. * Adding a callback here will force MathematicalProgram::Solve to select a * solver that support callbacks. 
For instance, adding a visualization * callback to a quadratic programming problem may result in using a nonlinear * programming solver as the default solver. * * @param callback a std::function that accepts an Eigen::Vector of doubles * representing the for the bound decision variables. * @param vars the decision variables that should be passed to the callback. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddVisualizationCallback( const VisualizationCallback::CallbackFunction& callback, const VariableRefList& vars) { return AddVisualizationCallback(callback, ConcatenateVariableRefList((vars))); } /** * Adds a generic cost to the optimization program. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddCost(const Binding& binding); /** * Adds a cost type to the optimization program. * @param obj The added objective. * @param vars The decision variables on which the cost depend. * * @pydrake_mkdoc_identifier{2args_obj_vars} */ template auto AddCost(const std::shared_ptr& obj, const Eigen::Ref& vars) { // Redirect to the appropriate type // Use auto to enable the overloading method to upcast if needed return AddCost(internal::CreateBinding(obj, vars)); } /** * Adds a generic cost to the optimization program. * @param obj The added objective. * @param vars The decision variables on which the cost depend. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template auto AddCost(const std::shared_ptr& obj, const VariableRefList& vars) { return AddCost(obj, ConcatenateVariableRefList(vars)); } /** * Convert an input of type @p F to a FunctionCost object. * @tparam F This class should have functions numInputs(), numOutputs and * eval(x, y). */ template static std::shared_ptr MakeCost(F&& f) { return MakeFunctionCost(f); } /** * Adds a cost to the optimization program on a list of variables. * @tparam F it should define functions numInputs, numOutputs and eval. 
Check * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template typename std::enable_if_t::value, Binding> AddCost(F&& f, const VariableRefList& vars) { return AddCost(f, ConcatenateVariableRefList(vars)); } /** * Adds a cost to the optimization program on an Eigen::Vector containing * decision variables. * @tparam F Type that defines functions numInputs, numOutputs and eval. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template typename std::enable_if_t::value, Binding> AddCost(F&& f, const Eigen::Ref& vars) { auto c = MakeFunctionCost(std::forward(f)); return AddCost(c, vars); } /** * Statically assert if a user inadvertently passes a * binding-compatible Constraint. * @tparam F The type to check. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template typename std::enable_if_t::value, Binding> AddCost(F&&, Vars&&) { throw std::runtime_error("This will assert at compile-time."); } /** * Adds a cost term of the form c'*x. * Applied to a subset of the variables and pushes onto * the linear cost data structure. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddCost(const Binding& binding); /** * Adds a linear cost term of the form a'*x + b. * @param e A linear symbolic expression. * @pre e is a linear expression a'*x + b, where each entry of x is a decision * variable in the mathematical program. * @return The newly added linear constraint, together with the bound * variables. */ Binding AddLinearCost(const symbolic::Expression& e); /** * Adds a linear cost term of the form a'*x + b. * Applied to a subset of the variables and pushes onto * the linear cost data structure. */ Binding AddLinearCost(const Eigen::Ref& a, double b, const VariableRefList& vars) { return AddLinearCost(a, b, ConcatenateVariableRefList((vars))); } /** * Adds a linear cost term of the form a'*x + b. * Applied to a subset of the variables and pushes onto * the linear cost data structure. 
*/ Binding AddLinearCost( const Eigen::Ref& a, double b, const Eigen::Ref& vars); /** * Adds a linear cost term of the form a'*x. * Applied to a subset of the variables and pushes onto * the linear cost data structure. */ template Binding AddLinearCost(const Eigen::Ref& a, const VarType& vars) { const double b = 0.; return AddLinearCost(a, b, vars); } /** * Adds a cost term of the form 0.5*x'*Q*x + b'x. * Applied to subset of the variables and pushes onto * the quadratic cost data structure. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddCost(const Binding& binding); /** * Add a quadratic cost term of the form 0.5*x'*Q*x + b'*x + c. * Notice that in the optimization program, the constant term `c` in the cost * is ignored. * @param e A quadratic symbolic expression. * @param is_convex Whether the cost is already known to be convex. If * is_convex=nullopt (the default), then Drake will determine if `e` is a * convex quadratic cost or not. To improve the computation speed, the user * can set is_convex if the user knows whether the cost is convex or not. * @throws std::exception if the expression is not quadratic. * @return The newly added cost together with the bound variables. */ Binding AddQuadraticCost( const symbolic::Expression& e, std::optional is_convex = std::nullopt); /** * Adds a cost term of the form 0.5*x'*Q*x + b'x. * Applied to subset of the variables. * @param is_convex Whether the cost is already known to be convex. If * is_convex=nullopt (the default), then Drake will determine if this is a * convex quadratic cost or not (by checking if matrix Q is positive * semidefinite or not). To improve the computation speed, the user can set * is_convex if the user knows whether the cost is convex or not. 
* @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddQuadraticCost( const Eigen::Ref& Q, const Eigen::Ref& b, const VariableRefList& vars, std::optional is_convex = std::nullopt) { return AddQuadraticCost(Q, b, ConcatenateVariableRefList(vars), is_convex); } /** * Adds a cost term of the form 0.5*x'*Q*x + b'x + c * Applied to subset of the variables. * @param is_convex Whether the cost is already known to be convex. If * is_convex=nullopt (the default), then Drake will determine if this is a * convex quadratic cost or not. To improve the computation speed, the user * can set is_convex if the user knows whether the cost is convex or not. */ Binding AddQuadraticCost( const Eigen::Ref& Q, const Eigen::Ref& b, double c, const Eigen::Ref& vars, std::optional is_convex = std::nullopt); /** * Adds a cost term of the form 0.5*x'*Q*x + b'x * Applied to subset of the variables. * @param is_convex Whether the cost is already known to be convex. If * is_convex=nullopt (the default), then Drake will determine if this is a * convex quadratic cost or not. To improve the computation speed, the user * can set is_convex if the user knows whether the cost is convex or not. */ Binding AddQuadraticCost( const Eigen::Ref& Q, const Eigen::Ref& b, const Eigen::Ref& vars, std::optional is_convex = std::nullopt); /** * Adds a cost term of the form (x-x_desired)'*Q*(x-x_desired). */ Binding AddQuadraticErrorCost( const Eigen::Ref& Q, const Eigen::Ref& x_desired, const VariableRefList& vars) { return AddQuadraticErrorCost(Q, x_desired, ConcatenateVariableRefList(vars)); } /** * Adds a cost term of the form (x-x_desired)'*Q*(x-x_desired). 
*/ Binding AddQuadraticErrorCost( const Eigen::Ref& Q, const Eigen::Ref& x_desired, const Eigen::Ref& vars); /** * Adds a quadratic cost of the form |Ax-b|²=(Ax-b)ᵀ(Ax-b) */ Binding Add2NormSquaredCost( const Eigen::Ref& A, const Eigen::Ref& b, const VariableRefList& vars) { return Add2NormSquaredCost(A, b, ConcatenateVariableRefList(vars)); } /** * Adds a quadratic cost of the form |Ax-b|²=(Ax-b)ᵀ(Ax-b) */ Binding Add2NormSquaredCost( const Eigen::Ref& A, const Eigen::Ref& b, const Eigen::Ref& vars) { return AddCost(Make2NormSquaredCost(A, b), vars); } /** * Adds an L2 norm cost |Ax+b|₂ (notice this cost is not quadratic since we * don't take the square of the L2 norm). * Refer to AddL2NormCost for more details. */ Binding AddCost(const Binding& binding); /** * Adds an L2 norm cost |Ax+b|₂ (notice this cost is not quadratic since we * don't take the square of the L2 norm). * @note Currently only the SnoptSolver and IpoptSolver support kL2NormCost. * @pydrake_mkdoc_identifier{3args_A_b_vars} */ // TODO(hongkai.dai): support L2NormCost in each solver. Binding AddL2NormCost( const Eigen::Ref& A, const Eigen::Ref& b, const Eigen::Ref& vars); /** * Adds an L2 norm cost |Ax+b|₂ (notice this cost is not quadratic since we * don't take the square of the L2 norm) * @pydrake_mkdoc_identifier{3args_A_b_vars_list} */ Binding AddL2NormCost(const Eigen::Ref& A, const Eigen::Ref& b, const VariableRefList& vars) { return AddL2NormCost(A, b, ConcatenateVariableRefList(vars)); } /** * Adds an L2 norm cost min |Ax+b|₂ as a linear cost min s * on the slack variable s, together with a Lorentz cone constraint * s ≥ |Ax+b|₂ * Many conic optimization solvers (Gurobi, MOSEK™, SCS, etc) natively prefers * this form of linear cost + conic constraints. So if you are going to use * one of these conic solvers, then add the L2 norm cost using this function * instead of AddL2NormCost(). * @return (s, linear_cost, lorentz_cone_constraint). 
`s` is the slack * variable (with variable name string as "slack"), `linear_cost` is the cost * on `s`, and `lorentz_cone_constraint` is the constraint s≥|Ax+b|₂ */ std::tuple, Binding> AddL2NormCostUsingConicConstraint( const Eigen::Ref& A, const Eigen::Ref& b, const Eigen::Ref& vars); /** * Adds a cost term in the polynomial form. * @param e A symbolic expression in the polynomial form. * @return The newly created cost and the bound variables. */ Binding AddPolynomialCost(const symbolic::Expression& e); /** * Adds a cost in the symbolic form. * Note that the constant part of the cost is ignored. So if you set * `e = x + 2`, then only the cost on `x` is added, the constant term 2 is * ignored. * @param e The linear or quadratic expression of the cost. * @pre `e` is linear or `e` is quadratic. Otherwise throws a runtime error. * @return The newly created cost, together with the bound variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddCost(const symbolic::Expression& e); /** * Adds the cost to maximize the log determinant of symmetric matrix X. * log(det(X)) is a concave function of X, so we can maximize it through * convex optimization. In order to do that, we introduce slack variables t, * and a lower triangular matrix Z, with the constraints * * ⌈X Z⌉ is positive semidefinite. * ⌊Zᵀ diag(Z)⌋ * * log(Z(i, i)) >= t(i) * * and we will minimize -∑ᵢt(i). * @param X A symmetric positive semidefinite matrix X, whose log(det(X)) will * be maximized. * @return (cost, t, Z) cost is -∑ᵢt(i), we also return the newly created * slack variables t and the lower triangular matrix Z. Note that Z is not a * matrix of symbolic::Variable but symbolic::Expression, because the * upper-diagonal entries of Z are not variable, but expression 0. * @pre X is a symmetric matrix. * @note We implicitly require that `X` is positive semidefinite (psd) (as * X is the diagonal entry of the big psd matrix above). 
If your `X` is not * necessarily psd, then don't call this function. * @note The constraint log(Z(i, i)) >= t(i) is imposed as an exponential cone * constraint. Please make sure you have a solver that supports exponential * cone constraint (currently SCS does). * Refer to https://docs.mosek.com/modeling-cookbook/sdo.html#log-determinant * for more details. */ std::tuple, VectorX, MatrixX> AddMaximizeLogDeterminantCost( const Eigen::Ref>& X); /** * @anchor maximize_geometric_mean * @name Maximize geometric mean * Adds the cost to maximize the geometric mean of z = Ax+b, i.e. * power(∏ᵢz(i), 1/n), where z ∈ ℝⁿ, z(i) >= 0. Mathematically, the cost we * add is -power(∏ᵢz(i), 1/r), where r = power(2, ceil(log₂n)), namely r is * the smallest power of 2 that is no smaller than the size of z. For example, * if z ∈ ℝ², then the added cost is -power(z(0)*z(1), 1/2); if z ∈ ℝ³, then * the added cost is -power(z(0)*z(1)*z(2), 1/4). * * In order to add this cost, we need to introduce a set of second-order cone * constraints. For example, to maximize power(z(0) * z(1), 1/2), we * introduce the slack variable w(0), together with the second order cone * constraint w(0)² ≤ z(0) * z(1), z(0) ≥ 0, z(1) ≥ 0, and we maximize w(0). * * To maximize power(z(0) * z(1) * z(2), 1/4), we introduce the slack * variable w(0), w(1), w(2), together with the second order cone constraints *
   * w(0)² ≤ z(0) * z(1), z(0) ≥ 0, z(1) ≥ 0
   * w(1)² ≤ z(2), z(2) ≥ 0
   * w(2)² ≤ w(0) * w(1), w(0) ≥ 0, w(1) ≥ 0
   * 
* and we maximize w(2). */ //@{ /** * An overloaded version of @ref maximize_geometric_mean. * @return cost The added cost (note that since MathematicalProgram only * minimizes the cost, the returned cost evaluates to -c * power(∏ᵢx(i), 1/n). * @pre A.rows() == b.rows(), A.rows() >= 2. */ Binding AddMaximizeGeometricMeanCost( const Eigen::Ref& A, const Eigen::Ref& b, const Eigen::Ref>& x); /** * An overloaded version of @ref maximize_geometric_mean. * We add the cost to maximize the geometric mean of x, i.e., c*power(∏ᵢx(i), * 1/n). * @param c The positive coefficient of the geometric mean cost, @default * is 1. * @return cost The added cost (note that since MathematicalProgram only * minimizes the cost, the returned cost evaluates to -c * power(∏ᵢx(i), 1/n). * @pre x.rows() >= 2. * @pre c > 0. */ Binding AddMaximizeGeometricMeanCost( const Eigen::Ref>& x, double c = 1.0); //@} /** * Adds a generic constraint to the program. This should * only be used if a more specific type of constraint is not * available, as it may require the use of a significantly more * expensive solver. * * @note If @p binding.evaluator()->num_constraints() == 0, then this * constraint is not added into the MathematicalProgram. We return @p binding * directly. */ Binding AddConstraint(const Binding& binding); /** * Adds one row of constraint lb <= e <= ub where @p e is a symbolic * expression. * @throws std::exception if * 1. lb <= e <= ub is a trivial constraint such as 1 <= 2 <= 3. * 2. lb <= e <= ub is unsatisfiable such as 1 <= -5 <= 3 * * @param e A symbolic expression of the decision variables. * @param lb A scalar, the lower bound. * @param ub A scalar, the upper bound. * * The resulting constraint may be a BoundingBoxConstraint, LinearConstraint, * LinearEqualityConstraint, or ExpressionConstraint, depending on the * arguments. 
Constraints of the form x == 1 (which could be created as a * BoundingBoxConstraint or LinearEqualityConstraint) will be * constructed as a LinearEqualityConstraint. */ Binding AddConstraint(const symbolic::Expression& e, double lb, double ub); /** * Adds constraints represented by symbolic expressions to the program. It * throws if lb <= v <= ub includes trivial/unsatisfiable * constraints. * * @overload Binding AddConstraint(const symbolic::Expression& e, * double lb, double ub) * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Eigen::Ref>& v, const Eigen::Ref& lb, const Eigen::Ref& ub); /** * Add a constraint represented by a symbolic formula to the program. The * input formula @p f can be of the following forms: * * 1. e1 <= e2 * 2. e1 >= e2 * 3. e1 == e2 * 4. A conjunction of relational formulas where each conjunct is * a relational formula matched by 1, 2, or 3. * * Note that first two cases might return an object of * Binding, Binding, or * Binding, depending * on @p f. Also the third case might return an object of * Binding or Binding. * * It throws an exception if * 1. @p f is not matched with one of the above patterns. Especially, strict * inequalities (<, >) are not allowed. * 2. @p f is either a trivial constraint such as "1 <= 2" or an * unsatisfiable constraint such as "2 <= 1". * 3. It is not possible to find numerical bounds of `e1` and `e2` where @p f * = e1 ≃ e2. We allow `e1` and `e2` to be infinite but only if there are * no other terms. For example, `x <= ∞` is allowed. However, `x - ∞ <= 0` * is not allowed because `x ↦ ∞` introduces `nan` in the evaluation. */ Binding AddConstraint(const symbolic::Formula& f); /** * Adds a constraint represented by an Eigen::Matrix or * Eigen::Array to the program. A common use-case of this * function is to add a constraint with the element-wise comparison between * two Eigen matrices, using `A.array() <= B.array()`. See the following * example. 
* * @code * MathematicalProgram prog; * Eigen::Matrix A = ...; * Eigen::Vector2d b = ...; * auto x = prog.NewContinuousVariables(2, "x"); * prog.AddConstraint((A * x).array() <= b.array()); * @endcode * * A formula in @p formulas can be of the following forms: * * 1. e1 <= e2 * 2. e1 >= e2 * 3. e1 == e2 * * It throws an exception if AddConstraint(const symbolic::Formula& f) * throws an exception for f ∈ @p formulas. * * @overload Binding AddConstraint(const symbolic::Formula& f) * * @tparam Derived Eigen::Matrix or Eigen::Array with Formula as the Scalar. */ template typename std::enable_if_t< is_eigen_scalar_same::value, Binding> AddConstraint(const Eigen::DenseBase& formulas) { return AddConstraint(internal::ParseConstraint(formulas)); } /** * Adds a generic constraint to the program. This should * only be used if a more specific type of constraint is not * available, as it may require the use of a significantly more * expensive solver. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template auto AddConstraint(std::shared_ptr con, const VariableRefList& vars) { return AddConstraint(con, ConcatenateVariableRefList(vars)); } /** * Adds a generic constraint to the program. This should * only be used if a more specific type of constraint is not * available, as it may require the use of a significantly more * expensive solver. * @pydrake_mkdoc_identifier{2args_con_vars} */ template auto AddConstraint(std::shared_ptr con, const Eigen::Ref& vars) { return AddConstraint(internal::CreateBinding(con, vars)); } /** * Adds linear constraints referencing potentially a subset * of the decision variables (defined in the vars parameter). * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds linear constraints referencing potentially a subset * of the decision variables (defined in the vars parameter). 
*/ Binding AddLinearConstraint( const Eigen::Ref& A, const Eigen::Ref& lb, const Eigen::Ref& ub, const VariableRefList& vars) { return AddLinearConstraint(A, lb, ub, ConcatenateVariableRefList(vars)); } /** * Adds linear constraints referencing potentially a subset * of the decision variables (defined in the vars parameter). */ Binding AddLinearConstraint( const Eigen::Ref& A, const Eigen::Ref& lb, const Eigen::Ref& ub, const Eigen::Ref& vars); /** * Adds one row of linear constraint referencing potentially a * subset of the decision variables (defined in the vars parameter). * lb <= a*vars <= ub * @param a A row vector. * @param lb A scalar, the lower bound. * @param ub A scalar, the upper bound. * @param vars The decision variables on which to impose the linear * constraint. */ Binding AddLinearConstraint( const Eigen::Ref& a, double lb, double ub, const VariableRefList& vars) { return AddLinearConstraint(a, lb, ub, ConcatenateVariableRefList(vars)); } /** * Adds one row of linear constraint referencing potentially a * subset of the decision variables (defined in the vars parameter). * lb <= a*vars <= ub * @param a A row vector. * @param lb A scalar, the lower bound. * @param ub A scalar, the upper bound. * @param vars The decision variables on which to impose the linear * constraint. */ Binding AddLinearConstraint( const Eigen::Ref& a, double lb, double ub, const Eigen::Ref& vars) { return AddLinearConstraint(a, Vector1d(lb), Vector1d(ub), vars); } /** * Adds one row of linear constraint lb <= e <= ub where @p e is a symbolic * expression. * @throws std::exception if * 1. @p e is a non-linear expression. * 2. lb <= e <= ub is a trivial constraint such as 1 <= 2 <= 3. * 3. lb <= e <= ub is unsatisfiable such as 1 <= -5 <= 3 * * @param e A linear symbolic expression in the form of c0 + c1 * v1 + * ... + cn * vn where @c c_i is a constant and @v_i is a variable. * @param lb A scalar, the lower bound. * @param ub A scalar, the upper bound. 
*/ Binding AddLinearConstraint(const symbolic::Expression& e, double lb, double ub); /** * Adds linear constraints represented by symbolic expressions to the * program. It throws if @v includes a non-linear expression or lb <= v <= * ub includes trivial/unsatisfiable constraints. */ Binding AddLinearConstraint( const Eigen::Ref>& v, const Eigen::Ref& lb, const Eigen::Ref& ub); /** * Add a linear constraint represented by a symbolic formula to the * program. The input formula @p f can be of the following forms: * * 1. e1 <= e2 * 2. e1 >= e2 * 3. e1 == e2 * 4. A conjunction of relational formulas where each conjunct is * a relational formula matched by 1, 2, or 3. * * Note that first two cases might return an object of * Binding depending on @p f. Also the third case * returns an object of Binding. * * It throws an exception if * 1. @p f is not matched with one of the above patterns. Especially, strict * inequalities (<, >) are not allowed. * 2. @p f includes a non-linear expression. * 3. @p f is either a trivial constraint such as "1 <= 2" or an * unsatisfiable constraint such as "2 <= 1". * 4. It is not possible to find numerical bounds of `e1` and `e2` where @p f * = e1 ≃ e2. We allow `e1` and `e2` to be infinite but only if there are * no other terms. For example, `x <= ∞` is allowed. However, `x - ∞ <= 0` * is not allowed because `x ↦ ∞` introduces `nan` in the evaluation. */ Binding AddLinearConstraint(const symbolic::Formula& f); /** * Add a linear constraint represented by an Eigen::Array * to the program. A common use-case of this function is to add a linear * constraint with the element-wise comparison between two Eigen matrices, * using `A.array() <= B.array()`. See the following example. * * @code * MathematicalProgram prog; * Eigen::Matrix A; * auto x = prog.NewContinuousVariables(2, "x"); * Eigen::Vector2d b; * ... 
// set up A and b * prog.AddLinearConstraint((A * x).array() <= b.array()); * @endcode * * A formula in @p formulas can be of the following forms: * * 1. e1 <= e2 * 2. e1 >= e2 * 3. e1 == e2 * * It throws an exception if AddLinearConstraint(const symbolic::Formula& f) * throws an exception for f ∈ @p formulas. * @tparam Derived An Eigen Array type of Formula. */ Binding AddLinearConstraint( const Eigen::Ref>& formulas); /** * Adds linear equality constraints referencing potentially a * subset of the decision variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds one row of linear constraint e = b where @p e is a symbolic * expression. * @throws std::exception if * 1. @p e is a non-linear expression. * 2. @p e is a constant. * * @param e A linear symbolic expression in the form of c0 + c1 * x1 + * ... + cn * xn where @c c_i is a constant and @x_i is a variable. * @param b A scalar. * @return The newly added linear equality constraint, together with the * bound variable. */ Binding AddLinearEqualityConstraint( const symbolic::Expression& e, double b); /** * Adds a linear equality constraint represented by a symbolic formula to the * program. The input formula @p f is either an equality formula (`e1 == e2`) * or a conjunction of equality formulas. * * It throws an exception if * * 1. @p f is neither an equality formula nor a conjunction of equalities. * 2. @p f includes a non-linear expression. */ Binding AddLinearEqualityConstraint( const symbolic::Formula& f); /** * Adds linear equality constraints \f$ v = b \f$, where \p v(i) is a symbolic * linear expression. * @throws std::exception if * 1. @p v(i) is a non-linear expression. * 2. @p v(i) is a constant. * * @tparam DerivedV An Eigen Matrix type of Expression. A column vector. * @tparam DerivedB An Eigen Matrix type of double. A column vector. * @param v v(i) is a linear symbolic expression in the form of * c0 + c1 * x1 + ... 
+ cn * xn where ci is a constant and @xi is * a variable. * @param b A vector of doubles. * @return The newly added linear equality constraint, together with the * bound variables. */ template typename std::enable_if_t< is_eigen_vector_expression_double_pair::value, Binding> AddLinearEqualityConstraint(const Eigen::MatrixBase& v, const Eigen::MatrixBase& b) { return AddConstraint(internal::ParseLinearEqualityConstraint(v, b)); } /** * Adds a linear equality constraint for a matrix of linear expression @p V, * such that V(i, j) = B(i, j). If V is a symmetric matrix, then the user * may only want to constrain the lower triangular part of V. * This function is meant to provide convenience to the user, it incurs * additional copy and memory allocation. For faster speed, add each column * of the matrix equality in a for loop. * @tparam DerivedV An Eigen Matrix type of Expression. The number of columns * at compile time should not be 1. * @tparam DerivedB An Eigen Matrix type of double. * @param V An Eigen Matrix of symbolic expressions. V(i, j) should be a * linear expression. * @param B An Eigen Matrix of doubles. * @param lower_triangle If true, then only the lower triangular part of @p V * is constrained, otherwise the whole matrix V is constrained. @default is * false. * @return The newly added linear equality constraint, together with the * bound variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template typename std::enable_if_t< is_eigen_nonvector_expression_double_pair::value, Binding> AddLinearEqualityConstraint(const Eigen::MatrixBase& V, const Eigen::MatrixBase& B, bool lower_triangle = false) { return AddConstraint( internal::ParseLinearEqualityConstraint(V, B, lower_triangle)); } /** AddLinearEqualityConstraint * * Adds linear equality constraints referencing potentially a subset of * the decision variables. 
* * Example: to add two equality constraints which only depend on two of the * elements of x, you could use * @code{.cc} * auto x = prog.NewContinuousVariables(6,"myvar"); * Eigen::Matrix2d Aeq; * Aeq << -1, 2, * 1, 1; * Eigen::Vector2d beq(1, 3); * prog.AddLinearEqualityConstraint(Aeq, beq, {x.segment<1>(2), * x.segment<1>(5)}); * @endcode * The code above imposes constraints * @f[-x(2) + 2x(5) = 1 @f] * @f[ x(2) + x(5) = 3 @f] * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddLinearEqualityConstraint( const Eigen::Ref& Aeq, const Eigen::Ref& beq, const VariableRefList& vars) { return AddLinearEqualityConstraint(Aeq, beq, ConcatenateVariableRefList(vars)); } /** AddLinearEqualityConstraint * * Adds linear equality constraints referencing potentially a subset of * the decision variables. * * Example: to add two equality constraints which only depend on two of the * elements of x, you could use * @code{.cc} * auto x = prog.NewContinuousVariables(6,"myvar"); * Eigen::Matrix2d Aeq; * Aeq << -1, 2, * 1, 1; * Eigen::Vector2d beq(1, 3); * // Imposes constraint * // -x(0) + 2x(1) = 1 * // x(0) + x(1) = 3 * prog.AddLinearEqualityConstraint(Aeq, beq, x.head<2>()); * @endcode */ Binding AddLinearEqualityConstraint( const Eigen::Ref& Aeq, const Eigen::Ref& beq, const Eigen::Ref& vars); /** * Adds one row of linear equality constraint referencing potentially a subset * of decision variables. * @f[ * ax = beq * @f] * @param a A row vector. * @param beq A scalar. * @param vars The decision variables on which the constraint is imposed. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddLinearEqualityConstraint( const Eigen::Ref& a, double beq, const VariableRefList& vars) { return AddConstraint(std::make_shared(a, beq), ConcatenateVariableRefList(vars)); } /** * Adds one row of linear equality constraint referencing potentially a subset * of decision variables. * @f[ * ax = beq * @f] * @param a A row vector. * @param beq A scalar. 
* @param vars The decision variables on which the constraint is imposed. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddLinearEqualityConstraint( const Eigen::Ref& a, double beq, const Eigen::Ref& vars) { return AddLinearEqualityConstraint(a, Vector1d(beq), vars); } /** * Adds bounding box constraints referencing potentially a subset of the * decision variables. * @param binding Binds a BoundingBoxConstraint with some decision variables, * such that * binding.evaluator()->lower_bound()(i) <= binding.variables()(i) * <= binding.evaluator()->upper_bound()(i) * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** AddBoundingBoxConstraint * * Adds bounding box constraints referencing potentially a * subset of the decision variables (defined in the vars parameter). * Example * \code{.cc} * MathematicalProgram prog; * auto x = prog.NewContinuousVariables<2>("x"); * auto y = prog.NewContinuousVariables<1>("y"); * Eigen::Vector3d lb(0, 1, 2); * Eigen::Vector3d ub(1, 2, 3); * // Imposes the constraint * // 0 ≤ x(0) ≤ 1 * // 1 ≤ x(1) ≤ 2 * // 2 ≤ y ≤ 3 * prog.AddBoundingBoxConstraint(lb, ub, {x, y}); * \endcode * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddBoundingBoxConstraint( const Eigen::Ref& lb, const Eigen::Ref& ub, const VariableRefList& vars) { return AddBoundingBoxConstraint(lb, ub, ConcatenateVariableRefList(vars)); } /** * Adds bounding box constraints referencing potentially a subset of the * decision variables. * @param lb The lower bound. * @param ub The upper bound. * @param vars Will impose constraint lb(i, j) <= vars(i, j) <= ub(i, j). * @return The newly constructed BoundingBoxConstraint. */ Binding AddBoundingBoxConstraint( const Eigen::Ref& lb, const Eigen::Ref& ub, const Eigen::Ref& vars); /** * Adds bounds for a single variable. * @param lb Lower bound. * @param ub Upper bound. * @param var The decision variable. 
*/ Binding AddBoundingBoxConstraint( double lb, double ub, const symbolic::Variable& var) { MatrixDecisionVariable<1, 1> var_matrix(var); return AddBoundingBoxConstraint(Vector1d(lb), Vector1d(ub), var_matrix); } /** * Adds the same scalar lower and upper bound to every variable in the list. * @param lb Lower bound. * @param ub Upper bound. * @param vars The decision variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddBoundingBoxConstraint( double lb, double ub, const VariableRefList& vars) { return AddBoundingBoxConstraint(lb, ub, ConcatenateVariableRefList(vars)); } /** * Adds the same scalar lower and upper bound to every variable in @p vars. * @tparam Derived An Eigen Vector type with Variable as the scalar * type. * @param lb Lower bound. * @param ub Upper bound. * @param vars The decision variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template typename std::enable_if_t< std::is_same_v && Derived::ColsAtCompileTime == 1, Binding> AddBoundingBoxConstraint(double lb, double ub, const Eigen::MatrixBase& vars) { const int kSize = Derived::RowsAtCompileTime; return AddBoundingBoxConstraint( Eigen::Matrix::Constant(vars.size(), lb), Eigen::Matrix::Constant(vars.size(), ub), vars); } /** * Adds the same scalar lower and upper bound to every variable in @p vars. * @tparam Derived An Eigen::Matrix with Variable as the scalar * type. The matrix has unknown number of columns at compile time, or has * more than one column. * @param lb Lower bound. * @param ub Upper bound. * @param vars The decision variables. */ template typename std::enable_if_t< std::is_same_v && Derived::ColsAtCompileTime != 1, Binding> AddBoundingBoxConstraint(double lb, double ub, const Eigen::MatrixBase& vars) { const int kSize = Derived::RowsAtCompileTime != Eigen::Dynamic && Derived::ColsAtCompileTime != Eigen::Dynamic ? 
Derived::RowsAtCompileTime * Derived::ColsAtCompileTime : Eigen::Dynamic; Eigen::Matrix flat_vars(vars.size()); for (int j = 0; j < vars.cols(); ++j) { for (int i = 0; i < vars.rows(); ++i) { flat_vars(j * vars.rows() + i) = vars(i, j); } } return AddBoundingBoxConstraint( Eigen::Matrix::Constant(vars.size(), lb), Eigen::Matrix::Constant(vars.size(), ub), flat_vars); } /** Adds quadratic constraint. The quadratic constraint is of the form lb ≤ .5 xᵀQx + bᵀx ≤ ub where `x` might be a subset of the decision variables in this MathematicalProgram. @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** Adds quadratic constraint lb ≤ .5 xᵀQx + bᵀx ≤ ub @param vars x in the documentation above. @param hessian_type Whether the Hessian is positive semidefinite, negative semidefinite or indefinite. Drake will check the type if hessian_type=std::nullopt. Specifying the hessian type will speed this method up. @pre hessian_type should be correct if it is not std::nullopt, as we will blindly trust it in the downstream code. */ Binding AddQuadraticConstraint( const Eigen::Ref& Q, const Eigen::Ref& b, double lb, double ub, const Eigen::Ref& vars, std::optional hessian_type = std::nullopt); /** Adds quadratic constraint lb ≤ .5 xᵀQx + bᵀx ≤ ub @param vars x in the documentation above. @param hessian_type Whether the Hessian is positive semidefinite, negative semidefinite or indefinite. Drake will check the type if hessian_type=std::nullopt. Specifying the hessian type will speed this method up. @pre hessian_type should be correct if it is not std::nullopt, as we will blindly trust it in the downstream code. */ Binding AddQuadraticConstraint( const Eigen::Ref& Q, const Eigen::Ref& b, double lb, double ub, const VariableRefList& vars, std::optional hessian_type = std::nullopt); /** Overloads AddQuadraticConstraint, impose lb <= e <= ub where `e` is a quadratic expression. 
*/ Binding AddQuadraticConstraint( const symbolic::Expression& e, double lb, double ub, std::optional hessian_type = std::nullopt); /** * Adds Lorentz cone constraint referencing potentially a subset * of the decision variables. * The linear expression @f$ z=Ax+b @f$ is in the Lorentz cone. * A vector \f$ z \in\mathbb{R}^n \f$ is in the Lorentz cone, if * * @f[ * z_0 \ge \sqrt{z_1^2 + ... + z_{n-1}^2} * @f] * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds Lorentz cone constraint referencing potentially a subset of the * decision variables. * @param v An Eigen::Vector of symbolic::Expression. Constraining that * \f[ * v_0 \ge \sqrt{v_1^2 + ... + v_{n-1}^2} * \f] * @return The newly constructed Lorentz cone constraint with the bounded * variables. * For example, to add the Lorentz cone constraint * * x+1 >= sqrt(y² + 2y + x² + 5), * = sqrt((y+1)²+x²+2²) * The user could call * @code{cc} * Vector4 v(x+1, y+1, x, 2.); * prog.AddLorentzConeConstraint(v); * @endcode * @param eval_type The evaluation type when evaluating the lorentz cone * constraint in generic optimization. Refer to * LorentzConeConstraint::EvalType for more details. */ Binding AddLorentzConeConstraint( const Eigen::Ref>& v, LorentzConeConstraint::EvalType eval_type = LorentzConeConstraint::EvalType::kConvexSmooth); /** * Adds Lorentz cone constraint on the linear expression v1 and quadratic * expression v2, such that v1 >= sqrt(v2) * @param linear_expression The linear expression v1. * @param quadratic_expression The quadratic expression v2. * @param tol The tolerance to determine if the matrix in v2 is positive * semidefinite or not. @see DecomposePositiveQuadraticForm for more * explanation. @default is 0. * @param eval_type The evaluation type when evaluating the lorentz cone * constraint in generic optimization. Refer to * LorentzConeConstraint::EvalType for more details. 
* @retval binding The newly added Lorentz cone constraint, together with the * bound variables. * @pre * 1. `v1` is a linear expression, in the form of c'*x + d. * 2. `v2` is a quadratic expression, in the form of *
   *          x'*Q*x + b'*x + a
   *    
* Also the quadratic expression has to be convex, namely Q is a * positive semidefinite matrix, and the quadratic expression needs * to be non-negative for any x. * @throws std::exception if the preconditions are not satisfied. * * Notice this constraint is equivalent to the vector [z;y] is within a * Lorentz cone, where *
   *  z = v1
   *  y = R * x + d
   * 
* while (R, d) satisfies y'*y = x'*Q*x + b'*x + a * For example, to add the Lorentz cone constraint * * x+1 >= sqrt(y² + 2y + x² + 4), * the user could call * @code{cc} * prog.AddLorentzConeConstraint(x+1, pow(y, 2) + 2 * y + pow(x, 2) + 4); * @endcode */ Binding AddLorentzConeConstraint( const symbolic::Expression& linear_expression, const symbolic::Expression& quadratic_expression, double tol = 0, LorentzConeConstraint::EvalType eval_type = LorentzConeConstraint::EvalType::kConvexSmooth); /** * Adds Lorentz cone constraint referencing potentially a subset of the * decision variables (defined in the vars parameter). * The linear expression @f$ z=Ax+b @f$ is in the Lorentz cone. * A vector \f$ z \in\mathbb{R}^n \f$ is in the Lorentz cone, if * * @f[ * z_0 \ge \sqrt{z_1^2 + ... + z_{n-1}^2} * @f] * @param A A @f$\mathbb{R}^{n\times m}@f$ matrix, whose number of columns * equals to the size of the decision variables. * @param b A @f$\mathbb{R}^n@f$ vector, whose number of rows equals to the * size of the decision variables. * @param vars The list of @f$ m @f$ decision variables. * @param eval_type The evaluation type when evaluating the lorentz cone * constraint in generic optimization. Refer to * LorentzConeConstraint::EvalType for more details. * @return The newly added Lorentz cone constraint. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddLorentzConeConstraint( const Eigen::Ref& A, const Eigen::Ref& b, const VariableRefList& vars, LorentzConeConstraint::EvalType eval_type = LorentzConeConstraint::EvalType::kConvexSmooth) { return AddLorentzConeConstraint(A, b, ConcatenateVariableRefList(vars), eval_type); } /** * Adds Lorentz cone constraint referencing potentially a subset of the * decision variables (defined in the vars parameter). * The linear expression @f$ z=Ax+b @f$ is in the Lorentz cone. * A vector \f$ z \in\mathbb{R}^n \f$ is in the Lorentz cone, if * * @f[ * z_0 \ge \sqrt{z_1^2 + ... 
+ z_{n-1}^2} * @f] * @param A A @f$\mathbb{R}^{n\times m}@f$ matrix, whose number of columns * equals to the size of the decision variables. * @param b A @f$\mathbb{R}^n@f$ vector, whose number of rows equals to the * size of the decision variables. * @param vars The Eigen vector of @f$ m @f$ decision variables. * @param eval_type The evaluation type when evaluating the lorentz cone * constraint in generic optimization. Refer to * LorentzConeConstraint::EvalType for more details. * @return The newly added Lorentz cone constraint. * * For example, to add the Lorentz cone constraint * * x+1 >= sqrt(y² + 2y + x² + 5) = sqrt((y+1)² + x² + 2²), * the user could call * @code{cc} * Eigen::Matrix A; * Eigen::Vector4d b; * A << 1, 0, 0, 1, 1, 0, 0, 0; * b << 1, 1, 0, 2; * // A * [x;y] + b = [x+1; y+1; x; 2] * prog.AddLorentzConeConstraint(A, b, Vector2(x, y)); * @endcode */ Binding AddLorentzConeConstraint( const Eigen::Ref& A, const Eigen::Ref& b, const Eigen::Ref& vars, LorentzConeConstraint::EvalType eval_type = LorentzConeConstraint::EvalType::kConvexSmooth); /** * Imposes that a vector @f$ x\in\mathbb{R}^m @f$ lies in Lorentz cone. Namely * @f[ * x_0 \ge \sqrt{x_1^2 + .. + x_{m-1}^2} * @f] * * x(0) >= sqrt(x(1)² + ... + x(m-1)²) * <--> * @param vars The stacked column of vars should lie within the Lorentz cone. * @param eval_type The evaluation type when evaluating the lorentz cone * constraint in generic optimization. Refer to * LorentzConeConstraint::EvalType for more details. * @return The newly added Lorentz cone constraint. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddLorentzConeConstraint( const VariableRefList& vars, LorentzConeConstraint::EvalType eval_type = LorentzConeConstraint::EvalType::kConvexSmooth) { return AddLorentzConeConstraint(ConcatenateVariableRefList(vars), eval_type); } /** * Imposes that a vector @f$ x\in\mathbb{R}^m @f$ lies in Lorentz cone. Namely * @f[ * x_0 \ge \sqrt{x_1^2 + .. 
+ x_{m-1}^2} * @f] * * x(0) >= sqrt(x(1)² + ... + x(m-1)²) * <--> * @param vars The stacked column of vars should lie within the Lorentz cone. * @param eval_type The evaluation type when evaluating the lorentz cone * constraint in generic optimization. Refer to * LorentzConeConstraint::EvalType for more details. * @return The newly added Lorentz cone constraint. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ template Binding AddLorentzConeConstraint( const Eigen::MatrixBase>& vars, LorentzConeConstraint::EvalType eval_type = LorentzConeConstraint::EvalType::kConvexSmooth) { Eigen::Matrix A(vars.rows(), vars.rows()); A.setIdentity(); Eigen::Matrix b(vars.rows()); b.setZero(); return AddLorentzConeConstraint(A, b, vars, eval_type); } /** * Adds a rotated Lorentz cone constraint referencing potentially a subset * of decision variables. The linear expression @f$ z=Ax+b @f$ is in rotated * Lorentz cone. * A vector \f$ z \in\mathbb{R}^n \f$ is in the rotated Lorentz cone, if * * @f[ * z_0z_1 \ge z_2^2 + ... + z_{n-1}^2 * @f] * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds rotated Lorentz cone constraint on the linear expression v1, v2 and * quadratic expression u, such that v1 * v2 >= u, v1 >= 0, v2 >= 0 * @param linear_expression1 The linear expression v1. * @param linear_expression2 The linear expression v2. * @param quadratic_expression The quadratic expression u. * @param tol The tolerance to determine if the matrix in v2 is positive * semidefinite or not. @see DecomposePositiveQuadraticForm for more * explanation. @default is 0. * @retval binding The newly added rotated Lorentz cone constraint, together * with the bound variables. * @pre * 1. `linear_expression1` is a linear (affine) expression, in the form of * v1 = c1'*x + d1. * 2. `linear_expression2` is a linear (affine) expression, in the form of * v2 = c2'*x + d2. * 2. 
`quadratic_expression` is a quadratic expression, in the form of *
   *          u = x'*Q*x + b'*x + a
   *    
* Also the quadratic expression has to be convex, namely Q is a * positive semidefinite matrix, and the quadratic expression needs * to be non-negative for any x. * @throws std::exception if the preconditions are not satisfied. * * For example, to add the rotated Lorentz cone constraint * * (x+1)(x+y) >= x²+z²+2z+5 * x+1 >= 0 * x+y >= 0 * The user could call * @code{cc} * prog.AddRotatedLorentzConeConstraint(x+1, x+y, pow(x, 2) + pow(z, 2) + * 2*z+5); * @endcode */ Binding AddRotatedLorentzConeConstraint( const symbolic::Expression& linear_expression1, const symbolic::Expression& linear_expression2, const symbolic::Expression& quadratic_expression, double tol = 0); /** * Adds a constraint that a symbolic expression @param v is in the rotated * Lorentz cone, i.e., * \f[ * v_0v_1 \ge v_2^2 + ... + v_{n-1}^2\\ * v_0 \ge 0, v_1 \ge 0 * \f] * @param v A linear expression of variables, \f$ v = A x + b\f$, where \f$ A, * b \f$ are given matrices of the correct size, \f$ x \f$ is the vector of * decision variables. * @retval binding The newly added rotated Lorentz cone constraint, together * with the bound variables. * * For example, to add the rotated Lorentz cone constraint * * (x+1)(x+y) >= x²+z²+2z+5 = x² + (z+1)² + 2² * x+1 >= 0 * x+y >= 0 * The user could call * @code{cc} * Eigen::Matrix v; * v << x+1, x+y, x, z+1, 2; * prog.AddRotatedLorentzConeConstraint(v); * @endcode */ Binding AddRotatedLorentzConeConstraint( const Eigen::Ref>& v); /** * Adds a rotated Lorentz cone constraint referencing potentially a subset * of decision variables, The linear expression @f$ z=Ax+b @f$ is in rotated * Lorentz cone. * A vector \f$ z \in\mathbb{R}^n \f$ is in the rotated Lorentz cone, if * * @f[ * z_0z_1 \ge z_2^2 + ... + z_{n-1}^2 * @f] * where @f$ A\in\mathbb{R}^{n\times m}, b\in\mathbb{R}^n@f$ are given * matrices. * @param A A matrix whose number of columns equals to the size of the * decision variables. 
* @param b A vector whose number of rows equals to the size of the decision * variables. * @param vars The decision variables on which the constraint is imposed. */ Binding AddRotatedLorentzConeConstraint( const Eigen::Ref& A, const Eigen::Ref& b, const VariableRefList& vars) { return AddRotatedLorentzConeConstraint(A, b, ConcatenateVariableRefList(vars)); } /** * Adds a rotated Lorentz cone constraint referencing potentially a subset * of decision variables, The linear expression @f$ z=Ax+b @f$ is in rotated * Lorentz cone. * A vector \f$ z \in\mathbb{R}^n \f$ is in the rotated Lorentz cone, if * * @f[ * z_0z_1 \ge z_2^2 + ... + z_{n-1}^2 * @f] * where @f$ A\in\mathbb{R}^{n\times m}, b\in\mathbb{R}^n@f$ are given * matrices. * @param A A matrix whose number of columns equals to the size of the * decision variables. * @param b A vector whose number of rows equals to the size of the decision * variables. * @param vars The decision variables on which the constraint is imposed. */ Binding AddRotatedLorentzConeConstraint( const Eigen::Ref& A, const Eigen::Ref& b, const Eigen::Ref& vars); /** * Impose that a vector @f$ x\in\mathbb{R}^m @f$ is in rotated Lorentz cone. * Namely * @f[ * x_0 x_1 \ge x_2^2 + ... + x_{m-1}^2\\ * x_0 \ge 0, x_1 \ge 0 * @f] * * x(0)*x(1) >= x(2)^2 + ... x(m-1)^2 * x(0) >= 0, x(1) >= 0 * <--> * @param vars The stacked column of vars lies in the rotated Lorentz cone. * @return The newly added rotated Lorentz cone constraint. */ Binding AddRotatedLorentzConeConstraint( const VariableRefList& vars) { return AddRotatedLorentzConeConstraint(ConcatenateVariableRefList(vars)); } /** * Impose that a vector @f$ x\in\mathbb{R}^m @f$ is in rotated Lorentz cone. * Namely * @f[ * x_0 x_1 \ge x_2^2 + ... + x_{m-1}^2\\ * x_0 \ge 0, x_1 \ge 0 * @f] * * x(0)*x(1) >= x(2)^2 + ... x(m-1)^2 * x(0) >= 0, x(1) >= 0 * <--> * @param vars The stacked column of vars lies in the rotated Lorentz cone. * @return The newly added rotated Lorentz cone constraint. 
*/ template Binding AddRotatedLorentzConeConstraint( const Eigen::MatrixBase>& vars) { Eigen::Matrix A(vars.rows(), vars.rows()); A.setIdentity(); Eigen::Matrix b(vars.rows()); b.setZero(); return AddRotatedLorentzConeConstraint(A, b, vars); } /** Add the convex quadratic constraint 0.5xᵀQx + bᵀx + c <= 0 as a * rotated Lorentz cone constraint [rᵀx+s, 1, Px+q] is in the rotated Lorentz * cone. When solving the optimization problem using conic solvers (like * Mosek, Gurobi, SCS, etc), it is numerically preferable to impose the * convex quadratic constraint as rotated Lorentz cone constraint. See * https://docs.mosek.com/latest/capi/prob-def-quadratic.html#a-recommendation * @throw exception if this quadratic constraint is not convex (Q is not * positive semidefinite) * @param Q The Hessian of the quadratic constraint. Should be positive * semidefinite. * @param b The linear coefficient of the quadratic constraint. * @param c The constant term of the quadratic constraint. * @param vars x in the documentation above. * @param psd_tol If the minimal eigenvalue of Q is smaller than -psd_tol, * then throw an exception. @default = 0. */ Binding AddQuadraticAsRotatedLorentzConeConstraint( const Eigen::Ref& Q, const Eigen::Ref& b, double c, const Eigen::Ref>& vars, double psd_tol = 0.); /** * Adds a linear complementarity constraints referencing a subset of * the decision variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds a linear complementarity constraints referencing a subset of * the decision variables. */ Binding AddLinearComplementarityConstraint( const Eigen::Ref& M, const Eigen::Ref& q, const VariableRefList& vars) { return AddLinearComplementarityConstraint(M, q, ConcatenateVariableRefList(vars)); } /** * Adds a linear complementarity constraints referencing a subset of * the decision variables. 
*/ Binding AddLinearComplementarityConstraint( const Eigen::Ref& M, const Eigen::Ref& q, const Eigen::Ref& vars); /** * Adds a polynomial constraint to the program referencing a subset * of the decision variables (defined in the vars parameter). */ Binding AddPolynomialConstraint( const Eigen::Ref>& polynomials, const std::vector& poly_vars, const Eigen::Ref& lb, const Eigen::Ref& ub, const VariableRefList& vars) { return AddPolynomialConstraint(polynomials, poly_vars, lb, ub, ConcatenateVariableRefList(vars)); } /** * Adds a polynomial constraint to the program referencing a subset * of the decision variables (defined in the vars parameter). */ Binding AddPolynomialConstraint( const Eigen::Ref>& polynomials, const std::vector& poly_vars, const Eigen::Ref& lb, const Eigen::Ref& ub, const Eigen::Ref& vars); /** * Adds a positive semidefinite constraint on a symmetric matrix. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds a positive semidefinite constraint on a symmetric matrix. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( std::shared_ptr con, const Eigen::Ref& symmetric_matrix_var); /** * Adds a positive semidefinite constraint on a symmetric matrix. * * @throws std::exception in Debug mode if @p symmetric_matrix_var is not * symmetric. * @param symmetric_matrix_var A symmetric MatrixDecisionVariable object. */ Binding AddPositiveSemidefiniteConstraint( const Eigen::Ref& symmetric_matrix_var); /** * Adds a positive semidefinite constraint on a symmetric matrix of symbolic * expressions @p e. We create a new symmetric matrix of variables M being * positive semidefinite, with the linear equality constraint e == M. * @param e Imposes constraint "e is positive semidefinite". * @pre{1. e is symmetric. * 2. e(i, j) is linear for all i, j * } * @return The newly added positive semidefinite constraint, with the bound * variable M that are also newly added. 
* * For example, to add a constraint that * * ⌈x + 1 2x + 3 x+y⌉ * |2x+ 3 2 0| is positive semidefinite * ⌊x + y 0 x⌋ * The user could call * @code{cc} * Matrix3 e * e << x+1, 2*x+3, x+y, * 2*x+3, 2, 0, * x+y, 0, x; * prog.AddPositiveSemidefiniteConstraint(e); * @endcode */ Binding AddPositiveSemidefiniteConstraint( const Eigen::Ref>& e) { // TODO(jwnimmer-tri) Move this whole function definition into the cc file. DRAKE_THROW_UNLESS(e.rows() == e.cols()); DRAKE_ASSERT(CheckStructuralEquality(e, e.transpose().eval())); const MatrixXDecisionVariable M = NewSymmetricContinuousVariables(e.rows()); // Adds the linear equality constraint that M = e. AddLinearEqualityConstraint( e - M, Eigen::MatrixXd::Zero(e.rows(), e.rows()), true); return AddPositiveSemidefiniteConstraint(M); } /** * Adds a linear matrix inequality constraint to the program. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds a linear matrix inequality constraint to the program. */ Binding AddLinearMatrixInequalityConstraint( const std::vector>& F, const VariableRefList& vars) { return AddLinearMatrixInequalityConstraint( F, ConcatenateVariableRefList(vars)); } /** * Adds a linear matrix inequality constraint to the program. */ Binding AddLinearMatrixInequalityConstraint( const std::vector>& F, const Eigen::Ref& vars); /** * Adds the constraint that a symmetric matrix is diagonally dominant with * non-negative diagonal entries. * A symmetric matrix X is diagonally dominant with non-negative diagonal * entries if * X(i, i) >= ∑ⱼ |X(i, j)| ∀ j ≠ i * namely in each row, the diagonal entry is larger than the sum of the * absolute values of all other entries in the same row. A matrix being * diagonally dominant with non-negative diagonals is a sufficient (but not * necessary) condition of a matrix being positive semidefinite. 
* Internally we will create a matrix Y as slack variables, such that Y(i, j) * represents the absolute value |X(i, j)| ∀ j ≠ i. The diagonal entries * Y(i, i) = X(i, i) * The users can refer to "DSOS and SDSOS Optimization: More Tractable * Alternatives to Sum of Squares and Semidefinite Optimization" by Amir Ali * Ahmadi and Anirudha Majumdar, with arXiv link * https://arxiv.org/abs/1706.02586 * @param X The matrix X. We will use 0.5(X+Xᵀ) as the "symmetric version" of * X. * @return Y The slack variable. Y(i, j) represents |X(i, j)| ∀ j ≠ i, with * the constraint Y(i, j) >= X(i, j) and Y(i, j) >= -X(i, j). Y is a symmetric * matrix. The diagonal entries Y(i, i) = X(i, i) */ MatrixX AddPositiveDiagonallyDominantMatrixConstraint( const Eigen::Ref>& X); /** * @anchor addsdd * @name scaled diagonally dominant matrix constraint * Adds the constraint that a symmetric matrix is scaled diagonally dominant * (sdd). A matrix X is sdd if there exists a diagonal matrix D, such that * the product DXD is diagonally dominant with non-negative diagonal entries, * namely * d(i)X(i, i) ≥ ∑ⱼ |d(j)X(i, j)| ∀ j ≠ i * where d(i) = D(i, i). * X being sdd is equivalent to the existence of symmetric matrices Mⁱʲ∈ ℝⁿˣⁿ, * i < j, such that all entries in Mⁱʲ are 0, except Mⁱʲ(i, i), Mⁱʲ(i, j), * Mⁱʲ(j, j). (Mⁱʲ(i, i), Mⁱʲ(j, j), Mⁱʲ(i, j)) is in the rotated * Lorentz cone, and X = ∑ᵢⱼ Mⁱʲ. * * The users can refer to "DSOS and SDSOS Optimization: More Tractable * Alternatives to Sum of Squares and Semidefinite Optimization" by Amir Ali * Ahmadi and Anirudha Majumdar, with arXiv link * https://arxiv.org/abs/1706.02586. */ //@{ /** * This is an overloaded variant of @ref addsdd * "scaled diagonally dominant matrix constraint" * @param X The matrix X to be constrained scaled diagonally dominant. * X. * @pre X(i, j) should be a linear expression of decision variables. * @return M A vector of vectors of 2 x 2 symmetric matrices M. For i < j, * M[i][j] is *
   * [Mⁱʲ(i, i), Mⁱʲ(i, j)]
   * [Mⁱʲ(i, j), Mⁱʲ(j, j)].
   * 
* Note that M[i][j](0, 1) = Mⁱʲ(i, j) = (X(i, j) + X(j, i)) / 2 * for i >= j, M[i][j] is the zero matrix. * * @pydrake_mkdoc_identifier{expression} */ std::vector>> AddScaledDiagonallyDominantMatrixConstraint( const Eigen::Ref>& X); /** * This is an overloaded variant of @ref addsdd * "scaled diagonally dominant matrix constraint" * @param X The symmetric matrix X to be constrained scaled diagonally * dominant. * @return M For i < j M[i][j] contains the slack variables, mentioned in * @ref addsdd "scaled diagonally dominant matrix constraint". For i >= j, * M[i][j] contains dummy variables. * * @pydrake_mkdoc_identifier{variable} */ std::vector>> AddScaledDiagonallyDominantMatrixConstraint( const Eigen::Ref>& X); //@} /** * Adds constraints that a given polynomial @p p is a sums-of-squares (SOS), * that is, @p p can be decomposed into `mᵀQm`, where m is the @p * monomial_basis. It returns the coefficients matrix Q, which is positive * semidefinite. * @param type The type of the polynomial. @default is kSos, but the user can * also use kSdsos and kDsos. Refer to NonnegativePolynomial for details on * different types of sos polynomials. * @param gram_name The name of the gram matrix for print out. * * @note It calls `Reparse` to enforce `p` to have this MathematicalProgram's * indeterminates if necessary. */ MatrixXDecisionVariable AddSosConstraint( const symbolic::Polynomial& p, const Eigen::Ref>& monomial_basis, NonnegativePolynomial type = NonnegativePolynomial::kSos, const std::string& gram_name = "S"); /** * Adds constraints that a given polynomial @p p is a sums-of-squares (SOS), * that is, @p p can be decomposed into `mᵀQm`, where m is a monomial * basis selected from the sparsity of @p p. It returns a pair of constraint * bindings expressing: * - The coefficients matrix Q, which is positive semidefinite. * - The monomial basis m. * @param type The type of the polynomial. @default is kSos, but the user can * also use kSdsos and kDsos. 
Refer to NonnegativePolynomial for the details * on different type of sos polynomials. * @param gram_name The name of the gram matrix for print out. * * @note It calls `Reparse` to enforce `p` to have this MathematicalProgram's * indeterminates if necessary. */ std::pair> AddSosConstraint(const symbolic::Polynomial& p, NonnegativePolynomial type = NonnegativePolynomial::kSos, const std::string& gram_name = "S"); /** * Adds constraints that a given symbolic expression @p e is a * sums-of-squares (SOS), that is, @p p can be decomposed into `mᵀQm`, * where m is the @p monomial_basis. Note that it decomposes @p e into a * polynomial with respect to `indeterminates()` in this mathematical * program. It returns the coefficients matrix Q, which is positive * semidefinite. * @param type Refer to NonnegativePolynomial class documentation. * @param gram_name The name of the gram matrix for print out. */ MatrixXDecisionVariable AddSosConstraint( const symbolic::Expression& e, const Eigen::Ref>& monomial_basis, NonnegativePolynomial type = NonnegativePolynomial::kSos, const std::string& gram_name = "S"); /** * Adds constraints that a given symbolic expression @p e is a sums-of-squares * (SOS), that is, @p e can be decomposed into `mᵀQm`. Note that it decomposes * @p e into a polynomial with respect to `indeterminates()` in this * mathematical program. It returns a pair expressing: * - The coefficients matrix Q, which is positive semidefinite. * - The monomial basis m. * @param type Refer to NonnegativePolynomial class documentation. * @param gram_name The name of the gram matrix for print out. */ std::pair> AddSosConstraint(const symbolic::Expression& e, NonnegativePolynomial type = NonnegativePolynomial::kSos, const std::string& gram_name = "S"); /** * Constraining that two polynomials are the same (i.e., they have the same * coefficients for each monomial). This function is often used in * sum-of-squares optimization. 
* We will impose the linear equality constraint that the coefficient of a * monomial in @p p1 is the same as the coefficient of the same monomial in @p * p2. * @param p1 Note that p1's indeterminates should have been registered as * indeterminates in this MathematicalProgram object, and p1's coefficients * are affine functions of decision variables in this MathematicalProgram * object. * @param p2 Note that p2's indeterminates should have been registered as * indeterminates in this MathematicalProgram object, and p2's coefficients * are affine functions of decision variables in this MathematicalProgram * object. * * @note It calls `Reparse` to enforce `p1` and `p2` to have this * MathematicalProgram's indeterminates. */ std::vector> AddEqualityConstraintBetweenPolynomials(const symbolic::Polynomial& p1, const symbolic::Polynomial& p2); /** * Adds the exponential cone constraint that * z = binding.evaluator()->A() * binding.variables() + * binding.evaluator()->b() * should be in the exponential cone. Namely * {(z₀, z₁, z₂) | z₀ ≥ z₁ * exp(z₂ / z₁), z₁ > 0}. * @param binding The binding of ExponentialConeConstraint and its bound * variables. * * @exclude_from_pydrake_mkdoc{Not bound in pydrake.} */ Binding AddConstraint( const Binding& binding); /** * Adds an exponential cone constraint, that z = A * vars + b should be in * the exponential cone. Namely {z₀, z₁, z₂ | z₀ ≥ z₁ * exp(z₂ / z₁), z₁ > * 0}. * @param A The A matrix in the documentation above. A must have 3 rows. * @param b The b vector in the documentation above. * @param vars The variables bound with this constraint. */ Binding AddExponentialConeConstraint( const Eigen::Ref>& A, const Eigen::Ref& b, const Eigen::Ref& vars); /** * Add the constraint that z is in the exponential cone. * @param z The expression in the exponential cone. * @pre each entry in `z` is a linear expression of the decision variables. 
*/ Binding AddExponentialConeConstraint( const Eigen::Ref>& z); /** * Gets the initial guess for a single variable. * @pre @p decision_variable has been registered in the optimization program. * @throws std::exception if the pre condition is not satisfied. */ [[nodiscard]] double GetInitialGuess( const symbolic::Variable& decision_variable) const; /** * Gets the initial guess for some variables. * @pre Each variable in @p decision_variable_mat has been registered in the * optimization program. * @throws std::exception if the pre condition is not satisfied. */ template typename std::enable_if_t< std::is_same_v, MatrixLikewise> GetInitialGuess( const Eigen::MatrixBase& decision_variable_mat) const { MatrixLikewise decision_variable_values( decision_variable_mat.rows(), decision_variable_mat.cols()); for (int i = 0; i < decision_variable_mat.rows(); ++i) { for (int j = 0; j < decision_variable_mat.cols(); ++j) { decision_variable_values(i, j) = GetInitialGuess(decision_variable_mat(i, j)); } } return decision_variable_values; } /** * Sets the initial guess for a single variable @p decision_variable. * The guess is stored as part of this program. * @pre decision_variable is a registered decision variable in the program. * @throws std::exception if precondition is not satisfied. */ void SetInitialGuess(const symbolic::Variable& decision_variable, double variable_guess_value); /** * Sets the initial guess for the decision variables stored in * @p decision_variable_mat to be @p x0. * The guess is stored as part of this program. 
*/ template void SetInitialGuess(const Eigen::MatrixBase& decision_variable_mat, const Eigen::MatrixBase& x0) { DRAKE_DEMAND(decision_variable_mat.rows() == x0.rows()); DRAKE_DEMAND(decision_variable_mat.cols() == x0.cols()); for (int i = 0; i < decision_variable_mat.rows(); ++i) { for (int j = 0; j < decision_variable_mat.cols(); ++j) { SetInitialGuess(decision_variable_mat(i, j), x0(i, j)); } } } /** * Set the initial guess for ALL decision variables. * Note that variables begin with a default initial guess of NaN to indicate * that no guess is available. * @param x0 A vector of appropriate size (num_vars() x 1). */ template void SetInitialGuessForAllVariables(const Eigen::MatrixBase& x0) { DRAKE_ASSERT(x0.rows() == num_vars() && x0.cols() == 1); x_initial_guess_ = x0; } /** * Updates the value of a single @p decision_variable inside the @p values * vector to be @p decision_variable_new_value. * The other decision variables' values in @p values are unchanged. * @param decision_variable a registered decision variable in this program. * @param decision_variable_new_value the variable's new values. * @param[in,out] values The vector to be tweaked; must be of size num_vars(). */ void SetDecisionVariableValueInVector( const symbolic::Variable& decision_variable, double decision_variable_new_value, EigenPtr values) const; /** * Updates the values of some @p decision_variables inside the @p values * vector to be @p decision_variables_new_values. * The other decision variables' values in @p values are unchanged. * @param decision_variables registered decision variables in this program. * @param decision_variables_new_values the variables' respective new values; * must have the same rows() and cols() sizes and @p decision_variables. * @param[in,out] values The vector to be tweaked; must be of size num_vars(). 
*/ void SetDecisionVariableValueInVector( const Eigen::Ref& decision_variables, const Eigen::Ref& decision_variables_new_values, EigenPtr values) const; /** * @anchor set_solver_option * @name Set solver options * Set the options (parameters) for a specific solver. Refer to SolverOptions * class for more details on the supported options of each solver. */ //@{ /** * See @ref set_solver_option for more details. * Set the double-valued options. * @pydrake_mkdoc_identifier{double_option} */ void SetSolverOption(const SolverId& solver_id, const std::string& solver_option, double option_value) { solver_options_.SetOption(solver_id, solver_option, option_value); } /** * See @ref set_solver_option for more details. * Set the integer-valued options. * @pydrake_mkdoc_identifier{int_option} */ void SetSolverOption(const SolverId& solver_id, const std::string& solver_option, int option_value) { solver_options_.SetOption(solver_id, solver_option, option_value); } /** * See @ref set_solver_option for more details. * Set the string-valued options. * @pydrake_mkdoc_identifier{string_option} */ void SetSolverOption(const SolverId& solver_id, const std::string& solver_option, const std::string& option_value) { solver_options_.SetOption(solver_id, solver_option, option_value); } /** * Overwrite the stored solver options inside MathematicalProgram with the * provided solver options. */ void SetSolverOptions(const SolverOptions& solver_options) { solver_options_ = solver_options; } //@} /** * Returns the solver options stored inside MathematicalProgram. 
*/ const SolverOptions& solver_options() const { return solver_options_; } const std::unordered_map& GetSolverOptionsDouble( const SolverId& solver_id) const { return solver_options_.GetOptionsDouble(solver_id); } const std::unordered_map& GetSolverOptionsInt( const SolverId& solver_id) const { return solver_options_.GetOptionsInt(solver_id); } const std::unordered_map& GetSolverOptionsStr( const SolverId& solver_id) const { return solver_options_.GetOptionsStr(solver_id); } /** * Getter for all callbacks. */ const std::vector>& visualization_callbacks() const { return visualization_callbacks_; } /** * Getter for all generic costs. */ const std::vector>& generic_costs() const { return generic_costs_; } // e.g. for snopt_user_fun /** * Getter for all generic constraints */ const std::vector>& generic_constraints() const { return generic_constraints_; } // e.g. for snopt_user_fun /** * Getter for linear equality constraints. */ const std::vector>& linear_equality_constraints() const { return linear_equality_constraints_; } /** Getter for linear costs. */ const std::vector>& linear_costs() const { return linear_costs_; } /** Getter for quadratic costs. */ const std::vector>& quadratic_costs() const { return quadratic_costs_; } /** Getter for l2norm costs. */ const std::vector>& l2norm_costs() const { return l2norm_costs_; } /** Getter for linear constraints. */ const std::vector>& linear_constraints() const { return linear_constraints_; } /** Getter for quadratic constraints. */ const std::vector>& quadratic_constraints() const { return quadratic_constraints_; } /** Getter for Lorentz cone constraints. */ const std::vector>& lorentz_cone_constraints() const { return lorentz_cone_constraint_; } /** Getter for rotated Lorentz cone constraints. */ const std::vector>& rotated_lorentz_cone_constraints() const { return rotated_lorentz_cone_constraint_; } /** Getter for positive semidefinite constraints. 
*/ const std::vector>& positive_semidefinite_constraints() const { return positive_semidefinite_constraint_; } /** Getter for linear matrix inequality constraints. */ const std::vector>& linear_matrix_inequality_constraints() const { return linear_matrix_inequality_constraint_; } /** Getter for exponential cone constraints. */ const std::vector>& exponential_cone_constraints() const { return exponential_cone_constraints_; } /** Getter for all bounding box constraints */ const std::vector>& bounding_box_constraints() const { return bbox_constraints_; } /** Getter for all linear complementarity constraints.*/ const std::vector>& linear_complementarity_constraints() const { return linear_complementarity_constraints_; } /** * Getter returning all costs. * @returns Vector of all cost bindings. * @note The group ordering may change as more cost types are added. */ [[nodiscard]] std::vector> GetAllCosts() const; /** * Getter returning all linear constraints (both linear equality and * inequality constraints). * @returns Vector of all linear constraint bindings. */ [[nodiscard]] std::vector> GetAllLinearConstraints() const; /** * Getter for returning all constraints. * @returns Vector of all constraint bindings. * @note The group ordering may change as more constraint types are added. */ [[nodiscard]] std::vector> GetAllConstraints() const; /** Getter for number of variables in the optimization program */ int num_vars() const { return decision_variables_.size(); } /** Gets the number of indeterminates in the optimization program */ int num_indeterminates() const { return indeterminates_.size(); } /** Getter for the initial guess */ const Eigen::VectorXd& initial_guess() const { return x_initial_guess_; } /** Returns the index of the decision variable. Internally the solvers thinks * all variables are stored in an array, and it accesses each individual * variable using its index. This index is used when adding constraints * and costs for each solver. 
* @pre{@p var is a decision variable in the mathematical program, otherwise * this function throws a runtime error.} */ [[nodiscard]] int FindDecisionVariableIndex( const symbolic::Variable& var) const; /** * Returns the indices of the decision variables. Internally the solvers * thinks all variables are stored in an array, and it accesses each * individual * variable using its index. This index is used when adding constraints * and costs for each solver. * @pre{@p vars are decision variables in the mathematical program, otherwise * this function throws a runtime error.} */ [[nodiscard]] std::vector FindDecisionVariableIndices( const Eigen::Ref& vars) const; /** Returns the index of the indeterminate. Internally a solver * thinks all indeterminates are stored in an array, and it accesses each * individual indeterminate using its index. This index is used when adding * constraints and costs for each solver. * @pre @p var is a indeterminate in the mathematical program, * otherwise this function throws a runtime error. */ [[nodiscard]] size_t FindIndeterminateIndex( const symbolic::Variable& var) const; /** * Evaluates the value of some binding, for some input value for all * decision variables. * @param binding A Binding whose variables are decision variables in this * program. * @param prog_var_vals The value of all the decision variables in this * program. * @throws std::exception if the size of `prog_var_vals` is invalid. */ template typename std::enable_if_t::value, VectorX> EvalBinding(const Binding& binding, const Eigen::MatrixBase& prog_var_vals) const { using Scalar = typename DerivedX::Scalar; if (prog_var_vals.rows() != num_vars()) { std::ostringstream oss; oss << "The input binding variable is not in the right size. 
Expects " << num_vars() << " rows, but it actually has " << prog_var_vals.rows() << " rows.\n"; throw std::logic_error(oss.str()); } VectorX binding_x(binding.GetNumElements()); VectorX binding_y(binding.evaluator()->num_outputs()); for (int i = 0; i < static_cast(binding.GetNumElements()); ++i) { binding_x(i) = prog_var_vals(FindDecisionVariableIndex(binding.variables()(i))); } binding.evaluator()->Eval(binding_x, &binding_y); return binding_y; } /** * Evaluates a set of bindings (plural version of `EvalBinding`). * @param bindings List of bindings. * @param prog * @param prog_var_vals The value of all the decision variables in this * program. * @return All binding values, concatenated into a single vector. * @throws std::exception if the size of `prog_var_vals` is invalid. */ template typename std::enable_if_t::value, VectorX> EvalBindings(const std::vector>& bindings, const Eigen::MatrixBase& prog_var_vals) const { // TODO(eric.cousineau): Minimize memory allocations when it becomes a // major performance bottleneck. using Scalar = typename DerivedX::Scalar; int num_y{}; for (auto& binding : bindings) { num_y += binding.evaluator()->num_outputs(); } VectorX y(num_y); int offset_y{}; for (auto& binding : bindings) { VectorX binding_y = EvalBinding(binding, prog_var_vals); y.segment(offset_y, binding_y.size()) = binding_y; offset_y += binding_y.size(); } DRAKE_DEMAND(offset_y == num_y); return y; } /** * Given the value of all decision variables, namely * this.decision_variable(i) takes the value prog_var_vals(i), returns the * vector that contains the value of the variables in binding.variables(). * @param binding binding.variables() must be decision variables in this * MathematicalProgram. * @param prog_var_vals The value of ALL the decision variables in this * program. * @return binding_variable_vals binding_variable_vals(i) is the value of * binding.variables()(i) in prog_var_vals. 
*/ template typename std::enable_if_t::value, VectorX> GetBindingVariableValues( const Binding& binding, const Eigen::MatrixBase& prog_var_vals) const { DRAKE_DEMAND(prog_var_vals.rows() == num_vars()); VectorX result(binding.GetNumElements()); for (int i = 0; i < static_cast(binding.GetNumElements()); ++i) { result(i) = prog_var_vals(FindDecisionVariableIndex(binding.variables()(i))); } return result; } /** Evaluates all visualization callbacks registered with the * MathematicalProgram. * * @param prog_var_vals The value of all the decision variables in this * program. * @throws std::exception if the size does not match. */ void EvalVisualizationCallbacks( const Eigen::Ref& prog_var_vals) const; /** * Evaluates the evaluator in @p binding at the initial guess. * @return The value of @p binding at the initial guess. */ template Eigen::VectorXd EvalBindingAtInitialGuess(const Binding& binding) const { return EvalBinding(binding, x_initial_guess_); } /** * Evaluates CheckSatisfied for the constraint in @p binding using the value * of ALL of the decision variables in this program. * @throws std::exception if the size of `prog_var_vals` is invalid. */ [[nodiscard]] bool CheckSatisfied( const Binding& binding, const Eigen::Ref& prog_var_vals, double tol = 1e-6) const; /** * Evaluates CheckSatisfied for the all of the constraints in @p binding using * the value of ALL of the decision variables in this program. * @returns true iff all of the constraints are satisfied. * @throws std::exception if the size of `prog_var_vals` is invalid. * @pydrake_mkdoc_identifier{vector} */ [[nodiscard]] bool CheckSatisfied( const std::vector>& bindings, const Eigen::Ref& prog_var_vals, double tol = 1e-6) const; /** * Evaluates CheckSatisfied for the constraint in @p binding at the initial * guess. 
*/ [[nodiscard]] bool CheckSatisfiedAtInitialGuess( const Binding& binding, double tol = 1e-6) const; /** * Evaluates CheckSatisfied for the all of the constraints in @p bindings at * the initial guess. * @returns true iff all of the constraints are satisfied. * @pydrake_mkdoc_identifier{vector} */ [[nodiscard]] bool CheckSatisfiedAtInitialGuess( const std::vector>& bindings, double tol = 1e-6) const; /** Getter for all decision variables in the program. */ Eigen::Map> decision_variables() const { return Eigen::Map>( decision_variables_.data(), decision_variables_.size()); } /** Getter for the decision variable with index @p i in the program. */ const symbolic::Variable& decision_variable(int i) const { DRAKE_ASSERT(i >= 0); DRAKE_ASSERT(i < static_cast(decision_variables_.size())); return decision_variables_[i]; } /** Getter for all indeterminates in the program. */ Eigen::Map> indeterminates() const { return Eigen::Map>( indeterminates_.data(), indeterminates_.size()); } /** Getter for the indeterminate with index @p i in the program. */ const symbolic::Variable& indeterminate(int i) const { DRAKE_ASSERT(i >= 0); DRAKE_ASSERT(i < static_cast(indeterminates_.size())); return indeterminates_[i]; } /// Getter for the required capability on the solver, given the /// cost/constraint/variable types in the program. const ProgramAttributes& required_capabilities() const { return required_capabilities_; } /** * Returns the mapping from a decision variable ID to its index in the vector * containing all the decision variables in the mathematical program. */ const std::unordered_map& decision_variable_index() const { return decision_variable_index_; } /** * Returns the mapping from an indeterminate ID to its index in the vector * containing all the indeterminates in the mathematical program. */ const std::unordered_map& indeterminates_index() const { return indeterminates_index_; } /** * @anchor variable_scaling * @name Variable scaling * Some solvers (e.g. 
SNOPT) work better if the decision variables values * are on the same scale. Hence, internally we scale the variable as * snopt_var_value = var_value / scaling_factor. * This scaling factor is only used inside the solve, so * users don't need to manually scale the variables every time they appears in * cost and constraints. When the users set the initial guess, or getting the * result from MathematicalProgramResult::GetSolution(), the values are * unscaled. Namely, MathematicalProgramResult::GetSolution(var) returns the * value of var, not var_value / scaling_factor. * * The feature of variable scaling is currently only implemented for SNOPT and * OSQP. */ //@{ /** * Returns the mapping from a decision variable index to its scaling factor. * * See @ref variable_scaling "Variable scaling" for more information. */ const std::unordered_map& GetVariableScaling() const { return var_scaling_map_; } /** * Setter for the scaling @p s of decision variable @p var. * @param var the decision variable to be scaled. * @param s scaling factor (must be positive). * * See @ref variable_scaling "Variable scaling" for more information. */ void SetVariableScaling(const symbolic::Variable& var, double s); /** * Clears the scaling factors for decision variables. * * See @ref variable_scaling "Variable scaling" for more information. */ void ClearVariableScaling() { var_scaling_map_.clear(); } //@} /** * @anchor remove_cost_constraint * @name Remove costs or constraints * Removes costs or constraints from this program. If this program contains * multiple costs/constraints objects matching the given argument, then all of * these costs/constraints are removed. If this program doesn't contain the * specified cost/constraint, then the code does nothing. We regard two * costs/constraints being equal, if their evaluators point to the same * object, and the associated variables are also the same. 
* @note If two costs/constraints represent the same expression, but their * evaluators point to different objects, then they are NOT regarded the same. * For example, if we have * @code{.cc} * auto cost1 = prog.AddLinearCost(x[0] + x[1]); * auto cost2 = prog.AddLinearCost(x[0] + x[1]); * // cost1 and cost2 represent the same cost, but cost1.evaluator() and * // cost2.evaluator() point to different objects. So after removing cost1, * // cost2 still lives in prog. * prog.RemoveCost(cost1); * // This will print true. * std::cout << (prog.linear_costs()[0] == cost2) << "\n"; * @endcode */ // @{ /** Removes @p cost from this mathematical program. * See @ref remove_cost_constraint "Remove costs or constraints" for more * details. * @return number of cost objects removed from this program. If this program * doesn't contain @p cost, then returns 0. If this program contains multiple * @p cost objects, then returns the repetition of @p cost in this program. */ int RemoveCost(const Binding& cost); /** Removes @p constraint from this mathematical program. * See @ref remove_cost_constraint "Remove costs or constraints" for more * details. * @return number of constraint objects removed from this program. If this * program doesn't contain @p constraint, then returns 0. If this program * contains multiple * @p constraint objects, then returns the repetition of @p constraint in this * program. */ int RemoveConstraint(const Binding& constraint); //@} private: // Copy constructor is private for use in implementing Clone(). explicit MathematicalProgram(const MathematicalProgram&); static void AppendNanToEnd(int new_var_size, Eigen::VectorXd* vector); // Removes a binding of a constraint/constraint, @p removal, from a given // vector of bindings, @p existings. If @p removal does not belong to @p // existings, then do nothing. If @p removal appears multiple times in @p // existings, then all matching terms are removed. 
After removing @p removal, // we check if we need to erase @p affected_capability from // required_capabilities_. // @return The number of @p removal object in @p existings. template int RemoveCostOrConstraintImpl(const Binding& removal, ProgramAttribute affected_capability, std::vector>* existings); /** * Check and update if this program requires @p query_capability * This method should be called after changing stored * costs/constraints/callbacks. */ void UpdateRequiredCapability(ProgramAttribute query_capability); template void NewVariables_impl( VarType type, const T& names, bool is_symmetric, Eigen::Ref decision_variable_matrix) { CheckVariableType(type); int rows = decision_variable_matrix.rows(); int cols = decision_variable_matrix.cols(); DRAKE_ASSERT(!is_symmetric || rows == cols); int num_new_vars = 0; if (!is_symmetric) { num_new_vars = rows * cols; } else { num_new_vars = rows * (rows + 1) / 2; } DRAKE_ASSERT(static_cast(names.size()) == num_new_vars); int row_index = 0; int col_index = 0; for (int i = 0; i < num_new_vars; ++i) { decision_variables_.emplace_back(names[i], type); const int new_var_index = decision_variables_.size() - 1; decision_variable_index_.insert(std::make_pair( decision_variables_[new_var_index].get_id(), new_var_index)); decision_variable_matrix(row_index, col_index) = decision_variables_[new_var_index]; // If the matrix is not symmetric, then store the variable in column // major. if (!is_symmetric) { if (row_index + 1 < rows) { ++row_index; } else { ++col_index; row_index = 0; } } else { // If the matrix is symmetric, then the decision variables are the lower // triangular part of the symmetric matrix, also stored in column major. 
if (row_index != col_index) { decision_variable_matrix(col_index, row_index) = decision_variable_matrix(row_index, col_index); } if (row_index + 1 < rows) { ++row_index; } else { ++col_index; row_index = col_index; } } } AppendNanToEnd(num_new_vars, &x_initial_guess_); } MatrixXDecisionVariable NewVariables(VarType type, int rows, int cols, bool is_symmetric, const std::vector& names); template void NewIndeterminates_impl( const T& names, Eigen::Ref indeterminates_matrix) { const int rows = indeterminates_matrix.rows(); const int cols = indeterminates_matrix.cols(); const int num_new_vars = rows * cols; DRAKE_ASSERT(static_cast(names.size()) == num_new_vars); int row_index = 0; int col_index = 0; for (int i = 0; i < num_new_vars; ++i) { indeterminates_.emplace_back(names[i]); const int new_var_index = indeterminates_.size() - 1; indeterminates_index_.insert(std::make_pair( indeterminates_[new_var_index].get_id(), new_var_index)); indeterminates_matrix(row_index, col_index) = indeterminates_[new_var_index]; // store the indeterminate in column major. if (row_index + 1 < rows) { ++row_index; } else { ++col_index; row_index = 0; } } } /* * Given a matrix of decision variables, checks if every entry in the * matrix is a decision variable in the program; throws a runtime * error if any variable is not a decision variable in the program. * @param vars A vector of variables. */ void CheckIsDecisionVariable(const VectorXDecisionVariable& vars) const; /* * Ensure a binding is valid *before* adding it to the program. * @pre The binding has not yet been registered. * @pre The decision variables have been registered. * @throws std::exception if the binding is invalid. * @returns true if the binding is non-trivial (>= 1 output); when false, * this program should avoid adding the binding to its internal state. */ template [[nodiscard]] bool CheckBinding(const Binding& binding) const; /* * Adds new variables to MathematicalProgram. 
*/ template MatrixDecisionVariable NewVariables( VarType type, const typename NewVariableNames::type& names, int rows, int cols) { DRAKE_DEMAND(rows >= 0 && cols >= 0); MatrixDecisionVariable decision_variable_matrix; decision_variable_matrix.resize(rows, cols); NewVariables_impl(type, names, false, decision_variable_matrix); return decision_variable_matrix; } /* * Adds symmetric matrix variables to optimization program. Only the lower * triangular part of the matrix is used as decision variables. * @param names The names of the stacked columns of the lower triangular part * of the matrix. */ template MatrixDecisionVariable NewSymmetricVariables( VarType type, const typename NewSymmetricVariableNames::type& names, int rows = Rows) { MatrixDecisionVariable decision_variable_matrix(rows, rows); NewVariables_impl(type, names, true, decision_variable_matrix); return decision_variable_matrix; } /* * Create a new free polynomial, and add its coefficients as decision * variables. */ symbolic::Polynomial NewFreePolynomialImpl( const symbolic::Variables& indeterminates, int degree, const std::string& coeff_name, // TODO(jwnimmer-tri) Fix this to not depend on all of "monomial_util.h" // for just this tiny enum (e.g., use a bare int == 0,1,2 instead). symbolic::internal::DegreeType degree_type); void CheckVariableType(VarType var_type); // maps the ID of a symbolic variable to the index of the variable stored // in the optimization program. std::unordered_map decision_variable_index_{}; // Use std::vector here instead of Eigen::VectorX because std::vector performs // much better when pushing new variables into the container. std::vector decision_variables_; std::unordered_map indeterminates_index_; // Use std::vector here instead of Eigen::VectorX because std::vector performs // much better when pushing new variables into the container. 
std::vector indeterminates_; std::vector> visualization_callbacks_; std::vector> generic_costs_; std::vector> quadratic_costs_; std::vector> linear_costs_; std::vector> l2norm_costs_; // note: linear_constraints_ does not include linear_equality_constraints_ std::vector> generic_constraints_; std::vector> linear_constraints_; std::vector> linear_equality_constraints_; std::vector> bbox_constraints_; std::vector> quadratic_constraints_; std::vector> lorentz_cone_constraint_; std::vector> rotated_lorentz_cone_constraint_; std::vector> positive_semidefinite_constraint_; std::vector> linear_matrix_inequality_constraint_; std::vector> exponential_cone_constraints_; // Invariant: The bindings in this list must be non-overlapping, when calling // Linear Complementarity solver like Moby. If this constraint is solved // through a nonlinear optimization solver (like SNOPT) instead, then we allow // the bindings to be overlapping. // TODO(ggould-tri) can this constraint be relaxed? std::vector> linear_complementarity_constraints_; Eigen::VectorXd x_initial_guess_; // The actual per-solver customization options. SolverOptions solver_options_; ProgramAttributes required_capabilities_; std::unordered_map var_scaling_map_{}; }; std::ostream& operator<<(std::ostream& os, const MathematicalProgram& prog); } // namespace solvers } // namespace drake DRAKE_FORMATTER_AS(, drake::solvers, MathematicalProgram, x, x.to_string())