Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1""" 

2A top-level linear programming interface. Currently this interface solves 

3linear programming problems via the Simplex and Interior-Point methods. 

4 

5.. versionadded:: 0.15.0 

6 

7Functions 

8--------- 

9.. autosummary:: 

10 :toctree: generated/ 

11 

12 linprog 

13 linprog_verbose_callback 

14 linprog_terse_callback 

15 

16""" 

17 

18import numpy as np 

19 

20from .optimize import OptimizeResult, OptimizeWarning 

21from warnings import warn 

22from ._linprog_ip import _linprog_ip 

23from ._linprog_simplex import _linprog_simplex 

24from ._linprog_rs import _linprog_rs 

25from ._linprog_util import ( 

26 _parse_linprog, _presolve, _get_Abc, _postprocess, _LPProblem, _autoscale) 

27from copy import deepcopy 

28 

29__all__ = ['linprog', 'linprog_verbose_callback', 'linprog_terse_callback'] 

30 

31__docformat__ = "restructuredtext en" 

32 

33 

def linprog_verbose_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.
    This callback produces detailed output to sys.stdout before each iteration
    and after the final iteration of the simplex algorithm.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint. If the slack is zero, then the
            corresponding constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints, that is,
            ``b - A_eq @ x``
        phase : int
            The phase of the optimization being executed. In phase 1 a basic
            feasible solution is sought and the T has an additional row
            representing an alternate objective function.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    x = res['x']
    fun = res['fun']
    phase = res['phase']
    status = res['status']
    nit = res['nit']
    message = res['message']
    complete = res['complete']

    # Temporarily widen the print options so the solution vector fits on one
    # line; the previous settings are restored before returning.
    saved_printoptions = np.get_printoptions()
    np.set_printoptions(linewidth=500,
                        formatter={'float': lambda v: "{0: 12.4f}".format(v)})
    if status:
        # Nonzero status means the solver stopped abnormally.
        # NOTE: the original code called ``.format(nit)`` on this header even
        # though it has no replacement fields; the dead argument is removed.
        print('--------- Simplex Early Exit -------\n')
        print('The simplex method exited early with status {0:d}'.format(status))
        print(message)
    elif complete:
        print('--------- Simplex Complete --------\n')
        print('Iterations required: {}'.format(nit))
    else:
        print('--------- Iteration {0:d} ---------\n'.format(nit))

    if nit > 0:
        # In phase 1 the reported value is the auxiliary (pseudo-) objective,
        # not the user's objective.
        if phase == 1:
            print('Current Pseudo-Objective Value:')
        else:
            print('Current Objective Value:')
        print('f = ', fun)
        print()
        print('Current Solution Vector:')
        print('x = ', x)
        print()

    np.set_printoptions(**saved_printoptions)

109 

110 

def linprog_terse_callback(res):
    """
    A sample callback function demonstrating the linprog callback interface.

    Prints one brief line per iteration to sys.stdout: the iteration number
    followed by the current solution vector. A column header is emitted
    before the first (zeroth) iteration.

    Parameters
    ----------
    res : A `scipy.optimize.OptimizeResult` consisting of the following fields:

        x : 1-D array
            The independent variable vector which optimizes the linear
            programming problem.
        fun : float
            Value of the objective function.
        success : bool
            True if the algorithm succeeded in finding an optimal solution.
        slack : 1-D array
            The values of the slack variables. Each slack variable corresponds
            to an inequality constraint; a zero slack means the corresponding
            constraint is active.
        con : 1-D array
            The (nominally zero) residuals of the equality constraints,
            ``b - A_eq @ x``.
        phase : int
            The phase of the optimization being executed.
        status : int
            An integer representing the exit status of the optimization::

                 0 : Optimization terminated successfully
                 1 : Iteration limit reached
                 2 : Problem appears to be infeasible
                 3 : Problem appears to be unbounded
                 4 : Serious numerical difficulties encountered

        nit : int
            The number of iterations performed.
        message : str
            A string descriptor of the exit status of the optimization.
    """
    iteration = res['nit']
    solution = res['x']

    # Emit the header exactly once, ahead of the zeroth iteration.
    if iteration == 0:
        print("Iter: X:")
    # Left-pad the iteration count, then the solution vector on the same line.
    print("{0: <5d} ".format(iteration), solution, sep="")

160 

161 

def linprog(c, A_ub=None, b_ub=None, A_eq=None, b_eq=None,
            bounds=None, method='interior-point', callback=None,
            options=None, x0=None):
    r"""
    Linear programming: minimize a linear objective function subject to linear
    equality and inequality constraints.

    Linear programming solves problems of the following form:

    .. math::

        \min_x \ & c^T x \\
        \mbox{such that} \ & A_{ub} x \leq b_{ub},\\
        & A_{eq} x = b_{eq},\\
        & l \leq x \leq u ,

    where :math:`x` is a vector of decision variables; :math:`c`,
    :math:`b_{ub}`, :math:`b_{eq}`, :math:`l`, and :math:`u` are vectors; and
    :math:`A_{ub}` and :math:`A_{eq}` are matrices.

    Informally, that's:

    minimize::

        c @ x

    such that::

        A_ub @ x <= b_ub
        A_eq @ x == b_eq
        lb <= x <= ub

    Note that by default ``lb = 0`` and ``ub = None`` unless specified with
    ``bounds``.

    Parameters
    ----------
    c : 1-D array
        The coefficients of the linear objective function to be minimized.
    A_ub : 2-D array, optional
        The inequality constraint matrix. Each row of ``A_ub`` specifies the
        coefficients of a linear inequality constraint on ``x``.
    b_ub : 1-D array, optional
        The inequality constraint vector. Each element represents an
        upper bound on the corresponding value of ``A_ub @ x``.
    A_eq : 2-D array, optional
        The equality constraint matrix. Each row of ``A_eq`` specifies the
        coefficients of a linear equality constraint on ``x``.
    b_eq : 1-D array, optional
        The equality constraint vector. Each element of ``A_eq @ x`` must equal
        the corresponding element of ``b_eq``.
    bounds : sequence, optional
        A sequence of ``(min, max)`` pairs for each element in ``x``, defining
        the minimum and maximum values of that decision variable. Use ``None`` to
        indicate that there is no bound. By default, bounds are ``(0, None)``
        (all decision variables are non-negative).
        If a single tuple ``(min, max)`` is provided, then ``min`` and
        ``max`` will serve as bounds for all decision variables.
    method : {'interior-point', 'revised simplex', 'simplex'}, optional
        The algorithm used to solve the standard form problem.
        :ref:`'interior-point' <optimize.linprog-interior-point>` (default),
        :ref:`'revised simplex' <optimize.linprog-revised_simplex>`, and
        :ref:`'simplex' <optimize.linprog-simplex>` (legacy)
        are supported.
    callback : callable, optional
        If a callback function is provided, it will be called at least once per
        iteration of the algorithm. The callback function must accept a single
        `scipy.optimize.OptimizeResult` consisting of the following fields:

            x : 1-D array
                The current solution vector.
            fun : float
                The current value of the objective function ``c @ x``.
            success : bool
                ``True`` when the algorithm has completed successfully.
            slack : 1-D array
                The (nominally positive) values of the slack,
                ``b_ub - A_ub @ x``.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.
            phase : int
                The phase of the algorithm being executed.
            status : int
                An integer representing the status of the algorithm.

                ``0`` : Optimization proceeding nominally.

                ``1`` : Iteration limit reached.

                ``2`` : Problem appears to be infeasible.

                ``3`` : Problem appears to be unbounded.

                ``4`` : Numerical difficulties encountered.

            nit : int
                The current iteration number.
            message : str
                A string descriptor of the algorithm status.

    options : dict, optional
        A dictionary of solver options. All methods accept the following
        options:

            maxiter : int
                Maximum number of iterations to perform.
                Default: see method-specific documentation.
            disp : bool
                Set to ``True`` to print convergence messages.
                Default: ``False``.
            autoscale : bool
                Set to ``True`` to automatically perform equilibration.
                Consider using this option if the numerical values in the
                constraints are separated by several orders of magnitude.
                Default: ``False``.
            presolve : bool
                Set to ``False`` to disable automatic presolve.
                Default: ``True``.
            rr : bool
                Set to ``False`` to disable automatic redundancy removal.
                Default: ``True``.

        For method-specific options, see
        :func:`show_options('linprog') <show_options>`.

    x0 : 1-D array, optional
        Guess values of the decision variables, which will be refined by
        the optimization algorithm. This argument is currently used only by the
        'revised simplex' method, and can only be used if `x0` represents a
        basic feasible solution.


    Returns
    -------
    res : OptimizeResult
        A :class:`scipy.optimize.OptimizeResult` consisting of the fields:

            x : 1-D array
                The values of the decision variables that minimizes the
                objective function while satisfying the constraints.
            fun : float
                The optimal value of the objective function ``c @ x``.
            slack : 1-D array
                The (nominally positive) values of the slack variables,
                ``b_ub - A_ub @ x``.
            con : 1-D array
                The (nominally zero) residuals of the equality constraints,
                ``b_eq - A_eq @ x``.
            success : bool
                ``True`` when the algorithm succeeds in finding an optimal
                solution.
            status : int
                An integer representing the exit status of the algorithm.

                ``0`` : Optimization terminated successfully.

                ``1`` : Iteration limit reached.

                ``2`` : Problem appears to be infeasible.

                ``3`` : Problem appears to be unbounded.

                ``4`` : Numerical difficulties encountered.

            nit : int
                The total number of iterations performed in all phases.
            message : str
                A string descriptor of the exit status of the algorithm.

    See Also
    --------
    show_options : Additional options accepted by the solvers.

    Notes
    -----
    This section describes the available solvers that can be selected by the
    'method' parameter.

    :ref:`'interior-point' <optimize.linprog-interior-point>` is the default
    as it is typically the fastest and most robust method.
    :ref:`'revised simplex' <optimize.linprog-revised_simplex>` is more
    accurate for the problems it solves.
    :ref:`'simplex' <optimize.linprog-simplex>` is the legacy method and is
    included for backwards compatibility and educational purposes.

    Method *interior-point* uses the primal-dual path following algorithm
    as outlined in [4]_. This algorithm supports sparse constraint matrices and
    is typically faster than the simplex methods, especially for large, sparse
    problems. Note, however, that the solution returned may be slightly less
    accurate than those of the simplex methods and will not, in general,
    correspond with a vertex of the polytope defined by the constraints.

    .. versionadded:: 1.0.0

    Method *revised simplex* uses the revised simplex method as described in
    [9]_, except that a factorization [11]_ of the basis matrix, rather than
    its inverse, is efficiently maintained and used to solve the linear systems
    at each iteration of the algorithm.

    .. versionadded:: 1.3.0

    Method *simplex* uses a traditional, full-tableau implementation of
    Dantzig's simplex algorithm [1]_, [2]_ (*not* the
    Nelder-Mead simplex). This algorithm is included for backwards
    compatibility and educational purposes.

    .. versionadded:: 0.15.0

    Before applying any method, a presolve procedure based on [8]_ attempts
    to identify trivial infeasibilities, trivial unboundedness, and potential
    problem simplifications. Specifically, it checks for:

    - rows of zeros in ``A_eq`` or ``A_ub``, representing trivial constraints;
    - columns of zeros in ``A_eq`` `and` ``A_ub``, representing unconstrained
      variables;
    - column singletons in ``A_eq``, representing fixed variables; and
    - column singletons in ``A_ub``, representing simple bounds.

    If presolve reveals that the problem is unbounded (e.g. an unconstrained
    and unbounded variable has negative cost) or infeasible (e.g., a row of
    zeros in ``A_eq`` corresponds with a nonzero in ``b_eq``), the solver
    terminates with the appropriate status code. Note that presolve terminates
    as soon as any sign of unboundedness is detected; consequently, a problem
    may be reported as unbounded when in reality the problem is infeasible
    (but infeasibility has not been detected yet). Therefore, if it is
    important to know whether the problem is actually infeasible, solve the
    problem again with option ``presolve=False``.

    If neither infeasibility nor unboundedness are detected in a single pass
    of the presolve, bounds are tightened where possible and fixed
    variables are removed from the problem. Then, linearly dependent rows
    of the ``A_eq`` matrix are removed, (unless they represent an
    infeasibility) to avoid numerical difficulties in the primary solve
    routine. Note that rows that are nearly linearly dependent (within a
    prescribed tolerance) may also be removed, which can change the optimal
    solution in rare cases. If this is a concern, eliminate redundancy from
    your problem formulation and run with option ``rr=False`` or
    ``presolve=False``.

    Several potential improvements can be made here: additional presolve
    checks outlined in [8]_ should be implemented, the presolve routine should
    be run multiple times (until no further simplifications can be made), and
    more of the efficiency improvements from [5]_ should be implemented in the
    redundancy removal routines.

    After presolve, the problem is transformed to standard form by converting
    the (tightened) simple bounds to upper bound constraints, introducing
    non-negative slack variables for inequality constraints, and expressing
    unbounded variables as the difference between two non-negative variables.
    Optionally, the problem is automatically scaled via equilibration [12]_.
    The selected algorithm solves the standard form problem, and a
    postprocessing routine converts the result to a solution to the original
    problem.

    References
    ----------
    .. [1] Dantzig, George B., Linear programming and extensions. Rand
           Corporation Research Study Princeton Univ. Press, Princeton, NJ,
           1963
    .. [2] Hillier, S.H. and Lieberman, G.J. (1995), "Introduction to
           Mathematical Programming", McGraw-Hill, Chapter 4.
    .. [3] Bland, Robert G. New finite pivoting rules for the simplex method.
           Mathematics of Operations Research (2), 1977: pp. 103-107.
    .. [4] Andersen, Erling D., and Knud D. Andersen. "The MOSEK interior point
           optimizer for linear programming: an implementation of the
           homogeneous algorithm." High performance optimization. Springer US,
           2000. 197-232.
    .. [5] Andersen, Erling D. "Finding all linearly dependent rows in
           large-scale linear programming." Optimization Methods and Software
           6.3 (1995): 219-227.
    .. [6] Freund, Robert M. "Primal-Dual Interior-Point Methods for Linear
           Programming based on Newton's Method." Unpublished Course Notes,
           March 2004. Available 2/25/2017 at
           https://ocw.mit.edu/courses/sloan-school-of-management/15-084j-nonlinear-programming-spring-2004/lecture-notes/lec14_int_pt_mthd.pdf
    .. [7] Fourer, Robert. "Solving Linear Programs by Interior-Point Methods."
           Unpublished Course Notes, August 26, 2005. Available 2/25/2017 at
           http://www.4er.org/CourseNotes/Book%20B/B-III.pdf
    .. [8] Andersen, Erling D., and Knud D. Andersen. "Presolving in linear
           programming." Mathematical Programming 71.2 (1995): 221-245.
    .. [9] Bertsimas, Dimitris, and J. Tsitsiklis. "Introduction to linear
           programming." Athena Scientific 1 (1997): 997.
    .. [10] Andersen, Erling D., et al. Implementation of interior point
            methods for large scale linear programming. HEC/Universite de
            Geneve, 1996.
    .. [11] Bartels, Richard H. "A stabilization of the simplex method."
            Journal in Numerische Mathematik 16.5 (1971): 414-434.
    .. [12] Tomlin, J. A. "On scaling linear programming problems."
            Mathematical Programming Study 4 (1975): 146-166.

    Examples
    --------
    Consider the following problem:

    .. math::

        \min_{x_0, x_1} \ -x_0 + 4x_1 & \\
        \mbox{such that} \ -3x_0 + x_1 & \leq 6,\\
        -x_0 - 2x_1 & \geq -4,\\
        x_1 & \geq -3.

    The problem is not presented in the form accepted by `linprog`. This is
    easily remedied by converting the "greater than" inequality
    constraint to a "less than" inequality constraint by
    multiplying both sides by a factor of :math:`-1`. Note also that the last
    constraint is really the simple bound :math:`-3 \leq x_1 \leq \infty`.
    Finally, since there are no bounds on :math:`x_0`, we must explicitly
    specify the bounds :math:`-\infty \leq x_0 \leq \infty`, as the
    default is for variables to be non-negative. After collecting coefficients
    into arrays and tuples, the input for this problem is:

    >>> c = [-1, 4]
    >>> A = [[-3, 1], [1, 2]]
    >>> b = [6, 4]
    >>> x0_bounds = (None, None)
    >>> x1_bounds = (-3, None)
    >>> from scipy.optimize import linprog
    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds])

    Note that the default method for `linprog` is 'interior-point', which is
    approximate by nature.

    >>> print(res)
         con: array([], dtype=float64)
         fun: -21.99999984082494 # may vary
     message: 'Optimization terminated successfully.'
         nit: 6 # may vary
       slack: array([3.89999997e+01, 8.46872439e-08] # may vary
      status: 0
     success: True
           x: array([ 9.99999989, -2.99999999]) # may vary

    If you need greater accuracy, try 'revised simplex'.

    >>> res = linprog(c, A_ub=A, b_ub=b, bounds=[x0_bounds, x1_bounds], method='revised simplex')
    >>> print(res)
         con: array([], dtype=float64)
         fun: -22.0 # may vary
     message: 'Optimization terminated successfully.'
         nit: 1 # may vary
       slack: array([39., 0.]) # may vary
      status: 0
     success: True
           x: array([10., -3.]) # may vary

    """
    # Normalize the method name once so the comparison below and the solver
    # dispatch are case-insensitive.
    meth = method.lower()

    # x0 is only consumed by the 'revised simplex' backend (see dispatch
    # below); warn rather than fail if it was supplied for another method.
    if x0 is not None and meth != "revised simplex":
        warning_message = "x0 is used only when method is 'revised simplex'. "
        warn(warning_message, OptimizeWarning)

    # Bundle the raw inputs and let _parse_linprog validate/standardize them
    # and split out the solver options dict.
    lp = _LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0)
    lp, solver_options = _parse_linprog(lp, options)
    tol = solver_options.get('tol', 1e-9)

    iteration = 0
    complete = False  # will become True if solved in presolve
    undo = []

    # Keep the original arrays to calculate slack/residuals for original
    # problem.
    lp_o = deepcopy(lp)

    # Solve trivial problem, eliminate variables, tighten bounds, etc.
    c0 = 0  # we might get a constant term in the objective
    if solver_options.pop('presolve', True):
        # 'rr' (redundancy removal) is only meaningful inside presolve, so it
        # is popped here and not forwarded to the backend solvers.
        rr = solver_options.pop('rr', True)
        (lp, c0, x, undo, complete, status, message) = _presolve(lp, rr, tol)

    C, b_scale = 1, 1  # for trivial unscaling if autoscale is not used
    # postsolve_args carries everything _postprocess needs to map the
    # standard-form solution back onto the user's original problem.
    postsolve_args = (lp_o._replace(bounds=lp.bounds), undo, C, b_scale)

    # Only run a backend solver if presolve did not already finish the job.
    if not complete:
        # Convert the (possibly presolved) problem to standard form
        # min c @ x subject to A @ x == b, x >= 0.
        A, b, c, c0, x0 = _get_Abc(lp, c0, undo)
        if solver_options.pop('autoscale', False):
            A, b, c, x0, C, b_scale = _autoscale(A, b, c, x0)
            # Replace the trivial scaling factors with the real ones.
            postsolve_args = postsolve_args[:-2] + (C, b_scale)

        # Dispatch to the requested backend; each returns the standard-form
        # solution vector plus status/message/iteration bookkeeping.
        if meth == 'simplex':
            x, status, message, iteration = _linprog_simplex(
                c, c0=c0, A=A, b=b, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        elif meth == 'interior-point':
            x, status, message, iteration = _linprog_ip(
                c, c0=c0, A=A, b=b, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        elif meth == 'revised simplex':
            x, status, message, iteration = _linprog_rs(
                c, c0=c0, A=A, b=b, x0=x0, callback=callback,
                postsolve_args=postsolve_args, **solver_options)
        else:
            raise ValueError('Unknown solver %s' % method)

    # Eliminate artificial variables, re-introduce presolved variables, etc.
    # need modified bounds here to translate variables appropriately
    disp = solver_options.get('disp', False)

    x, fun, slack, con, status, message = _postprocess(x, postsolve_args,
                                                       complete, status,
                                                       message, tol,
                                                       iteration, disp)

    # Assemble the public result; 'success' is derived from the status code.
    sol = {
        'x': x,
        'fun': fun,
        'slack': slack,
        'con': con,
        'status': status,
        'message': message,
        'nit': iteration,
        'success': status == 0}

    return OptimizeResult(sol)