/*
    Copyright 2008-2021
        Matthias Ehmann,
        Carsten Miller,
        Reinhard Oldenburg,
        Alfred Wassermann

    This file is part of JSXGraph.

    JSXGraph is free software dual licensed under the GNU LGPL or MIT License.

    You can redistribute it and/or modify it under the terms of the

      * GNU Lesser General Public License as published by
        the Free Software Foundation, either version 3 of the License, or
        (at your option) any later version
      OR
      * MIT License: https://github.com/jsxgraph/jsxgraph/blob/master/LICENSE.MIT

    JSXGraph is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License and
    the MIT License along with JSXGraph. If not, see <http://www.gnu.org/licenses/>
    and <http://opensource.org/licenses/MIT/>.

    This is a port of jcobyla

    - to JavaScript by Reinhard Oldenburg and
    - to JSXGraph by Alfred Wassermann
*/
/*
 * jcobyla
 *
 * The MIT License
 *
 * Copyright (c) 2012 Anders Gustafsson, Cureos AB.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
47 * 48 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 49 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE 50 * FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 51 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 52 * 53 * Remarks: 54 * 55 * The original Fortran 77 version of this code was by Michael Powell (M.J.D.Powell @ damtp.cam.ac.uk) 56 * The Fortran 90 version was by Alan Miller (Alan.Miller @ vic.cmis.csiro.au). Latest revision - 30 October 1998 57 */ 58 59 /** 60 * Constrained Optimization BY Linear Approximation in Java. 61 * 62 * COBYLA2 is an implementation of Powell's nonlinear derivative free constrained optimization that uses 63 * a linear approximation approach. The algorithm is a sequential trust region algorithm that employs linear 64 * approximations to the objective and constraint functions, where the approximations are formed by linear 65 * interpolation at n + 1 points in the space of the variables and tries to maintain a regular shaped simplex 66 * over iterations. 67 * 68 * It solves nonsmooth NLP with a moderate number of variables (about 100). Inequality constraints only. 69 * 70 * The initial point X is taken as one vertex of the initial simplex with zero being another, so, X should 71 * not be entered as the zero vector. 72 * 73 * @author Anders Gustafsson, Cureos AB. Translation to Javascript by Reinhard Oldenburg, Goethe-University 74 */ 75 76 /*global JXG: true, define: true*/ 77 /*jslint nomen: true, plusplus: true, continue: true*/ 78 79 /* depends: 80 jxg 81 math/math 82 utils/type 83 */ 84 85 define(['jxg'], function (JXG) { 86 87 "use strict"; 88 89 /** 90 * The JXG.Math.Nlp namespace holds numerical algorithms for non-linear optimization. 
     * @name JXG.Math.Nlp
     * @namespace
     *
     */
    JXG.Math.Nlp = {

        /**
         * Creates an array of length n, filled with zeros.
         * @param {Number} n Length of the array.
         * @returns {Array} Array of zeros of length n.
         */
        arr: function(n) {
            var a = new Array(n),
                i;
            for (i = 0; i < n; i++) {
                a[i] = 0.0;
            }
            return a;
        },

        /**
         * Creates an n x m matrix as array of arrays, filled with zeros.
         * @param {Number} n Number of rows.
         * @param {Number} m Number of columns.
         * @returns {Array} Array containing n zero-filled arrays of length m each.
         */
        arr2: function(n, m) {
            var i = 0,
                a = new Array(n);

            while (i < n) {
                a[i] = this.arr(m);
                i++;
            }
            return a;
        },

        /**
         * Copies n elements of the array x, starting at index a, into the
         * array iox, starting at index b (cf. Java's System.arraycopy).
         * @param {Array} x Source array.
         * @param {Number} a Start index in the source array.
         * @param {Array} iox Destination array, modified in place.
         * @param {Number} b Start index in the destination array.
         * @param {Number} n Number of elements to copy.
         */
        arraycopy: function(x, a, iox, b, n) {
            var i = 0;
            while (i < n) {
                iox[i + b] = x[i + a];
                i++;
            }
        },

        // Exit status constants returned by FindMinimum / cobylb
        Normal: 0,                    // Optimization terminated normally
        MaxIterationsReached: 1,      // The MAXFUN limit has been reached
        DivergingRoundingErrors: 2,   // Rounding errors are becoming damaging

        /**
         * Minimizes the objective function F with respect to a set of inequality constraints CON,
         * and returns the optimal variable array. F and CON may be non-linear, and should preferably be smooth.
         * Calls {@link JXG.Math.Nlp#cobylb}.
         *
         * @param {Function} calcfc Interface implementation for calculating objective function and constraints.
         * @param {Number} n Number of variables.
         * @param {Number} m Number of constraints.
         * @param {Array} x On input initial values of the variables (zero-based array). On output
         * optimal values of the variables obtained in the COBYLA minimization.
         * @param {Number} rhobeg Initial size of the simplex.
         * @param {Number} rhoend Final value of the simplex.
         * @param {Number} iprint Print level, 0 <= iprint <= 3, where 0 provides no output and
         * 3 provides full output to the console.
         * @param {Number} maxfun Maximum number of function evaluations before terminating.
         * @returns {Number} Exit status of the COBYLA2 optimization.
         */
        FindMinimum: function(calcfc, n, m, x, rhobeg, rhoend, iprint, maxfun) {
            // CobylaExitStatus FindMinimum(final Calcfc calcfc, int n, int m, double[] x, double rhobeg, double rhoend, int iprint, int maxfun)
            // This subroutine minimizes an objective function F(X) subject to M
            // inequality constraints on X, where X is a vector of variables that has
            // N components. The algorithm employs linear approximations to the
            // objective and constraint functions, the approximations being formed by
            // linear interpolation at N+1 points in the space of the variables.
            // We regard these interpolation points as vertices of a simplex. The
            // parameter RHO controls the size of the simplex and it is reduced
            // automatically from RHOBEG to RHOEND. For each RHO the subroutine tries
            // to achieve a good vector of variables for the current size, and then
            // RHO is reduced until the value RHOEND is reached. Therefore RHOBEG and
            // RHOEND should be set to reasonable initial changes to and the required
            // accuracy in the variables respectively, but this accuracy should be
            // viewed as a subject for experimentation because it is not guaranteed.
            // The subroutine has an advantage over many of its competitors, however,
            // which is that it treats each constraint individually when calculating
            // a change to the variables, instead of lumping the constraints together
            // into a single penalty function. The name of the subroutine is derived
            // from the phrase Constrained Optimization BY Linear Approximations.

            // The user must set the values of N, M, RHOBEG and RHOEND, and must
            // provide an initial vector of variables in X. Further, the value of
            // IPRINT should be set to 0, 1, 2 or 3, which controls the amount of
            // printing during the calculation. Specifically, there is no output if
            // IPRINT=0 and there is output only at the end of the calculation if
            // IPRINT=1. Otherwise each new value of RHO and SIGMA is printed.
            // Further, the vector of variables and some function information are
            // given either when RHO is reduced or when each new value of F(X) is
            // computed in the cases IPRINT=2 or IPRINT=3 respectively. Here SIGMA
            // is a penalty parameter, it being assumed that a change to X is an
            // improvement if it reduces the merit function
            // F(X)+SIGMA*MAX(0.0, - C1(X), - C2(X),..., - CM(X)),
            // where C1,C2,...,CM denote the constraint functions that should become
            // nonnegative eventually, at least to the precision of RHOEND. In the
            // printed output the displayed term that is multiplied by SIGMA is
            // called MAXCV, which stands for 'MAXimum Constraint Violation'. The
            // argument ITERS is an integer variable that must be set by the user to a
            // limit on the number of calls of CALCFC, the purpose of this routine being
            // given below. The value of ITERS will be altered to the number of calls
            // of CALCFC that are made.
            // In order to define the objective and constraint functions, we require
            // a subroutine that has the name and arguments
            // SUBROUTINE CALCFC (N,M,X,F,CON)
            // DIMENSION X(:),CON(:) .
            // The values of N and M are fixed and have been defined already, while
            // X is now the current vector of variables. The subroutine should return
            // the objective and constraint functions at X in F and CON(1),CON(2),
            // ...,CON(M). Note that we are trying to adjust X so that F(X) is as
            // small as possible subject to the constraint functions being nonnegative.

            // Local variables
            var mpp = m + 2,
                status,
                // Internal base-1 X array
                iox = this.arr(n + 1),
                that = this,
                fcalcfc;

            iox[0] = 0.0;
            this.arraycopy(x, 0, iox, 1, n);

            // Internal representation of the objective and constraints calculation method,
            // accounting for that X and CON arrays in the cobylb method are base-1 arrays.
            fcalcfc = function(n, m, thisx, con) { // int n, int m, double[] x, double[] con
                var ix = that.arr(n),
                    ocon, f;

                that.arraycopy(thisx, 1, ix, 0, n);
                ocon = that.arr(m);
                f = calcfc(n, m, ix, ocon);
                that.arraycopy(ocon, 0, con, 1, m);
                return f;
            };

            status = this.cobylb(fcalcfc, n, m, mpp, iox, rhobeg, rhoend, iprint, maxfun);
            this.arraycopy(iox, 1, x, 0, n);

            return status;
        },

        // private static CobylaExitStatus cobylb(Calcfc calcfc, int n, int m, int mpp, double[] x,
        //   double rhobeg, double rhoend, int iprint, int maxfun)
        /**
         * JavaScript implementation of the non-linear optimization method COBYLA.
         * @param {Function} calcfc Objective and constraints function, called as calcfc(n, m, x, con)
         * with base-1 arrays x and con.
         * @param {Number} n Number of variables.
         * @param {Number} m Number of constraints.
         * @param {Number} mpp Equals m + 2 (con additionally holds F and RESMAX).
         * @param {Array} x Vector of variables (base-1 array), modified in place.
         * @param {Number} rhobeg Initial size of the simplex.
         * @param {Number} rhoend Final value of the simplex.
         * @param {Number} iprint Print level, 0 <= iprint <= 3.
         * @param {Number} maxfun Maximum number of function evaluations.
         * @returns {Number} Exit status of the COBYLA2 optimization
         */
        cobylb: function (calcfc, n, m, mpp, x, rhobeg, rhoend, iprint, maxfun) {
            // calcfc is the function which is called like calcfc(n, m, ix, ocon)
            // N.B. Arguments CON, SIM, SIMI, DATMAT, A, VSIG, VETA, SIGBAR, DX, W & IACT
            // have been removed.

            // Set the initial values of some parameters. The last column of SIM holds
            // the optimal vertex of the current simplex, and the preceding N columns
            // hold the displacements from the optimal vertex to the other vertices.
            // Further, SIMI holds the inverse of the matrix that is contained in the
            // first N columns of SIM.
            // Local variables
            var status = -1,

                // Fixed internal algorithm parameters; used below to derive the
                // simplex acceptability thresholds (parsig = alpha*rho,
                // pareta = beta*rho), the vertex step (gamma) and the edge
                // length bound (edgmax = delta*rho).
                alpha = 0.25,
                beta = 2.1,
                gamma = 0.5,
                delta = 1.1,

                f = 0.0,          // Current objective function value
                resmax = 0.0,     // Current maximum constraint violation
                total,

                np = n + 1,
                mp = m + 1,
                rho = rhobeg,     // Current simplex size, reduced from rhobeg towards rhoend
                parmu = 0.0,      // Penalty parameter SIGMA of the merit function

                iflag = false,
                ifull = false,
                parsig = 0.0,
                prerec = 0.0,     // Predicted reduction in max constraint violation
                prerem = 0.0,     // Predicted reduction in the merit function

                con = this.arr(1 + mpp),
                sim = this.arr2(1 + n, 1 + np),
                simi = this.arr2(1 + n, 1 + n),
                datmat = this.arr2(1 + mpp, 1 + np),
                a = this.arr2(1 + n, 1 + mp),
                vsig = this.arr(1 + n),
                veta = this.arr(1 + n),
                sigbar = this.arr(1 + n),
                dx = this.arr(1 + n),
                w = this.arr(1 + n),
                i, j, k, l,
                temp, tempa, nfvals,
                jdrop, ibrnch,
                skipVertexIdent,
                phimin, nbest,
                error,
                pareta, wsig, weta,
                cvmaxp, cvmaxm, dxsign,
                resnew, barmu, phi,
                vmold, vmnew, trured, ratio, edgmax, cmin, cmax, denom;

            if (iprint >= 2) {
                console.log("The initial value of RHO is " + rho + " and PARMU is set to zero.");
            }

            // nfvals counts the calls of calcfc
            nfvals = 0;
            temp = 1.0 / rho;

            for (i = 1; i <= n; ++i) {
                sim[i][np] = x[i];
                sim[i][i] = rho;
                simi[i][i] = temp;
            }

            jdrop = np;
            ibrnch = false;

            // Make the next call of the user-supplied subroutine CALCFC. These
            // instructions are also used for calling CALCFC during the iterations of
            // the algorithm.
            //alert("Iteration "+nfvals+" x="+x);
            L_40:
            do {
                if (nfvals >= maxfun && nfvals > 0) {
                    status = this.MaxIterationsReached;
                    break L_40;
                }

                ++nfvals;
                f = calcfc(n, m, x, con);
                resmax = 0.0;
                for (k = 1; k <= m; ++k) {
                    resmax = Math.max(resmax, -con[k]);
                }
                //alert( " f="+f+" resmax="+resmax);

                if (nfvals === iprint - 1 || iprint === 3) {
                    this.PrintIterationResult(nfvals, f, resmax, x, n, iprint);
                }

                // Append the objective value and max violation to the constraint vector
                con[mp] = f;
                con[mpp] = resmax;

                // Set the recently calculated function values in a column of DATMAT. This
                // array has a column for each vertex of the current simplex, the entries of
                // each column being the values of the constraint functions (if any)
                // followed by the objective function and the greatest constraint violation
                // at the vertex.
                skipVertexIdent = true;
                if (!ibrnch) {
                    skipVertexIdent = false;

                    for (i = 1; i <= mpp; ++i) {
                        datmat[i][jdrop] = con[i];
                    }

                    if (nfvals <= np) {
                        // Exchange the new vertex of the initial simplex with the optimal vertex if
                        // necessary. Then, if the initial simplex is not complete, pick its next
                        // vertex and calculate the function values there.

                        if (jdrop <= n) {
                            if (datmat[mp][np] <= f) {
                                x[jdrop] = sim[jdrop][np];
                            } else {
                                sim[jdrop][np] = x[jdrop];
                                for (k = 1; k <= mpp; ++k) {
                                    datmat[k][jdrop] = datmat[k][np];
                                    datmat[k][np] = con[k];
                                }
                                for (k = 1; k <= jdrop; ++k) {
                                    sim[jdrop][k] = -rho;
                                    temp = 0.0;
                                    for (i = k; i <= jdrop; ++i) {
                                        temp -= simi[i][k];
                                    }
                                    simi[jdrop][k] = temp;
                                }
                            }
                        }
                        if (nfvals <= n) {
                            jdrop = nfvals;
                            x[jdrop] += rho;
                            continue L_40;
                        }
                    }
                    ibrnch = true;
                }

                L_140:
                do {
                    L_550:
                    do {
                        if (!skipVertexIdent) {
                            // Identify the optimal vertex of the current simplex.
                            phimin = datmat[mp][np] + parmu * datmat[mpp][np];
                            nbest = np;

                            for (j = 1; j <= n; ++j) {
                                temp = datmat[mp][j] + parmu * datmat[mpp][j];
                                if (temp < phimin) {
                                    nbest = j;
                                    phimin = temp;
                                } else if (temp === phimin && parmu === 0.0 && datmat[mpp][j] < datmat[mpp][nbest]) {
                                    nbest = j;
                                }
                            }

                            // Switch the best vertex into pole position if it is not there already,
                            // and also update SIM, SIMI and DATMAT.
                            if (nbest <= n) {
                                for (i = 1; i <= mpp; ++i) {
                                    temp = datmat[i][np];
                                    datmat[i][np] = datmat[i][nbest];
                                    datmat[i][nbest] = temp;
                                }
                                for (i = 1; i <= n; ++i) {
                                    temp = sim[i][nbest];
                                    sim[i][nbest] = 0.0;
                                    sim[i][np] += temp;

                                    tempa = 0.0;
                                    for (k = 1; k <= n; ++k)
                                    {
                                        sim[i][k] -= temp;
                                        tempa -= simi[k][i];
                                    }
                                    simi[nbest][i] = tempa;
                                }
                            }

                            // Make an error return if SIGI is a poor approximation to the inverse of
                            // the leading N by N submatrix of SIG.
                            error = 0.0;
                            for (i = 1; i <= n; ++i) {
                                for (j = 1; j <= n; ++j) {
                                    // Deviation of (SIMI * SIM) from the identity matrix
                                    temp = this.DOT_PRODUCT(
                                        this.PART(this.ROW(simi, i), 1, n),
                                        this.PART(this.COL(sim, j), 1, n)
                                    ) - (i === j ? 1.0 : 0.0);
                                    error = Math.max(error, Math.abs(temp));
                                }
                            }
                            if (error > 0.1) {
                                status = this.DivergingRoundingErrors;
                                break L_40;
                            }

                            // Calculate the coefficients of the linear approximations to the objective
                            // and constraint functions, placing minus the objective function gradient
                            // after the constraint gradients in the array A. The vector W is used for
                            // working space.
                            for (k = 1; k <= mp; ++k) {
                                con[k] = -datmat[k][np];
                                for (j = 1; j <= n; ++j) {
                                    w[j] = datmat[k][j] + con[k];
                                }

                                for (i = 1; i <= n; ++i) {
                                    a[i][k] = (k === mp ? -1.0 : 1.0) * this.DOT_PRODUCT(
                                        this.PART(w, 1, n), this.PART(this.COL(simi, i), 1, n));
                                }
                            }

                            // Calculate the values of sigma and eta, and set IFLAG = 0 if the current
                            // simplex is not acceptable.
                            iflag = true;
                            parsig = alpha * rho;
                            pareta = beta * rho;

                            for (j = 1; j <= n; ++j) {
                                wsig = 0.0;
                                for (k = 1; k <= n; ++k) {
                                    wsig += simi[j][k] * simi[j][k];
                                }
                                weta = 0.0;
                                for (k = 1; k <= n; ++k) {
                                    weta += sim[k][j] * sim[k][j];
                                }
                                vsig[j] = 1.0 / Math.sqrt(wsig);
                                veta[j] = Math.sqrt(weta);
                                if (vsig[j] < parsig || veta[j] > pareta) { iflag = false; }
                            }

                            // If a new vertex is needed to improve acceptability, then decide which
                            // vertex to drop from the simplex.
                            if (!ibrnch && !iflag) {
                                // Prefer dropping the vertex with the largest edge length veta;
                                // if none exceeds pareta, drop the one with the smallest vsig.
                                jdrop = 0;
                                temp = pareta;
                                for (j = 1; j <= n; ++j) {
                                    if (veta[j] > temp) {
                                        jdrop = j;
                                        temp = veta[j];
                                    }
                                }
                                if (jdrop === 0) {
                                    for (j = 1; j <= n; ++j) {
                                        if (vsig[j] < temp) {
                                            jdrop = j;
                                            temp = vsig[j];
                                        }
                                    }
                                }

                                // Calculate the step to the new vertex and its sign.
                                temp = gamma * rho * vsig[jdrop];
                                for (k = 1; k <= n; ++k) {
                                    dx[k] = temp * simi[jdrop][k];
                                }
                                cvmaxp = 0.0;
                                cvmaxm = 0.0;
                                total = 0.0;
                                for (k = 1; k <= mp; ++k) {
                                    total = this.DOT_PRODUCT(
                                        this.PART(this.COL(a, k), 1, n),
                                        this.PART(dx, 1, n)
                                    );
                                    if (k < mp) {
                                        temp = datmat[k][np];
                                        cvmaxp = Math.max(cvmaxp, -total - temp);
                                        cvmaxm = Math.max(cvmaxm, total - temp);
                                    }
                                }
                                dxsign = parmu * (cvmaxp - cvmaxm) > 2.0 * total ? -1.0 : 1.0;

                                // Update the elements of SIM and SIMI, and set the next X.
                                temp = 0.0;
                                for (i = 1; i <= n; ++i) {
                                    dx[i] = dxsign * dx[i];
                                    sim[i][jdrop] = dx[i];
                                    temp += simi[jdrop][i] * dx[i];
                                }
                                for (k = 1; k <= n; ++k) {
                                    simi[jdrop][k] /= temp;
                                }

                                for (j = 1; j <= n; ++j) {
                                    if (j !== jdrop) {
                                        temp = this.DOT_PRODUCT(
                                            this.PART(this.ROW(simi, j), 1, n),
                                            this.PART(dx, 1, n)
                                        );
                                        for (k = 1; k <= n; ++k) {
                                            simi[j][k] -= temp * simi[jdrop][k];
                                        }
                                    }
                                    x[j] = sim[j][np] + dx[j];
                                }
                                continue L_40;
                            }

                            // Calculate DX = x(*)-x(0).
                            // Branch if the length of DX is less than 0.5*RHO.
                            ifull = this.trstlp(n, m, a, con, rho, dx);
                            if (!ifull) {
                                temp = 0.0;
                                for (k = 1; k <= n; ++k) {
                                    temp += dx[k] * dx[k];
                                }
                                if (temp < 0.25 * rho * rho) {
                                    ibrnch = true;
                                    break L_550;
                                }
                            }

                            // Predict the change to F and the new maximum constraint violation if the
                            // variables are altered from x(0) to x(0) + DX.
                            total = 0.0;
                            resnew = 0.0;
                            con[mp] = 0.0;
                            for (k = 1; k <= mp; ++k) {
                                total = con[k] - this.DOT_PRODUCT(this.PART(this.COL(a, k), 1, n), this.PART(dx, 1, n));
                                if (k < mp) { resnew = Math.max(resnew, total); }
                            }

                            // Increase PARMU if necessary and branch back if this change alters the
                            // optimal vertex. Otherwise PREREM and PREREC will be set to the predicted
                            // reductions in the merit function and the maximum constraint violation
                            // respectively.
                            prerec = datmat[mpp][np] - resnew;
                            barmu = prerec > 0.0 ? total / prerec : 0.0;
                            if (parmu < 1.5 * barmu) {
                                parmu = 2.0 * barmu;
                                if (iprint >= 2) { console.log("Increase in PARMU to " + parmu); }
                                phi = datmat[mp][np] + parmu * datmat[mpp][np];
                                for (j = 1; j <= n; ++j) {
                                    temp = datmat[mp][j] + parmu * datmat[mpp][j];
                                    if (temp < phi || (temp === phi && parmu === 0.0 && datmat[mpp][j] < datmat[mpp][np])) {
                                        continue L_140;
                                    }
                                }
                            }
                            prerem = parmu * prerec - total;

                            // Calculate the constraint and objective functions at x(*).
                            // Then find the actual reduction in the merit function.
                            for (k = 1; k <= n; ++k) {
                                x[k] = sim[k][np] + dx[k];
                            }
                            ibrnch = true;
                            continue L_40;
                        }

                        skipVertexIdent = false;
                        // Actual reduction of the merit function: vmold (old) - vmnew (new)
                        vmold = datmat[mp][np] + parmu * datmat[mpp][np];
                        vmnew = f + parmu * resmax;
                        trured = vmold - vmnew;
                        if (parmu === 0.0 && f === datmat[mp][np]) {
                            prerem = prerec;
                            trured = datmat[mpp][np] - resmax;
                        }

                        // Begin the operations that decide whether x(*) should replace one of the
                        // vertices of the current simplex, the change being mandatory if TRURED is
                        // positive. Firstly, JDROP is set to the index of the vertex that is to be
                        // replaced.
                        ratio = trured <= 0.0 ? 1.0 : 0.0;
                        jdrop = 0;
                        for (j = 1; j <= n; ++j) {
                            temp = Math.abs(this.DOT_PRODUCT(this.PART(this.ROW(simi, j), 1, n), this.PART(dx, 1, n)));
                            if (temp > ratio) {
                                jdrop = j;
                                ratio = temp;
                            }
                            sigbar[j] = temp * vsig[j];
                        }

                        // Calculate the value of ell.
625 626 edgmax = delta * rho; 627 l = 0; 628 for (j = 1; j <= n; ++j) { 629 if (sigbar[j] >= parsig || sigbar[j] >= vsig[j]) { 630 temp = veta[j]; 631 if (trured > 0.0) { 632 temp = 0.0; 633 for (k = 1; k <= n; ++k) { 634 temp += Math.pow(dx[k] - sim[k][j], 2.0); 635 } 636 temp = Math.sqrt(temp); 637 } 638 if (temp > edgmax) { 639 l = j; 640 edgmax = temp; 641 } 642 } 643 } 644 if (l > 0) { jdrop = l; } 645 646 if (jdrop !== 0) { 647 // Revise the simplex by updating the elements of SIM, SIMI and DATMAT. 648 temp = 0.0; 649 for (i = 1; i <= n; ++i) { 650 sim[i][jdrop] = dx[i]; 651 temp += simi[jdrop][i] * dx[i]; 652 } 653 for (k = 1; k <= n; ++k) { simi[jdrop][k] /= temp; } 654 for (j = 1; j <= n; ++j) { 655 if (j !== jdrop) { 656 temp = this.DOT_PRODUCT(this.PART(this.ROW(simi, j), 1, n), this.PART(dx, 1, n)); 657 for (k = 1; k <= n; ++k) { 658 simi[j][k] -= temp * simi[jdrop][k]; 659 } 660 } 661 } 662 for (k = 1; k <= mpp; ++k) { 663 datmat[k][jdrop] = con[k]; 664 } 665 666 // Branch back for further iterations with the current RHO. 667 if (trured > 0.0 && trured >= 0.1 * prerem) { 668 continue L_140; 669 } 670 } 671 } while (false); 672 673 if (!iflag) { 674 ibrnch = false; 675 continue L_140; 676 } 677 678 if (rho <= rhoend) { 679 status = this.Normal; 680 break L_40; 681 } 682 683 // Otherwise reduce RHO if it is not at its least value and reset PARMU. 684 cmin = 0.0; 685 cmax = 0.0; 686 rho *= 0.5; 687 if (rho <= 1.5 * rhoend) { rho = rhoend; } 688 if (parmu > 0.0) { 689 denom = 0.0; 690 for (k = 1; k <= mp; ++k) { 691 cmin = datmat[k][np]; 692 cmax = cmin; 693 for (i = 1; i <= n; ++i) { 694 cmin = Math.min(cmin, datmat[k][i]); 695 cmax = Math.max(cmax, datmat[k][i]); 696 } 697 if (k <= m && cmin < 0.5 * cmax) { 698 temp = Math.max(cmax, 0.0) - cmin; 699 denom = denom <= 0.0 ? 
temp : Math.min(denom, temp); 700 } 701 } 702 if (denom === 0.0) { 703 parmu = 0.0; 704 } else if (cmax - cmin < parmu * denom) { 705 parmu = (cmax - cmin) / denom; 706 } 707 } 708 if (iprint >= 2) { 709 console.log("Reduction in RHO to "+rho+" and PARMU = "+parmu); 710 } 711 if (iprint === 2) { 712 this.PrintIterationResult(nfvals, datmat[mp][np], datmat[mpp][np], this.COL(sim, np), n, iprint); 713 } 714 } while (true); 715 } while (true); 716 717 switch (status) { 718 case this.Normal: 719 if (iprint >= 1) { console.log("%nNormal return from subroutine COBYLA%n"); } 720 if (ifull) { 721 if (iprint >= 1) { this.PrintIterationResult(nfvals, f, resmax, x, n, iprint); } 722 return status; 723 } 724 break; 725 case this.MaxIterationsReached: 726 if (iprint >= 1) { 727 console.log("%nReturn from subroutine COBYLA because the MAXFUN limit has been reached.%n"); 728 } 729 break; 730 case this.DivergingRoundingErrors: 731 if (iprint >= 1) { 732 console.log("%nReturn from subroutine COBYLA because rounding errors are becoming damaging.%n"); 733 } 734 break; 735 } 736 737 for (k = 1; k <= n; ++k) { x[k] = sim[k][np]; } 738 f = datmat[mp][np]; 739 resmax = datmat[mpp][np]; 740 if (iprint >= 1) { this.PrintIterationResult(nfvals, f, resmax, x, n, iprint); } 741 742 return status; 743 }, 744 745 trstlp: function(n, m, a, b, rho, dx) { //(int n, int m, double[][] a, double[] b, double rho, double[] dx) 746 // N.B. Arguments Z, ZDOTA, VMULTC, SDIRN, DXNEW, VMULTD & IACT have been removed. 747 748 // This subroutine calculates an N-component vector DX by applying the 749 // following two stages. In the first stage, DX is set to the shortest 750 // vector that minimizes the greatest violation of the constraints 751 // A(1,K)*DX(1)+A(2,K)*DX(2)+...+A(N,K)*DX(N) .GE. B(K), K = 2,3,...,M, 752 // subject to the Euclidean length of DX being at most RHO. 
If its length is 753 // strictly less than RHO, then we use the resultant freedom in DX to 754 // minimize the objective function 755 // -A(1,M+1)*DX(1) - A(2,M+1)*DX(2) - ... - A(N,M+1)*DX(N) 756 // subject to no increase in any greatest constraint violation. This 757 // notation allows the gradient of the objective function to be regarded as 758 // the gradient of a constraint. Therefore the two stages are distinguished 759 // by MCON .EQ. M and MCON .GT. M respectively. It is possible that a 760 // degeneracy may prevent DX from attaining the target length RHO. Then the 761 // value IFULL = 0 would be set, but usually IFULL = 1 on return. 762 // 763 // In general NACT is the number of constraints in the active set and 764 // IACT(1),...,IACT(NACT) are their indices, while the remainder of IACT 765 // contains a permutation of the remaining constraint indices. Further, Z 766 // is an orthogonal matrix whose first NACT columns can be regarded as the 767 // result of Gram-Schmidt applied to the active constraint gradients. For 768 // J = 1,2,...,NACT, the number ZDOTA(J) is the scalar product of the J-th 769 // column of Z with the gradient of the J-th active constraint. DX is the 770 // current vector of variables and here the residuals of the active 771 // constraints should be zero. Further, the active constraints have 772 // nonnegative Lagrange multipliers that are held at the beginning of 773 // VMULTC. The remainder of this vector holds the residuals of the inactive 774 // constraints at DX, the ordering of the components of VMULTC being in 775 // agreement with the permutation of the indices of the constraints that is 776 // in IACT. All these residuals are nonnegative, which is achieved by the 777 // shift RESMAX that makes the least residual zero. 778 779 // Initialize Z and some other variables. The value of RESMAX will be 780 // appropriate to DX = 0, while ICON will be the index of a most violated 781 // constraint if RESMAX is positive. 
Usually during the first stage the 782 // vector SDIRN gives a search direction that reduces all the active 783 // constraint violations by one simultaneously. 784 785 // Local variables 786 787 var temp = 0, 788 nactx = 0, 789 resold = 0.0, 790 791 z = this.arr2(1 + n, 1 + n), 792 zdota = this.arr(2 + m), 793 vmultc = this.arr(2 + m), 794 sdirn = this.arr(1 + n), 795 dxnew = this.arr(1 + n), 796 vmultd = this.arr(2 + m), 797 iact = this.arr(2 + m), 798 799 mcon = m, 800 nact = 0, 801 icon, resmax, 802 i, k, 803 first, 804 optold, icount, step, stpful, optnew, 805 ratio, isave, vsave, 806 total, 807 kp, kk, sp, alpha, beta, 808 tot, spabs, acca, accb, 809 zdotv, zdvabs, kw, 810 dd, ss, sd, 811 zdotw, zdwabs, 812 kl, sumabs, tempa; 813 814 for (i = 1; i <= n; ++i) { 815 z[i][i] = 1.0; 816 dx[i] = 0.0; 817 } 818 819 icon = 0; 820 resmax = 0.0; 821 if (m >= 1) { 822 for (k = 1; k <= m; ++k) { 823 if (b[k] > resmax) { 824 resmax = b[k]; 825 icon = k; 826 } 827 } 828 for (k = 1; k <= m; ++k) { 829 iact[k] = k; 830 vmultc[k] = resmax - b[k]; 831 } 832 } 833 834 // End the current stage of the calculation if 3 consecutive iterations 835 // have either failed to reduce the best calculated value of the objective 836 // function or to increase the number of active constraints since the best 837 // value was calculated. This strategy prevents cycling, but there is a 838 // remote possibility that it will cause premature termination. 839 840 first = true; 841 do { 842 L_60: 843 do { 844 if (!first || (first && resmax === 0.0)) { 845 mcon = m + 1; 846 icon = mcon; 847 iact[mcon] = mcon; 848 vmultc[mcon] = 0.0; 849 } 850 first = false; 851 852 optold = 0.0; 853 icount = 0; 854 step = 0; 855 stpful = 0; 856 857 L_70: 858 do { 859 optnew = (mcon === m) ? 
resmax : -this.DOT_PRODUCT( 860 this.PART(dx, 1, n), this.PART(this.COL(a, mcon), 1, n) 861 ); 862 863 if (icount === 0 || optnew < optold) { 864 optold = optnew; 865 nactx = nact; 866 icount = 3; 867 } else if (nact > nactx) { 868 nactx = nact; 869 icount = 3; 870 } else { 871 --icount; 872 } 873 if (icount === 0) { break L_60; } 874 875 // If ICON exceeds NACT, then we add the constraint with index IACT(ICON) to 876 // the active set. Apply Givens rotations so that the last N-NACT-1 columns 877 // of Z are orthogonal to the gradient of the new constraint, a scalar 878 // product being set to zero if its nonzero value could be due to computer 879 // rounding errors. The array DXNEW is used for working space. 880 ratio = 0; 881 if (icon <= nact) { 882 if (icon < nact) { 883 // Delete the constraint that has the index IACT(ICON) from the active set. 884 885 isave = iact[icon]; 886 vsave = vmultc[icon]; 887 k = icon; 888 do { 889 kp = k + 1; 890 kk = iact[kp]; 891 sp = this.DOT_PRODUCT( 892 this.PART(this.COL(z, k), 1, n), 893 this.PART(this.COL(a, kk), 1, n) 894 ); 895 temp = Math.sqrt(sp * sp + zdota[kp] * zdota[kp]); 896 alpha = zdota[kp] / temp; 897 beta = sp / temp; 898 zdota[kp] = alpha * zdota[k]; 899 zdota[k] = temp; 900 for (i = 1; i <= n; ++i) { 901 temp = alpha * z[i][kp] + beta * z[i][k]; 902 z[i][kp] = alpha * z[i][k] - beta * z[i][kp]; 903 z[i][k] = temp; 904 } 905 iact[k] = kk; 906 vmultc[k] = vmultc[kp]; 907 k = kp; 908 } while (k < nact); 909 910 iact[k] = isave; 911 vmultc[k] = vsave; 912 } 913 --nact; 914 915 // If stage one is in progress, then set SDIRN to the direction of the next 916 // change to the current vector of variables. 917 if (mcon > m) { 918 // Pick the next search direction of stage two. 
919 temp = 1.0 / zdota[nact]; 920 for (k = 1; k <= n; ++k) { sdirn[k] = temp * z[k][nact]; } 921 } else { 922 temp = this.DOT_PRODUCT( 923 this.PART(sdirn, 1, n), this.PART(this.COL(z, nact + 1), 1, n) 924 ); 925 for (k = 1; k <= n; ++k) { sdirn[k] -= temp * z[k][nact + 1]; } 926 } 927 } else { 928 kk = iact[icon]; 929 for (k = 1; k <= n; ++k) { dxnew[k] = a[k][kk]; } 930 tot = 0.0; 931 932 // { 933 k = n; 934 while (k > nact) { 935 sp = 0.0; 936 spabs = 0.0; 937 for (i = 1; i <= n; ++i) { 938 temp = z[i][k] * dxnew[i]; 939 sp += temp; 940 spabs += Math.abs(temp); 941 } 942 acca = spabs + 0.1 * Math.abs(sp); 943 accb = spabs + 0.2 * Math.abs(sp); 944 if (spabs >= acca || acca >= accb) { sp = 0.0; } 945 if (tot === 0.0) { 946 tot = sp; 947 } else { 948 kp = k + 1; 949 temp = Math.sqrt(sp * sp + tot * tot); 950 alpha = sp / temp; 951 beta = tot / temp; 952 tot = temp; 953 for (i = 1; i <= n; ++i) { 954 temp = alpha * z[i][k] + beta * z[i][kp]; 955 z[i][kp] = alpha * z[i][kp] - beta * z[i][k]; 956 z[i][k] = temp; 957 } 958 } 959 --k; 960 } 961 // } 962 963 if (tot === 0.0) { 964 // The next instruction is reached if a deletion has to be made from the 965 // active set in order to make room for the new active constraint, because 966 // the new constraint gradient is a linear combination of the gradients of 967 // the old active constraints. Set the elements of VMULTD to the multipliers 968 // of the linear combination. Further, set IOUT to the index of the 969 // constraint to be deleted, but branch if no suitable index can be found. 
970 971 ratio = -1.0; 972 //{ 973 k = nact; 974 do { 975 zdotv = 0.0; 976 zdvabs = 0.0; 977 978 for (i = 1; i <= n; ++i) { 979 temp = z[i][k] * dxnew[i]; 980 zdotv += temp; 981 zdvabs += Math.abs(temp); 982 } 983 acca = zdvabs + 0.1 * Math.abs(zdotv); 984 accb = zdvabs + 0.2 * Math.abs(zdotv); 985 if (zdvabs < acca && acca < accb) { 986 temp = zdotv / zdota[k]; 987 if (temp > 0.0 && iact[k] <= m) { 988 tempa = vmultc[k] / temp; 989 if (ratio < 0.0 || tempa < ratio) { ratio = tempa; } 990 } 991 992 if (k >= 2) { 993 kw = iact[k]; 994 for (i = 1; i <= n; ++i) { dxnew[i] -= temp * a[i][kw]; } 995 } 996 vmultd[k] = temp; 997 } else { 998 vmultd[k] = 0.0; 999 } 1000 } while (--k > 0); 1001 //} 1002 if (ratio < 0.0) { break L_60; } 1003 1004 // Revise the Lagrange multipliers and reorder the active constraints so 1005 // that the one to be replaced is at the end of the list. Also calculate the 1006 // new value of ZDOTA(NACT) and branch if it is not acceptable. 1007 1008 for (k = 1; k <= nact; ++k) { 1009 vmultc[k] = Math.max(0.0, vmultc[k] - ratio * vmultd[k]); 1010 } 1011 if (icon < nact) { 1012 isave = iact[icon]; 1013 vsave = vmultc[icon]; 1014 k = icon; 1015 do { 1016 kp = k + 1; 1017 kw = iact[kp]; 1018 sp = this.DOT_PRODUCT( 1019 this.PART(this.COL(z, k), 1, n), 1020 this.PART(this.COL(a, kw), 1, n) 1021 ); 1022 temp = Math.sqrt(sp * sp + zdota[kp] * zdota[kp]); 1023 alpha = zdota[kp] / temp; 1024 beta = sp / temp; 1025 zdota[kp] = alpha * zdota[k]; 1026 zdota[k] = temp; 1027 for (i = 1; i <= n; ++i) { 1028 temp = alpha * z[i][kp] + beta * z[i][k]; 1029 z[i][kp] = alpha * z[i][k] - beta * z[i][kp]; 1030 z[i][k] = temp; 1031 } 1032 iact[k] = kw; 1033 vmultc[k] = vmultc[kp]; 1034 k = kp; 1035 } while (k < nact); 1036 iact[k] = isave; 1037 vmultc[k] = vsave; 1038 } 1039 temp = this.DOT_PRODUCT( 1040 this.PART(this.COL(z, nact), 1, n), 1041 this.PART(this.COL(a, kk), 1, n) 1042 ); 1043 if (temp === 0.0) { break L_60; } 1044 zdota[nact] = temp; 1045 vmultc[icon] = 0.0; 
1046 vmultc[nact] = ratio; 1047 } else { 1048 // Add the new constraint if this can be done without a deletion from the 1049 // active set. 1050 1051 ++nact; 1052 zdota[nact] = tot; 1053 vmultc[icon] = vmultc[nact]; 1054 vmultc[nact] = 0.0; 1055 } 1056 1057 // Update IACT and ensure that the objective function continues to be 1058 // treated as the last active constraint when MCON>M. 1059 1060 iact[icon] = iact[nact]; 1061 iact[nact] = kk; 1062 if (mcon > m && kk !== mcon) { 1063 k = nact - 1; 1064 sp = this.DOT_PRODUCT( 1065 this.PART(this.COL(z, k), 1, n), 1066 this.PART(this.COL(a, kk), 1, n) 1067 ); 1068 temp = Math.sqrt(sp * sp + zdota[nact] * zdota[nact]); 1069 alpha = zdota[nact] / temp; 1070 beta = sp / temp; 1071 zdota[nact] = alpha * zdota[k]; 1072 zdota[k] = temp; 1073 for (i = 1; i <= n; ++i) { 1074 temp = alpha * z[i][nact] + beta * z[i][k]; 1075 z[i][nact] = alpha * z[i][k] - beta * z[i][nact]; 1076 z[i][k] = temp; 1077 } 1078 iact[nact] = iact[k]; 1079 iact[k] = kk; 1080 temp = vmultc[k]; 1081 vmultc[k] = vmultc[nact]; 1082 vmultc[nact] = temp; 1083 } 1084 1085 // If stage one is in progress, then set SDIRN to the direction of the next 1086 // change to the current vector of variables. 1087 if (mcon > m) { 1088 // Pick the next search direction of stage two. 1089 temp = 1.0 / zdota[nact]; 1090 for (k = 1; k <= n; ++k) { sdirn[k] = temp * z[k][nact]; } 1091 } else { 1092 kk = iact[nact]; 1093 temp = (this.DOT_PRODUCT( 1094 this.PART(sdirn, 1, n), 1095 this.PART(this.COL(a, kk), 1, n) 1096 ) - 1.0) / zdota[nact]; 1097 for (k = 1; k <= n; ++k) { sdirn[k] -= temp * z[k][nact]; } 1098 } 1099 } 1100 1101 // Calculate the step to the boundary of the trust region or take the step 1102 // that reduces RESMAX to zero. The two statements below that include the 1103 // factor 1.0E-6 prevent some harmless underflows that occurred in a test 1104 // calculation. 
Further, we skip the step if it could be zero within a 1105 // reasonable tolerance for computer rounding errors. 1106 dd = rho * rho; 1107 sd = 0.0; 1108 ss = 0.0; 1109 for (i = 1; i <= n; ++i) { 1110 if (Math.abs(dx[i]) >= 1.0E-6 * rho) { dd -= dx[i] * dx[i]; } 1111 sd += dx[i] * sdirn[i]; 1112 ss += sdirn[i] * sdirn[i]; 1113 } 1114 if (dd <= 0.0) { break L_60; } 1115 temp = Math.sqrt(ss * dd); 1116 if (Math.abs(sd) >= 1.0E-6 * temp) { temp = Math.sqrt(ss * dd + sd * sd); } 1117 stpful = dd / (temp + sd); 1118 step = stpful; 1119 if (mcon === m) { 1120 acca = step + 0.1 * resmax; 1121 accb = step + 0.2 * resmax; 1122 if (step >= acca || acca >= accb) { break L_70; } 1123 step = Math.min(step, resmax); 1124 } 1125 1126 // Set DXNEW to the new variables if STEP is the steplength, and reduce 1127 // RESMAX to the corresponding maximum residual if stage one is being done. 1128 // Because DXNEW will be changed during the calculation of some Lagrange 1129 // multipliers, it will be restored to the following value later. 1130 for (k = 1; k <= n; ++k) { dxnew[k] = dx[k] + step * sdirn[k]; } 1131 if (mcon === m) { 1132 resold = resmax; 1133 resmax = 0.0; 1134 for (k = 1; k <= nact; ++k) { 1135 kk = iact[k]; 1136 temp = b[kk] - this.DOT_PRODUCT( 1137 this.PART(this.COL(a, kk), 1, n), this.PART(dxnew, 1, n) 1138 ); 1139 resmax = Math.max(resmax, temp); 1140 } 1141 } 1142 1143 // Set VMULTD to the VMULTC vector that would occur if DX became DXNEW. A 1144 // device is included to force VMULTD(K) = 0.0 if deviations from this value 1145 // can be attributed to computer rounding errors. First calculate the new 1146 // Lagrange multipliers. 
1147 //{ 1148 k = nact; 1149 do { 1150 zdotw = 0.0; 1151 zdwabs = 0.0; 1152 for (i = 1; i <= n; ++i) { 1153 temp = z[i][k] * dxnew[i]; 1154 zdotw += temp; 1155 zdwabs += Math.abs(temp); 1156 } 1157 acca = zdwabs + 0.1 * Math.abs(zdotw); 1158 accb = zdwabs + 0.2 * Math.abs(zdotw); 1159 if (zdwabs >= acca || acca >= accb) { zdotw = 0.0; } 1160 vmultd[k] = zdotw / zdota[k]; 1161 if (k >= 2) { 1162 kk = iact[k]; 1163 for (i = 1; i <= n; ++i) { dxnew[i] -= vmultd[k] * a[i][kk]; } 1164 } 1165 } while (k-- >= 2); 1166 if (mcon > m) { vmultd[nact] = Math.max(0.0, vmultd[nact]); } 1167 //} 1168 1169 // Complete VMULTC by finding the new constraint residuals. 1170 1171 for (k = 1; k <= n; ++k) { dxnew[k] = dx[k] + step * sdirn[k]; } 1172 if (mcon > nact) { 1173 kl = nact + 1; 1174 for (k = kl; k <= mcon; ++k) { 1175 kk = iact[k]; 1176 total = resmax - b[kk]; 1177 sumabs = resmax + Math.abs(b[kk]); 1178 for (i = 1; i <= n; ++i) { 1179 temp = a[i][kk] * dxnew[i]; 1180 total += temp; 1181 sumabs += Math.abs(temp); 1182 } 1183 acca = sumabs + 0.1 * Math.abs(total); 1184 accb = sumabs + 0.2 * Math.abs(total); 1185 if (sumabs >= acca || acca >= accb) { total = 0.0; } 1186 vmultd[k] = total; 1187 } 1188 } 1189 1190 // Calculate the fraction of the step from DX to DXNEW that will be taken. 1191 1192 ratio = 1.0; 1193 icon = 0; 1194 for (k = 1; k <= mcon; ++k) { 1195 if (vmultd[k] < 0.0) { 1196 temp = vmultc[k] / (vmultc[k] - vmultd[k]); 1197 if (temp < ratio) { 1198 ratio = temp; 1199 icon = k; 1200 } 1201 } 1202 } 1203 1204 // Update DX, VMULTC and RESMAX. 1205 1206 temp = 1.0 - ratio; 1207 for (k = 1; k <= n; ++k) { dx[k] = temp * dx[k] + ratio * dxnew[k]; } 1208 for (k = 1; k <= mcon; ++k) { 1209 vmultc[k] = Math.max(0.0, temp * vmultc[k] + ratio * vmultd[k]); 1210 } 1211 if (mcon === m) { resmax = resold + ratio * (resmax - resold); } 1212 1213 // If the full step is not acceptable then begin another iteration. 1214 // Otherwise switch to stage two or end the calculation. 
            } while (icon > 0);

            // The full step was taken: the linear subproblem is solved.
            if (step === stpful) {
                return true;
            }

        } while (true);

        // We employ any freedom that may be available to reduce the objective
        // function before returning a DX whose length is less than RHO.

    } while (mcon === m);

    // No acceptable step could be produced within the trust region.
    return false;
    },

    /**
     * Prints one status line of the optimization to the console: the number
     * of function evaluations, the objective value, the maximum constraint
     * violation, and the current point. Nothing is printed unless iprint > 1.
     *
     * @param {Number} nfvals Number of objective function evaluations so far.
     * @param {Number} f Current objective function value.
     * @param {Number} resmax Current maximum constraint violation (MAXCV).
     * @param {Array} x Current point; only entries 1..n are printed
     *                  (the port keeps Fortran-style 1-based arrays).
     * @param {Number} n Number of variables.
     * @param {Number} iprint Verbosity level; output only for values > 1.
     */
    PrintIterationResult: function(nfvals, f, resmax, x, n, iprint) {
        if (iprint > 1) { console.log("NFVALS = "+nfvals+" F = "+f+" MAXCV = "+resmax); }
        // Array-to-string conversion joins the entries with commas.
        if (iprint > 1) { console.log("X = " + this.PART(x, 1, n)); }
    },

    /**
     * Returns a shallow copy of one row of a two-dimensional array.
     *
     * @param {Array} src Two-dimensional array (array of arrays).
     * @param {Number} rowidx Index of the row to copy.
     * @returns {Array} New array containing the entries of src[rowidx].
     */
    ROW: function(src, rowidx) {
        return src[rowidx].slice();
    },

    /**
     * Returns one column of a two-dimensional array as a new array.
     *
     * @param {Array} src Two-dimensional array (array of arrays).
     * @param {Number} colidx Index of the column to extract.
     * @returns {Array} New array of length src.length holding src[row][colidx]
     *                  for every row.
     */
    COL: function(src, colidx) {
        var row,
            rows = src.length,
            // NOTE(review): this.arr is defined elsewhere in this object and is
            // presumably an array allocator - not visible in this chunk.
            dest = this.arr(rows);
        for (row = 0; row < rows; ++row) {
            dest[row] = src[row][colidx];
        }
        return dest;
    },

    /**
     * Returns a copy of the sub-array src[from..to], both bounds inclusive.
     * Used throughout to select the 1-based payload (indices 1..n) of the
     * Fortran-style arrays in this port.
     *
     * @param {Array} src Source array.
     * @param {Number} from First index to include.
     * @param {Number} to Last index to include.
     * @returns {Array} New array with to - from + 1 entries.
     */
    PART: function(src, from, to) {
        // Array.prototype.slice excludes the end index, hence "to + 1".
        return src.slice(from, to + 1);
    },

    /**
     * Formats an array as a comma-separated string.
     *
     * @param {Array} x Array to format.
     * @returns {String} The entries of x joined with ','.
     */
    FORMAT: function(x) {
        return x.join(',');
    },

    /**
     * Computes the dot product (inner product) of two vectors.
     * The iteration length is taken from lhs; both arrays are expected to
     * have (at least) the same length.
     *
     * @param {Array} lhs First vector.
     * @param {Array} rhs Second vector.
     * @returns {Number} Sum over lhs[i] * rhs[i].
     */
    DOT_PRODUCT: function(lhs, rhs) {
        var i, sum = 0.0,
            len = lhs.length;
        for (i = 0; i < len; ++i) {
            sum += lhs[i] * rhs[i];
        }
        return sum;
    }

    };

    return JXG.Math.Nlp;
});