I know, but I'm so close to the goal now.

I'm in the middle of rewriting a quadratic programming algorithm so that it uses no (or at least very little) dynamic allocation.
That's where the problem is.
We can start with the MATLAB/Octave code.
Code: Select all
/* GNU Octave code:
*
% This is quadratic programming with Hildreth's method
% Min 1/2x^TQx + c^Tx
% S.t Ax <= b
%
% If you want to do maximization, then turn Q and c negative. The constraints are the same
%
% Max 1/2x^T(-Q)x + (-c)^Tx
% S.t Ax <= b
%
% Input: Q(Symmetric matrix), c(Objective function), A(Constraint matrix), b(Constraint vector)
% Output: x(Solution vector), solution(boolean flag)
% Example 1: [x, solution] = quadprog(Q, c, A, b)
function [x, solution] = quadprog(Q, c, A, b)
% Assume that the solution is true
solution = true;
% Same as in C code for Functions.h at CControl
MIN_VALUE = 1e-11;
MAX_ITERATIONS = 10000;
% Unconstrained solution
x = -linsolve(Q, c);
% Constraints difference
K = b - A*x;
% Check constraint violation
if(sum(K < 0) == 0)
return; % No violation
end
% Create P
P = linsolve(Q, A');
% Create H = A*inv(Q)*A'
H = A*P;
% Solve lambda from H*lambda = -K, where lambda >= 0
[m, n] = size(K);
lambda = zeros(m, n);
for km = 1:MAX_ITERATIONS
lambda_p = lambda;
% Use Gauss Seidel
for i = 1:m
w = -1.0/H(i,i)*(K(i) + H(i,:)*lambda - H(i,i)*lambda(i));
lambda(i) = max(0, w);
end
% Check if the minimum convergence has been reached
w = (lambda - lambda_p)'*(lambda - lambda_p);
if (w < MIN_VALUE)
break;
end
% Check if the maximum number of iterations has been reached
if(km == MAX_ITERATIONS)
solution = false;
return;
end
end
% Find the solution: x = -inv(Q)*c - inv(Q)*A'*lambda
x = x - P*lambda;
end
*/
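
For reference, this is what the script above computes, written out as formulas (nothing new, just the same steps as in the code, with x_0 denoting the unconstrained minimizer):

\[ x_0 = -Q^{-1}c, \qquad K = b - A x_0 \]
\[ H = A Q^{-1} A^{T}, \qquad \text{solve } H\lambda = -K,\ \lambda \ge 0 \text{ (Hildreth / projected Gauss-Seidel):} \]
\[ \lambda_i \leftarrow \max\Big(0,\; -\tfrac{1}{H_{ii}}\big(K_i + \textstyle\sum_{j} H_{ij}\lambda_j - H_{ii}\lambda_i\big)\Big) \]
\[ x = x_0 - Q^{-1}A^{T}\lambda \]

If every component of K is nonnegative, x_0 is already the solution and the lambda step is skipped.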
The Octave code above can easily be translated into this:
Code: Select all
static bool opti(const float Q[], const float c[], const float A[], const float b[], float x[], const size_t row_a, const size_t column_a){
/* Declare */
size_t i, j, k;
/* Use Cholesky factorization to solve x from Qx = c because Q is symmetric positive definite */
linsolve_chol(Q, x, c, column_a);
/* Save address */
const float* Ai = A;
/* Turn x negative */
for (i = 0; i < column_a; i++) {
x[i] = -x[i];
/* If x was nan - return false - Use higher value on lambda constant! */
#ifdef _MSC_VER
if (_isnanf(x[i])) {
return false;
}
#else
if (isnanf(x[i])) {
return false;
}
#endif
}
/* Count how many constraints A*x > b */
float *K = (float*)malloc(row_a * sizeof(float));
size_t violations = 0;
float value;
for(i = 0; i < row_a; i++){
/* Compute A(i,:)*x */
value = 0.0f;
for(j = 0; j < column_a; j++){
value += Ai[j] * x[j];
/* value += A[i*column_a + j]*x[j]; */
}
Ai += column_a;
/* Constraints difference */
K[i] = b[i] - value;
/* Check constraint violation */
if (K[i] < 0.0f) {
violations++;
}
}
/* No violation */
if (violations == 0) {
free(K);
return true;
}
/* Solve Q*P = A' so that P = inv(Q)*A'. Trick to avoid forming A': solve Q*p = a_i for every row a_i of A, store the results row by row and transpose afterwards */
float *P = (float*)malloc(row_a * column_a * sizeof(float));
float *P0 = P;
Ai = A;
for (i = 0; i < row_a; i++) {
linsolve_chol(Q, P, Ai, column_a);
/* linsolve_gauss(Q, P, Ai, column_a, column_a, 0.0f); - Old */
P += column_a;
Ai += column_a;
}
P = P0;
tran(P, row_a, column_a);
/* Multiply H = A*inv(Q)*A' */
float *H = (float*)malloc(row_a * row_a * sizeof(float));
float *Hj;
mul(A, P, H, row_a, column_a, row_a);
/* Solve lambda from H*lambda = -K, where lambda >= 0 */
float *lambda = (float*)malloc(row_a * sizeof(float));
memset(lambda, 0, row_a * sizeof(float));
float *lambda_p = (float*)malloc(row_a * sizeof(float));
float w;
for(i = 0; i < MAX_ITERATIONS; i++){
/* Update */
memcpy(lambda_p, lambda, row_a * sizeof(float));
/* Use Gauss Seidel */
Hj = H;
for (j = 0; j < row_a; j++) {
/* w = H(j, :)*lambda */
w = 0.0f;
for (k = 0; k < row_a; k++) {
w += Hj[k] * lambda[k];
}
/* Projected Gauss-Seidel update of lambda(j) */
w = -1.0f / Hj[j] * (K[j] + w - Hj[j]*lambda[j]);
Hj += row_a;
lambda[j] = vmax(0.0f, w);
}
/* Check if we should break - Found the optimal solution */
w = 0.0f;
for (j = 0; j < row_a; j++) {
value = lambda[j] - lambda_p[j];
w += value * value;
}
#ifdef _MSC_VER
if (w < MIN_VALUE || _isnanf(w)) {
break;
}
#else
if (w < MIN_VALUE || isnanf(w)) {
break;
}
#endif
}
/* Compute x = x - P*lambda, i.e. x = -inv(Q)*c - inv(Q)*A'*lambda just like in the Octave code */
float *Plambda = (float*)malloc(column_a * sizeof(float));
mul(P, lambda, Plambda, column_a, row_a, 1);
for (j = 0; j < column_a; j++) {
x[j] -= Plambda[j];
}
/* Free */
free(K);
free(P);
free(H);
free(lambda);
free(lambda_p);
free(Plambda);
/* If w was nan - return false */
#ifdef _MSC_VER
if (_isnanf(w)) {
return false;
}
#else
if (isnanf(w)) {
return false;
}
#endif
/* If i equal to MAX_ITERATIONS, then it did not find a solution */
return i < MAX_ITERATIONS;
}
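As a side note before the row-by-row attempt: if a compile-time upper bound on row_a and column_a is acceptable (which it usually is in embedded code), the six malloc calls in opti can be replaced by carving the scratch arrays out of one fixed buffer. This is only a sketch of that idea; the name opti_static and the bounds MAX_ROW_A/MAX_COLUMN_A are made up for the example, and the elided body is exactly the body of opti above.
Code: Select all
#define MAX_ROW_A    20U  /* assumed upper bound on row_a (number of constraints) */
#define MAX_COLUMN_A 10U  /* assumed upper bound on column_a (number of variables) */

static bool opti_static(const float Q[], const float c[], const float A[], const float b[], float x[], const size_t row_a, const size_t column_a) {
    /* Worst-case scratch area for K, P, H, lambda, lambda_p and Plambda */
    static float workspace[MAX_ROW_A + MAX_ROW_A * MAX_COLUMN_A + MAX_ROW_A * MAX_ROW_A + MAX_ROW_A + MAX_ROW_A + MAX_COLUMN_A];
    if (row_a > MAX_ROW_A || column_a > MAX_COLUMN_A) {
        return false; /* Problem too large for the fixed buffer */
    }
    /* The six malloc calls in opti become pointer arithmetic into the workspace */
    float *K = workspace;               /* row_a elements            */
    float *P = K + row_a;               /* row_a * column_a elements */
    float *H = P + row_a * column_a;    /* row_a * row_a elements    */
    float *lambda = H + row_a * row_a;  /* row_a elements            */
    float *lambda_p = lambda + row_a;   /* row_a elements            */
    float *Plambda = lambda_p + row_a;  /* column_a elements         */
    /* ...from here on the body is exactly the body of opti above, with every
       malloc and free removed... */
    (void)Q; (void)c; (void)A; (void)b; (void)x;
    (void)K; (void)P; (void)H; (void)lambda; (void)lambda_p; (void)Plambda; /* only because the body is elided in this sketch */
    return true;
}
A static buffer makes the function non-reentrant and not thread safe; passing a caller-owned workspace pointer as an extra argument is the reentrant variant of the same idea.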
But to really minimize the dynamic allocation, you can use several for loops instead. As you can see, just taking one row at a time really ought to work very well (a sketch of how that could be completed comes after the code).
Code: Select all
static bool optislim(const float Q[], const float c[], const float A[], const float b[], float x[], const size_t row_a, const size_t column_a){
/* Declare */
size_t i, j, k, l;
/* Use Cholesky factorization to solve x from Qx = c because Q is symmetric positive definite */
linsolve_chol(Q, x, c, column_a);
/* Save address */
const float* Ai = A;
/* Turn x negative */
for (i = 0; i < column_a; i++) {
x[i] = -x[i];
/* If x was nan - return false - Use higher value on lambda constant! */
#ifdef _MSC_VER
if (_isnanf(x[i])) {
return false;
}
#else
if (isnanf(x[i])) {
return false;
}
#endif
}
/* Go through the constraints one row at a time and check where A*x > b */
float K, value;
for (i = 0; i < row_a; i++) {
/* Compute A(i,:)*x */
value = 0.0f;
for(j = 0; j < column_a; j++){
value += Ai[j] * x[j];
/* value += A[i*column_a + j]*x[j]; */
}
/* Constraints difference */
K = b[i] - value;
/* Check constraint violation */
if (K < 0.0f) {
/* Solve Q*P = Ai' for this violated row only (same trick as in opti, so A' is never formed) */
float* P = (float*)malloc(column_a * sizeof(float));
linsolve_chol(Q, P, Ai, column_a);
/* Build row i of H = A*inv(Q)*A' one element at a time: H(i,k) = A(k,:)*P */
for (k = 0; k < row_a; k++) {
value = 0.0f;
for (l = 0; l < column_a; l++) {
value += A[k * column_a + l] * P[l];
}
/* value now holds H(i,k), but there is nowhere to store it yet */
}
/* TODO: the Gauss-Seidel update of lambda should go here, but every sweep needs all of
lambda and one full row of H, so the iteration loop cannot simply be nested inside this
per-row loop */
/* Free */
free(P);
}
Ai += column_a;
}
/* Nothing more happens yet: the lambda solve (H*lambda = -K with Gauss-Seidel) and the
final update x = x - P*lambda from opti still have to be reworked so that they also run
one row at a time */
return true;
}
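To make the "one row at a time" idea concrete, here is a rough sketch of how the whole thing could look when neither P nor H is ever stored: Q*p = a_i is simply re-solved with linsolve_chol every time row i of H is needed. The name optirow is just something I made up, the NaN checks are left out, it uses the same helpers and constants as above (linsolve_chol, MIN_VALUE, MAX_ITERATIONS), it assumes linsolve_chol does not allocate internally, and I have not tested it, so treat it as a starting point rather than a finished replacement. Only lambda, lambda_p and one column-sized vector p remain as scratch (still malloc'ed here for clarity, but they could come from a fixed buffer as in the earlier sketch).
Code: Select all
static bool optirow(const float Q[], const float c[], const float A[], const float b[], float x[], const size_t row_a, const size_t column_a) {
    size_t i, j, k, iter;
    float value, w, Hii, diff, Ki;
    /* Unconstrained solution x = -inv(Q)*c (Q assumed symmetric positive definite) */
    linsolve_chol(Q, x, c, column_a);
    for (i = 0; i < column_a; i++) {
        x[i] = -x[i];
    }
    /* Any constraint violated? A row is violated when b(i) - A(i,:)*x < 0 */
    size_t violations = 0;
    for (i = 0; i < row_a; i++) {
        value = 0.0f;
        for (j = 0; j < column_a; j++) {
            value += A[i * column_a + j] * x[j];
        }
        if (b[i] - value < 0.0f) {
            violations++;
        }
    }
    if (violations == 0) {
        return true; /* Unconstrained optimum is already feasible */
    }
    /* Scratch: one inv(Q)*a_i column plus the multipliers - no P or H matrix */
    float *p = (float*)malloc(column_a * sizeof(float));
    float *lambda = (float*)malloc(row_a * sizeof(float));
    float *lambda_p = (float*)malloc(row_a * sizeof(float));
    memset(lambda, 0, row_a * sizeof(float));
    bool converged = false;
    for (iter = 0; iter < MAX_ITERATIONS && !converged; iter++) {
        memcpy(lambda_p, lambda, row_a * sizeof(float));
        /* Gauss-Seidel sweep where row i of H = A*inv(Q)*A' is rebuilt on the fly */
        for (i = 0; i < row_a; i++) {
            /* p = inv(Q)*a_i, where a_i is row i of A */
            linsolve_chol(Q, p, &A[i * column_a], column_a);
            /* K(i) = b(i) - A(i,:)*x, where x is still the unconstrained solution */
            value = 0.0f;
            for (j = 0; j < column_a; j++) {
                value += A[i * column_a + j] * x[j];
            }
            Ki = b[i] - value;
            /* w = H(i,:)*lambda and Hii = H(i,i), using H(i,k) = A(k,:)*p */
            w = 0.0f;
            Hii = 0.0f;
            for (k = 0; k < row_a; k++) {
                value = 0.0f;
                for (j = 0; j < column_a; j++) {
                    value += A[k * column_a + j] * p[j];
                }
                if (k == i) {
                    Hii = value;
                }
                w += value * lambda[k];
            }
            /* Projected Gauss-Seidel update, same formula as in opti */
            w = -1.0f / Hii * (Ki + w - Hii * lambda[i]);
            lambda[i] = w > 0.0f ? w : 0.0f;
        }
        /* Convergence check on the squared change in lambda */
        w = 0.0f;
        for (i = 0; i < row_a; i++) {
            diff = lambda[i] - lambda_p[i];
            w += diff * diff;
        }
        if (w < MIN_VALUE) {
            converged = true;
        }
    }
    /* x = x - inv(Q)*A'*lambda, accumulated row by row so A' is never formed */
    for (i = 0; i < row_a; i++) {
        if (lambda[i] > 0.0f) {
            linsolve_chol(Q, p, &A[i * column_a], column_a);
            for (j = 0; j < column_a; j++) {
                x[j] -= lambda[i] * p[j];
            }
        }
    }
    free(p);
    free(lambda);
    free(lambda_p);
    return converged;
}
The price is row_a extra linsolve_chol calls per Gauss-Seidel sweep (plus a few more for the final x update), so for anything but small problems opti with its precomputed H will be much faster; the point is only that the memory footprint drops to two row_a vectors and one column_a vector.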