//-----------------------------------------------------------------------------
// Once we've written our constraint equations in the symbolic algebra system,
// these routines linearize them, and solve by a modified Newton's method.
// This also contains the routines to detect non-convergence or inconsistency,
// and report diagnostics to the user.
//
// Copyright 2008-2013 Jonathan Westhues.
//-----------------------------------------------------------------------------
2008-04-20 11:35:10 +00:00
|
|
|
#include "solvespace.h"
|
|
|
|
|
2021-08-08 20:13:30 +00:00
|
|
|
#include <Eigen/Core>
|
2017-05-14 04:23:04 +00:00
|
|
|
#include <Eigen/SparseQR>
|
2010-01-18 10:28:47 +00:00
|
|
|
|
|
|
|
// The solver will converge all unknowns to within this tolerance. This must
// always be much less than LENGTH_EPS, and in practice should be much less.
const double System::CONVERGE_TOLERANCE = (LENGTH_EPS/(1e2));
|
2008-07-02 08:18:25 +00:00
|
|
|
|
2009-04-19 20:37:51 +00:00
|
|
|
bool System::WriteJacobian(int tag) {
|
2017-05-14 04:23:04 +00:00
|
|
|
// Clear all
|
|
|
|
mat.param.clear();
|
|
|
|
mat.eq.clear();
|
2021-12-23 17:37:33 +00:00
|
|
|
mat.A.sym.setZero();
|
2017-05-14 04:23:04 +00:00
|
|
|
mat.B.sym.clear();
|
2008-04-20 11:35:10 +00:00
|
|
|
|
2017-05-14 04:23:04 +00:00
|
|
|
for(Param &p : param) {
|
|
|
|
if(p.tag != tag) continue;
|
|
|
|
mat.param.push_back(p.h);
|
|
|
|
}
|
|
|
|
mat.n = mat.param.size();
|
2009-04-19 20:37:51 +00:00
|
|
|
|
2017-05-14 04:23:04 +00:00
|
|
|
for(Equation &e : eq) {
|
|
|
|
if(e.tag != tag) continue;
|
|
|
|
mat.eq.push_back(&e);
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
2017-05-14 04:23:04 +00:00
|
|
|
mat.m = mat.eq.size();
|
2021-12-23 17:37:33 +00:00
|
|
|
mat.A.sym.resize(mat.m, mat.n);
|
|
|
|
mat.A.sym.reserve(Eigen::VectorXi::Constant(mat.n, 10));
|
2008-04-20 11:35:10 +00:00
|
|
|
|
2017-05-12 02:39:07 +00:00
|
|
|
// Fill the param id to index map
|
|
|
|
std::map<uint32_t, int> paramToIndex;
|
|
|
|
for(int j = 0; j < mat.n; j++) {
|
|
|
|
paramToIndex[mat.param[j].v] = j;
|
|
|
|
}
|
2018-01-04 01:42:38 +00:00
|
|
|
|
2017-05-14 04:23:04 +00:00
|
|
|
if(mat.eq.size() >= MAX_UNKNOWNS) {
|
|
|
|
return false;
|
|
|
|
}
|
2021-12-23 17:17:40 +00:00
|
|
|
std::vector<hParam> paramsUsed;
|
|
|
|
// A single possibly-too-large allocation is probably preferred here?
|
|
|
|
mat.B.sym.reserve(mat.eq.size());
|
2017-05-14 04:23:04 +00:00
|
|
|
for(size_t i = 0; i < mat.eq.size(); i++) {
|
|
|
|
Equation *e = mat.eq[i];
|
|
|
|
if(e->tag != tag) continue;
|
|
|
|
Expr *f = e->e->FoldConstants();
|
2017-05-12 03:02:33 +00:00
|
|
|
f = f->DeepCopyWithParamsAsPointers(¶m, &(SK.param));
|
2008-04-30 04:52:34 +00:00
|
|
|
|
2021-12-23 17:17:40 +00:00
|
|
|
paramsUsed.clear();
|
2017-05-12 02:39:07 +00:00
|
|
|
f->ParamsUsedList(¶msUsed);
|
|
|
|
|
|
|
|
for(hParam &p : paramsUsed) {
|
|
|
|
auto j = paramToIndex.find(p.v);
|
|
|
|
if(j == paramToIndex.end()) continue;
|
|
|
|
Expr *pd = f->PartialWrt(p);
|
|
|
|
pd = pd->FoldConstants();
|
2021-12-23 16:14:27 +00:00
|
|
|
if(pd->IsZeroConst())
|
|
|
|
continue;
|
2021-12-23 17:37:33 +00:00
|
|
|
mat.A.sym.insert(i, j->second) = pd;
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
2021-12-23 17:17:40 +00:00
|
|
|
paramsUsed.clear();
|
2017-05-14 04:23:04 +00:00
|
|
|
mat.B.sym.push_back(f);
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
2009-04-19 20:37:51 +00:00
|
|
|
return true;
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
|
|
|
|
2016-05-05 05:54:05 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Numerically evaluate the symbolic Jacobian at the current parameter values:
// mat.A.num[i][j] = Eval(mat.A.sym[i][j]). Exact zeros are not stored, so
// the numeric matrix can be sparser than the symbolic one.
//-----------------------------------------------------------------------------
void System::EvalJacobian() {
    using namespace Eigen;
    mat.A.num.setZero();
    mat.A.num.resize(mat.m, mat.n);
    const int size = mat.A.sym.outerSize();

    for(int k = 0; k < size; k++) {
        // Walk only the stored (nonzero) symbolic entries of column k.
        for(SparseMatrix <Expr *>::InnerIterator it(mat.A.sym, k); it; ++it) {
            double value = it.value()->Eval();
            // EXACT() guards the float compare; entries that evaluate to
            // exactly zero are omitted from the numeric matrix.
            if(EXACT(value == 0.0)) continue;
            mat.A.num.insert(it.row(), it.col()) = value;
        }
    }
    mat.A.num.makeCompressed();
}
|
|
|
|
|
2008-05-07 07:10:20 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Return true if the given parameter appears in the `dragged` list, i.e. it
// is one of the params being moved interactively; SolveLeastSquares() gives
// such params a different column weight.
//-----------------------------------------------------------------------------
bool System::IsDragged(hParam p) {
    const auto b = dragged.begin();
    const auto e = dragged.end();
    return e != std::find(b, e, p);
}
|
|
|
|
|
2017-04-12 16:29:41 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Follow the substitution chain starting at p and return its final element.
// If the chain is cyclic (comes back around to p), the cycle is broken by
// clearing the offending substd link, and p itself is returned.
//-----------------------------------------------------------------------------
Param *System::GetLastParamSubstitution(Param *p) {
    Param *current = p;
    while(current->substd != NULL) {
        current = current->substd;
        // NOTE: this detects only cycles that pass back through p; a cycle
        // entirely among later elements would not terminate here — presumably
        // the way chains are built elsewhere prevents that case.
        if(current == p) {
            // Break the loop
            current->substd = NULL;
            break;
        }
    }
    return current;
}
|
|
|
|
|
|
|
|
//-----------------------------------------------------------------------------
// Re-root the substitution chain containing p so that a dragged param (if
// any is present in the chain) becomes the representative: every other chain
// member is pointed directly at it and tagged VAR_SUBSTITUTED, while the
// representative itself is left free (substd = NULL, tag = 0).
//-----------------------------------------------------------------------------
void System::SortSubstitutionByDragged(Param *p) {
    std::vector<Param *> subsParams;
    Param *by = NULL;
    Param *current = p;
    // Collect the whole chain, remembering the last dragged member seen.
    while(current != NULL) {
        subsParams.push_back(current);
        if(IsDragged(current->h)) {
            by = current;
        }
        current = current->substd;
    }
    // No dragged param in the chain: keep p as the representative.
    if(by == NULL) by = p;
    // (renamed loop variable: the original shadowed the parameter `p`)
    for(Param *sub : subsParams) {
        if(sub == by) continue;
        sub->substd = by;
        sub->tag = VAR_SUBSTITUTED;
    }
    by->substd = NULL;
    by->tag = 0;
}
|
|
|
|
|
|
|
|
//-----------------------------------------------------------------------------
// Recursively rewrite every PARAM reference in the expression tree e to refer
// to the final element of that param's substitution chain. Expects handle-
// based (PARAM) references, not resolved pointers (PARAM_PTR).
//-----------------------------------------------------------------------------
void System::SubstituteParamsByLast(Expr *e) {
    ssassert(e->op != Expr::Op::PARAM_PTR, "Expected an expression that refer to params via handles");

    if(e->op == Expr::Op::PARAM) {
        Param *p = param.FindByIdNoOops(e->parh);
        // Params not in our list (e.g. already-solved ones) are left alone.
        if(p != NULL) {
            Param *s = GetLastParamSubstitution(p);
            if(s != NULL) {
                e->parh = s->h;
            }
        }
    } else {
        // Interior node: recurse into however many children this op has.
        int c = e->Children();
        if(c >= 1) {
            SubstituteParamsByLast(e->a);
            if(c >= 2) SubstituteParamsByLast(e->b);
        }
    }
}
|
|
|
|
|
2016-05-05 05:54:05 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Solve the trivial equations of the form (param_a - param_b = 0) by direct
// substitution: build substitution chains linking equal params, re-root each
// chain at a dragged param where possible, and rewrite all equations and
// params to reference the chain representatives. Substituted equations and
// params are tagged EQ_SUBSTITUTED / VAR_SUBSTITUTED so the Newton solver
// skips them.
//-----------------------------------------------------------------------------
void System::SolveBySubstitution() {
    for(auto &teq : eq) {
        Expr *tex = teq.e;

        // Only equations shaped exactly like (a - b) with both operands
        // plain params are candidates for substitution.
        if(tex->op == Expr::Op::MINUS &&
           tex->a->op == Expr::Op::PARAM &&
           tex->b->op == Expr::Op::PARAM)
        {
            hParam a = tex->a->parh;
            hParam b = tex->b->parh;
            if(!(param.FindByIdNoOops(a) && param.FindByIdNoOops(b))) {
                // Don't substitute unless they're both solver params;
                // otherwise it's an equation that can be solved immediately,
                // or an error to flag later.
                continue;
            }

            // a == a is trivially satisfied; just drop the equation.
            if(a.v == b.v) {
                teq.tag = EQ_SUBSTITUTED;
                continue;
            }

            Param *pa = param.FindById(a);
            Param *pb = param.FindById(b);

            // Take the last substitution of parameter a
            // This resulted in creation of substitution chains
            Param *last = GetLastParamSubstitution(pa);
            last->substd = pb;
            last->tag = VAR_SUBSTITUTED;

            if(pb->substd != NULL) {
                // Break the loops
                GetLastParamSubstitution(pb);
                // if b loop was broken
                if(pb->substd == NULL) {
                    // Clear substitution
                    pb->tag = 0;
                }
            }
            teq.tag = EQ_SUBSTITUTED;
        }
    }

    // Prefer dragged params as chain representatives, so that dragging wins.
    for(Param &p : param) {
        SortSubstitutionByDragged(&p);
    }

    // Substitute all the equations
    for(auto &req : eq) {
        SubstituteParamsByLast(req.e);
    }

    // Substitute all the parameters with last substitutions
    for(auto &p : param) {
        if(p.substd == NULL) continue;
        p.substd = GetLastParamSubstitution(p.substd);
    }
}
|
|
|
|
|
2008-07-02 08:18:25 +00:00
|
|
|
//-----------------------------------------------------------------------------
|
2017-05-14 04:23:04 +00:00
|
|
|
// Calculate the rank of the Jacobian matrix
|
2008-07-02 08:18:25 +00:00
|
|
|
//-----------------------------------------------------------------------------
|
2016-05-05 05:54:05 +00:00
|
|
|
int System::CalculateRank() {
|
2017-05-14 04:23:04 +00:00
|
|
|
using namespace Eigen;
|
|
|
|
if(mat.n == 0 || mat.m == 0) return 0;
|
|
|
|
SparseQR <SparseMatrix<double>, COLAMDOrdering<int>> solver;
|
2021-12-23 17:37:33 +00:00
|
|
|
solver.compute(mat.A.num);
|
2017-05-14 04:23:04 +00:00
|
|
|
int result = solver.rank();
|
|
|
|
return result;
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
|
|
|
|
2017-05-11 13:53:12 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Evaluate the Jacobian and test whether its rank equals the number of
// equations (i.e. no linearly dependent / redundant equations). Optionally
// reports the remaining degrees of freedom through *dof.
//-----------------------------------------------------------------------------
bool System::TestRank(int *dof) {
    EvalJacobian();
    int jacobianRank = CalculateRank();
    // We are calculating dof based on real rank, not mat.m.
    // Using this approach we can calculate real dof even when redundant is allowed.
    if(dof != NULL) *dof = mat.n - jacobianRank;
    return jacobianRank == mat.m;
}
|
|
|
|
|
2017-05-14 04:23:04 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Solve A*X = B for X using a sparse QR factorization. Returns true on
// success; an empty system is considered trivially solved.
//-----------------------------------------------------------------------------
bool System::SolveLinearSystem(const Eigen::SparseMatrix <double> &A,
                               const Eigen::VectorXd &B, Eigen::VectorXd *X)
{
    using namespace Eigen;
    if(A.outerSize() == 0) return true;
    // Rank-revealing QR copes with the rank-deficient systems that arise
    // from redundant constraints.
    SparseQR<SparseMatrix<double>, COLAMDOrdering<int>> qr;
    //SimplicialLDLT<SparseMatrix<double>> solver;
    qr.compute(A);
    *X = qr.solve(B);
    return (qr.info() == Success);
}
|
2008-04-20 11:35:10 +00:00
|
|
|
|
2016-05-05 05:54:05 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Solve the underdetermined linearized system in the minimum-norm least-
// squares sense: scale the Jacobian columns (weighting dragged params
// differently), solve (A A^T) z = B, then recover X = A^T z and un-apply
// the scaling. Returns false if the linear solve fails.
//-----------------------------------------------------------------------------
bool System::SolveLeastSquares() {
    using namespace Eigen;
    // Scale the columns; this scale weights the parameters for the least
    // squares solve, so that we can encourage the solver to make bigger
    // changes in some parameters, and smaller in others.
    mat.scale = VectorXd(mat.n);
    for(int c = 0; c < mat.n; c++) {
        if(IsDragged(mat.param[c])) {
            // It's least squares, so this parameter doesn't need to be all
            // that big to get a large effect.
            mat.scale[c] = 1/20.0;
        } else {
            mat.scale[c] = 1;
        }
    }

    // Apply the column scaling in place to the numeric Jacobian.
    int size = mat.A.sym.outerSize();
    for(int k = 0; k < size; k++) {
        for(SparseMatrix<double>::InnerIterator it(mat.A.num, k); it; ++it) {
            it.valueRef() *= mat.scale[it.col()];
        }
    }

    // Normal equations of the minimum-norm problem: (A A^T) z = B.
    SparseMatrix<double> AAt = mat.A.num * mat.A.num.transpose();
    AAt.makeCompressed();
    VectorXd z(mat.n);

    if(!SolveLinearSystem(AAt, mat.B.num, &z)) return false;

    // Minimum-norm solution in the scaled space.
    mat.X = mat.A.num.transpose() * z;

    // Undo the column scaling to get the actual parameter step.
    for(int c = 0; c < mat.n; c++) {
        mat.X[c] *= mat.scale[c];
    }
    return true;
}
|
|
|
|
|
|
|
|
//-----------------------------------------------------------------------------
// Modified Newton's method: repeatedly linearize the equations, solve the
// least-squares step, and apply it to the params, until every residual is
// within CONVERGE_TOLERANCE or the iteration limit (50) is hit. Returns
// false on divergence (NaN/unreasonable values) or non-convergence.
// NOTE(review): the `tag` parameter appears unused here — presumably the
// tagged params/equations were already selected by WriteJacobian(tag).
//-----------------------------------------------------------------------------
bool System::NewtonSolve(int tag) {

    int iter = 0;
    bool converged = false;
    int i;

    // Evaluate the functions at our operating point.
    mat.B.num = Eigen::VectorXd(mat.m);
    for(i = 0; i < mat.m; i++) {
        mat.B.num[i] = (mat.B.sym[i])->Eval();
    }
    do {
        // And evaluate the Jacobian at our initial operating point.
        EvalJacobian();

        if(!SolveLeastSquares()) break;

        // Take the Newton step;
        // J(x_n) (x_{n+1} - x_n) = 0 - F(x_n)
        for(i = 0; i < mat.n; i++) {
            Param *p = param.FindById(mat.param[i]);
            p->val -= mat.X[i];
            if(IsReasonable(p->val)) {
                // Very bad, and clearly not convergent
                return false;
            }
        }

        // Re-evalute the functions, since the params have just changed.
        for(i = 0; i < mat.m; i++) {
            mat.B.num[i] = (mat.B.sym[i])->Eval();
        }
        // Check for convergence
        converged = true;
        for(i = 0; i < mat.m; i++) {
            if(IsReasonable(mat.B.num[i])) {
                return false;
            }
            if(fabs(mat.B.num[i]) > CONVERGE_TOLERANCE) {
                converged = false;
                break;
            }
        }
    } while(iter++ < 50 && !converged);

    return converged;
}
|
|
|
|
|
2009-04-20 07:30:09 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Populate the equation list `eq` for group g from three sources: the group's
// constraints (except the one named by hc), the group's entities, and the
// group itself. Reference-dimension and relaxed-constraint modes suppress
// some constraint equations.
//-----------------------------------------------------------------------------
void System::WriteEquationsExceptFor(hConstraint hc, Group *g) {
    // Generate all the equations from constraints in this group
    for(auto &con : SK.constraint) {
        ConstraintBase *c = &con;
        if(c->group != g->h) continue;
        // Skip the excluded constraint (used when probing which constraint
        // to remove to fix an over-constrained sketch).
        if(c->h == hc) continue;

        if(c->HasLabel() && c->type != Constraint::Type::COMMENT &&
           g->allDimsReference)
        {
            // When all dimensions are reference, we adjust them to display
            // the correct value, and then don't generate any equations.
            c->ModifyToSatisfy();
            continue;
        }
        if(g->relaxConstraints && c->type != Constraint::Type::POINTS_COINCIDENT) {
            // When the constraints are relaxed, we keep only the point-
            // coincident constraints, and the constraints generated by
            // the entities and groups.
            continue;
        }

        c->GenerateEquations(&eq);
    }
    // And the equations from entities
    for(auto &ent : SK.entity) {
        EntityBase *e = &ent;
        if(e->group != g->h) continue;

        e->GenerateEquations(&eq);
    }
    // And from the groups themselves
    g->GenerateEquations(&eq);
}
|
|
|
|
|
2016-10-22 15:08:41 +00:00
|
|
|
//-----------------------------------------------------------------------------
// For an over-constrained group, find which constraints could individually be
// removed to make the Jacobian full-rank again, and append their handles to
// *bad. Tries non-point-coincident constraints first, then point-coincident
// ones, so the latter appear last in the list. Gives up (setting
// g->solved.timeout) once g->solved.findToFixTimeout milliseconds elapse.
//-----------------------------------------------------------------------------
void System::FindWhichToRemoveToFixJacobian(Group *g, List<hConstraint> *bad, bool forceDofCheck) {
    auto time = GetMilliseconds();
    g->solved.timeout = false;
    int a;

    for(a = 0; a < 2; a++) {
        for(auto &con : SK.constraint) {
            // This is O(constraints) full re-solves; bail out on timeout.
            if((GetMilliseconds() - time) > g->solved.findToFixTimeout) {
                g->solved.timeout = true;
                return;
            }

            ConstraintBase *c = &con;
            if(c->group != g->h) continue;
            if((c->type == Constraint::Type::POINTS_COINCIDENT && a == 0) ||
               (c->type != Constraint::Type::POINTS_COINCIDENT && a == 1))
            {
                // Do the constraints in two passes: first everything but
                // the point-coincident constraints, then only those
                // constraints (so they appear last in the list).
                continue;
            }

            // Rebuild the system with this one constraint excluded.
            param.ClearTags();
            eq.Clear();
            WriteEquationsExceptFor(c->h, g);
            eq.ClearTags();

            // It's a major speedup to solve the easy ones by substitution here,
            // and that doesn't break anything.
            if(!forceDofCheck) {
                SolveBySubstitution();
            }

            WriteJacobian(0);
            EvalJacobian();

            int rank = CalculateRank();
            if(rank == mat.m) {
                // We fixed it by removing this constraint
                bad->Add(&(c->h));
            }
        }
    }
}
|
|
|
|
|
2019-05-24 15:40:18 +00:00
|
|
|
SolveResult System::Solve(Group *g, int *rank, int *dof, List<hConstraint> *bad,
|
2016-10-22 15:08:41 +00:00
|
|
|
bool andFindBad, bool andFindFree, bool forceDofCheck)
|
2009-04-20 07:30:09 +00:00
|
|
|
{
|
|
|
|
WriteEquationsExceptFor(Constraint::NO_CONSTRAINT, g);
|
2008-05-26 09:56:50 +00:00
|
|
|
|
Distinguish overconstrained and redundantly constrained sketches.
When a solver error arises after a change to the sketch, it should
be easy to understand exactly why it happened. Before this change,
two functionally distinct modes of failure were lumped into one:
the same "redundant constraints" message was displayed when all
degrees of freedom were exhausted and the had a solution, but also
when it had not.
To understand why this is problematic, let's examine several ways
in which we can end up with linearly dependent equations in our
system:
0) create a triangle, then constrain two different pairs of edges
to be perpendicular
1) add two distinct distance constraints on the same segment
2) add two identical distance constraints on the same segment
3) create a triangle, then constrain edges to lengths a, b, and c
so that a+b=c
The case (0) is our baseline case: the constraints in it make
the system unsolvable yet they do not remove more degrees of freedom
than the amount we started with. So the displayed error is
"unsolvable constraints".
The constraints in case (1) remove one too many degrees of freedom,
but otherwise are quite like the case (0): the cause of failure that
is useful to the user is that the constraints are mutually
incompatible.
The constraints in cases (2) and (3) however are not like the others:
there is a set of parameters that satisfies all of the constraints,
but the constraints still remove one degree of freedom too many.
It makes sense to display a different error message for cases (2)
and (3) because in practice, cases like this are likely to arise from
adjustment of constraint values on sketches corresponding to systems
that have a small amount of degenerate solutions, and this is very
different from systems arising in cases like (0) where no adjustment
of constraint values will ever result in a successful solution.
So the error message displayed is "redundant constraints".
At last, this commit makes cases (0) and (1) display a message
with only a minor difference in wording. This is deliberate.
The reason is that the facts "the system is unsolvable" and
"the system is unsolvable and also has linearly dependent equations"
present no meaningful, actionable difference to the user, and placing
emphasis on it would only cause confusion.
However, they are still distinguished, because in case (0) we
list all relevant constraints (and thus we say they are "mutually
incompatible") but in case (1) we only list the ones that constrain
the sketch further than some valid solution (and we say they are
"unsatisfied").
2016-01-21 09:28:05 +00:00
|
|
|
bool rankOk;
|
|
|
|
|
2008-04-27 09:31:56 +00:00
|
|
|
/*
|
2021-05-24 20:38:18 +00:00
|
|
|
int x;
|
2008-04-27 09:03:01 +00:00
|
|
|
dbp("%d equations", eq.n);
|
2021-05-24 20:38:18 +00:00
|
|
|
for(x = 0; x < eq.n; x++) {
|
|
|
|
dbp(" %.3f = %s = 0", eq[x].e->Eval(), eq[x].e->Print());
|
2008-04-27 09:03:01 +00:00
|
|
|
}
|
2008-05-08 07:30:30 +00:00
|
|
|
dbp("%d parameters", param.n);
|
2021-05-24 20:38:18 +00:00
|
|
|
for(x = 0; x < param.n; x++) {
|
|
|
|
dbp(" param %08x at %.3f", param[x].h.v, param[x].val);
|
2008-05-08 07:30:30 +00:00
|
|
|
} */
|
2008-04-20 11:35:10 +00:00
|
|
|
|
2008-06-26 09:34:26 +00:00
|
|
|
// All params and equations are assigned to group zero.
|
2008-04-20 11:35:10 +00:00
|
|
|
param.ClearTags();
|
|
|
|
eq.ClearTags();
|
2015-03-29 00:30:52 +00:00
|
|
|
|
2017-05-11 13:55:41 +00:00
|
|
|
// Since we are suppressing dof calculation or allowing redundant, we
|
|
|
|
// can't / don't want to catch result of dof checking without substitution
|
|
|
|
if(g->suppressDofCalculation || g->allowRedundant || !forceDofCheck) {
|
2016-10-22 15:08:41 +00:00
|
|
|
SolveBySubstitution();
|
|
|
|
}
|
2008-05-07 07:10:20 +00:00
|
|
|
|
2008-06-26 09:34:26 +00:00
|
|
|
// Before solving the big system, see if we can find any equations that
|
|
|
|
// are soluble alone. This can be a huge speedup. We don't know whether
|
|
|
|
// the system is consistent yet, but if it isn't then we'll catch that
|
|
|
|
// later.
|
|
|
|
int alone = 1;
|
2018-01-04 01:42:38 +00:00
|
|
|
for(auto &e : eq) {
|
|
|
|
if(e.tag != 0)
|
|
|
|
continue;
|
2008-05-07 07:10:20 +00:00
|
|
|
|
2018-01-04 01:42:38 +00:00
|
|
|
hParam hp = e.e->ReferencedParams(¶m);
|
2019-07-09 14:44:57 +00:00
|
|
|
if(hp == Expr::NO_PARAMS) continue;
|
|
|
|
if(hp == Expr::MULTIPLE_PARAMS) continue;
|
2008-04-30 04:52:34 +00:00
|
|
|
|
2008-06-26 09:34:26 +00:00
|
|
|
Param *p = param.FindById(hp);
|
|
|
|
if(p->tag != 0) continue; // let rank test catch inconsistency
|
|
|
|
|
2018-01-04 01:42:38 +00:00
|
|
|
e.tag = alone;
|
2008-06-26 09:34:26 +00:00
|
|
|
p->tag = alone;
|
2009-04-19 03:55:46 +00:00
|
|
|
WriteJacobian(alone);
|
2008-06-26 09:34:26 +00:00
|
|
|
if(!NewtonSolve(alone)) {
|
2016-11-17 13:57:31 +00:00
|
|
|
// We don't do the rank test, so let's arbitrarily return
|
|
|
|
// the DIDNT_CONVERGE result here.
|
|
|
|
rankOk = true;
|
2008-06-26 09:34:26 +00:00
|
|
|
// Failed to converge, bail out early
|
|
|
|
goto didnt_converge;
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
2008-06-26 09:34:26 +00:00
|
|
|
alone++;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Now write the Jacobian for what's left, and do a rank test; that
|
|
|
|
// tells us if the system is inconsistently constrained.
|
2009-04-19 20:37:51 +00:00
|
|
|
if(!WriteJacobian(0)) {
|
Convert all enumerations to use `enum class`.
Specifically, take the old code that looks like this:
class Foo {
enum { X = 1, Y = 2 };
int kind;
}
... foo.kind = Foo::X; ...
and convert it to this:
class Foo {
enum class Kind : uint32_t { X = 1, Y = 2 };
Kind kind;
}
... foo.kind = Foo::Kind::X;
(In some cases the enumeration would not be in the class namespace,
such as when it is generally useful.)
The benefits are as follows:
* The type of the field gives a clear indication of intent, both
to humans and tools (such as binding generators).
* The compiler is able to automatically warn when a switch is not
exhaustive; but this is currently suppressed by the
default: ssassert(false, ...)
idiom.
* Integers and plain enums are weakly type checked: they implicitly
convert into each other. This can hide bugs where type conversion
is performed but not intended. Enum classes are strongly type
checked.
* Plain enums pollute parent namespaces; enum classes do not.
Almost every defined enum we have already has a kind of ad-hoc
namespacing via `NAMESPACE_`, which is now explicit.
* Plain enums do not have a well-defined ABI size, which is
important for bindings. Enum classes can have it, if specified.
We specify the base type for all enums as uint32_t, which is
a safe choice and allows us to not change the numeric values
of any variants.
This commit introduces absolutely no functional change to the code,
just renaming and change of types. It handles almost all cases,
except GraphicsWindow::pending.operation, which needs minor
functional change.
2016-05-20 08:31:20 +00:00
|
|
|
return SolveResult::TOO_MANY_UNKNOWNS;
|
2009-04-19 20:37:51 +00:00
|
|
|
}
|
2017-05-11 13:53:12 +00:00
|
|
|
// Clear dof value in order to have indication when dof is actually not calculated
|
|
|
|
if(dof != NULL) *dof = -1;
|
2017-05-11 13:55:41 +00:00
|
|
|
// We are suppressing or allowing redundant, so we no need to catch unsolveable + redundant
|
|
|
|
rankOk = (!g->suppressDofCalculation && !g->allowRedundant) ? TestRank(dof) : true;
|
Distinguish overconstrained and redundantly constrained sketches.
When a solver error arises after a change to the sketch, it should
be easy to understand exactly why it happened. Before this change,
two functionally distinct modes of failure were lumped into one:
the same "redundant constraints" message was displayed when all
degrees of freedom were exhausted and the had a solution, but also
when it had not.
To understand why this is problematic, let's examine several ways
in which we can end up with linearly dependent equations in our
system:
0) create a triangle, then constrain two different pairs of edges
to be perpendicular
1) add two distinct distance constraints on the same segment
2) add two identical distance constraints on the same segment
3) create a triangle, then constrain edges to lengths a, b, and c
so that a+b=c
The case (0) is our baseline case: the constraints in it make
the system unsolvable yet they do not remove more degrees of freedom
than the amount we started with. So the displayed error is
"unsolvable constraints".
The constraints in case (1) remove one too many degrees of freedom,
but otherwise are quite like the case (0): the cause of failure that
is useful to the user is that the constraints are mutually
incompatible.
The constraints in cases (2) and (3) however are not like the others:
there is a set of parameters that satisfies all of the constraints,
but the constraints still remove one degree of freedom too many.
It makes sense to display a different error message for cases (2)
and (3) because in practice, cases like this are likely to arise from
adjustment of constraint values on sketches corresponding to systems
that have a small amount of degenerate solutions, and this is very
different from systems arising in cases like (0) where no adjustment
of constraint values will ever result in a successful solution.
So the error message displayed is "redundant constraints".
At last, this commit makes cases (0) and (1) display a message
with only a minor difference in wording. This is deliberate.
The reason is that the facts "the system is unsolvable" and
"the system is unsolvable and also has linearly dependent equations"
present no meaningful, actionable difference to the user, and placing
emphasis on it would only cause confusion.
However, they are still distinguished, because in case (0) we
list all relevant constraints (and thus we say they are "mutually
incompatible") but in case (1) we only list the ones that constrain
the sketch further than some valid solution (and we say they are
"unsatisfied").
2016-01-21 09:28:05 +00:00
|
|
|
|
2016-01-21 08:24:58 +00:00
|
|
|
// And do the leftovers as one big system
|
|
|
|
if(!NewtonSolve(0)) {
|
|
|
|
goto didnt_converge;
|
|
|
|
}
|
|
|
|
|
2017-05-11 13:55:41 +00:00
|
|
|
// Here we are want to calculate dof even when redundant is allowed, so just handle suppressing
|
|
|
|
rankOk = (!g->suppressDofCalculation) ? TestRank(dof) : true;
|
2016-01-21 15:01:43 +00:00
|
|
|
if(!rankOk) {
|
2019-05-24 15:43:20 +00:00
|
|
|
if(andFindBad) FindWhichToRemoveToFixJacobian(g, bad, forceDofCheck);
|
2016-08-14 18:38:13 +00:00
|
|
|
} else {
|
2017-01-11 13:59:07 +00:00
|
|
|
MarkParamsFree(andFindFree);
|
2009-01-04 12:01:46 +00:00
|
|
|
}
|
2008-06-26 09:34:26 +00:00
|
|
|
// System solved correctly, so write the new values back in to the
|
|
|
|
// main parameter table.
|
2018-01-04 01:42:38 +00:00
|
|
|
for(auto &p : param) {
|
2008-06-26 09:34:26 +00:00
|
|
|
double val;
|
2018-01-04 01:42:38 +00:00
|
|
|
if(p.tag == VAR_SUBSTITUTED) {
|
2017-04-12 16:29:41 +00:00
|
|
|
val = p.substd->val;
|
2008-06-26 09:34:26 +00:00
|
|
|
} else {
|
2018-01-04 01:42:38 +00:00
|
|
|
val = p.val;
|
2008-05-26 09:56:50 +00:00
|
|
|
}
|
2018-01-04 01:42:38 +00:00
|
|
|
Param *pp = SK.GetParam(p.h);
|
2008-06-26 09:34:26 +00:00
|
|
|
pp->val = val;
|
|
|
|
pp->known = true;
|
2018-01-04 01:42:38 +00:00
|
|
|
pp->free = p.free;
|
2008-06-26 09:34:26 +00:00
|
|
|
}
|
Convert all enumerations to use `enum class`.
Specifically, take the old code that looks like this:
class Foo {
enum { X = 1, Y = 2 };
int kind;
}
... foo.kind = Foo::X; ...
and convert it to this:
class Foo {
enum class Kind : uint32_t { X = 1, Y = 2 };
Kind kind;
}
... foo.kind = Foo::Kind::X;
(In some cases the enumeration would not be in the class namespace,
such as when it is generally useful.)
The benefits are as follows:
* The type of the field gives a clear indication of intent, both
to humans and tools (such as binding generators).
* The compiler is able to automatically warn when a switch is not
exhaustive; but this is currently suppressed by the
default: ssassert(false, ...)
idiom.
* Integers and plain enums are weakly type checked: they implicitly
convert into each other. This can hide bugs where type conversion
is performed but not intended. Enum classes are strongly type
checked.
* Plain enums pollute parent namespaces; enum classes do not.
Almost every defined enum we have already has a kind of ad-hoc
namespacing via `NAMESPACE_`, which is now explicit.
* Plain enums do not have a well-defined ABI size, which is
important for bindings. Enum classes can have it, if specified.
We specify the base type for all enums as uint32_t, which is
a safe choice and allows us to not change the numeric values
of any variants.
This commit introduces absolutely no functional change to the code,
just renaming and change of types. It handles almost all cases,
except GraphicsWindow::pending.operation, which needs minor
functional change.
2016-05-20 08:31:20 +00:00
|
|
|
return rankOk ? SolveResult::OKAY : SolveResult::REDUNDANT_OKAY;
|
2008-06-26 09:34:26 +00:00
|
|
|
|
|
|
|
didnt_converge:
|
2009-04-19 05:53:16 +00:00
|
|
|
SK.constraint.ClearTags();
|
2021-08-08 20:13:30 +00:00
|
|
|
// Not using range-for here because index is used in additional ways
|
2021-05-24 20:38:18 +00:00
|
|
|
for(size_t i = 0; i < mat.eq.size(); i++) {
|
2020-05-12 13:59:23 +00:00
|
|
|
if(fabs(mat.B.num[i]) > CONVERGE_TOLERANCE || IsReasonable(mat.B.num[i])) {
|
2008-09-05 11:25:53 +00:00
|
|
|
// This constraint is unsatisfied.
|
2017-05-14 04:23:04 +00:00
|
|
|
if(!mat.eq[i]->h.isFromConstraint()) continue;
|
2008-09-05 11:25:53 +00:00
|
|
|
|
2017-05-14 04:23:04 +00:00
|
|
|
hConstraint hc = mat.eq[i]->h.constraint();
|
2009-04-20 07:30:09 +00:00
|
|
|
ConstraintBase *c = SK.constraint.FindByIdNoOops(hc);
|
2008-09-05 11:25:53 +00:00
|
|
|
if(!c) continue;
|
|
|
|
// Don't double-show constraints that generated multiple
|
|
|
|
// unsatisfied equations
|
|
|
|
if(!c->tag) {
|
2009-04-20 07:30:09 +00:00
|
|
|
bad->Add(&(c->h));
|
2008-09-05 11:25:53 +00:00
|
|
|
c->tag = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2015-03-29 00:30:52 +00:00
|
|
|
|
Convert all enumerations to use `enum class`.
Specifically, take the old code that looks like this:
class Foo {
enum { X = 1, Y = 2 };
int kind;
}
... foo.kind = Foo::X; ...
and convert it to this:
class Foo {
enum class Kind : uint32_t { X = 1, Y = 2 };
Kind kind;
}
... foo.kind = Foo::Kind::X;
(In some cases the enumeration would not be in the class namespace,
such as when it is generally useful.)
The benefits are as follows:
* The type of the field gives a clear indication of intent, both
to humans and tools (such as binding generators).
* The compiler is able to automatically warn when a switch is not
exhaustive; but this is currently suppressed by the
default: ssassert(false, ...)
idiom.
* Integers and plain enums are weakly type checked: they implicitly
convert into each other. This can hide bugs where type conversion
is performed but not intended. Enum classes are strongly type
checked.
* Plain enums pollute parent namespaces; enum classes do not.
Almost every defined enum we have already has a kind of ad-hoc
namespacing via `NAMESPACE_`, which is now explicit.
* Plain enums do not have a well-defined ABI size, which is
important for bindings. Enum classes can have it, if specified.
We specify the base type for all enums as uint32_t, which is
a safe choice and allows us to not change the numeric values
of any variants.
This commit introduces absolutely no functional change to the code,
just renaming and change of types. It handles almost all cases,
except GraphicsWindow::pending.operation, which needs minor
functional change.
2016-05-20 08:31:20 +00:00
|
|
|
return rankOk ? SolveResult::DIDNT_CONVERGE : SolveResult::REDUNDANT_DIDNT_CONVERGE;
|
2008-04-20 11:35:10 +00:00
|
|
|
}
|
|
|
|
|
2019-05-24 15:40:18 +00:00
|
|
|
//-----------------------------------------------------------------------------
// Like Solve(), but only performs the rank test on the Jacobian, without
// actually Newton-solving. Used to cheaply detect redundant or inconsistent
// constraints for the given group.
//   g           - the group to write equations for
//   rank, dof   - out params (may be NULL)
//   bad         - receives the constraints to remove to fix the Jacobian
//   andFindBad  - if true, diagnose which constraints caused the rank drop
//   andFindFree - if true, mark the free (unbound) parameters afterwards
//-----------------------------------------------------------------------------
SolveResult System::SolveRank(Group *g, int *rank, int *dof, List<hConstraint> *bad,
                              bool andFindBad, bool andFindFree)
{
    WriteEquationsExceptFor(Constraint::NO_CONSTRAINT, g);

    // All params and equations are assigned to group zero.
    param.ClearTags();
    eq.ClearTags();

    // Now write the Jacobian, and do a rank test; that
    // tells us if the system is inconsistently constrained.
    if(!WriteJacobian(0)) {
        return SolveResult::TOO_MANY_UNKNOWNS;
    }

    bool rankOk = TestRank(dof);
    if(!rankOk) {
        // When we are testing with redundant allowed, we don't want to have
        // additional info about redundants, since this test works only for a
        // single redundant constraint.
        if(!g->suppressDofCalculation && !g->allowRedundant) {
            if(andFindBad) FindWhichToRemoveToFixJacobian(g, bad, true);
        }
    } else {
        MarkParamsFree(andFindFree);
    }
    return rankOk ? SolveResult::OKAY : SolveResult::REDUNDANT_OKAY;
}
|
|
|
|
|
2016-05-05 05:54:05 +00:00
|
|
|
void System::Clear() {
|
2013-09-19 04:33:12 +00:00
|
|
|
entity.Clear();
|
|
|
|
param.Clear();
|
|
|
|
eq.Clear();
|
|
|
|
dragged.Clear();
|
2021-12-23 17:37:33 +00:00
|
|
|
mat.A.num.setZero();
|
|
|
|
mat.A.sym.setZero();
|
2013-09-19 04:33:12 +00:00
|
|
|
}
|
2017-01-11 13:59:07 +00:00
|
|
|
|
|
|
|
void System::MarkParamsFree(bool find) {
|
|
|
|
// If requested, find all the free (unbound) variables. This might be
|
|
|
|
// more than the number of degrees of freedom. Don't always do this,
|
|
|
|
// because the display would get annoying and it's slow.
|
2018-01-04 01:42:38 +00:00
|
|
|
for(auto &p : param) {
|
|
|
|
p.free = false;
|
2017-01-11 13:59:07 +00:00
|
|
|
|
|
|
|
if(find) {
|
2018-01-04 01:42:38 +00:00
|
|
|
if(p.tag == 0) {
|
|
|
|
p.tag = VAR_DOF_TEST;
|
2017-01-11 13:59:07 +00:00
|
|
|
WriteJacobian(0);
|
|
|
|
EvalJacobian();
|
|
|
|
int rank = CalculateRank();
|
|
|
|
if(rank == mat.m) {
|
2018-01-04 01:42:38 +00:00
|
|
|
p.free = true;
|
2017-01-11 13:59:07 +00:00
|
|
|
}
|
2018-01-04 01:42:38 +00:00
|
|
|
p.tag = 0;
|
2017-01-11 13:59:07 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|