Knowledge source
Article:
https://link.springer.com/chapter/10.1007/978-3-319-26287-1_14
Code:
https://github.com/ezulkosk/vsids
Location:
\VISDS-solvers\vsids-master-1\vsids-master-1\adaptvsids\core
Technical points:
During conflict analysis, the technique watches how the LBD of the dynamically added (newly generated) learnt clauses fluctuates. A moving average of LBD is maintained over a window (currently the full window, i.e. the entire history so far), and the LBD of the current learnt clause is compared against this moving average to decide how much the variable-activity bump increment grows. The algorithm is as follows:
int lbd_val = lbd(learnt_clause);                                   // compute the LBD of the newly learnt clause
lbd_ema = lbd_ema_decay * lbd_ema + (1 - lbd_ema_decay) * lbd_val;  // moving average of the overall LBD
if (lbd_val >= lbd_ema) {                // case 1: LBD at or above the moving average
    decays++;                            // count uses of the normal decay mode
    varDecayActivity(var_decay);         // normal mode: var_decay = 0.85, variable activity bumps grow fast
} else {                                 // case 2: LBD below the moving average
    thresh_decays++;                     // count uses of the threshold decay mode
    varDecayActivity(var_thresh_decay);  // var_thresh_decay = 0.99, variable activity bumps grow slowly
}
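To see how the comparison behaves over a run of conflicts, here is a minimal standalone sketch (not the solver code): it feeds a made-up stream of LBD values through the same EMA update, assuming the 0.95 decay and the zero-initialized lbd_ema from the constructor, and prints which decay mode would be chosen.

#include <cstdio>

int main() {
    const double lbd_ema_decay = 0.95;          // same default as opt_lbd_ema_decay
    double lbd_ema = 0.0;                       // zero-initialized, as in the Solver constructor
    int lbd_stream[] = {5, 4, 6, 2, 2, 7, 3};   // made-up LBD values of successive learnt clauses

    for (int lbd_val : lbd_stream) {
        lbd_ema = lbd_ema_decay * lbd_ema + (1 - lbd_ema_decay) * lbd_val;
        const char* mode = (lbd_val >= lbd_ema) ? "normal decay (0.85)"
                                                : "threshold decay (0.99)";
        printf("lbd = %d  ema = %.3f  -> %s\n", lbd_val, lbd_ema, mode);
    }
    return 0;
}

Note that because lbd_ema starts at 0, the first conflicts almost always satisfy lbd_val >= lbd_ema and therefore fall into the normal (fast) decay mode until the average warms up.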
Code analysis
Solver.h
class Solver {
    ...
    // Statistics: (read-only member variable)
    //
    uint64_t solves, starts, decisions, rnd_decisions, propagations, conflicts, backjumps, decays, thresh_decays;
    uint64_t dec_vars, clauses_literals, learnts_literals, max_literals, tot_literals;
    ...
    long double lbd_ema;
    long double lbd_ema_decay;
inline int Solver::lbd(vec<Lit>& c) {
    int lbd = 0;
    // LBD = number of distinct decision levels among the clause's literals.
    for (int i = 0; i < c.size(); i++) {
        int lvl = level(var(c[i]));
        if (!lbd_seen[lvl]) {
            lbd_seen[lvl] = true;
            lbd++;
        }
    }
    // Second pass resets lbd_seen so it can be reused by the next call.
    for (int i = 0; i < c.size(); i++) {
        int lvl = level(var(c[i]));
        lbd_seen[lvl] = false;
    }
    return lbd;
}
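For readers unfamiliar with the metric, a standalone illustration of the same counting (plain std::vector containers stand in for the solver's vec<Lit> and lbd_seen; the decision levels are made up):

#include <cstdio>
#include <vector>

// LBD ("literal block distance") of a clause = number of distinct decision
// levels among its literals.
int main() {
    std::vector<int>  levels = {5, 5, 3, 7, 3};   // decision levels of the clause's literals
    std::vector<char> seen(16, 0);                // plays the role of lbd_seen

    int lbd = 0;
    for (int lvl : levels)
        if (!seen[lvl]) { seen[lvl] = 1; lbd++; } // count each distinct level once
    for (int lvl : levels)
        seen[lvl] = 0;                            // reset seen[] to all-false

    printf("LBD = %d\n", lbd);                    // prints 3 (levels 3, 5, 7)
    return 0;
}

The second pass over the clause is what keeps lbd_seen reusable across calls without clearing or reallocating the whole array.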
Solver.cpp
static DoubleOption opt_lbd_ema_decay ...
Solver::Solver() :
    ...
    , lbd_ema        (0)
    , lbd_ema_decay  (opt_lbd_ema_decay)   // defaults to 0.95
{}
lbool Solver::search(int nof_conflicts)
{
    assert(ok);
    int         backtrack_level;
    int         conflictC = 0;
    vec<Lit>    learnt_clause;
    starts++;

    for (;;){
        CRef confl = propagate();
        if (confl != CRef_Undef){
            // CONFLICT
            conflicts++; conflictC++;
            if (decisionLevel() == 0) return l_False;

            learnt_clause.clear();
            analyze(confl, learnt_clause, backtrack_level);
            backjumps += decisionLevel() - backtrack_level;
            cancelUntil(backtrack_level);

            if (learnt_clause.size() == 1){
                uncheckedEnqueue(learnt_clause[0]);
            }else{
                CRef cr = ca.alloc(learnt_clause, true);
                learnts.push(cr);
                attachClause(cr);
                claBumpActivity(ca[cr]);
                uncheckedEnqueue(learnt_clause[0], cr);
            }

            int lbd_val = lbd(learnt_clause);
            lbd_ema = lbd_ema_decay * lbd_ema + (1 - lbd_ema_decay) * lbd_val;
            if (lbd_val >= lbd_ema) {
                decays++;
                varDecayActivity(var_decay);         // at or above the moving average: normal decay factor 0.85, activity bumps grow fast
            } else {
                thresh_decays++;
                varDecayActivity(var_thresh_decay);  // below the moving average: decay factor 0.99, activity bumps grow slowly
            }
            claDecayActivity();

            if (--learntsize_adjust_cnt == 0){
                learntsize_adjust_confl *= learntsize_adjust_inc;
                learntsize_adjust_cnt    = (int)learntsize_adjust_confl;
                max_learnts             *= learntsize_inc;

                if (verbosity >= 1)
                    ...
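The "grow fast / grow slowly" comments rely on how MiniSat scales its bump increment: varDecayActivity multiplies var_inc by 1/decay after each conflict, and varBumpActivity adds the current var_inc to a variable's activity, so a smaller decay factor makes future bumps outweigh older activity sooner. A minimal standalone sketch of just that scaling (plain doubles stand in for the solver's members; the 100-conflict horizon is arbitrary):

#include <cstdio>

int main() {
    // var_inc starts at 1.0 and is multiplied by 1/decay after every conflict
    // (what varDecayActivity does), so the two modes diverge quickly.
    double inc_fast = 1.0, inc_slow = 1.0;
    for (int conflict = 1; conflict <= 100; conflict++) {
        inc_fast *= 1.0 / 0.85;   // varDecayActivity(var_decay): "fast" mode
        inc_slow *= 1.0 / 0.99;   // varDecayActivity(var_thresh_decay): "slow" mode
    }
    printf("bump increment after 100 conflicts: fast ~ %.3g, slow ~ %.3g\n",
           inc_fast, inc_slow);   // roughly 1e7 vs 2.7
    return 0;
}

In both branches every variable seen during analyze() is still bumped by the current var_inc; the branch only controls how quickly var_inc, and hence the relative weight of recent conflicts, grows.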
void Solver::analyze(CRef confl, vec<Lit>& out_learnt, int& out_btlevel)
{
    int pathC = 0;
    Lit p     = lit_Undef;

    // Generate conflict clause:
    //
    out_learnt.push();      // (leave room for the asserting literal)
    int index = trail.size() - 1;

    do{
        assert(confl != CRef_Undef); // (otherwise should be UIP)
        Clause& c = ca[confl];

        if (c.learnt())
            claBumpActivity(c);

        for (int j = (p == lit_Undef) ? 0 : 1; j < c.size(); j++){
            Lit q = c[j];

            if (!seen[var(q)] && level(var(q)) > 0){
                varBumpActivity(var(q));
                seen[var(q)] = 1;
                if (level(var(q)) >= decisionLevel())
                    pathC++;
                else
                    out_learnt.push(q);
            }
        }

        // Select next clause to look at:
        while (!seen[var(trail[index--])]);
        p     = trail[index+1];
        confl = reason(var(p));
        seen[var(p)] = 0;
        pathC--;

    }while (pathC > 0);
    out_learnt[0] = ~p;

    // Simplify conflict clause:
    //
    int i, j;
    out_learnt.copyTo(analyze_toclear);
    if (ccmin_mode == 2){
        uint32_t abstract_level = 0;
        for (i = 1; i < out_learnt.size(); i++)
            abstract_level |= abstractLevel(var(out_learnt[i])); // (maintain an abstraction of levels involved in conflict)

        for (i = j = 1; i < out_learnt.size(); i++)
            if (reason(var(out_learnt[i])) == CRef_Undef || !litRedundant(out_learnt[i], abstract_level))
                out_learnt[j++] = out_learnt[i];

    }else if (ccmin_mode == 1){
        for (i = j = 1; i < out_learnt.size(); i++){
            Var x = var(out_learnt[i]);

            if (reason(x) == CRef_Undef)
                out_learnt[j++] = out_learnt[i];
            else{
                Clause& c = ca[reason(var(out_learnt[i]))];
                for (int k = 1; k < c.size(); k++)
                    if (!seen[var(c[k])] && level(var(c[k])) > 0){
                        out_learnt[j++] = out_learnt[i];
                        break; }
            }
        }
    }else
        i = j = out_learnt.size();

    max_literals += out_learnt.size();
    out_learnt.shrink(i - j);
    tot_literals += out_learnt.size();

    // Find correct backtrack level:
    //
    if (out_learnt.size() == 1)
        out_btlevel = 0;
    else{
        int max_i = 1;
        // Find the first literal assigned at the next-highest level:
        for (int i = 2; i < out_learnt.size(); i++)
            if (level(var(out_learnt[i])) > level(var(out_learnt[max_i])))
                max_i = i;
        // Swap-in this literal at index 1:
        Lit p             = out_learnt[max_i];
        out_learnt[max_i] = out_learnt[1];
        out_learnt[1]     = p;
        out_btlevel       = level(var(p));
    }

    for (int j = 0; j < analyze_toclear.size(); j++) seen[var(analyze_toclear[j])] = 0;    // ('seen[]' is now cleared)
}

The dynamic adjustment of the variable bump increment only takes effect at the next conflict analysis (the varBumpActivity(var(q)) call inside the loop over the conflict-side clause literals above). At that point, the variables bumped while deriving the new learnt clause use the adjusted increment.

Personal understanding:
(1) The use of the adjusted increment lags behind by one conflict.
(2) The variables of the learnt clause whose LBD was measured may no longer be the variables being bumped at that later point, so this is a blanket, global adjustment rather than a targeted one.
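A tiny trace of two consecutive conflicts makes point (1) concrete (plain variables again stand in for var_inc, lbd_ema, etc.; the LBD values are made up):

#include <cstdio>

int main() {
    double var_inc       = 1.0;
    double lbd_ema       = 3.0;
    double lbd_ema_decay = 0.95;

    // Conflict k: analyze() bumps its variables with the *current* increment ...
    double bump_at_k = var_inc;
    // ... and only afterwards does search() look at the learnt clause's LBD
    // and adjust the increment for later conflicts:
    int lbd_k = 2;                                              // below the EMA in this example
    lbd_ema = lbd_ema_decay * lbd_ema + (1 - lbd_ema_decay) * lbd_k;
    var_inc *= 1.0 / ((lbd_k >= lbd_ema) ? 0.85 : 0.99);        // "slow" growth chosen

    // Conflict k+1: these are the first bumps to use the adjusted increment,
    // and they hit whatever variables appear in the *next* conflict, not the
    // variables of the clause whose LBD triggered the adjustment.
    double bump_at_k_plus_1 = var_inc;

    printf("bump at conflict k   = %.4f\n", bump_at_k);
    printf("bump at conflict k+1 = %.4f\n", bump_at_k_plus_1);
    return 0;
}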