cycle_collection.cpp
/*******************************************************************\

Module: collection of cycles in graph of abstract events

Author: Vincent Nimal

Date: 2012

\*******************************************************************/

#include "event_graph.h"

#include <util/message.h>
/// after the collection, eliminates the executions forbidden by an indirect
/// thin-air
void event_grapht::graph_explorert::filter_thin_air(
  std::set<critical_cyclet> &set_of_cycles)
{
  for(std::set<critical_cyclet>::const_iterator it=set_of_cycles.begin();
      it!=set_of_cycles.end(); )
  {
    std::set<critical_cyclet>::const_iterator next=it;
    ++next;
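    // `next` is taken before the body below possibly erases `*it`:
    // std::set::erase only invalidates iterators to the erased element,
    // so advancing through `next` keeps the iteration valid.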
    auto e_it=it->begin();
    /* is there an event in the cycle not in thin-air events? */
    for(; e_it!=it->end(); ++e_it)
      if(thin_air_events.find(*e_it)==thin_air_events.end())
        break;

    if(e_it==it->end())
      set_of_cycles.erase(*it);

    it=next;
  }

#ifdef DEBUG
  for(std::set<event_idt>::const_iterator it=thin_air_events.begin();
      it!=thin_air_events.end();
      ++it)
    egraph.message.debug()<<egraph[*it]<<";";

  egraph.message.debug() << messaget::eom;
#endif
}

/// Tarjan 1972 adapted and modified for events
void event_grapht::graph_explorert::collect_cycles(
  std::set<critical_cyclet> &set_of_cycles,
  memory_modelt model)
{
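  // Cycle enumeration in the style of Tarjan's 1972 algorithm: each event of
  // `order` is taken in turn as the root (`source`) of a depth-first
  // exploration in backtrack(); marks set during one exploration are cleared
  // before the next root is processed.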
  /* all the events initially unmarked */
  for(std::size_t i=0; i<egraph.size(); i++)
    mark[i]=false;

  std::list<event_idt>* order=nullptr;
  /* on Power, rfe pairs are also potentially unsafe */
  switch(model)
  {
  case TSO:
  case PSO:
  case RMO:
    order=&egraph.po_order;
    break;
  case Power:
    order=&egraph.poUrfe_order;
    break;

  case Unknown:
    assert(false);
  }

  if(order->empty())
    return;

  /* if we only consider a limited part of the graph */
  order=order_filtering(order);

  if(order->empty())
    return;

  for(std::list<event_idt>::const_iterator
      st_it=order->begin();
      st_it!=order->end();
      ++st_it)
  {
    event_idt source=*st_it;
    egraph.message.debug() << "explore " << egraph[source].id << messaget::eom;
    backtrack(set_of_cycles, source, source,
      false, max_po_trans, false, false, false, "", model);

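    /* unmark everything this exploration marked, so that the next source
       starts from a clean state */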
    while(!marked_stack.empty())
    {
      event_idt up=marked_stack.top();
      mark[up]=false;
      marked_stack.pop();
    }
  }

  /* end of collection -- remove cycles made spurious by thin-air events */
  if(egraph.filter_thin_air)
    filter_thin_air(set_of_cycles);
}

/// extracts a (whole, unreduced) cycle from the stack
event_grapht::critical_cyclet event_grapht::graph_explorert::extract_cycle(
  event_idt vertex,
  event_idt source,
  unsigned number)
{
  critical_cyclet new_cycle(egraph, number);
  bool incycle=false;
  std::stack<event_idt> stack(point_stack);

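  /* walk a copy of the exploration path from its most recent event down to
     `source`: every event from `vertex` back to `source` is pushed to the
     front, so the extracted cycle reads from `source` to `vertex` */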
  while(!stack.empty())
  {
    event_idt current_vertex=stack.top();
    stack.pop();

    egraph.message.debug() << "extract: "
                           << egraph[current_vertex].get_operation()
                           << egraph[current_vertex].variable << "@"
                           << egraph[current_vertex].thread << "~"
                           << egraph[current_vertex].local
                           << messaget::eom;

    if(!new_cycle.has_user_defined_fence)
    {
      new_cycle.has_user_defined_fence=egraph[current_vertex].is_fence();
    }

    if(current_vertex==vertex)
      incycle=true;

    if(incycle)
      new_cycle.push_front(current_vertex);

    if(current_vertex==source)
      break;
  }

  return new_cycle;
}

/// see event_grapht::collect_cycles
bool event_grapht::graph_explorert::backtrack(
  std::set<critical_cyclet> &set_of_cycles,
  event_idt source,
  event_idt vertex,
  bool unsafe_met, /* unsafe pair for the model met in the visited path */
  event_idt po_trans, /* po-transition skips still allowed */
  bool same_var_pair, /* in a thread, tells if we already met one rfi wsi fri */
  bool lwfence_met, /* if we try to skip a lwsync (only valid for lwsyncWR) */
  bool has_to_be_unsafe,
  irep_idt var_to_avoid,
  memory_modelt model)
{
#ifdef DEBUG
  egraph.message.debug() << std::string(80, '-');
  egraph.message.debug() << messaget::eom;
  egraph.message.debug() << "marked size:" << marked_stack.size()
                         << messaget::eom;
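  // dump point_stack and marked_stack without losing them: each element is
  // parked on `tmp` while printing and then pushed back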
  std::stack<event_idt> tmp;
  while(!point_stack.empty())
  {
    egraph.message.debug() << point_stack.top() << " | ";
    tmp.push(point_stack.top());
    point_stack.pop();
  }
  egraph.message.debug() << messaget::eom;
  while(!tmp.empty())
  {
    point_stack.push(tmp.top());
    tmp.pop();
  }
  while(!marked_stack.empty())
  {
    egraph.message.debug() << marked_stack.top() << " | ";
    tmp.push(marked_stack.top());
    marked_stack.pop();
  }
  egraph.message.debug() << messaget::eom;
  while(!tmp.empty())
  {
    marked_stack.push(tmp.top());
    tmp.pop();
  }
#endif

  // TO DISCUSS: shouldn't we still allow po-transition through it instead?
  if(filtering(vertex))
    return false;

  egraph.message.debug() << "bcktck "<<egraph[vertex].id<<"#"<<vertex<<", "
    <<egraph[source].id<<"#"<<source<<" lw:"<<lwfence_met<<" unsafe:"
    <<unsafe_met << messaget::eom;
  bool f=false;
  bool get_com_only=false;
  bool unsafe_met_updated=unsafe_met;
  bool same_var_pair_updated=same_var_pair;

  bool not_thin_air=true;

  const abstract_eventt &this_vertex=egraph[vertex];

  /* if a thread starts with variable x, the last event of this thread in the
     cycle cannot be with x */
  irep_idt avoid_at_the_end=var_to_avoid;
  bool no_comm=false;
  if(avoid_at_the_end!="" && avoid_at_the_end==this_vertex.variable)
    no_comm=true;

  /* if specified, maximum number of events reached */
  if(max_var!=0 && point_stack.size()>max_var*3)
    return false;
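  // (the factor of 3 presumably comes from constraint 2 below: a single
  // variable contributes at most three events -- W W, R W, W R or R W R --
  // to a cycle)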

  /* we only explore shared variables */
  if(!this_vertex.local)
  {
    /* only the lwsyncWR can be interpreted as poWR (i.e., skip of the fence) */
    if(lwfence_met && this_vertex.operation!=abstract_eventt::operationt::Read)
      return false; // {no_comm=true;get_com_only=false;}//return false;

    bool has_to_be_unsafe_updated=false;
    // TODO: propagate this constraint within the optimisation
    // -- no optimisation can strongly affect performances
    /* tab[] can appear several times */
    if(egraph.ignore_arrays ||
       id2string(this_vertex.variable).find("[]")==std::string::npos)
    {
      /* no more than 4 events per thread */
      if(this_vertex.operation!=abstract_eventt::operationt::Fence &&
         this_vertex.operation!=abstract_eventt::operationt::Lwfence &&
         this_vertex.operation!=abstract_eventt::operationt::ASMfence)
      {
        if(events_per_thread[this_vertex.thread]==4)
          return false;
        else
          events_per_thread[this_vertex.thread]++;
      }

      /* Multiple re-orderings constraint: if the thread on this cycle contains
         more than one, ensure that an unsafe pair is not protected by another
         relation in the thread. E.g., in Wx Rx Wy, under TSO, the rfi cannot be
         delayed, since the only way to make this transformation matter is to
         re-order also the two writes, which is not permitted on TSO. */
      if(has_to_be_unsafe && point_stack.size() >= 2)
      {
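        /* peek at the two most recent events on the exploration path without
           disturbing the stack */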
        const event_idt previous=point_stack.top();
        point_stack.pop();
        const event_idt preprevious=point_stack.top();
        point_stack.push(previous);
        if(!egraph[preprevious].unsafe_pair(this_vertex, model) &&
           !(this_vertex.operation==abstract_eventt::operationt::Fence ||
             egraph[preprevious].operation==
               abstract_eventt::operationt::Fence ||
             this_vertex.operation==abstract_eventt::operationt::Lwfence ||
             egraph[preprevious].operation==
               abstract_eventt::operationt::Lwfence ||
             this_vertex.operation==abstract_eventt::operationt::ASMfence ||
             egraph[preprevious].operation==
               abstract_eventt::operationt::ASMfence))
          return false;
      }
    }

    has_to_be_unsafe_updated=has_to_be_unsafe;

    /* constraint 1.a: there is at most one pair of events per thread
       with different variables. Given that we cannot have more than
       three events on the same variable, with two in the same thread,
       this means that we can have at most 2 consecutive events by po
       with the same variable, and two variables per thread (fences are
       not taken into account) */
    if(!point_stack.empty() &&
       egraph.are_po_ordered(point_stack.top(), vertex) &&
       this_vertex.operation!=abstract_eventt::operationt::Fence &&
       this_vertex.operation!=abstract_eventt::operationt::Lwfence &&
       this_vertex.operation!=abstract_eventt::operationt::ASMfence &&
       this_vertex.variable==egraph[point_stack.top()].variable)
    {
      if(same_var_pair ||
         (this_vertex.operation==abstract_eventt::operationt::Read &&
          egraph[point_stack.top()].operation==
            abstract_eventt::operationt::Read))
      {
        events_per_thread[this_vertex.thread]--;
        return false; // {no_comm=true;get_com_only=false;} //return false;
      }
      else
      {
        same_var_pair_updated=true;
        if(events_per_thread[this_vertex.thread]>=3)
          get_com_only=true;
      }
    }

    /* constraint 1.b */
    if(!point_stack.empty() &&
       egraph.are_po_ordered(point_stack.top(), vertex) &&
       this_vertex.operation!=abstract_eventt::operationt::Fence &&
       this_vertex.operation!=abstract_eventt::operationt::Lwfence &&
       this_vertex.operation!=abstract_eventt::operationt::ASMfence &&
       this_vertex.variable!=egraph[point_stack.top()].variable)
    {
      same_var_pair_updated=false;
    }

    /* constraint 2: per variable, either W W, R W, W R, or R W R */
    if(this_vertex.operation!=abstract_eventt::operationt::Fence &&
       this_vertex.operation!=abstract_eventt::operationt::Lwfence &&
       this_vertex.operation!=abstract_eventt::operationt::ASMfence)
    {
      const unsigned char nb_writes=writes_per_variable[this_vertex.variable];
      const unsigned char nb_reads=reads_per_variable[this_vertex.variable];

      if(nb_writes+nb_reads==3)
      {
        events_per_thread[this_vertex.thread]--;
        return false; // {no_comm=true;get_com_only=false;} //return false;
      }
      else if(this_vertex.operation==abstract_eventt::operationt::Write)
      {
        if(nb_writes==2)
        {
          events_per_thread[this_vertex.thread]--;
          return false; // {no_comm=true;get_com_only=false;} //return false;
        }
        else
          writes_per_variable[this_vertex.variable]++;
      }
      else if(this_vertex.operation==abstract_eventt::operationt::Read)
      {
        if(nb_reads==2)
        {
          events_per_thread[this_vertex.thread]--;
          return false; // {no_comm=true;get_com_only=false;} //return false;
        }
        else
          reads_per_variable[this_vertex.variable]++;
      }
    }
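    /* i.e. for each variable x the exploration admits at most the access
       patterns W W, R W, W R or R W R of constraint 2: a third write, a third
       read, or a fourth access on x prunes the current branch */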

    if(!point_stack.empty())
    {
      const abstract_eventt &prev_vertex=egraph[point_stack.top()];
      unsafe_met_updated|=
        prev_vertex.unsafe_pair(this_vertex, model) &&
        !(prev_vertex.thread==this_vertex.thread &&
          egraph.map_data_dp[this_vertex.thread].dp(prev_vertex, this_vertex));
      if(unsafe_met_updated &&
         !unsafe_met &&
         egraph.are_po_ordered(point_stack.top(), vertex))
        has_to_be_unsafe_updated=true;
    }
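    /* an unsafe pair inside a single thread is ignored when the two events are
       linked by a data dependency (dp), since the dependency is assumed to
       preserve their order */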

    point_stack.push(vertex);
    mark[vertex]=true;
    marked_stack.push(vertex);

    if(!get_com_only)
    {
      /* we first visit via po transition, if existing */
      for(wmm_grapht::edgest::const_iterator
          w_it=egraph.po_out(vertex).begin();
          w_it!=egraph.po_out(vertex).end(); w_it++)
      {
        const event_idt w=w_it->first;
        if(w==source && point_stack.size()>=4
           && (unsafe_met_updated
               || this_vertex.unsafe_pair(egraph[source], model)) )
        {
          critical_cyclet new_cycle=extract_cycle(vertex, source, cycle_nb++);
          not_thin_air=!egraph.filter_thin_air || new_cycle.is_not_thin_air();
          if(!not_thin_air)
          {
            for(critical_cyclet::const_iterator e_it=new_cycle.begin();
                e_it!=new_cycle.end();
                ++e_it)
              thin_air_events.insert(*e_it);
          }
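          /* remember the events of thin-air cycles: filter_thin_air later
             removes every cycle made exclusively of such events */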
          if((!egraph.filter_uniproc || new_cycle.is_not_uniproc(model)) &&
             not_thin_air && new_cycle.is_cycle() &&
             new_cycle.is_unsafe(model) /*&& new_cycle.is_unsafe_asm(model)*/)
          {
            egraph.message.debug() << new_cycle.print_name(model, false)
                                   << messaget::eom;
            set_of_cycles.insert(new_cycle);
#if 0
            const critical_cyclet* reduced=new_cycle.hide_internals();
            set_of_cycles.insert(*reduced);
            delete(reduced);
#endif
          }
          f=true;
        }
        else if(!mark[w])
          f|=
            backtrack(
              set_of_cycles,
              source,
              w,
              unsafe_met_updated,
              po_trans,
              same_var_pair_updated,
              false,
              has_to_be_unsafe_updated,
              avoid_at_the_end, model);
      }
    }

    if(!no_comm)
    {
      /* we then visit via com transitions, if existing */
      for(wmm_grapht::edgest::const_iterator
          w_it=egraph.com_out(vertex).begin();
          w_it!=egraph.com_out(vertex).end(); w_it++)
      {
        const event_idt w=w_it->first;
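        /* com edges towards events smaller than the current source are
           dropped: presumably the usual pruning of cycle enumeration, where
           only cycles whose smallest event is the current source still need
           to be generated */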
        if(w < source)
          egraph.remove_com_edge(vertex, w);
        else if(w==source && point_stack.size()>=4 &&
                (unsafe_met_updated ||
                 this_vertex.unsafe_pair(egraph[source], model)))
        {
          critical_cyclet new_cycle=extract_cycle(vertex, source, cycle_nb++);
          not_thin_air=!egraph.filter_thin_air || new_cycle.is_not_thin_air();
          if(!not_thin_air)
          {
            for(critical_cyclet::const_iterator e_it=new_cycle.begin();
                e_it!=new_cycle.end();
                ++e_it)
              thin_air_events.insert(*e_it);
          }
          if((!egraph.filter_uniproc || new_cycle.is_not_uniproc(model)) &&
             not_thin_air && new_cycle.is_cycle() &&
             new_cycle.is_unsafe(model) /*&& new_cycle.is_unsafe_asm(model)*/)
          {
            egraph.message.debug() << new_cycle.print_name(model, false)
                                   << messaget::eom;
            set_of_cycles.insert(new_cycle);
#if 0
            const critical_cyclet* reduced=new_cycle.hide_internals();
            set_of_cycles.insert(*reduced);
            delete(reduced);
#endif
          }
          f=true;
        }
        else if(!mark[w])
          f|=
            backtrack(
              set_of_cycles,
              source,
              w,
              unsafe_met_updated,
              po_trans,
              false,
              false,
              false,
              "",
              model);
      }
    }

    if(f)
    {
      while(!marked_stack.empty() && marked_stack.top()!=vertex)
      {
        event_idt up=marked_stack.top();
        marked_stack.pop();
        mark[up]=false;
      }

      if(!marked_stack.empty())
        marked_stack.pop();
      mark[vertex]=false;
    }

    assert(!point_stack.empty());
    point_stack.pop();

    /* removes variable access */
    if(this_vertex.operation!=abstract_eventt::operationt::Fence &&
       this_vertex.operation!=abstract_eventt::operationt::Lwfence &&
       this_vertex.operation!=abstract_eventt::operationt::ASMfence)
    {
      if(this_vertex.operation==abstract_eventt::operationt::Write)
        writes_per_variable[this_vertex.variable]--;
      else
        reads_per_variable[this_vertex.variable]--;

      events_per_thread[this_vertex.thread]--;
    }
  }

  /* transitivity of po: do the same, but skip this event
     (except if it is a fence or no more po-transition skips allowed);
     if the cycle explored so far has a thin-air subcycle, this cycle
     is not valid: stop this exploration here */
  if(skip_tracked.find(vertex)==skip_tracked.end()) // 25 oct
    if(not_thin_air && !get_com_only &&
       (po_trans > 1 || po_trans==0) &&
       !point_stack.empty() &&
       egraph.are_po_ordered(point_stack.top(), vertex) &&
       this_vertex.operation!=abstract_eventt::operationt::Fence &&
       (this_vertex.operation!=abstract_eventt::operationt::Lwfence ||
        egraph[point_stack.top()].operation==
          abstract_eventt::operationt::Write) &&
       (this_vertex.operation!=abstract_eventt::operationt::ASMfence ||
        !this_vertex.WRfence ||
        egraph[point_stack.top()].operation==
          abstract_eventt::operationt::Write))
    {
      skip_tracked.insert(vertex);

      std::stack<event_idt> tmp;

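      /* events pushed on the marked stack after `vertex` are temporarily
         unmarked so that the skipping exploration below can revisit them; if
         `vertex` was not on the marked stack at all, those events are restored
         and `vertex` itself is marked and pushed */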
      while(!marked_stack.empty() && marked_stack.top()!=vertex)
      {
        tmp.push(marked_stack.top());
        mark[marked_stack.top()]=false;
        marked_stack.pop();
      }

      if(!marked_stack.empty())
      {
        assert(marked_stack.top()==vertex);
        mark[vertex]=true;
      }
      else
      {
        while(!tmp.empty())
        {
          marked_stack.push(tmp.top());
          mark[tmp.top()]=true;
          tmp.pop();
        }
        mark[vertex]=true;
        marked_stack.push(vertex);
      }

      if(!egraph[point_stack.top()].unsafe_pair(this_vertex, model))
      {
        /* tab[] should never be avoided */
        if(egraph.ignore_arrays ||
           id2string(this_vertex.variable).find("[]")==std::string::npos)
          avoid_at_the_end=this_vertex.variable;
      }

      /* skip lwfence by po-transition only if we consider a WR */
      // TO CHECK
      const bool is_lwfence=
        (this_vertex.operation==abstract_eventt::operationt::Lwfence &&
         egraph[point_stack.top()].operation==
           abstract_eventt::operationt::Write) ||
        (this_vertex.operation==abstract_eventt::operationt::ASMfence &&
         (!this_vertex.WRfence &&
          egraph[point_stack.top()].operation==
            abstract_eventt::operationt::Write));

      for(wmm_grapht::edgest::const_iterator w_it=
          egraph.po_out(vertex).begin();
          w_it!=egraph.po_out(vertex).end(); w_it++)
      {
        const event_idt w=w_it->first;
        f|=
          backtrack(
            set_of_cycles,
            source,
            w,
            unsafe_met/*_updated*/,
            (po_trans==0?0:po_trans-1),
            same_var_pair/*_updated*/,
            is_lwfence,
            has_to_be_unsafe,
            avoid_at_the_end,
            model);
      }

      if(f)
      {
        while(!marked_stack.empty() && marked_stack.top()!=vertex)
        {
          event_idt up=marked_stack.top();
          marked_stack.pop();
          mark[up]=false;
        }

        if(!marked_stack.empty())
          marked_stack.pop();
        mark[vertex]=false;
      }

      skip_tracked.erase(vertex);
    }

  return f;
}