cprover
cycle_collection.cpp
Go to the documentation of this file.
1 /*******************************************************************\
2 
3 Module: collection of cycles in graph of abstract events
4 
5 Author: Vincent Nimal
6 
7 Date: 2012
8 
9 \*******************************************************************/
10 
13 
14 #include "event_graph.h"
15 
16 #include <util/message.h>
17 
// NOTE(review): this is a Doxygen text dump; the embedded numbers are the
// original source line numbers. The function's signature line was lost in
// extraction -- per the cross-reference index this is
// void event_grapht::graph_explorert::filter_thin_air(...): it erases, after
// collection, every cycle consisting entirely of events recorded in
// thin_air_events.
21  std::set<critical_cyclet> &set_of_cycles)
22 {
23  for(std::set<critical_cyclet>::const_iterator it=set_of_cycles.begin();
24  it!=set_of_cycles.end(); )
25  {
// Save the successor first: erasing *it below invalidates 'it' itself,
// so we advance via the pre-computed 'next'.
26  std::set<critical_cyclet>::const_iterator next=it;
27  ++next;
28  critical_cyclet::const_iterator e_it=it->begin();
29  /* is there an event in the cycle not in thin-air events? */
30  for(; e_it!=it->end(); ++e_it)
31  if(thin_air_events.find(*e_it)==thin_air_events.end())
32  break;
33 
// Reaching end() means every event of the cycle is a thin-air event:
// drop the cycle (erase by value, i.e. by the cycle object itself).
34  if(e_it==it->end())
35  set_of_cycles.erase(*it);
36 
37  it=next;
38  }
39 
// Debug-only: dump the collected thin-air events.
40 #ifdef DEBUG
41  for(std::set<event_idt>::const_iterator it=thin_air_events.begin();
42  it!=thin_air_events.end();
43  ++it)
44  egraph.message.debug()<<egraph[*it]<<";";
45 
47 #endif
48 }
49 
// NOTE(review): Doxygen text dump; the signature line was lost in extraction.
// Per the cross-reference index this is
// void event_grapht::graph_explorert::collect_cycles(...), described there as
// "Tarjan 1972 adapted and modified for events": it enumerates critical
// cycles of the event graph under the given memory model by calling
// backtrack() from each start event, then filters thin-air cycles.
52  std::set<critical_cyclet> &set_of_cycles,
53  memory_modelt model)
54 {
55  /* all the events initially unmarked */
56  for(std::size_t i=0; i<egraph.size(); i++)
57  mark[i]=false;
58 
59  std::list<event_idt>* order=nullptr;
60  /* on Power, rfe pairs are also potentially unsafe */
61  switch(model)
62  {
63  case TSO:
64  case PSO:
65  case RMO:
66  order=&egraph.po_order;
67  break;
68  case Power:
69  order=&egraph.poUrfe_order;
70  break;
71 
// Unknown model is a caller error -- exploration order is undefined for it.
72  case Unknown:
73  assert(false);
74  }
75 
76  if(order->empty())
77  return;
78 
79  /* if we only consider a limited part of the graph */
80  order=order_filtering(order);
81 
82  if(order->empty())
83  return;
84 
// One DFS (backtrack) per potential cycle source, in the chosen order;
// source==vertex on the initial call.
85  for(std::list<event_idt>::const_iterator
86  st_it=order->begin();
87  st_it!=order->end();
88  ++st_it)
89  {
90  event_idt source=*st_it;
91  egraph.message.debug() << "explore " << egraph[source].id << messaget::eom;
92  backtrack(set_of_cycles, source, source,
93  false, max_po_trans, false, false, false, "", model);
94 
// Reset all marks left over from this exploration before the next source.
95  while(!marked_stack.empty())
96  {
97  event_idt up=marked_stack.top();
98  mark[up]=false;
99  marked_stack.pop();
100  }
101  }
102 
103  /* end of collection -- remove spurious by thin-air cycles */
105  filter_thin_air(set_of_cycles);
106 }
107 
// NOTE(review): Doxygen text dump; the signature line was lost in extraction.
// Per the cross-reference index this is
// critical_cyclet event_grapht::graph_explorert::extract_cycle(...):
// "extracts a (whole, unreduced) cycle from the stack". It copies
// point_stack and pops from the copy (top = most recently visited event),
// collecting events from 'vertex' back to 'source' into a new cycle.
111  event_idt vertex,
112  event_idt source,
113  unsigned number)
114 {
115  critical_cyclet new_cycle(egraph, number);
116  bool incycle=false;
// Work on a copy so the explorer's point_stack is left untouched.
117  std::stack<event_idt> stack(point_stack);
118 
119  while(!stack.empty())
120  {
121  event_idt current_vertex=stack.top();
122  stack.pop();
123 
124  egraph.message.debug() << "extract: "
125  << egraph[current_vertex].get_operation()
126  << egraph[current_vertex].variable << "@"
127  << egraph[current_vertex].thread << "~"
128  << egraph[current_vertex].local
129  << messaget::eom;
130 
// Record (once) whether any event on the path is a user-defined fence.
131  if(!new_cycle.has_user_defined_fence)
132  {
133  new_cycle.has_user_defined_fence=egraph[current_vertex].is_fence();
134  }
135 
// Events between 'vertex' (closing event) and 'source' (start of the DFS)
// belong to the cycle; push_front keeps them in program-exploration order.
136  if(current_vertex==vertex)
137  incycle=true;
138 
139  if(incycle)
140  new_cycle.push_front(current_vertex);
141 
142  if(current_vertex==source)
143  break;
144  }
145 
146  return new_cycle;
147 }
148 
// NOTE(review): Doxygen text dump; the signature line was lost in extraction.
// Per the cross-reference index this is
// bool event_grapht::graph_explorert::backtrack(...) -- the DFS of
// collect_cycles. It explores from 'vertex' looking for a path back to
// 'source' that forms a critical cycle under 'model', first via po edges,
// then via com edges, then (transitively) by skipping this event.
// Returns true ('f') iff at least one cycle was found below this call.
//
// WARNING(review): several multi-line conditions in this dump were truncated
// by the extraction (the embedded source numbering jumps, e.g. 258->260->263,
// 488->492->494, 510->513->516, 558->560->563). The right-hand operands of
// some 'operation==' comparisons and some condition heads are MISSING below;
// do not treat those spans as compilable. They are flagged inline.
152  std::set<critical_cyclet> &set_of_cycles,
153  event_idt source,
154  event_idt vertex,
155  bool unsafe_met, /* unsafe pair for the model met in the visited path */
156  event_idt po_trans, /* po-transition skips still allowed */
157  bool same_var_pair, /* in a thread, tells if we already met one rfi wsi fri */
158  bool lwfence_met, /* if we try to skip a lwsync (only valid for lwsyncWR) */
159  bool has_to_be_unsafe,
160  irep_idt var_to_avoid,
161  memory_modelt model)
162 {
// Debug-only: non-destructively print point_stack and marked_stack by
// draining each into 'tmp' and restoring it.
163 #ifdef DEBUG
164  egraph.message.debug() << std::string(80, '-');
166  egraph.message.debug() << "marked size:" << marked_stack.size()
167  << messaget::eom;
168  std::stack<event_idt> tmp;
169  while(!point_stack.empty())
170  {
171  egraph.message.debug() << point_stack.top() << " | ";
172  tmp.push(point_stack.top());
173  point_stack.pop();
174  }
176  while(!tmp.empty())
177  {
178  point_stack.push(tmp.top());
179  tmp.pop();
180  }
181  while(!marked_stack.empty())
182  {
183  egraph.message.debug() << marked_stack.top() << " | ";
184  tmp.push(marked_stack.top());
185  marked_stack.pop();
186  }
188  while(!tmp.empty())
189  {
190  marked_stack.push(tmp.top());
191  tmp.pop();
192  }
193 #endif
194 
195  // TO DISCUSS: shouldn't we still allow po-transition through it instead?
196  if(filtering(vertex))
197  return false;
198 
199  egraph.message.debug() << "bcktck "<<egraph[vertex].id<<"#"<<vertex<<", "
200  <<egraph[source].id<<"#"<<source<<" lw:"<<lwfence_met<<" unsafe:"
201  <<unsafe_met << messaget::eom;
// f: did any exploration below find a cycle?
202  bool f=false;
// get_com_only: restrict further exploration to com edges only.
203  bool get_com_only=false;
204  bool unsafe_met_updated=unsafe_met;
205  bool same_var_pair_updated=same_var_pair;
206 
207  bool not_thin_air=true;
208 
209  const abstract_eventt &this_vertex=egraph[vertex];
210 
211  /* if a thread starts with variable x, the last event of this thread in the
212  cycle cannot be with x */
213  irep_idt avoid_at_the_end=var_to_avoid;
214  bool no_comm=false;
215  if(avoid_at_the_end!="" && avoid_at_the_end==this_vertex.variable)
216  no_comm=true;
217 
218  /* if specified, maximum number of events reached */
219  if(max_var!=0 && point_stack.size()>max_var*3)
220  return false;
221 
222  /* we only explore shared variables */
223  if(!this_vertex.local)
224  {
225  /* only the lwsyncWR can be interpreted as poWR (i.e., skip of the fence) */
226  if(lwfence_met && this_vertex.operation!=abstract_eventt::operationt::Read)
227  return false; // {no_comm=true;get_com_only=false;}//return false;
228 
229  bool has_to_be_unsafe_updated=false;
230  // TODO: propagate this constraint within the optimisation
231  // -- no optimisation can strongly affect performances
232  /* tab[] can appear several times */
233  if(egraph.ignore_arrays ||
234  id2string(this_vertex.variable).find("[]")==std::string::npos)
235  {
236  /* no more than 4 events per thread */
// NOTE(review): numbering jumps 236->240 here -- the condition guarding
// this '{' block was dropped by the extraction.
240  {
241  if(events_per_thread[this_vertex.thread]==4)
242  return false;
243  else
244  events_per_thread[this_vertex.thread]++;
245  }
246 
247  /* Multiple re-orderings constraint: if the thread on this cycles contains
248  more than one, ensure that an unsafe pair is not protected by another
249  relation in the thread. E.g., in Wx Rx Wy, under TSO, the rfi cannot be
250  delayed, since the only way to make this transformation matter is to
251  re-order also the two writes, which is not permitted on TSO. */
252  if(has_to_be_unsafe && point_stack.size() >= 2)
253  {
// Peek at the element below the top (pre-previous event) without
// disturbing the stack: pop, read, push back.
254  const event_idt previous=point_stack.top();
255  point_stack.pop();
256  const event_idt preprevious=point_stack.top();
257  point_stack.push(previous);
258  if(!egraph[preprevious].unsafe_pair(this_vertex, model) &&
// NOTE(review): the right-hand sides of the three comparisons below
// (numbering jumps 258->260->263->266->268) were lost in extraction;
// each compares against an abstract_eventt::operationt enumerator.
260  egraph[preprevious].operation==
263  egraph[preprevious].operation==
266  egraph[preprevious].operation==
268  return false;
269  }
270  }
271 
272  has_to_be_unsafe_updated=has_to_be_unsafe;
273 
274  /* constraint 1.a: there is at most one pair of events per thread
275  with different variables. Given that we cannot have more than
276  three events on the same variable, with two in the same thread,
277  this means that we can have at most 2 consecutive events by po
278  with the same variable, and two variables per thread (fences are
279  not taken into account) */
280  if(!point_stack.empty() &&
281  egraph.are_po_ordered(point_stack.top(), vertex) &&
// NOTE(review): numbering jumps 281->285 -- intervening condition
// line(s) were lost in extraction.
285  this_vertex.variable==egraph[point_stack.top()].variable)
286  {
287  if(same_var_pair ||
// NOTE(review): right-hand side of this comparison lost in extraction
// (numbering jumps 287->289->291).
289  egraph[point_stack.top()].operation==
291  {
292  events_per_thread[this_vertex.thread]--;
293  return false; // {no_comm=true;get_com_only=false;} //return false;
294  }
295  else
296  {
297  same_var_pair_updated=true;
298  if(events_per_thread[this_vertex.thread]>=3)
299  get_com_only=true;
300  }
301  }
302 
303  /* constraint 1.b */
304  if(!point_stack.empty() &&
305  egraph.are_po_ordered(point_stack.top(), vertex) &&
// NOTE(review): numbering jumps 305->309 -- condition line(s) lost.
309  this_vertex.variable!=egraph[point_stack.top()].variable)
310  {
311  same_var_pair_updated=false;
312  }
313 
314  /* constraint 2: per variable, either W W, R W, W R, or R W R */
// NOTE(review): numbering jumps 314->318 -- the condition guarding this
// '{' block was lost in extraction.
318  {
319  const unsigned char nb_writes=writes_per_variable[this_vertex.variable];
320  const unsigned char nb_reads=reads_per_variable[this_vertex.variable];
321 
// At most three accesses per variable overall; at most two writes or
// two reads. Counters incremented here are decremented on unwind below.
322  if(nb_writes+nb_reads==3)
323  {
324  events_per_thread[this_vertex.thread]--;
325  return false; // {no_comm=true;get_com_only=false;} //return false;
326  }
327  else if(this_vertex.operation==abstract_eventt::operationt::Write)
328  {
329  if(nb_writes==2)
330  {
331  events_per_thread[this_vertex.thread]--;
332  return false; // {no_comm=true;get_com_only=false;} //return false;
333  }
334  else
335  writes_per_variable[this_vertex.variable]++;
336  }
337  else if(this_vertex.operation==abstract_eventt::operationt::Read)
338  {
339  if(nb_reads==2)
340  {
341  events_per_thread[this_vertex.thread]--;
342  return false; // {no_comm=true;get_com_only=false;} //return false;
343  }
344  else
345  reads_per_variable[this_vertex.variable]++;
346  }
347  }
348 
// An (prev, this) pair is "met unsafe" if unsafe for the model and not
// protected by an intra-thread data dependency.
349  if(!point_stack.empty())
350  {
351  const abstract_eventt &prev_vertex=egraph[point_stack.top()];
352  unsafe_met_updated|=
353  prev_vertex.unsafe_pair(this_vertex, model) &&
354  !(prev_vertex.thread==this_vertex.thread &&
355  egraph.map_data_dp[this_vertex.thread].dp(prev_vertex, this_vertex));
356  if(unsafe_met_updated &&
357  !unsafe_met &&
358  egraph.are_po_ordered(point_stack.top(), vertex))
359  has_to_be_unsafe_updated=true;
360  }
361 
362  point_stack.push(vertex);
363  mark[vertex]=true;
364  marked_stack.push(vertex);
365 
366  if(!get_com_only)
367  {
368  /* we first visit via po transition, if existing */
369  for(wmm_grapht::edgest::const_iterator
370  w_it=egraph.po_out(vertex).begin();
371  w_it!=egraph.po_out(vertex).end(); w_it++)
372  {
373  const event_idt w=w_it->first;
// Closing the cycle back at 'source': needs >=4 stacked events and an
// unsafe pair somewhere on the path (or closing edge itself unsafe).
374  if(w==source && point_stack.size()>=4
375  && (unsafe_met_updated
376  || this_vertex.unsafe_pair(egraph[source], model)) )
377  {
378  critical_cyclet new_cycle=extract_cycle(vertex, source, cycle_nb++);
379  not_thin_air=!egraph.filter_thin_air || new_cycle.is_not_thin_air();
// Thin-air cycle: remember its events so filter_thin_air() can later
// drop other cycles built from them.
380  if(!not_thin_air)
381  {
382  for(critical_cyclet::const_iterator e_it=new_cycle.begin();
383  e_it!=new_cycle.end();
384  ++e_it)
385  thin_air_events.insert(*e_it);
386  }
387  if((!egraph.filter_uniproc || new_cycle.is_not_uniproc(model)) &&
388  not_thin_air && new_cycle.is_cycle() &&
389  new_cycle.is_unsafe(model) /*&& new_cycle.is_unsafe_asm(model)*/)
390  {
391  egraph.message.debug() << new_cycle.print_name(model, false)
392  << messaget::eom;
393  set_of_cycles.insert(new_cycle);
394 #if 0
395  const critical_cyclet* reduced=new_cycle.hide_internals();
396  set_of_cycles.insert(*reduced);
397  delete(reduced);
398 #endif
399  }
400  f=true;
401  }
402  else if(!mark[w])
403  f|=
404  backtrack(
405  set_of_cycles,
406  source,
407  w,
408  unsafe_met_updated,
409  po_trans,
410  same_var_pair_updated,
411  false,
412  has_to_be_unsafe_updated,
413  avoid_at_the_end, model);
414  }
415  }
416 
417  if(!no_comm)
418  {
419  /* we then visit via com transitions, if existing */
420  for(wmm_grapht::edgest::const_iterator
421  w_it=egraph.com_out(vertex).begin();
422  w_it!=egraph.com_out(vertex).end(); w_it++)
423  {
424  const event_idt w=w_it->first;
// Events before 'source' in the order were already explored as sources:
// prune their com edges so they are not revisited.
425  if(w < source)
426  egraph.remove_com_edge(vertex, w);
427  else if(w==source && point_stack.size()>=4 &&
428  (unsafe_met_updated ||
429  this_vertex.unsafe_pair(egraph[source], model)))
430  {
431  critical_cyclet new_cycle=extract_cycle(vertex, source, cycle_nb++);
432  not_thin_air=!egraph.filter_thin_air || new_cycle.is_not_thin_air();
433  if(!not_thin_air)
434  {
435  for(critical_cyclet::const_iterator e_it=new_cycle.begin();
436  e_it!=new_cycle.end();
437  ++e_it)
438  thin_air_events.insert(*e_it);
439  }
440  if((!egraph.filter_uniproc || new_cycle.is_not_uniproc(model)) &&
441  not_thin_air && new_cycle.is_cycle() &&
442  new_cycle.is_unsafe(model) /*&& new_cycle.is_unsafe_asm(model)*/)
443  {
444  egraph.message.debug() << new_cycle.print_name(model, false)
445  << messaget::eom;
446  set_of_cycles.insert(new_cycle);
447 #if 0
448  const critical_cyclet* reduced=new_cycle.hide_internals();
449  set_of_cycles.insert(*reduced);
450  delete(reduced);
451 #endif
452  }
453  f=true;
454  }
// Crossing a com edge resets the per-thread po constraints: the
// recursive call passes false/""/false for them.
455  else if(!mark[w])
456  f|=
457  backtrack(
458  set_of_cycles,
459  source,
460  w,
461  unsafe_met_updated,
462  po_trans,
463  false,
464  false,
465  false,
466  "",
467  model);
468  }
469  }
470 
// A cycle was found below: unmark everything above 'vertex' (and 'vertex'
// itself) so those events can participate in other cycles.
471  if(f)
472  {
473  while(!marked_stack.empty() && marked_stack.top()!=vertex)
474  {
475  event_idt up=marked_stack.top();
476  marked_stack.pop();
477  mark[up]=false;
478  }
479 
480  if(!marked_stack.empty())
481  marked_stack.pop();
482  mark[vertex]=false;
483  }
484 
485  assert(!point_stack.empty());
486  point_stack.pop();
487 
488  /* removes variable access */
// NOTE(review): numbering jumps 488->492->494 -- the guard of this block
// and the 'if' head before the first decrement were lost in extraction
// (presumably the write/read counter unwind mirroring the increments
// above -- TODO confirm against the real source).
492  {
494  writes_per_variable[this_vertex.variable]--;
495  else
496  reads_per_variable[this_vertex.variable]--;
497 
498  events_per_thread[this_vertex.thread]--;
499  }
500  }
501 
502  /* transitivity of po: do the same, but skip this event
503  (except if it is a fence or no more po-transition skips allowed);
504  if the cycle explored so far has a thin-air subcycle, this cycle
505  is not valid: stop this exploration here */
506  if(skip_tracked.find(vertex)==skip_tracked.end()) // 25 oct
507  if(not_thin_air && !get_com_only &&
508  (po_trans > 1 || po_trans==0) &&
509  !point_stack.empty() &&
510  egraph.are_po_ordered(point_stack.top(), vertex) &&
// NOTE(review): numbering jumps 510->513->516->519 -- parts of this
// condition (including the right-hand sides of the 'operation=='
// comparisons) were lost in extraction.
513  egraph[point_stack.top()].operation==
516  !this_vertex.WRfence ||
517  egraph[point_stack.top()].operation==
519  {
520  skip_tracked.insert(vertex);
521 
522  std::stack<event_idt> tmp;
523 
// Temporarily unmark events above 'vertex' so the skipping exploration
// can revisit them; restore marks/stack afterwards as needed.
524  while(!marked_stack.empty() && marked_stack.top()!=vertex)
525  {
526  tmp.push(marked_stack.top());
527  mark[marked_stack.top()]=false;
528  marked_stack.pop();
529  }
530 
531  if(!marked_stack.empty())
532  {
533  assert(marked_stack.top()==vertex);
534  mark[vertex]=true;
535  }
536  else
537  {
538  while(!tmp.empty())
539  {
540  marked_stack.push(tmp.top());
541  mark[tmp.top()]=true;
542  tmp.pop();
543  }
544  mark[vertex]=true;
545  marked_stack.push(vertex);
546  }
547 
548  if(!egraph[point_stack.top()].unsafe_pair(this_vertex, model))
549  {
550  /* tab[] should never be avoided */
551  if(egraph.ignore_arrays ||
552  id2string(this_vertex.variable).find("[]")==std::string::npos)
553  avoid_at_the_end=this_vertex.variable;
554  }
555 
556  /* skip lwfence by po-transition only if we consider a WR */
557  // TO CHECK
558  const bool is_lwfence=
// NOTE(review): numbering jumps 558->560->563->566 -- the right-hand
// sides of these comparisons were lost in extraction.
560  egraph[point_stack.top()].operation==
563  (!this_vertex.WRfence &&
564  egraph[point_stack.top()].operation==
566 
// Re-explore every po successor of the skipped event, consuming one
// po-transition skip (po_trans==0 means "unlimited").
567  for(wmm_grapht::edgest::const_iterator w_it=
568  egraph.po_out(vertex).begin();
569  w_it!=egraph.po_out(vertex).end(); w_it++)
570  {
571  const event_idt w=w_it->first;
572  f|=
573  backtrack(
574  set_of_cycles,
575  source,
576  w,
577  unsafe_met/*_updated*/,
578  (po_trans==0?0:po_trans-1),
579  same_var_pair/*_updated*/,
580  is_lwfence,
581  has_to_be_unsafe,
582  avoid_at_the_end,
583  model);
584  }
585 
586  if(f)
587  {
588  while(!marked_stack.empty() && marked_stack.top()!=vertex)
589  {
590  event_idt up=marked_stack.top();
591  marked_stack.pop();
592  mark[up]=false;
593  }
594 
595  if(!marked_stack.empty())
596  marked_stack.pop();
597  mark[vertex]=false;
598  }
599 
600  skip_tracked.erase(vertex);
601  }
602 
603  return f;
604 }
std::list< event_idt > po_order
Definition: event_graph.h:362
const std::string & id2string(const irep_idt &d)
Definition: irep.h:44
void collect_cycles(std::set< critical_cyclet > &set_of_cycles, memory_modelt model)
Tarjan 1972 adapted and modified for events.
std::string print_name(const critical_cyclet &redyced, memory_modelt model) const
std::stack< event_idt > marked_stack
Definition: event_graph.h:271
void filter_thin_air(std::set< critical_cyclet > &set_of_cycles)
after the collection, eliminates the executions forbidden by an indirect thin-air ...
std::map< event_idt, bool > mark
Definition: event_graph.h:270
operationt operation
Definition: wmm.h:23
graph of abstract events
Definition: wmm.h:22
void remove_com_edge(event_idt a, event_idt b)
Definition: event_graph.h:454
std::map< irep_idt, unsigned char > writes_per_variable
Definition: event_graph.h:229
void hide_internals(critical_cyclet &reduced) const
bool ignore_arrays
Definition: event_graph.h:211
static mstreamt & eom(mstreamt &m)
Definition: message.h:193
bool unsafe_pair(const abstract_eventt &next, memory_modelt model) const
std::size_t size() const
Definition: event_graph.h:390
const wmm_grapht::edgest & po_out(event_idt n) const
Definition: event_graph.h:400
std::map< irep_idt, unsigned char > reads_per_variable
Definition: event_graph.h:230
memory_modelt
Definition: wmm.h:17
std::map< unsigned, unsigned char > events_per_thread
Definition: event_graph.h:231
bool filter_thin_air
Definition: event_graph.h:354
Definition: wmm.h:21
messaget & message
Definition: event_graph.h:356
const wmm_grapht::edgest & com_out(event_idt n) const
Definition: event_graph.h:410
bool are_po_ordered(event_idt a, event_idt b)
Definition: event_graph.h:481
wmm_grapht::node_indext event_idt
Definition: event_graph.h:32
bool backtrack(std::set< critical_cyclet > &set_of_cycles, event_idt source, event_idt vertex, bool unsafe_met, event_idt po_trans, bool same_var_pair, bool lwsync_met, bool has_to_be_unsafe, irep_idt var_to_avoid, memory_modelt model)
see event_grapht::collect_cycles
std::list< event_idt > poUrfe_order
Definition: event_graph.h:363
mstreamt & debug()
Definition: message.h:253
bool is_unsafe(memory_modelt model, bool fast=false)
checks whether there is at least one pair which is unsafe (takes fences and dependencies into account...
std::set< event_idt > thin_air_events
Definition: event_graph.h:251
critical_cyclet extract_cycle(event_idt vertex, event_idt source, unsigned number_of_cycles)
extracts a (whole, unreduced) cycle from the stack.
Definition: wmm.h:19
virtual std::list< event_idt > * order_filtering(std::list< event_idt > *order)
Definition: event_graph.h:239
Definition: wmm.h:20
std::map< unsigned, data_dpt > map_data_dp
Definition: event_graph.h:359
std::stack< event_idt > point_stack
Definition: event_graph.h:272
std::set< event_idt > skip_tracked
Definition: event_graph.h:274
#define stack(x)
Definition: parser.h:144
bool is_lwfence(const goto_programt::instructiont &instruction, const namespacet &ns)
Definition: fence.cpp:32
bool filter_uniproc
Definition: event_graph.h:355
virtual bool filtering(event_idt u)
Definition: event_graph.h:234