ram_universal_gadget.tcc
/** @file
 *****************************************************************************

 Implementation of interfaces for ram_universal_gadget.

 See ram_universal_gadget.hpp .

 *****************************************************************************
 * @author     This file is part of libsnark, developed by SCIPR Lab
 *             and contributors (see AUTHORS).
 * @copyright  MIT license (see LICENSE file)
 *****************************************************************************/

#ifndef RAM_UNIVERSAL_GADGET_TCC_
#define RAM_UNIVERSAL_GADGET_TCC_

#include <libff/algebra/fields/field_utils.hpp>
#include <libff/common/profiling.hpp>
#include <libff/common/utils.hpp>
#include <libsnark/common/data_structures/integer_permutation.hpp>
#include <libsnark/relations/ram_computations/memory/ra_memory.hpp>

namespace libsnark
{

template<typename ramT>
ram_universal_gadget<ramT>::ram_universal_gadget(
    ram_protoboard<ramT> &pb,
    const size_t boot_trace_size_bound,
    const size_t time_bound,
    const pb_variable_array<FieldT> &packed_input,
    const std::string &annotation_prefix)
    : ram_gadget_base<ramT>(pb, annotation_prefix)
    , boot_trace_size_bound(boot_trace_size_bound)
    , time_bound(time_bound)
    , packed_input(packed_input)
{
    num_memory_lines =
        boot_trace_size_bound + (time_bound + 1) +
        time_bound; /* boot lines, (time_bound + 1) execution lines (including
                       initial) and time_bound load instruction lines */
    const size_t timestamp_size = libff::log2(num_memory_lines);
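    /* Worked example (illustrative values, not taken from the source): with
       boot_trace_size_bound = 4 and time_bound = 5 we get
       num_memory_lines = 4 + (5 + 1) + 5 = 15, so each timestamp is
       allocated libff::log2(15) = 4 bits. */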

    /* allocate all lines on the execution side of the routing network */
    libff::enter_block("Allocate initial state line");
    execution_lines.reserve(1 + time_bound);
    execution_lines.emplace_back(execution_line_variable_gadget<ramT>(
        pb,
        timestamp_size,
        pb.ap,
        FMT(annotation_prefix, " execution_lines_%zu", 0)));
    unrouted_memory_lines.emplace_back(&execution_lines[0]);
    libff::leave_block("Allocate initial state line");

    libff::enter_block("Allocate boot lines");
    boot_lines.reserve(boot_trace_size_bound);
    for (size_t i = 0; i < boot_trace_size_bound; ++i) {
        boot_lines.emplace_back(memory_line_variable_gadget<ramT>(
            pb,
            timestamp_size,
            pb.ap,
            FMT(annotation_prefix, " boot_lines_%zu", i)));
        unrouted_memory_lines.emplace_back(&boot_lines[i]);
    }
    libff::leave_block("Allocate boot lines");

    libff::enter_block("Allocate instruction fetch and execution lines");
    load_instruction_lines.reserve(
        time_bound +
        1); /* the last line is NOT a memory line; it exists only to keep the
               code uniform (it holds the unused next-PC result) */
    for (size_t i = 0; i < time_bound; ++i) {
        load_instruction_lines.emplace_back(memory_line_variable_gadget<ramT>(
            pb,
            timestamp_size,
            pb.ap,
            FMT(annotation_prefix, " load_instruction_lines_%zu", i)));
        unrouted_memory_lines.emplace_back(&load_instruction_lines[i]);

        execution_lines.emplace_back(execution_line_variable_gadget<ramT>(
            pb,
            timestamp_size,
            pb.ap,
            FMT(annotation_prefix, " execution_lines_%zu", i + 1)));
        unrouted_memory_lines.emplace_back(&execution_lines[i + 1]);
    }
    load_instruction_lines.emplace_back(memory_line_variable_gadget<ramT>(
        pb,
        timestamp_size,
        pb.ap,
        FMT(annotation_prefix, " load_instruction_lines_%zu", time_bound)));
    libff::leave_block("Allocate instruction fetch and execution lines");
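
    /* At this point unrouted_memory_lines holds, in timestamp order: the
       initial execution line, then all boot lines, then for each step i the
       pair (load_instruction_lines[i], execution_lines[i + 1]). The
       constraints generated later fix each line's timestamp to its index in
       this vector. */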

    /* deal with packing of the input */
    libff::enter_block("Pack input");
    const size_t line_size_bits = pb.ap.address_size() + pb.ap.value_size();
    const size_t max_chunk_size = FieldT::capacity();
    const size_t packed_line_size =
        libff::div_ceil(line_size_bits, max_chunk_size);
    assert(packed_input.size() == packed_line_size * boot_trace_size_bound);
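
    /* Illustrative numbers (assumed, not from the source): for a machine with
       a 16-bit address space and 16-bit words over a field with a 253-bit
       capacity, line_size_bits = 32 and packed_line_size =
       libff::div_ceil(32, 253) = 1, i.e. one field element per boot line. */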

    auto input_it = packed_input.begin();
    for (size_t i = 0; i < boot_trace_size_bound; ++i) {
        /* note the reversed order */
        pb_variable_array<FieldT> boot_line_bits;
        boot_line_bits.insert(
            boot_line_bits.end(),
            boot_lines[boot_trace_size_bound - 1 - i].address->bits.begin(),
            boot_lines[boot_trace_size_bound - 1 - i].address->bits.end());
        boot_line_bits.insert(
            boot_line_bits.end(),
            boot_lines[boot_trace_size_bound - 1 - i]
                .contents_after->bits.begin(),
            boot_lines[boot_trace_size_bound - 1 - i]
                .contents_after->bits.end());

        pb_variable_array<FieldT> packed_boot_line =
            pb_variable_array<FieldT>(input_it, input_it + packed_line_size);
        std::advance(input_it, packed_line_size);

        unpack_boot_lines.emplace_back(multipacking_gadget<FieldT>(
            pb,
            boot_line_bits,
            packed_boot_line,
            max_chunk_size,
            FMT(annotation_prefix, " unpack_boot_lines_%zu", i)));
    }
    libff::leave_block("Pack input");

    /* deal with routing */
    libff::enter_block("Allocate routed memory lines");
    for (size_t i = 0; i < num_memory_lines; ++i) {
        routed_memory_lines.emplace_back(memory_line_variable_gadget<ramT>(
            pb,
            timestamp_size,
            pb.ap,
            FMT(annotation_prefix, " routed_memory_lines_%zu", i)));
    }
    libff::leave_block("Allocate routed memory lines");

    libff::enter_block("Collect inputs/outputs for the routing network");
    routing_inputs.reserve(num_memory_lines);
    routing_outputs.reserve(num_memory_lines);

    for (size_t i = 0; i < num_memory_lines; ++i) {
        routing_inputs.emplace_back(unrouted_memory_lines[i]->all_vars());
        routing_outputs.emplace_back(routed_memory_lines[i].all_vars());
    }
    libff::leave_block("Collect inputs/outputs for the routing network");

    libff::enter_block("Allocate routing network");
    routing_network.reset(new as_waksman_routing_gadget<FieldT>(
        pb,
        num_memory_lines,
        routing_inputs,
        routing_outputs,
        FMT(this->annotation_prefix, " routing_network")));
    libff::leave_block("Allocate routing network");
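
    /* The AS-Waksman network constrains the routed (memory-side) lines to be
       a permutation of the unrouted (time-side) lines; the particular
       permutation is chosen only at witness-generation time (see
       generate_r1cs_witness below). */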

    /* deal with all checkers */
    libff::enter_block("Allocate execution checkers");
    execution_checkers.reserve(time_bound);
    for (size_t i = 0; i < time_bound; ++i) {
        execution_checkers.emplace_back(ram_cpu_checker<ramT>(
            pb,
            load_instruction_lines[i].address->bits,        // prev_pc_addr
            load_instruction_lines[i].contents_after->bits, // prev_pc_val
            execution_lines[i].cpu_state,                   // prev_state
            execution_lines[i + 1].address->bits,           // ls_addr
            execution_lines[i + 1].contents_before->bits,   // ls_prev_val
            execution_lines[i + 1].contents_after->bits,    // ls_next_val
            execution_lines[i + 1].cpu_state,               // next_state
            load_instruction_lines[i + 1].address->bits,    // next_pc_addr
            execution_lines[i + 1].has_accepted,            // next_has_accepted
            FMT(annotation_prefix, " execution_checkers_%zu", i)));
    }
    libff::leave_block("Allocate execution checkers");
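
    /* Checker i ties step i to step i + 1: it takes the fetched instruction
       and CPU state of step i, and constrains the load/store address, the
       before/after memory contents, the next CPU state, the next PC address
       and the acceptance bit of step i + 1. */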

    libff::enter_block("Allocate all memory checkers");
    memory_checkers.reserve(num_memory_lines);
    for (size_t i = 0; i < num_memory_lines; ++i) {
        memory_checkers.emplace_back(memory_checker_gadget<ramT>(
            pb,
            timestamp_size,
            *unrouted_memory_lines[i],
            routed_memory_lines[i],
            FMT(this->annotation_prefix, " memory_checkers_%zu", i)));
    }
    libff::leave_block("Allocate all memory checkers");

    /* done */
}

template<typename ramT>
void ram_universal_gadget<ramT>::generate_r1cs_constraints()
{
    libff::enter_block(
        "Call to generate_r1cs_constraints of ram_universal_gadget");
    for (size_t i = 0; i < boot_trace_size_bound; ++i) {
        unpack_boot_lines[i].generate_r1cs_constraints(false);
    }

    /* ensure that we start with all zeros state */
    for (size_t i = 0; i < this->pb.ap.cpu_state_size(); ++i) {
        generate_r1cs_equals_const_constraint<FieldT>(
            this->pb, execution_lines[0].cpu_state[i], FieldT::zero());
    }

    /* ensure increasing timestamps */
    for (size_t i = 0; i < num_memory_lines; ++i) {
        generate_r1cs_equals_const_constraint<FieldT>(
            this->pb, unrouted_memory_lines[i]->timestamp->packed, FieldT(i));
    }
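
    /* This fixes each time-side timestamp to the constant i, matching the
       assignment made in generate_r1cs_witness below, so the time-side lines
       are pinned to a strictly increasing timestamp order. */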

    /* ensure bitness of trace lines on the time side */
    for (size_t i = 0; i < boot_trace_size_bound; ++i) {
        boot_lines[i].generate_r1cs_constraints(true);
    }

    execution_lines[0].generate_r1cs_constraints(true);
    for (size_t i = 0; i < time_bound; ++i) {
        load_instruction_lines[i].generate_r1cs_constraints(true);
        execution_lines[i + 1].generate_r1cs_constraints(true);
    }

    /* ensure bitness of trace lines on the memory side */
    for (size_t i = 0; i < num_memory_lines; ++i) {
        routed_memory_lines[i].generate_r1cs_constraints();
    }

    /* ensure that load instruction lines really do loads */
    for (size_t i = 0; i < time_bound; ++i) {
        this->pb.add_r1cs_constraint(
            r1cs_constraint<FieldT>(
                1,
                load_instruction_lines[i].contents_before->packed,
                load_instruction_lines[i].contents_after->packed),
            FMT(this->annotation_prefix, " load_instruction_%zu_is_a_load", i));
    }
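
    /* The constraint 1 * contents_before = contents_after forces an
       instruction fetch to leave memory unchanged: the value stored at the PC
       address after the access equals the value that was there before. */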

    /* ensure correct execution */
    for (size_t i = 0; i < time_bound; ++i) {
        execution_checkers[i].generate_r1cs_constraints();
    }

    /* check memory */
    routing_network->generate_r1cs_constraints();

    for (size_t i = 0; i < num_memory_lines; ++i) {
        memory_checkers[i].generate_r1cs_constraints();
    }

    /* ensure that PC started at the prescribed value */
    generate_r1cs_equals_const_constraint<FieldT>(
        this->pb,
        load_instruction_lines[0].address->packed,
        FieldT(this->pb.ap.initial_pc_addr()));

    /* ensure that the last state was an accepting one */
    generate_r1cs_equals_const_constraint<FieldT>(
        this->pb,
        execution_lines[time_bound].has_accepted,
        FieldT::one(),
        "last_state_must_be_accepting");

    /* print constraint profiling */
    const size_t num_constraints = this->pb.num_constraints();
    const size_t num_variables = this->pb.num_variables();

    if (!libff::inhibit_profiling_info) {
        libff::print_indent();
        printf("* Number of constraints: %zu\n", num_constraints);
        libff::print_indent();
        printf(
            "* Number of constraints / cycle: %0.1f\n",
            1. * num_constraints / this->time_bound);

        libff::print_indent();
        printf("* Number of variables: %zu\n", num_variables);
        libff::print_indent();
        printf(
            "* Number of variables / cycle: %0.1f\n",
            1. * num_variables / this->time_bound);
    }
    libff::leave_block(
        "Call to generate_r1cs_constraints of ram_universal_gadget");
}

template<typename ramT>
void ram_universal_gadget<ramT>::generate_r1cs_witness(
    const ram_boot_trace<ramT> &boot_trace,
    const ram_input_tape<ramT> &auxiliary_input)
{
    /* assign correct timestamps to all lines */
    for (size_t i = 0; i < num_memory_lines; ++i) {
        this->pb.val(unrouted_memory_lines[i]->timestamp->packed) = FieldT(i);
        unrouted_memory_lines[i]
            ->timestamp->generate_r1cs_witness_from_packed();
    }

    /* fill in the initial state */
    const ram_cpu_state<ramT> initial_state = this->pb.ap.initial_cpu_state();
    execution_lines[0].cpu_state.fill_with_bits(this->pb, initial_state);

    /* fill in the boot section */
    memory_contents memory_after_boot;

    for (auto it : boot_trace.get_all_trace_entries()) {
        const size_t boot_pos = it.first;
        assert(boot_pos < boot_trace_size_bound);
        const size_t address = it.second.first;
        const size_t contents = it.second.second;

        this->pb.val(boot_lines[boot_pos].address->packed) =
            FieldT(address, true);
        this->pb.val(boot_lines[boot_pos].contents_after->packed) =
            FieldT(contents, true);
        boot_lines[boot_pos].generate_r1cs_witness_from_packed();

        memory_after_boot[address] = contents;
    }

    /* do the actual execution */
    ra_memory mem_backend(
        1ul << (this->pb.ap.address_size()),
        this->pb.ap.value_size(),
        memory_after_boot);
    typename ram_input_tape<ramT>::const_iterator auxiliary_input_it =
        auxiliary_input.begin();

    this->pb.val(load_instruction_lines[0].address->packed) =
        FieldT(this->pb.ap.initial_pc_addr(), true);
    load_instruction_lines[0].address->generate_r1cs_witness_from_packed();

    for (size_t i = 0; i < time_bound; ++i) {
        /* load instruction */
        const size_t pc_addr =
            this->pb.val(load_instruction_lines[i].address->packed).as_ulong();
        const size_t pc_val = mem_backend.get_value(pc_addr);

        this->pb.val(load_instruction_lines[i].contents_before->packed) =
            FieldT(pc_val, true);
        this->pb.val(load_instruction_lines[i].contents_after->packed) =
            FieldT(pc_val, true);
        load_instruction_lines[i].generate_r1cs_witness_from_packed();

        /* first fetch the address part of the memory */
        execution_checkers[i].generate_r1cs_witness_address();
        execution_lines[i + 1].address->generate_r1cs_witness_from_bits();

        /* fill it in */
        const size_t load_store_addr =
            this->pb.val(execution_lines[i + 1].address->packed).as_ulong();
        const size_t load_store_prev_val =
            mem_backend.get_value(load_store_addr);

        this->pb.val(execution_lines[i + 1].contents_before->packed) =
            FieldT(load_store_prev_val, true);
        execution_lines[i + 1]
            .contents_before->generate_r1cs_witness_from_packed();

        /* then execute the rest of the instruction */
        execution_checkers[i].generate_r1cs_witness_other(
            auxiliary_input_it, auxiliary_input.end());

        /* update the memory possibly changed by the CPU checker */
        execution_lines[i + 1]
            .contents_after->generate_r1cs_witness_from_bits();
        const size_t load_store_next_val =
            this->pb.val(execution_lines[i + 1].contents_after->packed)
                .as_ulong();
        mem_backend.set_value(load_store_addr, load_store_next_val);

        /* the next PC address was passed in bit form, so maintain the packed
         * form as well */
        load_instruction_lines[i + 1]
            .address->generate_r1cs_witness_from_bits();
    }
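
    /* Witness generation for a step is interleaved with a simulation of the
       machine against mem_backend: the load/store address is produced first,
       the current memory value at that address is copied into
       contents_before, the CPU checker then finishes the step, and finally
       mem_backend is updated with contents_after so the next iteration sees
       consistent memory. */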

    /*
      Get the correct memory permutation.

      We sort all memory accesses by address, breaking ties by
      timestamp. In our routing configuration we pair each memory
      access with the subsequent access in this ordering.

      That way the num_memory_lines memory checkers collectively walk a
      full cycle over all memory accesses, and memory consistency is
      enforced by the ordering property they check.
    */

    typedef std::pair<size_t, size_t>
        mem_pair; /* a pair of address, timestamp */
    std::vector<mem_pair> mem_pairs;

    for (size_t i = 0; i < this->num_memory_lines; ++i) {
        mem_pairs.emplace_back(std::make_pair(
            this->pb.val(unrouted_memory_lines[i]->address->packed).as_ulong(),
            this->pb.val(unrouted_memory_lines[i]->timestamp->packed)
                .as_ulong()));
    }

    std::sort(mem_pairs.begin(), mem_pairs.end());

    integer_permutation pi(this->num_memory_lines);
    for (size_t i = 0; i < this->num_memory_lines; ++i) {
        const size_t timestamp =
            this->pb.val(unrouted_memory_lines[i]->timestamp->packed)
                .as_ulong();
        const size_t address =
            this->pb.val(unrouted_memory_lines[i]->address->packed).as_ulong();

        const auto it = std::upper_bound(
            mem_pairs.begin(),
            mem_pairs.end(),
            std::make_pair(address, timestamp));
        const size_t prev = (it == mem_pairs.end() ? 0 : it->second);
        pi.set(prev, i);
    }
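
    /* For line i, std::upper_bound locates the access that follows
       (address, timestamp) in the sorted order; prev is that successor's
       timestamp (its index on the time side), and the last access in the
       order wraps around to slot 0. The routing therefore delivers to slot t
       the access that precedes access t in (address, timestamp) order, which
       is the pairing the memory checkers verify. */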

    /* route according to the memory permutation */
    routing_network->generate_r1cs_witness(pi);

    for (size_t i = 0; i < this->num_memory_lines; ++i) {
        routed_memory_lines[i].generate_r1cs_witness_from_bits();
    }

    /* generate witness for memory checkers */
    for (size_t i = 0; i < this->num_memory_lines; ++i) {
        memory_checkers[i].generate_r1cs_witness();
    }

    /* repack back the input */
    for (size_t i = 0; i < boot_trace_size_bound; ++i) {
        unpack_boot_lines[i].generate_r1cs_witness_from_bits();
    }

    /* print debugging information */
    if (!libff::inhibit_profiling_info) {
        libff::print_indent();
        printf(
            "* Protoboard satisfied: %s\n",
            (this->pb.is_satisfied() ? "YES" : "no"));
    }
}

template<typename ramT>
void ram_universal_gadget<ramT>::print_execution_trace() const
{
    for (size_t i = 0; i < boot_trace_size_bound; ++i) {
        printf(
            "Boot process at t=#%zu: store %zu at %zu\n",
            i,
            this->pb.val(boot_lines[i].contents_after->packed).as_ulong(),
            this->pb.val(boot_lines[i].address->packed).as_ulong());
    }

    for (size_t i = 0; i < time_bound; ++i) {
        printf("Execution step %zu:\n", i);
        printf(
            " Loaded instruction %zu from address %zu (ts = %zu)\n",
            this->pb.val(load_instruction_lines[i].contents_after->packed)
                .as_ulong(),
            this->pb.val(load_instruction_lines[i].address->packed).as_ulong(),
            this->pb.val(load_instruction_lines[i].timestamp->packed)
                .as_ulong());

        printf(" Debugging information from the transition function:\n");
        execution_checkers[i].dump();

        printf(
            " Memory operation executed: addr = %zu, contents_before = %zu, "
            "contents_after = %zu (ts_before = %zu, ts_after = %zu)\n",
            this->pb.val(execution_lines[i + 1].address->packed).as_ulong(),
            this->pb.val(execution_lines[i + 1].contents_before->packed)
                .as_ulong(),
            this->pb.val(execution_lines[i + 1].contents_after->packed)
                .as_ulong(),
            this->pb.val(execution_lines[i].timestamp->packed).as_ulong(),
            this->pb.val(execution_lines[i + 1].timestamp->packed).as_ulong());
    }
}

template<typename ramT>
void ram_universal_gadget<ramT>::print_memory_trace() const
{
    for (size_t i = 0; i < num_memory_lines; ++i) {
        printf("Memory access #%zu:\n", i);
        printf(
            " Time side : ts = %zu, address = %zu, contents_before = %zu, "
            "contents_after = %zu\n",
            this->pb.val(unrouted_memory_lines[i]->timestamp->packed)
                .as_ulong(),
            this->pb.val(unrouted_memory_lines[i]->address->packed).as_ulong(),
            this->pb.val(unrouted_memory_lines[i]->contents_before->packed)
                .as_ulong(),
            this->pb.val(unrouted_memory_lines[i]->contents_after->packed)
                .as_ulong());
        printf(
            " Memory side: ts = %zu, address = %zu, contents_before = %zu, "
            "contents_after = %zu\n",
            this->pb.val(routed_memory_lines[i].timestamp->packed).as_ulong(),
            this->pb.val(routed_memory_lines[i].address->packed).as_ulong(),
            this->pb.val(routed_memory_lines[i].contents_before->packed)
                .as_ulong(),
            this->pb.val(routed_memory_lines[i].contents_after->packed)
                .as_ulong());
    }
}

template<typename ramT>
size_t ram_universal_gadget<ramT>::packed_input_element_size(
    const ram_architecture_params<ramT> &ap)
{
    const size_t line_size_bits = ap.address_size() + ap.value_size();
    const size_t max_chunk_size = FieldT::capacity();
    const size_t packed_line_size =
        libff::div_ceil(line_size_bits, max_chunk_size);

    return packed_line_size;
}

template<typename ramT>
size_t ram_universal_gadget<ramT>::packed_input_size(
    const ram_architecture_params<ramT> &ap, const size_t boot_trace_size_bound)
{
    return packed_input_element_size(ap) * boot_trace_size_bound;
}
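
/* Callers can use these helpers to size the primary input before
   constructing the gadget: the packed_input array given to the constructor is
   expected to hold exactly packed_input_size(ap, boot_trace_size_bound) field
   elements, i.e. packed_input_element_size(ap) elements per boot line (this
   matches the assert in the constructor). */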

} // namespace libsnark

#endif // RAM_UNIVERSAL_GADGET_TCC_