1use super::default_attrs;
4use crate::{
5 Backend, Builder, Bytecode, EvmContext, Inst, InstData, InstFlags, IntCC, Result, StackSection,
6 decode_pair, decode_single,
7};
8use oxc_index::IndexVec;
9use revm_bytecode::opcode as op;
10use revm_interpreter::{InputsImpl, InstructionResult};
11use revm_primitives::U256;
12use revmc_backend::{Attribute, BackendTypes, FunctionAttributeLocation, Pointer, TypeMethods};
13use revmc_builtins::{Builtin, Builtins, CallKind, CreateKind};
14use std::mem;
15
16mod peephole;
17
18mod vstack;
19use vstack::{VSlot, VStack};
20
/// Maximum EVM stack depth; the function-local stack slot is sized to this many 256-bit words.
const STACK_CAP: usize = 1024;
/// Code generation configuration for a single translated function.
#[derive(Clone, Copy, Debug)]
pub(super) struct FcxConfig {
    /// Emit human-readable comments into the generated code.
    pub(super) comments: bool,
    /// Emit runtime assertions (e.g. the spec-id check at function entry).
    pub(super) debug_assertions: bool,
    /// Force frame pointers in the generated function.
    pub(super) frame_pointers: bool,

    /// Emit per-instruction debug locations and named basic blocks.
    pub(super) debug: bool,
    /// Operate directly on the caller-provided stack buffer instead of a
    /// function-local stack slot, so the stack is observable from outside.
    pub(super) inspect_stack: bool,
    /// Emit stack overflow/underflow checks per stack section.
    pub(super) stack_bound_checks: bool,
    /// Emit gas accounting.
    pub(super) gas_metering: bool,
    /// Collapse all failure paths into a single error code
    /// (`InstructionResult::OutOfGas`) instead of a phi over the real codes.
    pub(super) single_error: bool,
}
36
37impl Default for FcxConfig {
38 fn default() -> Self {
39 Self {
40 debug_assertions: cfg!(debug_assertions),
41 comments: false,
42 frame_pointers: cfg!(debug_assertions) || cfg!(force_frame_pointers),
43 debug: false,
44 inspect_stack: false,
45 stack_bound_checks: true,
46 gas_metering: true,
47 single_error: true,
48 }
49 }
50}
51
/// `(value, predecessor_block)` pairs used to build phi nodes.
type Incoming<B> = Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>;

/// `(case_value, target_block)` pairs for switch instructions.
#[allow(dead_code)]
type SwitchTargets<B> = Vec<(u64, <B as BackendTypes>::BasicBlock)>;
58
/// Per-function translation state.
pub(super) struct FunctionCx<'a, B: Backend> {
    /// Code generation configuration.
    config: FcxConfig,

    /// The IR builder.
    bcx: B::Builder<'a>,

    // Commonly used IR types.
    isize_type: B::Type,
    word_type: B::Type,
    address_type: B::Type,
    i8_type: B::Type,

    /// Slot holding the current EVM stack length.
    stack_len: Pointer<B::Builder<'a>>,
    /// The EVM stack storage: a function-local array, or the caller's `sp`
    /// buffer when `config.inspect_stack` is set.
    stack: Pointer<B::Builder<'a>>,
    /// The `sp` function parameter; `Some` only when the stack is local.
    sp_arg: Option<B::Value>,
    /// Pointer to the `EvmContext` (first function parameter).
    ecx: B::Value,
    /// Stack length value computed before the current instruction.
    len_before: B::Value,
    /// Virtual pop/push offset of the current instruction relative to `len_before`.
    len_offset: i8,

    /// Virtual stack of values not yet written back to stack memory.
    vstack: VStack<B::Value>,
    /// Stack length at the start of the current stack section.
    section_start_len: B::Value,
    /// Stack pointer at the start of the current stack section.
    section_start_sp: B::Value,
    /// Accumulated stack-length delta since the section start.
    section_len_offset: i32,
    /// The delta most recently stored into the `stack_len` slot.
    stored_len_offset: i32,
    /// Cached memory base pointer; reset at each stack-section head.
    cached_mem_base: Option<B::Value>,

    /// The analyzed bytecode being translated.
    bytecode: &'a Bytecode<'a>,
    /// Per-instruction debug line numbers; only populated when `config.debug`.
    inst_lines: IndexVec<Inst, u32>,
    /// Entry basic block of each instruction.
    inst_entries: IndexVec<Inst, B::BasicBlock>,
    /// The instruction currently being translated.
    current_inst: Option<Inst>,

    /// `(target_pc, block)` edges into the dynamic jump table.
    incoming_dynamic_jumps: Incoming<B>,
    /// Shared switch-on-PC block for dynamic jumps.
    dynamic_jump_table: B::BasicBlock,

    /// `(result, block)` edges into the failure epilogue.
    incoming_failures: Incoming<B>,
    /// Shared failure epilogue block.
    failure_block: Option<B::BasicBlock>,
    /// `(result, block)` edges into the return epilogue.
    incoming_returns: Incoming<B>,
    /// Shared return epilogue block.
    return_block: Option<B::BasicBlock>,

    /// Blocks that execution can resume at; selected by `ecx.resume_at`
    /// (1-based — 0 means "not suspended").
    resume_blocks: Vec<B::BasicBlock>,
    /// `(resume_index, block)` edges into the suspend block.
    suspend_blocks: Incoming<B>,
    /// Shared suspend epilogue block.
    suspend_block: B::BasicBlock,

    /// Callable builtin functions.
    builtins: &'a mut Builtins<B>,
}
141
142impl<'a, B: Backend> FunctionCx<'a, B> {
    /// Translates the analyzed bytecode into IR, emitting the full function body.
    ///
    /// Builds the entry/resume scaffolding, one basic block per reachable
    /// instruction, the shared dynamic-jump table, and the shared
    /// failure/return/suspend epilogues.
    #[allow(rustdoc::invalid_rust_codeblocks)]
    pub(super) fn translate(
        mut bcx: B::Builder<'a>,
        config: FcxConfig,
        builtins: &'a mut Builtins<B>,
        bytecode: &'a Bytecode<'a>,
    ) -> Result<()> {
        let entry_block = bcx.current_block().unwrap();

        if config.debug {
            bcx.clear_debug_location();
        }

        // Commonly used IR types.
        let isize_type = bcx.type_ptr_sized_int();
        let i8_type = bcx.type_int(8);
        let word_type = bcx.type_int(256);
        let address_type = bcx.type_int(160);

        // Function parameters: (ecx, sp, stack_len).
        let ecx = bcx.fn_param(0);

        let sp_arg = bcx.fn_param(1);
        // When the stack does not need to be observable from outside, keep it
        // in a function-local slot instead of the caller's buffer.
        let local_stack = !config.inspect_stack;
        let stack = if local_stack {
            let stack_type = bcx.type_array(word_type, STACK_CAP as u32);
            bcx.new_stack_slot(stack_type, "stack.addr")
        } else {
            Pointer::new_address(word_type, sp_arg)
        };

        let stack_len_arg = bcx.fn_param(2);
        // The working stack length always lives in a local slot.
        let stack_len = bcx.new_stack_slot(isize_type, "len.addr");

        // One entry block per instruction; all dead code shares a single
        // `unreachable` block.
        let unreachable_block = bcx.create_block("unreachable");
        let mut inst_entries: IndexVec<Inst, _> = bytecode
            .iter_all_insts()
            .map(|(i, data)| {
                if data.is_dead_code() {
                    unreachable_block
                } else {
                    let name = if config.debug { &bytecode.op_block_name(Some(i), "") } else { "" };
                    bcx.create_block(name)
                }
            })
            .collect();
        assert!(!inst_entries.is_empty(), "translating empty bytecode");

        // Redirected instructions jump straight to their target's entry block.
        if bytecode.has_redirects() {
            for (&from, &to) in &bytecode.redirects {
                inst_entries[from] = inst_entries[to];
            }
        }

        // Shared control-flow blocks, filled in after translation.
        let dynamic_jump_table = bcx.create_block("dynamic_jump_table");
        let suspend_block = bcx.create_block("suspend");
        let failure_block = bcx.create_block("failure");
        let return_block = bcx.create_block("return");

        let section_start_sp = stack.addr(&mut bcx);
        let zero = bcx.iconst(isize_type, 0);
        let mut fx = FunctionCx {
            config,

            isize_type,
            word_type,
            address_type,
            i8_type,
            stack_len,
            stack,
            sp_arg: local_stack.then_some(sp_arg),
            ecx,
            len_before: zero,
            len_offset: 0,
            section_start_len: zero,
            section_start_sp,
            section_len_offset: 0,
            stored_len_offset: 0,
            cached_mem_base: None,
            bcx,

            bytecode,
            inst_lines: if config.debug { bytecode.take_inst_lines() } else { IndexVec::new() },
            inst_entries,
            current_inst: None,

            incoming_dynamic_jumps: Vec::new(),
            dynamic_jump_table,

            incoming_failures: Vec::new(),
            failure_block: Some(failure_block),
            incoming_returns: Vec::new(),
            return_block: Some(return_block),

            resume_blocks: Vec::new(),
            suspend_blocks: Vec::new(),
            suspend_block,

            builtins,

            vstack: VStack::default(),
        };

        // Guard against running code compiled for a different spec id.
        if config.debug_assertions {
            let compiled_spec = fx.bcx.iconst(fx.i8_type, bytecode.spec_id as i64);
            let _ = fx.call_builtin(Builtin::AssertSpecId, &[ecx, compiled_spec]);
        }

        let first_inst_block = fx.inst_entries[Inst::from_usize(0)];
        let post_entry_block = fx.bcx.create_block_after(entry_block, "entry.post");
        let resume_block = fx.bcx.create_block_after(post_entry_block, "resume");
        fx.bcx.br(post_entry_block);

        // Translate every reachable instruction into its entry block.
        for (inst, _) in bytecode.iter_insts() {
            fx.translate_inst(inst)?;
        }

        if config.debug {
            fx.bcx.clear_debug_location();
        }

        // Dead-code entry block.
        fx.bcx.switch_to_block(unreachable_block);
        fx.bcx.unreachable();
        // Dynamic jump table: switch on the target PC over all JUMPDESTs.
        if bytecode.has_dynamic_jumps() {
            fx.bcx.switch_to_block(fx.dynamic_jump_table);
            let jumpdests = bytecode.iter_insts().filter(|(_, data)| data.opcode == op::JUMPDEST);
            let targets = jumpdests
                .map(|(inst, data)| (data.jumpdest_pc() as u64, fx.inst_entries[inst]))
                .collect::<Vec<_>>();
            let i64_type = fx.bcx.type_int(64);
            let index = fx.bcx.phi(i64_type, &fx.incoming_dynamic_jumps);
            let invalid_jump = fx.add_invalid_jump();
            fx.bcx.switch(index, invalid_jump, &targets, true);
        } else {
            debug_assert!(fx.incoming_dynamic_jumps.is_empty());
            fx.bcx.switch_to_block(fx.dynamic_jump_table);
            fx.bcx.unreachable();
        }

        // Initializes the stack length (and contents) at a fresh start.
        let load_len_at_start = |fx: &mut Self| {
            if config.inspect_stack {
                // The caller's length and stack contents are authoritative.
                let stack_len = fx.bcx.load(fx.isize_type, stack_len_arg, "stack_len");
                fx.stack_len.store(&mut fx.bcx, stack_len);
                fx.copy_stack_from_arg(stack_len);
            } else {
                fx.stack_len.store_imm(&mut fx.bcx, 0);
            }
        };
        // Resume scaffolding is only emitted when the bytecode can suspend.
        let generate_resume = bytecode.may_suspend();
        if generate_resume {
            let get_ecx_resume_at_ptr = |fx: &mut Self| {
                fx.get_field(
                    fx.ecx,
                    mem::offset_of!(EvmContext<'_>, resume_at),
                    "ecx.resume_at.addr",
                )
            };

            let resume_ty = fx.isize_type;

            // Dispatch on `ecx.resume_at`: 0 means a fresh start, any other
            // value selects the matching resume block (1-based indices).
            {
                let no_resume_block = fx.bcx.create_block_after(resume_block, "no_resume");

                fx.bcx.switch_to_block(post_entry_block);
                let resume_at = get_ecx_resume_at_ptr(&mut fx);
                let resume_at = fx.bcx.load_aligned(resume_ty, resume_at, 1, "ecx.resume_at");
                let no_resume = fx.bcx.icmp_imm(IntCC::Equal, resume_at, 0);
                fx.bcx.brif(no_resume, no_resume_block, resume_block);

                fx.bcx.switch_to_block(no_resume_block);
                load_len_at_start(&mut fx);
                fx.bcx.br(first_inst_block);

                fx.bcx.switch_to_block(resume_block);
                // When resuming, the stack always comes from the caller.
                let stack_len = fx.bcx.load(fx.isize_type, stack_len_arg, "stack_len");
                fx.stack_len.store(&mut fx.bcx, stack_len);
                fx.copy_stack_from_arg(stack_len);
                let default = fx.bcx.create_block_after(resume_block, "resume_invalid");
                fx.bcx.switch_to_block(default);
                fx.call_panic("invalid `resume_at` value");

                fx.bcx.switch_to_block(resume_block);
                // Resume indices are 1-based; 0 is reserved for "not suspended".
                let targets = fx
                    .resume_blocks
                    .iter()
                    .enumerate()
                    .map(|(i, b)| (i as u64 + 1, *b))
                    .collect::<Vec<_>>();
                fx.bcx.switch(resume_at, default, &targets, true);
            }

            // Suspend: record where to resume, persist the stack if it is
            // local, and return `InstructionResult::Stop`.
            {
                fx.bcx.switch_to_block(fx.suspend_block);
                let resume_value = fx.bcx.phi(resume_ty, &fx.suspend_blocks);
                let resume_at = get_ecx_resume_at_ptr(&mut fx);
                fx.bcx.store_aligned(resume_value, resume_at, 1);

                if !config.inspect_stack {
                    // The local stack must be spilled so resume can reload it.
                    fx.copy_stack_to_arg();
                    fx.save_stack_len();
                }

                fx.build_return_imm(InstructionResult::Stop);
            }
        } else {
            debug_assert!(fx.resume_blocks.is_empty());
            debug_assert!(fx.suspend_blocks.is_empty());

            fx.bcx.switch_to_block(post_entry_block);
            load_len_at_start(&mut fx);
            fx.bcx.br(first_inst_block);

            // Resume/suspend blocks exist but are never targeted.
            fx.bcx.switch_to_block(resume_block);
            fx.bcx.unreachable();
            fx.bcx.switch_to_block(fx.suspend_block);
            fx.bcx.unreachable();
        }

        // Shared failure epilogue.
        fx.bcx.switch_to_block(fx.failure_block.unwrap());
        if !fx.incoming_failures.is_empty() {
            let failure_value = if config.single_error {
                // All failure codes are collapsed into one.
                fx.bcx.iconst(fx.i8_type, InstructionResult::OutOfGas as i64)
            } else {
                fx.bcx.phi(fx.i8_type, &fx.incoming_failures)
            };
            fx.bcx.set_current_block_cold();
            fx.build_return(failure_value);
        } else {
            fx.bcx.unreachable();
        }

        // Shared return epilogue.
        fx.bcx.switch_to_block(fx.return_block.unwrap());
        if !fx.incoming_returns.is_empty() {
            let return_value = fx.bcx.phi(fx.i8_type, &fx.incoming_returns);
            if config.inspect_stack {
                // Persist stack and length so the caller can observe them.
                fx.copy_stack_to_arg();
                fx.save_stack_len();
            }
            fx.bcx.ret(&[return_value]);
        } else {
            fx.bcx.unreachable();
        }

        fx.bcx.seal_all_blocks();

        Ok(())
    }
476
    /// Translates a single instruction, emitting into its entry block.
    #[instrument(level = "debug", skip_all, fields(inst = %self.bytecode.inst(inst).to_op()))]
    fn translate_inst(&mut self, inst: Inst) -> Result<()> {
        self.current_inst = Some(inst);
        let data = self.bytecode.inst(inst);
        let opcode = data.opcode;
        let entry_block = self.inst_entries[inst];
        self.bcx.switch_to_block(entry_block);

        if self.config.debug {
            self.bcx.set_debug_location(self.inst_lines[inst], 1);
        }

        // Falls through to the next instruction's entry block, if any.
        let branch_to_next_opcode = |this: &mut Self| {
            debug_assert!(
                !this.bytecode.is_instr_diverging(inst),
                "attempted to branch to next instruction in a diverging instruction: {data:?}",
            );
            if let Some(&next) = this.inst_entries.get(inst + 1) {
                this.bcx.br(next);
            }
        };

        // Common exit paths for the opcode `match` below.
        macro_rules! goto_return {
            // Return without emitting a branch (the arm already terminated the block).
            (no_branch $($comment:expr)?) => {
                $(
                    if self.config.comments {
                        self.add_comment($comment);
                    }
                )?
                return Ok(());
            };
            // Emit a normal return with the given result, then stop.
            (build $ret:expr) => {{
                self.build_return_imm($ret);
                goto_return!(no_branch);
            }};
            // Emit a failure with the given result, then stop.
            (fail $ret:expr) => {{
                self.build_fail_imm($ret);
                goto_return!(no_branch);
            }};
            // Fall through to the next instruction, synchronizing the virtual
            // stack first: fully materialize it at a section boundary,
            // otherwise just relieve pressure.
            ($($comment:expr)?) => {
                if self.inst_entries.get(inst + 1).is_some() {
                    let next = self.bytecode.inst(inst + 1);
                    if !next.is_dead_code() && next.is_stack_section_head() {
                        self.materialize_live_stack();
                    } else {
                        self.relieve_vstack_pressure();
                    }
                }
                branch_to_next_opcode(self);
                goto_return!(no_branch $($comment)?);
            };
        }

        debug_assert!(!data.flags.contains(InstFlags::DEAD_CODE));

        #[cfg(test)]
        if opcode == crate::TEST_SUSPEND {
            self.suspend();
            goto_return!(no_branch);
        }

        // Disabled/unknown instructions fail immediately.
        if data.flags.contains(InstFlags::DISABLED) {
            goto_return!(fail InstructionResult::NotActivated);
        }
        if data.flags.contains(InstFlags::UNKNOWN) {
            goto_return!(fail InstructionResult::OpcodeNotFound);
        }

        // Charge the statically-known gas cost of this instruction's gas section.
        self.gas_cost_imm(data.gas_section.gas_cost as u64);

        let (inp, out) = data.stack_io();
        let diff = effective_stack_diff(inp, out, data);
        self.len_offset = 0;
        // At a stack-section head, reload the real stack length and reset all
        // per-section bookkeeping.
        if data.is_stack_section_head() {
            self.section_start_len = self.stack_len.load(&mut self.bcx, "stack_len");
            self.section_start_sp = self.sp_at(self.section_start_len);
            self.section_len_offset = 0;
            self.stored_len_offset = 0;
            self.cached_mem_base = None;

            let section = data.stack_section;
            self.vstack.reset(section.inputs as usize, section.max_growth.max(0) as usize);
        }
        self.len_before = if self.section_len_offset == 0 {
            self.section_start_len
        } else {
            self.bcx.iadd_imm(self.section_start_len, self.section_len_offset as i64)
        };

        if self.config.stack_bound_checks {
            self.check_stack_bounds(data.stack_section);
        }

        // No-ops only need the virtual stack brought in line with the diff.
        if data.flags.contains(InstFlags::NOOP) {
            self.sync_noop_diff(inst, diff);
            self.section_len_offset += diff;
            goto_return!("noop");
        }

        // Store the updated length at most once per distinct offset.
        let new_len_offset = self.section_len_offset + diff;
        if new_len_offset != self.stored_len_offset {
            let len_changed = self.bcx.iadd_imm(self.len_before, diff as i64);
            self.stack_len.store(&mut self.bcx, len_changed);
            self.stored_len_offset = new_len_offset;
        }

        // Constant-folded output: drop the inputs and push the known constant
        // instead of lowering the instruction.
        if out >= 1
            && let Some(const_out) = self.bytecode.const_output(inst)
        {
            debug_assert!(
                out == 1 || out == inp + 1,
                "const_output assumes single synthesized push: inp={inp}, out={out}",
            );
            debug_assert!(!data.may_suspend() && !data.is_branching());
            let drop_count = (inp + 1 - out) as usize;
            self.vstack.drop_top(drop_count);
            self.len_offset -= drop_count as i8;
            let value = self.bcx.iconst_256(const_out);
            self.push(value);
            self.section_len_offset += diff;
            goto_return!("const output");
        }

        // Try peephole patterns before the generic lowering.
        if self.try_peephole(data) {
            self.sync_virtual_stack_diff(diff);
            self.section_len_offset += diff;
            if self.current_inst().is_diverging() {
                goto_return!(no_branch);
            } else {
                goto_return!("peephole");
            }
        }

        // pop one operand, apply `$op`, push the result.
        macro_rules! unop {
            ($op:ident) => {{
                let mut a = self.pop();
                a = self.bcx.$op(a);
                self.push(a);
            }};
        }

        // pop two operands, apply `$op`, push the result; the `@shift` form
        // also saturates to `$default` when the shift amount exceeds 255.
        macro_rules! binop {
            ($op:ident) => {{
                let [a, b] = self.popn();
                let r = self.bcx.$op(a, b);
                self.push(r);
            }};
            (@shift $op:ident, | $value:ident, $shift:ident | $default:expr) => {{
                let [$shift, $value] = self.popn();
                let r = self.bcx.$op($value, $shift);
                let overflow = self.bcx.icmp_imm(IntCC::UnsignedGreaterThan, $shift, 255);
                let default = $default;
                let r = self.bcx.select(overflow, default, r);
                self.push(r);
            }};
        }

        // Access struct fields through `mem::offset_of!`:
        // `@get` yields the field address, `@load` reads it (optionally
        // byte-swapping when the host endianness differs from `$endian`),
        // and `@push` additionally zero-extends to a word and pushes it.
        macro_rules! field {
            (@get $base:expr, $($paths:path),*; $($spec:tt).*) => {
                self.get_field($base, 0 $(+ mem::offset_of!($paths, $spec))*, stringify!($($spec).*.addr))
            };
            (@load $(@[endian = $endian:tt])? $ty:expr, $base:expr, $($paths:path),*; $($spec:tt).*) => {{
                let ptr = field!(@get $base, $($paths),*; $($spec).*);
                #[allow(unused_mut)]
                let mut value = self.bcx.load_aligned($ty, ptr, 1, stringify!($($spec).*));
                $(
                    if !cfg!(target_endian = $endian) {
                        value = self.bcx.bswap(value);
                    }
                )?
                value
            }};
            (@push $(@[endian = $endian:tt])? $ty:expr, $base:expr, $($rest:tt)*) => {{
                let mut value = field!(@load $(@[endian = $endian])? $ty, $base, $($rest)*);
                if self.bcx.type_bit_width($ty) < 256 {
                    value = self.bcx.zext(self.word_type, value);
                }
                self.push(value);
            }};
        }

        match data.opcode {
            op::STOP => goto_return!(build InstructionResult::Stop),

            // Arithmetic; division/modular ops go through builtins.
            op::ADD => binop!(iadd),
            op::MUL => binop!(imul),
            op::SUB => binop!(isub),
            op::DIV => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::Div, &[sp]);
            }
            op::SDIV => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::SDiv, &[sp]);
            }
            op::MOD => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::Mod, &[sp]);
            }
            op::SMOD => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::SMod, &[sp]);
            }
            op::ADDMOD => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::AddMod, &[sp]);
            }
            op::MULMOD => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::MulMod, &[sp]);
            }
            op::EXP => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Exp, &[self.ecx, sp]);
            }
            op::SIGNEXTEND => {
                // Sign-extend `x` from byte index `ext`; a no-op when `ext >= 31`.
                let [ext, x] = self.popn();

                let might_do_something = self.bcx.icmp_imm(IntCC::UnsignedLessThan, ext, 31);

                // Shift the chosen byte to the top, then arithmetic-shift back.
                let shift = self.bcx.imul_imm(ext, 8);
                let c248 = self.bcx.iconst_256(248);
                let shift = self.bcx.isub(c248, shift);
                let shifted = self.bcx.ishl(x, shift);
                let sext = self.bcx.sshr(shifted, shift);

                let r = self.bcx.select(might_do_something, sext, x);
                self.push(r);
            }

            // Comparisons produce 0/1 zero-extended to a full word.
            op::LT | op::GT | op::SLT | op::SGT | op::EQ => {
                let cond = match opcode {
                    op::LT => IntCC::UnsignedLessThan,
                    op::GT => IntCC::UnsignedGreaterThan,
                    op::SLT => IntCC::SignedLessThan,
                    op::SGT => IntCC::SignedGreaterThan,
                    op::EQ => IntCC::Equal,
                    _ => unreachable!(),
                };

                let [a, b] = self.popn();
                let r = self.bcx.icmp(cond, a, b);
                let r = self.bcx.zext(self.word_type, r);
                self.push(r);
            }
            op::ISZERO => {
                let a = self.pop();
                let r = self.bcx.icmp_imm(IntCC::Equal, a, 0);
                let r = self.bcx.zext(self.word_type, r);
                self.push(r);
            }
            op::AND => binop!(bitand),
            op::OR => binop!(bitor),
            op::XOR => binop!(bitxor),
            op::NOT => unop!(bitnot),
            op::BYTE => {
                // Extract byte `index` (0 = most significant); 0 when out of range.
                let [index, value] = self.popn();

                let in_range = self.bcx.icmp_imm(IntCC::UnsignedLessThan, index, 32);

                let shift = self.bcx.imul_imm(index, 8);
                let c248 = self.bcx.iconst_256(248);
                let shift = self.bcx.isub(c248, shift);
                let shifted = self.bcx.ushr(value, shift);
                let mask = self.bcx.iconst_256(0xFF);
                let byte = self.bcx.bitand(shifted, mask);

                let zero = self.bcx.iconst_256(0);

                let r = self.bcx.select(in_range, byte, zero);
                self.push(r);
            }
            // Shifts saturate when the shift amount exceeds 255.
            op::SHL => binop!(@shift ishl, |value, shift| self.bcx.iconst_256(0)),
            op::SHR => binop!(@shift ushr, |value, shift| self.bcx.iconst_256(0)),
            op::SAR => binop!(@shift sshr, |value, shift| {
                // Arithmetic shift saturates to all-ones for negative values.
                let is_negative = self.bcx.icmp_imm(IntCC::SignedLessThan, value, 0);
                let max = self.bcx.iconst_256(U256::MAX);
                let zero = self.bcx.iconst_256(0);
                self.bcx.select(is_negative, max, zero)
            }),
            op::CLZ => unop!(clz),

            op::KECCAK256 => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Keccak256, &[self.ecx, sp]);
            }

            // Environment/context queries; addresses are loaded big-endian
            // and zero-extended to a word via `field!(@push ...)`.
            op::ADDRESS => {
                let input = self.load_input();
                field!(@push @[endian = "big"] self.address_type, input, InputsImpl; target_address);
            }
            op::BALANCE => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Balance, &[self.ecx, sp]);
            }
            op::ORIGIN => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::Origin, &[self.ecx, slot]);
                self.narrow_to_address(slot);
            }
            op::CALLER => {
                let input = self.load_input();
                field!(@push @[endian = "big"] self.address_type, input, InputsImpl; caller_address);
            }
            op::CALLVALUE => {
                let input = self.load_input();
                field!(@push self.word_type, input, InputsImpl; call_value);
            }
            op::CALLDATALOAD => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::CallDataLoad, &[self.ecx, sp]);
            }
            op::CALLDATASIZE => {
                field!(@push self.isize_type, self.ecx, EvmContext<'_>; calldatasize);
            }
            op::CALLDATACOPY => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::CallDataCopy, &[self.ecx, sp]);
            }
            op::CODESIZE => {
                // The code size is a compile-time constant.
                let len = self.bcx.iconst(self.word_type, self.bytecode.codesize() as i64);
                self.push(len);
            }
            op::CODECOPY => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::CodeCopy, &[self.ecx, sp]);
            }

            op::GASPRICE => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::GasPrice, &[self.ecx, sp]);
            }
            op::EXTCODESIZE => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::ExtCodeSize, &[self.ecx, sp]);
            }
            op::EXTCODECOPY => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::ExtCodeCopy, &[self.ecx, sp]);
            }
            op::RETURNDATASIZE => {
                field!(@push self.isize_type, self.ecx, EvmContext<'_>, pf::Slice; return_data.len);
            }
            op::RETURNDATACOPY => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::ReturnDataCopy, &[self.ecx, sp]);
            }
            op::EXTCODEHASH => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::ExtCodeHash, &[self.ecx, sp]);
            }
            op::BLOCKHASH => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::BlockHash, &[self.ecx, sp]);
            }
            op::COINBASE => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::Coinbase, &[self.ecx, slot]);
                self.narrow_to_address(slot);
            }
            op::TIMESTAMP => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::Timestamp, &[self.ecx, slot]);
            }
            op::NUMBER => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::Number, &[self.ecx, slot]);
            }
            op::DIFFICULTY => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::Difficulty, &[self.ecx, slot]);
            }
            op::GASLIMIT => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::GasLimit, &[self.ecx, slot]);
            }
            op::CHAINID => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::ChainId, &[self.ecx, slot]);
            }
            op::SELFBALANCE => {
                let slot = self.sp_at_top();
                self.call_fallible_builtin(Builtin::SelfBalance, &[self.ecx, slot]);
            }
            op::BASEFEE => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::Basefee, &[self.ecx, slot]);
            }
            op::BLOBHASH => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::BlobHash, &[self.ecx, sp]);
            }
            op::BLOBBASEFEE => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::BlobBaseFee, &[self.ecx, slot]);
            }
            op::SLOTNUM => {
                let slot = self.sp_at_top();
                let _ = self.call_builtin(Builtin::SlotNum, &[self.ecx, slot]);
            }

            // Stack/memory/storage.
            op::POP => {
                self.pop_ignore(1);
            }
            op::MLOAD => {
                let offset = self.pop();
                let addr = self.build_ensure_memory(offset, 32);
                let value = self.bcx.load_aligned(self.word_type, addr, 1, "mload.value");
                // EVM memory is big-endian; swap on little-endian hosts.
                let value = if self.little_endian() { self.bcx.bswap(value) } else { value };
                self.push(value);
            }
            op::MSTORE => {
                let [offset, value] = self.popn();
                let addr = self.build_ensure_memory(offset, 32);
                let value = if self.little_endian() { self.bcx.bswap(value) } else { value };
                self.bcx.store_aligned(value, addr, 1);
            }
            op::MSTORE8 => {
                let [offset, value] = self.popn();
                let addr = self.build_ensure_memory(offset, 1);
                // Only the least significant byte is stored.
                let value = self.bcx.ireduce(self.i8_type, value);
                self.bcx.store_aligned(value, addr, 1);
            }
            op::SLOAD => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Sload, &[self.ecx, sp]);
            }
            op::SSTORE => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Sstore, &[self.ecx, sp]);
            }
            op::JUMP | op::JUMPI => {
                let is_invalid = data.flags.contains(InstFlags::INVALID_JUMP);
                if is_invalid && opcode == op::JUMP {
                    // Unconditional jump to a statically-known invalid target.
                    self.pop_ignore(1);
                    self.build_fail_imm(InstructionResult::InvalidJump);
                } else {
                    let target = if is_invalid {
                        debug_assert_eq!(*data, op::JUMPI);
                        // Conditional jump to an invalid target: fails only if taken.
                        self.pop_ignore(1);
                        self.add_invalid_jump()
                    } else if data.flags.contains(InstFlags::MULTI_JUMP) {
                        // A statically-known set of possible targets: emit a
                        // dedicated switch instead of the shared jump table.
                        let target_value = self.pop();
                        let targets = self.bytecode.multi_jump_targets(inst).unwrap();

                        if opcode == op::JUMPI {
                            let cond_word = self.pop();
                            self.materialize_live_stack();
                            let cond = self.bcx.icmp_imm(IntCC::NotEqual, cond_word, 0);
                            let next = self.inst_entries[inst + 1];
                            let switch_block = self.bcx.create_block("multi_jump");
                            self.bcx.brif(cond, switch_block, next);
                            self.bcx.switch_to_block(switch_block);
                        } else {
                            self.materialize_live_stack();
                        }

                        let switch_targets: Vec<_> = targets
                            .iter()
                            .map(|&t| {
                                let pc = self.bytecode.inst(t).jumpdest_pc() as u64;
                                (pc, self.inst_entries[t])
                            })
                            .collect();
                        let invalid_jump = self.add_invalid_jump();
                        self.bcx.switch(target_value, invalid_jump, &switch_targets, true);

                        self.inst_entries[inst] = self.bcx.current_block().unwrap();
                        goto_return!(no_branch);
                    } else if data.flags.contains(InstFlags::STATIC_JUMP) {
                        // Target is known at compile time; jump directly.
                        self.pop_ignore(1);
                        let target_inst = data.static_jump_target();
                        debug_assert_eq!(
                            *self.bytecode.inst(target_inst),
                            op::JUMPDEST,
                            "jumping to non-JUMPDEST; target_inst={target_inst}",
                        );
                        self.inst_entries[target_inst]
                    } else {
                        // Fully dynamic jump: route through the shared jump table.
                        debug_assert!(self.bytecode.has_dynamic_jumps());
                        let target = self.pop();
                        let target = self.u256_to_u64_saturating(target, 64);
                        self.incoming_dynamic_jumps
                            .push((target, self.bcx.current_block().unwrap()));
                        self.dynamic_jump_table
                    };

                    if opcode == op::JUMPI {
                        let cond_word = self.pop();
                        self.materialize_live_stack();
                        let cond = self.bcx.icmp_imm(IntCC::NotEqual, cond_word, 0);
                        let next = self.inst_entries[inst + 1];
                        self.bcx.brif(cond, target, next);
                    } else {
                        self.materialize_live_stack();
                        self.bcx.br(target);
                    }
                    // Record the final block as this instruction's entry.
                    self.inst_entries[inst] = self.bcx.current_block().unwrap();
                }

                goto_return!(no_branch);
            }
            op::PC => {
                let pc = self.bcx.iconst_256(data.pc_imm());
                self.push(pc);
            }
            op::MSIZE => {
                let mem_len_field = self.get_field(
                    self.ecx,
                    mem::offset_of!(EvmContext<'_>, mem_len),
                    "ecx.mem_len.addr",
                );
                let mem_len = self.bcx.load(self.isize_type, mem_len_field, "ecx.mem_len");
                let msize = self.bcx.zext(self.word_type, mem_len);
                self.push(msize);
            }
            op::GAS => {
                let addr = self.gas_remaining_addr();
                let i64_type = self.bcx.type_int(64);
                let remaining = self.bcx.load(i64_type, addr, "gas.remaining");
                let remaining = self.bcx.zext(self.word_type, remaining);
                self.push(remaining);
            }
            op::JUMPDEST => {
                self.bcx.nop();
            }
            op::TLOAD => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::Tload, &[self.ecx, sp]);
            }
            op::TSTORE => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Tstore, &[self.ecx, sp]);
            }
            op::MCOPY => {
                let sp = self.sp_after_inputs();
                self.call_fallible_builtin(Builtin::Mcopy, &[self.ecx, sp]);
            }

            // Pushes are always constant-folded into `const_output` above.
            op::PUSH0..=op::PUSH32 => {
                unreachable!("handled in const_output");
            }

            op::DUP1..=op::DUP16 => self.dup((opcode - op::DUP1 + 1) as usize),
            op::DUPN => match decode_single(data.imm_byte()) {
                Some(n) => self.dup(n as usize),
                None => goto_return!(fail InstructionResult::InvalidImmediateEncoding),
            },

            op::SWAP1..=op::SWAP16 => self.swap((opcode - op::SWAP1 + 1) as usize),
            op::SWAPN => match decode_single(data.imm_byte()) {
                Some(n) => self.swap(n as usize),
                None => goto_return!(fail InstructionResult::InvalidImmediateEncoding),
            },

            op::EXCHANGE => match decode_pair(data.imm_byte()) {
                Some((n, m)) => self.exchange(n as usize, (m - n) as usize),
                None => goto_return!(fail InstructionResult::InvalidImmediateEncoding),
            },

            op::LOG0..=op::LOG4 => {
                // The topic count is passed to the builtin as an immediate.
                let n = opcode - op::LOG0;
                let sp = self.sp_after_inputs();
                let n = self.bcx.iconst(self.i8_type, n as i64);
                self.call_fallible_builtin(Builtin::Log, &[self.ecx, sp, n]);
            }

            // Calls/creates/returns may suspend or diverge, so they manage
            // their own control flow and never fall through.
            op::CREATE => {
                self.create_common(CreateKind::Create);
                goto_return!(no_branch);
            }
            op::CALL => {
                self.call_common(CallKind::Call);
                goto_return!(no_branch);
            }
            op::CALLCODE => {
                self.call_common(CallKind::CallCode);
                goto_return!(no_branch);
            }
            op::RETURN => {
                self.return_common(InstructionResult::Return);
                goto_return!(no_branch);
            }
            op::DELEGATECALL => {
                self.call_common(CallKind::DelegateCall);
                goto_return!(no_branch);
            }
            op::CREATE2 => {
                self.create_common(CreateKind::Create2);
                goto_return!(no_branch);
            }

            op::STATICCALL => {
                self.call_common(CallKind::StaticCall);
                goto_return!(no_branch);
            }

            op::REVERT => {
                self.return_common(InstructionResult::Revert);
                goto_return!(no_branch);
            }
            op::INVALID => goto_return!(fail InstructionResult::InvalidFEOpcode),
            op::SELFDESTRUCT => {
                let sp = self.sp_after_inputs();
                let _ = self.call_builtin(Builtin::SelfDestruct, &[self.ecx, sp]);
                self.bcx.unreachable();
                goto_return!(no_branch);
            }

            _ => unreachable!("unimplemented instruction: {data:?}"),
        }

        // Normal fall-through: reconcile the virtual stack and branch onward.
        self.sync_virtual_stack_diff(diff);
        self.section_len_offset += diff;
        goto_return!("normal exit");
    }
1140
1141 fn sync_noop_diff(&mut self, inst: Inst, diff: i32) {
1147 let expected_top = self.section_len_offset + diff;
1148 let current_top = self.vstack.top_offset();
1149 if current_top == expected_top {
1150 return;
1151 }
1152 let delta = expected_top - current_top;
1153 if delta < 0 {
1154 self.vstack.drop_top((-delta) as usize);
1155 } else {
1156 if delta == 1
1159 && let Some(c) = self.bytecode.const_output(inst)
1160 {
1161 let value = self.bcx.iconst_256(c);
1162 self.vstack.push(value);
1163 } else {
1164 for _ in 0..delta {
1165 self.vstack.push_mem();
1166 }
1167 }
1168 }
1169 }
1170
    /// Brings the virtual stack's top offset in line with
    /// `section_len_offset + diff` after an instruction was lowered,
    /// dropping or backfilling slots as needed.
    ///
    /// # Panics
    ///
    /// Panics (with a dump of the whole stack section) if the expected top
    /// sinks below the virtual stack's live range, which indicates a
    /// translator bookkeeping bug.
    fn sync_virtual_stack_diff(&mut self, diff: i32) {
        let expected_top = self.section_len_offset + diff;
        let current_top = self.vstack.top_offset();
        if current_top == expected_top {
            return;
        }
        let delta = expected_top - current_top;
        if expected_top < self.vstack.live_range().start {
            let inst = self.current_inst.unwrap();
            // Scan backwards for the head of the current stack section.
            let mut head = inst;
            for i in (0..inst.index()).rev() {
                let idx = crate::Inst::from_usize(i);
                let d = self.bytecode.inst(idx);
                if d.is_dead_code() {
                    continue;
                }
                if d.is_stack_section_head() {
                    head = idx;
                    break;
                }
            }
            // Render every live instruction of the section for the panic message.
            let mut section_dump = String::new();
            for i in head.index()..=inst.index() {
                let idx = crate::Inst::from_usize(i);
                let d = self.bytecode.inst(idx);
                if d.is_dead_code() {
                    continue;
                }
                use std::fmt::Write;
                let _ = write!(
                    section_dump,
                    "\n ic{i} pc={} {:?} io={:?} flags={:?} gas={:?} stack={:?}{}{}",
                    self.bytecode.pc(idx),
                    d.to_op(),
                    d.stack_io(),
                    d.flags,
                    d.gas_section,
                    d.stack_section,
                    if d.is_stack_section_head() { " SECTION_HEAD" } else { "" },
                    if d.is_dead_code() { " DEAD" } else { "" },
                );
            }
            let head_data = self.bytecode.inst(head);
            panic!(
                "sync: expected_top={expected_top} < base={}, section_len_offset={}, \
                 diff={diff}, current_top={current_top}, inst={:?} (ic{})\n\
                 section head=ic{}, head_stack_section={:?}, section:{section_dump}",
                self.vstack.live_range().start,
                self.section_len_offset,
                self.current_inst().to_op(),
                inst.index(),
                head.index(),
                head_data.stack_section,
            );
        }
        if delta < 0 {
            self.vstack.drop_top((-delta) as usize);
        } else {
            for _ in 0..delta {
                self.vstack.push_mem();
            }
        }

        // Outputs of the instruction were written directly to stack memory
        // (e.g. by a builtin), so mark those slots as materialized.
        let (_, outputs) = self.current_inst().stack_io();
        let outputs = outputs as i32;
        if outputs > 0 {
            self.vstack.mark_materialized_range(expected_top - outputs..expected_top);
        }

        debug_assert_eq!(
            self.vstack.top_offset(),
            expected_top,
            "virtual stack sync mismatch after {:?}",
            self.current_inst().to_op(),
        );
    }
1261
1262 fn push(&mut self, value: B::Value) {
1264 self.vstack.push(value);
1265 self.len_offset += 1;
1266 }
1267
1268 fn const_operands<const N: usize>(&self) -> [Option<U256>; N] {
1271 let inst = self.current_inst.unwrap();
1272 std::array::from_fn(|i| self.bytecode.const_operand(inst, i))
1273 }
1274
1275 fn fold_const(&mut self, value: impl TryInto<U256>) {
1277 self.pop_ignore(self.current_inst().stack_io().0 as usize);
1278 let v = self.bcx.iconst_256(value);
1279 self.push(v);
1280 }
1281
1282 fn pop_ignore(&mut self, n: usize) {
1284 self.vstack.drop_top(n);
1285 self.len_offset -= n as i8;
1286 }
1287
1288 fn pop(&mut self) -> B::Value {
1290 self.popn::<1>()[0]
1291 }
1292
1293 fn popn<const N: usize>(&mut self) -> [B::Value; N] {
1295 assert_ne!(N, 0);
1296
1297 let operand_depth_base = (-self.len_offset) as usize;
1298 let values = std::array::from_fn(|i| {
1299 let operand_depth = operand_depth_base + i;
1300 let name = b'a' + i as u8;
1301 self.stack_value_at_depth(operand_depth, i, std::str::from_utf8(&[name]).unwrap())
1302 });
1303 self.pop_ignore(N);
1304 values
1305 }
1306
1307 fn dup(&mut self, n: usize) {
1310 assert_ne!(n, 0);
1311 let name = if self.config.debug { &format!("dup{n}") } else { "" };
1312 let value = self.stack_value_at_depth(n - 1, n - 1, name);
1313 self.push(value);
1314 }
1315
    /// Swaps the top of the stack with the value `n` slots below it (EVM `SWAPn`).
    fn swap(&mut self, n: usize) {
        self.exchange(0, n);
    }
1321
1322 fn exchange(&mut self, n: usize, m: usize) {
1326 assert_ne!(m, 0);
1327 let a = self.stack_value_at_depth(n, n, "swap.a");
1328 let b = self.stack_value_at_depth(n + m, n + m, "swap.b");
1329 self.vstack.set(n, b);
1330 self.vstack.set(n + m, a);
1331 }
1332
1333 fn return_common(&mut self, ir: InstructionResult) {
1335 let sp = self.sp_after_inputs();
1336 let ir_const = self.bcx.iconst(self.i8_type, ir as i64);
1337 let _ = self.call_builtin(Builtin::DoReturn, &[self.ecx, sp, ir_const]);
1338 self.bcx.unreachable();
1339 }
1340
1341 fn create_common(&mut self, create_kind: CreateKind) {
1343 let sp = self.sp_after_inputs();
1344 let create_kind = self.bcx.iconst(self.i8_type, create_kind as i64);
1345 self.call_fallible_builtin(Builtin::Create, &[self.ecx, sp, create_kind]);
1346 self.suspend();
1347 }
1348
1349 fn call_common(&mut self, call_kind: CallKind) {
1351 let sp = self.sp_after_inputs();
1352 let call_kind = self.bcx.iconst(self.i8_type, call_kind as i64);
1353 self.call_fallible_builtin(Builtin::Call, &[self.ecx, sp, call_kind]);
1354 self.suspend();
1355 }
1356
    /// Suspends execution after the current instruction: records a resume point
    /// at the next instruction's entry block and branches to the shared
    /// suspend block with this suspension's id.
    fn suspend(&mut self) {
        // All virtual values must be written back before control leaves.
        self.materialize_live_stack();

        // NOTE: capture the index before `add_resume_at` grows the list.
        let idx = self.resume_blocks.len();
        self.add_resume_at(self.inst_entries[self.current_inst.unwrap() + 1]);

        // `+ 1`: resume id 0 appears to be reserved (presumably "start from
        // the beginning") — confirm against the resume-dispatch code.
        let value = self.bcx.iconst(self.isize_type, idx as i64 + 1);
        self.suspend_blocks.push((value, self.bcx.current_block().unwrap()));

        self.bcx.br(self.suspend_block);
    }
1373
    /// Registers `block` as a resume entry point; its position becomes the resume id.
    fn add_resume_at(&mut self, block: B::BasicBlock) {
        self.resume_blocks.push(block);
    }

    /// Loads a 256-bit stack word from `ptr`.
    fn load_word(&mut self, ptr: B::Value, name: &str) -> B::Value {
        self.bcx.load(self.word_type, ptr, name)
    }

    /// Computes the address `ptr + offset` bytes (see the free `get_field`).
    fn get_field(&mut self, ptr: B::Value, offset: usize, name: &str) -> B::Value {
        get_field(&mut self.bcx, ptr, offset, name)
    }
1388
1389 fn load_input(&mut self) -> B::Value {
1391 let ptr_type = self.bcx.type_ptr();
1392 let input_field = get_field(
1393 &mut self.bcx,
1394 self.ecx,
1395 mem::offset_of!(EvmContext<'_>, input),
1396 "ecx.input.addr",
1397 );
1398 self.bcx.load(ptr_type, input_field, "ecx.input")
1399 }
1400
1401 #[allow(clippy::assertions_on_constants)]
1407 fn narrow_to_address(&mut self, slot: B::Value) {
1408 debug_assert!(self.little_endian(), "big-endian not yet supported");
1409 let value = self.bcx.load(self.address_type, slot, "address");
1410 let value = self.bcx.zext(self.word_type, value);
1411 self.bcx.store(value, slot);
1412 }
1413
1414 fn gas_remaining_addr(&mut self) -> B::Value {
1415 const OFFSET: usize =
1416 mem::offset_of!(EvmContext<'_>, gas) + mem::offset_of!(pf::Gas, tracker.remaining);
1417 let offset = self.bcx.iconst(self.isize_type, OFFSET as i64);
1418 self.bcx.gep(self.i8_type, self.ecx, &[offset], "gas.remaining.addr")
1419 }
1420
1421 fn save_stack_len(&mut self) {
1423 let len = self.stack_len.load(&mut self.bcx, "stack_len");
1424 let ptr = self.stack_len_arg();
1425 self.bcx.store(len, ptr);
1426 }
1427
1428 fn copy_stack_from_arg(&mut self, len: B::Value) {
1431 if let Some(src) = self.sp_arg {
1432 let dst = self.stack.addr(&mut self.bcx);
1433 let word_size = 32i64;
1434 let byte_len = self.bcx.imul_imm(len, word_size);
1435 self.bcx.memcpy(dst, src, byte_len);
1436 }
1437 }
1438
1439 fn copy_stack_to_arg(&mut self) {
1441 if let Some(dst) = self.sp_arg {
1442 let len = self.stack_len.load(&mut self.bcx, "stack_len");
1443 let src = self.stack.addr(&mut self.bcx);
1444 let word_size = 32i64;
1445 let byte_len = self.bcx.imul_imm(len, word_size);
1446 self.bcx.memcpy(dst, src, byte_len);
1447 }
1448 }
1449
    /// Returns the caller-provided stack-length pointer (function parameter 2).
    fn stack_len_arg(&mut self) -> B::Value {
        self.bcx.fn_param(2)
    }

    /// Stack pointer at the current section top
    /// (`section_start_sp + section_len_offset` words).
    #[must_use]
    fn sp_at_top(&mut self) -> B::Value {
        self.sp_from_section(self.section_len_offset as i64)
    }
1463
1464 #[must_use]
1476 fn sp_after_inputs(&mut self) -> B::Value {
1477 let (inputs, outputs) = self.current_inst().stack_io();
1478 let inputs = inputs as usize;
1479 let outputs = outputs as usize;
1480 let top = self.section_len_offset;
1481 let start = top - inputs as i32;
1482 let window = inputs.max(outputs) as i32;
1483 self.materialize_range(start, start + window);
1484 self.write_const_operands(inputs);
1485 self.sp_from_top(inputs)
1486 }
1487
1488 #[must_use]
1491 fn sp_after_inputs_with(&mut self, depths: &[usize]) -> B::Value {
1492 let (inputs, _) = self.current_inst().stack_io();
1493 let inputs = inputs as usize;
1494 let top = self.section_len_offset;
1495 for &depth in depths {
1496 let off = top - inputs as i32 + (inputs - 1 - depth) as i32;
1497 self.materialize_range(off, off + 1);
1498 }
1499 self.write_const_operands(inputs);
1500 self.sp_from_top(inputs)
1501 }
1502
1503 fn write_const_operands(&mut self, inputs: usize) {
1506 let inst = self.current_inst.unwrap();
1507 let top = self.section_len_offset;
1508 for depth in 0..inputs {
1509 let off = top - inputs as i32 + (inputs - 1 - depth) as i32;
1510 if let VSlot::Materialized = self.vstack.get_at_offset(off)
1511 && let Some(c) = self.bytecode.const_operand(inst, depth)
1512 {
1513 let value = self.bcx.iconst_256(c);
1514 let sp = self.sp_from_section(off as i64);
1515 self.bcx.store(value, sp);
1516 }
1517 }
1518 }
1519
1520 fn sp_from_section(&mut self, offset: i64) -> B::Value {
1522 if offset == 0 {
1523 return self.section_start_sp;
1524 }
1525 let offset = self.bcx.iconst(self.isize_type, offset);
1526 self.bcx.gep(self.word_type, self.section_start_sp, &[offset], "sp")
1527 }
1528
1529 fn sp_at(&mut self, len: B::Value) -> B::Value {
1531 let ptr = self.stack.addr(&mut self.bcx);
1532 self.bcx.gep(self.word_type, ptr, &[len], "sp")
1533 }
1534
    /// Stack pointer `n` words below the current section top.
    fn sp_from_top(&mut self, n: usize) -> B::Value {
        self.sp_from_section(self.section_len_offset as i64 - n as i64)
    }
1539
    /// Returns the stack value at the given depth (0 = top).
    ///
    /// `operand_depth` indexes the current instruction's constant-operand
    /// table, while `live_depth` indexes the live virtual stack; callers pass
    /// different values when the two views are offset (see `popn`).
    /// Known-constant operands short-circuit to an immediate. A materialized
    /// slot is loaded from the real stack and the loaded value is cached back
    /// into the slot so repeated reads reuse it.
    fn stack_value_at_depth(
        &mut self,
        operand_depth: usize,
        live_depth: usize,
        name: &str,
    ) -> B::Value {
        let inst = self.current_inst.unwrap();
        if let Some(c) = self.bytecode.const_operand(inst, operand_depth) {
            return self.bcx.iconst_256(c);
        }
        match self.vstack.get(live_depth) {
            VSlot::Virtual(v) => v,
            VSlot::Materialized => {
                let off = self.vstack.offset_at_depth(live_depth);
                let sp = self.sp_from_section(off as i64);
                let value = self.load_word(sp, name);
                // Cache so the next read of this slot avoids a reload.
                self.vstack.set(live_depth, value);
                value
            }
        }
    }
1568
1569 fn materialize_live_stack(&mut self) {
1571 let range = self.vstack.live_range();
1572 self.materialize_range(range.start, range.end);
1573 }
1574
1575 fn relieve_vstack_pressure(&mut self) {
1578 const HIGH_WATER: usize = 2;
1580 const KEEP_HOT: usize = 2;
1582
1583 let live = self.vstack.live_range();
1584 if (live.end - live.start) as usize <= HIGH_WATER {
1585 return;
1586 }
1587
1588 let virtual_count = self.vstack.virtual_count();
1589 if virtual_count <= HIGH_WATER {
1590 return;
1591 }
1592
1593 let cold_end = (self.vstack.top_offset() - KEEP_HOT as i32).max(live.start);
1595 if cold_end > live.start {
1596 self.materialize_range(live.start, cold_end);
1597 }
1598 }
1599
    /// Stores every still-virtual value in `start..end` back to the real stack
    /// and marks those slots as materialized.
    fn materialize_range(&mut self, start: i32, end: i32) {
        // Collected eagerly: `pending_stores` borrows `self.vstack`, while the
        // store loop needs `&mut self` for `sp_from_section`/`store`.
        let pending: Vec<_> = self.vstack.pending_stores(start..end).collect();
        for (off, value) in pending {
            let sp = self.sp_from_section(off as i64);
            self.bcx.store(value, sp);
        }
        self.vstack.mark_materialized_range(start..end);
    }
1609
1610 fn gas_cost_imm(&mut self, cost: u64) {
1612 if !self.config.gas_metering || cost == 0 {
1613 return;
1614 }
1615 let value = self.bcx.iconst(self.isize_type, cost as i64);
1616 self.gas_cost(value);
1617 }
1618
1619 fn gas_cost(&mut self, cost: B::Value) {
1621 if !self.config.gas_metering {
1622 return;
1623 }
1624
1625 let addr = self.gas_remaining_addr();
1628 let i64_type = self.bcx.type_int(64);
1629 let gas_remaining = self.bcx.load(i64_type, addr, "gas.remaining");
1630 let (res, overflow) = self.bcx.usub_overflow(gas_remaining, cost);
1631 self.bcx.store(res, addr);
1632 self.build_check(overflow, InstructionResult::OutOfGas);
1633 }
1634
    /// Ensures EVM memory covers an access of `len` bytes at `offset` (a
    /// 256-bit value), resizing if needed, and returns the address of the
    /// access inside the memory buffer.
    fn build_ensure_memory(&mut self, offset: B::Value, len: u64) -> B::Value {
        // Saturate the offset to 2^63 - 1 so the addition below cannot wrap.
        let offset = self.u256_to_u64_saturating(offset, 63);
        // Statically proven in-bounds accesses need no size check at all.
        if self.current_inst.is_some_and(|inst| self.can_skip_ensure_memory(inst)) {
            return self.build_memory_addr(offset);
        }

        let isize_type = self.isize_type;
        let len_const = self.bcx.iconst(isize_type, len as i64);
        let min_size = self.bcx.iadd(offset, len_const);

        // A nonzero `direct_resize_size` means the analysis decided to resize
        // unconditionally rather than branch on the current length.
        let direct_resize_size = self
            .current_inst
            .map(|inst| self.bytecode.memory_section(inst).direct_resize_size)
            .unwrap_or_default();
        if direct_resize_size != 0 {
            self.call_fallible_builtin(Builtin::Mresize, &[self.ecx, min_size]);
            // The resize may reallocate the buffer.
            self.cached_mem_base = None;
            return self.build_memory_addr(offset);
        }

        let current_block = self.current_block();
        let resize_block = self.create_block_after(current_block, "mresize");
        let contd_block = self.create_block_after(resize_block, "mresize.contd");

        // Compare the required size against the current memory length and only
        // call the (cold) resize builtin when it is exceeded.
        let mem_len_field =
            self.get_field(self.ecx, mem::offset_of!(EvmContext<'_>, mem_len), "ecx.mem_len.addr");
        let mem_len = self.bcx.load(isize_type, mem_len_field, "ecx.mem_len");
        let exceeds = self.bcx.icmp(IntCC::UnsignedGreaterThan, min_size, mem_len);
        // Invalidate regardless of the branch: the base must be reloaded after
        // the two paths merge.
        self.cached_mem_base = None;

        self.bcx.brif(exceeds, resize_block, contd_block);

        self.bcx.switch_to_block(resize_block);
        self.bcx.set_current_block_cold();
        self.call_fallible_builtin(Builtin::Mresize, &[self.ecx, min_size]);
        self.bcx.br(contd_block);

        self.bcx.switch_to_block(contd_block);
        self.build_memory_addr(offset)
    }
1683
1684 fn can_skip_ensure_memory(&self, inst: Inst) -> bool {
1685 let section = self.bytecode.memory_section(inst);
1686 if section.known_size < section.required_size {
1687 return false;
1688 }
1689 let mut has_memory_access = false;
1690 for (offset, len) in self.bytecode.const_memory_accesses(inst).into_iter().flatten() {
1691 has_memory_access = true;
1692 if offset.is_none() || len.is_none() {
1693 return false;
1694 }
1695 }
1696 has_memory_access
1697 }
1698
1699 fn build_memory_addr(&mut self, offset: B::Value) -> B::Value {
1700 let mem_base = self.cached_mem_base.unwrap_or_else(|| self.load_memory_base());
1701 self.cached_mem_base = Some(mem_base);
1702 self.bcx.gep(self.i8_type, mem_base, &[offset], "mem.addr")
1703 }
1704
1705 fn load_memory_base(&mut self) -> B::Value {
1706 let ptr_type = self.bcx.type_ptr();
1707 let mem_base_field = self.get_field(
1708 self.ecx,
1709 mem::offset_of!(EvmContext<'_>, mem_base),
1710 "ecx.mem_base.addr",
1711 );
1712 self.bcx.load(ptr_type, mem_base_field, "ecx.mem_base")
1713 }
1714
    /// Emits the underflow/overflow bounds checks for one stack section:
    /// fails with `StackUnderflow` when the incoming length is below
    /// `stack_section.inputs`, and with `StackOverflow` when the incoming
    /// length plus `max_growth` would exceed `STACK_CAP`. Either check is
    /// skipped entirely when the section cannot trigger it.
    fn check_stack_bounds(&mut self, stack_section: StackSection) {
        let inp = stack_section.inputs;
        let diff = stack_section.max_growth as i64;

        let underflow = |this: &mut Self| {
            debug_assert!(inp > 0);
            this.bcx.icmp_imm(IntCC::UnsignedLessThan, this.len_before, inp as i64)
        };
        let overflow = |this: &mut Self| {
            debug_assert!(diff > 0);
            // Growth larger than the whole stack can never fit.
            if diff > STACK_CAP as i64 {
                return this.bcx.bool_const(true);
            }
            this.bcx.icmp_imm(IntCC::UnsignedGreaterThan, this.len_before, STACK_CAP as i64 - diff)
        };

        let may_underflow = inp > 0;
        let may_overflow = diff > 0;
        if may_underflow && may_overflow {
            // One branch for both conditions; the error code is selected by
            // which condition fired, with underflow taking precedence.
            let underflow = underflow(self);
            let overflow = overflow(self);
            let cond = self.bcx.bitor(underflow, overflow);
            let ret = {
                let under = self.bcx.iconst(self.i8_type, InstructionResult::StackUnderflow as i64);
                let over = self.bcx.iconst(self.i8_type, InstructionResult::StackOverflow as i64);
                self.bcx.select(underflow, under, over)
            };
            let target = self.build_check_inner(true, cond, ret);
            self.bcx.switch_to_block(target);
        } else if may_underflow {
            let cond = underflow(self);
            self.build_check(cond, InstructionResult::StackUnderflow);
        } else if may_overflow {
            let cond = overflow(self);
            self.build_check(cond, InstructionResult::StackOverflow);
        }
    }
1762
    /// Emits a failure check: exits with `ret` when `failure_cond` is true.
    fn build_check(&mut self, failure_cond: B::Value, ret: InstructionResult) {
        self.build_check_imm_inner(true, failure_cond, ret);
    }

    /// Emits a check against an immediate result code, optionally annotating
    /// it with a comment, and continues in the success block.
    fn build_check_imm_inner(&mut self, is_failure: bool, cond: B::Value, ret: InstructionResult) {
        let ret_value = self.bcx.iconst(self.i8_type, ret as i64);
        let target = self.build_check_inner(is_failure, cond, ret_value);
        if self.config.comments {
            self.add_comment(&format!("check {ret:?}"));
        }
        self.bcx.switch_to_block(target);
    }
1778
    /// Emits a conditional branch on `cond` to an exit path returning `ret`,
    /// and returns the continuation block for the non-exiting path.
    ///
    /// When `is_failure`, a true `cond` exits (marked cold); otherwise a true
    /// `cond` continues. The exit edge reuses the shared failure/return block
    /// when one exists (recording `(ret, block)` for its incoming values);
    /// otherwise a dedicated block with a direct `ret` is created.
    #[must_use]
    fn build_check_inner(
        &mut self,
        is_failure: bool,
        cond: B::Value,
        ret: B::Value,
    ) -> B::BasicBlock {
        let current_block = self.current_block();
        let target = self.create_block_after(current_block, "contd");

        let exit_block = if is_failure {
            if let Some(failure_block) = self.failure_block {
                self.incoming_failures.push((ret, current_block));
                failure_block
            } else {
                self.create_block_after(target, "failure")
            }
        } else if let Some(return_block) = self.return_block {
            self.incoming_returns.push((ret, current_block));
            return_block
        } else {
            self.create_block_after(target, "return")
        };
        // Route the true edge to the exit only for failures; for returns the
        // true edge is the continuation.
        let then_block = if is_failure { exit_block } else { target };
        let else_block = if is_failure { target } else { exit_block };
        self.bcx.brif_cold(cond, then_block, else_block, is_failure);

        // A freshly created exit block needs its `ret` emitted; shared blocks
        // are filled in elsewhere.
        if (is_failure && self.failure_block.is_none())
            || (!is_failure && self.return_block.is_none())
        {
            self.bcx.switch_to_block(exit_block);
            self.bcx.ret(&[ret]);
        }

        target
    }
1815
1816 fn build_fail_imm(&mut self, ret: InstructionResult) {
1818 let ret_value = self.bcx.iconst(self.i8_type, ret as i64);
1819 self.build_fail(ret_value);
1820 if self.config.comments {
1821 self.add_comment(&format!("fail {ret:?}"));
1822 }
1823 }
1824
1825 fn build_fail(&mut self, ret: B::Value) {
1827 if self.config.inspect_stack {
1828 self.materialize_live_stack();
1829 }
1830 if let Some(block) = self.failure_block {
1831 self.incoming_failures.push((ret, self.bcx.current_block().unwrap()));
1832 self.bcx.br(block);
1833 } else {
1834 self.bcx.ret(&[ret]);
1835 }
1836 }
1837
1838 fn build_return_imm(&mut self, ret: InstructionResult) {
1840 let ret_value = self.bcx.iconst(self.i8_type, ret as i64);
1841 self.build_return(ret_value);
1842 if self.config.comments {
1843 self.add_comment(&format!("return {ret:?}"));
1844 }
1845 }
1846
1847 fn build_return(&mut self, ret: B::Value) {
1849 if self.config.inspect_stack {
1850 self.materialize_live_stack();
1851 }
1852 if let Some(block) = self.return_block {
1853 self.incoming_returns.push((ret, self.bcx.current_block().unwrap()));
1854 self.bcx.br(block);
1855 } else {
1856 self.bcx.ret(&[ret]);
1857 }
1858 }
1859
    /// Records an `InvalidJump` edge from the current block into the shared
    /// failure block (which must exist) and returns that block for branching.
    fn add_invalid_jump(&mut self) -> B::BasicBlock {
        let block = self.failure_block.unwrap();
        self.incoming_failures.push((
            self.bcx.iconst(self.i8_type, InstructionResult::InvalidJump as i64),
            self.bcx.current_block().unwrap(),
        ));
        block
    }
1868
1869 fn call_panic(&mut self, msg: &str) {
1871 let function = self.builtin_function(Builtin::Panic);
1872 let ptr = self.bcx.str_const(msg);
1873 let len = self.bcx.iconst(self.isize_type, msg.len() as i64);
1874 let _ = self.bcx.call(function, &[ptr, len]);
1875 self.bcx.unreachable();
1876 }
1877
1878 #[allow(dead_code)]
1879 fn call_printf(&mut self, template: &std::ffi::CStr, values: &[B::Value]) {
1880 let mut args = Vec::with_capacity(values.len() + 1);
1881 args.push(self.bcx.cstr_const(template));
1882 args.extend_from_slice(values);
1883 let printf = self.bcx.get_printf_function();
1884 let _ = self.bcx.call(printf, &args);
1885 }
1886
    /// Calls a builtin and deliberately discards its return value —
    /// NOTE(review): presumably failure is propagated by the builtin/check
    /// machinery itself rather than through this value; confirm.
    fn call_fallible_builtin(&mut self, builtin: Builtin, args: &[B::Value]) {
        let _ = self.call_builtin(builtin, args);
    }
1893
    /// Calls `builtin` with `args`, returning its value (if any).
    ///
    /// The cached memory base pointer is conservatively invalidated after the
    /// call, since a builtin may resize EVM memory — unless the current
    /// instruction's memory accesses are statically proven in-bounds
    /// (`can_skip_ensure_memory`), in which case the cache is kept
    /// (NOTE(review): this assumes such builtins cannot move the buffer).
    #[must_use]
    fn call_builtin(&mut self, builtin: Builtin, args: &[B::Value]) -> Option<B::Value> {
        let function = self.builtin_function(builtin);
        let invalidate_mem_base =
            !self.current_inst.is_some_and(|inst| self.can_skip_ensure_memory(inst));
        let value = self.bcx.call(function, args);
        if invalidate_mem_base {
            self.cached_mem_base = None;
        }
        value
    }
1910
    /// Returns (declaring on first use) the backend function for `builtin`.
    fn builtin_function(&mut self, builtin: Builtin) -> B::Function {
        self.builtins.get(builtin, &mut self.bcx)
    }
1915
1916 fn add_comment(&mut self, comment: &str) {
1918 if comment.is_empty() || !self.config.comments {
1919 return;
1920 }
1921 self.bcx.add_comment_to_current_inst(comment);
1922 }
1923
    /// Data of the instruction currently being translated. Panics if none is set.
    fn current_inst(&self) -> &InstData {
        self.bytecode.inst(self.current_inst.unwrap())
    }

    /// The builder's current basic block. Panics if no block is active.
    fn current_block(&mut self) -> B::BasicBlock {
        self.bcx.current_block().expect("no blocks")
    }

    /// Whether the target is little-endian. Hard-coded to `true`; big-endian
    /// targets are unsupported (asserted in `narrow_to_address`).
    fn little_endian(&self) -> bool {
        true
    }
1938
1939 fn create_block_after(&mut self, after: B::BasicBlock, name: &str) -> B::BasicBlock {
1949 let name = self.op_block_name(name);
1950 self.bcx.create_block_after(after, &name)
1951 }
1952
1953 fn op_block_name(&self, name: &str) -> String {
1955 if !self.config.debug {
1956 return String::new();
1957 }
1958 self.bytecode.op_block_name(self.current_inst, name)
1959 }
1960
1961 fn u256_to_u64_saturating(&mut self, value: B::Value, bits: usize) -> B::Value {
1964 let i64_type = self.bcx.type_int(64);
1965 let reduced = self.bcx.ireduce(i64_type, value);
1966 let sentinel_lit = 1u128.checked_shl(bits as u32).unwrap_or(0).wrapping_sub(1);
1967 let sentinel_u256 = self.bcx.iconst_256(U256::from(sentinel_lit));
1968 let fits = self.bcx.icmp(IntCC::UnsignedLessThanOrEqual, value, sentinel_u256);
1969 let sentinel = self.bcx.iconst(i64_type, sentinel_lit as i64);
1970 self.bcx.select(fits, reduced, sentinel)
1971 }
1972}
1973
impl<B: Backend> FunctionCx<'_, B> {
    /// Builds (at most once) and calls a private IR-level helper function named
    /// `__revmc_ir_builtin_<name>`, whose body is emitted by `build` with this
    /// context's builder temporarily swapped for the helper's builder.
    #[allow(dead_code)]
    #[must_use]
    fn call_ir_builtin(
        &mut self,
        name: &str,
        args: &[B::Value],
        arg_types: &[B::Type],
        ret: Option<B::Type>,
        build: impl FnOnce(&mut Self),
    ) -> Option<B::Value> {
        let prefix = "__revmc_ir_builtin_";
        let name = &format!("{prefix}{name}")[..];

        debug_assert_eq!(args.len(), arg_types.len());
        let linkage = revmc_backend::Linkage::Private;
        // Capture the caller's debug location; the helper body is emitted
        // without one, and the location is restored before emitting the call.
        let debug_location = self
            .config
            .debug
            .then(|| self.current_inst.map(|inst| self.inst_lines[inst]))
            .flatten();
        self.bcx.clear_debug_location();

        // SAFETY(review): this creates a second `&mut Self` aliasing `self` so
        // the closure below can mutate the context while `self.bcx` is passed
        // by `&mut` to `get_or_build_function`. The `ptr::swap`s route all
        // builder access through one handle at a time, but this is still
        // aliased `&mut` and relies on the closure being invoked at most once,
        // synchronously — confirm soundness before modifying.
        let this = unsafe { &mut *(self as *mut Self) };
        let f = self.bcx.get_or_build_function(name, arg_types, ret, linkage, |bcx| {
            // The helper must not inherit the caller's shared exit blocks.
            let prev_return_block = this.return_block.take();
            let prev_failure_block = this.failure_block.take();
            unsafe { std::ptr::swap(&mut this.bcx, bcx) };

            for attr in default_attrs::for_fn().chain(std::iter::once(Attribute::NoUnwind)) {
                this.bcx.add_function_attribute(None, attr, FunctionAttributeLocation::Function)
            }
            for i in 0..this.bcx.num_fn_params() as u32 {
                for attr in default_attrs::for_param() {
                    this.bcx.add_function_attribute(None, attr, FunctionAttributeLocation::Param(i))
                }
            }
            build(this);

            // Restore the caller's builder and exit blocks.
            unsafe { std::ptr::swap(&mut this.bcx, bcx) };
            this.failure_block = prev_failure_block;
            this.return_block = prev_return_block;
        });
        if let Some(line) = debug_location {
            self.bcx.set_debug_location(line, 1);
        }
        self.bcx.call(f, args)
    }
}
2031
/// Private mirrors of foreign (`revm`) types, used only to compute field byte
/// offsets via `mem::offset_of!` for generated field accesses.
#[allow(dead_code)]
mod pf {
    use super::*;

    #[repr(C)] pub(super) struct Slice {
        pub(super) ptr: *const u8,
        pub(super) len: usize,
    }
    // Size check against the real fat-pointer slice reference.
    const _: [(); mem::size_of::<&'static [u8]>()] = [(); mem::size_of::<Slice>()];

    // NOTE(review): unlike `Slice`/`MemoryGas`, `Gas` and `GasTracker` are not
    // `#[repr(C)]`, yet `offset_of!(pf::Gas, tracker.remaining)` (gas metering)
    // assumes their layout matches `revm_interpreter::Gas`. With `repr(Rust)`
    // field order is unspecified, and the `size_of` assertion below checks
    // sizes only, not field offsets — verify against the pinned revm version
    // when updating either side.
    pub(super) struct Gas {
        pub(super) tracker: GasTracker,
        pub(super) memory: MemoryGas,
    }

    pub(super) struct GasTracker {
        pub(super) limit: u64,
        pub(super) remaining: u64,
        pub(super) reservoir: u64,
        pub(super) state_gas_spent: u64,
        pub(super) refunded: i64,
    }

    #[repr(C)]
    pub(super) struct MemoryGas {
        pub(super) words_num: usize,
        pub(super) expansion_cost: u64,
    }
    // Size must match the real `revm_interpreter::Gas`.
    const _: [(); mem::size_of::<revm_interpreter::Gas>()] = [(); mem::size_of::<Gas>()];
}
2072
2073fn effective_stack_diff(inp: u8, out: u8, data: &InstData) -> i32 {
2075 let mut diff = out as i32 - inp as i32;
2076 if data.may_suspend() {
2078 diff -= 1;
2079 }
2080 diff
2081}
2082
2083fn get_field<B: Builder>(bcx: &mut B, ptr: B::Value, offset: usize, name: &str) -> B::Value {
2084 let offset = bcx.iconst(bcx.type_ptr_sized_int(), offset as i64);
2085 bcx.gep(bcx.type_int(8), ptr, &[offset], name)
2086}
2087
#[allow(unused)]
macro_rules! format_printf {
    // Formats the arguments into a freshly allocated `CString` and expands to
    // a borrow of it (suitable for `call_printf`'s `&CStr` parameter). Panics
    // if the formatted string contains an interior NUL byte.
    ($($t:tt)*) => {
        &std::ffi::CString::new(format!($($t)*)).unwrap()
    };
}
2094#[allow(unused)]
2095use format_printf;