pub(super) struct FunctionCx<'a, B: Backend> {
config: FcxConfig,
bcx: B::Builder<'a>,
isize_type: B::Type,
word_type: B::Type,
address_type: B::Type,
i8_type: B::Type,
stack_len: Pointer<B::Builder<'a>>,
stack: Pointer<B::Builder<'a>>,
sp_arg: Option<B::Value>,
ecx: B::Value,
len_before: B::Value,
len_offset: i8,
vstack: VStack<B::Value>,
section_start_len: B::Value,
section_start_sp: B::Value,
section_len_offset: i32,
stored_len_offset: i32,
cached_mem_base: Option<B::Value>,
bytecode: &'a Bytecode<'a>,
inst_lines: IndexVec<Inst, u32>,
inst_entries: IndexVec<Inst, B::BasicBlock>,
current_inst: Option<Inst>,
incoming_dynamic_jumps: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>,
dynamic_jump_table: B::BasicBlock,
incoming_failures: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>,
failure_block: Option<B::BasicBlock>,
incoming_returns: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>,
return_block: Option<B::BasicBlock>,
resume_blocks: Vec<B::BasicBlock>,
suspend_blocks: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>,
suspend_block: B::BasicBlock,
builtins: &'a mut Builtins<B>,
}

Fields

config: FcxConfig

bcx: B::Builder<'a>
The backend's function builder.

isize_type: B::Type

word_type: B::Type

address_type: B::Type

i8_type: B::Type

stack_len: Pointer<B::Builder<'a>>
The stack length. Either passed in the arguments as a pointer or allocated locally.

stack: Pointer<B::Builder<'a>>
The stack value. Constant throughout the function, either passed in the arguments as a pointer or allocated locally.

sp_arg: Option<B::Value>
The stack argument pointer. Only used when local_stack is enabled and the stack needs
to be copied in/out at entry/exit boundaries.

ecx: B::Value
The EVM context. Opaque pointer, only passed to builtins.

len_before: B::Value
Stack length before the current instruction.

len_offset: i8
Stack length offset for the current instruction, used for push/pop.

vstack: VStack<B::Value>
Section-local virtual stack that caches values as SSA instead of immediately storing/loading from the stack alloca.

section_start_len: B::Value
Stack length at the start of the current stack section, loaded once from the alloca.
All intra-section len_before values are derived from this + section_len_offset.

section_start_sp: B::Value
Stack pointer at the start of the current stack section (&stack[section_start_len]).
All intra-section stack pointer GEPs are derived from this base to preserve pointer
provenance, which lets LLVM prove aliasing and fold redundant operations.

section_len_offset: i32
Cumulative stack diff from the section start to the current instruction (compile-time).
Updated after the opcode handler so that push/pop/sp helpers see the pre-diff value.

stored_len_offset: i32
The cumulative offset that len.addr currently holds relative to section_start_len.
At section start this is 0 (len.addr == section_start_len). After each store it becomes
section_len_offset + diff. Stores are skipped when the new offset matches this value.

cached_mem_base: Option<B::Value>

bytecode: &'a Bytecode<'a>
The bytecode being translated.

inst_lines: IndexVec<Inst, u32>
Instruction index to 1-based line number in bytecode.txt (for debug info).

inst_entries: IndexVec<Inst, B::BasicBlock>
All entry blocks for each instruction.

current_inst: Option<Inst>
The current instruction being translated.

incoming_dynamic_jumps: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>
dynamic_jump_table incoming values.

dynamic_jump_table: B::BasicBlock
The dynamic jump table block where all dynamic jumps branch to.

incoming_failures: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>
failure_block incoming values.

failure_block: Option<B::BasicBlock>
The block that all failures branch to.

incoming_returns: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>
return_block incoming values.

return_block: Option<B::BasicBlock>
The return block that all return instructions branch to.

resume_blocks: Vec<B::BasicBlock>
resume_block switch values.

suspend_blocks: Vec<(<B as BackendTypes>::Value, <B as BackendTypes>::BasicBlock)>
suspend_block incoming values.

suspend_block: B::BasicBlock
The suspend block that all suspend instructions branch to.

builtins: &'a mut Builtins<B>
Builtins.
Implementations

impl<'a, B: Backend> FunctionCx<'a, B>

pub(super) fn try_peephole(&mut self, data: &InstData) -> bool
Tries to emit optimized inline code for an instruction whose operands are partially known.
Returns true if the peephole fired and code was emitted, false to fall through to the
normal translation.
fn peephole_div(&mut self) -> bool
DIV a, b => a / b.
General constant divisors could use native LLVM udiv, but i256 division
generates very bloated code (~100+ instructions for the reciprocal multiply),
so we only emit native ops for powers of two where LLVM lowers to lshr.

fn peephole_sdiv(&mut self) -> bool
SDIV a, b => signed(a) / signed(b), rounded toward zero.
Pow2 cases are intentionally skipped: signed division rounds toward zero while arithmetic shift right rounds toward negative infinity, so the semantics differ.

fn peephole_mod(&mut self) -> bool
MOD a, b => a % b.
Same as DIV: only pow2 moduli use native urem (LLVM lowers to and).

fn peephole_smod(&mut self) -> bool
SMOD a, b => signed(a) % signed(b). Result sign matches dividend.

fn peephole_addmod(&mut self) -> bool
ADDMOD a, b, N => (a + b) % N.

fn peephole_mulmod(&mut self) -> bool
MULMOD a, b, N => (a * b) % N.

fn peephole_exp(&mut self) -> bool
EXP base, exp => base ** exp.
Dynamic gas is folded into the section gas cost by SectionsAnalysis when the
exponent is a compile-time constant, so the section gas check already covers it.
For constant-base cases with a dynamic exponent, call ExpGas to charge only the
dynamic gas and compute the result inline.

fn pay_exp_dynamic_gas(&mut self)

fn peephole_signextend(&mut self) -> bool
SIGNEXTEND ext, x => sign-extend x from (ext+1) bytes.

fn peephole_byte(&mut self) -> bool
BYTE index, value => value[index].

fn peephole_load(&mut self, op: u8) -> bool

fn peephole_keccak256(&mut self) -> bool

fn peephole_return(&mut self, op: u8) -> bool

fn const_memory_operands(&self, args: [Option<U256>; 2]) -> Option<(u64, u64)>
impl<'a, B: Backend> FunctionCx<'a, B>

pub(super) fn translate(
    bcx: B::Builder<'a>,
    config: FcxConfig,
    builtins: &'a mut Builtins<B>,
    bytecode: &'a Bytecode<'a>,
) -> Result<()>
Translates an EVM bytecode into a native function.
Example pseudo-code:
// `cfg(may_suspend) = bytecode.may_suspend()`: `true` if it contains a
// `*CALL*` or `CREATE*` instruction.
fn evm_bytecode(args: ...) {
setup_locals();
#[cfg(debug_assertions)]
if args.<ptr>.is_null() { panic!("...") };
load_arguments();
#[cfg(may_suspend)]
resume: {
goto match ecx.resume_at {
0 => inst0,
1 => first_call_or_create_inst + 1, // + 1 as in the block after.
2 => second_call_or_create_inst + 1,
... => ...,
_ => unreachable, // Assumed to be valid.
};
};
op.inst0: { /* ... */ };
op.inst1: { /* ... */ };
// ...
#[cfg(may_suspend)]
first_call_or_create_inst: {
// ...
goto suspend(1);
};
// ...
// There will always be at least one diverging instruction.
op.stop: {
goto return(InstructionResult::Stop);
};
#[cfg(may_suspend)]
suspend(resume_at: u32): {
ecx.resume_at = resume_at;
goto return(Ok(())); // Caller checks next_action
};
// All paths lead to here.
return(ir: InstructionResult): {
#[cfg(inspect_stack)]
*args.stack_len = stack_len;
return ir;
}
}

fn translate_inst(&mut self, inst: Inst) -> Result<()>
fn sync_noop_diff(&mut self, inst: Inst, diff: i32)
Syncs the virtual stack for a NOOP instruction. Unlike sync_virtual_stack_diff,
this preserves known-constant outputs as Virtual values so that boundary
materialization (e.g. materialize_live_stack before branch/suspend) can flush
them to physical memory. Without this, NOOP'd constants would be marked
Materialized with no actual store, leaving garbage in memory.

fn sync_virtual_stack_diff(&mut self, diff: i32)
Syncs the virtual stack's top_offset with the expected value after applying
the instruction's stack diff. For inline ops (push/pop), the virtual stack is
already up to date. For builtin ops (sp_after_inputs/sp_at_top), this adjusts
the virtual stack to account for consumed inputs and materialized outputs.
Also invalidates any virtual slots in the output area that the builtin may have overwritten in physical memory, to prevent stale virtual values from shadowing the builtin's actual output.
fn const_operands<const N: usize>(&self) -> [Option<U256>; N]
Returns the known constant values of the topmost N stack operands, in the same order
as popn: index 0 is TOS, index 1 is second from top, etc.

fn fold_const(&mut self, value: impl TryInto<U256>)
Discards n stack inputs and pushes a compile-time constant.

fn pop_ignore(&mut self, n: usize)
Consumes the topmost n elements from the stack without loading them.

fn popn<const N: usize>(&mut self) -> [B::Value; N]
Removes the topmost N elements from the stack and returns them.

fn swap(&mut self, n: usize)
Swaps the topmost value with the nth value from the top.
n cannot be 0.

fn exchange(&mut self, n: usize, m: usize)
Exchange two values on the stack.
n is the first index, and the second index is calculated as n + m.
m cannot be 0.

fn return_common(&mut self, ir: InstructionResult)
RETURN or REVERT instruction.

fn create_common(&mut self, create_kind: CreateKind)
Builds a CREATE or CREATE2 instruction.

fn call_common(&mut self, call_kind: CallKind)
Builds *CALL* instructions.

fn add_resume_at(&mut self, block: B::BasicBlock)
Adds a resume point.

fn load_word(&mut self, ptr: B::Value, name: &str) -> B::Value
Loads the word at the given pointer.

fn get_field(&mut self, ptr: B::Value, offset: usize, name: &str) -> B::Value
Gets a field at the given offset.

fn load_input(&mut self) -> B::Value
Loads the ecx.input pointer on demand.

fn narrow_to_address(&mut self, slot: B::Value)
Re-loads the address at slot as i160, zero-extends to i256, and stores it back.
On little-endian the low 160 bits sit at byte offset 0, so a direct
load i160 + zext i256 gives LLVM a typed narrow load — no AND needed
to prove the high 96 bits are zero.

fn gas_remaining_addr(&mut self) -> B::Value
fn save_stack_len(&mut self)
Saves the local stack_len to stack_len_arg.

fn copy_stack_from_arg(&mut self, len: B::Value)
Copies the live prefix of the stack from the argument to the local alloca.
len is the number of live stack elements.

fn copy_stack_to_arg(&mut self)
Copies the live prefix of the stack from the local alloca to the argument.

fn stack_len_arg(&mut self) -> B::Value
Returns the stack length argument.

fn sp_at_top(&mut self) -> B::Value
Returns the stack pointer at the top (&stack[stack.len]).
Used by builtins that write a single output directly to memory.
The virtual stack is synced at instruction end via sync_virtual_stack_diff.

fn sp_after_inputs(&mut self) -> B::Value
Returns the stack pointer after the input has been popped
(&stack[stack.len - op.input()]).
This materializes all virtual values in the input/output window so builtins can read/write the physical stack. For any input whose value is a known constant, the constant is written into the corresponding stack slot. This allows DSE to NOOP the producing PUSH even for builtin-delegated opcodes that read operands directly from the stack pointer.
The virtual stack is synced with the builtin's stack effect at instruction end
via sync_virtual_stack_diff.

fn sp_after_inputs_with(&mut self, depths: &[usize]) -> B::Value
Like sp_after_inputs but only materializes the
specified operand depths.

fn write_const_operands(&mut self, inputs: usize)
Writes known-constant operands into the physical stack so that builtins see correct values even when DSE has NOOP'd the producing instruction.

fn sp_from_section(&mut self, offset: i64) -> B::Value
Returns a stack pointer offset from section_start_sp.

fn sp_from_top(&mut self, n: usize) -> B::Value
Returns the stack pointer at n from the top (&stack[len - n]).
fn stack_value_at_depth(
    &mut self,
    operand_depth: usize,
    live_depth: usize,
    name: &str,
) -> B::Value
Resolves a stack value at the given depth via the virtual stack.
operand_depth: depth for const_operand lookup (0 = first popped by the instruction).
live_depth: depth into the virtual stack's current live range (0 = current TOS).
These differ only inside popn where multiple pops happen: operand_depth counts
from the instruction start, while live_depth counts from the current virtual TOS.
fn materialize_live_stack(&mut self)
Materializes all live virtual slots in the current section to memory.

fn relieve_vstack_pressure(&mut self)
Eagerly materializes the coldest virtual slots when too many are live, preventing excessive register pressure in long sections.

fn materialize_range(&mut self, start: i32, end: i32)
Materializes all virtual slots in the given section-relative offset range.

fn gas_cost_imm(&mut self, cost: u64)
Builds a gas cost deduction for an immediate value.

fn build_ensure_memory(&mut self, offset: B::Value, len: u64) -> B::Value
Ensures the memory is large enough for offset + len bytes, calling the mresize
builtin on the cold path if needed. Returns the pointer to mem_base + offset.

fn can_skip_ensure_memory(&self, inst: Inst) -> bool

fn build_memory_addr(&mut self, offset: B::Value) -> B::Value

fn load_memory_base(&mut self) -> B::Value

fn check_stack_bounds(&mut self, stack_section: StackSection)
Emits under/overflow bounds checks for a stack section.

fn build_check(&mut self, failure_cond: B::Value, ret: InstructionResult)
Builds a check, failing if the condition is true.
if failure_cond { return ret } else { ... }

fn build_check_imm_inner(&mut self, is_failure: bool, cond: B::Value, ret: InstructionResult)

fn build_check_inner(&mut self, is_failure: bool, cond: B::Value, ret: B::Value) -> B::BasicBlock

fn build_fail_imm(&mut self, ret: InstructionResult)
Builds a branch to the failure block.

fn build_fail(&mut self, ret: B::Value)
Builds a branch to the failure block.

fn build_return_imm(&mut self, ret: InstructionResult)
Builds a branch to the return block.

fn build_return(&mut self, ret: B::Value)
Builds a branch to the return block.

fn add_invalid_jump(&mut self) -> B::BasicBlock

fn call_panic(&mut self, msg: &str)
Build a call to the panic builtin.

fn call_printf(&mut self, template: &CStr, values: &[B::Value])

fn call_fallible_builtin(&mut self, builtin: Builtin, args: &[B::Value])
Build a call to a fallible builtin.
The builtin longjmps on error, so no return value check is needed.

fn call_builtin(&mut self, builtin: Builtin, args: &[B::Value]) -> Option<B::Value>
Build a call to a builtin.

fn builtin_function(&mut self, builtin: Builtin) -> B::Function
Gets the function for the given builtin.

fn add_comment(&mut self, comment: &str)
Adds a comment to the current instruction.

fn current_inst(&self) -> &InstData
Returns the current instruction.

fn current_block(&mut self) -> B::BasicBlock
Returns the current block.

fn little_endian(&self) -> bool

fn create_block_after(&mut self, after: B::BasicBlock, name: &str) -> B::BasicBlock
Creates a named block after the given block.

fn op_block_name(&self, name: &str) -> String
Returns the block name for the current opcode with the given suffix.

fn u256_to_u64_saturating(&mut self, value: B::Value, bits: usize) -> B::Value
Converts a 256-bit unsigned integer to a 64-bit unsigned integer, saturating at
2^bits - 1.
impl<B: Backend> FunctionCx<'_, B>
IR builtins.
Auto Trait Implementations

impl<'a, B> Freeze for FunctionCx<'a, B>
impl<'a, B> !RefUnwindSafe for FunctionCx<'a, B>
impl<'a, B> !Send for FunctionCx<'a, B>
impl<'a, B> !Sync for FunctionCx<'a, B>
impl<'a, B> Unpin for FunctionCx<'a, B>
impl<'a, B> UnsafeUnpin for FunctionCx<'a, B> where
    <B as Backend>::Builder<'a>: UnsafeUnpin,
    <B as BackendTypes>::Type: UnsafeUnpin,
    <B as BackendTypes>::Value: UnsafeUnpin,
    <B as BackendTypes>::BasicBlock: UnsafeUnpin,
    <B as BackendTypes>::StackSlot: UnsafeUnpin,
impl<'a, B> !UnwindSafe for FunctionCx<'a, B>
Blanket Implementations

impl<T> BorrowMut<T> for T where T: ?Sized

fn borrow_mut(&mut self) -> &mut T
Mutably borrows from an owned value. Read more
impl<T> Conv for T

impl<T> FmtForward for T

fn fmt_binary(self) -> FmtBinary<Self> where Self: Binary
Causes self to use its Binary implementation when Debug-formatted.

fn fmt_display(self) -> FmtDisplay<Self> where Self: Display
Causes self to use its Display implementation when Debug-formatted.

fn fmt_lower_exp(self) -> FmtLowerExp<Self> where Self: LowerExp
Causes self to use its LowerExp implementation when Debug-formatted.

fn fmt_lower_hex(self) -> FmtLowerHex<Self> where Self: LowerHex
Causes self to use its LowerHex implementation when Debug-formatted.

fn fmt_octal(self) -> FmtOctal<Self> where Self: Octal
Causes self to use its Octal implementation when Debug-formatted.

fn fmt_pointer(self) -> FmtPointer<Self> where Self: Pointer
Causes self to use its Pointer implementation when Debug-formatted.

fn fmt_upper_exp(self) -> FmtUpperExp<Self> where Self: UpperExp
Causes self to use its UpperExp implementation when Debug-formatted.

fn fmt_upper_hex(self) -> FmtUpperHex<Self> where Self: UpperHex
Causes self to use its UpperHex implementation when Debug-formatted.

fn fmt_list(self) -> FmtList<Self> where for<'a> &'a Self: IntoIterator
impl<T> Instrument for T

fn instrument(self, span: Span) -> Instrumented<Self>

fn in_current_span(self) -> Instrumented<Self>
impl<T> IntoEither for T

fn into_either(self, into_left: bool) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left is true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more

fn into_either_with<F>(self, into_left: F) -> Either<Self, Self>
Converts self into a Left variant of Either<Self, Self> if into_left(&self) returns true.
Converts self into a Right variant of Either<Self, Self> otherwise. Read more

impl<T> Pipe for T where T: ?Sized
fn pipe<R>(self, func: impl FnOnce(Self) -> R) -> R where Self: Sized

fn pipe_ref<'a, R>(&'a self, func: impl FnOnce(&'a Self) -> R) -> R where R: 'a
Borrows self and passes that borrow into the pipe function. Read more

fn pipe_ref_mut<'a, R>(&'a mut self, func: impl FnOnce(&'a mut Self) -> R) -> R where R: 'a
Mutably borrows self and passes that borrow into the pipe function. Read more

fn pipe_borrow<'a, B, R>(&'a self, func: impl FnOnce(&'a B) -> R) -> R

fn pipe_borrow_mut<'a, B, R>(&'a mut self, func: impl FnOnce(&'a mut B) -> R) -> R

fn pipe_as_ref<'a, U, R>(&'a self, func: impl FnOnce(&'a U) -> R) -> R
Borrows self, then passes self.as_ref() into the pipe function.

fn pipe_as_mut<'a, U, R>(&'a mut self, func: impl FnOnce(&'a mut U) -> R) -> R
Mutably borrows self, then passes self.as_mut() into the pipe function.

fn pipe_deref<'a, T, R>(&'a self, func: impl FnOnce(&'a T) -> R) -> R
Borrows self, then passes self.deref() into the pipe function.

impl<T> Tap for T
fn tap_borrow<B>(self, func: impl FnOnce(&B)) -> Self
Immutable access to the Borrow<B> of a value. Read more

fn tap_borrow_mut<B>(self, func: impl FnOnce(&mut B)) -> Self
Mutable access to the BorrowMut<B> of a value. Read more

fn tap_ref<R>(self, func: impl FnOnce(&R)) -> Self
Immutable access to the AsRef<R> view of a value. Read more

fn tap_ref_mut<R>(self, func: impl FnOnce(&mut R)) -> Self
Mutable access to the AsMut<R> view of a value. Read more

fn tap_deref<T>(self, func: impl FnOnce(&T)) -> Self
Immutable access to the Deref::Target of a value. Read more

fn tap_deref_mut<T>(self, func: impl FnOnce(&mut T)) -> Self
Mutable access to the Deref::Target of a value. Read more

fn tap_dbg(self, func: impl FnOnce(&Self)) -> Self
Calls .tap() only in debug builds, and is erased in release builds.

fn tap_mut_dbg(self, func: impl FnOnce(&mut Self)) -> Self
Calls .tap_mut() only in debug builds, and is erased in release builds.

fn tap_borrow_dbg<B>(self, func: impl FnOnce(&B)) -> Self
Calls .tap_borrow() only in debug builds, and is erased in release builds.

fn tap_borrow_mut_dbg<B>(self, func: impl FnOnce(&mut B)) -> Self
Calls .tap_borrow_mut() only in debug builds, and is erased in release builds.

fn tap_ref_dbg<R>(self, func: impl FnOnce(&R)) -> Self
Calls .tap_ref() only in debug builds, and is erased in release builds.

fn tap_ref_mut_dbg<R>(self, func: impl FnOnce(&mut R)) -> Self
Calls .tap_ref_mut() only in debug builds, and is erased in release builds.

fn tap_deref_dbg<T>(self, func: impl FnOnce(&T)) -> Self
Calls .tap_deref() only in debug builds, and is erased in release builds.

impl<T> TryConv for T
impl<T> WithSubscriber for T

fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self> where S: Into<Dispatch>

fn with_current_subscriber(self) -> WithDispatch<Self>

Layout

Note: Unable to compute type layout, possibly due to this type having generic parameters. Layout can only be computed for concrete, fully-instantiated types.