From 2205ebaef1ba2999be7aa58c31c1753c4e9c679d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 13 Dec 2024 16:55:31 +0100 Subject: [PATCH 01/18] Refactor - Sanitize IP/PC immediate values (#1) * Always sanitize emit_profile_instruction_count() and emit_undo_profile_instruction_count(). * Sanitize emit_validate_instruction_count() as well. --- src/jit.rs | 62 ++++++++++++++++++++++++------------------------------ 1 file changed, 27 insertions(+), 35 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index 7d0e4570..f151d2c3 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -38,7 +38,7 @@ use crate::{ const MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH: usize = 4096; const MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION: usize = 110; -const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 13; +const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 23; const MAX_START_PADDING_LENGTH: usize = 256; pub struct JitProgram { @@ -423,7 +423,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { match insn.opc { ebpf::LD_DW_IMM if !self.executable.get_sbpf_version().disable_lddw() => { - self.emit_validate_and_profile_instruction_count(false, Some(self.pc + 2)); + self.emit_validate_and_profile_instruction_count(Some(self.pc + 2)); self.pc += 1; self.result.pc_section[self.pc] = self.anchors[ANCHOR_CALL_UNSUPPORTED_INSTRUCTION] as usize; ebpf::augment_lddw_unchecked(self.program, &mut insn); @@ -702,7 +702,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // BPF_JMP class ebpf::JA => { - self.emit_validate_and_profile_instruction_count(true, Some(target_pc)); + self.emit_validate_and_profile_instruction_count(Some(target_pc)); self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, target_pc as i64)); let jump_offset = self.relative_to_target_pc(target_pc, 5); self.emit_ins(X86Instruction::jump_immediate(jump_offset)); @@ -788,7 +788,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); // env.call_depth -= 1; // and return - self.emit_profile_instruction_count(false, Some(0)); + self.emit_profile_instruction_count(Some(0)); self.emit_ins(X86Instruction::return_near()); }, @@ -802,7 +802,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { if self.offset_in_text_section + MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION * 2 >= self.result.text_section.len() { return Err(EbpfError::ExhaustedTextSegment(self.pc)); } - self.emit_validate_and_profile_instruction_count(false, Some(self.pc + 1)); + self.emit_validate_and_profile_instruction_count(Some(self.pc + 1)); self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.pc as i64)); // Save pc self.emit_set_exception_kind(EbpfError::ExecutionOverrun); self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5))); @@ -941,49 +941,35 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // Update `MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT` if you change the code generation here if let Some(pc) = pc { self.last_instruction_meter_validation_pc = pc; - // instruction_meter >= self.pc - self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_INSTRUCTION_METER, pc as i64, None)); - } else { - // instruction_meter >= scratch_register - self.emit_ins(X86Instruction::cmp(OperandSize::S64, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); + self.emit_sanitized_load_immediate(REGISTER_SCRATCH, pc as i64); } + // If instruction_meter >= pc, throw ExceededMaxInstructions + 
self.emit_ins(X86Instruction::cmp(OperandSize::S64, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None));
         self.emit_ins(X86Instruction::conditional_jump_immediate(0x86, self.relative_to_anchor(ANCHOR_THROW_EXCEEDED_MAX_INSTRUCTIONS, 6)));
     }
 
     #[inline]
-    fn emit_profile_instruction_count(&mut self, user_provided: bool, target_pc: Option<usize>) {
+    fn emit_profile_instruction_count(&mut self, target_pc: Option<usize>) {
         if !self.config.enable_instruction_meter {
             return;
         }
         match target_pc {
             Some(target_pc) => {
-                // instruction_meter += target_pc - (self.pc + 1);
-                let immediate = target_pc as i64 - self.pc as i64 - 1;
-                if user_provided {
-                    self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, immediate);
-                } else {
-                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, immediate, None));
-                }
+                self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, target_pc as i64 - self.pc as i64 - 1); // instruction_meter += target_pc - (self.pc + 1);
             },
             None => {
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1, None)); // instruction_meter -= self.pc + 1;
                 self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter += target_pc;
+                self.emit_sanitized_alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1); // instruction_meter -= self.pc + 1;
             },
         }
     }
 
-    #[inline]
-    fn emit_validate_and_profile_instruction_count(&mut self, user_provided: bool, target_pc: Option<usize>) {
-        self.emit_validate_instruction_count(Some(self.pc));
-        self.emit_profile_instruction_count(user_provided, target_pc);
-    }
-
     #[inline]
     fn emit_undo_profile_instruction_count(&mut self, target_pc: Value) {
         if self.config.enable_instruction_meter {
             match target_pc {
                 Value::Constant64(target_pc, _) => {
-                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1 - target_pc, None)); // instruction_meter += (self.pc + 1) - target_pc;
+                    self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1 - target_pc); // instruction_meter += (self.pc + 1) - target_pc;
                 }
                 Value::Register(target_pc) => {
                     self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, target_pc, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter -= guest_target_pc
@@ -995,6 +981,12 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
         }
     }
 
+    #[inline]
+    fn emit_validate_and_profile_instruction_count(&mut self, target_pc: Option<usize>) {
+        self.emit_validate_instruction_count(Some(self.pc));
+        self.emit_profile_instruction_count(target_pc);
+    }
+
     fn emit_rust_call(&mut self, target: Value, arguments: &[Argument], result_reg: Option<u8>) {
         let mut saved_registers = CALLER_SAVED_REGISTERS.to_vec();
         if let Some(reg) = result_reg {
@@ -1123,7 +1115,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             },
             Value::Constant64(target_pc, user_provided) => {
                 debug_assert!(user_provided);
-                self.emit_profile_instruction_count(user_provided, Some(target_pc as usize));
+                self.emit_profile_instruction_count(Some(target_pc as usize));
                 if user_provided && self.should_sanitize_constant(target_pc) {
                     self.emit_sanitized_load_immediate(REGISTER_SCRATCH, target_pc);
                 } else {
@@ -1149,7 +1141,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
 
     #[inline]
     fn emit_syscall_dispatch(&mut self, function: BuiltinFunction<C>) {
-        self.emit_validate_and_profile_instruction_count(false, Some(0));
+
self.emit_validate_and_profile_instruction_count(Some(0)); self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, function as usize as i64)); self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_EXTERNAL_FUNCTION_CALL, 5))); self.emit_undo_profile_instruction_count(Value::Constant64(0, false)); @@ -1228,7 +1220,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { #[inline] fn emit_conditional_branch_reg(&mut self, op: u8, bitwise: bool, first_operand: u8, second_operand: u8, target_pc: usize) { - self.emit_validate_and_profile_instruction_count(true, Some(target_pc)); + self.emit_validate_and_profile_instruction_count(Some(target_pc)); if bitwise { // Logical self.emit_ins(X86Instruction::test(OperandSize::S64, first_operand, second_operand, None)); } else { // Arithmetic @@ -1242,7 +1234,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { #[inline] fn emit_conditional_branch_imm(&mut self, op: u8, bitwise: bool, immediate: i64, second_operand: u8, target_pc: usize) { - self.emit_validate_and_profile_instruction_count(true, Some(target_pc)); + self.emit_validate_and_profile_instruction_count(Some(target_pc)); if self.should_sanitize_constant(immediate) { self.emit_sanitized_load_immediate(REGISTER_SCRATCH, immediate); if bitwise { // Logical @@ -1578,7 +1570,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { let number_of_instructions = self.result.pc_section.len(); self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_SCRATCH, (number_of_instructions * INSN_SIZE) as i64, None)); // guest_target_address.cmp(number_of_instructions * INSN_SIZE) self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT, 6))); - // First half of self.emit_profile_instruction_count(false, None); + // First half of self.emit_profile_instruction_count(None); self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2b, REGISTER_INSTRUCTION_METER, RSP, 0, Some(X86IndirectAccess::OffsetIndexShift(-8, RSP, 0)))); // instruction_meter -= guest_current_pc; self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter -= 1; // Load host target_address from self.result.pc_section @@ -1591,7 +1583,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { let shift_amount = INSN_SIZE.trailing_zeros(); debug_assert_eq!(INSN_SIZE, 1 << shift_amount); self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 5, REGISTER_SCRATCH, shift_amount as i64, None)); // guest_target_pc /= INSN_SIZE; - // Second half of self.emit_profile_instruction_count(false, None); + // Second half of self.emit_profile_instruction_count(None); self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter += guest_target_pc; // Restore the clobbered REGISTER_MAP[0] self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_MAP[0], RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Swap REGISTER_MAP[0] and host_target_address @@ -1841,9 +1833,9 @@ mod tests { let instruction_meter_checkpoint_machine_code_length = instruction_meter_checkpoint_machine_code_length[0] - instruction_meter_checkpoint_machine_code_length[1]; - assert_eq!( - instruction_meter_checkpoint_machine_code_length, - MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT + assert!( + instruction_meter_checkpoint_machine_code_length + <= MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT ); for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { 
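
The change in this patch is, at its core, constant blinding: by always routing IP/PC-derived immediates through `emit_sanitized_load_immediate()` and `emit_sanitized_alu()`, a guest-influenced constant never appears verbatim in the emitted machine code, so a guest program cannot use immediates to spray chosen byte patterns into executable memory. A minimal sketch of the idea, assuming a per-compilation random `key`; the names and signatures below are illustrative, not the crate's actual emitter:

```rust
// Constant blinding: embed `value - key` as the literal and re-add `key`
// at runtime, so `value` itself never appears as an immediate in the code.
struct Emitter {
    key: i64, // in the real JIT this would be drawn from a CSPRNG per compilation
}

impl Emitter {
    // Stand-ins for the real x86 instruction encoders.
    fn emit_load_immediate(&mut self, reg: u8, value: i64) {
        println!("mov r{reg}, {value:#x}");
    }
    fn emit_add_immediate(&mut self, reg: u8, value: i64) {
        println!("add r{reg}, {value:#x}");
    }

    fn emit_sanitized_load_immediate(&mut self, reg: u8, value: i64) {
        let blinded = value.wrapping_sub(self.key);
        self.emit_load_immediate(reg, blinded); // only `blinded` lands in the code
        self.emit_add_immediate(reg, self.key); // reg = blinded + key == value
    }
}

fn main() {
    let mut emitter = Emitter { key: 0x1b2e_6c4d_5a39_f701 };
    // E.g. loading a target_pc into the scratch register:
    emitter.emit_sanitized_load_immediate(11, 0x2a);
}
```

The price is an extra ALU instruction per sanitized immediate, which is consistent with `MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT` growing from 13 to 23 bytes above, and with the corresponding size test relaxing from an exact `assert_eq!` to an upper-bound `assert!`.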
From 75d5bc80c24db60e3d1e535be45adae9e82bac9d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 13 Dec 2024 18:38:06 +0100 Subject: [PATCH 02/18] Bump to v0.9.0 (#2) * Updates the README * Bump to v0.9.0 * Renames the crate. --- .github/workflows/main.yml | 2 +- Cargo.lock | 8 +++--- Cargo.toml | 6 ++-- README.md | 32 ++++++++++----------- benches/elf_loader.rs | 4 +-- benches/jit_compile.rs | 4 +-- benches/memory_mapping.rs | 4 +-- benches/vm_execution.rs | 8 +++--- cli/Cargo.lock | 14 ++++----- cli/Cargo.toml | 8 +++--- cli/src/main.rs | 2 +- examples/disassemble.rs | 4 +-- examples/to_json.rs | 4 +-- fuzz/Cargo.lock | 14 ++++----- fuzz/Cargo.toml | 6 ++-- fuzz/fuzz_targets/common.rs | 2 +- fuzz/fuzz_targets/dumb.rs | 2 +- fuzz/fuzz_targets/grammar_aware.rs | 2 +- fuzz/fuzz_targets/semantic_aware.rs | 2 +- fuzz/fuzz_targets/smart.rs | 2 +- fuzz/fuzz_targets/smart_jit_diff.rs | 2 +- fuzz/fuzz_targets/smarter_jit_diff.rs | 2 +- fuzz/fuzz_targets/verify_semantic_aware.rs | 2 +- misc/rbpf.ico | Bin 1150 -> 0 bytes misc/rbpf.png | Bin 29765 -> 0 bytes misc/rbpf_256.png | Bin 13059 -> 0 bytes src/assembler.rs | 2 +- src/ebpf.rs | 10 +++---- src/lib.rs | 6 +--- src/syscalls.rs | 2 +- src/vm.rs | 2 +- test_utils/Cargo.lock | 8 +++--- test_utils/Cargo.toml | 4 +-- test_utils/src/lib.rs | 8 +++--- tests/assembler.rs | 8 +++--- tests/disassembler.rs | 6 ++-- tests/elfs/elfs.sh | 2 +- tests/execution.rs | 6 ++-- tests/exercise_instructions.rs | 4 +-- tests/verifier.rs | 4 +-- 40 files changed, 101 insertions(+), 107 deletions(-) delete mode 100644 misc/rbpf.ico delete mode 100644 misc/rbpf.png delete mode 100644 misc/rbpf_256.png diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index adf5fecd..68562d26 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -1,4 +1,4 @@ -name: rbpf +name: sbpf on: push: diff --git a/Cargo.lock b/Cargo.lock index 8b9c8a77..b6adf25e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -346,8 +346,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] -name = "solana_rbpf" -version = "0.8.2" +name = "solana-sbpf" +version = "0.9.0" dependencies = [ "arbitrary", "byteorder 1.4.3", @@ -385,10 +385,10 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "test_utils" -version = "0.8.2" +version = "0.9.0" dependencies = [ "libc", - "solana_rbpf", + "solana-sbpf", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index f5537db5..cd0d1c84 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,9 +1,9 @@ [package] -name = "solana_rbpf" -version = "0.8.2" +name = "solana-sbpf" +version = "0.9.0" description = "Virtual machine and JIT compiler for eBPF programs" authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/rbpf" +repository = "https://github.com/anza-xyz/sbpf" homepage = "https://solana.com/" keywords = ["BPF", "eBPF", "interpreter", "JIT", "filtering"] license = "Apache-2.0" diff --git a/README.md b/README.md index f1d9e63a..0b5ee51a 100644 --- a/README.md +++ b/README.md @@ -1,15 +1,13 @@ -# solana_rbpf +# solana-sbpf -![](misc/rbpf_256.png) +SBPF virtual machine -Rust (user-space) virtual machine for eBPF - -[![Build Status](https://github.com/solana-labs/rbpf/actions/workflows/main.yml/badge.svg)](https://github.com/solana-labs/rbpf/actions/workflows/main.yml) 
-[![Crates.io](https://img.shields.io/crates/v/solana_rbpf.svg)](https://crates.io/crates/solana_rbpf)
+[![Build Status](https://github.com/anza-xyz/sbpf/actions/workflows/main.yml/badge.svg)](https://github.com/anza-xyz/sbpf/actions/workflows/main.yml)
+[![Crates.io](https://img.shields.io/crates/v/solana-sbpf.svg)](https://crates.io/crates/solana-sbpf)
 
 ## Description
 
-This is a fork of [RBPF](https://github.com/qmonnet/rbpf) by Quentin Monnet.
+This is a fork of [RBPF](https://github.com/solana-labs/rbpf), which in turn is a fork of [rbpf](https://github.com/qmonnet/rbpf) by Quentin Monnet.
 
 This crate contains a virtual machine for eBPF program execution. BPF, as in
 _Berkeley Packet Filter_, is an assembly-like language initially developed for
@@ -26,13 +24,13 @@ although the JIT-compiler does not work with Windows at this time.
 
 ## Link to the crate
 
-This crate is available from [crates.io](https://crates.io/crates/solana_rbpf),
+This crate is available from [crates.io](https://crates.io/crates/solana-sbpf),
 so it should work out of the box by adding it as a dependency in your
 `Cargo.toml` file:
 
 ```toml
 [dependencies]
-solana_rbpf = "0.8.2"
+solana-sbpf = "0.9.0"
 ```
 
 You can also use the development version from this GitHub repository. This
@@ -40,7 +38,7 @@ should be as simple as putting this inside your `Cargo.toml`:
 
 ```toml
 [dependencies]
-solana_rbpf = { git = "https://github.com/solana-labs/rbpf", branch = "main" }
+solana-sbpf = { git = "https://github.com/anza-xyz/sbpf", branch = "main" }
 ```
 
 Of course, if you prefer, you can clone it locally, possibly hack the crate,
@@ -48,26 +46,26 @@ and then indicate the path of your local version in `Cargo.toml`:
 
 ```toml
 [dependencies]
-solana_rbpf = { path = "path/to/solana_rbpf" }
+solana-sbpf = { path = "path/to/sbpf" }
 ```
 
 Then indicate in your source code that you want to use the crate:
 
 ```rust,ignore
-extern crate solana_rbpf;
+extern crate solana_sbpf;
 ```
 
 ## API
 
 The API is pretty well documented inside the source code. You should also be
 able to access [an online version of the documentation from
-here](https://docs.rs/solana_rbpf/), automatically generated from the
-[crates.io](https://crates.io/crates/solana_rbpf)
+here](https://docs.rs/solana-sbpf/), automatically generated from the
+[crates.io](https://crates.io/crates/solana-sbpf)
 version (may not be up-to-date with master branch).
 [Examples](examples), [unit tests](tests) and [performance benchmarks](benches)
 should also prove helpful.
 
-Here are the steps to follow to run an eBPF program with rbpf:
+Here are the steps to follow to run an SBPF program:
 
 1. Create the config and a loader built-in program, add some functions.
 2. Create an executable, either from the bytecode or an ELF.
@@ -81,7 +79,7 @@ Here are the steps to follow to run an eBPF program with rbpf:
 
 ## Developer
 
 ### Dependencies
-- rustc version 1.72 or higher
+- rustc version 1.83 or higher
 
 ### Build and test instructions
 - To build run `cargo build`
@@ -90,7 +88,7 @@ Here are the steps to follow to run an eBPF program with rbpf:
 ## License
 
 Following the effort of the Rust language project itself in order to ease
-integration with other projects, the rbpf crate is distributed under the terms
+integration with other projects, the sbpf crate is distributed under the terms
 of both the MIT license and the Apache License (Version 2.0). See
 [LICENSE-APACHE](LICENSE-APACHE) and [LICENSE-MIT](LICENSE-MIT) for details.
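
To make the steps listed in the README above concrete, here is a condensed sketch of the whole flow. It is stitched together from the crate's tests and doc examples; `BuiltinProgram::new_mock()`, `TestContextObject`, and the argument order of the `create_vm!` macro are assumptions against the 0.8/0.9 API and may shift between releases:

```rust
use std::sync::Arc;
use solana_sbpf::{
    assembler::assemble,
    memory_region::MemoryRegion,
    program::BuiltinProgram,
    verifier::RequisiteVerifier,
    vm::TestContextObject,
};

fn main() {
    // Steps 1-2: create a loader and build an executable (here from assembly text).
    let loader = Arc::new(BuiltinProgram::new_mock());
    let executable =
        assemble::<TestContextObject>("mov64 r0, 5\nexit", loader).unwrap();
    // Step 3: verify the bytecode.
    executable.verify::<RequisiteVerifier>().unwrap();
    // Remaining steps: create a VM with an instruction budget and execute.
    let mut context_object = TestContextObject::new(2);
    let regions: Vec<MemoryRegion> = Vec::new();
    solana_sbpf::create_vm!(
        vm,
        &executable,
        &mut context_object,
        stack,
        heap,
        regions,
        None
    );
    let (instruction_count, result) = vm.execute_program(&executable, true);
    println!("executed {instruction_count} instructions, result: {result:?}");
}
```

The `true` flag selects the interpreter; passing `false` would run the JIT-compiled program instead, after a prior `jit_compile()` on the executable.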
diff --git a/benches/elf_loader.rs b/benches/elf_loader.rs index 656c794a..d64c0745 100644 --- a/benches/elf_loader.rs +++ b/benches/elf_loader.rs @@ -6,11 +6,11 @@ #![feature(test)] -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate test; extern crate test_utils; -use solana_rbpf::{ +use solana_sbpf::{ elf::Executable, program::{BuiltinFunction, BuiltinProgram, FunctionRegistry}, syscalls, diff --git a/benches/jit_compile.rs b/benches/jit_compile.rs index cbc7a072..6c7f7b5b 100644 --- a/benches/jit_compile.rs +++ b/benches/jit_compile.rs @@ -6,10 +6,10 @@ #![feature(test)] -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate test; -use solana_rbpf::{ +use solana_sbpf::{ elf::Executable, program::BuiltinProgram, verifier::RequisiteVerifier, vm::TestContextObject, }; use std::{fs::File, io::Read, sync::Arc}; diff --git a/benches/memory_mapping.rs b/benches/memory_mapping.rs index 91e77dee..a2074a96 100644 --- a/benches/memory_mapping.rs +++ b/benches/memory_mapping.rs @@ -7,11 +7,11 @@ #![feature(test)] extern crate rand; -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate test; use rand::{rngs::SmallRng, Rng, SeedableRng}; -use solana_rbpf::{ +use solana_sbpf::{ memory_region::{ AccessType, AlignedMemoryMapping, MemoryRegion, MemoryState, UnalignedMemoryMapping, }, diff --git a/benches/vm_execution.rs b/benches/vm_execution.rs index d39b47c4..45c02690 100644 --- a/benches/vm_execution.rs +++ b/benches/vm_execution.rs @@ -6,17 +6,17 @@ #![feature(test)] -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate test; #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] -use solana_rbpf::{ +use solana_sbpf::{ ebpf, memory_region::MemoryRegion, program::{FunctionRegistry, SBPFVersion}, vm::Config, }; -use solana_rbpf::{ +use solana_sbpf::{ elf::Executable, program::BuiltinProgram, verifier::RequisiteVerifier, vm::TestContextObject, }; use std::{fs::File, io::Read, sync::Arc}; @@ -83,7 +83,7 @@ fn bench_jit_vs_interpreter( instruction_meter: u64, mem: &mut [u8], ) { - let mut executable = solana_rbpf::assembler::assemble::( + let mut executable = solana_sbpf::assembler::assemble::( assembly, Arc::new(BuiltinProgram::new_loader( config, diff --git a/cli/Cargo.lock b/cli/Cargo.lock index 0caf8e1a..c1d74f53 100644 --- a/cli/Cargo.lock +++ b/cli/Cargo.lock @@ -301,11 +301,11 @@ dependencies = [ ] [[package]] -name = "rbpf_cli" -version = "0.8.2" +name = "sbpf_cli" +version = "0.9.0" dependencies = [ "clap", - "solana_rbpf", + "solana-sbpf", "test_utils", ] @@ -322,8 +322,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" [[package]] -name = "solana_rbpf" -version = "0.8.2" +name = "solana-sbpf" +version = "0.9.0" dependencies = [ "byteorder", "combine", @@ -366,10 +366,10 @@ dependencies = [ [[package]] name = "test_utils" -version = "0.8.2" +version = "0.9.0" dependencies = [ "libc", - "solana_rbpf", + "solana-sbpf", ] [[package]] diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 6aa44a7d..1146356c 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -1,14 +1,14 @@ [package] -name = "rbpf_cli" -version = "0.8.2" +name = "sbpf_cli" +version = "0.9.0" description = "CLI to test and analyze eBPF programs" authors = ["Solana Maintainers "] -repository = "https://github.com/solana-labs/rbpf" +repository = "https://github.com/anza-xyz/sbpf" homepage = "https://solana.com/" keywords = ["BPF", "eBPF", "interpreter", "JIT"] 
edition = "2018" [dependencies] -solana_rbpf = { path = "../", features = ["debugger"] } +solana-sbpf = { path = "../", features = ["debugger"] } test_utils = { path = "../test_utils/" } clap = "3.0.0-beta.2" diff --git a/cli/src/main.rs b/cli/src/main.rs index cded744a..5c3d344b 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,5 +1,5 @@ use clap::{crate_version, App, Arg}; -use solana_rbpf::{ +use solana_sbpf::{ aligned_memory::AlignedMemory, assembler::assemble, ebpf, diff --git a/examples/disassemble.rs b/examples/disassemble.rs index d668cbaf..50b45fec 100644 --- a/examples/disassemble.rs +++ b/examples/disassemble.rs @@ -4,8 +4,8 @@ // the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. -extern crate solana_rbpf; -use solana_rbpf::{ +extern crate solana_sbpf; +use solana_sbpf::{ elf::Executable, program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, static_analysis::Analysis, diff --git a/examples/to_json.rs b/examples/to_json.rs index cb6b1e2e..068c5edf 100644 --- a/examples/to_json.rs +++ b/examples/to_json.rs @@ -10,8 +10,8 @@ extern crate json; extern crate elf; use std::path::PathBuf; -extern crate solana_rbpf; -use solana_rbpf::{ +extern crate solana_sbpf; +use solana_sbpf::{ elf::Executable, program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, static_analysis::Analysis, diff --git a/fuzz/Cargo.lock b/fuzz/Cargo.lock index 287092d4..0116e06f 100644 --- a/fuzz/Cargo.lock +++ b/fuzz/Cargo.lock @@ -315,8 +315,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da" [[package]] -name = "solana_rbpf" -version = "0.8.2" +name = "solana-sbpf" +version = "0.9.0" dependencies = [ "arbitrary", "byteorder", @@ -332,14 +332,14 @@ dependencies = [ ] [[package]] -name = "solana_rbpf-fuzz" -version = "0.8.2" +name = "solana-sbpf-fuzz" +version = "0.9.0" dependencies = [ "arbitrary", "libfuzzer-sys", "num-traits", "rayon", - "solana_rbpf", + "solana-sbpf", "test_utils", ] @@ -356,10 +356,10 @@ dependencies = [ [[package]] name = "test_utils" -version = "0.8.2" +version = "0.9.0" dependencies = [ "libc", - "solana_rbpf", + "solana-sbpf", ] [[package]] diff --git a/fuzz/Cargo.toml b/fuzz/Cargo.toml index c16abfa7..03a1b71f 100644 --- a/fuzz/Cargo.toml +++ b/fuzz/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "solana_rbpf-fuzz" -version = "0.8.2" +name = "solana-sbpf-fuzz" +version = "0.9.0" authors = ["Automatically generated"] publish = false edition = "2018" @@ -15,7 +15,7 @@ num-traits = "0.2" rayon = "1.5" test_utils = { path = "../test_utils/" } -[dependencies.solana_rbpf] +[dependencies.solana-sbpf] path = ".." 
features = ["fuzzer-not-safe-for-production"] diff --git a/fuzz/fuzz_targets/common.rs b/fuzz/fuzz_targets/common.rs index 17e8d0c7..0caa9bef 100644 --- a/fuzz/fuzz_targets/common.rs +++ b/fuzz/fuzz_targets/common.rs @@ -2,7 +2,7 @@ use std::mem::size_of; use arbitrary::{Arbitrary, Unstructured}; -use solana_rbpf::vm::Config; +use solana_sbpf::vm::Config; #[derive(Debug)] pub struct ConfigTemplate { diff --git a/fuzz/fuzz_targets/dumb.rs b/fuzz/fuzz_targets/dumb.rs index ee624f5b..d0c8fc0c 100644 --- a/fuzz/fuzz_targets/dumb.rs +++ b/fuzz/fuzz_targets/dumb.rs @@ -4,7 +4,7 @@ use std::hint::black_box; use libfuzzer_sys::fuzz_target; -use solana_rbpf::{ +use solana_sbpf::{ ebpf, elf::Executable, memory_region::MemoryRegion, diff --git a/fuzz/fuzz_targets/grammar_aware.rs b/fuzz/fuzz_targets/grammar_aware.rs index b5b19f2e..69e557fa 100644 --- a/fuzz/fuzz_targets/grammar_aware.rs +++ b/fuzz/fuzz_targets/grammar_aware.rs @@ -1,6 +1,6 @@ #![allow(dead_code)] -use solana_rbpf::insn_builder::{Arch, BpfCode, Cond, Endian, Instruction, MemSize, Source}; +use solana_sbpf::insn_builder::{Arch, BpfCode, Cond, Endian, Instruction, MemSize, Source}; #[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq)] pub enum FuzzedOp { diff --git a/fuzz/fuzz_targets/semantic_aware.rs b/fuzz/fuzz_targets/semantic_aware.rs index 6e1a066f..28bd752a 100644 --- a/fuzz/fuzz_targets/semantic_aware.rs +++ b/fuzz/fuzz_targets/semantic_aware.rs @@ -3,7 +3,7 @@ use std::num::NonZeroI32; -use solana_rbpf::insn_builder::{Arch, BpfCode, Cond, Endian, Instruction, MemSize, Move, Source}; +use solana_sbpf::insn_builder::{Arch, BpfCode, Cond, Endian, Instruction, MemSize, Move, Source}; #[derive(arbitrary::Arbitrary, Debug, Eq, PartialEq, Copy, Clone)] pub struct Register(u8); diff --git a/fuzz/fuzz_targets/smart.rs b/fuzz/fuzz_targets/smart.rs index 9232b0d3..e003ceff 100644 --- a/fuzz/fuzz_targets/smart.rs +++ b/fuzz/fuzz_targets/smart.rs @@ -5,7 +5,7 @@ use std::hint::black_box; use libfuzzer_sys::fuzz_target; use grammar_aware::*; -use solana_rbpf::{ +use solana_sbpf::{ ebpf, elf::Executable, insn_builder::{Arch, IntoBytes}, diff --git a/fuzz/fuzz_targets/smart_jit_diff.rs b/fuzz/fuzz_targets/smart_jit_diff.rs index b77ab691..0bfb637f 100644 --- a/fuzz/fuzz_targets/smart_jit_diff.rs +++ b/fuzz/fuzz_targets/smart_jit_diff.rs @@ -3,7 +3,7 @@ use libfuzzer_sys::fuzz_target; use grammar_aware::*; -use solana_rbpf::{ +use solana_sbpf::{ ebpf, elf::Executable, insn_builder::{Arch, Instruction, IntoBytes}, diff --git a/fuzz/fuzz_targets/smarter_jit_diff.rs b/fuzz/fuzz_targets/smarter_jit_diff.rs index ebfa6e04..e1774b4f 100644 --- a/fuzz/fuzz_targets/smarter_jit_diff.rs +++ b/fuzz/fuzz_targets/smarter_jit_diff.rs @@ -3,7 +3,7 @@ use libfuzzer_sys::fuzz_target; use semantic_aware::*; -use solana_rbpf::{ +use solana_sbpf::{ ebpf, elf::Executable, insn_builder::IntoBytes, diff --git a/fuzz/fuzz_targets/verify_semantic_aware.rs b/fuzz/fuzz_targets/verify_semantic_aware.rs index 68c7d4b1..68e6fecb 100644 --- a/fuzz/fuzz_targets/verify_semantic_aware.rs +++ b/fuzz/fuzz_targets/verify_semantic_aware.rs @@ -3,7 +3,7 @@ use libfuzzer_sys::fuzz_target; use semantic_aware::*; -use solana_rbpf::{ +use solana_sbpf::{ insn_builder::IntoBytes, program::{BuiltinFunction, FunctionRegistry, SBPFVersion}, verifier::{RequisiteVerifier, Verifier}, diff --git a/misc/rbpf.ico b/misc/rbpf.ico deleted file mode 100644 index 6bb94a1c3e0bdeb109a02dba7cb51b05abd2fb7c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1150 
[base85-encoded binary data omitted]

diff --git a/misc/rbpf.png b/misc/rbpf.png
deleted file mode 100644
index b3f07c4bd6e709af8033c371de2817d7da67b25e..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data omitted]

diff --git a/misc/rbpf_256.png b/misc/rbpf_256.png
deleted file mode 100644
index b8ae83498d1be593b8b47bae090759dd80cec7be..0000000000000000000000000000000000000000
GIT binary patch
[base85-encoded binary data omitted]
[GIT binary patch data elided]

diff --git a/src/assembler.rs b/src/assembler.rs
index 40f58ba3..0737a6be 100644
--- a/src/assembler.rs
+++ b/src/assembler.rs
@@ -293,7 +293,7 @@ fn resolve_label(
 /// # Examples
 ///
 /// ```
-/// use solana_rbpf::{assembler::assemble, program::BuiltinProgram, vm::{Config, TestContextObject}};
+/// use solana_sbpf::{assembler::assemble, program::BuiltinProgram, vm::{Config, TestContextObject}};
 /// let executable = assemble::<TestContextObject>(
 ///    "add64 r1, 0x605
 ///     mov64 r2, 0x32
diff --git a/src/ebpf.rs b/src/ebpf.rs
index 35bdcb36..be9bfc7e 100644
--- a/src/ebpf.rs
+++ b/src/ebpf.rs
@@ -208,7 +208,7 @@ pub const BPF_JSLT: u8 = 0xc0;
 pub const BPF_JSLE: u8 = 0xd0;
 
 // Op codes
-// (Following operation names are not “official”, but may be proper to rbpf; Linux kernel only
+// (Following operation names are not “official”, but may be proper to sbpf; Linux kernel only
 // combines above flags and does not attribute a name per operation.)
 /// BPF opcode: `lddw dst, imm`
 /// `dst = imm`. [DEPRECATED]
@@ -531,7 +531,7 @@ impl Insn {
     /// # Examples
     ///
     /// ```
-    /// use solana_rbpf::ebpf;
+    /// use solana_sbpf::ebpf;
     ///
     /// let prog: &[u8] = &[
     ///     0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78,
@@ -564,7 +564,7 @@ impl Insn {
     /// # Examples
     ///
     /// ```
-    /// use solana_rbpf::ebpf;
+    /// use solana_sbpf::ebpf;
     ///
     /// let prog: Vec<u8> = vec![
     ///     0xb7, 0x12, 0x56, 0x34, 0xde, 0xbc, 0x9a, 0x78,
@@ -595,7 +595,7 @@ impl Insn {
 /// # Examples
 ///
 /// ```
-/// use solana_rbpf::ebpf;
+/// use solana_sbpf::ebpf;
 ///
 /// let prog = &[
 ///     0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -608,7 +608,7 @@ impl Insn {
 /// The example below will panic, since the last instruction is not complete and cannot be loaded.
 ///
 /// ```rust,should_panic
-/// use solana_rbpf::ebpf;
+/// use solana_sbpf::ebpf;
 ///
 /// let prog = &[
 ///     0xb7, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
diff --git a/src/lib.rs b/src/lib.rs
index 573d8810..3e1c6176 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -9,12 +9,8 @@
 // the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-//! Virtual machine and JIT compiler for eBPF programs.
+//! Virtual machine for SBPF programs.
 
 #![warn(missing_docs)]
-#![doc(
-    html_logo_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.png",
-    html_favicon_url = "https://raw.githubusercontent.com/qmonnet/rbpf/master/misc/rbpf.ico"
-)]
 #![deny(clippy::arithmetic_side_effects)]
 #![deny(clippy::ptr_as_ptr)]
diff --git a/src/syscalls.rs b/src/syscalls.rs
index a8d398db..1ef007b5 100644
--- a/src/syscalls.rs
+++ b/src/syscalls.rs
@@ -15,7 +15,7 @@
 //!
 //! * Some of them mimic the syscalls available in the Linux kernel.
 //! * Some of them were proposed as example syscalls in uBPF and they were adapted here.
-//! * Other syscalls may be specific to rbpf.
+//! * Other syscalls may be specific to sbpf.
 //!
 //! The prototype for syscalls is always the same: five `u64` as arguments, and a `u64` as a return
 //! value. Hence some syscalls have unused arguments, or return a 0 value in all cases, in order to
diff --git a/src/vm.rs b/src/vm.rs
index 3afa01b6..e1312c94 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -232,7 +232,7 @@ pub struct CallFrame {
 /// # Examples
 ///
 /// ```
-/// use solana_rbpf::{
+/// use solana_sbpf::{
 ///     aligned_memory::AlignedMemory,
 ///     ebpf,
 ///     elf::Executable,
diff --git a/test_utils/Cargo.lock b/test_utils/Cargo.lock
index a546bc6d..fe0d0385 100644
--- a/test_utils/Cargo.lock
+++ b/test_utils/Cargo.lock
@@ -153,8 +153,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "04c565b551bafbef4157586fa379538366e4385d42082f255bfd96e4fe8519da"
 
 [[package]]
-name = "solana_rbpf"
-version = "0.8.2"
+name = "solana-sbpf"
+version = "0.9.0"
 dependencies = [
  "byteorder",
  "combine",
@@ -181,10 +181,10 @@ dependencies = [
 
 [[package]]
 name = "test_utils"
-version = "0.8.2"
+version = "0.9.0"
 dependencies = [
  "libc",
- "solana_rbpf",
+ "solana-sbpf",
 ]
 
 [[package]]
diff --git a/test_utils/Cargo.toml b/test_utils/Cargo.toml
index 2c1bc4de..f4592b20 100644
--- a/test_utils/Cargo.toml
+++ b/test_utils/Cargo.toml
@@ -1,10 +1,10 @@
 [package]
 name = "test_utils"
-version = "0.8.2"
+version = "0.9.0"
 authors = ["Solana Maintainers <maintainers@solana.foundation>"]
 edition = "2018"
 publish = false
 
 [dependencies]
 libc = "0.2"
-solana_rbpf = { path = "../" }
+solana-sbpf = { path = "../" }
diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs
index 0f8e1c2d..2f1d8df5 100644
--- a/test_utils/src/lib.rs
+++ b/test_utils/src/lib.rs
@@ -8,7 +8,7 @@
 
 #![allow(dead_code)]
 
-use solana_rbpf::{
+use solana_sbpf::{
     aligned_memory::AlignedMemory,
     ebpf::{self, HOST_ALIGN},
     elf::Executable,
@@ -194,10 +194,10 @@ pub fn create_memory_mapping<'a, C: ContextObject>(
 
 #[macro_export]
macro_rules! create_vm {
     ($vm_name:ident, $verified_executable:expr, $context_object:expr, $stack:ident, $heap:ident, $additional_regions:expr, $cow_cb:expr) => {
-        let mut $stack = solana_rbpf::aligned_memory::AlignedMemory::zero_filled(
+        let mut $stack = solana_sbpf::aligned_memory::AlignedMemory::zero_filled(
             $verified_executable.get_config().stack_size(),
         );
-        let mut $heap = solana_rbpf::aligned_memory::AlignedMemory::with_capacity(0);
+        let mut $heap = solana_sbpf::aligned_memory::AlignedMemory::with_capacity(0);
         let stack_len = $stack.len();
         let memory_mapping = test_utils::create_memory_mapping(
             $verified_executable,
@@ -207,7 +207,7 @@ macro_rules! create_vm {
             $cow_cb,
         )
         .unwrap();
-        let mut $vm_name = solana_rbpf::vm::EbpfVm::new(
+        let mut $vm_name = solana_sbpf::vm::EbpfVm::new(
             $verified_executable.get_loader().clone(),
             $verified_executable.get_sbpf_version(),
             $context_object,
diff --git a/tests/assembler.rs b/tests/assembler.rs
index 2ce72f09..6b78613e 100644
--- a/tests/assembler.rs
+++ b/tests/assembler.rs
@@ -5,12 +5,12 @@
 // the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
 
-extern crate solana_rbpf;
+extern crate solana_sbpf;
 extern crate test_utils;
 
-use solana_rbpf::program::{FunctionRegistry, SBPFVersion};
-use solana_rbpf::vm::Config;
-use solana_rbpf::{assembler::assemble, ebpf, program::BuiltinProgram, vm::TestContextObject};
+use solana_sbpf::program::{FunctionRegistry, SBPFVersion};
+use solana_sbpf::vm::Config;
+use solana_sbpf::{assembler::assemble, ebpf, program::BuiltinProgram, vm::TestContextObject};
 use std::sync::Arc;
 use test_utils::{TCP_SACK_ASM, TCP_SACK_BIN};
 
diff --git a/tests/disassembler.rs b/tests/disassembler.rs
index cc55af86..a4a358c8 100644
--- a/tests/disassembler.rs
+++ b/tests/disassembler.rs
@@ -6,9 +6,9 @@
 // the MIT license <http://opensource.org/licenses/MIT>, at your option. This file may not be
 // copied, modified, or distributed except according to those terms.
-extern crate solana_rbpf; -use solana_rbpf::program::SBPFVersion; -use solana_rbpf::{ +extern crate solana_sbpf; +use solana_sbpf::program::SBPFVersion; +use solana_sbpf::{ assembler::assemble, program::{BuiltinProgram, FunctionRegistry}, static_analysis::Analysis, diff --git a/tests/elfs/elfs.sh b/tests/elfs/elfs.sh index 93190d97..25b72b7a 100755 --- a/tests/elfs/elfs.sh +++ b/tests/elfs/elfs.sh @@ -1,7 +1,7 @@ #!/bin/bash -ex # Requires Latest release of Solana's custom LLVM -# https://github.com/solana-labs/platform-tools/releases +# https://github.com/anza-xyz/platform-tools/releases TOOLCHAIN=../../../agave/sdk/sbf/dependencies/platform-tools RC_COMMON="$TOOLCHAIN/rust/bin/rustc --target sbf-solana-solana --crate-type lib -C panic=abort -C opt-level=2" diff --git a/tests/execution.rs b/tests/execution.rs index 22439ffa..bd87ad07 100644 --- a/tests/execution.rs +++ b/tests/execution.rs @@ -8,14 +8,14 @@ extern crate byteorder; extern crate libc; -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate test_utils; extern crate thiserror; use byteorder::{ByteOrder, LittleEndian}; #[cfg(all(not(windows), target_arch = "x86_64"))] use rand::{rngs::SmallRng, RngCore, SeedableRng}; -use solana_rbpf::{ +use solana_sbpf::{ assembler::assemble, declare_builtin_function, ebpf, elf::Executable, @@ -3435,7 +3435,7 @@ fn execute_generated_program(prog: &[u8]) -> bool { || !TestContextObject::compare_trace_log(&tracer_interpreter, tracer_jit) { let analysis = - solana_rbpf::static_analysis::Analysis::from_executable(&executable).unwrap(); + solana_sbpf::static_analysis::Analysis::from_executable(&executable).unwrap(); println!("result_interpreter={result_interpreter:?}"); println!("result_jit={result_jit:?}"); let stdout = std::io::stdout(); diff --git a/tests/exercise_instructions.rs b/tests/exercise_instructions.rs index 2538b305..f8f5a89b 100644 --- a/tests/exercise_instructions.rs +++ b/tests/exercise_instructions.rs @@ -8,12 +8,12 @@ extern crate byteorder; extern crate libc; -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate test_utils; extern crate thiserror; use rand::{rngs::SmallRng, RngCore, SeedableRng}; -use solana_rbpf::{ +use solana_sbpf::{ assembler::assemble, ebpf, memory_region::MemoryRegion, diff --git a/tests/verifier.rs b/tests/verifier.rs index d6ff690d..658d9b5e 100644 --- a/tests/verifier.rs +++ b/tests/verifier.rs @@ -19,10 +19,10 @@ // These are unit tests for the eBPF “verifier”. -extern crate solana_rbpf; +extern crate solana_sbpf; extern crate thiserror; -use solana_rbpf::{ +use solana_sbpf::{ assembler::assemble, ebpf, elf::Executable, From 995c78fda73944c2b230fc1fc85048c84f5a5efb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 19 Dec 2024 19:32:06 +0100 Subject: [PATCH 03/18] Some control flow operations were not tested before. 
(#4) --- src/disassembler.rs | 17 +++++------- src/jit.rs | 61 +++++++++++++++++++++++++++--------------- src/static_analysis.rs | 25 +++++++---------- 3 files changed, 56 insertions(+), 47 deletions(-) diff --git a/src/disassembler.rs b/src/disassembler.rs index b020b374..32748656 100644 --- a/src/disassembler.rs +++ b/src/disassembler.rs @@ -267,19 +267,16 @@ pub fn disassemble_instruction( ebpf::JSLE_REG => { name = "jsle"; desc = jmp_reg_str(name, insn, cfg_nodes); }, ebpf::CALL_IMM => { let key = sbpf_version.calculate_call_imm_target_pc(pc, insn.imm); - let function_name = function_registry.lookup_by_key(key).map(|(function_name, _)| String::from_utf8_lossy(function_name).to_string()); - let function_name = if let Some(function_name) = function_name { - name = "call"; - function_name - } else { + let mut name = "call"; + let mut function_name = function_registry.lookup_by_key(key).map(|(function_name, _)| String::from_utf8_lossy(function_name).to_string()); + if !sbpf_version.static_syscalls() && function_name.is_none() { name = "syscall"; - loader.get_function_registry(sbpf_version).lookup_by_key(insn.imm as u32).map(|(function_name, _)| String::from_utf8_lossy(function_name).to_string()).unwrap_or_else(|| "[invalid]".to_string()) - }; - desc = format!("{name} {function_name}"); + function_name = loader.get_function_registry(sbpf_version).lookup_by_key(insn.imm as u32).map(|(function_name, _)| String::from_utf8_lossy(function_name).to_string()); + } + desc = format!("{} {}", name, function_name.unwrap_or_else(|| "[invalid]".to_string())); }, ebpf::CALL_REG => { name = "callx"; desc = format!("{} r{}", name, if sbpf_version.callx_uses_src_reg() { insn.src } else { insn.imm as u8 }); }, - ebpf::EXIT - | ebpf::RETURN if !sbpf_version.static_syscalls() => { name = "exit"; desc = name.to_string(); }, + ebpf::EXIT if !sbpf_version.static_syscalls() => { name = "exit"; desc = name.to_string(); }, ebpf::RETURN if sbpf_version.static_syscalls() => { name = "return"; desc = name.to_string(); }, ebpf::SYSCALL if sbpf_version.static_syscalls() => { desc = format!("syscall {}", insn.imm); }, diff --git a/src/jit.rs b/src/jit.rs index f151d2c3..d5476f49 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -1719,12 +1719,14 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { mod tests { use super::*; use crate::{ + disassembler::disassemble_instruction, program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, + static_analysis::CfgNode, syscalls, vm::TestContextObject, }; use byteorder::{ByteOrder, LittleEndian}; - use std::sync::Arc; + use std::{collections::BTreeMap, sync::Arc}; #[test] fn test_runtime_environment_slots() { @@ -1838,35 +1840,45 @@ mod tests { <= MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT ); + let mut cfg_nodes = BTreeMap::new(); + cfg_nodes.insert( + 8, + CfgNode { + label: std::string::String::from("label"), + ..CfgNode::default() + }, + ); + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { + println!("opcode;machine_code_length_per_instruction;assembly"); let empty_program_machine_code_length = empty_program_machine_code_length_per_version[sbpf_version as usize]; for mut opcode in 0x00..=0xFF { let (registers, immediate) = match opcode { - 0x85 | 0x8D => (0x88, 8), - 0x86 => { + 0x85 if !sbpf_version.static_syscalls() => (0x00, Some(8)), + 0x85 if sbpf_version.static_syscalls() => (0x00, None), + 0x8D => (0x88, Some(0)), + 0x95 if sbpf_version.static_syscalls() => (0x00, Some(1)), + 0xE5 if !sbpf_version.static_syscalls() => { // Put external function calls on a separate 
loop iteration opcode = 0x85; - (0x00, 0x91020CDD) + (0x00, Some(0x91020CDD)) } - 0x87 => { + 0xF5 => { // Put invalid function calls on a separate loop iteration opcode = 0x85; - (0x88, 0x91020CDD) + (0x00, Some(0x91020CD0)) } - 0x95 => { - // Put a valid syscall - (0, 1) - } - 0xD4 | 0xDC => (0x88, 16), - _ => (0x88, 0xFFFFFFFF), + 0xD4 | 0xDC => (0x88, Some(16)), + _ => (0x88, Some(0x11223344)), }; for pc in 0..INSTRUCTION_COUNT { prog[pc * ebpf::INSN_SIZE] = opcode; prog[pc * ebpf::INSN_SIZE + 1] = registers; - prog[pc * ebpf::INSN_SIZE + 2] = 0xFF; - prog[pc * ebpf::INSN_SIZE + 3] = 0xFF; + let offset = 7_u16.wrapping_sub(pc as u16); + LittleEndian::write_u16(&mut prog[pc * ebpf::INSN_SIZE + 2..], offset); + let immediate = immediate.unwrap_or_else(|| 7_u32.wrapping_sub(pc as u32)); LittleEndian::write_u32(&mut prog[pc * ebpf::INSN_SIZE + 4..], immediate); } let config = Config { @@ -1897,15 +1909,22 @@ mod tests { let machine_code_length_per_instruction = machine_code_length as f64 / instruction_count as f64; assert!( - (machine_code_length_per_instruction + 0.5) as usize + f64::ceil(machine_code_length_per_instruction) as usize <= MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION ); - /*println!("opcode={:02X} machine_code_length_per_instruction={}", opcode, machine_code_length_per_instruction); - let analysis = crate::static_analysis::Analysis::from_executable(&executable).unwrap(); - { - let stdout = std::io::stdout(); - analysis.disassemble(&mut stdout.lock()).unwrap(); - }*/ + let insn = ebpf::get_insn_unchecked(&prog, 0); + let assembly = disassemble_instruction( + &insn, + 0, + &cfg_nodes, + executable.get_function_registry(), + executable.get_loader(), + executable.get_sbpf_version(), + ); + println!( + "{:02X};{:>7.3};{}", + opcode, machine_code_length_per_instruction, assembly + ); } } } diff --git a/src/static_analysis.rs b/src/static_analysis.rs index 89fc8ab7..535fb6ea 100644 --- a/src/static_analysis.rs +++ b/src/static_analysis.rs @@ -230,24 +230,13 @@ impl<'a> Analysis<'a> { self.cfg_nodes.entry(*pc).or_default(); } let mut cfg_edges = BTreeMap::new(); - for insn in self.instructions.iter() { + for (pc, insn) in self.instructions.iter().enumerate() { let target_pc = (insn.ptr as isize + insn.off as isize + 1) as usize; match insn.opc { ebpf::CALL_IMM => { - if let Some((function_name, _function)) = self - .executable - .get_loader() - .get_function_registry(sbpf_version) - .lookup_by_key(insn.imm as u32) - { - if function_name == b"abort" { - self.cfg_nodes.entry(insn.ptr + 1).or_default(); - cfg_edges.insert(insn.ptr, (insn.opc, Vec::new())); - } - } else if let Some((_function_name, target_pc)) = self - .executable - .get_function_registry() - .lookup_by_key(insn.imm as u32) + let key = sbpf_version.calculate_call_imm_target_pc(pc, insn.imm); + if let Some((_function_name, target_pc)) = + self.executable.get_function_registry().lookup_by_key(key) { self.cfg_nodes.entry(insn.ptr + 1).or_default(); self.cfg_nodes.entry(target_pc).or_default(); @@ -269,7 +258,11 @@ impl<'a> Analysis<'a> { }; cfg_edges.insert(insn.ptr, (insn.opc, destinations)); } - ebpf::EXIT => { + ebpf::EXIT if !sbpf_version.static_syscalls() => { + self.cfg_nodes.entry(insn.ptr + 1).or_default(); + cfg_edges.insert(insn.ptr, (insn.opc, Vec::new())); + } + ebpf::RETURN if sbpf_version.static_syscalls() => { self.cfg_nodes.entry(insn.ptr + 1).or_default(); cfg_edges.insert(insn.ptr, (insn.opc, Vec::new())); } From a5cdab3e74ac3948f7e242a0b5becf87fa7138d6 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 20 Dec 2024 17:24:18 +0100 Subject: [PATCH 04/18] Adds sign extension of store immediate value to test_memory_instructions(). (#5) And uses wider verification loads. --- tests/execution.rs | 93 ++++++++++++++++++++++++++++------------------ 1 file changed, 56 insertions(+), 37 deletions(-) diff --git a/tests/execution.rs b/tests/execution.rs index bd87ad07..696e6116 100644 --- a/tests/execution.rs +++ b/tests/execution.rs @@ -977,9 +977,7 @@ fn test_memory_instructions() { ldxw r0, [r1+2] exit", config.clone(), - [ - 0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0xcc, 0xdd, // - ], + [0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0xcc, 0xdd], TestContextObject::new(2), ProgramResult::Ok(0x44332211), ); @@ -988,10 +986,7 @@ fn test_memory_instructions() { ldxdw r0, [r1+2] exit", config.clone(), - [ - 0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, // - 0x77, 0x88, 0xcc, 0xdd, // - ], + [0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], TestContextObject::new(2), ProgramResult::Ok(0x8877665544332211), ); @@ -999,36 +994,62 @@ fn test_memory_instructions() { test_interpreter_and_jit_asm!( " stb [r1+2], 0x11 - ldxb r0, [r1+2] + ldxdw r0, [r1+2] exit", config.clone(), - [0xaa, 0xbb, 0xff, 0xcc, 0xdd], + [0xaa, 0xbb, 0xff, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], TestContextObject::new(3), - ProgramResult::Ok(0x11), + ProgramResult::Ok(0x8877665544332211), + ); + test_interpreter_and_jit_asm!( + " + stb [r1+2], -1 + ldxdw r0, [r1+2] + exit", + config.clone(), + [0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], + TestContextObject::new(3), + ProgramResult::Ok(0x88776655443322FF), ); test_interpreter_and_jit_asm!( " sth [r1+2], 0x2211 - ldxh r0, [r1+2] + ldxdw r0, [r1+2] exit", config.clone(), - [ - 0xaa, 0xbb, 0xff, 0xff, 0xcc, 0xdd, // - ], + [0xaa, 0xbb, 0xff, 0xff, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], TestContextObject::new(3), - ProgramResult::Ok(0x2211), + ProgramResult::Ok(0x8877665544332211), + ); + test_interpreter_and_jit_asm!( + " + sth [r1+2], -1 + ldxdw r0, [r1+2] + exit", + config.clone(), + [0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], + TestContextObject::new(3), + ProgramResult::Ok(0x887766554433FFFF), ); test_interpreter_and_jit_asm!( " stw [r1+2], 0x44332211 - ldxw r0, [r1+2] + ldxdw r0, [r1+2] exit", config.clone(), - [ - 0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd, // - ], + [0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], TestContextObject::new(3), - ProgramResult::Ok(0x44332211), + ProgramResult::Ok(0x8877665544332211), + ); + test_interpreter_and_jit_asm!( + " + stw [r1+2], -1 + ldxdw r0, [r1+2] + exit", + config.clone(), + [0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], + TestContextObject::new(3), + ProgramResult::Ok(0x88776655FFFFFFFF), ); test_interpreter_and_jit_asm!( " @@ -1036,13 +1057,20 @@ fn test_memory_instructions() { ldxdw r0, [r1+2] exit", config.clone(), - [ - 0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, // - 0xff, 0xff, 0xcc, 0xdd, // - ], + [0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd], TestContextObject::new(3), ProgramResult::Ok(0x44332211), ); + test_interpreter_and_jit_asm!( + " + stdw [r1+2], -1 + ldxdw r0, [r1+2] + exit", + config.clone(), + [0xaa, 0xbb, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0xcc, 0xdd], + TestContextObject::new(3), + ProgramResult::Ok(0xFFFFFFFFFFFFFFFF), + ); test_interpreter_and_jit_asm!( " @@ -1051,9 +1079,7 @@ 
fn test_memory_instructions() {
         ldxb r0, [r1+2]
         exit",
         config.clone(),
-        [
-            0xaa, 0xbb, 0xff, 0xcc, 0xdd, //
-        ],
+        [0xaa, 0xbb, 0xff, 0xcc, 0xdd],
         TestContextObject::new(4),
         ProgramResult::Ok(0x11),
     );
@@ -1064,9 +1090,7 @@ fn test_memory_instructions() {
         ldxh r0, [r1+2]
         exit",
         config.clone(),
-        [
-            0xaa, 0xbb, 0xff, 0xff, 0xcc, 0xdd, //
-        ],
+        [0xaa, 0xbb, 0xff, 0xff, 0xcc, 0xdd],
         TestContextObject::new(4),
         ProgramResult::Ok(0x2211),
     );
@@ -1077,9 +1101,7 @@ fn test_memory_instructions() {
         ldxw r0, [r1+2]
         exit",
         config.clone(),
-        [
-            0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd, //
-        ],
+        [0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd],
         TestContextObject::new(4),
         ProgramResult::Ok(0x44332211),
     );
@@ -1092,10 +1114,7 @@ fn test_memory_instructions() {
         ldxdw r0, [r1+2]
         exit",
         config.clone(),
-        [
-            0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, //
-            0xff, 0xff, 0xcc, 0xdd, //
-        ],
+        [0xaa, 0xbb, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xcc, 0xdd],
         TestContextObject::new(6),
         ProgramResult::Ok(0x8877665544332211),
     );

From cad781a3b8f148e1421c706ee2e1bd765f80b437 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?=
Date: Fri, 20 Dec 2024 21:27:09 +0100
Subject: [PATCH 05/18] Refactor - Sanitization of memory accesses in JIT (#6)

* All the immediate values can use the same encryption key. There is no need to generate a new one for each.
* Optimizes memory access instructions.
* Moves second half of emit_sanitized_load_immediate(REGISTER_SCRATCH, vm_addr) into ANCHOR_TRANSLATE_MEMORY_ADDRESS.
* Moves second half of emit_sanitized_load_immediate(stack_slot_of_value_to_store, constant) into ANCHOR_TRANSLATE_MEMORY_ADDRESS.
---
 src/jit.rs | 114 +++++++++++++++++++++++++++--------------------------
 1 file changed, 58 insertions(+), 56 deletions(-)

diff --git a/src/jit.rs b/src/jit.rs
index d5476f49..95bb8840 100644
--- a/src/jit.rs
+++ b/src/jit.rs
@@ -30,7 +30,7 @@ use crate::{
     memory_management::{
         allocate_pages, free_pages, get_system_page_size, protect_pages, round_to_page_size,
     },
-    memory_region::{AccessType, MemoryMapping},
+    memory_region::MemoryMapping,
     program::BuiltinFunction,
     vm::{get_runtime_environment_key, Config, ContextObject, EbpfVm},
     x86::*,
@@ -38,7 +38,7 @@ use crate::{
 const MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH: usize = 4096;
 const MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION: usize = 110;
-const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 23;
+const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 24;
 const MAX_START_PADDING_LENGTH: usize = 256;
 
 pub struct JitProgram {
@@ -200,7 +200,7 @@ const ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE: usize = 12;
 const ANCHOR_INTERNAL_FUNCTION_CALL_REG: usize = 13;
 const ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION: usize = 14;
 const ANCHOR_TRANSLATE_MEMORY_ADDRESS: usize = 21;
-const ANCHOR_COUNT: usize = 30; // Update me when adding or removing anchors
+const ANCHOR_COUNT: usize = 34; // Update me when adding or removing anchors
 
 const REGISTER_MAP: [u8; 11] = [
     CALLER_SAVED_REGISTERS[0], // RAX
@@ -328,6 +328,7 @@ pub struct JitCompiler<'a, C: ContextObject> {
     next_noop_insertion: u32,
     noop_range: Uniform<u32>,
     runtime_environment_key: i32,
+    immediate_value_key: i64,
     diversification_rng: SmallRng,
     stopwatch_is_active: bool,
 }
@@ -365,6 +366,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
         let runtime_environment_key = get_runtime_environment_key();
         let mut diversification_rng =
             SmallRng::from_rng(thread_rng()).map_err(|_| EbpfError::JitNotCompiled)?;
+        let immediate_value_key = diversification_rng.gen::<i64>();
Ok(Self {
             result: JitProgram::new(pc, code_length_estimate)?,
@@ -380,6 +382,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             next_noop_insertion: if config.noop_instruction_rate == 0 { u32::MAX } else { diversification_rng.gen_range(0..config.noop_instruction_rate * 2) },
             noop_range: Uniform::new_inclusive(0, config.noop_instruction_rate * 2),
             runtime_environment_key,
+            immediate_value_key,
             diversification_rng,
             stopwatch_is_active: false,
         })
@@ -873,29 +876,24 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
 
     #[inline]
     fn emit_sanitized_load_immediate(&mut self, destination: u8, value: i64) {
+        let lower_key = self.immediate_value_key as i32 as i64;
         if value >= i32::MIN as i64 && value <= i32::MAX as i64 {
-            let key = self.diversification_rng.gen::<i32>() as i64;
-            self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(key)));
-            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, key, None));
+            self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(lower_key)));
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key)
         } else if value as u64 & u32::MAX as u64 == 0 {
-            let key = self.diversification_rng.gen::<i32>() as i64;
-            self.emit_ins(X86Instruction::load_immediate(destination, value.rotate_right(32).wrapping_sub(key)));
-            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, key, None)); // wrapping_add(key)
+            self.emit_ins(X86Instruction::load_immediate(destination, value.rotate_right(32).wrapping_sub(lower_key)));
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key)
             self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 4, destination, 32, None)); // shift_left(32)
+        } else if destination != REGISTER_SCRATCH {
+            self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(self.immediate_value_key)));
+            self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.immediate_value_key));
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, destination, 0, None)); // wrapping_add(immediate_value_key)
         } else {
-            let key = self.diversification_rng.gen::<i64>();
-            if destination != REGISTER_SCRATCH {
-                self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(key)));
-                self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, key));
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, destination, 0, None));
-            } else {
-                let lower_key = key as i32 as i64;
-                let upper_key = (key >> 32) as i32 as i64;
-                self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(lower_key).rotate_right(32).wrapping_sub(upper_key)));
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, upper_key, None)); // wrapping_add(upper_key)
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 1, destination, 32, None)); // rotate_right(32)
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key)
-            }
+            let upper_key = (self.immediate_value_key >> 32) as i32 as i64;
+            self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(lower_key).rotate_right(32).wrapping_sub(upper_key)));
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, upper_key, None)); // wrapping_add(upper_key)
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 1, destination, 32, None)); //
rotate_right(32) + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) } } @@ -1157,11 +1155,10 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::store(OperandSize::S64, reg, RSP, stack_slot_of_value_to_store)); } Some(Value::Constant64(constant, user_provided)) => { - if user_provided && self.should_sanitize_constant(constant) { - self.emit_sanitized_load_immediate(REGISTER_SCRATCH, constant); - } else { - self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, constant)); - } + debug_assert!(user_provided); + // First half of emit_sanitized_load_immediate(stack_slot_of_value_to_store, constant) + let lower_key = self.immediate_value_key as i32 as i64; + self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, constant.wrapping_sub(lower_key))); self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_SCRATCH, RSP, stack_slot_of_value_to_store)); } _ => {} @@ -1169,19 +1166,16 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { match vm_addr { Value::RegisterPlusConstant64(reg, constant, user_provided) => { - if user_provided && self.should_sanitize_constant(constant) { - self.emit_sanitized_load_immediate(REGISTER_SCRATCH, constant); - } else { - self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, constant)); - } - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, reg, REGISTER_SCRATCH, 0, None)); - }, - Value::Constant64(constant, user_provided) => { - if user_provided && self.should_sanitize_constant(constant) { - self.emit_sanitized_load_immediate(REGISTER_SCRATCH, constant); - } else { - self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, constant)); - } + debug_assert!(user_provided); + // First half of emit_sanitized_load_immediate(REGISTER_SCRATCH, vm_addr) + let lower_key = self.immediate_value_key as i32 as i64; + self.emit_ins(X86Instruction::lea(OperandSize::S64, reg, REGISTER_SCRATCH, Some( + if reg == R12 { + X86IndirectAccess::OffsetIndexShift(constant.wrapping_sub(lower_key) as i32, RSP, 0) + } else { + X86IndirectAccess::Offset(constant.wrapping_sub(lower_key) as i32) + } + ))); }, _ => { #[cfg(debug_assertions)] @@ -1190,8 +1184,12 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } if self.config.enable_address_translation { - let access_type = if value.is_none() { AccessType::Load } else { AccessType::Store }; - let anchor = ANCHOR_TRANSLATE_MEMORY_ADDRESS + len.trailing_zeros() as usize + 4 * (access_type as usize); + let anchor_base = match value { + Some(Value::Register(_reg)) => 4, + Some(Value::Constant64(_constant, _user_provided)) => 8, + _ => 0, + }; + let anchor = ANCHOR_TRANSLATE_MEMORY_ADDRESS + anchor_base + len.trailing_zeros() as usize; self.emit_ins(X86Instruction::push_immediate(OperandSize::S64, self.pc as i32)); self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(anchor, 5))); if let Some(dst) = dst { @@ -1600,20 +1598,18 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_CALL_UNSUPPORTED_INSTRUCTION, 5))); // Translates a vm memory address to a host memory address - for (access_type, len) in &[ - (AccessType::Load, 1i32), - (AccessType::Load, 2i32), - (AccessType::Load, 4i32), - (AccessType::Load, 8i32), - (AccessType::Store, 1i32), - (AccessType::Store, 2i32), - (AccessType::Store, 4i32), - (AccessType::Store, 8i32), + let lower_key = self.immediate_value_key as i32 as i64; + for (anchor_base, len) in &[ + (0, 
1i32), (0, 2i32), (0, 4i32), (0, 8i32),
+            (4, 1i32), (4, 2i32), (4, 4i32), (4, 8i32),
+            (8, 1i32), (8, 2i32), (8, 4i32), (8, 8i32),
         ] {
-            let target_offset = len.trailing_zeros() as usize + 4 * (*access_type as usize);
+            let target_offset = *anchor_base + len.trailing_zeros() as usize;
             self.set_anchor(ANCHOR_TRANSLATE_MEMORY_ADDRESS + target_offset);
+            // Second half of emit_sanitized_load_immediate(REGISTER_SCRATCH, vm_addr)
+            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_SCRATCH, lower_key, None));
             // call MemoryMapping::(load|store) storing the result in RuntimeEnvironmentSlot::ProgramResult
-            if *access_type == AccessType::Load {
+            if *anchor_base == 0 { // AccessType::Load
                 let load = match len {
                     1 => MemoryMapping::load::<u8> as *const u8 as i64,
                     2 => MemoryMapping::load::<u16> as *const u8 as i64,
                     Argument { index: 1, value: Value::RegisterPlusConstant32(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::MemoryMapping), false) },
                     Argument { index: 0, value: Value::RegisterPlusConstant32(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult), false) },
                 ], None);
-            } else {
+            } else { // AccessType::Store
+                if *anchor_base == 8 {
+                    // Second half of emit_sanitized_load_immediate(stack_slot_of_value_to_store, constant)
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, lower_key, Some(X86IndirectAccess::OffsetIndexShift(-96, RSP, 0))));
+                }
                 let store = match len {
                     1 => MemoryMapping::store::<u8> as *const u8 as i64,
                     2 => MemoryMapping::store::<u16> as *const u8 as i64,
             self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_SCRATCH, RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Swap return address and self.pc
             self.emit_ins(X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 6)));
-            // unwrap() the result into REGISTER_SCRATCH
-            self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_SCRATCH, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult) + std::mem::size_of::<u64>() as i32)));
+            if *anchor_base == 0 { // AccessType::Load
+                // unwrap() the result into REGISTER_SCRATCH
+                self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_SCRATCH, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult) + std::mem::size_of::<u64>() as i32)));
+            }
             self.emit_ins(X86Instruction::return_near());
         }
 
From 6e4acb3961df9e61187bbd9ba1f540e99400bfcc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?=
Date: Sun, 22 Dec 2024 20:49:42 +0100
Subject: [PATCH 06/18] Cleanup - JIT x86 registers (#7)

* Removes dst == FRAME_PTR_REG special case.
* Allows encoding of u8 displacement in SIB mode.
* Splits off X86Instruction::alu_immediate().
* Adds X86Register.
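
To illustrate the last two points, here is a minimal, self-contained sketch of
the resulting API shape. It is illustrative only: the enum variants and the
function bodies below are stand-ins, not the actual definitions in src/x86.rs
(which cover all 16 general-purpose registers and emit real REX/ModRM
encodings rather than strings):

    #[derive(Copy, Clone, PartialEq, Eq, Debug)]
    enum X86Register { RAX, RSP, R11 }

    #[derive(Copy, Clone, Debug)]
    enum OperandSize { S64 }

    // Register-to-register ALU operation; `opcode` is the x86 opcode byte
    // (0x01 add, 0x29 sub, 0x31 xor, ...). With the typed enum, a raw u8 can
    // no longer be passed where a register operand is expected.
    fn alu(size: OperandSize, opcode: u8, source: X86Register, destination: X86Register) -> String {
        format!("{size:?}: {opcode:#04x} {source:?}, {destination:?}")
    }

    // ALU operation with an immediate operand; `opcode_extension` selects the
    // operation within the 0x81/0xc1/0xf7 opcode groups (0 add, 5 sub, 4 shl, ...).
    fn alu_immediate(size: OperandSize, opcode: u8, opcode_extension: u8, destination: X86Register, immediate: i64) -> String {
        format!("{size:?}: {opcode:#04x}/{opcode_extension} {immediate:#x}, {destination:?}")
    }

    fn main() {
        // Before the split, a single alu() carried both a source register and
        // an immediate and ignored one of them depending on the opcode; now
        // each call site states which form it means.
        println!("{}", alu(OperandSize::S64, 0x01, X86Register::R11, X86Register::RAX));
        println!("{}", alu_immediate(OperandSize::S64, 0x81, 5, X86Register::RSP, 8));
    }

The point of the split is that alu() no longer takes an immediate parameter
that is ignored in the register-to-register form, and alu_immediate() makes
the opcode-extension-plus-immediate encoding explicit at every call site.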
--- src/jit.rs | 202 ++++++++++++++++++++++++----------------------- src/x86.rs | 225 +++++++++++++++++++++++++++++++---------------------- 2 files changed, 235 insertions(+), 192 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index 95bb8840..2e77dbbe 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -33,7 +33,11 @@ use crate::{ memory_region::MemoryMapping, program::BuiltinFunction, vm::{get_runtime_environment_key, Config, ContextObject, EbpfVm}, - x86::*, + x86::{ + FenceType, X86IndirectAccess, X86Instruction, + X86Register::{self, *}, + ARGUMENT_REGISTERS, CALLEE_SAVED_REGISTERS, CALLER_SAVED_REGISTERS, + }, }; const MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH: usize = 4096; @@ -202,7 +206,7 @@ const ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION: usize = 14; const ANCHOR_TRANSLATE_MEMORY_ADDRESS: usize = 21; const ANCHOR_COUNT: usize = 34; // Update me when adding or removing anchors -const REGISTER_MAP: [u8; 11] = [ +const REGISTER_MAP: [X86Register; 11] = [ CALLER_SAVED_REGISTERS[0], // RAX ARGUMENT_REGISTERS[1], // RSI ARGUMENT_REGISTERS[2], // RDX @@ -217,11 +221,11 @@ const REGISTER_MAP: [u8; 11] = [ ]; /// RDI: Used together with slot_in_vm() -const REGISTER_PTR_TO_VM: u8 = ARGUMENT_REGISTERS[0]; +const REGISTER_PTR_TO_VM: X86Register = ARGUMENT_REGISTERS[0]; /// R10: Program counter limit -const REGISTER_INSTRUCTION_METER: u8 = CALLER_SAVED_REGISTERS[7]; +const REGISTER_INSTRUCTION_METER: X86Register = CALLER_SAVED_REGISTERS[7]; /// R11: Scratch register -const REGISTER_SCRATCH: u8 = CALLER_SAVED_REGISTERS[8]; +const REGISTER_SCRATCH: X86Register = CALLER_SAVED_REGISTERS[8]; #[derive(Copy, Clone, Debug)] pub enum OperandSize { @@ -233,10 +237,10 @@ pub enum OperandSize { } enum Value { - Register(u8), - RegisterIndirect(u8, i32, bool), - RegisterPlusConstant32(u8, i32, bool), - RegisterPlusConstant64(u8, i64, bool), + Register(X86Register), + RegisterIndirect(X86Register, i32, bool), + RegisterPlusConstant32(X86Register, i32, bool), + RegisterPlusConstant64(X86Register, i64, bool), Constant64(i64, bool), } @@ -420,7 +424,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, 0)); } - let dst = if insn.dst == FRAME_PTR_REG as u8 { u8::MAX } else { REGISTER_MAP[insn.dst as usize] }; + let dst = REGISTER_MAP[insn.dst as usize]; let src = REGISTER_MAP[insn.src as usize]; let target_pc = (self.pc as isize + insn.off as isize + 1) as usize; @@ -483,18 +487,18 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::ADD32_IMM => { self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm); if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, None)); // sign extend i32 to i64 } }, ebpf::ADD32_REG => { - self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, 0, None)); + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x01, src, dst, None)); if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, None)); // sign extend i32 to i64 } }, ebpf::SUB32_IMM => { if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { - self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)); + 
self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0xf7, 3, dst, 0, None)); if insn.imm != 0 { self.emit_sanitized_alu(OperandSize::S32, 0x01, 0, dst, insn.imm); } @@ -502,13 +506,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_sanitized_alu(OperandSize::S32, 0x29, 5, dst, insn.imm); } if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, None)); // sign extend i32 to i64 } }, ebpf::SUB32_REG => { - self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, 0, None)); + self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x29, src, dst, None)); if !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, None)); // sign extend i32 to i64 } }, ebpf::MUL32_IMM | ebpf::DIV32_IMM | ebpf::MOD32_IMM if !self.executable.get_sbpf_version().enable_pqr() => @@ -534,14 +538,14 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 2, None); }, ebpf::OR32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x09, 1, dst, insn.imm), - ebpf::OR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x09, src, dst, 0, None)), + ebpf::OR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x09, src, dst, None)), ebpf::AND32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x21, 4, dst, insn.imm), - ebpf::AND32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x21, src, dst, 0, None)), + ebpf::AND32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x21, src, dst, None)), ebpf::LSH32_IMM => self.emit_shift(OperandSize::S32, 4, REGISTER_SCRATCH, dst, Some(insn.imm)), ebpf::LSH32_REG => self.emit_shift(OperandSize::S32, 4, src, dst, None), ebpf::RSH32_IMM => self.emit_shift(OperandSize::S32, 5, REGISTER_SCRATCH, dst, Some(insn.imm)), ebpf::RSH32_REG => self.emit_shift(OperandSize::S32, 5, src, dst, None), - ebpf::NEG32 if !self.executable.get_sbpf_version().disable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0xf7, 3, dst, 0, None)), + ebpf::NEG32 if !self.executable.get_sbpf_version().disable_neg() => self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0xf7, 3, dst, 0, None)), ebpf::LD_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => { self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 4, None); }, @@ -549,7 +553,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_address_translation(Some(dst), Value::RegisterPlusConstant64(src, insn.off as i64, true), 8, None); }, ebpf::XOR32_IMM => self.emit_sanitized_alu(OperandSize::S32, 0x31, 6, dst, insn.imm), - ebpf::XOR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x31, src, dst, 0, None)), + ebpf::XOR32_REG => self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x31, src, dst, None)), ebpf::MOV32_IMM => { if self.should_sanitize_constant(insn.imm) { self.emit_sanitized_load_immediate(dst, insn.imm as u32 as u64 as i64); @@ -569,10 +573,10 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::LE if !self.executable.get_sbpf_version().disable_le() => { match insn.imm { 16 => { - 
self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit } 32 => { - self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, -1, None)); // Mask to 32 bit + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0x81, 4, dst, -1, None)); // Mask to 32 bit } 64 => {} _ => { @@ -584,7 +588,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { match insn.imm { 16 => { self.emit_ins(X86Instruction::bswap(OperandSize::S16, dst)); - self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0x81, 4, dst, 0xffff, None)); // Mask to 16 bit } 32 => self.emit_ins(X86Instruction::bswap(OperandSize::S32, dst)), 64 => self.emit_ins(X86Instruction::bswap(OperandSize::S64, dst)), @@ -596,10 +600,10 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // BPF_ALU64_STORE class ebpf::ADD64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, dst, insn.imm), - ebpf::ADD64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, src, dst, 0, None)), + ebpf::ADD64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, src, dst, None)), ebpf::SUB64_IMM => { if self.executable.get_sbpf_version().swap_sub_reg_imm_operands() { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xf7, 3, dst, 0, None)); if insn.imm != 0 { self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, dst, insn.imm); } @@ -607,7 +611,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_sanitized_alu(OperandSize::S64, 0x29, 5, dst, insn.imm); } } - ebpf::SUB64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, src, dst, 0, None)), + ebpf::SUB64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, src, dst, None)), ebpf::MUL64_IMM | ebpf::DIV64_IMM | ebpf::MOD64_IMM if !self.executable.get_sbpf_version().enable_pqr() => self.emit_product_quotient_remainder( OperandSize::S64, @@ -637,9 +641,9 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 2, Some(Value::Register(src))); }, ebpf::OR64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x09, 1, dst, insn.imm), - ebpf::OR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x09, src, dst, 0, None)), + ebpf::OR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x09, src, dst, None)), ebpf::AND64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x21, 4, dst, insn.imm), - ebpf::AND64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x21, src, dst, 0, None)), + ebpf::AND64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x21, src, dst, None)), ebpf::LSH64_IMM => self.emit_shift(OperandSize::S64, 4, REGISTER_SCRATCH, dst, Some(insn.imm)), ebpf::LSH64_REG => self.emit_shift(OperandSize::S64, 4, src, dst, None), ebpf::RSH64_IMM => self.emit_shift(OperandSize::S64, 5, REGISTER_SCRATCH, dst, Some(insn.imm)), @@ -647,7 +651,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::ST_4B_IMM if self.executable.get_sbpf_version().move_memory_instruction_classes() => { self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, Some(Value::Constant64(insn.imm, true))); }, - ebpf::NEG64 if 
!self.executable.get_sbpf_version().disable_neg() => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, dst, 0, None)), + ebpf::NEG64 if !self.executable.get_sbpf_version().disable_neg() => self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xf7, 3, dst, 0, None)), ebpf::ST_4B_REG if self.executable.get_sbpf_version().move_memory_instruction_classes() => { self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 4, Some(Value::Register(src))); }, @@ -658,7 +662,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_address_translation(None, Value::RegisterPlusConstant64(dst, insn.off as i64, true), 8, Some(Value::Register(src))); }, ebpf::XOR64_IMM => self.emit_sanitized_alu(OperandSize::S64, 0x31, 6, dst, insn.imm), - ebpf::XOR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x31, src, dst, 0, None)), + ebpf::XOR64_REG => self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x31, src, dst, None)), ebpf::MOV64_IMM => { if self.should_sanitize_constant(insn.imm) { self.emit_sanitized_load_immediate(dst, insn.imm); @@ -788,7 +792,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::conditional_jump_immediate(0x84, self.relative_to_anchor(ANCHOR_EXIT, 6))); // else decrement and update env.call_depth - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); // env.call_depth -= 1; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); // env.call_depth -= 1; // and return self.emit_profile_instruction_count(Some(0)); @@ -875,38 +879,38 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } #[inline] - fn emit_sanitized_load_immediate(&mut self, destination: u8, value: i64) { + fn emit_sanitized_load_immediate(&mut self, destination: X86Register, value: i64) { let lower_key = self.immediate_value_key as i32 as i64; if value >= i32::MIN as i64 && value <= i32::MAX as i64 { self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(lower_key))); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) } else if value as u64 & u32::MAX as u64 == 0 { self.emit_ins(X86Instruction::load_immediate(destination, value.rotate_right(32).wrapping_sub(lower_key))); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 4, destination, 32, None)); // shift_left(32) + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xc1, 4, destination, 32, None)); // shift_left(32) } else if destination != REGISTER_SCRATCH { self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(self.immediate_value_key))); self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.immediate_value_key)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, destination, 0, None)); // wrapping_add(immediate_value_key) + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, destination, None)); // wrapping_add(immediate_value_key) } else { let upper_key = 
(self.immediate_value_key >> 32) as i32 as i64; self.emit_ins(X86Instruction::load_immediate(destination, value.wrapping_sub(lower_key).rotate_right(32).wrapping_sub(upper_key))); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, upper_key, None)); // wrapping_add(upper_key) - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 1, destination, 32, None)); // rotate_right(32) - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, destination, upper_key, None)); // wrapping_add(upper_key) + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xc1, 1, destination, 32, None)); // rotate_right(32) + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, destination, lower_key, None)); // wrapping_add(lower_key) } } #[inline] - fn emit_sanitized_alu(&mut self, size: OperandSize, opcode: u8, opcode_extension: u8, destination: u8, immediate: i64) { + fn emit_sanitized_alu(&mut self, size: OperandSize, opcode: u8, opcode_extension: u8, destination: X86Register, immediate: i64) { if self.should_sanitize_constant(immediate) { self.emit_sanitized_load_immediate(REGISTER_SCRATCH, immediate); - self.emit_ins(X86Instruction::alu(size, opcode, REGISTER_SCRATCH, destination, 0, None)); + self.emit_ins(X86Instruction::alu(size, opcode, REGISTER_SCRATCH, destination, None)); } else if immediate >= i32::MIN as i64 && immediate <= i32::MAX as i64 { - self.emit_ins(X86Instruction::alu(size, 0x81, opcode_extension, destination, immediate, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0x81, opcode_extension, destination, immediate, None)); } else { self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, immediate)); - self.emit_ins(X86Instruction::alu(size, opcode, REGISTER_SCRATCH, destination, 0, None)); + self.emit_ins(X86Instruction::alu(size, opcode, REGISTER_SCRATCH, destination, None)); } } @@ -919,13 +923,13 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::fence(FenceType::Load)); // lfence self.emit_ins(X86Instruction::cycle_count()); // rdtsc self.emit_ins(X86Instruction::fence(FenceType::Load)); // lfence - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 4, RDX, 32, None)); // RDX <<= 32; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x09, RDX, RAX, 0, None)); // RAX |= RDX; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xc1, 4, RDX, 32, None)); // RDX <<= 32; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x09, RDX, RAX, None)); // RAX |= RDX; if begin { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, RAX, REGISTER_PTR_TO_VM, 0, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator))))); // *numerator -= RAX; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, RAX, REGISTER_PTR_TO_VM, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator))))); // *numerator -= RAX; } else { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, RAX, REGISTER_PTR_TO_VM, 0, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator))))); // *numerator += RAX; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, 1, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchDenominator))))); // *denominator += 1; + 
self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, RAX, REGISTER_PTR_TO_VM, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchNumerator))))); // *numerator += RAX;
+            self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, 1, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::StopwatchDenominator))))); // *denominator += 1;
         }
         self.emit_ins(X86Instruction::pop(RAX));
         self.emit_ins(X86Instruction::pop(RDX));
@@ -956,7 +960,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, target_pc as i64 - self.pc as i64 - 1); // instruction_meter += target_pc - (self.pc + 1);
             },
             None => {
-                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter += target_pc;
+                self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // instruction_meter += target_pc;
             self.emit_sanitized_alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1); // instruction_meter -= self.pc + 1;
             },
         }
@@ -970,9 +974,9 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
             self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1 - target_pc); // instruction_meter += (self.pc + 1) - target_pc;
                 }
                 Value::Register(target_pc) => {
-                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, target_pc, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter -= guest_target_pc
-                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter += 1
-                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter += self.pc
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, target_pc, REGISTER_INSTRUCTION_METER, None)); // instruction_meter -= guest_target_pc
+                    self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter += 1
+                    self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // instruction_meter += self.pc
                 }
                 _ => debug_assert!(false),
             }
@@ -985,7 +989,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
         self.emit_profile_instruction_count(target_pc);
     }
 
-    fn emit_rust_call(&mut self, target: Value, arguments: &[Argument], result_reg: Option<u8>) {
+    fn emit_rust_call(&mut self, target: Value, arguments: &[Argument], result_reg: Option<X86Register>) {
         let mut saved_registers = CALLER_SAVED_REGISTERS.to_vec();
         if let Some(reg) = result_reg {
             if let Some(dst) = saved_registers.iter().position(|x| *x == reg) {
@@ -1001,20 +1005,20 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> {
         // Align RSP to 16 bytes
         self.emit_ins(X86Instruction::push(RSP, None));
         self.emit_ins(X86Instruction::push(RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0))));
-        self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 4, RSP, -16, None));
+        self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 4, RSP, -16, None));
 
         let stack_arguments = arguments.len().saturating_sub(ARGUMENT_REGISTERS.len()) as i64;
         if stack_arguments % 2 != 0 {
             // If we're going to pass an odd number of stack args we need to pad
             // to preserve alignment
-            self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, RSP, 8, None));
self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, RSP, 8, None)); } // Pass arguments for argument in arguments { let is_stack_argument = argument.index >= ARGUMENT_REGISTERS.len(); let dst = if is_stack_argument { - u8::MAX // Never used + RSP // Never used } else { ARGUMENT_REGISTERS[argument.index] }; @@ -1029,7 +1033,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { Value::RegisterIndirect(reg, offset, user_provided) => { debug_assert!(!user_provided); if is_stack_argument { - debug_assert_ne!(reg, RSP); + debug_assert!(reg != RSP); self.emit_ins(X86Instruction::push(reg, Some(X86IndirectAccess::Offset(offset)))); } else if reg == RSP { self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, dst, X86IndirectAccess::OffsetIndexShift(offset, RSP, 0))); @@ -1041,7 +1045,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { debug_assert!(!user_provided); if is_stack_argument { self.emit_ins(X86Instruction::push(reg, None)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, offset as i64, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, offset as i64, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); } else if reg == RSP { self.emit_ins(X86Instruction::lea(OperandSize::S64, RSP, dst, Some(X86IndirectAccess::OffsetIndexShift(offset, RSP, 0)))); } else { @@ -1052,10 +1056,10 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { debug_assert!(!user_provided); if is_stack_argument { self.emit_ins(X86Instruction::push(reg, None)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, offset, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, offset, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); } else { self.emit_ins(X86Instruction::load_immediate(dst, offset)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, reg, dst, 0, None)); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, reg, dst, None)); } }, Value::Constant64(value, user_provided) => { @@ -1086,7 +1090,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } // Restore registers from stack - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, if stack_arguments % 2 != 0 { stack_arguments + 1 } else { stack_arguments } * 8, None)); self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, RSP, X86IndirectAccess::OffsetIndexShift(8, RSP, 0))); @@ -1146,7 +1150,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } #[inline] - fn emit_address_translation(&mut self, dst: Option, vm_addr: Value, len: u64, value: Option) { + fn emit_address_translation(&mut self, dst: Option, vm_addr: Value, len: u64, value: Option) { debug_assert_ne!(dst.is_some(), value.is_some()); let stack_slot_of_value_to_store = X86IndirectAccess::OffsetIndexShift(-112, RSP, 0); @@ -1217,7 +1221,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } #[inline] - fn emit_conditional_branch_reg(&mut self, op: u8, bitwise: bool, first_operand: u8, second_operand: u8, target_pc: usize) { + fn emit_conditional_branch_reg(&mut self, op: u8, bitwise: bool, first_operand: X86Register, second_operand: X86Register, target_pc: usize) { self.emit_validate_and_profile_instruction_count(Some(target_pc)); if bitwise { // Logical self.emit_ins(X86Instruction::test(OperandSize::S64, first_operand, second_operand, None)); @@ -1231,7 +1235,7 @@ 
impl<'a, C: ContextObject> JitCompiler<'a, C> { } #[inline] - fn emit_conditional_branch_imm(&mut self, op: u8, bitwise: bool, immediate: i64, second_operand: u8, target_pc: usize) { + fn emit_conditional_branch_imm(&mut self, op: u8, bitwise: bool, immediate: i64, second_operand: X86Register, target_pc: usize) { self.emit_validate_and_profile_instruction_count(Some(target_pc)); if self.should_sanitize_constant(immediate) { self.emit_sanitized_load_immediate(REGISTER_SCRATCH, immediate); @@ -1251,24 +1255,24 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_undo_profile_instruction_count(Value::Constant64(target_pc as i64, true)); } - fn emit_shift(&mut self, size: OperandSize, opcode_extension: u8, source: u8, destination: u8, immediate: Option) { + fn emit_shift(&mut self, size: OperandSize, opcode_extension: u8, source: X86Register, destination: X86Register, immediate: Option) { if let Some(immediate) = immediate { if self.should_sanitize_constant(immediate) { self.emit_sanitized_load_immediate(source, immediate); } else { - self.emit_ins(X86Instruction::alu(size, 0xc1, opcode_extension, destination, immediate, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xc1, opcode_extension, destination, immediate, None)); return; } } if let OperandSize::S32 = size { - self.emit_ins(X86Instruction::alu(OperandSize::S32, 0x81, 4, destination, -1, None)); // Mask to 32 bit + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0x81, 4, destination, -1, None)); // Mask to 32 bit } if source == RCX { if destination == RCX { - self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, destination, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, destination, 0, None)); } else { self.emit_ins(X86Instruction::push(RCX, None)); - self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, destination, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, destination, 0, None)); self.emit_ins(X86Instruction::pop(RCX)); } } else if destination == RCX { @@ -1276,7 +1280,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::push(source, None)); } self.emit_ins(X86Instruction::xchg(OperandSize::S64, source, RCX, None)); - self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, source, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, source, 0, None)); self.emit_ins(X86Instruction::mov(OperandSize::S64, source, RCX)); if source != REGISTER_SCRATCH { self.emit_ins(X86Instruction::pop(source)); @@ -1284,7 +1288,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } else { self.emit_ins(X86Instruction::push(RCX, None)); self.emit_ins(X86Instruction::mov(OperandSize::S64, source, RCX)); - self.emit_ins(X86Instruction::alu(size, 0xd3, opcode_extension, destination, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, destination, 0, None)); self.emit_ins(X86Instruction::pop(RCX)); } } @@ -1296,8 +1300,8 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { alt_dst: bool, division: bool, signed: bool, - src: u8, - dst: u8, + src: X86Register, + dst: X86Register, imm: Option, ) { // LMUL UHMUL SHMUL UDIV SDIV UREM SREM @@ -1357,11 +1361,11 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { if signed { self.emit_ins(X86Instruction::sign_extend_rax_rdx(size)); } else { - self.emit_ins(X86Instruction::alu(size, 0x31, RDX, RDX, 0, None)); // RDX = 0 + self.emit_ins(X86Instruction::alu(size, 0x31, RDX, RDX, 
None)); // RDX = 0 } } - self.emit_ins(X86Instruction::alu(size, 0xf7, 0x4 | (division as u8) << 1 | signed as u8, REGISTER_SCRATCH, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xf7, 0x4 | (division as u8) << 1 | signed as u8, REGISTER_SCRATCH, 0, None)); if dst != RDX { if alt_dst { @@ -1377,7 +1381,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } if let OperandSize::S32 = size { if signed && !self.executable.get_sbpf_version().explicit_sign_extension_of_results() { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, 0, None)); // sign extend i32 to i64 + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x63, dst, dst, None)); // sign extend i32 to i64 } } } @@ -1390,7 +1394,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::store_immediate(OperandSize::S64, REGISTER_MAP[0], X86IndirectAccess::Offset(std::mem::size_of::() as i32), err_kind as i64)); // err.kind = err_kind; } - fn emit_result_is_err(&mut self, destination: u8) { + fn emit_result_is_err(&mut self, destination: X86Register) { let ok = ProgramResult::Ok(0); let ok_discriminant = ok.discriminant(); self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, destination, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult))))); @@ -1407,15 +1411,15 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::push(*reg, None)); } self.emit_ins(X86Instruction::mov(OperandSize::S64, RSP, REGISTER_MAP[0])); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, - 8 * 3, None)); // RSP -= 8 * 3; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, - 8 * 3, None)); // RSP -= 8 * 3; self.emit_rust_call(Value::Constant64(C::trace as *const u8 as i64, false), &[ Argument { index: 1, value: Value::Register(REGISTER_MAP[0]) }, // registers Argument { index: 0, value: Value::RegisterIndirect(REGISTER_PTR_TO_VM, self.slot_in_vm(RuntimeEnvironmentSlot::ContextObjectPointer), false) }, ], None); // Pop stack and return - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, 8 * 3, None)); // RSP += 8 * 3; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, 8 * 3, None)); // RSP += 8 * 3; self.emit_ins(X86Instruction::pop(REGISTER_MAP[0])); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, 8 * (REGISTER_MAP.len() - 1) as i64, None)); // RSP += 8 * (REGISTER_MAP.len() - 1); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, 8 * (REGISTER_MAP.len() - 1) as i64, None)); // RSP += 8 * (REGISTER_MAP.len() - 1); self.emit_ins(X86Instruction::pop(REGISTER_SCRATCH)); self.emit_ins(X86Instruction::return_near()); } @@ -1423,11 +1427,11 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // Epilogue self.set_anchor(ANCHOR_EPILOGUE); if self.config.enable_instruction_meter { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // REGISTER_INSTRUCTION_METER -= 1; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // REGISTER_INSTRUCTION_METER -= pc; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // REGISTER_INSTRUCTION_METER -= 1; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // REGISTER_INSTRUCTION_METER -= pc; // *DueInsnCount = 
*PreviousInstructionMeter - REGISTER_INSTRUCTION_METER; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2B, REGISTER_INSTRUCTION_METER, REGISTER_PTR_TO_VM, 0, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::PreviousInstructionMeter))))); // REGISTER_INSTRUCTION_METER -= *PreviousInstructionMeter; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xf7, 3, REGISTER_INSTRUCTION_METER, 0, None)); // REGISTER_INSTRUCTION_METER = -REGISTER_INSTRUCTION_METER; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2B, REGISTER_INSTRUCTION_METER, REGISTER_PTR_TO_VM, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::PreviousInstructionMeter))))); // REGISTER_INSTRUCTION_METER -= *PreviousInstructionMeter; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xf7, 3, REGISTER_INSTRUCTION_METER, 0, None)); // REGISTER_INSTRUCTION_METER = -REGISTER_INSTRUCTION_METER; self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_INSTRUCTION_METER, REGISTER_PTR_TO_VM, X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::DueInsnCount)))); // *DueInsnCount = REGISTER_INSTRUCTION_METER; } // Print stop watch value @@ -1458,11 +1462,11 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // Quit gracefully self.set_anchor(ANCHOR_EXIT); if self.config.enable_instruction_meter { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // REGISTER_INSTRUCTION_METER -= pc; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // REGISTER_INSTRUCTION_METER -= pc; } self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_SCRATCH, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult))))); self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_MAP[0], REGISTER_SCRATCH, X86IndirectAccess::Offset(std::mem::size_of::() as i32))); // result.return_value = R0; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x31, REGISTER_SCRATCH, REGISTER_SCRATCH, 0, None)); // REGISTER_SCRATCH ^= REGISTER_SCRATCH; // REGISTER_SCRATCH = 0; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x31, REGISTER_SCRATCH, REGISTER_SCRATCH, None)); // REGISTER_SCRATCH ^= REGISTER_SCRATCH; // REGISTER_SCRATCH = 0; self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_EPILOGUE, 5))); // Handler for exceptions which report their pc @@ -1530,7 +1534,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // Routine for prologue of emit_internal_call() self.set_anchor(ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE); self.emit_validate_instruction_count(None); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, RSP, 8 * (SCRATCH_REGS + 1) as i64, None)); // alloca + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, RSP, 8 * (SCRATCH_REGS + 1) as i64, None)); // alloca self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_SCRATCH, RSP, X86IndirectAccess::OffsetIndexShift(0, RSP, 0))); // Save original REGISTER_SCRATCH self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, REGISTER_SCRATCH, X86IndirectAccess::OffsetIndexShift(8 * (SCRATCH_REGS + 1) as i32, RSP, 0))); // Load return address for (i, reg) in REGISTER_MAP.iter().skip(FIRST_SCRATCH_REG).take(SCRATCH_REGS).enumerate() { @@ -1541,7 +1545,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_SCRATCH, RSP, 
Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Push return address and restore original REGISTER_SCRATCH // Increase env.call_depth let call_depth_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::CallDepth)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); // env.call_depth += 1; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); // env.call_depth += 1; // If env.call_depth == self.config.max_call_depth, throw CallDepthExceeded self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S32, REGISTER_PTR_TO_VM, self.config.max_call_depth as i64, Some(call_depth_access))); self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_DEPTH_EXCEEDED, 6))); @@ -1549,7 +1553,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { if !self.executable.get_sbpf_version().dynamic_stack_frames() { // With fixed frames we start the new frame at the next fixed offset let stack_frame_size = self.config.stack_frame_size as i64 * if self.config.enable_stack_frame_gaps { 2 } else { 1 }; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_MAP[FRAME_PTR_REG], stack_frame_size, None)); // REGISTER_MAP[FRAME_PTR_REG] += stack_frame_size; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_MAP[FRAME_PTR_REG], stack_frame_size, None)); // REGISTER_MAP[FRAME_PTR_REG] += stack_frame_size; } self.emit_ins(X86Instruction::return_near()); @@ -1560,29 +1564,29 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::push(REGISTER_MAP[0], None)); // Calculate offset relative to program_vm_addr self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.program_vm_addr as i64)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_MAP[0], REGISTER_SCRATCH, 0, None)); // guest_target_address -= self.program_vm_addr; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_MAP[0], REGISTER_SCRATCH, None)); // guest_target_address -= self.program_vm_addr; // Force alignment of guest_target_address - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 4, REGISTER_SCRATCH, !(INSN_SIZE as i64 - 1), None)); // guest_target_address &= !(INSN_SIZE - 1); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 4, REGISTER_SCRATCH, !(INSN_SIZE as i64 - 1), None)); // guest_target_address &= !(INSN_SIZE - 1); // Bound check // if(guest_target_address >= number_of_instructions * INSN_SIZE) throw CALL_OUTSIDE_TEXT_SEGMENT; let number_of_instructions = self.result.pc_section.len(); self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_SCRATCH, (number_of_instructions * INSN_SIZE) as i64, None)); // guest_target_address.cmp(number_of_instructions * INSN_SIZE) self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT, 6))); // First half of self.emit_profile_instruction_count(None); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2b, REGISTER_INSTRUCTION_METER, RSP, 0, Some(X86IndirectAccess::OffsetIndexShift(-8, RSP, 0)))); // instruction_meter -= guest_current_pc; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter -= 1; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2b, REGISTER_INSTRUCTION_METER, RSP, 
Some(X86IndirectAccess::OffsetIndexShift(-8, RSP, 0)))); // instruction_meter -= guest_current_pc; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter -= 1; // Load host target_address from self.result.pc_section debug_assert_eq!(INSN_SIZE, 8); // Because the instruction size is also the slot size we do not need to shift the offset self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64)); // host_target_address = self.result.pc_section; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_MAP[0], 0, None)); // host_target_address += guest_target_address; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_MAP[0], None)); // host_target_address += guest_target_address; self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::Offset(0))); // host_target_address = self.result.pc_section[host_target_address / 8]; // Calculate the guest_target_pc (dst / INSN_SIZE) to update REGISTER_INSTRUCTION_METER // and as target_pc for potential ANCHOR_CALL_UNSUPPORTED_INSTRUCTION let shift_amount = INSN_SIZE.trailing_zeros(); debug_assert_eq!(INSN_SIZE, 1 << shift_amount); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0xc1, 5, REGISTER_SCRATCH, shift_amount as i64, None)); // guest_target_pc /= INSN_SIZE; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xc1, 5, REGISTER_SCRATCH, shift_amount as i64, None)); // guest_target_pc /= INSN_SIZE; // Second half of self.emit_profile_instruction_count(None); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, 0, None)); // instruction_meter += guest_target_pc; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // instruction_meter += guest_target_pc; // Restore the clobbered REGISTER_MAP[0] self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_MAP[0], RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Swap REGISTER_MAP[0] and host_target_address self.emit_ins(X86Instruction::return_near()); // Tail call to host_target_address @@ -1607,7 +1611,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { let target_offset = *anchor_base + len.trailing_zeros() as usize; self.set_anchor(ANCHOR_TRANSLATE_MEMORY_ADDRESS + target_offset); // Second half of emit_sanitized_load_immediate(REGISTER_SCRATCH, vm_addr) - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, REGISTER_SCRATCH, lower_key, None)); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_SCRATCH, lower_key, None)); // call MemoryMapping::(load|store) storing the result in RuntimeEnvironmentSlot::ProgramResult if *anchor_base == 0 { // AccessType::Load let load = match len { @@ -1626,7 +1630,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } else { // AccessType::Store if *anchor_base == 8 { // Second half of emit_sanitized_load_immediate(stack_slot_of_value_to_store, constant) - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x81, 0, RSP, lower_key, Some(X86IndirectAccess::OffsetIndexShift(-96, RSP, 0)))); + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, RSP, lower_key, Some(X86IndirectAccess::OffsetIndexShift(-96, RSP, 0)))); } let store = match len { 1 => MemoryMapping::store:: as *const u8 as i64, diff --git a/src/x86.rs b/src/x86.rs index 
1c82619f..9ae32994 100644 --- a/src/x86.rs +++ b/src/x86.rs @@ -13,28 +13,34 @@ macro_rules! exclude_operand_sizes { } } -pub const RAX: u8 = 0; -pub const RCX: u8 = 1; -pub const RDX: u8 = 2; -pub const RBX: u8 = 3; -pub const RSP: u8 = 4; -pub const RBP: u8 = 5; -pub const RSI: u8 = 6; -pub const RDI: u8 = 7; -pub const R8: u8 = 8; -pub const R9: u8 = 9; -pub const R10: u8 = 10; -pub const R11: u8 = 11; -pub const R12: u8 = 12; -pub const R13: u8 = 13; -pub const R14: u8 = 14; -pub const R15: u8 = 15; +#[allow(clippy::upper_case_acronyms)] +#[derive(Copy, Clone, PartialEq, Eq)] +#[repr(u8)] +pub enum X86Register { + RAX = 0, + RCX = 1, + RDX = 2, + RBX = 3, + RSP = 4, + RBP = 5, + RSI = 6, + RDI = 7, + R8 = 8, + R9 = 9, + R10 = 10, + R11 = 11, + R12 = 12, + R13 = 13, + R14 = 14, + R15 = 15, +} +use X86Register::*; // System V AMD64 ABI // Works on: Linux, macOS, BSD and Solaris but not on Windows -pub const ARGUMENT_REGISTERS: [u8; 6] = [RDI, RSI, RDX, RCX, R8, R9]; -pub const CALLER_SAVED_REGISTERS: [u8; 9] = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11]; -pub const CALLEE_SAVED_REGISTERS: [u8; 6] = [RBP, RBX, R12, R13, R14, R15]; +pub const ARGUMENT_REGISTERS: [X86Register; 6] = [RDI, RSI, RDX, RCX, R8, R9]; +pub const CALLER_SAVED_REGISTERS: [X86Register; 9] = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11]; +pub const CALLEE_SAVED_REGISTERS: [X86Register; 6] = [RBP, RBX, R12, R13, R14, R15]; struct X86Rex { w: bool, @@ -60,7 +66,7 @@ pub enum X86IndirectAccess { /// [second_operand + offset] Offset(i32), /// [second_operand + offset + index << shift] - OffsetIndexShift(i32, u8, u8), + OffsetIndexShift(i32, X86Register, u8), } #[allow(dead_code)] @@ -125,9 +131,9 @@ impl X86Instruction { match self.indirect { Some(X86IndirectAccess::Offset(offset)) => { displacement = offset; - debug_assert_ne!(self.second_operand & 0b111, RSP); // Reserved for SIB addressing + debug_assert_ne!(self.second_operand & 0b111, 4); // Reserved for SIB addressing if (-128..=127).contains(&displacement) - || (displacement == 0 && self.second_operand & 0b111 == RBP) + || (displacement == 0 && self.second_operand & 0b111 == 5) { displacement_size = OperandSize::S8; modrm.mode = 1; @@ -138,12 +144,17 @@ impl X86Instruction { } Some(X86IndirectAccess::OffsetIndexShift(offset, index, shift)) => { displacement = offset; - displacement_size = OperandSize::S32; - modrm.mode = 2; - modrm.m = RSP; - rex.x = index & 0b1000 != 0; + if (-128..=127).contains(&displacement) { + displacement_size = OperandSize::S8; + modrm.mode = 1; + } else { + displacement_size = OperandSize::S32; + modrm.mode = 2; + } + modrm.m = 4; + rex.x = (index as u8) & 0b1000 != 0; sib.scale = shift & 0b11; - sib.index = index & 0b111; + sib.index = (index as u8) & 0b111; sib.base = self.second_operand & 0b111; } None => { @@ -182,8 +193,28 @@ impl X86Instruction { pub const fn alu( size: OperandSize, opcode: u8, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, + indirect: Option, + ) -> Self { + exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); + Self { + size, + opcode, + first_operand: source as u8, + second_operand: destination as u8, + indirect, + ..X86Instruction::DEFAULT + } + } + + /// Arithmetic or logic + #[inline] + pub const fn alu_immediate( + size: OperandSize, + opcode: u8, + opcode_extension: u8, + destination: X86Register, immediate: i64, indirect: Option, ) -> Self { @@ -191,12 +222,12 @@ impl X86Instruction { Self { size, opcode, - first_operand: source, - 
second_operand: destination, + first_operand: opcode_extension, + second_operand: destination as u8, immediate_size: match opcode { 0xc1 => OperandSize::S8, 0x81 => OperandSize::S32, - 0xf7 if source == 0 => OperandSize::S32, + 0xf7 if opcode_extension == 0 => OperandSize::S32, _ => OperandSize::S0, }, immediate, @@ -207,40 +238,49 @@ impl X86Instruction { /// Move source to destination #[inline] - pub const fn mov(size: OperandSize, source: u8, destination: u8) -> Self { + pub const fn mov(size: OperandSize, source: X86Register, destination: X86Register) -> Self { exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); Self { size, opcode: 0x89, - first_operand: source, - second_operand: destination, + first_operand: source as u8, + second_operand: destination as u8, ..Self::DEFAULT } } /// Move source to destination #[inline] - pub const fn mov_with_sign_extension(size: OperandSize, source: u8, destination: u8) -> Self { + pub const fn mov_with_sign_extension( + size: OperandSize, + source: X86Register, + destination: X86Register, + ) -> Self { exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); Self { size, opcode: 0x63, - first_operand: destination, - second_operand: source, + first_operand: destination as u8, + second_operand: source as u8, ..Self::DEFAULT } } /// Conditionally move source to destination #[inline] - pub const fn cmov(size: OperandSize, condition: u8, source: u8, destination: u8) -> Self { + pub const fn cmov( + size: OperandSize, + condition: u8, + source: X86Register, + destination: X86Register, + ) -> Self { exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8 | OperandSize::S16); Self { size, opcode_escape_sequence: 1, opcode: condition, - first_operand: destination, - second_operand: source, + first_operand: destination as u8, + second_operand: source as u8, ..Self::DEFAULT } } @@ -249,8 +289,8 @@ impl X86Instruction { #[inline] pub const fn xchg( size: OperandSize, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, indirect: Option, ) -> Self { exclude_operand_sizes!( @@ -260,8 +300,8 @@ impl X86Instruction { Self { size, opcode: 0x87, - first_operand: source, - second_operand: destination, + first_operand: source as u8, + second_operand: destination as u8, indirect, ..Self::DEFAULT } @@ -269,13 +309,13 @@ impl X86Instruction { /// Swap byte order of destination #[inline] - pub const fn bswap(size: OperandSize, destination: u8) -> Self { + pub const fn bswap(size: OperandSize, destination: X86Register) -> Self { exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S8); match size { OperandSize::S16 => Self { size, opcode: 0xc1, - second_operand: destination, + second_operand: destination as u8, immediate_size: OperandSize::S8, immediate: 8, ..Self::DEFAULT @@ -283,9 +323,9 @@ impl X86Instruction { OperandSize::S32 | OperandSize::S64 => Self { size, opcode_escape_sequence: 1, - opcode: 0xc8 | (destination & 0b111), + opcode: 0xc8 | ((destination as u8) & 0b111), modrm: false, - second_operand: destination, + second_operand: destination as u8, ..Self::DEFAULT }, _ => unimplemented!(), @@ -296,8 +336,8 @@ impl X86Instruction { #[inline] pub const fn test( size: OperandSize, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, indirect: Option, ) -> Self { exclude_operand_sizes!(size, OperandSize::S0); @@ -308,8 +348,8 @@ impl X86Instruction { } else { 0x85 }, - first_operand: source, - second_operand: destination, + 
first_operand: source as u8, + second_operand: destination as u8, indirect, ..Self::DEFAULT } @@ -319,7 +359,7 @@ impl X86Instruction { #[inline] pub const fn test_immediate( size: OperandSize, - destination: u8, + destination: X86Register, immediate: i64, indirect: Option, ) -> Self { @@ -331,8 +371,8 @@ impl X86Instruction { } else { 0xf7 }, - first_operand: RAX, - second_operand: destination, + first_operand: 0, + second_operand: destination as u8, immediate_size: if let OperandSize::S64 = size { OperandSize::S32 } else { @@ -348,8 +388,8 @@ impl X86Instruction { #[inline] pub const fn cmp( size: OperandSize, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, indirect: Option, ) -> Self { exclude_operand_sizes!(size, OperandSize::S0); @@ -360,8 +400,8 @@ impl X86Instruction { } else { 0x39 }, - first_operand: source, - second_operand: destination, + first_operand: source as u8, + second_operand: destination as u8, indirect, ..Self::DEFAULT } @@ -371,7 +411,7 @@ impl X86Instruction { #[inline] pub const fn cmp_immediate( size: OperandSize, - destination: u8, + destination: X86Register, immediate: i64, indirect: Option, ) -> Self { @@ -383,8 +423,8 @@ impl X86Instruction { } else { 0x81 }, - first_operand: RDI, - second_operand: destination, + first_operand: 7, + second_operand: destination as u8, immediate_size: if let OperandSize::S64 = size { OperandSize::S32 } else { @@ -400,8 +440,8 @@ impl X86Instruction { #[inline] pub const fn lea( size: OperandSize, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, indirect: Option, ) -> Self { exclude_operand_sizes!( @@ -411,8 +451,8 @@ impl X86Instruction { Self { size, opcode: 0x8d, - first_operand: destination, - second_operand: source, + first_operand: destination as u8, + second_operand: source as u8, indirect, ..Self::DEFAULT } @@ -434,8 +474,8 @@ impl X86Instruction { #[inline] pub const fn load( size: OperandSize, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, indirect: X86IndirectAccess, ) -> Self { exclude_operand_sizes!(size, OperandSize::S0); @@ -454,8 +494,8 @@ impl X86Instruction { OperandSize::S16 => 0xb7, _ => 0x8b, }, - first_operand: destination, - second_operand: source, + first_operand: destination as u8, + second_operand: source as u8, indirect: Some(indirect), ..Self::DEFAULT } @@ -465,8 +505,8 @@ impl X86Instruction { #[inline] pub const fn store( size: OperandSize, - source: u8, - destination: u8, + source: X86Register, + destination: X86Register, indirect: X86IndirectAccess, ) -> Self { exclude_operand_sizes!(size, OperandSize::S0); @@ -476,8 +516,8 @@ impl X86Instruction { OperandSize::S8 => 0x88, _ => 0x89, }, - first_operand: source, - second_operand: destination, + first_operand: source as u8, + second_operand: destination as u8, indirect: Some(indirect), ..Self::DEFAULT } @@ -485,7 +525,7 @@ impl X86Instruction { /// Load destination from immediate #[inline] - pub const fn load_immediate(destination: u8, immediate: i64) -> Self { + pub const fn load_immediate(destination: X86Register, immediate: i64) -> Self { let mut size = OperandSize::S64; if immediate >= 0 { if immediate <= u32::MAX as i64 { @@ -497,7 +537,7 @@ impl X86Instruction { return Self { size: OperandSize::S64, opcode: 0xc7, - second_operand: destination, + second_operand: destination as u8, immediate_size: OperandSize::S32, immediate, ..Self::DEFAULT @@ -506,9 +546,9 @@ impl X86Instruction { // Load full u64 imm into u64 reg Self { size, - opcode: 
0xb8 | (destination & 0b111), + opcode: 0xb8 | ((destination as u8) & 0b111), modrm: false, - second_operand: destination, + second_operand: destination as u8, immediate_size: size, immediate, ..Self::DEFAULT @@ -519,7 +559,7 @@ impl X86Instruction { #[inline] pub const fn store_immediate( size: OperandSize, - destination: u8, + destination: X86Register, indirect: X86IndirectAccess, immediate: i64, ) -> Self { @@ -530,7 +570,7 @@ impl X86Instruction { OperandSize::S8 => 0xc6, _ => 0xc7, }, - second_operand: destination, + second_operand: destination as u8, indirect: Some(indirect), immediate_size: if let OperandSize::S64 = size { OperandSize::S32 @@ -543,7 +583,6 @@ impl X86Instruction { } /// Push source onto the stack - #[allow(dead_code)] #[inline] pub const fn push_immediate(size: OperandSize, immediate: i32) -> Self { exclude_operand_sizes!(size, OperandSize::S0 | OperandSize::S16); @@ -566,13 +605,13 @@ impl X86Instruction { /// Push source onto the stack #[inline] - pub const fn push(source: u8, indirect: Option) -> Self { + pub const fn push(source: X86Register, indirect: Option) -> Self { if indirect.is_none() { Self { size: OperandSize::S32, - opcode: 0x50 | (source & 0b111), + opcode: 0x50 | ((source as u8) & 0b111), modrm: false, - second_operand: source, + second_operand: source as u8, ..Self::DEFAULT } } else { @@ -581,7 +620,7 @@ impl X86Instruction { opcode: 0xFF, modrm: true, first_operand: 6, - second_operand: source, + second_operand: source as u8, indirect, ..Self::DEFAULT } @@ -590,12 +629,12 @@ impl X86Instruction { /// Pop from the stack into destination #[inline] - pub const fn pop(destination: u8) -> Self { + pub const fn pop(destination: X86Register) -> Self { Self { size: OperandSize::S32, - opcode: 0x58 | (destination & 0b111), + opcode: 0x58 | ((destination as u8) & 0b111), modrm: false, - second_operand: destination, + second_operand: destination as u8, ..Self::DEFAULT } } @@ -630,12 +669,12 @@ impl X86Instruction { /// Jump to absolute destination #[allow(dead_code)] #[inline] - pub const fn jump_reg(destination: u8, indirect: Option) -> Self { + pub const fn jump_reg(destination: X86Register, indirect: Option) -> Self { Self { size: OperandSize::S64, opcode: 0xff, first_operand: 4, - second_operand: destination, + second_operand: destination as u8, indirect, ..Self::DEFAULT } @@ -656,12 +695,12 @@ impl X86Instruction { /// Push RIP and jump to absolute destination #[inline] - pub const fn call_reg(destination: u8, indirect: Option) -> Self { + pub const fn call_reg(destination: X86Register, indirect: Option) -> Self { Self { size: OperandSize::S64, opcode: 0xff, first_operand: 2, - second_operand: destination, + second_operand: destination as u8, indirect, ..Self::DEFAULT } From d33d5c1e68faf6630453a016a04d079a1341e0db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 30 Dec 2024 21:34:10 +0100 Subject: [PATCH 07/18] Fix - ISA docs `mov dst, imm` (#10) * Swaps `mov32 dst, imm` and `mov64 dst, imm` behavior description. * Makes cargo clippy happy. 
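A minimal sketch of the corrected rows, assuming `imm` is the raw 32-bit
immediate field (consistent with the other rows of the table): `mov32`
zero-extends the immediate, `mov64` sign-extends it.

    // Sketch only, not part of this patch; mirrors the two table entries.
    fn mov32_imm(imm: u32) -> u64 {
        imm as u64 // dst = imm as u64: upper 32 bits of dst are cleared
    }
    fn mov64_imm(imm: u32) -> u64 {
        imm as i32 as i64 as u64 // dst = imm as i32 as i64 as u64: bit 31 is sign-extended
    }
    fn main() {
        assert_eq!(mov32_imm(u32::MAX), 0x0000_0000_ffff_ffff);
        assert_eq!(mov64_imm(u32::MAX), 0xffff_ffff_ffff_ffff);
    }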
--- .github/workflows/main.yml | 4 ++-- clippy.toml | 1 - doc/bytecode.md | 4 ++-- src/insn_builder.rs | 2 +- src/jit.rs | 2 +- 5 files changed, 6 insertions(+), 7 deletions(-) delete mode 100644 clippy.toml diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 68562d26..67af2895 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -31,7 +31,7 @@ jobs: run: | cargo fmt --all -- --check cargo clippy --all --tests -- --deny=warnings - if: matrix.rust == 'nightly' + if: matrix.rust == 'beta' shell: bash - name: Build and test run: | @@ -45,7 +45,7 @@ jobs: run: | cargo fmt --all --manifest-path cli/Cargo.toml -- --check cargo clippy --all --tests --manifest-path cli/Cargo.toml -- --deny=warnings - if: matrix.rust == 'nightly' + if: matrix.rust == 'beta' shell: bash - name: CLI - Build and test run: | diff --git a/clippy.toml b/clippy.toml deleted file mode 100644 index 1d4a2968..00000000 --- a/clippy.toml +++ /dev/null @@ -1 +0,0 @@ -doc-valid-idents = ["eBPF", "uBPF"] diff --git a/doc/bytecode.md b/doc/bytecode.md index c2aa83a7..e42eca18 100644 --- a/doc/bytecode.md +++ b/doc/bytecode.md @@ -114,7 +114,7 @@ The following Rust equivalents assume that: | `9C` / `01111001` | from v2 | `ldxdw dst, [src + off]` | `A4` / `10100100` | all | `xor32 dst, imm` | `dst = (dst as u32).xor(imm) as u64` | `AC` / `10101100` | all | `xor32 dst, src` | `dst = (dst as u32).xor(src as u32) as u64` -| `B4` / `10110100` | all | `mov32 dst, imm` | `dst = imm as i32 as i64 as u64` +| `B4` / `10110100` | all | `mov32 dst, imm` | `dst = imm as u64` | `BC` / `10111100` | until v2 | `mov32 dst, src` | `dst = src as u32 as u64` | `BC` / `10111100` | from v2 | `mov32 dst, src` | `dst = src as i32 as i64 as u64` | `C4` / `11000100` | all | `ash32 dst, imm` | `dst = (dst as i32).wrapping_shr(imm) as u32 as u64` @@ -158,7 +158,7 @@ The following Rust equivalents assume that: | `9F` / `01111011` | from v2 | `stxdw [dst + off], src` | `A7` / `10100111` | all | `xor64 dst, imm` | `dst = dst.xor(imm)` | `AF` / `10101111` | all | `xor64 dst, src` | `dst = dst.xor(src)` -| `B7` / `10110111` | all | `mov64 dst, imm` | `dst = imm as u64` +| `B7` / `10110111` | all | `mov64 dst, imm` | `dst = imm as i32 as i64 as u64` | `BF` / `10111111` | all | `mov64 dst, src` | `dst = src` | `C7` / `11000111` | all | `ash64 dst, imm` | `dst = (dst as i64).wrapping_shr(imm)` | `CF` / `11001111` | all | `ash64 dst, src` | `dst = (dst as i64).wrapping_shr(src as u32)` diff --git a/src/insn_builder.rs b/src/insn_builder.rs index 638cc275..082f351b 100644 --- a/src/insn_builder.rs +++ b/src/insn_builder.rs @@ -89,7 +89,7 @@ impl IntoBytes for &I { fn into_bytes(self) -> Self::Bytes { vec![ self.opt_code_byte(), - self.get_src() << 4 | self.get_dst(), + (self.get_src() << 4) | self.get_dst(), self.get_off() as u8, (self.get_off() >> 8) as u8, self.get_imm() as u8, diff --git a/src/jit.rs b/src/jit.rs index 2e77dbbe..56b9bcb2 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -1365,7 +1365,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } } - self.emit_ins(X86Instruction::alu_immediate(size, 0xf7, 0x4 | (division as u8) << 1 | signed as u8, REGISTER_SCRATCH, 0, None)); + self.emit_ins(X86Instruction::alu_immediate(size, 0xf7, 0x4 | ((division as u8) << 1) | signed as u8, REGISTER_SCRATCH, 0, None)); if dst != RDX { if alt_dst { From 3061507ed430d8868f750e545fedb7f72c82ff79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 30 Dec 2024 22:00:46 +0100 Subject: [PATCH 08/18] Adds 
X86Instruction::mov_mmx(). (#8) --- src/x86.rs | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) diff --git a/src/x86.rs b/src/x86.rs index 9ae32994..e2a1d210 100644 --- a/src/x86.rs +++ b/src/x86.rs @@ -13,7 +13,7 @@ macro_rules! exclude_operand_sizes { } } -#[allow(clippy::upper_case_acronyms)] +#[allow(dead_code, clippy::upper_case_acronyms)] #[derive(Copy, Clone, PartialEq, Eq)] #[repr(u8)] pub enum X86Register { @@ -33,6 +33,14 @@ pub enum X86Register { R13 = 13, R14 = 14, R15 = 15, + MM0 = 16, + MM1 = 17, + MM2 = 18, + MM3 = 19, + MM4 = 20, + MM5 = 21, + MM6 = 22, + MM7 = 23, } use X86Register::*; @@ -266,6 +274,36 @@ impl X86Instruction { } } + /// Move to / from / between MMX (float mantissa) + #[allow(dead_code)] + #[inline] + pub const fn mov_mmx(size: OperandSize, source: X86Register, destination: X86Register) -> Self { + exclude_operand_sizes!( + size, + OperandSize::S0 | OperandSize::S8 | OperandSize::S16 | OperandSize::S32 + ); + if (destination as u8) & 16 != 0 { + // If the destination is a MMX register + Self { + size, + opcode_escape_sequence: 1, + opcode: if (source as u8) & 16 != 0 { 0x6F } else { 0x6E }, + first_operand: (destination as u8) & 0xF, + second_operand: (source as u8) & 0xF, + ..Self::DEFAULT + } + } else { + Self { + size, + opcode_escape_sequence: 1, + opcode: 0x7E, + first_operand: (source as u8) & 0xF, + second_operand: (destination as u8) & 0xF, + ..Self::DEFAULT + } + } + } + /// Conditionally move source to destination #[inline] pub const fn cmov( From e09f77385dfbd464f645b350bbbb95c80f0c34d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 30 Dec 2024 22:00:56 +0100 Subject: [PATCH 09/18] Simplifies emit_shift(). (#9) --- src/jit.rs | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index 56b9bcb2..af90ed4e 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -1257,34 +1257,20 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { fn emit_shift(&mut self, size: OperandSize, opcode_extension: u8, source: X86Register, destination: X86Register, immediate: Option) { if let Some(immediate) = immediate { - if self.should_sanitize_constant(immediate) { - self.emit_sanitized_load_immediate(source, immediate); - } else { - self.emit_ins(X86Instruction::alu_immediate(size, 0xc1, opcode_extension, destination, immediate, None)); - return; - } + self.emit_ins(X86Instruction::alu_immediate(size, 0xc1, opcode_extension, destination, immediate, None)); + return; } if let OperandSize::S32 = size { - self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0x81, 4, destination, -1, None)); // Mask to 32 bit + self.emit_ins(X86Instruction::mov(OperandSize::S32, destination, destination)); // Truncate to 32 bit } if source == RCX { - if destination == RCX { - self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, destination, 0, None)); - } else { - self.emit_ins(X86Instruction::push(RCX, None)); - self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, destination, 0, None)); - self.emit_ins(X86Instruction::pop(RCX)); - } + self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, destination, 0, None)); } else if destination == RCX { - if source != REGISTER_SCRATCH { - self.emit_ins(X86Instruction::push(source, None)); - } + self.emit_ins(X86Instruction::push(source, None)); self.emit_ins(X86Instruction::xchg(OperandSize::S64, source, RCX, None)); 
self.emit_ins(X86Instruction::alu_immediate(size, 0xd3, opcode_extension, source, 0, None)); self.emit_ins(X86Instruction::mov(OperandSize::S64, source, RCX)); - if source != REGISTER_SCRATCH { - self.emit_ins(X86Instruction::pop(source)); - } + self.emit_ins(X86Instruction::pop(source)); } else { self.emit_ins(X86Instruction::push(RCX, None)); self.emit_ins(X86Instruction::mov(OperandSize::S64, source, RCX)); From 085a48816b5ebf1e7a2b78d8c8718e9b44f86219 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 2 Jan 2025 17:58:28 +0100 Subject: [PATCH 10/18] Refactor - Halves memory needed for `pc_section` of JIT compiled programs (#11) * Address offsetting via indirection of the load instruction. * Groups the instructions of `self.emit_profile_instruction_count(None);`. * Stores text_section relative offsets in pc_section instead of absolute addresses. * Stores u32 instead of u64 elements in pc_section. * Spills to MMX register and uses 64 bit load immediate. --- src/jit.rs | 61 ++++++++++++++++++++++++++++-------------------------- 1 file changed, 32 insertions(+), 29 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index af90ed4e..91b27f04 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -48,8 +48,8 @@ const MAX_START_PADDING_LENGTH: usize = 256; pub struct JitProgram { /// OS page size in bytes and the alignment of the sections page_size: usize, - /// A `*const u8` pointer into the text_section for each BPF instruction - pc_section: &'static mut [usize], + /// Byte offset in the text_section for each BPF instruction + pc_section: &'static mut [u32], /// The x86 machinecode text_section: &'static mut [u8], } @@ -57,13 +57,13 @@ pub struct JitProgram { impl JitProgram { fn new(pc: usize, code_size: usize) -> Result { let page_size = get_system_page_size(); - let pc_loc_table_size = round_to_page_size(pc * 8, page_size); + let pc_loc_table_size = round_to_page_size(pc * std::mem::size_of::(), page_size); let over_allocated_code_size = round_to_page_size(code_size, page_size); unsafe { let raw = allocate_pages(pc_loc_table_size + over_allocated_code_size)?; Ok(Self { page_size, - pc_section: std::slice::from_raw_parts_mut(raw.cast::(), pc), + pc_section: std::slice::from_raw_parts_mut(raw.cast::(), pc), text_section: std::slice::from_raw_parts_mut( raw.add(pc_loc_table_size), over_allocated_code_size, @@ -77,7 +77,8 @@ impl JitProgram { return Ok(()); } let raw = self.pc_section.as_ptr() as *mut u8; - let pc_loc_table_size = round_to_page_size(self.pc_section.len() * 8, self.page_size); + let pc_loc_table_size = + round_to_page_size(std::mem::size_of_val(self.pc_section), self.page_size); let over_allocated_code_size = round_to_page_size(self.text_section.len(), self.page_size); let code_size = round_to_page_size(text_section_usage, self.page_size); unsafe { @@ -139,7 +140,7 @@ impl JitProgram { host_stack_pointer = in(reg) &mut vm.host_stack_pointer, inlateout("rdi") std::ptr::addr_of_mut!(*vm).cast::().offset(get_runtime_environment_key() as isize) => _, inlateout("r10") (vm.previous_instruction_meter as i64).wrapping_add(registers[11] as i64) => _, - inlateout("rax") self.pc_section[registers[11] as usize] => _, + inlateout("rax") &self.text_section[self.pc_section[registers[11] as usize] as usize] as *const u8 => _, inlateout("r11") ®isters => _, lateout("rsi") _, lateout("rdx") _, lateout("rcx") _, lateout("r8") _, lateout("r9") _, lateout("r12") _, lateout("r13") _, lateout("r14") _, lateout("r15") _, @@ -153,7 +154,8 @@ impl JitProgram { } pub fn mem_size(&self) 
-> usize { - let pc_loc_table_size = round_to_page_size(self.pc_section.len() * 8, self.page_size); + let pc_loc_table_size = + round_to_page_size(std::mem::size_of_val(self.pc_section), self.page_size); let code_size = round_to_page_size(self.text_section.len(), self.page_size); pc_loc_table_size + code_size } @@ -161,7 +163,8 @@ impl JitProgram { impl Drop for JitProgram { fn drop(&mut self) { - let pc_loc_table_size = round_to_page_size(self.pc_section.len() * 8, self.page_size); + let pc_loc_table_size = + round_to_page_size(std::mem::size_of_val(self.pc_section), self.page_size); let code_size = round_to_page_size(self.text_section.len(), self.page_size); if pc_loc_table_size + code_size > 0 { unsafe { @@ -394,8 +397,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { /// Compiles the given executable, consuming the compiler pub fn compile(mut self) -> Result { - let text_section_base = self.result.text_section.as_ptr(); - // Randomized padding at the start before random intervals begin if self.config.noop_instruction_rate != 0 { for _ in 0..self.diversification_rng.gen_range(0..MAX_START_PADDING_LENGTH) { @@ -411,7 +412,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { return Err(EbpfError::ExhaustedTextSegment(self.pc)); } let mut insn = ebpf::get_insn_unchecked(self.program, self.pc); - self.result.pc_section[self.pc] = unsafe { text_section_base.add(self.offset_in_text_section) } as usize; + self.result.pc_section[self.pc] = self.offset_in_text_section as u32; // Regular instruction meter checkpoints to prevent long linear runs from exceeding their budget if self.last_instruction_meter_validation_pc + self.config.instruction_meter_checkpoint_distance <= self.pc { @@ -432,7 +433,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ebpf::LD_DW_IMM if !self.executable.get_sbpf_version().disable_lddw() => { self.emit_validate_and_profile_instruction_count(Some(self.pc + 2)); self.pc += 1; - self.result.pc_section[self.pc] = self.anchors[ANCHOR_CALL_UNSUPPORTED_INSTRUCTION] as usize; + self.result.pc_section[self.pc] = unsafe { self.anchors[ANCHOR_CALL_UNSUPPORTED_INSTRUCTION].offset_from(self.result.text_section.as_ptr()) as u32 }; ebpf::augment_lddw_unchecked(self.program, &mut insn); if self.should_sanitize_constant(insn.imm) { self.emit_sanitized_load_immediate(dst, insn.imm); @@ -1550,29 +1551,31 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::push(REGISTER_MAP[0], None)); // Calculate offset relative to program_vm_addr self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.program_vm_addr as i64)); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_MAP[0], REGISTER_SCRATCH, None)); // guest_target_address -= self.program_vm_addr; - // Force alignment of guest_target_address - self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 4, REGISTER_SCRATCH, !(INSN_SIZE as i64 - 1), None)); // guest_target_address &= !(INSN_SIZE - 1); + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_MAP[0], REGISTER_SCRATCH, None)); // guest_target_pc = guest_target_address - self.program_vm_addr; + // Force alignment of guest_target_pc + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 4, REGISTER_SCRATCH, !(INSN_SIZE as i64 - 1), None)); // guest_target_pc &= !(INSN_SIZE - 1); // Bound check - // if(guest_target_address >= number_of_instructions * INSN_SIZE) throw CALL_OUTSIDE_TEXT_SEGMENT; + // if(guest_target_pc >= number_of_instructions * INSN_SIZE) throw CALL_OUTSIDE_TEXT_SEGMENT; 
let number_of_instructions = self.result.pc_section.len(); - self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_SCRATCH, (number_of_instructions * INSN_SIZE) as i64, None)); // guest_target_address.cmp(number_of_instructions * INSN_SIZE) + self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_SCRATCH, (number_of_instructions * INSN_SIZE) as i64, None)); // guest_target_pc.cmp(number_of_instructions * INSN_SIZE) self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT, 6))); - // First half of self.emit_profile_instruction_count(None); - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2b, REGISTER_INSTRUCTION_METER, RSP, Some(X86IndirectAccess::OffsetIndexShift(-8, RSP, 0)))); // instruction_meter -= guest_current_pc; - self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter -= 1; - // Load host target_address from self.result.pc_section - debug_assert_eq!(INSN_SIZE, 8); // Because the instruction size is also the slot size we do not need to shift the offset - self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64)); // host_target_address = self.result.pc_section; - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_MAP[0], None)); // host_target_address += guest_target_address; - self.emit_ins(X86Instruction::load(OperandSize::S64, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::Offset(0))); // host_target_address = self.result.pc_section[host_target_address / 8]; // Calculate the guest_target_pc (dst / INSN_SIZE) to update REGISTER_INSTRUCTION_METER // and as target_pc for potential ANCHOR_CALL_UNSUPPORTED_INSTRUCTION let shift_amount = INSN_SIZE.trailing_zeros(); debug_assert_eq!(INSN_SIZE, 1 << shift_amount); self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xc1, 5, REGISTER_SCRATCH, shift_amount as i64, None)); // guest_target_pc /= INSN_SIZE; - // Second half of self.emit_profile_instruction_count(None); + // A version of `self.emit_profile_instruction_count(None);` which reads self.pc from the stack + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2b, REGISTER_INSTRUCTION_METER, RSP, Some(X86IndirectAccess::OffsetIndexShift(-8, RSP, 0)))); // instruction_meter -= guest_current_pc; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter -= 1; self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // instruction_meter += guest_target_pc; + // Load host_target_address offset from self.result.pc_section + self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64)); // host_target_address = self.result.pc_section; + self.emit_ins(X86Instruction::load(OperandSize::S32, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::OffsetIndexShift(0, REGISTER_SCRATCH, 2))); // host_target_address = self.result.pc_section[guest_target_pc]; + // Offset host_target_address by self.result.text_section + self.emit_ins(X86Instruction::mov_mmx(OperandSize::S64, REGISTER_SCRATCH, MM0)); + self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.result.text_section.as_ptr() as i64)); // REGISTER_SCRATCH = self.result.text_section; + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_MAP[0], None)); // 
host_target_address += self.result.text_section; + self.emit_ins(X86Instruction::mov_mmx(OperandSize::S64, MM0, REGISTER_SCRATCH)); // Restore the clobbered REGISTER_MAP[0] self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_MAP[0], RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Swap REGISTER_MAP[0] and host_target_address self.emit_ins(X86Instruction::return_near()); // Tail call to host_target_address @@ -1668,7 +1671,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { let instruction_end = unsafe { self.result.text_section.as_ptr().add(self.offset_in_text_section).add(instruction_length) }; let destination = if self.result.pc_section[target_pc] != 0 { // Backward jump - self.result.pc_section[target_pc] as *const u8 + &self.result.text_section[self.result.pc_section[target_pc] as usize] as *const u8 } else { // Forward jump, needs relocation self.text_section_jumps.push(Jump { location: unsafe { instruction_end.sub(4) }, target_pc }); @@ -1681,14 +1684,14 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { fn resolve_jumps(&mut self) { // Relocate forward jumps for jump in &self.text_section_jumps { - let destination = self.result.pc_section[jump.target_pc] as *const u8; + let destination = &self.result.text_section[self.result.pc_section[jump.target_pc] as usize] as *const u8; let offset_value = unsafe { destination.offset_from(jump.location) } as i32 // Relative jump - mem::size_of::() as i32; // Jump from end of instruction unsafe { ptr::write_unaligned(jump.location as *mut i32, offset_value); } } // Patch addresses to which `callx` may raise an unsupported instruction error - let call_unsupported_instruction = self.anchors[ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION] as usize; + let call_unsupported_instruction = unsafe { self.anchors[ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION].offset_from(self.result.text_section.as_ptr()) as u32 }; if self.executable.get_sbpf_version().static_syscalls() { let mut prev_pc = 0; for current_pc in self.executable.get_function_registry().keys() { From 3997d391f8413191f4be279d8d50ad2d4fe57b27 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Thu, 2 Jan 2025 23:09:29 +0100 Subject: [PATCH 11/18] Refactor - Uses the MSB in `pc_section` to mark invalid call targets (#12) * Uses the MSB in pc_section to mark invalid call targets. * Removes the second pass over pc_section in resolve_jumps(). * Removes emit_undo_profile_instruction_count() from ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION. 
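A rough sketch of the tagging scheme (illustration only, under the
assumption that text_section offsets never reach 1 << 31, so the most
significant bit of each u32 entry in pc_section is free):

    // Sketch only, not part of this patch.
    const INVALID_TARGET_TAG: u32 = 1 << 31;

    fn tag_invalid(offset: u32) -> u32 {
        offset | INVALID_TARGET_TAG // entry is not a valid callx target
    }

    fn resolve(entry: u32) -> Option<u32> {
        if entry & INVALID_TARGET_TAG != 0 {
            None // the JIT jumps to ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION instead
        } else {
            Some(entry) // plain byte offset into text_section
        }
    }

The `& (i32::MAX as u32 as usize)` mask in `invoke()` follows the same
idea: it strips the tag bit before the offset is added to the
text_section base.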
--- src/jit.rs | 91 +++++++++++++++++++++--------------------------------- 1 file changed, 36 insertions(+), 55 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index 91b27f04..78bbea34 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -140,7 +140,7 @@ impl JitProgram { host_stack_pointer = in(reg) &mut vm.host_stack_pointer, inlateout("rdi") std::ptr::addr_of_mut!(*vm).cast::().offset(get_runtime_environment_key() as isize) => _, inlateout("r10") (vm.previous_instruction_meter as i64).wrapping_add(registers[11] as i64) => _, - inlateout("rax") &self.text_section[self.pc_section[registers[11] as usize] as usize] as *const u8 => _, + inlateout("rax") &self.text_section[self.pc_section[registers[11] as usize] as usize & (i32::MAX as u32 as usize)] as *const u8 => _, inlateout("r11") ®isters => _, lateout("rsi") _, lateout("rdx") _, lateout("rcx") _, lateout("r8") _, lateout("r9") _, lateout("r12") _, lateout("r13") _, lateout("r14") _, lateout("r15") _, @@ -201,11 +201,11 @@ const ANCHOR_CALL_DEPTH_EXCEEDED: usize = 6; const ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT: usize = 7; const ANCHOR_DIV_BY_ZERO: usize = 8; const ANCHOR_DIV_OVERFLOW: usize = 9; -const ANCHOR_CALL_UNSUPPORTED_INSTRUCTION: usize = 10; -const ANCHOR_EXTERNAL_FUNCTION_CALL: usize = 11; -const ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE: usize = 12; -const ANCHOR_INTERNAL_FUNCTION_CALL_REG: usize = 13; -const ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION: usize = 14; +const ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION: usize = 10; +const ANCHOR_CALL_UNSUPPORTED_INSTRUCTION: usize = 11; +const ANCHOR_EXTERNAL_FUNCTION_CALL: usize = 12; +const ANCHOR_INTERNAL_FUNCTION_CALL_PROLOGUE: usize = 13; +const ANCHOR_INTERNAL_FUNCTION_CALL_REG: usize = 14; const ANCHOR_TRANSLATE_MEMORY_ADDRESS: usize = 21; const ANCHOR_COUNT: usize = 34; // Update me when adding or removing anchors @@ -407,12 +407,20 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_subroutines(); + let mut function_iter = self.executable.get_function_registry().keys().map(|insn_ptr| insn_ptr as usize).peekable(); while self.pc * ebpf::INSN_SIZE < self.program.len() { if self.offset_in_text_section + MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION * 2 >= self.result.text_section.len() { return Err(EbpfError::ExhaustedTextSegment(self.pc)); } let mut insn = ebpf::get_insn_unchecked(self.program, self.pc); self.result.pc_section[self.pc] = self.offset_in_text_section as u32; + if self.executable.get_sbpf_version().static_syscalls() { + if function_iter.peek() == Some(&self.pc) { + function_iter.next(); + } else { + self.result.pc_section[self.pc] |= 1 << 31; + } + } // Regular instruction meter checkpoints to prevent long linear runs from exceeding their budget if self.last_instruction_meter_validation_pc + self.config.instruction_meter_checkpoint_distance <= self.pc { @@ -968,19 +976,9 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } #[inline] - fn emit_undo_profile_instruction_count(&mut self, target_pc: Value) { + fn emit_undo_profile_instruction_count(&mut self, target_pc: usize) { if self.config.enable_instruction_meter { - match target_pc { - Value::Constant64(target_pc, _) => { - self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1 - target_pc); // instruction_meter += (self.pc + 1) - target_pc; - } - Value::Register(target_pc) => { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, target_pc, REGISTER_INSTRUCTION_METER, None)); // instruction_meter -= guest_target_pc - self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 
0x81, 0, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter += 1 - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // instruction_meter += self.pc - } - _ => debug_assert!(false), - } + self.emit_sanitized_alu(OperandSize::S64, 0x01, 0, REGISTER_INSTRUCTION_METER, self.pc as i64 + 1 - target_pc as i64); // instruction_meter += (self.pc + 1) - target_pc; } } @@ -1133,7 +1131,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } } - self.emit_undo_profile_instruction_count(Value::Constant64(0, false)); + self.emit_undo_profile_instruction_count(0); // Restore the previous frame pointer self.emit_ins(X86Instruction::pop(REGISTER_MAP[FRAME_PTR_REG])); @@ -1147,7 +1145,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_validate_and_profile_instruction_count(Some(0)); self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, function as usize as i64)); self.emit_ins(X86Instruction::call_immediate(self.relative_to_anchor(ANCHOR_EXTERNAL_FUNCTION_CALL, 5))); - self.emit_undo_profile_instruction_count(Value::Constant64(0, false)); + self.emit_undo_profile_instruction_count(0); } #[inline] @@ -1232,7 +1230,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, target_pc as i64)); let jump_offset = self.relative_to_target_pc(target_pc, 6); self.emit_ins(X86Instruction::conditional_jump_immediate(op, jump_offset)); - self.emit_undo_profile_instruction_count(Value::Constant64(target_pc as i64, true)); + self.emit_undo_profile_instruction_count(target_pc); } #[inline] @@ -1253,7 +1251,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, target_pc as i64)); let jump_offset = self.relative_to_target_pc(target_pc, 6); self.emit_ins(X86Instruction::conditional_jump_immediate(op, jump_offset)); - self.emit_undo_profile_instruction_count(Value::Constant64(target_pc as i64, true)); + self.emit_undo_profile_instruction_count(target_pc); } fn emit_shift(&mut self, size: OperandSize, opcode_extension: u8, source: X86Register, destination: X86Register, immediate: Option) { @@ -1483,6 +1481,12 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_set_exception_kind(EbpfError::DivideOverflow); self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_THROW_EXCEPTION, 5))); + // See `ANCHOR_INTERNAL_FUNCTION_CALL_REG` for more details. 
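+ // This handler is reached when `callx` targets an instruction whose
+ // `pc_section` entry still has bit 31 set, i.e. one that the compile()
+ // loop above did not record as a registered function start.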
+ self.set_anchor(ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION); + self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, REGISTER_SCRATCH, X86IndirectAccess::OffsetIndexShift(-8, RSP, 0))); // Retrieve the current program counter from the stack + self.emit_ins(X86Instruction::pop(REGISTER_MAP[0])); // Restore the clobbered REGISTER_MAP[0] + // Fall through + // Handler for EbpfError::UnsupportedInstruction self.set_anchor(ANCHOR_CALL_UNSUPPORTED_INSTRUCTION); if self.config.enable_instruction_tracing { @@ -1560,17 +1564,21 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S64, REGISTER_SCRATCH, (number_of_instructions * INSN_SIZE) as i64, None)); // guest_target_pc.cmp(number_of_instructions * INSN_SIZE) self.emit_ins(X86Instruction::conditional_jump_immediate(0x83, self.relative_to_anchor(ANCHOR_CALL_OUTSIDE_TEXT_SEGMENT, 6))); // Calculate the guest_target_pc (dst / INSN_SIZE) to update REGISTER_INSTRUCTION_METER - // and as target_pc for potential ANCHOR_CALL_UNSUPPORTED_INSTRUCTION + // and as target_pc for potential ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION let shift_amount = INSN_SIZE.trailing_zeros(); debug_assert_eq!(INSN_SIZE, 1 << shift_amount); self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0xc1, 5, REGISTER_SCRATCH, shift_amount as i64, None)); // guest_target_pc /= INSN_SIZE; + // Load host_target_address offset from self.result.pc_section + self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64)); // host_target_address = self.result.pc_section; + self.emit_ins(X86Instruction::load(OperandSize::S32, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::OffsetIndexShift(0, REGISTER_SCRATCH, 2))); // host_target_address = self.result.pc_section[guest_target_pc]; + // Check destination is valid + self.emit_ins(X86Instruction::test_immediate(OperandSize::S32, REGISTER_MAP[0], 1 << 31, None)); // host_target_address & (1 << 31) + self.emit_ins(X86Instruction::conditional_jump_immediate(0x85, self.relative_to_anchor(ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION, 6))); // If host_target_address & (1 << 31) != 0, throw UnsupportedInstruction + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S32, 0x81, 4, REGISTER_MAP[0], i32::MAX as i64, None)); // host_target_address &= (1 << 31) - 1; // A version of `self.emit_profile_instruction_count(None);` which reads self.pc from the stack self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x2b, REGISTER_INSTRUCTION_METER, RSP, Some(X86IndirectAccess::OffsetIndexShift(-8, RSP, 0)))); // instruction_meter -= guest_current_pc; self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_INSTRUCTION_METER, 1, None)); // instruction_meter -= 1; self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // instruction_meter += guest_target_pc; - // Load host_target_address offset from self.result.pc_section - self.emit_ins(X86Instruction::load_immediate(REGISTER_MAP[0], self.result.pc_section.as_ptr() as i64)); // host_target_address = self.result.pc_section; - self.emit_ins(X86Instruction::load(OperandSize::S32, REGISTER_MAP[0], REGISTER_MAP[0], X86IndirectAccess::OffsetIndexShift(0, REGISTER_SCRATCH, 2))); // host_target_address = self.result.pc_section[guest_target_pc]; // Offset host_target_address by self.result.text_section self.emit_ins(X86Instruction::mov_mmx(OperandSize::S64, REGISTER_SCRATCH, MM0)); 
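// Preserve guest_target_pc in MM0 while REGISTER_SCRATCH is repurposed to hold the text_section base address.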
self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.result.text_section.as_ptr() as i64)); // REGISTER_SCRATCH = self.result.text_section; @@ -1580,16 +1588,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::xchg(OperandSize::S64, REGISTER_MAP[0], RSP, Some(X86IndirectAccess::OffsetIndexShift(0, RSP, 0)))); // Swap REGISTER_MAP[0] and host_target_address self.emit_ins(X86Instruction::return_near()); // Tail call to host_target_address - // If callx lands in an invalid address, we must undo the changes in the instruction meter - // so that we can correctly calculate the number of executed instructions for error handling. - self.set_anchor(ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION); - self.emit_ins(X86Instruction::mov(OperandSize::S64, REGISTER_SCRATCH, REGISTER_MAP[0])); - // Retrieve the current program counter from the stack. `return_near` popped an element from the stack, - // so the offset is 16. Check `ANCHOR_INTERNAL_FUNCTION_CALL_REG` for more details. - self.emit_ins(X86Instruction::load(OperandSize::S64, RSP, REGISTER_SCRATCH, X86IndirectAccess::OffsetIndexShift(-16, RSP, 0))); - self.emit_undo_profile_instruction_count(Value::Register(REGISTER_MAP[0])); - self.emit_ins(X86Instruction::jump_immediate(self.relative_to_anchor(ANCHOR_CALL_UNSUPPORTED_INSTRUCTION, 5))); - // Translates a vm memory address to a host memory address let lower_key = self.immediate_value_key as i32 as i64; for (anchor_base, len) in &[ @@ -1671,7 +1669,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { let instruction_end = unsafe { self.result.text_section.as_ptr().add(self.offset_in_text_section).add(instruction_length) }; let destination = if self.result.pc_section[target_pc] != 0 { // Backward jump - &self.result.text_section[self.result.pc_section[target_pc] as usize] as *const u8 + &self.result.text_section[self.result.pc_section[target_pc] as usize & (i32::MAX as u32 as usize)] as *const u8 } else { // Forward jump, needs relocation self.text_section_jumps.push(Jump { location: unsafe { instruction_end.sub(4) }, target_pc }); @@ -1684,29 +1682,12 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { fn resolve_jumps(&mut self) { // Relocate forward jumps for jump in &self.text_section_jumps { - let destination = &self.result.text_section[self.result.pc_section[jump.target_pc] as usize] as *const u8; + let destination = &self.result.text_section[self.result.pc_section[jump.target_pc] as usize & (i32::MAX as u32 as usize)] as *const u8; let offset_value = unsafe { destination.offset_from(jump.location) } as i32 // Relative jump - mem::size_of::() as i32; // Jump from end of instruction unsafe { ptr::write_unaligned(jump.location as *mut i32, offset_value); } } - // Patch addresses to which `callx` may raise an unsupported instruction error - let call_unsupported_instruction = unsafe { self.anchors[ANCHOR_CALL_REG_UNSUPPORTED_INSTRUCTION].offset_from(self.result.text_section.as_ptr()) as u32 }; - if self.executable.get_sbpf_version().static_syscalls() { - let mut prev_pc = 0; - for current_pc in self.executable.get_function_registry().keys() { - if current_pc as usize >= self.result.pc_section.len() { - break; - } - for pc in prev_pc..current_pc as usize { - self.result.pc_section[pc] = call_unsupported_instruction; - } - prev_pc = current_pc as usize + 1; - } - for pc in prev_pc..self.result.pc_section.len() { - self.result.pc_section[pc] = call_unsupported_instruction; - } - } } } From ace7ec4afc43c1bb15f9f63fa2b9acd560554c0d Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 3 Jan 2025 00:06:40 +0100 Subject: [PATCH 12/18] Revert "Refactor - Sanitization of memory accesses in JIT (#6)" (#14) cad781a3b8f148e1421c706ee2e1bd765f80b437 --- src/jit.rs | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index 78bbea34..387dd902 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -1169,16 +1169,12 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { match vm_addr { Value::RegisterPlusConstant64(reg, constant, user_provided) => { - debug_assert!(user_provided); - // First half of emit_sanitized_load_immediate(REGISTER_SCRATCH, vm_addr) - let lower_key = self.immediate_value_key as i32 as i64; - self.emit_ins(X86Instruction::lea(OperandSize::S64, reg, REGISTER_SCRATCH, Some( - if reg == R12 { - X86IndirectAccess::OffsetIndexShift(constant.wrapping_sub(lower_key) as i32, RSP, 0) - } else { - X86IndirectAccess::Offset(constant.wrapping_sub(lower_key) as i32) - } - ))); + if user_provided && self.should_sanitize_constant(constant) { + self.emit_sanitized_load_immediate(REGISTER_SCRATCH, constant); + } else { + self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, constant)); + } + self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x01, reg, REGISTER_SCRATCH, None)); }, _ => { #[cfg(debug_assertions)] @@ -1597,8 +1593,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { ] { let target_offset = *anchor_base + len.trailing_zeros() as usize; self.set_anchor(ANCHOR_TRANSLATE_MEMORY_ADDRESS + target_offset); - // Second half of emit_sanitized_load_immediate(REGISTER_SCRATCH, vm_addr) - self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_SCRATCH, lower_key, None)); // call MemoryMapping::(load|store) storing the result in RuntimeEnvironmentSlot::ProgramResult if *anchor_base == 0 { // AccessType::Load let load = match len { From bd81cd3c1b55f032152b15e53b0470b20fbad8d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Fri, 3 Jan 2025 15:22:41 +0100 Subject: [PATCH 13/18] Cleanup - `test_interpreter_and_jit!()` macros (#13) * Moves test_invalid_exit_or_return() to verifier tests. * Moves test_invalid_call_imm() to verifier. * Adds test_call_imm_does_not_dispatch_syscalls(). * Moves test_interpreter_and_jit, test_interpreter_and_jit_asm, test_interpreter_and_jit_elf and test_syscall_asm into test_utils crate. * Removes JIT compilation error check. * Refactors expected_result in test_interpreter_and_jit!(). * Removes the option to skip the verifier in test_interpreter_and_jit!(). * Run all tests (except for those which expect ExceededMaxInstructions) with and without override_budget. * Adds a variant of test_interpreter_and_jit!() which does not check the result. * Uses test_utils macros in exercise_instructions test_ins(). * Report all diverged properties. --- test_utils/src/lib.rs | 205 +++++++++++++++++++++ tests/execution.rs | 318 +++------------------------------ tests/exercise_instructions.rs | 123 ++----------- tests/verifier.rs | 29 ++- 4 files changed, 263 insertions(+), 412 deletions(-) diff --git a/test_utils/src/lib.rs b/test_utils/src/lib.rs index 2f1d8df5..c17a796c 100644 --- a/test_utils/src/lib.rs +++ b/test_utils/src/lib.rs @@ -223,3 +223,208 @@ macro_rules! assert_error { assert!(format!("{:?}", $result).contains(&format!($($error),+))); } } + +#[macro_export] +macro_rules! test_interpreter_and_jit { + (override_budget => $override_budget:expr, $executable:expr, $mem:tt, $context_object:expr $(,)?) 
=> {{ + let expected_instruction_count = $context_object.get_remaining(); + #[allow(unused_mut)] + let mut context_object = $context_object; + if $override_budget { + const INSTRUCTION_METER_BUDGET: u64 = 1024; + context_object.remaining = INSTRUCTION_METER_BUDGET; + } + $executable.verify::().unwrap(); + let (instruction_count_interpreter, result_interpreter, interpreter_final_pc, _tracer_interpreter) = { + let mut mem = $mem; + let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); + let mut context_object = context_object.clone(); + create_vm!( + vm, + &$executable, + &mut context_object, + stack, + heap, + vec![mem_region], + None + ); + let (instruction_count_interpreter, result_interpreter) = vm.execute_program(&$executable, true); + ( + instruction_count_interpreter, + result_interpreter, + vm.registers[11], + vm.context_object_pointer.clone(), + ) + }; + #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] + { + #[allow(unused_mut)] + let compilation_result = $executable.jit_compile(); + let mut mem = $mem; + let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); + create_vm!( + vm, + &$executable, + &mut context_object, + stack, + heap, + vec![mem_region], + None + ); + match compilation_result { + Err(_) => panic!("{:?}", compilation_result), + Ok(()) => { + let (instruction_count_jit, result_jit) = vm.execute_program(&$executable, false); + let tracer_jit = &vm.context_object_pointer; + let mut diverged = false; + if format!("{:?}", result_interpreter) != format!("{:?}", result_jit) { + println!( + "Result of interpreter ({:?}) and JIT ({:?}) diverged", + result_interpreter, result_jit, + ); + diverged = true; + } + if instruction_count_interpreter != instruction_count_jit { + println!( + "Instruction meter of interpreter ({:?}) and JIT ({:?}) diverged", + instruction_count_interpreter, instruction_count_jit, + ); + diverged = true; + } + if interpreter_final_pc != vm.registers[11] { + println!( + "Final PC of interpreter ({:?}) and JIT ({:?}) result diverged", + interpreter_final_pc, vm.registers[11], + ); + diverged = true; + } + if !TestContextObject::compare_trace_log(&_tracer_interpreter, tracer_jit) { + let analysis = Analysis::from_executable(&$executable).unwrap(); + let stdout = std::io::stdout(); + analysis + .disassemble_trace_log( + &mut stdout.lock(), + &_tracer_interpreter.trace_log, + ) + .unwrap(); + analysis + .disassemble_trace_log(&mut stdout.lock(), &tracer_jit.trace_log) + .unwrap(); + diverged = true; + } + assert!(!diverged); + } + } + } + if $executable.get_config().enable_instruction_meter { + assert_eq!( + instruction_count_interpreter, expected_instruction_count, + "Instruction meter did not consume expected amount" + ); + } + result_interpreter + }}; + ($executable:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { + let expected_result = $expected_result; + let result = test_interpreter_and_jit!( + override_budget => false, + $executable, + $mem, + $context_object, + ); + assert_eq!( + format!("{:?}", result), format!("{:?}", expected_result), + "Unexpected result", + ); + if !matches!(expected_result, ProgramResult::Err(solana_sbpf::error::EbpfError::ExceededMaxInstructions)) { + test_interpreter_and_jit!( + override_budget => true, + $executable, + $mem, + $context_object, + ); + } + }; +} + +#[macro_export] +macro_rules! test_interpreter_and_jit_asm { + ($source:expr, $config:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) 
=> { + #[allow(unused_mut)] + { + let mut config = $config; + config.enable_instruction_tracing = true; + let mut function_registry = + FunctionRegistry::>::default(); + let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); + let mut executable = assemble($source, loader).unwrap(); + test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); + } + }; + ($source:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { + #[allow(unused_mut)] + { + test_interpreter_and_jit_asm!( + $source, + Config::default(), + $mem, + $context_object, + $expected_result + ); + } + }; +} + +#[macro_export] +macro_rules! test_interpreter_and_jit_elf { + (register, $function_registry:expr, $location:expr => $syscall_function:expr) => { + $function_registry + .register_function_hashed($location.as_bytes(), $syscall_function) + .unwrap(); + }; + ($source:tt, $config:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { + let mut file = File::open($source).unwrap(); + let mut elf = Vec::new(); + file.read_to_end(&mut elf).unwrap(); + #[allow(unused_mut)] + { + let mut function_registry = FunctionRegistry::>::default(); + $(test_interpreter_and_jit_elf!(register, function_registry, $location => $syscall_function);)* + let loader = Arc::new(BuiltinProgram::new_loader($config, function_registry)); + let mut executable = Executable::::from_elf(&elf, loader).unwrap(); + test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); + } + }; + ($source:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { + let config = Config { + enable_instruction_tracing: true, + ..Config::default() + }; + test_interpreter_and_jit_elf!($source, config, $mem, ($($location => $syscall_function),*), $context_object, $expected_result); + }; +} + +#[macro_export] +macro_rules! test_syscall_asm { + (register, $loader:expr, $syscall_number:literal => $syscall_name:expr => $syscall_function:expr) => { + let _ = $loader.register_function($syscall_name, $syscall_number, $syscall_function).unwrap(); + }; + ($source:tt, $mem:tt, ($($syscall_number:literal => $syscall_name:expr => $syscall_function:expr),*$(,)?), $context_object:expr, $expected_result:expr $(,)?) 
=> { + let mut config = Config { + enable_instruction_tracing: true, + ..Config::default() + }; + for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { + config.enabled_sbpf_versions = sbpf_version..=sbpf_version; + let src = if sbpf_version == SBPFVersion::V0 { + format!($source, $($syscall_name, )*) + } else { + format!($source, $($syscall_number, )*) + }; + let mut loader = BuiltinProgram::new_loader_with_dense_registration(config.clone()); + $(test_syscall_asm!(register, loader, $syscall_number => $syscall_name => $syscall_function);)* + let mut executable = assemble(src.as_str(), Arc::new(loader)).unwrap(); + test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); + } + }; +} diff --git a/tests/execution.rs b/tests/execution.rs index 696e6116..36b04e25 100644 --- a/tests/execution.rs +++ b/tests/execution.rs @@ -29,214 +29,11 @@ use solana_sbpf::{ }; use std::{fs::File, io::Read, sync::Arc}; use test_utils::{ - assert_error, create_vm, PROG_TCP_PORT_80, TCP_SACK_ASM, TCP_SACK_MATCH, TCP_SACK_NOMATCH, + assert_error, create_vm, test_interpreter_and_jit, test_interpreter_and_jit_asm, + test_interpreter_and_jit_elf, test_syscall_asm, PROG_TCP_PORT_80, TCP_SACK_ASM, TCP_SACK_MATCH, + TCP_SACK_NOMATCH, }; -const INSTRUCTION_METER_BUDGET: u64 = 1024; - -macro_rules! test_interpreter_and_jit { - (register, $function_registry:expr, $location:expr => $syscall_function:expr) => { - $function_registry - .register_function_hashed($location.as_bytes(), $syscall_function) - .unwrap(); - }; - ($executable:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { - test_interpreter_and_jit!( - false, - true, - $executable, - $mem, - $context_object, - $expected_result - ) - }; - ($verify:literal, $executable:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { - test_interpreter_and_jit!( - false, - $verify, - $executable, - $mem, - $context_object, - $expected_result - ) - }; - ($override_budget:literal, $verify:literal, $executable:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) 
=> { - let expected_instruction_count = $context_object.get_remaining(); - #[allow(unused_mut)] - let mut context_object = $context_object; - let expected_result = format!("{:?}", $expected_result); - if !$override_budget && !expected_result.contains("ExceededMaxInstructions") { - context_object.remaining = INSTRUCTION_METER_BUDGET; - } - if $verify { - $executable.verify::().unwrap(); - } - let (instruction_count_interpreter, interpreter_final_pc, _tracer_interpreter) = { - let mut mem = $mem; - let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); - let mut context_object = context_object.clone(); - create_vm!( - vm, - &$executable, - &mut context_object, - stack, - heap, - vec![mem_region], - None - ); - let (instruction_count_interpreter, result) = vm.execute_program(&$executable, true); - assert_eq!( - format!("{:?}", result), - expected_result, - "Unexpected result for Interpreter" - ); - ( - instruction_count_interpreter, - vm.registers[11], - vm.context_object_pointer.clone(), - ) - }; - #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] - { - #[allow(unused_mut)] - let compilation_result = $executable.jit_compile(); - let mut mem = $mem; - let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); - create_vm!( - vm, - &$executable, - &mut context_object, - stack, - heap, - vec![mem_region], - None - ); - match compilation_result { - Err(_) => assert_eq!( - format!("{:?}", compilation_result), - expected_result, - "Unexpected result for JIT compilation" - ), - Ok(()) => { - let (instruction_count_jit, result) = vm.execute_program(&$executable, false); - let tracer_jit = &vm.context_object_pointer; - if !TestContextObject::compare_trace_log(&_tracer_interpreter, tracer_jit) { - let analysis = Analysis::from_executable(&$executable).unwrap(); - let stdout = std::io::stdout(); - analysis - .disassemble_trace_log( - &mut stdout.lock(), - &_tracer_interpreter.trace_log, - ) - .unwrap(); - analysis - .disassemble_trace_log(&mut stdout.lock(), &tracer_jit.trace_log) - .unwrap(); - panic!(); - } - assert_eq!( - format!("{:?}", result), - expected_result, - "Unexpected result for JIT" - ); - assert_eq!( - instruction_count_interpreter, instruction_count_jit, - "Interpreter and JIT instruction meter diverged", - ); - assert_eq!( - interpreter_final_pc, vm.registers[11], - "Interpreter and JIT instruction final PC diverged", - ); - } - } - } - if $executable.get_config().enable_instruction_meter { - assert_eq!( - instruction_count_interpreter, expected_instruction_count, - "Instruction meter did not consume expected amount" - ); - } - }; -} - -macro_rules! test_interpreter_and_jit_asm { - ($source:tt, $config:expr, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { - #[allow(unused_mut)] - { - let mut config = $config; - config.enable_instruction_tracing = true; - let mut function_registry = - FunctionRegistry::>::default(); - let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); - let mut executable = assemble($source, loader).unwrap(); - test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); - } - }; - ($source:tt, $mem:tt, $context_object:expr, $expected_result:expr $(,)?) => { - #[allow(unused_mut)] - { - test_interpreter_and_jit_asm!( - $source, - Config::default(), - $mem, - $context_object, - $expected_result - ); - } - }; -} - -macro_rules! 
test_syscall_asm { - (register, $loader:expr, $syscall_number:literal => $syscall_name:expr => $syscall_function:expr) => { - let _ = $loader.register_function($syscall_name, $syscall_number, $syscall_function).unwrap(); - }; - - ($source:tt, $mem:tt, ($($syscall_number:literal => $syscall_name:expr => $syscall_function:expr),*$(,)?), $context_object:expr, $expected_result:expr $(,)?) => { - let mut config = Config { - enable_instruction_tracing: true, - ..Config::default() - }; - for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { - config.enabled_sbpf_versions = sbpf_version..=sbpf_version; - let src = if sbpf_version == SBPFVersion::V0 { - format!($source, $($syscall_name, )*) - } else { - format!($source, $($syscall_number, )*) - }; - let mut loader = BuiltinProgram::new_loader_with_dense_registration(config.clone()); - $(test_syscall_asm!(register, loader, $syscall_number => $syscall_name => $syscall_function);)* - let mut executable = assemble(src.as_str(), Arc::new(loader)).unwrap(); - test_interpreter_and_jit!(executable, $mem, $context_object, $expected_result); - } - }; -} - -macro_rules! test_interpreter_and_jit_elf { - ($verify:literal, $source:tt, $config:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { - let mut file = File::open($source).unwrap(); - let mut elf = Vec::new(); - file.read_to_end(&mut elf).unwrap(); - #[allow(unused_mut)] - { - let mut function_registry = FunctionRegistry::>::default(); - $(test_interpreter_and_jit!(register, function_registry, $location => $syscall_function);)* - let loader = Arc::new(BuiltinProgram::new_loader($config, function_registry)); - let mut executable = Executable::::from_elf(&elf, loader).unwrap(); - test_interpreter_and_jit!($verify, executable, $mem, $context_object, $expected_result); - } - }; - ($source:tt, $config:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { - test_interpreter_and_jit_elf!(true, $source, $config, $mem, ($($location => $syscall_function),*), $context_object, $expected_result); - }; - ($source:tt, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr, $expected_result:expr $(,)?) => { - let config = Config { - enable_instruction_tracing: true, - ..Config::default() - }; - test_interpreter_and_jit_elf!($source, config, $mem, ($($location => $syscall_function),*), $context_object, $expected_result); - }; -} - // BPF_ALU32_LOAD : Arithmetic and Logic #[test] @@ -3502,110 +3299,39 @@ fn test_total_chaos() { } #[test] -fn test_invalid_call_imm() { - // In SBPFv3, `call_imm` N shall not be dispatched a syscall. 
- let prog = &[ - 0x85, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // call_imm 2 - 0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - ]; - - let config = Config { - enabled_sbpf_versions: SBPFVersion::V3..=SBPFVersion::V3, - enable_instruction_tracing: true, - ..Config::default() - }; - let mut loader = BuiltinProgram::new_loader_with_dense_registration(config); - loader - .register_function("syscall_string", 2, syscalls::SyscallString::vm) - .unwrap(); - let mut executable = Executable::::from_text_bytes( - prog, - Arc::new(loader), - SBPFVersion::V3, - FunctionRegistry::default(), - ) - .unwrap(); - - test_interpreter_and_jit!( - false, - executable, +fn test_call_imm_does_not_dispatch_syscalls() { + test_syscall_asm!( + " + call function_foo + return + syscall {} + return + function_foo: + mov r0, 42 + return", [], - TestContextObject::new(1), - ProgramResult::Err(EbpfError::UnsupportedInstruction), + ( + 3 => "bpf_syscall_string" => syscalls::SyscallString::vm, + ), + TestContextObject::new(4), + ProgramResult::Ok(42), ); } #[test] -#[should_panic(expected = "Invalid syscall should have been detected in the verifier.")] -fn test_invalid_exit_or_return() { - for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { - let inst = if sbpf_version == SBPFVersion::V0 { - 0x9d - } else { - 0x95 - }; - - let prog = &[ - 0xbf, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // mov64 r0, 2 - inst, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit/return - ]; - - let config = Config { - enabled_sbpf_versions: sbpf_version..=sbpf_version, - enable_instruction_tracing: true, - ..Config::default() - }; - let function_registry = FunctionRegistry::>::default(); - let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); - let mut executable = Executable::::from_text_bytes( - prog, - loader, - sbpf_version, - FunctionRegistry::default(), - ) - .unwrap(); - - test_interpreter_and_jit!( - false, - executable, - [], - TestContextObject::new(2), - ProgramResult::Err(EbpfError::UnsupportedInstruction), - ); - } -} - -#[test] -fn callx_unsupported_instruction_and_exceeded_max_instructions() { - let program = " +fn test_callx_unsupported_instruction_and_exceeded_max_instructions() { + test_interpreter_and_jit_asm!( + " sub32 r7, r1 sub64 r5, 8 sub64 r7, 0 callx r5 callx r5 - return - "; - test_interpreter_and_jit_asm!( - program, + return", [], TestContextObject::new(4), ProgramResult::Err(EbpfError::UnsupportedInstruction), ); - - let loader = Arc::new(BuiltinProgram::new_loader( - Config::default(), - FunctionRegistry::default(), - )); - - let mut executable = assemble(program, loader).unwrap(); - test_interpreter_and_jit!( - true, - false, - executable, - [], - TestContextObject::new(4), - ProgramResult::Err(EbpfError::UnsupportedInstruction) - ); } #[test] diff --git a/tests/exercise_instructions.rs b/tests/exercise_instructions.rs index f8f5a89b..8622234d 100644 --- a/tests/exercise_instructions.rs +++ b/tests/exercise_instructions.rs @@ -23,113 +23,7 @@ use solana_sbpf::{ vm::{Config, ContextObject, TestContextObject}, }; use std::sync::Arc; -use test_utils::create_vm; - -macro_rules! test_interpreter_and_jit { - (register, $function_registry:expr, $location:expr => $syscall_function:expr) => { - $function_registry - .register_function_hashed($location.as_bytes(), $syscall_function) - .unwrap(); - }; - ($executable:expr, $mem:tt, $context_object:expr $(,)?) 
=> { - let expected_instruction_count = $context_object.get_remaining(); - #[allow(unused_mut)] - let mut context_object = $context_object; - $executable.verify::().unwrap(); - let ( - instruction_count_interpreter, - interpreter_final_pc, - _tracer_interpreter, - interpreter_result, - interpreter_mem, - ) = { - let mut mem = $mem.clone(); - let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); - let mut context_object = context_object.clone(); - create_vm!( - vm, - &$executable, - &mut context_object, - stack, - heap, - vec![mem_region], - None - ); - let (instruction_count_interpreter, result) = vm.execute_program(&$executable, true); - ( - instruction_count_interpreter, - vm.registers[11], - vm.context_object_pointer.clone(), - result.unwrap(), - mem, - ) - }; - #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))] - { - #[allow(unused_mut)] - $executable.jit_compile().unwrap(); - let mut mem = $mem; - let mem_region = MemoryRegion::new_writable(&mut mem, ebpf::MM_INPUT_START); - create_vm!( - vm, - &$executable, - &mut context_object, - stack, - heap, - vec![mem_region], - None - ); - let (instruction_count_jit, result) = vm.execute_program(&$executable, false); - let tracer_jit = &vm.context_object_pointer; - if !TestContextObject::compare_trace_log(&_tracer_interpreter, tracer_jit) { - let analysis = Analysis::from_executable(&$executable).unwrap(); - let stdout = std::io::stdout(); - analysis - .disassemble_trace_log(&mut stdout.lock(), &_tracer_interpreter.trace_log) - .unwrap(); - analysis - .disassemble_trace_log(&mut stdout.lock(), &tracer_jit.trace_log) - .unwrap(); - panic!(); - } - assert_eq!( - result.unwrap(), - interpreter_result, - "Unexpected result for JIT" - ); - assert_eq!( - instruction_count_interpreter, instruction_count_jit, - "Interpreter and JIT instruction meter diverged", - ); - assert_eq!( - interpreter_final_pc, vm.registers[11], - "Interpreter and JIT instruction final PC diverged", - ); - assert_eq!(interpreter_mem, mem, "Interpreter and JIT memory diverged",); - } - if $executable.get_config().enable_instruction_meter { - assert_eq!( - instruction_count_interpreter, expected_instruction_count, - "Instruction meter did not consume expected amount" - ); - } - }; -} - -macro_rules! test_interpreter_and_jit_asm { - ($source:expr, $config:expr, $mem:tt, ($($location:expr => $syscall_function:expr),* $(,)?), $context_object:expr $(,)?) 
=> { - #[allow(unused_mut)] - { - let mut config = $config; - config.enable_instruction_tracing = true; - let mut function_registry = FunctionRegistry::>::default(); - $(test_interpreter_and_jit!(register, function_registry, $location => $syscall_function);)* - let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); - let mut executable = assemble($source, loader).unwrap(); - test_interpreter_and_jit!(executable, $mem, $context_object); - } - }; -} +use test_utils::{create_vm, test_interpreter_and_jit}; // BPF_ALU32_LOAD : Arithmetic and Logic #[test] @@ -538,9 +432,20 @@ fn test_ins(v0: bool, ins: String, prng: &mut SmallRng, cu: u64) { exit" ); - let mut config = Config::default(); + let mut config = Config { + enable_instruction_tracing: true, + ..Config::default() + }; if v0 { config.enabled_sbpf_versions = SBPFVersion::V0..=SBPFVersion::V0; } - test_interpreter_and_jit_asm!(asm.as_str(), config, input, (), TestContextObject::new(cu)); + let function_registry = FunctionRegistry::>::default(); + let loader = Arc::new(BuiltinProgram::new_loader(config, function_registry)); + let mut executable = assemble(asm.as_str(), loader).unwrap(); + test_interpreter_and_jit!( + override_budget => true, + executable, + input, + TestContextObject::new(cu), + ); } diff --git a/tests/verifier.rs b/tests/verifier.rs index 658d9b5e..691083f7 100644 --- a/tests/verifier.rs +++ b/tests/verifier.rs @@ -357,11 +357,26 @@ fn test_verifier_err_jmp_out_start() { } #[test] -#[should_panic(expected = "UnknownOpCode(6, 0)")] -fn test_verifier_err_unknown_opcode() { +#[should_panic(expected = "UnknownOpCode(157, 0)")] +fn test_verifier_err_invalid_return() { let prog = &[ - 0x06, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // - 0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // + 0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // return + ]; + let executable = Executable::::from_text_bytes( + prog, + Arc::new(BuiltinProgram::new_mock()), + SBPFVersion::V0, + FunctionRegistry::default(), + ) + .unwrap(); + executable.verify::().unwrap(); +} + +#[test] +#[should_panic(expected = "InvalidFunction(0)")] +fn test_verifier_err_invalid_exit() { + let prog = &[ + 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // exit in v0, but syscall in v3 ]; let executable = Executable::::from_text_bytes( prog, @@ -374,10 +389,10 @@ fn test_verifier_err_unknown_opcode() { } #[test] -#[should_panic(expected = "InvalidFunction(1811268607)")] -fn test_verifier_unknown_sycall() { +#[should_panic(expected = "InvalidSyscall(2)")] +fn test_verifier_unknown_syscall() { let prog = &[ - 0x85, 0x00, 0x00, 0x00, 0xfe, 0xc3, 0xf5, 0x6b, // call 0x6bf5c3fe + 0x95, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, // syscall 2 0x9d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // return ]; let executable = Executable::::from_text_bytes( From 253c57bdf9f9eee29970cfbd11e0188d4b85e89b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 6 Jan 2025 15:54:54 +0100 Subject: [PATCH 14/18] Has assembler emit source R0 for CALL_IMM. 
(#16) --- src/assembler.rs | 2 +- tests/assembler.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/assembler.rs b/src/assembler.rs index 0737a6be..bd01d88b 100644 --- a/src/assembler.rs +++ b/src/assembler.rs @@ -431,7 +431,7 @@ pub fn assemble( target_pc as usize, ) .map_err(|_| format!("Label hash collision {name}"))?; - insn(opc, 0, 1, 0, instr_imm) + insn(opc, 0, 0, 0, instr_imm) } (CallReg, [Register(dst)]) => { if sbpf_version.callx_uses_src_reg() { diff --git a/tests/assembler.rs b/tests/assembler.rs index 6b78613e..5914794a 100644 --- a/tests/assembler.rs +++ b/tests/assembler.rs @@ -152,7 +152,7 @@ fn test_call_reg() { fn test_call_imm() { assert_eq!( asm("call 299"), - Ok(vec![insn(0, ebpf::CALL_IMM, 0, 1, 0, 299)]) + Ok(vec![insn(0, ebpf::CALL_IMM, 0, 0, 0, 299)]) ); } From 4bde88401e3ccf39cb3bdc07b1d88bbcb72125ed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Mon, 6 Jan 2025 17:25:40 +0100 Subject: [PATCH 15/18] Profiles the instruction counter in the beginning so that ANCHOR_EXIT does not need to. (#18) --- src/jit.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/jit.rs b/src/jit.rs index 387dd902..110094ce 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -789,14 +789,11 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { || (insn.opc == ebpf::RETURN && !self.executable.get_sbpf_version().static_syscalls()) { return Err(EbpfError::UnsupportedInstruction); } - self.emit_validate_instruction_count(Some(self.pc)); + self.emit_validate_and_profile_instruction_count(Some(0)); let call_depth_access = X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::CallDepth)); // If env.call_depth == 0, we've reached the exit instruction of the entry point self.emit_ins(X86Instruction::cmp_immediate(OperandSize::S32, REGISTER_PTR_TO_VM, 0, Some(call_depth_access))); - if self.config.enable_instruction_meter { - self.emit_ins(X86Instruction::load_immediate(REGISTER_SCRATCH, self.pc as i64)); - } // we're done self.emit_ins(X86Instruction::conditional_jump_immediate(0x84, self.relative_to_anchor(ANCHOR_EXIT, 6))); @@ -804,7 +801,6 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 5, REGISTER_PTR_TO_VM, 1, Some(call_depth_access))); // env.call_depth -= 1; // and return - self.emit_profile_instruction_count(Some(0)); self.emit_ins(X86Instruction::return_near()); }, @@ -1443,7 +1439,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // Quit gracefully self.set_anchor(ANCHOR_EXIT); if self.config.enable_instruction_meter { - self.emit_ins(X86Instruction::alu(OperandSize::S64, 0x29, REGISTER_SCRATCH, REGISTER_INSTRUCTION_METER, None)); // REGISTER_INSTRUCTION_METER -= pc; + self.emit_ins(X86Instruction::alu_immediate(OperandSize::S64, 0x81, 0, REGISTER_INSTRUCTION_METER, 1, None)); // REGISTER_INSTRUCTION_METER += 1; } self.emit_ins(X86Instruction::lea(OperandSize::S64, REGISTER_PTR_TO_VM, REGISTER_SCRATCH, Some(X86IndirectAccess::Offset(self.slot_in_vm(RuntimeEnvironmentSlot::ProgramResult))))); self.emit_ins(X86Instruction::store(OperandSize::S64, REGISTER_MAP[0], REGISTER_SCRATCH, X86IndirectAccess::Offset(std::mem::size_of::() as i32))); // result.return_value = R0; From 63317129c3a0d19029c35f15b61eb206a6457328 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 7 Jan 2025 17:36:31 +0000 Subject: [PATCH 16/18] Moves the tests of elf.rs into a separate file. 
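This also widens the visibility of `Section`, `load_with_strict_parser()`, `parse_ro_sections()` and `get_ro_region()` to `pub`, since the relocated tests now live in an external crate. A minimal sketch of the resulting usage, assuming the `config`, `sections` and `elf_bytes` bindings are set up exactly as in the moved tests:

    use solana_sbpf::{elf::{get_ro_region, Executable}, program::SBPFVersion, vm::TestContextObject};

    type ElfExecutable = Executable<TestContextObject>;

    // Previously pub(crate), now reachable from tests/elf.rs:
    let ro_section =
        ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes)
            .unwrap();
    let ro_region = get_ro_region(&ro_section, &elf_bytes);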
--- src/elf.rs | 953 +-------------------------------------------------- tests/elf.rs | 919 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 926 insertions(+), 946 deletions(-) create mode 100644 tests/elf.rs diff --git a/src/elf.rs b/src/elf.rs index 2489d58f..f81252f0 100644 --- a/src/elf.rs +++ b/src/elf.rs @@ -213,8 +213,9 @@ impl BpfRelocationType { } } +/// ELF section #[derive(Debug, PartialEq)] -pub(crate) enum Section { +pub enum Section { /// Owned section data. /// /// The first field is virtual address of the section. @@ -408,7 +409,7 @@ impl Executable { } /// Loads an ELF without relocation - fn load_with_strict_parser( + pub fn load_with_strict_parser( bytes: &[u8], loader: Arc>, ) -> Result { @@ -822,10 +823,8 @@ impl Executable { Ok(()) } - pub(crate) fn parse_ro_sections< - 'a, - S: IntoIterator, &'a Elf64Shdr)>, - >( + /// Parses and concatenates the readonly data sections + pub fn parse_ro_sections<'a, S: IntoIterator, &'a Elf64Shdr)>>( config: &Config, sbpf_version: &SBPFVersion, sections: S, @@ -1364,7 +1363,8 @@ impl Executable { } } -pub(crate) fn get_ro_region(ro_section: &Section, elf: &[u8]) -> MemoryRegion { +/// Creates a [MemoryRegion] for the given [Section] +pub fn get_ro_region(ro_section: &Section, elf: &[u8]) -> MemoryRegion { let (offset, ro_data) = match ro_section { Section::Owned(offset, data) => (*offset, data.as_slice()), Section::Borrowed(offset, byte_range) => (*offset, &elf[byte_range.clone()]), @@ -1375,942 +1375,3 @@ pub(crate) fn get_ro_region(ro_section: &Section, elf: &[u8]) -> MemoryRegion { // will be unmappable, see MemoryRegion::vm_to_host. MemoryRegion::new_readonly(ro_data, offset as u64) } - -#[cfg(test)] -mod test { - use super::*; - use crate::{ - elf_parser::{ - // FIXME consts::{ELFCLASS32, ELFDATA2MSB, ET_REL}, - consts::{ELFCLASS32, ELFDATA2MSB, ET_REL}, - types::{Elf64Ehdr, Elf64Shdr, Elf64Sym}, - SECTION_NAME_LENGTH_MAXIMUM, - }, - error::ProgramResult, - fuzz::fuzz, - program::BuiltinFunction, - syscalls, - vm::TestContextObject, - }; - use rand::{distributions::Uniform, Rng}; - use std::{fs::File, io::Read}; - use test_utils::assert_error; - type ElfExecutable = Executable; - - fn loader() -> Arc> { - let mut function_registry = - FunctionRegistry::>::default(); - function_registry - .register_function_hashed(*b"log", syscalls::SyscallString::vm) - .unwrap(); - function_registry - .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm) - .unwrap(); - Arc::new(BuiltinProgram::new_loader( - Config::default(), - function_registry, - )) - } - - #[test] - fn test_strict_header() { - let elf_bytes = - std::fs::read("tests/elfs/strict_header.so").expect("failed to read elf file"); - let loader = loader(); - - // Check that the unmodified file can be parsed - { - let loader = Arc::new(BuiltinProgram::new_loader( - Config { - enable_symbol_and_section_labels: true, - ..Config::default() - }, - FunctionRegistry::>::default(), - )); - let executable = ElfExecutable::load(&elf_bytes, loader.clone()).unwrap(); - let (name, _pc) = executable.get_function_registry().lookup_by_key(4).unwrap(); - assert_eq!(name, b"entrypoint"); - } - - // Check that using a reserved SBPF version fails - { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[0x0030] = 0xFF; - let err = ElfExecutable::load(&elf_bytes, loader.clone()).unwrap_err(); - assert_eq!(err, ElfError::UnsupportedSBPFVersion); - } - - // Check that an empty file fails - let err = ElfExecutable::load_with_strict_parser(&[], loader.clone()).unwrap_err(); - 
assert_eq!(err, ElfParserError::OutOfBounds); - - // Break the file header one byte at a time - let expected_results = std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)) - .take(40) - .chain(std::iter::repeat(&Ok(())).take(12)) - .chain(std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)).take(4)) - .chain(std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader)).take(1)) - .chain(std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)).take(3)) - .chain(std::iter::repeat(&Ok(())).take(2)) - .chain(std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)).take(2)); - for (offset, expected) in (0..std::mem::size_of::()).zip(expected_results) { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[offset] = 0xAF; - let result = - ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).map(|_| ()); - assert_eq!(&result, expected); - } - - // Break the program header table one byte at a time - let expected_results_readonly = - std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader)) - .take(48) - .chain(std::iter::repeat(&Ok(())).take(8)) - .collect::>(); - let expected_results_writable = - std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader)) - .take(40) - .chain(std::iter::repeat(&Ok(())).take(4)) - .chain(std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader)).take(4)) - .chain(std::iter::repeat(&Ok(())).take(8)) - .collect::>(); - let expected_results = vec![ - expected_results_readonly.iter(), - expected_results_readonly.iter(), - expected_results_writable.iter(), - expected_results_writable.iter(), - expected_results_readonly.iter(), - ]; - for (header_index, expected_results) in expected_results.into_iter().enumerate() { - for (offset, expected) in (std::mem::size_of::() - + std::mem::size_of::() * header_index - ..std::mem::size_of::() - + std::mem::size_of::() * (header_index + 1)) - .zip(expected_results) - { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[offset] = 0xAF; - let result = - ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).map(|_| ()); - assert_eq!(&&result, expected); - } - } - - // Break the dynamic symbol table one byte at a time - for index in 1..3 { - let expected_results = std::iter::repeat(&Ok(())) - .take(8) - .chain(std::iter::repeat(&Err(ElfParserError::OutOfBounds)).take(8)) - .chain(std::iter::repeat(&Err(ElfParserError::InvalidSize)).take(1)) - .chain(std::iter::repeat(&Err(ElfParserError::OutOfBounds)).take(7)); - for (offset, expected) in (0x3000 + std::mem::size_of::() * index - ..0x3000 + std::mem::size_of::() * (index + 1)) - .zip(expected_results) - { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[offset] = 0xAF; - let result = - ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).map(|_| ()); - assert_eq!(&result, expected); - } - } - - // Check that an empty function symbol fails - { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[0x3040] = 0x00; - let err = - ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).unwrap_err(); - assert_eq!(err, ElfParserError::InvalidSize); - } - - // Check that bytecode not covered by function symbols fails - { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[0x3040] = 0x08; - let err = - ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).unwrap_err(); - assert_eq!(err, ElfParserError::OutOfBounds); - } - - // Check that an entrypoint not covered by function symbols fails - { - let mut elf_bytes = elf_bytes.clone(); - elf_bytes[0x0018] = 0x10; - let err = - 
ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).unwrap_err(); - assert_eq!(err, ElfParserError::InvalidFileHeader); - } - } - - #[test] - fn test_validate() { - let elf_bytes = std::fs::read("tests/elfs/relative_call_sbpfv0.so").unwrap(); - let elf = Elf64::parse(&elf_bytes).unwrap(); - let mut header = elf.file_header().clone(); - - let config = Config::default(); - - let write_header = |header: Elf64Ehdr| unsafe { - let mut bytes = elf_bytes.clone(); - std::ptr::write(bytes.as_mut_ptr().cast::(), header); - bytes - }; - - ElfExecutable::validate(&config, &elf, &elf_bytes).expect("validation failed"); - - header.e_ident.ei_class = ELFCLASS32; - let bytes = write_header(header.clone()); - // the new parser rejects anything other than ELFCLASS64 directly - Elf64::parse(&bytes).expect_err("allowed bad class"); - - header.e_ident.ei_class = ELFCLASS64; - let bytes = write_header(header.clone()); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect("validation failed"); - - header.e_ident.ei_data = ELFDATA2MSB; - let bytes = write_header(header.clone()); - // the new parser only supports little endian - Elf64::parse(&bytes).expect_err("allowed big endian"); - - header.e_ident.ei_data = ELFDATA2LSB; - let bytes = write_header(header.clone()); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect("validation failed"); - - header.e_ident.ei_osabi = 1; - let bytes = write_header(header.clone()); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect_err("allowed wrong abi"); - - header.e_ident.ei_osabi = ELFOSABI_NONE; - let bytes = write_header(header.clone()); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect("validation failed"); - - header.e_machine = 42; - let bytes = write_header(header.clone()); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect_err("allowed wrong machine"); - - header.e_machine = EM_BPF; - let bytes = write_header(header.clone()); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect("validation failed"); - - header.e_type = ET_REL; - let bytes = write_header(header); - ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes) - .expect_err("allowed wrong type"); - } - - #[test] - fn test_load() { - let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").expect("file open failed"); - let mut elf_bytes = Vec::new(); - file.read_to_end(&mut elf_bytes) - .expect("failed to read elf file"); - ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); - } - - #[test] - fn test_load_unaligned() { - let mut elf_bytes = - std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); - // The default allocator allocates aligned memory. Move the ELF slice to - // elf_bytes.as_ptr() + 1 to make it unaligned and test unaligned - // parsing. 
- elf_bytes.insert(0, 0); - ElfExecutable::load(&elf_bytes[1..], loader()).expect("validation failed"); - } - - #[test] - fn test_entrypoint() { - let loader = loader(); - - let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").expect("file open failed"); - let mut elf_bytes = Vec::new(); - file.read_to_end(&mut elf_bytes) - .expect("failed to read elf file"); - let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed"); - let parsed_elf = Elf64::parse(&elf_bytes).unwrap(); - let executable: &Executable = &elf; - assert_eq!(4, executable.get_entrypoint_instruction_offset()); - - let write_header = |header: Elf64Ehdr| unsafe { - let mut bytes = elf_bytes.clone(); - std::ptr::write(bytes.as_mut_ptr().cast::(), header); - bytes - }; - - let mut header = parsed_elf.file_header().clone(); - let initial_e_entry = header.e_entry; - - header.e_entry += 8; - let elf_bytes = write_header(header.clone()); - let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed"); - let executable: &Executable = &elf; - assert_eq!(5, executable.get_entrypoint_instruction_offset()); - - header.e_entry = 1; - let elf_bytes = write_header(header.clone()); - assert!(matches!( - ElfExecutable::load(&elf_bytes, loader.clone()), - Err(ElfError::EntrypointOutOfBounds) - )); - - header.e_entry = u64::MAX; - let elf_bytes = write_header(header.clone()); - assert!(matches!( - ElfExecutable::load(&elf_bytes, loader.clone()), - Err(ElfError::EntrypointOutOfBounds) - )); - - header.e_entry = initial_e_entry + ebpf::INSN_SIZE as u64 + 1; - let elf_bytes = write_header(header.clone()); - assert!(matches!( - ElfExecutable::load(&elf_bytes, loader.clone()), - Err(ElfError::InvalidEntrypoint) - )); - - header.e_entry = initial_e_entry; - let elf_bytes = write_header(header); - let elf = ElfExecutable::load(&elf_bytes, loader).expect("validation failed"); - let executable: &Executable = &elf; - assert_eq!(4, executable.get_entrypoint_instruction_offset()); - } - - #[test] - #[ignore] - fn test_fuzz_load() { - let loader = loader(); - - // Random bytes, will mostly fail due to lack of ELF header so just do a few - let mut rng = rand::thread_rng(); - let range = Uniform::new(0, 255); - println!("random bytes"); - for _ in 0..1_000 { - let elf_bytes: Vec = (0..100).map(|_| rng.sample(range)).collect(); - let _ = ElfExecutable::load(&elf_bytes, loader.clone()); - } - - // Take a real elf and mangle it - - let mut file = File::open("tests/elfs/noop.so").expect("file open failed"); - let mut elf_bytes = Vec::new(); - file.read_to_end(&mut elf_bytes) - .expect("failed to read elf file"); - let parsed_elf = Elf64::parse(&elf_bytes).unwrap(); - - // focus on elf header, small typically 64 bytes - println!("mangle elf header"); - fuzz( - &elf_bytes, - 1_000_000, - 100, - 0..parsed_elf.file_header().e_ehsize as usize, - 0..255, - |bytes: &mut [u8]| { - let _ = ElfExecutable::load(bytes, loader.clone()); - }, - ); - - // focus on section headers - println!("mangle section headers"); - fuzz( - &elf_bytes, - 1_000_000, - 100, - parsed_elf.file_header().e_shoff as usize..elf_bytes.len(), - 0..255, - |bytes: &mut [u8]| { - let _ = ElfExecutable::load(bytes, loader.clone()); - }, - ); - - // mangle whole elf randomly - println!("mangle whole elf"); - fuzz( - &elf_bytes, - 1_000_000, - 100, - 0..elf_bytes.len(), - 0..255, - |bytes: &mut [u8]| { - let _ = ElfExecutable::load(bytes, loader.clone()); - }, - ); - } - - fn new_section(sh_addr: u64, sh_size: u64) -> Elf64Shdr { - Elf64Shdr { - 
sh_addr, - sh_offset: sh_addr - .checked_sub(ebpf::MM_RODATA_START) - .unwrap_or(sh_addr), - sh_size, - sh_name: 0, - sh_type: 0, - sh_flags: 0, - sh_link: 0, - sh_info: 0, - sh_addralign: 0, - sh_entsize: 0, - } - } - - #[test] - fn test_owned_ro_sections_not_contiguous() { - let config = Config::default(); - let elf_bytes = [0u8; 512]; - - // there's a non-rodata section between two rodata sections - let s1 = new_section(10, 10); - let s2 = new_section(20, 10); - let s3 = new_section(30, 10); - - let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ - (Some(b".text"), &s1), - (Some(b".dynamic"), &s2), - (Some(b".rodata"), &s3), - ]; - assert!(matches!( - ElfExecutable::parse_ro_sections( - &config, - &SBPFVersion::V0, - sections, - &elf_bytes, - ), - Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize + 10 && data.len() == 30 - )); - } - - #[test] - fn test_owned_ro_sections_with_sh_offset() { - let config = Config { - reject_broken_elfs: false, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - // s2 is at a custom sh_offset. We need to merge into an owned buffer so - // s2 can be moved to the right address offset. - let s1 = new_section(10, 10); - let mut s2 = new_section(20, 10); - s2.sh_offset = 30; - - let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = - [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; - assert!(matches!( - ElfExecutable::parse_ro_sections( - &config, - &SBPFVersion::V0, - sections, - &elf_bytes, - ), - Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize + 10 && data.len() == 20 - )); - } - - #[test] - fn test_sh_offset_not_same_as_vaddr() { - let config = Config { - reject_broken_elfs: true, - enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - let mut s1 = new_section(10, 10); - - { - let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; - assert!(ElfExecutable::parse_ro_sections( - &config, - &SBPFVersion::V0, - sections, - &elf_bytes - ) - .is_ok()); - } - - s1.sh_offset = 0; - let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes), - Err(ElfError::ValueOutOfBounds) - ); - } - - #[test] - fn test_invalid_sh_offset_larger_than_vaddr() { - let config = Config { - reject_broken_elfs: true, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - let s1 = new_section(10, 10); - // sh_offset > sh_addr is invalid - let mut s2 = new_section(20, 10); - s2.sh_offset = 30; - - let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = - [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes,), - Err(ElfError::ValueOutOfBounds) - ); - } - - #[test] - fn test_reject_non_constant_sh_offset() { - let config = Config { - reject_broken_elfs: true, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - let mut s1 = new_section(ebpf::MM_RODATA_START + 10, 10); - let mut s2 = new_section(ebpf::MM_RODATA_START + 20, 10); - // The sections don't have a constant offset. 
This is rejected since it - // makes it impossible to efficiently map virtual addresses to byte - // offsets - s1.sh_offset = 100; - s2.sh_offset = 120; - - let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = - [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), - Err(ElfError::ValueOutOfBounds) - ); - } - - #[test] - fn test_borrowed_ro_sections_with_constant_sh_offset() { - let config = Config { - reject_broken_elfs: true, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - let mut s1 = new_section(ebpf::MM_RODATA_START + 10, 10); - let mut s2 = new_section(ebpf::MM_RODATA_START + 20, 10); - // the sections have a constant offset (100) - s1.sh_offset = 100; - s2.sh_offset = 110; - - let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = - [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), - Ok(Section::Borrowed( - ebpf::MM_RODATA_START as usize + 10, - 100..120 - )) - ); - } - - #[test] - fn test_owned_ro_region_no_initial_gap() { - let config = Config::default(); - let elf_bytes = [0u8; 512]; - - // need an owned buffer so we can zero the address space taken by s2 - let s1 = new_section(0, 10); - let s2 = new_section(10, 10); - let s3 = new_section(20, 10); - - let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ - (Some(b".text"), &s1), - (Some(b".dynamic"), &s2), - (Some(b".rodata"), &s3), - ]; - let ro_section = - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) - .unwrap(); - let ro_region = get_ro_region(&ro_section, &elf_bytes); - let owned_section = match &ro_section { - Section::Owned(_offset, data) => data.as_slice(), - _ => panic!(), - }; - - // [0..s3.sh_addr + s3.sh_size] is the valid ro memory area - assert!(matches!( - ro_region.vm_to_host(ebpf::MM_RODATA_START, s3.sh_addr + s3.sh_size), - ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, - )); - - // one byte past the ro section is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size - ); - } - - #[test] - fn test_owned_ro_region_initial_gap_mappable() { - let config = Config { - optimize_rodata: false, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - // the first section starts at a non-zero offset - let s1 = new_section(10, 10); - let s2 = new_section(20, 10); - let s3 = new_section(30, 10); - - let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ - (Some(b".text"), &s1), - (Some(b".dynamic"), &s2), - (Some(b".rodata"), &s3), - ]; - // V2 requires optimize_rodata=true - let ro_section = - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) - .unwrap(); - let ro_region = get_ro_region(&ro_section, &elf_bytes); - let owned_section = match &ro_section { - Section::Owned(_offset, data) => data.as_slice(), - _ => panic!(), - }; - - // [s1.sh_addr..s3.sh_addr + s3.sh_size] is where the readonly data is. - // But for backwards compatibility (config.optimize_rodata=false) - // [0..s1.sh_addr] is mappable too (and zeroed). 
- assert!(matches!( - ro_region.vm_to_host(ebpf::MM_RODATA_START, s3.sh_addr + s3.sh_size), - ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, - )); - - // one byte past the ro section is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size - ); - } - - #[test] - fn test_owned_ro_region_initial_gap_map_error() { - let config = Config::default(); - let elf_bytes = [0u8; 512]; - - // the first section starts at a non-zero offset - let s1 = new_section(10, 10); - let s2 = new_section(20, 10); - let s3 = new_section(30, 10); - - let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ - (Some(b".text"), &s1), - (Some(b".dynamic"), &s2), - (Some(b".rodata"), &s3), - ]; - let ro_section = - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) - .unwrap(); - let owned_section = match &ro_section { - Section::Owned(_offset, data) => data.as_slice(), - _ => panic!(), - }; - let ro_region = get_ro_region(&ro_section, &elf_bytes); - - // s1 starts at sh_addr=10 so [MM_RODATA_START..MM_RODATA_START + 10] is not mappable - - // the low bound of the initial gap is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START - ); - - // the hi bound of the initial gap is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_addr - 1, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + 9 - ); - - // [s1.sh_addr..s3.sh_addr + s3.sh_size] is the valid ro memory area - assert!(matches!( - ro_region.vm_to_host( - ebpf::MM_RODATA_START + s1.sh_addr, - s3.sh_addr + s3.sh_size - s1.sh_addr - ), - ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, - )); - - // one byte past the ro section is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size - ); - } - - #[test] - fn test_borrowed_ro_sections_disabled() { - let config = Config { - optimize_rodata: false, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - // s1 and s2 are contiguous, the rodata section can be borrowed from the - // original elf input but config.borrow_rodata=false - let s1 = new_section(0, 10); - let s2 = new_section(10, 10); - - let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = - [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; - assert!(matches!( - ElfExecutable::parse_ro_sections( - &config, - &SBPFVersion::V0, // v2 requires optimize_rodata=true - sections, - &elf_bytes, - ), - Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize && data.len() == 20 - )); - } - - #[test] - fn test_borrowed_ro_sections() { - let config = Config::default(); - let elf_bytes = [0u8; 512]; - for (vaddr_base, sbpf_version) in [ - (0, SBPFVersion::V0), - (ebpf::MM_RODATA_START, SBPFVersion::V3), - ] { - let s1 = new_section(vaddr_base, 10); - let s2 = new_section(vaddr_base + 20, 10); - let s3 = new_section(vaddr_base + 40, 10); - let s4 = new_section(vaddr_base + 50, 10); - let sections: [(Option<&[u8]>, &Elf64Shdr); 4] = [ - (Some(b".dynsym"), &s1), - (Some(b".text"), &s2), - (Some(b".rodata"), &s3), - (Some(b".dynamic"), &s4), - ]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes), - Ok(Section::Borrowed( - ebpf::MM_RODATA_START as usize + 20, - 20..50 - )) - ); 
- } - } - - #[test] - fn test_borrowed_ro_region_no_initial_gap() { - let config = Config::default(); - let elf_bytes = [0u8; 512]; - for (vaddr_base, sbpf_version) in [ - (0, SBPFVersion::V0), - (ebpf::MM_RODATA_START, SBPFVersion::V3), - ] { - let s1 = new_section(vaddr_base, 10); - let s2 = new_section(vaddr_base + 10, 10); - let s3 = new_section(vaddr_base + 20, 10); - let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ - (Some(b".text"), &s1), - (Some(b".rodata"), &s2), - (Some(b".dynamic"), &s3), - ]; - let ro_section = - ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes) - .unwrap(); - let ro_region = get_ro_region(&ro_section, &elf_bytes); - - // s1 starts at sh_offset=0 so [0..s2.sh_offset + s2.sh_size] - // is the valid ro memory area - assert!(matches!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_offset, s2.sh_offset + s2.sh_size), - ProgramResult::Ok(ptr) if ptr == elf_bytes.as_ptr() as u64, - )); - - // one byte past the ro section is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_offset, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s3.sh_offset - ); - } - } - - #[test] - fn test_borrowed_ro_region_initial_gap() { - let config = Config::default(); - let elf_bytes = [0u8; 512]; - for (vaddr_base, sbpf_version) in [ - (0, SBPFVersion::V0), - (ebpf::MM_RODATA_START, SBPFVersion::V3), - ] { - let s1 = new_section(vaddr_base, 10); - let s2 = new_section(vaddr_base + 10, 10); - let s3 = new_section(vaddr_base + 20, 10); - let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ - (Some(b".dynamic"), &s1), - (Some(b".text"), &s2), - (Some(b".rodata"), &s3), - ]; - let ro_section = - ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes) - .unwrap(); - let ro_region = get_ro_region(&ro_section, &elf_bytes); - - // s2 starts at sh_addr=10 so [0..10] is not mappable - - // the low bound of the initial gap is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_offset, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s1.sh_offset - ); - - // the hi bound of the initial gap is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s2.sh_offset - 1, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s2.sh_offset - 1 - ); - - // [s2.sh_offset..s3.sh_offset + s3.sh_size] is the valid ro memory area - assert!(matches!( - ro_region.vm_to_host( - ebpf::MM_RODATA_START + s2.sh_offset, - s3.sh_offset + s3.sh_size - s2.sh_offset - ), - ProgramResult::Ok(ptr) if ptr == elf_bytes[s2.sh_offset as usize..].as_ptr() as u64, - )); - - // one byte past the ro section is not mappable - assert_error!( - ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_offset + s3.sh_size, 1), - "InvalidVirtualAddress({})", - ebpf::MM_RODATA_START + s3.sh_offset + s3.sh_size - ); - } - } - - #[test] - fn test_reject_rodata_stack_overlap() { - let config = Config { - enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, - ..Config::default() - }; - let elf_bytes = [0u8; 512]; - - // no overlap - let mut s1 = new_section(ebpf::MM_STACK_START - 10, 10); - s1.sh_offset = 0; - let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; - assert!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes) - .is_ok() - ); - - // no overlap - let mut s1 = new_section(ebpf::MM_STACK_START, 0); - s1.sh_offset = 0; - let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; - assert!( - 
ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes) - .is_ok() - ); - - // overlap - let mut s1 = new_section(ebpf::MM_STACK_START, 1); - s1.sh_offset = 0; - let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), - Err(ElfError::ValueOutOfBounds) - ); - - // valid start but start + size overlap - let mut s1 = new_section(ebpf::MM_STACK_START - 10, 11); - s1.sh_offset = 0; - let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; - assert_eq!( - ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), - Err(ElfError::ValueOutOfBounds) - ); - } - - #[test] - #[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".data")"#)] - fn test_writable_data_section() { - let elf_bytes = - std::fs::read("tests/elfs/data_section_sbpfv0.so").expect("failed to read elf file"); - ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); - } - - #[test] - #[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".bss")"#)] - fn test_bss_section() { - let elf_bytes = - std::fs::read("tests/elfs/bss_section_sbpfv0.so").expect("failed to read elf file"); - ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); - } - - #[test] - #[should_panic(expected = "validation failed: InvalidProgramHeader")] - fn test_program_headers_overflow() { - let elf_bytes = std::fs::read("tests/elfs/program_headers_overflow.so") - .expect("failed to read elf file"); - ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); - } - - #[test] - #[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(8)")] - fn test_relative_call_oob_backward() { - let mut elf_bytes = - std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); - LittleEndian::write_i32(&mut elf_bytes[0x1044..0x1048], -11i32); - ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); - } - - #[test] - #[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(11)")] - fn test_relative_call_oob_forward() { - let mut elf_bytes = - std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); - LittleEndian::write_i32(&mut elf_bytes[0x105C..0x1060], 5); - ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); - } - - #[test] - #[should_panic(expected = "validation failed: UnresolvedSymbol(\"log\", 39, 312)")] - fn test_err_unresolved_syscall_reloc_64_32() { - let loader = BuiltinProgram::new_loader( - Config { - enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, - reject_broken_elfs: true, - ..Config::default() - }, - FunctionRegistry::default(), - ); - let elf_bytes = std::fs::read("tests/elfs/syscall_reloc_64_32_sbpfv0.so") - .expect("failed to read elf file"); - ElfExecutable::load(&elf_bytes, Arc::new(loader)).expect("validation failed"); - } - - #[test] - fn test_long_section_name() { - let elf_bytes = std::fs::read("tests/elfs/long_section_name.so").unwrap(); - assert_error!( - Elf64::parse(&elf_bytes), - "StringTooLong({:?}, {})", - ".bss.__rust_no_alloc_shim_is_unstable" - .get(0..SECTION_NAME_LENGTH_MAXIMUM) - .unwrap(), - SECTION_NAME_LENGTH_MAXIMUM - ); - } -} diff --git a/tests/elf.rs b/tests/elf.rs new file mode 100644 index 00000000..8a8d1102 --- /dev/null +++ b/tests/elf.rs @@ -0,0 +1,919 @@ +use byteorder::{ByteOrder, LittleEndian}; +use rand::{distributions::Uniform, 
Rng};
+use solana_sbpf::{
+    ebpf,
+    elf::{get_ro_region, ElfError, Executable, Section},
+    elf_parser::{
+        consts::{ELFCLASS32, ELFCLASS64, ELFDATA2LSB, ELFDATA2MSB, ELFOSABI_NONE, EM_BPF, ET_REL},
+        types::{Elf64Ehdr, Elf64Phdr, Elf64Shdr, Elf64Sym},
+        Elf64, ElfParserError, SECTION_NAME_LENGTH_MAXIMUM,
+    },
+    error::ProgramResult,
+    fuzz::fuzz,
+    program::{BuiltinFunction, BuiltinProgram, FunctionRegistry, SBPFVersion},
+    syscalls,
+    vm::{Config, TestContextObject},
+};
+use std::{fs::File, io::Read, sync::Arc};
+use test_utils::assert_error;
+
+type ElfExecutable = Executable<TestContextObject>;
+
+fn loader() -> Arc<BuiltinProgram<TestContextObject>> {
+    let mut function_registry = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+    function_registry
+        .register_function_hashed(*b"log", syscalls::SyscallString::vm)
+        .unwrap();
+    function_registry
+        .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
+        .unwrap();
+    Arc::new(BuiltinProgram::new_loader(
+        Config::default(),
+        function_registry,
+    ))
+}
+
+#[test]
+fn test_strict_header() {
+    let elf_bytes = std::fs::read("tests/elfs/strict_header.so").expect("failed to read elf file");
+    let loader = loader();
+
+    // Check that the unmodified file can be parsed
+    {
+        let loader = Arc::new(BuiltinProgram::new_loader(
+            Config {
+                enable_symbol_and_section_labels: true,
+                ..Config::default()
+            },
+            FunctionRegistry::<BuiltinFunction<TestContextObject>>::default(),
+        ));
+        let executable = ElfExecutable::load(&elf_bytes, loader.clone()).unwrap();
+        let (name, _pc) = executable.get_function_registry().lookup_by_key(4).unwrap();
+        assert_eq!(name, b"entrypoint");
+    }
+
+    // Check that using a reserved SBPF version fails
+    {
+        let mut elf_bytes = elf_bytes.clone();
+        elf_bytes[0x0030] = 0xFF;
+        let err = ElfExecutable::load(&elf_bytes, loader.clone()).unwrap_err();
+        assert_eq!(err, ElfError::UnsupportedSBPFVersion);
+    }
+
+    // Check that an empty file fails
+    let err = ElfExecutable::load_with_strict_parser(&[], loader.clone()).unwrap_err();
+    assert_eq!(err, ElfParserError::OutOfBounds);
+
+    // Break the file header one byte at a time
+    let expected_results = std::iter::repeat(&Err(ElfParserError::InvalidFileHeader))
+        .take(40)
+        .chain(std::iter::repeat(&Ok(())).take(12))
+        .chain(std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)).take(4))
+        .chain(std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader)).take(1))
+        .chain(std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)).take(3))
+        .chain(std::iter::repeat(&Ok(())).take(2))
+        .chain(std::iter::repeat(&Err(ElfParserError::InvalidFileHeader)).take(2));
+    for (offset, expected) in (0..std::mem::size_of::<Elf64Ehdr>()).zip(expected_results) {
+        let mut elf_bytes = elf_bytes.clone();
+        elf_bytes[offset] = 0xAF;
+        let result = ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).map(|_| ());
+        assert_eq!(&result, expected);
+    }
+
+    // Break the program header table one byte at a time
+    let expected_results_readonly = std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader))
+        .take(48)
+        .chain(std::iter::repeat(&Ok(())).take(8))
+        .collect::<Vec<_>>();
+    let expected_results_writable = std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader))
+        .take(40)
+        .chain(std::iter::repeat(&Ok(())).take(4))
+        .chain(std::iter::repeat(&Err(ElfParserError::InvalidProgramHeader)).take(4))
+        .chain(std::iter::repeat(&Ok(())).take(8))
+        .collect::<Vec<_>>();
+    let expected_results = vec![
+        expected_results_readonly.iter(),
+        expected_results_readonly.iter(),
+        expected_results_writable.iter(),
+        expected_results_writable.iter(),
+        expected_results_readonly.iter(),
+    ];
+    for (header_index, expected_results) in expected_results.into_iter().enumerate() {
+        for (offset, expected) in (std::mem::size_of::<Elf64Ehdr>()
+            + std::mem::size_of::<Elf64Phdr>() * header_index
+            ..std::mem::size_of::<Elf64Ehdr>()
+                + std::mem::size_of::<Elf64Phdr>() * (header_index + 1))
+            .zip(expected_results)
+        {
+            let mut elf_bytes = elf_bytes.clone();
+            elf_bytes[offset] = 0xAF;
+            let result =
+                ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).map(|_| ());
+            assert_eq!(&&result, expected);
+        }
+    }
+
+    // Break the dynamic symbol table one byte at a time
+    for index in 1..3 {
+        let expected_results = std::iter::repeat(&Ok(()))
+            .take(8)
+            .chain(std::iter::repeat(&Err(ElfParserError::OutOfBounds)).take(8))
+            .chain(std::iter::repeat(&Err(ElfParserError::InvalidSize)).take(1))
+            .chain(std::iter::repeat(&Err(ElfParserError::OutOfBounds)).take(7));
+        for (offset, expected) in (0x3000 + std::mem::size_of::<Elf64Sym>() * index
+            ..0x3000 + std::mem::size_of::<Elf64Sym>() * (index + 1))
+            .zip(expected_results)
+        {
+            let mut elf_bytes = elf_bytes.clone();
+            elf_bytes[offset] = 0xAF;
+            let result =
+                ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).map(|_| ());
+            assert_eq!(&result, expected);
+        }
+    }
+
+    // Check that an empty function symbol fails
+    {
+        let mut elf_bytes = elf_bytes.clone();
+        elf_bytes[0x3040] = 0x00;
+        let err = ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).unwrap_err();
+        assert_eq!(err, ElfParserError::InvalidSize);
+    }
+
+    // Check that bytecode not covered by function symbols fails
+    {
+        let mut elf_bytes = elf_bytes.clone();
+        elf_bytes[0x3040] = 0x08;
+        let err = ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).unwrap_err();
+        assert_eq!(err, ElfParserError::OutOfBounds);
+    }
+
+    // Check that an entrypoint not covered by function symbols fails
+    {
+        let mut elf_bytes = elf_bytes.clone();
+        elf_bytes[0x0018] = 0x10;
+        let err = ElfExecutable::load_with_strict_parser(&elf_bytes, loader.clone()).unwrap_err();
+        assert_eq!(err, ElfParserError::InvalidFileHeader);
+    }
+}
+
+#[test]
+fn test_validate() {
+    let elf_bytes = std::fs::read("tests/elfs/relative_call_sbpfv0.so").unwrap();
+    let elf = Elf64::parse(&elf_bytes).unwrap();
+    let mut header = elf.file_header().clone();
+
+    let config = Config::default();
+
+    let write_header = |header: Elf64Ehdr| unsafe {
+        let mut bytes = elf_bytes.clone();
+        std::ptr::write(bytes.as_mut_ptr().cast::<Elf64Ehdr>(), header);
+        bytes
+    };
+
+    ElfExecutable::validate(&config, &elf, &elf_bytes).expect("validation failed");
+
+    header.e_ident.ei_class = ELFCLASS32;
+    let bytes = write_header(header.clone());
+    // the new parser rejects anything other than ELFCLASS64 directly
+    Elf64::parse(&bytes).expect_err("allowed bad class");
+
+    header.e_ident.ei_class = ELFCLASS64;
+    let bytes = write_header(header.clone());
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect("validation failed");
+
+    header.e_ident.ei_data = ELFDATA2MSB;
+    let bytes = write_header(header.clone());
+    // the new parser only supports little endian
+    Elf64::parse(&bytes).expect_err("allowed big endian");
+
+    header.e_ident.ei_data = ELFDATA2LSB;
+    let bytes = write_header(header.clone());
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect("validation failed");
+
+    header.e_ident.ei_osabi = 1;
+    let bytes = write_header(header.clone());
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect_err("allowed wrong abi");
+
+    header.e_ident.ei_osabi = ELFOSABI_NONE;
+    let bytes = write_header(header.clone());
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect("validation failed");
+
+    header.e_machine = 42;
+    let bytes = write_header(header.clone());
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect_err("allowed wrong machine");
+
+    header.e_machine = EM_BPF;
+    let bytes = write_header(header.clone());
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect("validation failed");
+
+    header.e_type = ET_REL;
+    let bytes = write_header(header);
+    ElfExecutable::validate(&config, &Elf64::parse(&bytes).unwrap(), &elf_bytes)
+        .expect_err("allowed wrong type");
+}
+
+#[test]
+fn test_load() {
+    let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").expect("file open failed");
+    let mut elf_bytes = Vec::new();
+    file.read_to_end(&mut elf_bytes)
+        .expect("failed to read elf file");
+    ElfExecutable::load(&elf_bytes, loader()).expect("validation failed");
+}
+
+#[test]
+fn test_load_unaligned() {
+    let mut elf_bytes =
+        std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file");
+    // The default allocator allocates aligned memory. Move the ELF slice to
+    // elf_bytes.as_ptr() + 1 to make it unaligned and test unaligned
+    // parsing.
+    elf_bytes.insert(0, 0);
+    ElfExecutable::load(&elf_bytes[1..], loader()).expect("validation failed");
+}
+
+#[test]
+fn test_entrypoint() {
+    let loader = loader();
+
+    let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").expect("file open failed");
+    let mut elf_bytes = Vec::new();
+    file.read_to_end(&mut elf_bytes)
+        .expect("failed to read elf file");
+    let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed");
+    let parsed_elf = Elf64::parse(&elf_bytes).unwrap();
+    let executable: &Executable<TestContextObject> = &elf;
+    assert_eq!(4, executable.get_entrypoint_instruction_offset());
+
+    let write_header = |header: Elf64Ehdr| unsafe {
+        let mut bytes = elf_bytes.clone();
+        std::ptr::write(bytes.as_mut_ptr().cast::<Elf64Ehdr>(), header);
+        bytes
+    };
+
+    let mut header = parsed_elf.file_header().clone();
+    let initial_e_entry = header.e_entry;
+
+    header.e_entry += 8;
+    let elf_bytes = write_header(header.clone());
+    let elf = ElfExecutable::load(&elf_bytes, loader.clone()).expect("validation failed");
+    let executable: &Executable<TestContextObject> = &elf;
+    assert_eq!(5, executable.get_entrypoint_instruction_offset());
+
+    header.e_entry = 1;
+    let elf_bytes = write_header(header.clone());
+    assert!(matches!(
+        ElfExecutable::load(&elf_bytes, loader.clone()),
+        Err(ElfError::EntrypointOutOfBounds)
+    ));
+
+    header.e_entry = u64::MAX;
+    let elf_bytes = write_header(header.clone());
+    assert!(matches!(
+        ElfExecutable::load(&elf_bytes, loader.clone()),
+        Err(ElfError::EntrypointOutOfBounds)
+    ));
+
+    header.e_entry = initial_e_entry + ebpf::INSN_SIZE as u64 + 1;
+    let elf_bytes = write_header(header.clone());
+    assert!(matches!(
+        ElfExecutable::load(&elf_bytes, loader.clone()),
+        Err(ElfError::InvalidEntrypoint)
+    ));
+
+    header.e_entry = initial_e_entry;
+    let elf_bytes = write_header(header);
+    let elf = ElfExecutable::load(&elf_bytes, loader).expect("validation failed");
+    let executable: &Executable<TestContextObject> = &elf;
+    assert_eq!(4, executable.get_entrypoint_instruction_offset());
+}
+
+#[test]
+#[ignore]
+fn test_fuzz_load() {
+    let loader = loader();
+
+    // Random bytes, will mostly fail due to lack of ELF header so just do a few
+    let mut rng = rand::thread_rng();
+    let range = Uniform::new(0,
255); + println!("random bytes"); + for _ in 0..1_000 { + let elf_bytes: Vec = (0..100).map(|_| rng.sample(range)).collect(); + let _ = ElfExecutable::load(&elf_bytes, loader.clone()); + } + + // Take a real elf and mangle it + + let mut file = File::open("tests/elfs/noop.so").expect("file open failed"); + let mut elf_bytes = Vec::new(); + file.read_to_end(&mut elf_bytes) + .expect("failed to read elf file"); + let parsed_elf = Elf64::parse(&elf_bytes).unwrap(); + + // focus on elf header, small typically 64 bytes + println!("mangle elf header"); + fuzz( + &elf_bytes, + 1_000_000, + 100, + 0..parsed_elf.file_header().e_ehsize as usize, + 0..255, + |bytes: &mut [u8]| { + let _ = ElfExecutable::load(bytes, loader.clone()); + }, + ); + + // focus on section headers + println!("mangle section headers"); + fuzz( + &elf_bytes, + 1_000_000, + 100, + parsed_elf.file_header().e_shoff as usize..elf_bytes.len(), + 0..255, + |bytes: &mut [u8]| { + let _ = ElfExecutable::load(bytes, loader.clone()); + }, + ); + + // mangle whole elf randomly + println!("mangle whole elf"); + fuzz( + &elf_bytes, + 1_000_000, + 100, + 0..elf_bytes.len(), + 0..255, + |bytes: &mut [u8]| { + let _ = ElfExecutable::load(bytes, loader.clone()); + }, + ); +} + +fn new_section(sh_addr: u64, sh_size: u64) -> Elf64Shdr { + Elf64Shdr { + sh_addr, + sh_offset: sh_addr + .checked_sub(ebpf::MM_RODATA_START) + .unwrap_or(sh_addr), + sh_size, + sh_name: 0, + sh_type: 0, + sh_flags: 0, + sh_link: 0, + sh_info: 0, + sh_addralign: 0, + sh_entsize: 0, + } +} + +#[test] +fn test_owned_ro_sections_not_contiguous() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + + // there's a non-rodata section between two rodata sections + let s1 = new_section(10, 10); + let s2 = new_section(20, 10); + let s3 = new_section(30, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + assert!(matches!( + ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V0, + sections, + &elf_bytes, + ), + Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize + 10 && data.len() == 30 + )); +} + +#[test] +fn test_owned_ro_sections_with_sh_offset() { + let config = Config { + reject_broken_elfs: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // s2 is at a custom sh_offset. We need to merge into an owned buffer so + // s2 can be moved to the right address offset. 
+ let s1 = new_section(10, 10); + let mut s2 = new_section(20, 10); + s2.sh_offset = 30; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert!(matches!( + ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V0, + sections, + &elf_bytes, + ), + Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize + 10 && data.len() == 20 + )); +} + +#[test] +fn test_sh_offset_not_same_as_vaddr() { + let config = Config { + reject_broken_elfs: true, + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let mut s1 = new_section(10, 10); + + { + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes) + .is_ok() + ); + } + + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); +} + +#[test] +fn test_invalid_sh_offset_larger_than_vaddr() { + let config = Config { + reject_broken_elfs: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let s1 = new_section(10, 10); + // sh_offset > sh_addr is invalid + let mut s2 = new_section(20, 10); + s2.sh_offset = 30; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V2, sections, &elf_bytes,), + Err(ElfError::ValueOutOfBounds) + ); +} + +#[test] +fn test_reject_non_constant_sh_offset() { + let config = Config { + reject_broken_elfs: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let mut s1 = new_section(ebpf::MM_RODATA_START + 10, 10); + let mut s2 = new_section(ebpf::MM_RODATA_START + 20, 10); + // The sections don't have a constant offset. 
This is rejected since it + // makes it impossible to efficiently map virtual addresses to byte + // offsets + s1.sh_offset = 100; + s2.sh_offset = 120; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); +} + +#[test] +fn test_borrowed_ro_sections_with_constant_sh_offset() { + let config = Config { + reject_broken_elfs: true, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + let mut s1 = new_section(ebpf::MM_RODATA_START + 10, 10); + let mut s2 = new_section(ebpf::MM_RODATA_START + 20, 10); + // the sections have a constant offset (100) + s1.sh_offset = 100; + s2.sh_offset = 110; + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), + Ok(Section::Borrowed( + ebpf::MM_RODATA_START as usize + 10, + 100..120 + )) + ); +} + +#[test] +fn test_owned_ro_region_no_initial_gap() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + + // need an owned buffer so we can zero the address space taken by s2 + let s1 = new_section(0, 10); + let s2 = new_section(10, 10); + let s3 = new_section(20, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes).unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + let owned_section = match &ro_section { + Section::Owned(_offset, data) => data.as_slice(), + _ => panic!(), + }; + + // [0..s3.sh_addr + s3.sh_size] is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host(ebpf::MM_RODATA_START, s3.sh_addr + s3.sh_size), + ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size + ); +} + +#[test] +fn test_owned_ro_region_initial_gap_mappable() { + let config = Config { + optimize_rodata: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // the first section starts at a non-zero offset + let s1 = new_section(10, 10); + let s2 = new_section(20, 10); + let s3 = new_section(30, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + // V2 requires optimize_rodata=true + let ro_section = + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes).unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + let owned_section = match &ro_section { + Section::Owned(_offset, data) => data.as_slice(), + _ => panic!(), + }; + + // [s1.sh_addr..s3.sh_addr + s3.sh_size] is where the readonly data is. + // But for backwards compatibility (config.optimize_rodata=false) + // [0..s1.sh_addr] is mappable too (and zeroed). 
+ assert!(matches!( + ro_region.vm_to_host(ebpf::MM_RODATA_START, s3.sh_addr + s3.sh_size), + ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size + ); +} + +#[test] +fn test_owned_ro_region_initial_gap_map_error() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + + // the first section starts at a non-zero offset + let s1 = new_section(10, 10); + let s2 = new_section(20, 10); + let s3 = new_section(30, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".dynamic"), &s2), + (Some(b".rodata"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V0, sections, &elf_bytes).unwrap(); + let owned_section = match &ro_section { + Section::Owned(_offset, data) => data.as_slice(), + _ => panic!(), + }; + let ro_region = get_ro_region(&ro_section, &elf_bytes); + + // s1 starts at sh_addr=10 so [MM_RODATA_START..MM_RODATA_START + 10] is not mappable + + // the low bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + ); + + // the hi bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_addr - 1, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + 9 + ); + + // [s1.sh_addr..s3.sh_addr + s3.sh_size] is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host( + ebpf::MM_RODATA_START + s1.sh_addr, + s3.sh_addr + s3.sh_size - s1.sh_addr + ), + ProgramResult::Ok(ptr) if ptr == owned_section.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s3.sh_addr + s3.sh_size + ); +} + +#[test] +fn test_borrowed_ro_sections_disabled() { + let config = Config { + optimize_rodata: false, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // s1 and s2 are contiguous, the rodata section can be borrowed from the + // original elf input but config.borrow_rodata=false + let s1 = new_section(0, 10); + let s2 = new_section(10, 10); + + let sections: [(Option<&[u8]>, &Elf64Shdr); 2] = + [(Some(b".text"), &s1), (Some(b".rodata"), &s2)]; + assert!(matches!( + ElfExecutable::parse_ro_sections( + &config, + &SBPFVersion::V0, // v2 requires optimize_rodata=true + sections, + &elf_bytes, + ), + Ok(Section::Owned(offset, data)) if offset == ebpf::MM_RODATA_START as usize && data.len() == 20 + )); +} + +#[test] +fn test_borrowed_ro_sections() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + for (vaddr_base, sbpf_version) in [ + (0, SBPFVersion::V0), + (ebpf::MM_RODATA_START, SBPFVersion::V3), + ] { + let s1 = new_section(vaddr_base, 10); + let s2 = new_section(vaddr_base + 20, 10); + let s3 = new_section(vaddr_base + 40, 10); + let s4 = new_section(vaddr_base + 50, 10); + let sections: [(Option<&[u8]>, &Elf64Shdr); 4] = [ + (Some(b".dynsym"), &s1), + (Some(b".text"), &s2), + (Some(b".rodata"), &s3), + (Some(b".dynamic"), &s4), + ]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes), + Ok(Section::Borrowed( + ebpf::MM_RODATA_START as usize + 20, + 20..50 + )) + ); + } +} + 
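// Editor's sketch (not part of the patch): the property the Borrowed/Owned
// assertions above revolve around. `Section` is re-declared here purely for
// illustration; the real type lives in solana_sbpf::elf. Borrowed keeps a
// constant delta between guest virtual addresses and byte offsets into the
// original ELF input, which is exactly what the constant-sh_offset tests
// check; Owned re-packs the bytes so the buffer is indexed directly.
use std::ops::Range;

enum Section {
    Owned(usize, Vec<u8>),
    Borrowed(usize, Range<usize>),
}

// Hypothetical helper mirroring how vm_to_host is exercised in these tests.
fn vaddr_to_offset(section: &Section, vaddr: usize) -> Option<usize> {
    match section {
        Section::Borrowed(start, file_range) => {
            let delta = vaddr.checked_sub(*start)?;
            let offset = file_range.start.checked_add(delta)?;
            (offset < file_range.end).then_some(offset)
        }
        Section::Owned(start, data) => {
            let delta = vaddr.checked_sub(*start)?;
            (delta < data.len()).then_some(delta)
        }
    }
}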
+#[test] +fn test_borrowed_ro_region_no_initial_gap() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + for (vaddr_base, sbpf_version) in [ + (0, SBPFVersion::V0), + (ebpf::MM_RODATA_START, SBPFVersion::V3), + ] { + let s1 = new_section(vaddr_base, 10); + let s2 = new_section(vaddr_base + 10, 10); + let s3 = new_section(vaddr_base + 20, 10); + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".text"), &s1), + (Some(b".rodata"), &s2), + (Some(b".dynamic"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes).unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + + // s1 starts at sh_offset=0 so [0..s2.sh_offset + s2.sh_size] + // is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_offset, s2.sh_offset + s2.sh_size), + ProgramResult::Ok(ptr) if ptr == elf_bytes.as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_offset, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s3.sh_offset + ); + } +} + +#[test] +fn test_borrowed_ro_region_initial_gap() { + let config = Config::default(); + let elf_bytes = [0u8; 512]; + for (vaddr_base, sbpf_version) in [ + (0, SBPFVersion::V0), + (ebpf::MM_RODATA_START, SBPFVersion::V3), + ] { + let s1 = new_section(vaddr_base, 10); + let s2 = new_section(vaddr_base + 10, 10); + let s3 = new_section(vaddr_base + 20, 10); + let sections: [(Option<&[u8]>, &Elf64Shdr); 3] = [ + (Some(b".dynamic"), &s1), + (Some(b".text"), &s2), + (Some(b".rodata"), &s3), + ]; + let ro_section = + ElfExecutable::parse_ro_sections(&config, &sbpf_version, sections, &elf_bytes).unwrap(); + let ro_region = get_ro_region(&ro_section, &elf_bytes); + + // s2 starts at sh_addr=10 so [0..10] is not mappable + + // the low bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s1.sh_offset, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s1.sh_offset + ); + + // the hi bound of the initial gap is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s2.sh_offset - 1, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s2.sh_offset - 1 + ); + + // [s2.sh_offset..s3.sh_offset + s3.sh_size] is the valid ro memory area + assert!(matches!( + ro_region.vm_to_host( + ebpf::MM_RODATA_START + s2.sh_offset, + s3.sh_offset + s3.sh_size - s2.sh_offset + ), + ProgramResult::Ok(ptr) if ptr == elf_bytes[s2.sh_offset as usize..].as_ptr() as u64, + )); + + // one byte past the ro section is not mappable + assert_error!( + ro_region.vm_to_host(ebpf::MM_RODATA_START + s3.sh_offset + s3.sh_size, 1), + "InvalidVirtualAddress({})", + ebpf::MM_RODATA_START + s3.sh_offset + s3.sh_size + ); + } +} + +#[test] +fn test_reject_rodata_stack_overlap() { + let config = Config { + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V3, + ..Config::default() + }; + let elf_bytes = [0u8; 512]; + + // no overlap + let mut s1 = new_section(ebpf::MM_STACK_START - 10, 10); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes).is_ok() + ); + + // no overlap + let mut s1 = new_section(ebpf::MM_STACK_START, 0); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert!( + 
ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes).is_ok() + ); + + // overlap + let mut s1 = new_section(ebpf::MM_STACK_START, 1); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); + + // valid start but start + size overlap + let mut s1 = new_section(ebpf::MM_STACK_START - 10, 11); + s1.sh_offset = 0; + let sections: [(Option<&[u8]>, &Elf64Shdr); 1] = [(Some(b".text"), &s1)]; + assert_eq!( + ElfExecutable::parse_ro_sections(&config, &SBPFVersion::V3, sections, &elf_bytes), + Err(ElfError::ValueOutOfBounds) + ); +} + +#[test] +#[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".data")"#)] +fn test_writable_data_section() { + let elf_bytes = + std::fs::read("tests/elfs/data_section_sbpfv0.so").expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); +} + +#[test] +#[should_panic(expected = r#"validation failed: WritableSectionNotSupported(".bss")"#)] +fn test_bss_section() { + let elf_bytes = + std::fs::read("tests/elfs/bss_section_sbpfv0.so").expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); +} + +#[test] +#[should_panic(expected = "validation failed: InvalidProgramHeader")] +fn test_program_headers_overflow() { + let elf_bytes = + std::fs::read("tests/elfs/program_headers_overflow.so").expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); +} + +#[test] +#[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(8)")] +fn test_relative_call_oob_backward() { + let mut elf_bytes = + std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); + LittleEndian::write_i32(&mut elf_bytes[0x1044..0x1048], -11i32); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); +} + +#[test] +#[should_panic(expected = "validation failed: RelativeJumpOutOfBounds(11)")] +fn test_relative_call_oob_forward() { + let mut elf_bytes = + std::fs::read("tests/elfs/relative_call_sbpfv0.so").expect("failed to read elf file"); + LittleEndian::write_i32(&mut elf_bytes[0x105C..0x1060], 5); + ElfExecutable::load(&elf_bytes, loader()).expect("validation failed"); +} + +#[test] +#[should_panic(expected = "validation failed: UnresolvedSymbol(\"log\", 39, 312)")] +fn test_err_unresolved_syscall_reloc_64_32() { + let loader = BuiltinProgram::new_loader( + Config { + enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, + reject_broken_elfs: true, + ..Config::default() + }, + FunctionRegistry::default(), + ); + let elf_bytes = + std::fs::read("tests/elfs/syscall_reloc_64_32_sbpfv0.so").expect("failed to read elf file"); + ElfExecutable::load(&elf_bytes, Arc::new(loader)).expect("validation failed"); +} + +#[test] +fn test_long_section_name() { + let elf_bytes = std::fs::read("tests/elfs/long_section_name.so").unwrap(); + assert_error!( + Elf64::parse(&elf_bytes), + "StringTooLong({:?}, {})", + ".bss.__rust_no_alloc_shim_is_unstable" + .get(0..SECTION_NAME_LENGTH_MAXIMUM) + .unwrap(), + SECTION_NAME_LENGTH_MAXIMUM + ); +} From 894f5a65c06dadea4388e0f8272d794d04f6d80a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?= Date: Tue, 7 Jan 2025 18:18:48 +0000 Subject: [PATCH 17/18] Moves the tests of jit.rs into separate files. 
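Everything these relocated tests exercise must now be reachable through the
crate's public interface: the jit module and its machine-code length constants
become pub, and RuntimeEnvironmentSlot moves from jit.rs into vm.rs as a public
enum. As a hedged illustration (not part of the diff below), an external
consumer of the newly public surface would look like this, assuming the
solana_sbpf crate name used throughout this series:

    use solana_sbpf::{jit::MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION, vm::RuntimeEnvironmentSlot};

    fn main() {
        // Integration tests under tests/ link against the crate like any
        // downstream user, so both items need `pub` visibility.
        println!("{}", MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION);
        println!("{}", RuntimeEnvironmentSlot::CallDepth as usize);
    }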
--- src/jit.rs | 262 +++++---------------------------------------------- src/lib.rs | 2 +- src/vm.rs | 24 +++++ tests/jit.rs | 182 +++++++++++++++++++++++++++++++++++ tests/vm.rs | 51 ++++++++++ 5 files changed, 283 insertions(+), 238 deletions(-) create mode 100644 tests/jit.rs create mode 100644 tests/vm.rs diff --git a/src/jit.rs b/src/jit.rs index 110094ce..2eac982f 100644 --- a/src/jit.rs +++ b/src/jit.rs @@ -1,4 +1,5 @@ -#![allow(clippy::arithmetic_side_effects)] +//! Just-in-time compiler (Linux x86, macOS x86) + // Derived from uBPF // Copyright 2015 Big Switch Networks, Inc // (uBPF: JIT algorithm, originally in C) @@ -10,6 +11,8 @@ // the MIT license , at your option. This file may not be // copied, modified, or distributed except according to those terms. +#![allow(clippy::arithmetic_side_effects)] + #[cfg(not(feature = "shuttle-test"))] use rand::{thread_rng, Rng}; @@ -32,7 +35,7 @@ use crate::{ }, memory_region::MemoryMapping, program::BuiltinFunction, - vm::{get_runtime_environment_key, Config, ContextObject, EbpfVm}, + vm::{get_runtime_environment_key, Config, ContextObject, EbpfVm, RuntimeEnvironmentSlot}, x86::{ FenceType, X86IndirectAccess, X86Instruction, X86Register::{self, *}, @@ -40,11 +43,16 @@ use crate::{ }, }; -const MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH: usize = 4096; -const MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION: usize = 110; -const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 24; -const MAX_START_PADDING_LENGTH: usize = 256; +/// The maximum machine code length in bytes of a program with no guest instructions +pub const MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH: usize = 4096; +/// The maximum machine code length in bytes of a single guest instruction +pub const MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION: usize = 110; +/// The maximum machine code length in bytes of an instruction meter checkpoint +pub const MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT: usize = 24; +/// The maximum machine code length of the randomized padding +pub const MAX_START_PADDING_LENGTH: usize = 256; +/// The program compiled to native host machinecode pub struct JitProgram { /// OS page size in bytes and the alignment of the sections page_size: usize, @@ -106,7 +114,7 @@ impl JitProgram { Ok(()) } - pub fn invoke( + pub(crate) fn invoke( &self, _config: &Config, vm: &mut EbpfVm, @@ -149,10 +157,12 @@ impl JitProgram { } } + /// The length of the host machinecode in bytes pub fn machine_code_length(&self) -> usize { self.text_section.len() } + /// The total memory used in bytes rounded up to page boundaries pub fn mem_size(&self) -> usize { let pc_loc_table_size = round_to_page_size(std::mem::size_of_val(self.pc_section), self.page_size); @@ -230,12 +240,18 @@ const REGISTER_INSTRUCTION_METER: X86Register = CALLER_SAVED_REGISTERS[7]; /// R11: Scratch register const REGISTER_SCRATCH: X86Register = CALLER_SAVED_REGISTERS[8]; +/// Bit width of an instruction operand #[derive(Copy, Clone, Debug)] pub enum OperandSize { + /// Empty S0 = 0, + /// 8 bit S8 = 8, + /// 16 bit S16 = 16, + /// 32 bit S32 = 32, + /// 64 bit S64 = 64, } @@ -258,20 +274,6 @@ struct Jump { target_pc: usize, } -/// Indices of slots inside RuntimeEnvironment -enum RuntimeEnvironmentSlot { - HostStackPointer = 0, - CallDepth = 1, - ContextObjectPointer = 2, - PreviousInstructionMeter = 3, - DueInsnCount = 4, - StopwatchNumerator = 5, - StopwatchDenominator = 6, - Registers = 7, - ProgramResult = 19, - MemoryMapping = 27, -} - /* Explanation of the Instruction Meter The instruction meter serves two purposes: First, 
measure how many BPF instructions are @@ -321,6 +323,7 @@ enum RuntimeEnvironmentSlot { and undo again can be anything, so we just set it to zero. */ +/// Temporary object which stores the compilation context pub struct JitCompiler<'a, C: ContextObject> { result: JitProgram, text_section_jumps: Vec, @@ -872,7 +875,7 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { // This function helps the optimizer to inline the machinecode emission while avoiding stack allocations #[inline(always)] - pub fn emit_ins(&mut self, instruction: X86Instruction) { + fn emit_ins(&mut self, instruction: X86Instruction) { instruction.emit(self); if self.next_noop_insertion == 0 { self.next_noop_insertion = self.noop_range.sample(&mut self.diversification_rng); @@ -1680,218 +1683,3 @@ impl<'a, C: ContextObject> JitCompiler<'a, C> { } } } - -#[cfg(all(test, target_arch = "x86_64", not(target_os = "windows")))] -mod tests { - use super::*; - use crate::{ - disassembler::disassemble_instruction, - program::{BuiltinProgram, FunctionRegistry, SBPFVersion}, - static_analysis::CfgNode, - syscalls, - vm::TestContextObject, - }; - use byteorder::{ByteOrder, LittleEndian}; - use std::{collections::BTreeMap, sync::Arc}; - - #[test] - fn test_runtime_environment_slots() { - let executable = create_mockup_executable(Config::default(), &[]); - let mut context_object = TestContextObject::new(0); - let env = EbpfVm::new( - executable.get_loader().clone(), - executable.get_sbpf_version(), - &mut context_object, - MemoryMapping::new_identity(), - 0, - ); - - macro_rules! check_slot { - ($env:expr, $entry:ident, $slot:ident) => { - assert_eq!( - unsafe { - std::ptr::addr_of!($env.$entry) - .cast::() - .offset_from(std::ptr::addr_of!($env).cast::()) as usize - }, - RuntimeEnvironmentSlot::$slot as usize, - ); - }; - } - - check_slot!(env, host_stack_pointer, HostStackPointer); - check_slot!(env, call_depth, CallDepth); - check_slot!(env, context_object_pointer, ContextObjectPointer); - check_slot!(env, previous_instruction_meter, PreviousInstructionMeter); - check_slot!(env, due_insn_count, DueInsnCount); - check_slot!(env, stopwatch_numerator, StopwatchNumerator); - check_slot!(env, stopwatch_denominator, StopwatchDenominator); - check_slot!(env, registers, Registers); - check_slot!(env, program_result, ProgramResult); - check_slot!(env, memory_mapping, MemoryMapping); - } - - fn create_mockup_executable(config: Config, program: &[u8]) -> Executable { - let sbpf_version = *config.enabled_sbpf_versions.end(); - let mut loader = BuiltinProgram::new_loader_with_dense_registration(config); - loader - .register_function("gather_bytes", 1, syscalls::SyscallGatherBytes::vm) - .unwrap(); - let mut function_registry = FunctionRegistry::default(); - function_registry - .register_function(8, *b"function_foo", 8) - .unwrap(); - Executable::::from_text_bytes( - program, - Arc::new(loader), - sbpf_version, - function_registry, - ) - .unwrap() - } - - #[test] - fn test_code_length_estimate() { - const INSTRUCTION_COUNT: usize = 256; - let mut prog = vec![0; ebpf::INSN_SIZE * INSTRUCTION_COUNT]; - for pc in 0..INSTRUCTION_COUNT { - prog[pc * ebpf::INSN_SIZE] = ebpf::ADD64_IMM; - } - - let mut empty_program_machine_code_length_per_version = [0; 4]; - for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { - let empty_program_machine_code_length = { - let config = Config { - noop_instruction_rate: 0, - enabled_sbpf_versions: sbpf_version..=sbpf_version, - ..Config::default() - }; - let mut executable = create_mockup_executable(config, 
&prog[0..0]); - Executable::::jit_compile(&mut executable).unwrap(); - executable - .get_compiled_program() - .unwrap() - .machine_code_length() - }; - assert!(empty_program_machine_code_length <= MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH); - empty_program_machine_code_length_per_version[sbpf_version as usize] = - empty_program_machine_code_length; - } - - let mut instruction_meter_checkpoint_machine_code_length = [0; 2]; - for (index, machine_code_length) in instruction_meter_checkpoint_machine_code_length - .iter_mut() - .enumerate() - { - let config = Config { - instruction_meter_checkpoint_distance: index * INSTRUCTION_COUNT * 2, - noop_instruction_rate: 0, - enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0, - ..Config::default() - }; - let mut executable = create_mockup_executable(config, &prog); - Executable::::jit_compile(&mut executable).unwrap(); - *machine_code_length = (executable - .get_compiled_program() - .unwrap() - .machine_code_length() - - empty_program_machine_code_length_per_version[0]) - / INSTRUCTION_COUNT; - } - let instruction_meter_checkpoint_machine_code_length = - instruction_meter_checkpoint_machine_code_length[0] - - instruction_meter_checkpoint_machine_code_length[1]; - assert!( - instruction_meter_checkpoint_machine_code_length - <= MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT - ); - - let mut cfg_nodes = BTreeMap::new(); - cfg_nodes.insert( - 8, - CfgNode { - label: std::string::String::from("label"), - ..CfgNode::default() - }, - ); - - for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] { - println!("opcode;machine_code_length_per_instruction;assembly"); - let empty_program_machine_code_length = - empty_program_machine_code_length_per_version[sbpf_version as usize]; - - for mut opcode in 0x00..=0xFF { - let (registers, immediate) = match opcode { - 0x85 if !sbpf_version.static_syscalls() => (0x00, Some(8)), - 0x85 if sbpf_version.static_syscalls() => (0x00, None), - 0x8D => (0x88, Some(0)), - 0x95 if sbpf_version.static_syscalls() => (0x00, Some(1)), - 0xE5 if !sbpf_version.static_syscalls() => { - // Put external function calls on a separate loop iteration - opcode = 0x85; - (0x00, Some(0x91020CDD)) - } - 0xF5 => { - // Put invalid function calls on a separate loop iteration - opcode = 0x85; - (0x00, Some(0x91020CD0)) - } - 0xD4 | 0xDC => (0x88, Some(16)), - _ => (0x88, Some(0x11223344)), - }; - for pc in 0..INSTRUCTION_COUNT { - prog[pc * ebpf::INSN_SIZE] = opcode; - prog[pc * ebpf::INSN_SIZE + 1] = registers; - let offset = 7_u16.wrapping_sub(pc as u16); - LittleEndian::write_u16(&mut prog[pc * ebpf::INSN_SIZE + 2..], offset); - let immediate = immediate.unwrap_or_else(|| 7_u32.wrapping_sub(pc as u32)); - LittleEndian::write_u32(&mut prog[pc * ebpf::INSN_SIZE + 4..], immediate); - } - let config = Config { - noop_instruction_rate: 0, - enabled_sbpf_versions: sbpf_version..=sbpf_version, - ..Config::default() - }; - let mut executable = create_mockup_executable(config, &prog); - let result = Executable::::jit_compile(&mut executable); - if result.is_err() { - assert!(matches!( - result.unwrap_err(), - EbpfError::UnsupportedInstruction - )); - continue; - } - let machine_code_length = executable - .get_compiled_program() - .unwrap() - .machine_code_length() - - empty_program_machine_code_length; - let instruction_count = if opcode == 0x18 { - // LDDW takes two slots - INSTRUCTION_COUNT / 2 - } else { - INSTRUCTION_COUNT - }; - let machine_code_length_per_instruction = - machine_code_length as f64 / instruction_count as f64; - assert!( - 
f64::ceil(machine_code_length_per_instruction) as usize
-                        <= MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION
-                );
-                let insn = ebpf::get_insn_unchecked(&prog, 0);
-                let assembly = disassemble_instruction(
-                    &insn,
-                    0,
-                    &cfg_nodes,
-                    executable.get_function_registry(),
-                    executable.get_loader(),
-                    executable.get_sbpf_version(),
-                );
-                println!(
-                    "{:02X};{:>7.3};{}",
-                    opcode, machine_code_length_per_instruction, assembly
-                );
-            }
-        }
-    }
-}
diff --git a/src/lib.rs b/src/lib.rs
index 3e1c6176..57178253 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -35,7 +35,7 @@ pub mod fuzz;
 pub mod insn_builder;
 pub mod interpreter;
 #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
-mod jit;
+pub mod jit;
 #[cfg(all(feature = "jit", not(target_os = "windows"), target_arch = "x86_64"))]
 mod memory_management;
 pub mod memory_region;
diff --git a/src/vm.rs b/src/vm.rs
index e1312c94..9aae8514 100644
--- a/src/vm.rs
+++ b/src/vm.rs
@@ -227,6 +227,30 @@ pub struct CallFrame {
     pub target_pc: u64,
 }
 
+/// Indices of slots inside [EbpfVm]
+pub enum RuntimeEnvironmentSlot {
+    /// [EbpfVm::host_stack_pointer]
+    HostStackPointer = 0,
+    /// [EbpfVm::call_depth]
+    CallDepth = 1,
+    /// [EbpfVm::context_object_pointer]
+    ContextObjectPointer = 2,
+    /// [EbpfVm::previous_instruction_meter]
+    PreviousInstructionMeter = 3,
+    /// [EbpfVm::due_insn_count]
+    DueInsnCount = 4,
+    /// [EbpfVm::stopwatch_numerator]
+    StopwatchNumerator = 5,
+    /// [EbpfVm::stopwatch_denominator]
+    StopwatchDenominator = 6,
+    /// [EbpfVm::registers]
+    Registers = 7,
+    /// [EbpfVm::program_result]
+    ProgramResult = 19,
+    /// [EbpfVm::memory_mapping]
+    MemoryMapping = 27,
+}
+
 /// A virtual machine to run eBPF programs.
 ///
 /// # Examples
diff --git a/tests/jit.rs b/tests/jit.rs
new file mode 100644
index 00000000..e06b9686
--- /dev/null
+++ b/tests/jit.rs
@@ -0,0 +1,182 @@
+#![cfg(all(test, target_arch = "x86_64", not(target_os = "windows")))]
+
+use byteorder::{ByteOrder, LittleEndian};
+use solana_sbpf::{
+    disassembler::disassemble_instruction,
+    ebpf,
+    elf::Executable,
+    error::EbpfError,
+    jit::{
+        MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT, MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH,
+        MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION,
+    },
+    program::{BuiltinProgram, FunctionRegistry, SBPFVersion},
+    static_analysis::CfgNode,
+    syscalls,
+    vm::{Config, TestContextObject},
+};
+use std::{collections::BTreeMap, sync::Arc};
+
+fn create_mockup_executable(config: Config, program: &[u8]) -> Executable<TestContextObject> {
+    let sbpf_version = *config.enabled_sbpf_versions.end();
+    let mut loader = BuiltinProgram::new_loader_with_dense_registration(config);
+    loader
+        .register_function("gather_bytes", 1, syscalls::SyscallGatherBytes::vm)
+        .unwrap();
+    let mut function_registry = FunctionRegistry::default();
+    function_registry
+        .register_function(8, *b"function_foo", 8)
+        .unwrap();
+    Executable::<TestContextObject>::from_text_bytes(
+        program,
+        Arc::new(loader),
+        sbpf_version,
+        function_registry,
+    )
+    .unwrap()
+}
+
+#[test]
+fn test_code_length_estimate() {
+    const INSTRUCTION_COUNT: usize = 256;
+    let mut prog = vec![0; ebpf::INSN_SIZE * INSTRUCTION_COUNT];
+    for pc in 0..INSTRUCTION_COUNT {
+        prog[pc * ebpf::INSN_SIZE] = ebpf::ADD64_IMM;
+    }
+
+    let mut empty_program_machine_code_length_per_version = [0; 4];
+    for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] {
+        let empty_program_machine_code_length = {
+            let config = Config {
+                noop_instruction_rate: 0,
+                enabled_sbpf_versions: sbpf_version..=sbpf_version,
+                ..Config::default()
+            };
+            let mut executable = create_mockup_executable(config, &prog[0..0]);
+            Executable::<TestContextObject>::jit_compile(&mut executable).unwrap();
+            executable
+                .get_compiled_program()
+                .unwrap()
+                .machine_code_length()
+        };
+        assert!(empty_program_machine_code_length <= MAX_EMPTY_PROGRAM_MACHINE_CODE_LENGTH);
+        empty_program_machine_code_length_per_version[sbpf_version as usize] =
+            empty_program_machine_code_length;
+    }
+
+    let mut instruction_meter_checkpoint_machine_code_length = [0; 2];
+    for (index, machine_code_length) in instruction_meter_checkpoint_machine_code_length
+        .iter_mut()
+        .enumerate()
+    {
+        let config = Config {
+            instruction_meter_checkpoint_distance: index * INSTRUCTION_COUNT * 2,
+            noop_instruction_rate: 0,
+            enabled_sbpf_versions: SBPFVersion::V0..=SBPFVersion::V0,
+            ..Config::default()
+        };
+        let mut executable = create_mockup_executable(config, &prog);
+        Executable::<TestContextObject>::jit_compile(&mut executable).unwrap();
+        *machine_code_length = (executable
+            .get_compiled_program()
+            .unwrap()
+            .machine_code_length()
+            - empty_program_machine_code_length_per_version[0])
+            / INSTRUCTION_COUNT;
+    }
+    let instruction_meter_checkpoint_machine_code_length =
+        instruction_meter_checkpoint_machine_code_length[0]
+            - instruction_meter_checkpoint_machine_code_length[1];
+    assert!(
+        instruction_meter_checkpoint_machine_code_length
+            <= MACHINE_CODE_PER_INSTRUCTION_METER_CHECKPOINT
+    );
+
+    let mut cfg_nodes = BTreeMap::new();
+    cfg_nodes.insert(
+        8,
+        CfgNode {
+            label: std::string::String::from("label"),
+            ..CfgNode::default()
+        },
+    );
+
+    for sbpf_version in [SBPFVersion::V0, SBPFVersion::V3] {
+        println!("opcode;machine_code_length_per_instruction;assembly");
+        let empty_program_machine_code_length =
+            empty_program_machine_code_length_per_version[sbpf_version as usize];
+
+        for mut opcode in 0x00..=0xFF {
+            let (registers, immediate) = match opcode {
+                0x85 if !sbpf_version.static_syscalls() => (0x00, Some(8)),
+                0x85 if sbpf_version.static_syscalls() => (0x00, None),
+                0x8D => (0x88, Some(0)),
+                0x95 if sbpf_version.static_syscalls() => (0x00, Some(1)),
+                0xE5 if !sbpf_version.static_syscalls() => {
+                    // Put external function calls on a separate loop iteration
+                    opcode = 0x85;
+                    (0x00, Some(0x91020CDD))
+                }
+                0xF5 => {
+                    // Put invalid function calls on a separate loop iteration
+                    opcode = 0x85;
+                    (0x00, Some(0x91020CD0))
+                }
+                0xD4 | 0xDC => (0x88, Some(16)),
+                _ => (0x88, Some(0x11223344)),
+            };
+            for pc in 0..INSTRUCTION_COUNT {
+                prog[pc * ebpf::INSN_SIZE] = opcode;
+                prog[pc * ebpf::INSN_SIZE + 1] = registers;
+                let offset = 7_u16.wrapping_sub(pc as u16);
+                LittleEndian::write_u16(&mut prog[pc * ebpf::INSN_SIZE + 2..], offset);
+                let immediate = immediate.unwrap_or_else(|| 7_u32.wrapping_sub(pc as u32));
+                LittleEndian::write_u32(&mut prog[pc * ebpf::INSN_SIZE + 4..], immediate);
+            }
+            let config = Config {
+                noop_instruction_rate: 0,
+                enabled_sbpf_versions: sbpf_version..=sbpf_version,
+                ..Config::default()
+            };
+            let mut executable = create_mockup_executable(config, &prog);
+            let result = Executable::<TestContextObject>::jit_compile(&mut executable);
+            if result.is_err() {
+                assert!(matches!(
+                    result.unwrap_err(),
+                    EbpfError::UnsupportedInstruction
+                ));
+                continue;
+            }
+            let machine_code_length = executable
+                .get_compiled_program()
+                .unwrap()
+                .machine_code_length()
+                - empty_program_machine_code_length;
+            let instruction_count = if opcode == 0x18 {
+                // LDDW takes two slots
+                INSTRUCTION_COUNT / 2
+            } else {
+                INSTRUCTION_COUNT
+            };
+            let machine_code_length_per_instruction =
+                machine_code_length as f64 / instruction_count as f64;
+            assert!(
+                f64::ceil(machine_code_length_per_instruction) as usize
+                    <= MAX_MACHINE_CODE_LENGTH_PER_INSTRUCTION
+            );
+            let insn = ebpf::get_insn_unchecked(&prog, 0);
+            let assembly = disassemble_instruction(
+                &insn,
+                0,
+                &cfg_nodes,
+                executable.get_function_registry(),
+                executable.get_loader(),
+                executable.get_sbpf_version(),
+            );
+            println!(
+                "{:02X};{:>7.3};{}",
+                opcode, machine_code_length_per_instruction, assembly
+            );
+        }
+    }
+}
diff --git a/tests/vm.rs b/tests/vm.rs
new file mode 100644
index 00000000..a6f172b2
--- /dev/null
+++ b/tests/vm.rs
@@ -0,0 +1,51 @@
+use solana_sbpf::{
+    elf::Executable,
+    program::BuiltinProgram,
+    vm::{RuntimeEnvironmentSlot, TestContextObject},
+};
+use std::{fs::File, io::Read, sync::Arc};
+use test_utils::create_vm;
+
+#[test]
+fn test_runtime_environment_slots() {
+    let mut file = File::open("tests/elfs/relative_call_sbpfv0.so").unwrap();
+    let mut elf = Vec::new();
+    file.read_to_end(&mut elf).unwrap();
+    let executable =
+        Executable::<TestContextObject>::from_elf(&elf, Arc::new(BuiltinProgram::new_mock()))
+            .unwrap();
+    let mut context_object = TestContextObject::default();
+    create_vm!(
+        env,
+        &executable,
+        &mut context_object,
+        stack,
+        heap,
+        Vec::new(),
+        None
+    );
+
+    macro_rules! check_slot {
+        ($env:expr, $entry:ident, $slot:ident) => {
+            assert_eq!(
+                unsafe {
+                    std::ptr::addr_of!($env.$entry)
+                        .cast::<u64>()
+                        .offset_from(std::ptr::addr_of!($env).cast::<u64>()) as usize
+                },
+                RuntimeEnvironmentSlot::$slot as usize,
+            );
+        };
+    }
+
+    check_slot!(env, host_stack_pointer, HostStackPointer);
+    check_slot!(env, call_depth, CallDepth);
+    check_slot!(env, context_object_pointer, ContextObjectPointer);
+    check_slot!(env, previous_instruction_meter, PreviousInstructionMeter);
+    check_slot!(env, due_insn_count, DueInsnCount);
+    check_slot!(env, stopwatch_numerator, StopwatchNumerator);
+    check_slot!(env, stopwatch_denominator, StopwatchDenominator);
+    check_slot!(env, registers, Registers);
+    check_slot!(env, program_result, ProgramResult);
+    check_slot!(env, memory_mapping, MemoryMapping);
+}

From ef3f35628f3a065349f0d96e5a34423d22311e12 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Alexander=20Mei=C3=9Fner?=
Date: Tue, 7 Jan 2025 18:21:06 +0000
Subject: [PATCH 18/18] Moves the tests of program.rs into a separate file.

---
 src/program.rs | 36 ------------------------------------
 tests/vm.rs    | 32 ++++++++++++++++++++++++++++++--
 2 files changed, 30 insertions(+), 38 deletions(-)

diff --git a/src/program.rs b/src/program.rs
index fe019dd1..b3df0c7e 100644
--- a/src/program.rs
+++ b/src/program.rs
@@ -415,39 +415,3 @@ macro_rules! declare_builtin_function {
         }
     };
 }
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::{syscalls, vm::TestContextObject};
-
-    #[test]
-    fn test_builtin_program_eq() {
-        let mut function_registry_a =
-            FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
-        function_registry_a
-            .register_function_hashed(*b"log", syscalls::SyscallString::vm)
-            .unwrap();
-        function_registry_a
-            .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
-            .unwrap();
-        let mut function_registry_b =
-            FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
-        function_registry_b
-            .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
-            .unwrap();
-        function_registry_b
-            .register_function_hashed(*b"log", syscalls::SyscallString::vm)
-            .unwrap();
-        let mut function_registry_c =
-            FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
-        function_registry_c
-            .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
-            .unwrap();
-        let builtin_program_a = BuiltinProgram::new_loader(Config::default(), function_registry_a);
-        let builtin_program_b = BuiltinProgram::new_loader(Config::default(), function_registry_b);
-        assert_eq!(builtin_program_a, builtin_program_b);
-        let builtin_program_c = BuiltinProgram::new_loader(Config::default(), function_registry_c);
-        assert_ne!(builtin_program_a, builtin_program_c);
-    }
-}
diff --git a/tests/vm.rs b/tests/vm.rs
index a6f172b2..2c790890 100644
--- a/tests/vm.rs
+++ b/tests/vm.rs
@@ -1,7 +1,8 @@
 use solana_sbpf::{
     elf::Executable,
-    program::BuiltinProgram,
-    vm::{RuntimeEnvironmentSlot, TestContextObject},
+    program::{BuiltinProgram, BuiltinFunction, FunctionRegistry},
+    syscalls,
+    vm::{RuntimeEnvironmentSlot, TestContextObject, Config},
 };
 use std::{fs::File, io::Read, sync::Arc};
 use test_utils::create_vm;
@@ -49,3 +50,30 @@ fn test_runtime_environment_slots() {
     check_slot!(env, program_result, ProgramResult);
     check_slot!(env, memory_mapping, MemoryMapping);
 }
+
+#[test]
+fn test_builtin_program_eq() {
+    let mut function_registry_a = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+    function_registry_a
+        .register_function_hashed(*b"log", syscalls::SyscallString::vm)
+        .unwrap();
+    function_registry_a
+        .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
+        .unwrap();
+    let mut function_registry_b = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+    function_registry_b
+        .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
+        .unwrap();
+    function_registry_b
+        .register_function_hashed(*b"log", syscalls::SyscallString::vm)
+        .unwrap();
+    let mut function_registry_c = FunctionRegistry::<BuiltinFunction<TestContextObject>>::default();
+    function_registry_c
+        .register_function_hashed(*b"log_64", syscalls::SyscallU64::vm)
+        .unwrap();
+    let builtin_program_a = BuiltinProgram::new_loader(Config::default(), function_registry_a);
+    let builtin_program_b = BuiltinProgram::new_loader(Config::default(), function_registry_b);
+    assert_eq!(builtin_program_a, builtin_program_b);
+    let builtin_program_c = BuiltinProgram::new_loader(Config::default(), function_registry_c);
+    assert_ne!(builtin_program_a, builtin_program_c);
+}