diff --git a/crates/wasm-encoder/src/core/code.rs b/crates/wasm-encoder/src/core/code.rs index 5d6ef7fb38..e27ed69372 100644 --- a/crates/wasm-encoder/src/core/code.rs +++ b/crates/wasm-encoder/src/core/code.rs @@ -647,7 +647,7 @@ pub enum Instruction<'a> { I8x16MinU, I8x16MaxS, I8x16MaxU, - I8x16RoundingAverageU, + I8x16AvgrU, I16x8ExtAddPairwiseI8x16S, I16x8ExtAddPairwiseI8x16U, I16x8Abs, @@ -675,7 +675,7 @@ pub enum Instruction<'a> { I16x8MinU, I16x8MaxS, I16x8MaxU, - I16x8RoundingAverageU, + I16x8AvgrU, I16x8ExtMulLowI8x16S, I16x8ExtMulHighI8x16S, I16x8ExtMulLowI8x16U, @@ -1783,7 +1783,7 @@ impl Encode for Instruction<'_> { sink.push(0xFD); 0x79u32.encode(sink); } - Instruction::I8x16RoundingAverageU => { + Instruction::I8x16AvgrU => { sink.push(0xFD); 0x7Bu32.encode(sink); } @@ -1903,7 +1903,7 @@ impl Encode for Instruction<'_> { sink.push(0xFD); 0x99u32.encode(sink); } - Instruction::I16x8RoundingAverageU => { + Instruction::I16x8AvgrU => { sink.push(0xFD); 0x9Bu32.encode(sink); } diff --git a/crates/wasm-mutate/src/mutators/peephole/dfg.rs b/crates/wasm-mutate/src/mutators/peephole/dfg.rs index da03a1cb2e..d659445913 100644 --- a/crates/wasm-mutate/src/mutators/peephole/dfg.rs +++ b/crates/wasm-mutate/src/mutators/peephole/dfg.rs @@ -945,7 +945,7 @@ impl<'a> DFGBuilder { Operator::I8x16MinU => self.binop(idx, Lang::I8x16MinU), Operator::I8x16MaxS => self.binop(idx, Lang::I8x16MaxS), Operator::I8x16MaxU => self.binop(idx, Lang::I8x16MaxU), - Operator::I8x16RoundingAverageU => self.binop(idx, Lang::I8x16AvgrU), + Operator::I8x16AvgrU => self.binop(idx, Lang::I8x16AvgrU), Operator::I16x8ExtAddPairwiseI8x16S => { self.unop(idx, Lang::I16x8ExtAddPairwiseI8x16S) @@ -978,7 +978,7 @@ impl<'a> DFGBuilder { Operator::I16x8MinU => self.binop(idx, Lang::I16x8MinU), Operator::I16x8MaxS => self.binop(idx, Lang::I16x8MaxS), Operator::I16x8MaxU => self.binop(idx, Lang::I16x8MaxU), - Operator::I16x8RoundingAverageU => self.binop(idx, Lang::I16x8AvgrU), + Operator::I16x8AvgrU 
=> self.binop(idx, Lang::I16x8AvgrU), Operator::I16x8ExtMulLowI8x16S => self.binop(idx, Lang::I16x8ExtMulLowI8x16S), Operator::I16x8ExtMulHighI8x16S => self.binop(idx, Lang::I16x8ExtMulHighI8x16S), Operator::I16x8ExtMulLowI8x16U => self.binop(idx, Lang::I16x8ExtMulLowI8x16U), diff --git a/crates/wasm-mutate/src/mutators/peephole/eggsy/encoder/expr2wasm.rs b/crates/wasm-mutate/src/mutators/peephole/eggsy/encoder/expr2wasm.rs index 0f481087b5..65f5a3522a 100644 --- a/crates/wasm-mutate/src/mutators/peephole/eggsy/encoder/expr2wasm.rs +++ b/crates/wasm-mutate/src/mutators/peephole/eggsy/encoder/expr2wasm.rs @@ -601,7 +601,7 @@ pub fn expr2wasm( Lang::I8x16MinU(_) => insn(Instruction::I8x16MinU), Lang::I8x16MaxS(_) => insn(Instruction::I8x16MaxS), Lang::I8x16MaxU(_) => insn(Instruction::I8x16MaxU), - Lang::I8x16AvgrU(_) => insn(Instruction::I8x16RoundingAverageU), + Lang::I8x16AvgrU(_) => insn(Instruction::I8x16AvgrU), Lang::I16x8ExtAddPairwiseI8x16S(_) => { insn(Instruction::I16x8ExtAddPairwiseI8x16S) @@ -634,7 +634,7 @@ pub fn expr2wasm( Lang::I16x8MinU(_) => insn(Instruction::I16x8MinU), Lang::I16x8MaxS(_) => insn(Instruction::I16x8MaxS), Lang::I16x8MaxU(_) => insn(Instruction::I16x8MaxU), - Lang::I16x8AvgrU(_) => insn(Instruction::I16x8RoundingAverageU), + Lang::I16x8AvgrU(_) => insn(Instruction::I16x8AvgrU), Lang::I16x8ExtMulLowI8x16S(_) => insn(Instruction::I16x8ExtMulLowI8x16S), Lang::I16x8ExtMulHighI8x16S(_) => insn(Instruction::I16x8ExtMulHighI8x16S), Lang::I16x8ExtMulLowI8x16U(_) => insn(Instruction::I16x8ExtMulLowI8x16U), diff --git a/crates/wasm-smith/src/core/code_builder.rs b/crates/wasm-smith/src/core/code_builder.rs index 64b1a02f61..74bdcf788f 100644 --- a/crates/wasm-smith/src/core/code_builder.rs +++ b/crates/wasm-smith/src/core/code_builder.rs @@ -424,7 +424,7 @@ instructions! 
{ (Some(simd_v128_v128_on_stack), i8x16_min_u, Vector), (Some(simd_v128_v128_on_stack), i8x16_max_s, Vector), (Some(simd_v128_v128_on_stack), i8x16_max_u, Vector), - (Some(simd_v128_v128_on_stack), i8x16_rounding_average_u, Vector), + (Some(simd_v128_v128_on_stack), i8x16_avgr_u, Vector), (Some(simd_v128_on_stack), i16x8_extadd_pairwise_i8x16s, Vector), (Some(simd_v128_on_stack), i16x8_extadd_pairwise_i8x16u, Vector), (Some(simd_v128_on_stack), i16x8_abs, Vector), @@ -452,7 +452,7 @@ instructions! { (Some(simd_v128_v128_on_stack), i16x8_min_u, Vector), (Some(simd_v128_v128_on_stack), i16x8_max_s, Vector), (Some(simd_v128_v128_on_stack), i16x8_max_u, Vector), - (Some(simd_v128_v128_on_stack), i16x8_rounding_average_u, Vector), + (Some(simd_v128_v128_on_stack), i16x8_avgr_u, Vector), (Some(simd_v128_v128_on_stack), i16x8_extmul_low_i8x16s, Vector), (Some(simd_v128_v128_on_stack), i16x8_extmul_high_i8x16s, Vector), (Some(simd_v128_v128_on_stack), i16x8_extmul_low_i8x16u, Vector), @@ -3834,7 +3834,7 @@ simd_binop!(I8x16MinS, i8x16_min_s); simd_binop!(I8x16MinU, i8x16_min_u); simd_binop!(I8x16MaxS, i8x16_max_s); simd_binop!(I8x16MaxU, i8x16_max_u); -simd_binop!(I8x16RoundingAverageU, i8x16_rounding_average_u); +simd_binop!(I8x16AvgrU, i8x16_avgr_u); simd_unop!(I16x8ExtAddPairwiseI8x16S, i16x8_extadd_pairwise_i8x16s); simd_unop!(I16x8ExtAddPairwiseI8x16U, i16x8_extadd_pairwise_i8x16u); simd_unop!(I16x8Abs, i16x8_abs); @@ -3862,7 +3862,7 @@ simd_binop!(I16x8MinS, i16x8_min_s); simd_binop!(I16x8MinU, i16x8_min_u); simd_binop!(I16x8MaxS, i16x8_max_s); simd_binop!(I16x8MaxU, i16x8_max_u); -simd_binop!(I16x8RoundingAverageU, i16x8_rounding_average_u); +simd_binop!(I16x8AvgrU, i16x8_avgr_u); simd_binop!(I16x8ExtMulLowI8x16S, i16x8_extmul_low_i8x16s); simd_binop!(I16x8ExtMulHighI8x16S, i16x8_extmul_high_i8x16s); simd_binop!(I16x8ExtMulLowI8x16U, i16x8_extmul_low_i8x16u); diff --git a/crates/wasmparser/src/binary_reader.rs b/crates/wasmparser/src/binary_reader.rs index 
8e93b41597..1987b27b32 100644 --- a/crates/wasmparser/src/binary_reader.rs +++ b/crates/wasmparser/src/binary_reader.rs @@ -867,7 +867,7 @@ impl<'a> BinaryReader<'a> { Ok((self.buffer[pos], val)) } - fn read_memarg(&mut self) -> Result { + fn read_memarg(&mut self, max_align: u8) -> Result { let flags_pos = self.original_position(); let mut flags = self.read_var_u32()?; let memory = if flags & (1 << 6) != 0 { @@ -888,6 +888,7 @@ impl<'a> BinaryReader<'a> { }; Ok(MemArg { align, + max_align, offset, memory, }) @@ -1299,18 +1300,6 @@ impl<'a> BinaryReader<'a> { } } - fn read_memarg_of_align(&mut self, max_align: u8) -> Result { - let align_pos = self.original_position(); - let imm = self.read_memarg()?; - if imm.align > max_align { - return Err(BinaryReaderError::new( - "alignment must not be larger than natural", - align_pos, - )); - } - Ok(imm) - } - #[cold] fn invalid_leading_byte(&self, byte: u8, desc: &str) -> Result { Err(Self::invalid_leading_byte_error( @@ -1430,29 +1419,29 @@ impl<'a> BinaryReader<'a> { 0x25 => visitor.visit_table_get(pos, self.read_var_u32()?), 0x26 => visitor.visit_table_set(pos, self.read_var_u32()?), - 0x28 => visitor.visit_i32_load(pos, self.read_memarg()?), - 0x29 => visitor.visit_i64_load(pos, self.read_memarg()?), - 0x2a => visitor.visit_f32_load(pos, self.read_memarg()?), - 0x2b => visitor.visit_f64_load(pos, self.read_memarg()?), - 0x2c => visitor.visit_i32_load8_s(pos, self.read_memarg()?), - 0x2d => visitor.visit_i32_load8_u(pos, self.read_memarg()?), - 0x2e => visitor.visit_i32_load16_s(pos, self.read_memarg()?), - 0x2f => visitor.visit_i32_load16_u(pos, self.read_memarg()?), - 0x30 => visitor.visit_i64_load8_s(pos, self.read_memarg()?), - 0x31 => visitor.visit_i64_load8_u(pos, self.read_memarg()?), - 0x32 => visitor.visit_i64_load16_s(pos, self.read_memarg()?), - 0x33 => visitor.visit_i64_load16_u(pos, self.read_memarg()?), - 0x34 => visitor.visit_i64_load32_s(pos, self.read_memarg()?), - 0x35 => visitor.visit_i64_load32_u(pos, 
self.read_memarg()?), - 0x36 => visitor.visit_i32_store(pos, self.read_memarg()?), - 0x37 => visitor.visit_i64_store(pos, self.read_memarg()?), - 0x38 => visitor.visit_f32_store(pos, self.read_memarg()?), - 0x39 => visitor.visit_f64_store(pos, self.read_memarg()?), - 0x3a => visitor.visit_i32_store8(pos, self.read_memarg()?), - 0x3b => visitor.visit_i32_store16(pos, self.read_memarg()?), - 0x3c => visitor.visit_i64_store8(pos, self.read_memarg()?), - 0x3d => visitor.visit_i64_store16(pos, self.read_memarg()?), - 0x3e => visitor.visit_i64_store32(pos, self.read_memarg()?), + 0x28 => visitor.visit_i32_load(pos, self.read_memarg(2)?), + 0x29 => visitor.visit_i64_load(pos, self.read_memarg(3)?), + 0x2a => visitor.visit_f32_load(pos, self.read_memarg(2)?), + 0x2b => visitor.visit_f64_load(pos, self.read_memarg(3)?), + 0x2c => visitor.visit_i32_load8_s(pos, self.read_memarg(0)?), + 0x2d => visitor.visit_i32_load8_u(pos, self.read_memarg(0)?), + 0x2e => visitor.visit_i32_load16_s(pos, self.read_memarg(1)?), + 0x2f => visitor.visit_i32_load16_u(pos, self.read_memarg(1)?), + 0x30 => visitor.visit_i64_load8_s(pos, self.read_memarg(0)?), + 0x31 => visitor.visit_i64_load8_u(pos, self.read_memarg(0)?), + 0x32 => visitor.visit_i64_load16_s(pos, self.read_memarg(1)?), + 0x33 => visitor.visit_i64_load16_u(pos, self.read_memarg(1)?), + 0x34 => visitor.visit_i64_load32_s(pos, self.read_memarg(2)?), + 0x35 => visitor.visit_i64_load32_u(pos, self.read_memarg(2)?), + 0x36 => visitor.visit_i32_store(pos, self.read_memarg(2)?), + 0x37 => visitor.visit_i64_store(pos, self.read_memarg(3)?), + 0x38 => visitor.visit_f32_store(pos, self.read_memarg(2)?), + 0x39 => visitor.visit_f64_store(pos, self.read_memarg(3)?), + 0x3a => visitor.visit_i32_store8(pos, self.read_memarg(0)?), + 0x3b => visitor.visit_i32_store16(pos, self.read_memarg(1)?), + 0x3c => visitor.visit_i64_store8(pos, self.read_memarg(0)?), + 0x3d => visitor.visit_i64_store16(pos, self.read_memarg(1)?), + 0x3e => 
visitor.visit_i64_store32(pos, self.read_memarg(2)?), 0x3f => { let (mem_byte, mem) = self.read_first_byte_and_var_u32()?; visitor.visit_memory_size(pos, mem, mem_byte) @@ -1689,19 +1678,19 @@ impl<'a> BinaryReader<'a> { { let code = self.read_var_u32()?; Ok(match code { - 0x00 => visitor.visit_v128_load(pos, self.read_memarg()?), - 0x01 => visitor.visit_v128_load8x8_s(pos, self.read_memarg_of_align(3)?), - 0x02 => visitor.visit_v128_load8x8_u(pos, self.read_memarg_of_align(3)?), - 0x03 => visitor.visit_v128_load16x4_s(pos, self.read_memarg_of_align(3)?), - 0x04 => visitor.visit_v128_load16x4_u(pos, self.read_memarg_of_align(3)?), - 0x05 => visitor.visit_v128_load32x2_s(pos, self.read_memarg_of_align(3)?), - 0x06 => visitor.visit_v128_load32x2_u(pos, self.read_memarg_of_align(3)?), - 0x07 => visitor.visit_v128_load8_splat(pos, self.read_memarg_of_align(0)?), - 0x08 => visitor.visit_v128_load16_splat(pos, self.read_memarg_of_align(1)?), - 0x09 => visitor.visit_v128_load32_splat(pos, self.read_memarg_of_align(2)?), - 0x0a => visitor.visit_v128_load64_splat(pos, self.read_memarg_of_align(3)?), - - 0x0b => visitor.visit_v128_store(pos, self.read_memarg()?), + 0x00 => visitor.visit_v128_load(pos, self.read_memarg(4)?), + 0x01 => visitor.visit_v128_load8x8_s(pos, self.read_memarg(3)?), + 0x02 => visitor.visit_v128_load8x8_u(pos, self.read_memarg(3)?), + 0x03 => visitor.visit_v128_load16x4_s(pos, self.read_memarg(3)?), + 0x04 => visitor.visit_v128_load16x4_u(pos, self.read_memarg(3)?), + 0x05 => visitor.visit_v128_load32x2_s(pos, self.read_memarg(3)?), + 0x06 => visitor.visit_v128_load32x2_u(pos, self.read_memarg(3)?), + 0x07 => visitor.visit_v128_load8_splat(pos, self.read_memarg(0)?), + 0x08 => visitor.visit_v128_load16_splat(pos, self.read_memarg(1)?), + 0x09 => visitor.visit_v128_load32_splat(pos, self.read_memarg(2)?), + 0x0a => visitor.visit_v128_load64_splat(pos, self.read_memarg(3)?), + + 0x0b => visitor.visit_v128_store(pos, self.read_memarg(4)?), 0x0c => 
visitor.visit_v128_const(pos, self.read_v128()?), 0x0d => { let mut lanes: [u8; 16] = [0; 16]; @@ -1786,48 +1775,48 @@ impl<'a> BinaryReader<'a> { 0x53 => visitor.visit_v128_any_true(pos), 0x54 => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(0)?; let lane = self.read_lane_index(16)?; visitor.visit_v128_load8_lane(pos, memarg, lane) } 0x55 => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(1)?; let lane = self.read_lane_index(8)?; visitor.visit_v128_load16_lane(pos, memarg, lane) } 0x56 => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(2)?; let lane = self.read_lane_index(4)?; visitor.visit_v128_load32_lane(pos, memarg, lane) } 0x57 => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(3)?; let lane = self.read_lane_index(2)?; visitor.visit_v128_load64_lane(pos, memarg, lane) } 0x58 => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(0)?; let lane = self.read_lane_index(16)?; visitor.visit_v128_store8_lane(pos, memarg, lane) } 0x59 => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(1)?; let lane = self.read_lane_index(8)?; visitor.visit_v128_store16_lane(pos, memarg, lane) } 0x5a => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(2)?; let lane = self.read_lane_index(4)?; visitor.visit_v128_store32_lane(pos, memarg, lane) } 0x5b => { - let memarg = self.read_memarg()?; + let memarg = self.read_memarg(3)?; let lane = self.read_lane_index(2)?; visitor.visit_v128_store64_lane(pos, memarg, lane) } - 0x5c => visitor.visit_v128_load32_zero(pos, self.read_memarg_of_align(2)?), - 0x5d => visitor.visit_v128_load64_zero(pos, self.read_memarg_of_align(3)?), + 0x5c => visitor.visit_v128_load32_zero(pos, self.read_memarg(2)?), + 0x5d => visitor.visit_v128_load64_zero(pos, self.read_memarg(3)?), 0x5e => visitor.visit_f32x4_demote_f64x2_zero(pos), 0x5f => visitor.visit_f64x2_promote_low_f32x4(pos), 0x60 => 
visitor.visit_i8x16_abs(pos), @@ -2006,78 +1995,78 @@ impl<'a> BinaryReader<'a> { { let code = self.read_var_u32()?; Ok(match code { - 0x00 => visitor.visit_memory_atomic_notify(pos, self.read_memarg_of_align(2)?), - 0x01 => visitor.visit_memory_atomic_wait32(pos, self.read_memarg_of_align(2)?), - 0x02 => visitor.visit_memory_atomic_wait64(pos, self.read_memarg_of_align(3)?), + 0x00 => visitor.visit_memory_atomic_notify(pos, self.read_memarg(2)?), + 0x01 => visitor.visit_memory_atomic_wait32(pos, self.read_memarg(2)?), + 0x02 => visitor.visit_memory_atomic_wait64(pos, self.read_memarg(3)?), 0x03 => { if self.read_u8()? != 0 { bail!(pos, "nonzero byte after `atomic.fence`"); } visitor.visit_atomic_fence(pos) } - 0x10 => visitor.visit_i32_atomic_load(pos, self.read_memarg_of_align(2)?), - 0x11 => visitor.visit_i64_atomic_load(pos, self.read_memarg_of_align(3)?), - 0x12 => visitor.visit_i32_atomic_load8_u(pos, self.read_memarg_of_align(0)?), - 0x13 => visitor.visit_i32_atomic_load16_u(pos, self.read_memarg_of_align(1)?), - 0x14 => visitor.visit_i64_atomic_load8_u(pos, self.read_memarg_of_align(0)?), - 0x15 => visitor.visit_i64_atomic_load16_u(pos, self.read_memarg_of_align(1)?), - 0x16 => visitor.visit_i64_atomic_load32_u(pos, self.read_memarg_of_align(2)?), - 0x17 => visitor.visit_i32_atomic_store(pos, self.read_memarg_of_align(2)?), - 0x18 => visitor.visit_i64_atomic_store(pos, self.read_memarg_of_align(3)?), - 0x19 => visitor.visit_i32_atomic_store8(pos, self.read_memarg_of_align(0)?), - 0x1a => visitor.visit_i32_atomic_store16(pos, self.read_memarg_of_align(1)?), - 0x1b => visitor.visit_i64_atomic_store8(pos, self.read_memarg_of_align(0)?), - 0x1c => visitor.visit_i64_atomic_store16(pos, self.read_memarg_of_align(1)?), - 0x1d => visitor.visit_i64_atomic_store32(pos, self.read_memarg_of_align(2)?), - 0x1e => visitor.visit_i32_atomic_rmw_add(pos, self.read_memarg_of_align(2)?), - 0x1f => visitor.visit_i64_atomic_rmw_add(pos, self.read_memarg_of_align(3)?), - 0x20 => 
visitor.visit_i32_atomic_rmw8_add_u(pos, self.read_memarg_of_align(0)?), - 0x21 => visitor.visit_i32_atomic_rmw16_add_u(pos, self.read_memarg_of_align(1)?), - 0x22 => visitor.visit_i64_atomic_rmw8_add_u(pos, self.read_memarg_of_align(0)?), - 0x23 => visitor.visit_i64_atomic_rmw16_add_u(pos, self.read_memarg_of_align(1)?), - 0x24 => visitor.visit_i64_atomic_rmw32_add_u(pos, self.read_memarg_of_align(2)?), - 0x25 => visitor.visit_i32_atomic_rmw_sub(pos, self.read_memarg_of_align(2)?), - 0x26 => visitor.visit_i64_atomic_rmw_sub(pos, self.read_memarg_of_align(3)?), - 0x27 => visitor.visit_i32_atomic_rmw8_sub_u(pos, self.read_memarg_of_align(0)?), - 0x28 => visitor.visit_i32_atomic_rmw16_sub_u(pos, self.read_memarg_of_align(1)?), - 0x29 => visitor.visit_i64_atomic_rmw8_sub_u(pos, self.read_memarg_of_align(0)?), - 0x2a => visitor.visit_i64_atomic_rmw16_sub_u(pos, self.read_memarg_of_align(1)?), - 0x2b => visitor.visit_i64_atomic_rmw32_sub_u(pos, self.read_memarg_of_align(2)?), - 0x2c => visitor.visit_i32_atomic_rmw_and(pos, self.read_memarg_of_align(2)?), - 0x2d => visitor.visit_i64_atomic_rmw_and(pos, self.read_memarg_of_align(3)?), - 0x2e => visitor.visit_i32_atomic_rmw8_and_u(pos, self.read_memarg_of_align(0)?), - 0x2f => visitor.visit_i32_atomic_rmw16_and_u(pos, self.read_memarg_of_align(1)?), - 0x30 => visitor.visit_i64_atomic_rmw8_and_u(pos, self.read_memarg_of_align(0)?), - 0x31 => visitor.visit_i64_atomic_rmw16_and_u(pos, self.read_memarg_of_align(1)?), - 0x32 => visitor.visit_i64_atomic_rmw32_and_u(pos, self.read_memarg_of_align(2)?), - 0x33 => visitor.visit_i32_atomic_rmw_or(pos, self.read_memarg_of_align(2)?), - 0x34 => visitor.visit_i64_atomic_rmw_or(pos, self.read_memarg_of_align(3)?), - 0x35 => visitor.visit_i32_atomic_rmw8_or_u(pos, self.read_memarg_of_align(0)?), - 0x36 => visitor.visit_i32_atomic_rmw16_or_u(pos, self.read_memarg_of_align(1)?), - 0x37 => visitor.visit_i64_atomic_rmw8_or_u(pos, self.read_memarg_of_align(0)?), - 0x38 => 
visitor.visit_i64_atomic_rmw16_or_u(pos, self.read_memarg_of_align(1)?), - 0x39 => visitor.visit_i64_atomic_rmw32_or_u(pos, self.read_memarg_of_align(2)?), - 0x3a => visitor.visit_i32_atomic_rmw_xor(pos, self.read_memarg_of_align(2)?), - 0x3b => visitor.visit_i64_atomic_rmw_xor(pos, self.read_memarg_of_align(3)?), - 0x3c => visitor.visit_i32_atomic_rmw8_xor_u(pos, self.read_memarg_of_align(0)?), - 0x3d => visitor.visit_i32_atomic_rmw16_xor_u(pos, self.read_memarg_of_align(1)?), - 0x3e => visitor.visit_i64_atomic_rmw8_xor_u(pos, self.read_memarg_of_align(0)?), - 0x3f => visitor.visit_i64_atomic_rmw16_xor_u(pos, self.read_memarg_of_align(1)?), - 0x40 => visitor.visit_i64_atomic_rmw32_xor_u(pos, self.read_memarg_of_align(2)?), - 0x41 => visitor.visit_i32_atomic_rmw_xchg(pos, self.read_memarg_of_align(2)?), - 0x42 => visitor.visit_i64_atomic_rmw_xchg(pos, self.read_memarg_of_align(3)?), - 0x43 => visitor.visit_i32_atomic_rmw8_xchg_u(pos, self.read_memarg_of_align(0)?), - 0x44 => visitor.visit_i32_atomic_rmw16_xchg_u(pos, self.read_memarg_of_align(1)?), - 0x45 => visitor.visit_i64_atomic_rmw8_xchg_u(pos, self.read_memarg_of_align(0)?), - 0x46 => visitor.visit_i64_atomic_rmw16_xchg_u(pos, self.read_memarg_of_align(1)?), - 0x47 => visitor.visit_i64_atomic_rmw32_xchg_u(pos, self.read_memarg_of_align(2)?), - 0x48 => visitor.visit_i32_atomic_rmw_cmpxchg(pos, self.read_memarg_of_align(2)?), - 0x49 => visitor.visit_i64_atomic_rmw_cmpxchg(pos, self.read_memarg_of_align(3)?), - 0x4a => visitor.visit_i32_atomic_rmw8_cmpxchg_u(pos, self.read_memarg_of_align(0)?), - 0x4b => visitor.visit_i32_atomic_rmw16_cmpxchg_u(pos, self.read_memarg_of_align(1)?), - 0x4c => visitor.visit_i64_atomic_rmw8_cmpxchg_u(pos, self.read_memarg_of_align(0)?), - 0x4d => visitor.visit_i64_atomic_rmw16_cmpxchg_u(pos, self.read_memarg_of_align(1)?), - 0x4e => visitor.visit_i64_atomic_rmw32_cmpxchg_u(pos, self.read_memarg_of_align(2)?), + 0x10 => visitor.visit_i32_atomic_load(pos, self.read_memarg(2)?), + 0x11 
=> visitor.visit_i64_atomic_load(pos, self.read_memarg(3)?), + 0x12 => visitor.visit_i32_atomic_load8_u(pos, self.read_memarg(0)?), + 0x13 => visitor.visit_i32_atomic_load16_u(pos, self.read_memarg(1)?), + 0x14 => visitor.visit_i64_atomic_load8_u(pos, self.read_memarg(0)?), + 0x15 => visitor.visit_i64_atomic_load16_u(pos, self.read_memarg(1)?), + 0x16 => visitor.visit_i64_atomic_load32_u(pos, self.read_memarg(2)?), + 0x17 => visitor.visit_i32_atomic_store(pos, self.read_memarg(2)?), + 0x18 => visitor.visit_i64_atomic_store(pos, self.read_memarg(3)?), + 0x19 => visitor.visit_i32_atomic_store8(pos, self.read_memarg(0)?), + 0x1a => visitor.visit_i32_atomic_store16(pos, self.read_memarg(1)?), + 0x1b => visitor.visit_i64_atomic_store8(pos, self.read_memarg(0)?), + 0x1c => visitor.visit_i64_atomic_store16(pos, self.read_memarg(1)?), + 0x1d => visitor.visit_i64_atomic_store32(pos, self.read_memarg(2)?), + 0x1e => visitor.visit_i32_atomic_rmw_add(pos, self.read_memarg(2)?), + 0x1f => visitor.visit_i64_atomic_rmw_add(pos, self.read_memarg(3)?), + 0x20 => visitor.visit_i32_atomic_rmw8_add_u(pos, self.read_memarg(0)?), + 0x21 => visitor.visit_i32_atomic_rmw16_add_u(pos, self.read_memarg(1)?), + 0x22 => visitor.visit_i64_atomic_rmw8_add_u(pos, self.read_memarg(0)?), + 0x23 => visitor.visit_i64_atomic_rmw16_add_u(pos, self.read_memarg(1)?), + 0x24 => visitor.visit_i64_atomic_rmw32_add_u(pos, self.read_memarg(2)?), + 0x25 => visitor.visit_i32_atomic_rmw_sub(pos, self.read_memarg(2)?), + 0x26 => visitor.visit_i64_atomic_rmw_sub(pos, self.read_memarg(3)?), + 0x27 => visitor.visit_i32_atomic_rmw8_sub_u(pos, self.read_memarg(0)?), + 0x28 => visitor.visit_i32_atomic_rmw16_sub_u(pos, self.read_memarg(1)?), + 0x29 => visitor.visit_i64_atomic_rmw8_sub_u(pos, self.read_memarg(0)?), + 0x2a => visitor.visit_i64_atomic_rmw16_sub_u(pos, self.read_memarg(1)?), + 0x2b => visitor.visit_i64_atomic_rmw32_sub_u(pos, self.read_memarg(2)?), + 0x2c => visitor.visit_i32_atomic_rmw_and(pos, 
self.read_memarg(2)?), + 0x2d => visitor.visit_i64_atomic_rmw_and(pos, self.read_memarg(3)?), + 0x2e => visitor.visit_i32_atomic_rmw8_and_u(pos, self.read_memarg(0)?), + 0x2f => visitor.visit_i32_atomic_rmw16_and_u(pos, self.read_memarg(1)?), + 0x30 => visitor.visit_i64_atomic_rmw8_and_u(pos, self.read_memarg(0)?), + 0x31 => visitor.visit_i64_atomic_rmw16_and_u(pos, self.read_memarg(1)?), + 0x32 => visitor.visit_i64_atomic_rmw32_and_u(pos, self.read_memarg(2)?), + 0x33 => visitor.visit_i32_atomic_rmw_or(pos, self.read_memarg(2)?), + 0x34 => visitor.visit_i64_atomic_rmw_or(pos, self.read_memarg(3)?), + 0x35 => visitor.visit_i32_atomic_rmw8_or_u(pos, self.read_memarg(0)?), + 0x36 => visitor.visit_i32_atomic_rmw16_or_u(pos, self.read_memarg(1)?), + 0x37 => visitor.visit_i64_atomic_rmw8_or_u(pos, self.read_memarg(0)?), + 0x38 => visitor.visit_i64_atomic_rmw16_or_u(pos, self.read_memarg(1)?), + 0x39 => visitor.visit_i64_atomic_rmw32_or_u(pos, self.read_memarg(2)?), + 0x3a => visitor.visit_i32_atomic_rmw_xor(pos, self.read_memarg(2)?), + 0x3b => visitor.visit_i64_atomic_rmw_xor(pos, self.read_memarg(3)?), + 0x3c => visitor.visit_i32_atomic_rmw8_xor_u(pos, self.read_memarg(0)?), + 0x3d => visitor.visit_i32_atomic_rmw16_xor_u(pos, self.read_memarg(1)?), + 0x3e => visitor.visit_i64_atomic_rmw8_xor_u(pos, self.read_memarg(0)?), + 0x3f => visitor.visit_i64_atomic_rmw16_xor_u(pos, self.read_memarg(1)?), + 0x40 => visitor.visit_i64_atomic_rmw32_xor_u(pos, self.read_memarg(2)?), + 0x41 => visitor.visit_i32_atomic_rmw_xchg(pos, self.read_memarg(2)?), + 0x42 => visitor.visit_i64_atomic_rmw_xchg(pos, self.read_memarg(3)?), + 0x43 => visitor.visit_i32_atomic_rmw8_xchg_u(pos, self.read_memarg(0)?), + 0x44 => visitor.visit_i32_atomic_rmw16_xchg_u(pos, self.read_memarg(1)?), + 0x45 => visitor.visit_i64_atomic_rmw8_xchg_u(pos, self.read_memarg(0)?), + 0x46 => visitor.visit_i64_atomic_rmw16_xchg_u(pos, self.read_memarg(1)?), + 0x47 => visitor.visit_i64_atomic_rmw32_xchg_u(pos, 
self.read_memarg(2)?), + 0x48 => visitor.visit_i32_atomic_rmw_cmpxchg(pos, self.read_memarg(2)?), + 0x49 => visitor.visit_i64_atomic_rmw_cmpxchg(pos, self.read_memarg(3)?), + 0x4a => visitor.visit_i32_atomic_rmw8_cmpxchg_u(pos, self.read_memarg(0)?), + 0x4b => visitor.visit_i32_atomic_rmw16_cmpxchg_u(pos, self.read_memarg(1)?), + 0x4c => visitor.visit_i64_atomic_rmw8_cmpxchg_u(pos, self.read_memarg(0)?), + 0x4d => visitor.visit_i64_atomic_rmw16_cmpxchg_u(pos, self.read_memarg(1)?), + 0x4e => visitor.visit_i64_atomic_rmw32_cmpxchg_u(pos, self.read_memarg(2)?), _ => bail!(pos, "unknown 0xfe subopcode: 0x{code:x}"), }) diff --git a/crates/wasmparser/src/lib.rs b/crates/wasmparser/src/lib.rs index 2b0215bd7f..d29fa9e32a 100644 --- a/crates/wasmparser/src/lib.rs +++ b/crates/wasmparser/src/lib.rs @@ -533,7 +533,7 @@ macro_rules! for_each_operator { @simd I8x16MinU => visit_i8x16_min_u @simd I8x16MaxS => visit_i8x16_max_s @simd I8x16MaxU => visit_i8x16_max_u - @simd I8x16RoundingAverageU => visit_i8x16_avgr_u + @simd I8x16AvgrU => visit_i8x16_avgr_u @simd I16x8ExtAddPairwiseI8x16S => visit_i16x8_extadd_pairwise_i8x16_s @simd I16x8ExtAddPairwiseI8x16U => visit_i16x8_extadd_pairwise_i8x16_u @simd I16x8Abs => visit_i16x8_abs @@ -561,7 +561,7 @@ macro_rules! 
for_each_operator { @simd I16x8MinU => visit_i16x8_min_u @simd I16x8MaxS => visit_i16x8_max_s @simd I16x8MaxU => visit_i16x8_max_u - @simd I16x8RoundingAverageU => visit_i16x8_avgr_u + @simd I16x8AvgrU => visit_i16x8_avgr_u @simd I16x8ExtMulLowI8x16S => visit_i16x8_extmul_low_i8x16_s @simd I16x8ExtMulHighI8x16S => visit_i16x8_extmul_high_i8x16_s @simd I16x8ExtMulLowI8x16U => visit_i16x8_extmul_low_i8x16_u diff --git a/crates/wasmparser/src/readers/core/operators.rs b/crates/wasmparser/src/readers/core/operators.rs index 0448927d70..2b9726d0fb 100644 --- a/crates/wasmparser/src/readers/core/operators.rs +++ b/crates/wasmparser/src/readers/core/operators.rs @@ -33,6 +33,12 @@ pub enum BlockType { pub struct MemArg { /// Alignment, stored as `n` where the actual alignment is `2^n` pub align: u8, + /// Maximum alignment, stored as `n` where the actual alignment is `2^n`. + /// + /// Note that this field is not actually read from the binary format, it + /// will be a constant depending on which instruction this `MemArg` is a + /// payload for. + pub max_align: u8, /// A fixed byte-offset that this memory immediate specifies. /// /// Note that the memory64 proposal can specify a full 64-bit byte offset diff --git a/crates/wasmparser/src/validator/operators.rs b/crates/wasmparser/src/validator/operators.rs index 3be6cf6d97..311322d30a 100644 --- a/crates/wasmparser/src/validator/operators.rs +++ b/crates/wasmparser/src/validator/operators.rs @@ -540,10 +540,9 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R /// Validates a `memarg for alignment and such (also the memory it /// references), and returns the type of index used to address the memory. 
- fn check_memarg(&self, memarg: MemArg, max_align: u8, offset: usize) -> Result { + fn check_memarg(&self, memarg: MemArg, offset: usize) -> Result { let index_ty = self.check_memory_index(offset, memarg.memory)?; - let align = memarg.align; - if align > max_align { + if memarg.align > memarg.max_align { bail!(offset, "alignment must not be larger than natural"); } if index_ty == ValType::I32 && memarg.offset > u64::from(u32::MAX) { @@ -560,7 +559,13 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R Ok(()) } - fn check_shared_memarg_wo_align(&self, offset: usize, memarg: MemArg) -> Result { + fn check_shared_memarg(&self, offset: usize, memarg: MemArg) -> Result { + if memarg.align != memarg.max_align { + bail!( + offset, + "atomic instructions must always specify maximum alignment" + ); + } self.check_memory_index(offset, memarg.memory) } @@ -711,7 +716,7 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R /// Checks the validity of an atomic load operator. 
fn check_atomic_load(&mut self, offset: usize, memarg: MemArg, load_ty: ValType) -> Result<()> { - let ty = self.check_shared_memarg_wo_align(offset, memarg)?; + let ty = self.check_shared_memarg(offset, memarg)?; self.pop_operand(offset, Some(ty))?; self.push_operand(load_ty)?; Ok(()) @@ -724,7 +729,7 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R memarg: MemArg, store_ty: ValType, ) -> Result<()> { - let ty = self.check_shared_memarg_wo_align(offset, memarg)?; + let ty = self.check_shared_memarg(offset, memarg)?; self.pop_operand(offset, Some(store_ty))?; self.pop_operand(offset, Some(ty))?; Ok(()) @@ -737,7 +742,7 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R memarg: MemArg, op_ty: ValType, ) -> Result<()> { - let ty = self.check_shared_memarg_wo_align(offset, memarg)?; + let ty = self.check_shared_memarg(offset, memarg)?; self.pop_operand(offset, Some(op_ty))?; self.pop_operand(offset, Some(ty))?; self.push_operand(op_ty)?; @@ -751,7 +756,7 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R memarg: MemArg, op_ty: ValType, ) -> Result<()> { - let ty = self.check_shared_memarg_wo_align(offset, memarg)?; + let ty = self.check_shared_memarg(offset, memarg)?; self.pop_operand(offset, Some(op_ty))?; self.pop_operand(offset, Some(op_ty))?; self.pop_operand(offset, Some(ty))?; @@ -834,7 +839,7 @@ impl<'resources, R: WasmModuleResources> OperatorValidatorTemp<'_, 'resources, R /// Checks a [`V128`] common load operator. 
fn check_v128_load_op(&mut self, offset: usize, memarg: MemArg) -> Result<()> { - let idx = self.check_memarg(memarg, 3, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(idx))?; self.push_operand(ValType::V128)?; Ok(()) @@ -1269,33 +1274,33 @@ where Ok(()) } fn visit_i32_load(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I32)?; Ok(()) } fn visit_i64_load(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 3, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I64)?; Ok(()) } fn visit_f32_load(&mut self, offset: usize, memarg: MemArg) -> Self::Output { self.check_non_deterministic_enabled(offset)?; - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::F32)?; Ok(()) } fn visit_f64_load(&mut self, offset: usize, memarg: MemArg) -> Self::Output { self.check_non_deterministic_enabled(offset)?; - let ty = self.check_memarg(memarg, 3, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::F64)?; Ok(()) } fn visit_i32_load8_s(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 0, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I32)?; Ok(()) @@ -1304,7 +1309,7 @@ where self.visit_i32_load8_s(input, memarg) } fn visit_i32_load16_s(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 1, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I32)?; Ok(()) @@ 
-1313,7 +1318,7 @@ where self.visit_i32_load16_s(input, memarg) } fn visit_i64_load8_s(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 0, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I64)?; Ok(()) @@ -1322,7 +1327,7 @@ where self.visit_i64_load8_s(input, memarg) } fn visit_i64_load16_s(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 1, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I64)?; Ok(()) @@ -1331,7 +1336,7 @@ where self.visit_i64_load16_s(input, memarg) } fn visit_i64_load32_s(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::I64)?; Ok(()) @@ -1340,57 +1345,57 @@ where self.visit_i64_load32_s(input, memarg) } fn visit_i32_store(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I32))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_i64_store(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 3, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_f32_store(&mut self, offset: usize, memarg: MemArg) -> Self::Output { self.check_non_deterministic_enabled(offset)?; - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::F32))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_f64_store(&mut self, offset: usize, memarg: MemArg) -> Self::Output { 
self.check_non_deterministic_enabled(offset)?; - let ty = self.check_memarg(memarg, 3, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::F64))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_i32_store8(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 0, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I32))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_i32_store16(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 1, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I32))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_i64_store8(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 0, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_i64_store16(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 1, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ty))?; Ok(()) } fn visit_i64_store32(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ty))?; Ok(()) @@ -2035,7 +2040,7 @@ where self.check_atomic_binary_op(offset, memarg, ValType::I32) } fn visit_memory_atomic_wait32(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_shared_memarg_wo_align(offset, memarg)?; + let ty = self.check_shared_memarg(offset, memarg)?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ValType::I32))?; 
self.pop_operand(offset, Some(ty))?; @@ -2043,7 +2048,7 @@ where Ok(()) } fn visit_memory_atomic_wait64(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_shared_memarg_wo_align(offset, memarg)?; + let ty = self.check_shared_memarg(offset, memarg)?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ValType::I64))?; self.pop_operand(offset, Some(ty))?; @@ -2093,13 +2098,13 @@ where Ok(()) } fn visit_v128_load(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 4, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::V128)?; Ok(()) } fn visit_v128_store(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 4, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(ty))?; Ok(()) @@ -2875,19 +2880,19 @@ where Ok(()) } fn visit_v128_load8_splat(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 0, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::V128)?; Ok(()) } fn visit_v128_load16_splat(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 1, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::V128)?; Ok(()) } fn visit_v128_load32_splat(&mut self, offset: usize, memarg: MemArg) -> Self::Output { - let ty = self.check_memarg(memarg, 2, offset)?; + let ty = self.check_memarg(memarg, offset)?; self.pop_operand(offset, Some(ty))?; self.push_operand(ValType::V128)?; Ok(()) @@ -2920,7 +2925,7 @@ where self.check_v128_load_op(offset, memarg) } fn visit_v128_load8_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = 
self.check_memarg(memarg, 0, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 16)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; @@ -2928,7 +2933,7 @@ where Ok(()) } fn visit_v128_load16_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 1, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 8)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; @@ -2936,7 +2941,7 @@ where Ok(()) } fn visit_v128_load32_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 2, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 4)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; @@ -2944,7 +2949,7 @@ where Ok(()) } fn visit_v128_load64_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 3, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 2)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; @@ -2952,28 +2957,28 @@ where Ok(()) } fn visit_v128_store8_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 0, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 16)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; Ok(()) } fn visit_v128_store16_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 1, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 8)?; self.pop_operand(offset, Some(ValType::V128))?; 
self.pop_operand(offset, Some(idx))?; Ok(()) } fn visit_v128_store32_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 2, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 4)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; Ok(()) } fn visit_v128_store64_lane(&mut self, offset: usize, memarg: MemArg, lane: u8) -> Self::Output { - let idx = self.check_memarg(memarg, 3, offset)?; + let idx = self.check_memarg(memarg, offset)?; self.check_simd_lane_index(offset, lane, 2)?; self.pop_operand(offset, Some(ValType::V128))?; self.pop_operand(offset, Some(idx))?; diff --git a/crates/wasmprinter/src/lib.rs b/crates/wasmprinter/src/lib.rs index 1860f7141b..d527041827 100644 --- a/crates/wasmprinter/src/lib.rs +++ b/crates/wasmprinter/src/lib.rs @@ -965,56 +965,6 @@ impl Printer { } } - fn mem_instr( - &mut self, - state: &State, - name: &str, - memarg: &MemArg, - default_align: u32, - ) -> Result<()> { - self.result.push_str(name); - if memarg.memory != 0 { - self.result.push(' '); - self.print_idx(&state.core.memory_names, memarg.memory)?; - } - if memarg.offset != 0 { - write!(self.result, " offset={}", memarg.offset)?; - } - if memarg.align >= 32 { - bail!("alignment in memarg too large"); - } - let align = 1 << memarg.align; - if default_align != align { - write!(self.result, " align={}", align)?; - } - Ok(()) - } - - fn print_blockty(&mut self, state: &mut State, ty: &BlockType, cur_depth: u32) -> Result<()> { - if let Some(name) = state - .core - .label_names - .get(&(state.core.funcs, state.core.labels)) - { - self.result.push(' '); - name.write(&mut self.result); - } - match ty { - BlockType::Empty => {} - BlockType::Type(t) => { - self.result.push_str(" (result "); - self.print_valtype(*t)?; - self.result.push(')'); - } - BlockType::FuncType(idx) => { - self.print_core_functype_idx(state, *idx, false, None)?; - } - } - 
write!(self.result, " ;; label = @{}", cur_depth)?; - state.core.labels += 1; - Ok(()) - } - fn print_exports(&mut self, state: &State, data: ExportSectionReader) -> Result<()> { for export in data.into_iter_with_offsets() { let (offset, export) = export?; diff --git a/crates/wasmprinter/src/operator.rs b/crates/wasmprinter/src/operator.rs index bf7e25728e..da32b3b52b 100644 --- a/crates/wasmprinter/src/operator.rs +++ b/crates/wasmprinter/src/operator.rs @@ -1,7 +1,7 @@ use super::{Printer, State}; -use anyhow::Result; +use anyhow::{bail, Result}; use std::fmt::Write; -use wasmparser::{BlockType, BrTable, Ieee32, Ieee64, MemArg, ValType, VisitOperator, V128}; +use wasmparser::{BlockType, BrTable, MemArg, VisitOperator}; pub struct PrintOperator<'a, 'b> { pub(super) printer: &'a mut Printer, @@ -26,12 +26,35 @@ impl<'a, 'b> PrintOperator<'a, 'b> { &mut self.printer.result } - fn print_blockty(&mut self, ty: &BlockType) -> Result<()> { + fn blockty(&mut self, ty: BlockType) -> Result<()> { + if let Some(name) = self + .state + .core + .label_names + .get(&(self.state.core.funcs, self.state.core.labels)) + { + self.printer.result.push(' '); + name.write(&mut self.printer.result); + } + match ty { + BlockType::Empty => {} + BlockType::Type(t) => { + self.push_str("(result "); + self.printer.print_valtype(t)?; + self.push_str(")"); + } + BlockType::FuncType(idx) => { + self.printer + .print_core_functype_idx(self.state, idx, false, None)?; + } + } // Note that 1 is added to the current depth here since if a block type // is being printed then a block is being created which will increase // the label depth of the block itself. 
- self.printer - .print_blockty(self.state, ty, self.cur_depth() + 1) + let depth = self.cur_depth(); + write!(self.result(), " ;; label = @{}", depth + 1)?; + self.state.core.labels += 1; + Ok(()) } fn cur_depth(&self) -> u32 { @@ -45,51 +68,95 @@ impl<'a, 'b> PrintOperator<'a, 'b> { } } - fn print_func_idx(&mut self, idx: u32) -> Result<()> { + fn tag_index(&mut self, index: u32) -> Result<()> { + write!(self.result(), "{index}")?; + Ok(()) + } + + fn relative_depth(&mut self, depth: u32) -> Result<()> { + let label = self.label(depth); + write!(self.result(), "{depth} (;{label};)")?; + Ok(()) + } + + fn targets(&mut self, targets: BrTable<'_>) -> Result<()> { + for (i, item) in targets.targets().chain([Ok(targets.default())]).enumerate() { + if i > 0 { + self.push_str(" "); + } + self.relative_depth(item?)?; + } + Ok(()) + } + + fn function_index(&mut self, idx: u32) -> Result<()> { self.printer.print_idx(&self.state.core.func_names, idx) } - fn print_table_idx(&mut self, idx: u32) -> Result<()> { - self.printer.print_idx(&self.state.core.table_names, idx) + fn local_index(&mut self, idx: u32) -> Result<()> { + self.printer + .print_local_idx(self.state, self.state.core.funcs, idx) } - fn print_global_idx(&mut self, idx: u32) -> Result<()> { + fn global_index(&mut self, idx: u32) -> Result<()> { self.printer.print_idx(&self.state.core.global_names, idx) } - fn print_memory_idx(&mut self, idx: u32) -> Result<()> { - self.printer.print_idx(&self.state.core.memory_names, idx) + fn table_index(&mut self, idx: u32) -> Result<()> { + self.printer.print_idx(&self.state.core.table_names, idx) } - fn print_data_idx(&mut self, idx: u32) -> Result<()> { - self.printer.print_idx(&self.state.core.data_names, idx) + fn table(&mut self, idx: u32) -> Result<()> { + self.table_index(idx) } - fn print_element_idx(&mut self, idx: u32) -> Result<()> { - self.printer.print_idx(&self.state.core.element_names, idx) + fn memory_index(&mut self, idx: u32) -> Result<()> { + 
self.printer.print_idx(&self.state.core.memory_names, idx) } - fn print_local_idx(&mut self, idx: u32) -> Result<()> { - self.printer - .print_local_idx(self.state, self.state.core.funcs, idx) + fn type_index(&mut self, idx: u32) -> Result<()> { + self.printer.print_type_ref(self.state, idx, true, None) } - fn print_type_ref(&mut self, idx: u32) -> Result<()> { - self.printer.print_type_ref(self.state, idx, true, None) + fn data_index(&mut self, idx: u32) -> Result<()> { + self.printer.print_idx(&self.state.core.data_names, idx) + } + + fn elem_index(&mut self, idx: u32) -> Result<()> { + self.printer.print_idx(&self.state.core.element_names, idx) } - fn print_valtype(&mut self, ty: ValType) -> Result<()> { - self.printer.print_valtype(ty) + fn lane(&mut self, lane: u8) -> Result<()> { + write!(self.result(), "{lane}")?; + Ok(()) } - fn instr(&mut self, name: &str) -> Result { - self.push_str(name); - Ok(OpKind::Normal) + fn lanes(&mut self, lanes: [u8; 16]) -> Result<()> { + for (i, lane) in lanes.iter().enumerate() { + if i > 0 { + self.push_str(" "); + } + write!(self.result(), "{lane}")?; + } + Ok(()) } - fn mem_instr(&mut self, name: &str, memarg: &MemArg, align: u32) -> Result { - self.printer.mem_instr(self.state, name, memarg, align)?; - Ok(OpKind::Normal) + fn memarg(&mut self, memarg: MemArg) -> Result<()> { + if memarg.memory != 0 { + self.result().push(' '); + self.memory_index(memarg.memory)?; + } + if memarg.offset != 0 { + write!(self.result(), " offset={}", memarg.offset)?; + } + if memarg.align != memarg.max_align { + if memarg.align >= 32 { + bail!("alignment in memarg too large"); + } + let align = 1 << memarg.align; + write!(self.result(), " align={}", align)?; + } + Ok(()) } } @@ -101,1822 +168,676 @@ pub enum OpKind { Normal, } -impl<'a> VisitOperator<'a> for PrintOperator<'_, '_> { - type Output = Result; - - fn visit_unreachable(&mut self, _pos: usize) -> Self::Output { - self.instr("unreachable") - } - fn visit_nop(&mut self, _pos: usize) 
-> Self::Output { - self.instr("nop") - } - fn visit_block(&mut self, _pos: usize, ty: BlockType) -> Self::Output { - self.push_str("block"); - self.print_blockty(&ty)?; - Ok(OpKind::BlockStart) - } - fn visit_loop(&mut self, _pos: usize, ty: BlockType) -> Self::Output { - self.push_str("loop"); - self.print_blockty(&ty)?; - Ok(OpKind::BlockStart) - } - fn visit_if(&mut self, _pos: usize, ty: BlockType) -> Self::Output { - self.push_str("if"); - self.print_blockty(&ty)?; - Ok(OpKind::BlockStart) - } - fn visit_else(&mut self, _pos: usize) -> Self::Output { - self.push_str("else"); - Ok(OpKind::BlockMid) - } - fn visit_try(&mut self, _pos: usize, ty: BlockType) -> Self::Output { - self.push_str("try"); - self.print_blockty(&ty)?; - Ok(OpKind::BlockStart) - } - fn visit_catch(&mut self, _pos: usize, index: u32) -> Self::Output { - write!(self.result(), "catch {index}")?; - Ok(OpKind::BlockMid) - } - fn visit_throw(&mut self, _pos: usize, index: u32) -> Self::Output { - write!(self.result(), "throw {index}")?; - Ok(OpKind::Normal) - } - fn visit_rethrow(&mut self, _pos: usize, relative_depth: u32) -> Self::Output { - let label = self.label(relative_depth); - write!(self.result(), "rethrow {relative_depth} (;{label};)")?; - Ok(OpKind::Normal) - } - fn visit_delegate(&mut self, _pos: usize, relative_depth: u32) -> Self::Output { - let label = self.label(relative_depth); - write!(self.result(), "delegate {relative_depth} (;{label};)")?; - Ok(OpKind::Delegate) - } - fn visit_catch_all(&mut self, _pos: usize) -> Self::Output { - self.push_str("catch_all"); - Ok(OpKind::BlockMid) - } - fn visit_end(&mut self, _pos: usize) -> Self::Output { - self.push_str("end"); - Ok(OpKind::End) - } - fn visit_br(&mut self, _pos: usize, relative_depth: u32) -> Self::Output { - let label = self.label(relative_depth); - write!(self.result(), "br {relative_depth} (;{label};)")?; - Ok(OpKind::Normal) - } - fn visit_br_if(&mut self, _pos: usize, relative_depth: u32) -> Self::Output { - let 
label = self.label(relative_depth); - write!(self.result(), "br_if {relative_depth} (;{label};)")?; - Ok(OpKind::Normal) - } - fn visit_br_table(&mut self, _pos: usize, table: BrTable<'a>) -> Self::Output { - self.push_str("br_table"); - for item in table.targets().chain(Some(Ok(table.default()))) { - let item = item?; - let label = self.label(item); - write!(self.result(), " {item} (;{label};)")?; +macro_rules! define_visit { + // General structure of all the operator printer methods: + // + // * Print the name of the instruction as defined in this macro + // * Print any payload, as necessary + // * Return the `OpKind`, as defined by this macro + ($(@$proposal:ident $op:ident $({ $($arg:ident: $argty:ty),* })? => $visit:ident )*) => ($( + fn $visit(&mut self, _pos: usize $( , $($arg: $argty),* )?) -> Self::Output { + self.push_str(define_visit!(name $op)); + $( + define_visit!(payload self $op $($arg)*); + )? + Ok(define_visit!(kind $op)) }
+ (kind Block) => (OpKind::BlockStart); + (kind Loop) => (OpKind::BlockStart); + (kind If) => (OpKind::BlockStart); + (kind Try) => (OpKind::BlockStart); + (kind Else) => (OpKind::BlockMid); + (kind Catch) => (OpKind::BlockMid); + (kind CatchAll) => (OpKind::BlockMid); + (kind End) => (OpKind::End); + (kind Delegate) => (OpKind::Delegate); + (kind $other:tt) => (OpKind::Normal); + + // How to print the payload of an instruction. There are a number of + // instructions that have special cases such as avoiding printing anything + // when an index is 0 or similar. The final case in this list is the + // catch-all which prints each payload individually based on the name of the + // payload field. + (payload $self:ident CallIndirect $ty:ident $table:ident $byte:ident) => ( + if $table != 0 { + $self.push_str(" "); + $self.table_index($table)?; }
fn visit_ref_func(&mut self, _pos: usize, function_index: u32) -> Self::Output { - self.push_str("ref.func "); - self.print_func_idx(function_index)?; - Ok(OpKind::Normal) - } - - fn visit_local_get(&mut self, _pos: usize, local_index: u32) -> Self::Output { - self.push_str("local.get "); - self.print_local_idx(local_index)?; - Ok(OpKind::Normal) - } - fn visit_local_set(&mut self, _pos: usize, local_index: u32) -> Self::Output { - self.push_str("local.set "); - self.print_local_idx(local_index)?; - Ok(OpKind::Normal) - } - fn visit_local_tee(&mut self, _pos: usize, local_index: u32) -> Self::Output { - self.push_str("local.tee "); - self.print_local_idx(local_index)?; - Ok(OpKind::Normal) - } - fn visit_global_get(&mut self, _pos: usize, global_index: u32) -> Self::Output { - self.push_str("global.get "); - self.print_global_idx(global_index)?; - Ok(OpKind::Normal) - } - fn visit_global_set(&mut self, _pos: usize, global_index: u32) -> Self::Output { - self.push_str("global.set "); - self.print_global_idx(global_index)?; - Ok(OpKind::Normal) - } - fn visit_table_get(&mut self, _pos: usize, table: u32) -> Self::Output { - self.push_str("table.get "); - self.print_table_idx(table)?; - Ok(OpKind::Normal) - } - fn visit_table_set(&mut self, _pos: usize, table: u32) -> Self::Output { - self.push_str("table.set "); - self.print_table_idx(table)?; - Ok(OpKind::Normal) - } - fn visit_table_init(&mut self, _pos: usize, segment: u32, table: u32) -> Self::Output { - self.push_str("table.init "); - if table != 0 { - self.print_table_idx(table)?; - self.push_str(" "); + $self.type_index($ty)?; + ); + (payload $self:ident TypedSelect $ty:ident) => ( + $self.push_str(" (result "); + $self.printer.print_valtype($ty)?; + $self.push_str(")") + ); + (payload $self:ident RefNull $ty:ident) => ( + $self.push_str(" "); + $self.printer.print_reftype($ty)?; + ); + (payload $self:ident TableInit $segment:ident $table:ident) => ( + $self.push_str(" "); + if $table != 0 { + 
$self.table_index($table)?; + $self.push_str(" "); } - self.print_element_idx(segment)?; - Ok(OpKind::Normal) - } - fn visit_elem_drop(&mut self, _pos: usize, segment: u32) -> Self::Output { - self.push_str("elem.drop "); - self.print_element_idx(segment)?; - Ok(OpKind::Normal) - } - fn visit_table_copy(&mut self, _pos: usize, dst: u32, src: u32) -> Self::Output { - self.push_str("table.copy"); - if dst != 0 || src != 0 { - self.push_str(" "); - self.print_table_idx(dst)?; - self.push_str(" "); - self.print_table_idx(src)?; + $self.elem_index($segment)?; + ); + (payload $self:ident TableCopy $dst:ident $src:ident) => ( + if $src != 0 || $dst != 0 { + $self.push_str(" "); + $self.table_index($dst)?; + $self.push_str(" "); + $self.table_index($src)?; } - Ok(OpKind::Normal) - } - fn visit_table_grow(&mut self, _pos: usize, table: u32) -> Self::Output { - self.push_str("table.grow "); - self.print_table_idx(table)?; - Ok(OpKind::Normal) - } - fn visit_table_size(&mut self, _pos: usize, table: u32) -> Self::Output { - self.push_str("table.size "); - self.print_table_idx(table)?; - Ok(OpKind::Normal) - } - fn visit_table_fill(&mut self, _pos: usize, table: u32) -> Self::Output { - self.push_str("table.fill "); - self.print_table_idx(table)?; - Ok(OpKind::Normal) - } - fn visit_i32_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.load", &memarg, 4) - } - fn visit_i64_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load", &memarg, 8) - } - fn visit_f32_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("f32.load", &memarg, 4) - } - fn visit_f64_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("f64.load", &memarg, 8) - } - fn visit_i32_load8_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.load8_s", &memarg, 1) - } - fn visit_i32_load8_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - 
self.mem_instr("i32.load8_u", &memarg, 1) - } - fn visit_i32_load16_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.load16_s", &memarg, 2) - } - fn visit_i32_load16_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.load16_u", &memarg, 2) - } - fn visit_i64_load8_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load8_s", &memarg, 1) - } - fn visit_i64_load8_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load8_u", &memarg, 1) - } - fn visit_i64_load16_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load16_s", &memarg, 2) - } - fn visit_i64_load16_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load16_u", &memarg, 2) - } - fn visit_i64_load32_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load32_s", &memarg, 4) - } - fn visit_i64_load32_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.load32_u", &memarg, 4) - } - fn visit_i32_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.store", &memarg, 4) - } - fn visit_i64_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.store", &memarg, 8) - } - fn visit_f32_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("f32.store", &memarg, 4) - } - fn visit_f64_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("f64.store", &memarg, 8) - } - fn visit_i32_store8(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.store8", &memarg, 1) - } - fn visit_i32_store16(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.store16", &memarg, 2) - } - fn visit_i64_store8(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.store8", &memarg, 1) - } - fn 
visit_i64_store16(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.store16", &memarg, 2) - } - fn visit_i64_store32(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.store32", &memarg, 4) - } - fn visit_memory_size(&mut self, _pos: usize, mem: u32, _mem_byte: u8) -> Self::Output { - self.push_str("memory.size"); - if mem != 0 { - self.push_str(" "); - self.print_memory_idx(mem)?; + ); + (payload $self:ident $mem_op:ident $mem:ident mem_byte) => ( + if $mem != 0 { + $self.push_str(" "); + $self.memory_index($mem)?; } - Ok(OpKind::Normal) - } - fn visit_memory_grow(&mut self, _pos: usize, mem: u32, _mem_byte: u8) -> Self::Output { - self.push_str("memory.grow"); - if mem != 0 { - self.push_str(" "); - self.print_memory_idx(mem)?; + ); + (payload $self:ident MemoryInit $segment:ident $mem:ident) => ( + if $mem != 0 { + $self.push_str(" "); + $self.memory_index($mem)?; } - Ok(OpKind::Normal) - } - fn visit_memory_init(&mut self, _pos: usize, segment: u32, mem: u32) -> Self::Output { - self.push_str("memory.init "); - if mem != 0 { - self.print_memory_idx(mem)?; - self.push_str(" "); + $self.push_str(" "); + $self.data_index($segment)?; + ); + (payload $self:ident MemoryCopy $dst:ident $src:ident) => ( + if $src != 0 || $dst != 0 { + $self.push_str(" "); + $self.memory_index($dst)?; + $self.push_str(" "); + $self.memory_index($src)?; } - self.print_data_idx(segment)?; - Ok(OpKind::Normal) - } - fn visit_data_drop(&mut self, _pos: usize, segment: u32) -> Self::Output { - self.push_str("data.drop "); - self.print_data_idx(segment)?; - Ok(OpKind::Normal) - } - fn visit_memory_copy(&mut self, _pos: usize, dst: u32, src: u32) -> Self::Output { - self.push_str("memory.copy"); - if dst != 0 || src != 0 { - self.push_str(" "); - self.print_memory_idx(dst)?; - self.push_str(" "); - self.print_memory_idx(src)?; + ); + (payload $self:ident MemoryFill $mem:ident) => ( + if $mem != 0 { + $self.push_str(" "); + 
$self.memory_index($mem)?; } - Ok(OpKind::Normal) - } - fn visit_memory_fill(&mut self, _pos: usize, mem: u32) -> Self::Output { - self.push_str("memory.fill"); - if mem != 0 { - self.push_str(" "); - self.print_memory_idx(mem)?; + ); + (payload $self:ident I32Const $val:ident) => (write!($self.result(), " {}", $val)?); + (payload $self:ident I64Const $val:ident) => (write!($self.result(), " {}", $val)?); + (payload $self:ident F32Const $val:ident) => ( + $self.push_str(" "); + $self.printer.print_f32($val.bits())?; + ); + (payload $self:ident F64Const $val:ident) => ( + $self.push_str(" "); + $self.printer.print_f64($val.bits())?; + ); + (payload $self:ident V128Const $val:ident) => ( + $self.push_str(" i32x4"); + for chunk in $val.bytes().chunks(4) { + write!( + $self.result(), + " 0x{:02x}{:02x}{:02x}{:02x}", + chunk[3], + chunk[2], + chunk[1], + chunk[0], + )?; } - Ok(OpKind::Normal) - } - fn visit_i32_const(&mut self, _pos: usize, value: i32) -> Self::Output { - write!(self.result(), "i32.const {value}")?; - Ok(OpKind::Normal) - } - fn visit_i64_const(&mut self, _pos: usize, value: i64) -> Self::Output { - write!(self.result(), "i64.const {value}")?; - Ok(OpKind::Normal) - } - fn visit_f32_const(&mut self, _pos: usize, value: Ieee32) -> Self::Output { - self.push_str("f32.const "); - self.printer.print_f32(value.bits())?; - Ok(OpKind::Normal) - } - fn visit_f64_const(&mut self, _pos: usize, value: Ieee64) -> Self::Output { - self.push_str("f64.const "); - self.printer.print_f64(value.bits())?; - Ok(OpKind::Normal) - } - fn visit_i32_eqz(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.eqz") - } - fn visit_i32_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.eq") - } - fn visit_i32_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.ne") - } - fn visit_i32_lt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.lt_s") - } - fn visit_i32_lt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.lt_u") - } - fn 
visit_i32_gt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.gt_s") - } - fn visit_i32_gt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.gt_u") - } - fn visit_i32_le_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.le_s") - } - fn visit_i32_le_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.le_u") - } - fn visit_i32_ge_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.ge_s") - } - fn visit_i32_ge_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.ge_u") - } - fn visit_i64_eqz(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.eqz") - } - fn visit_i64_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.eq") - } - fn visit_i64_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.ne") - } - fn visit_i64_lt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.lt_s") - } - fn visit_i64_lt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.lt_u") - } - fn visit_i64_gt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.gt_s") - } - fn visit_i64_gt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.gt_u") - } - fn visit_i64_le_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.le_s") - } - fn visit_i64_le_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.le_u") - } - fn visit_i64_ge_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.ge_s") - } - fn visit_i64_ge_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.ge_u") - } - fn visit_f32_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.eq") - } - fn visit_f32_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.ne") - } - fn visit_f32_lt(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.lt") - } - fn visit_f32_gt(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.gt") - } - fn visit_f32_le(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.le") - } - fn visit_f32_ge(&mut self, 
_pos: usize) -> Self::Output { - self.instr("f32.ge") - } - fn visit_f64_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.eq") - } - fn visit_f64_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.ne") - } - fn visit_f64_lt(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.lt") - } - fn visit_f64_gt(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.gt") - } - fn visit_f64_le(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.le") - } - fn visit_f64_ge(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.ge") - } - fn visit_i32_clz(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.clz") - } - fn visit_i32_ctz(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.ctz") - } - fn visit_i32_popcnt(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.popcnt") - } - fn visit_i32_add(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.add") - } - fn visit_i32_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.sub") - } - fn visit_i32_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.mul") - } - fn visit_i32_div_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.div_s") - } - fn visit_i32_div_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.div_u") - } - fn visit_i32_rem_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.rem_s") - } - fn visit_i32_rem_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.rem_u") - } - fn visit_i32_and(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.and") - } - fn visit_i32_or(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.or") - } - fn visit_i32_xor(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.xor") - } - fn visit_i32_shl(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.shl") - } - fn visit_i32_shr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.shr_s") - } - fn visit_i32_shr_u(&mut self, _pos: usize) -> Self::Output 
{ - self.instr("i32.shr_u") - } - fn visit_i32_rotl(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.rotl") - } - fn visit_i32_rotr(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.rotr") - } - - fn visit_i64_clz(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.clz") - } - fn visit_i64_ctz(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.ctz") - } - fn visit_i64_popcnt(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.popcnt") - } - fn visit_i64_add(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.add") - } - fn visit_i64_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.sub") - } - fn visit_i64_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.mul") - } - fn visit_i64_div_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.div_s") - } - fn visit_i64_div_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.div_u") - } - fn visit_i64_rem_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.rem_s") - } - fn visit_i64_rem_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.rem_u") - } - fn visit_i64_and(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.and") - } - fn visit_i64_or(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.or") - } - fn visit_i64_xor(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.xor") - } - fn visit_i64_shl(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.shl") - } - fn visit_i64_shr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.shr_s") - } - fn visit_i64_shr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.shr_u") - } - fn visit_i64_rotl(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.rotl") - } - fn visit_i64_rotr(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.rotr") - } - - fn visit_f32_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.abs") - } - fn visit_f32_neg(&mut self, _pos: usize) -> Self::Output 
{ - self.instr("f32.neg") - } - fn visit_f32_ceil(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.ceil") - } - fn visit_f32_floor(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.floor") - } - fn visit_f32_trunc(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.trunc") - } - fn visit_f32_nearest(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.nearest") - } - fn visit_f32_sqrt(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.sqrt") - } - fn visit_f32_add(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.add") - } - fn visit_f32_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.sub") - } - fn visit_f32_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.mul") - } - fn visit_f32_div(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.div") - } - fn visit_f32_min(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.min") - } - fn visit_f32_max(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.max") - } - fn visit_f32_copysign(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.copysign") - } - - fn visit_f64_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.abs") - } - fn visit_f64_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.neg") - } - fn visit_f64_ceil(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.ceil") - } - fn visit_f64_floor(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.floor") - } - fn visit_f64_trunc(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.trunc") - } - fn visit_f64_nearest(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.nearest") - } - fn visit_f64_sqrt(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.sqrt") - } - fn visit_f64_add(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.add") - } - fn visit_f64_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.sub") - } - fn visit_f64_mul(&mut self, _pos: usize) -> 
Self::Output { - self.instr("f64.mul") - } - fn visit_f64_div(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.div") - } - fn visit_f64_min(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.min") - } - fn visit_f64_max(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.max") - } - fn visit_f64_copysign(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.copysign") - } - - fn visit_i32_wrap_i64(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.wrap_i64") - } - fn visit_i32_trunc_f32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_f32_s") - } - fn visit_i32_trunc_f32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_f32_u") - } - fn visit_i32_trunc_f64_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_f64_s") - } - fn visit_i32_trunc_f64_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_f64_u") - } - fn visit_i64_extend_i32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.extend_i32_s") - } - fn visit_i64_extend_i32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.extend_i32_u") - } - fn visit_i64_trunc_f32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_f32_s") - } - fn visit_i64_trunc_f32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_f32_u") - } - fn visit_i64_trunc_f64_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_f64_s") - } - fn visit_i64_trunc_f64_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_f64_u") - } + ); + (payload $self:ident $op:ident $($arg:ident)*) => ( + $( + $self.push_str(" "); + $self.$arg($arg)?; + )* + ); + + (name Block) => ("block"); + (name If) => ("if"); + (name Else) => ("else"); + (name Loop) => ("loop"); + (name End) => ("end"); + (name Unreachable) => ("unreachable"); + (name Nop) => ("nop"); + (name Br) => ("br"); + (name BrIf) => ("br_if"); + (name BrTable) => ("br_table"); + (name Return) => ("return"); + 
(name Call) => ("call"); + (name CallIndirect) => ("call_indirect"); + (name ReturnCall) => ("return_call"); + (name ReturnCallIndirect) => ("return_call_indirect"); + (name Drop) => ("drop"); + (name Select) => ("select"); + (name TypedSelect) => ("select"); + (name LocalGet) => ("local.get"); + (name LocalSet) => ("local.set"); + (name LocalTee) => ("local.tee"); + (name GlobalGet) => ("global.get"); + (name GlobalSet) => ("global.set"); + (name TableGet) => ("table.get"); + (name TableSet) => ("table.set"); + (name I32Load) => ("i32.load"); + (name I64Load) => ("i64.load"); + (name F32Load) => ("f32.load"); + (name F64Load) => ("f64.load"); + (name I32Load8S) => ("i32.load8_s"); + (name I32Load8U) => ("i32.load8_u"); + (name I32Load16S) => ("i32.load16_s"); + (name I32Load16U) => ("i32.load16_u"); + (name I64Load8S) => ("i64.load8_s"); + (name I64Load8U) => ("i64.load8_u"); + (name I64Load16S) => ("i64.load16_s"); + (name I64Load16U) => ("i64.load16_u"); + (name I64Load32S) => ("i64.load32_s"); + (name I64Load32U) => ("i64.load32_u"); + (name I32Store) => ("i32.store"); + (name I64Store) => ("i64.store"); + (name F32Store) => ("f32.store"); + (name F64Store) => ("f64.store"); + (name I32Store8) => ("i32.store8"); + (name I32Store16) => ("i32.store16"); + (name I64Store8) => ("i64.store8"); + (name I64Store16) => ("i64.store16"); + (name I64Store32) => ("i64.store32"); + (name MemorySize) => ("memory.size"); + (name MemoryGrow) => ("memory.grow"); + (name MemoryInit) => ("memory.init"); + (name MemoryCopy) => ("memory.copy"); + (name MemoryFill) => ("memory.fill"); + (name DataDrop) => ("data.drop"); + (name ElemDrop) => ("elem.drop"); + (name TableInit) => ("table.init"); + (name TableCopy) => ("table.copy"); + (name TableFill) => ("table.fill"); + (name TableSize) => ("table.size"); + (name TableGrow) => ("table.grow"); + (name RefNull) => ("ref.null"); + (name RefIsNull) => ("ref.is_null"); + (name RefFunc) => ("ref.func"); + (name I32Const) => ("i32.const"); 
+ (name I64Const) => ("i64.const"); + (name F32Const) => ("f32.const"); + (name F64Const) => ("f64.const"); + (name I32Clz) => ("i32.clz"); + (name I32Ctz) => ("i32.ctz"); + (name I32Popcnt) => ("i32.popcnt"); + (name I32Add) => ("i32.add"); + (name I32Sub) => ("i32.sub"); + (name I32Mul) => ("i32.mul"); + (name I32DivS) => ("i32.div_s"); + (name I32DivU) => ("i32.div_u"); + (name I32RemS) => ("i32.rem_s"); + (name I32RemU) => ("i32.rem_u"); + (name I32And) => ("i32.and"); + (name I32Or) => ("i32.or"); + (name I32Xor) => ("i32.xor"); + (name I32Shl) => ("i32.shl"); + (name I32ShrS) => ("i32.shr_s"); + (name I32ShrU) => ("i32.shr_u"); + (name I32Rotl) => ("i32.rotl"); + (name I32Rotr) => ("i32.rotr"); + (name I64Clz) => ("i64.clz"); + (name I64Ctz) => ("i64.ctz"); + (name I64Popcnt) => ("i64.popcnt"); + (name I64Add) => ("i64.add"); + (name I64Sub) => ("i64.sub"); + (name I64Mul) => ("i64.mul"); + (name I64DivS) => ("i64.div_s"); + (name I64DivU) => ("i64.div_u"); + (name I64RemS) => ("i64.rem_s"); + (name I64RemU) => ("i64.rem_u"); + (name I64And) => ("i64.and"); + (name I64Or) => ("i64.or"); + (name I64Xor) => ("i64.xor"); + (name I64Shl) => ("i64.shl"); + (name I64ShrS) => ("i64.shr_s"); + (name I64ShrU) => ("i64.shr_u"); + (name I64Rotl) => ("i64.rotl"); + (name I64Rotr) => ("i64.rotr"); + (name F32Abs) => ("f32.abs"); + (name F32Neg) => ("f32.neg"); + (name F32Ceil) => ("f32.ceil"); + (name F32Floor) => ("f32.floor"); + (name F32Trunc) => ("f32.trunc"); + (name F32Nearest) => ("f32.nearest"); + (name F32Sqrt) => ("f32.sqrt"); + (name F32Add) => ("f32.add"); + (name F32Sub) => ("f32.sub"); + (name F32Mul) => ("f32.mul"); + (name F32Div) => ("f32.div"); + (name F32Min) => ("f32.min"); + (name F32Max) => ("f32.max"); + (name F32Copysign) => ("f32.copysign"); + (name F64Abs) => ("f64.abs"); + (name F64Neg) => ("f64.neg"); + (name F64Ceil) => ("f64.ceil"); + (name F64Floor) => ("f64.floor"); + (name F64Trunc) => ("f64.trunc"); + (name F64Nearest) => ("f64.nearest"); 
+ (name F64Sqrt) => ("f64.sqrt"); + (name F64Add) => ("f64.add"); + (name F64Sub) => ("f64.sub"); + (name F64Mul) => ("f64.mul"); + (name F64Div) => ("f64.div"); + (name F64Min) => ("f64.min"); + (name F64Max) => ("f64.max"); + (name F64Copysign) => ("f64.copysign"); + (name I32Eqz) => ("i32.eqz"); + (name I32Eq) => ("i32.eq"); + (name I32Ne) => ("i32.ne"); + (name I32LtS) => ("i32.lt_s"); + (name I32LtU) => ("i32.lt_u"); + (name I32GtS) => ("i32.gt_s"); + (name I32GtU) => ("i32.gt_u"); + (name I32LeS) => ("i32.le_s"); + (name I32LeU) => ("i32.le_u"); + (name I32GeS) => ("i32.ge_s"); + (name I32GeU) => ("i32.ge_u"); + (name I64Eqz) => ("i64.eqz"); + (name I64Eq) => ("i64.eq"); + (name I64Ne) => ("i64.ne"); + (name I64LtS) => ("i64.lt_s"); + (name I64LtU) => ("i64.lt_u"); + (name I64GtS) => ("i64.gt_s"); + (name I64GtU) => ("i64.gt_u"); + (name I64LeS) => ("i64.le_s"); + (name I64LeU) => ("i64.le_u"); + (name I64GeS) => ("i64.ge_s"); + (name I64GeU) => ("i64.ge_u"); + (name F32Eq) => ("f32.eq"); + (name F32Ne) => ("f32.ne"); + (name F32Lt) => ("f32.lt"); + (name F32Gt) => ("f32.gt"); + (name F32Le) => ("f32.le"); + (name F32Ge) => ("f32.ge"); + (name F64Eq) => ("f64.eq"); + (name F64Ne) => ("f64.ne"); + (name F64Lt) => ("f64.lt"); + (name F64Gt) => ("f64.gt"); + (name F64Le) => ("f64.le"); + (name F64Ge) => ("f64.ge"); + (name I32WrapI64) => ("i32.wrap_i64"); + (name I32TruncF32S) => ("i32.trunc_f32_s"); + (name I32TruncF32U) => ("i32.trunc_f32_u"); + (name I32TruncF64S) => ("i32.trunc_f64_s"); + (name I32TruncF64U) => ("i32.trunc_f64_u"); + (name I64ExtendI32S) => ("i64.extend_i32_s"); + (name I64ExtendI32U) => ("i64.extend_i32_u"); + (name I64TruncF32S) => ("i64.trunc_f32_s"); + (name I64TruncF32U) => ("i64.trunc_f32_u"); + (name I64TruncF64S) => ("i64.trunc_f64_s"); + (name I64TruncF64U) => ("i64.trunc_f64_u"); + (name F32ConvertI32S) => ("f32.convert_i32_s"); + (name F32ConvertI32U) => ("f32.convert_i32_u"); + (name F32ConvertI64S) => ("f32.convert_i64_s"); + 
(name F32ConvertI64U) => ("f32.convert_i64_u"); + (name F32DemoteF64) => ("f32.demote_f64"); + (name F64ConvertI32S) => ("f64.convert_i32_s"); + (name F64ConvertI32U) => ("f64.convert_i32_u"); + (name F64ConvertI64S) => ("f64.convert_i64_s"); + (name F64ConvertI64U) => ("f64.convert_i64_u"); + (name F64PromoteF32) => ("f64.promote_f32"); + (name I32ReinterpretF32) => ("i32.reinterpret_f32"); + (name I64ReinterpretF64) => ("i64.reinterpret_f64"); + (name F32ReinterpretI32) => ("f32.reinterpret_i32"); + (name F64ReinterpretI64) => ("f64.reinterpret_i64"); + (name I32TruncSatF32S) => ("i32.trunc_sat_f32_s"); + (name I32TruncSatF32U) => ("i32.trunc_sat_f32_u"); + (name I32TruncSatF64S) => ("i32.trunc_sat_f64_s"); + (name I32TruncSatF64U) => ("i32.trunc_sat_f64_u"); + (name I64TruncSatF32S) => ("i64.trunc_sat_f32_s"); + (name I64TruncSatF32U) => ("i64.trunc_sat_f32_u"); + (name I64TruncSatF64S) => ("i64.trunc_sat_f64_s"); + (name I64TruncSatF64U) => ("i64.trunc_sat_f64_u"); + (name I32Extend8S) => ("i32.extend8_s"); + (name I32Extend16S) => ("i32.extend16_s"); + (name I64Extend8S) => ("i64.extend8_s"); + (name I64Extend16S) => ("i64.extend16_s"); + (name I64Extend32S) => ("i64.extend32_s"); + (name MemoryAtomicNotify) => ("memory.atomic.notify"); + (name MemoryAtomicWait32) => ("memory.atomic.wait32"); + (name MemoryAtomicWait64) => ("memory.atomic.wait64"); + (name AtomicFence) => ("atomic.fence"); + (name I32AtomicLoad) => ("i32.atomic.load"); + (name I64AtomicLoad) => ("i64.atomic.load"); + (name I32AtomicLoad8U) => ("i32.atomic.load8_u"); + (name I32AtomicLoad16U) => ("i32.atomic.load16_u"); + (name I64AtomicLoad8U) => ("i64.atomic.load8_u"); + (name I64AtomicLoad16U) => ("i64.atomic.load16_u"); + (name I64AtomicLoad32U) => ("i64.atomic.load32_u"); + (name I32AtomicStore) => ("i32.atomic.store"); + (name I64AtomicStore) => ("i64.atomic.store"); + (name I32AtomicStore8) => ("i32.atomic.store8"); + (name I32AtomicStore16) => ("i32.atomic.store16"); + (name 
I64AtomicStore8) => ("i64.atomic.store8"); + (name I64AtomicStore16) => ("i64.atomic.store16"); + (name I64AtomicStore32) => ("i64.atomic.store32"); + (name I32AtomicRmwAdd) => ("i32.atomic.rmw.add"); + (name I64AtomicRmwAdd) => ("i64.atomic.rmw.add"); + (name I32AtomicRmw8AddU) => ("i32.atomic.rmw8.add_u"); + (name I32AtomicRmw16AddU) => ("i32.atomic.rmw16.add_u"); + (name I64AtomicRmw8AddU) => ("i64.atomic.rmw8.add_u"); + (name I64AtomicRmw16AddU) => ("i64.atomic.rmw16.add_u"); + (name I64AtomicRmw32AddU) => ("i64.atomic.rmw32.add_u"); + (name I32AtomicRmwSub) => ("i32.atomic.rmw.sub"); + (name I64AtomicRmwSub) => ("i64.atomic.rmw.sub"); + (name I32AtomicRmw8SubU) => ("i32.atomic.rmw8.sub_u"); + (name I32AtomicRmw16SubU) => ("i32.atomic.rmw16.sub_u"); + (name I64AtomicRmw8SubU) => ("i64.atomic.rmw8.sub_u"); + (name I64AtomicRmw16SubU) => ("i64.atomic.rmw16.sub_u"); + (name I64AtomicRmw32SubU) => ("i64.atomic.rmw32.sub_u"); + (name I32AtomicRmwAnd) => ("i32.atomic.rmw.and"); + (name I64AtomicRmwAnd) => ("i64.atomic.rmw.and"); + (name I32AtomicRmw8AndU) => ("i32.atomic.rmw8.and_u"); + (name I32AtomicRmw16AndU) => ("i32.atomic.rmw16.and_u"); + (name I64AtomicRmw8AndU) => ("i64.atomic.rmw8.and_u"); + (name I64AtomicRmw16AndU) => ("i64.atomic.rmw16.and_u"); + (name I64AtomicRmw32AndU) => ("i64.atomic.rmw32.and_u"); + (name I32AtomicRmwOr) => ("i32.atomic.rmw.or"); + (name I64AtomicRmwOr) => ("i64.atomic.rmw.or"); + (name I32AtomicRmw8OrU) => ("i32.atomic.rmw8.or_u"); + (name I32AtomicRmw16OrU) => ("i32.atomic.rmw16.or_u"); + (name I64AtomicRmw8OrU) => ("i64.atomic.rmw8.or_u"); + (name I64AtomicRmw16OrU) => ("i64.atomic.rmw16.or_u"); + (name I64AtomicRmw32OrU) => ("i64.atomic.rmw32.or_u"); + (name I32AtomicRmwXor) => ("i32.atomic.rmw.xor"); + (name I64AtomicRmwXor) => ("i64.atomic.rmw.xor"); + (name I32AtomicRmw8XorU) => ("i32.atomic.rmw8.xor_u"); + (name I32AtomicRmw16XorU) => ("i32.atomic.rmw16.xor_u"); + (name I64AtomicRmw8XorU) => ("i64.atomic.rmw8.xor_u"); + (name 
I64AtomicRmw16XorU) => ("i64.atomic.rmw16.xor_u"); + (name I64AtomicRmw32XorU) => ("i64.atomic.rmw32.xor_u"); + (name I32AtomicRmwXchg) => ("i32.atomic.rmw.xchg"); + (name I64AtomicRmwXchg) => ("i64.atomic.rmw.xchg"); + (name I32AtomicRmw8XchgU) => ("i32.atomic.rmw8.xchg_u"); + (name I32AtomicRmw16XchgU) => ("i32.atomic.rmw16.xchg_u"); + (name I64AtomicRmw8XchgU) => ("i64.atomic.rmw8.xchg_u"); + (name I64AtomicRmw16XchgU) => ("i64.atomic.rmw16.xchg_u"); + (name I64AtomicRmw32XchgU) => ("i64.atomic.rmw32.xchg_u"); + (name I32AtomicRmwCmpxchg) => ("i32.atomic.rmw.cmpxchg"); + (name I64AtomicRmwCmpxchg) => ("i64.atomic.rmw.cmpxchg"); + (name I32AtomicRmw8CmpxchgU) => ("i32.atomic.rmw8.cmpxchg_u"); + (name I32AtomicRmw16CmpxchgU) => ("i32.atomic.rmw16.cmpxchg_u"); + (name I64AtomicRmw8CmpxchgU) => ("i64.atomic.rmw8.cmpxchg_u"); + (name I64AtomicRmw16CmpxchgU) => ("i64.atomic.rmw16.cmpxchg_u"); + (name I64AtomicRmw32CmpxchgU) => ("i64.atomic.rmw32.cmpxchg_u"); + (name V128Load) => ("v128.load"); + (name V128Load8x8S) => ("v128.load8x8_s"); + (name V128Load8x8U) => ("v128.load8x8_u"); + (name V128Load16x4S) => ("v128.load16x4_s"); + (name V128Load16x4U) => ("v128.load16x4_u"); + (name V128Load32x2S) => ("v128.load32x2_s"); + (name V128Load32x2U) => ("v128.load32x2_u"); + (name V128Load8Splat) => ("v128.load8_splat"); + (name V128Load16Splat) => ("v128.load16_splat"); + (name V128Load32Splat) => ("v128.load32_splat"); + (name V128Load64Splat) => ("v128.load64_splat"); + (name V128Load32Zero) => ("v128.load32_zero"); + (name V128Load64Zero) => ("v128.load64_zero"); + (name V128Store) => ("v128.store"); + (name V128Load8Lane) => ("v128.load8_lane"); + (name V128Load16Lane) => ("v128.load16_lane"); + (name V128Load32Lane) => ("v128.load32_lane"); + (name V128Load64Lane) => ("v128.load64_lane"); + (name V128Store8Lane) => ("v128.store8_lane"); + (name V128Store16Lane) => ("v128.store16_lane"); + (name V128Store32Lane) => ("v128.store32_lane"); + (name V128Store64Lane) => 
("v128.store64_lane"); + (name V128Const) => ("v128.const"); + (name I8x16Shuffle) => ("i8x16.shuffle"); + (name I8x16ExtractLaneS) => ("i8x16.extract_lane_s"); + (name I8x16ExtractLaneU) => ("i8x16.extract_lane_u"); + (name I8x16ReplaceLane) => ("i8x16.replace_lane"); + (name I16x8ExtractLaneS) => ("i16x8.extract_lane_s"); + (name I16x8ExtractLaneU) => ("i16x8.extract_lane_u"); + (name I16x8ReplaceLane) => ("i16x8.replace_lane"); + (name I32x4ExtractLane) => ("i32x4.extract_lane"); + (name I32x4ReplaceLane) => ("i32x4.replace_lane"); + (name I64x2ExtractLane) => ("i64x2.extract_lane"); + (name I64x2ReplaceLane) => ("i64x2.replace_lane"); + (name F32x4ExtractLane) => ("f32x4.extract_lane"); + (name F32x4ReplaceLane) => ("f32x4.replace_lane"); + (name F64x2ExtractLane) => ("f64x2.extract_lane"); + (name F64x2ReplaceLane) => ("f64x2.replace_lane"); + (name I8x16Swizzle) => ("i8x16.swizzle"); + (name I8x16Splat) => ("i8x16.splat"); + (name I16x8Splat) => ("i16x8.splat"); + (name I32x4Splat) => ("i32x4.splat"); + (name I64x2Splat) => ("i64x2.splat"); + (name F32x4Splat) => ("f32x4.splat"); + (name F64x2Splat) => ("f64x2.splat"); + (name I8x16Eq) => ("i8x16.eq"); + (name I8x16Ne) => ("i8x16.ne"); + (name I8x16LtS) => ("i8x16.lt_s"); + (name I8x16LtU) => ("i8x16.lt_u"); + (name I8x16GtS) => ("i8x16.gt_s"); + (name I8x16GtU) => ("i8x16.gt_u"); + (name I8x16LeS) => ("i8x16.le_s"); + (name I8x16LeU) => ("i8x16.le_u"); + (name I8x16GeS) => ("i8x16.ge_s"); + (name I8x16GeU) => ("i8x16.ge_u"); + (name I16x8Eq) => ("i16x8.eq"); + (name I16x8Ne) => ("i16x8.ne"); + (name I16x8LtS) => ("i16x8.lt_s"); + (name I16x8LtU) => ("i16x8.lt_u"); + (name I16x8GtS) => ("i16x8.gt_s"); + (name I16x8GtU) => ("i16x8.gt_u"); + (name I16x8LeS) => ("i16x8.le_s"); + (name I16x8LeU) => ("i16x8.le_u"); + (name I16x8GeS) => ("i16x8.ge_s"); + (name I16x8GeU) => ("i16x8.ge_u"); + (name I32x4Eq) => ("i32x4.eq"); + (name I32x4Ne) => ("i32x4.ne"); + (name I32x4LtS) => ("i32x4.lt_s"); + (name I32x4LtU) => 
("i32x4.lt_u"); + (name I32x4GtS) => ("i32x4.gt_s"); + (name I32x4GtU) => ("i32x4.gt_u"); + (name I32x4LeS) => ("i32x4.le_s"); + (name I32x4LeU) => ("i32x4.le_u"); + (name I32x4GeS) => ("i32x4.ge_s"); + (name I32x4GeU) => ("i32x4.ge_u"); + (name I64x2Eq) => ("i64x2.eq"); + (name I64x2Ne) => ("i64x2.ne"); + (name I64x2LtS) => ("i64x2.lt_s"); + (name I64x2GtS) => ("i64x2.gt_s"); + (name I64x2LeS) => ("i64x2.le_s"); + (name I64x2GeS) => ("i64x2.ge_s"); + (name F32x4Eq) => ("f32x4.eq"); + (name F32x4Ne) => ("f32x4.ne"); + (name F32x4Lt) => ("f32x4.lt"); + (name F32x4Gt) => ("f32x4.gt"); + (name F32x4Le) => ("f32x4.le"); + (name F32x4Ge) => ("f32x4.ge"); + (name F64x2Eq) => ("f64x2.eq"); + (name F64x2Ne) => ("f64x2.ne"); + (name F64x2Lt) => ("f64x2.lt"); + (name F64x2Gt) => ("f64x2.gt"); + (name F64x2Le) => ("f64x2.le"); + (name F64x2Ge) => ("f64x2.ge"); + (name V128Not) => ("v128.not"); + (name V128And) => ("v128.and"); + (name V128AndNot) => ("v128.andnot"); + (name V128Or) => ("v128.or"); + (name V128Xor) => ("v128.xor"); + (name V128Bitselect) => ("v128.bitselect"); + (name V128AnyTrue) => ("v128.any_true"); + (name I8x16Abs) => ("i8x16.abs"); + (name I8x16Neg) => ("i8x16.neg"); + (name I8x16Popcnt) => ("i8x16.popcnt"); + (name I8x16AllTrue) => ("i8x16.all_true"); + (name I8x16Bitmask) => ("i8x16.bitmask"); + (name I8x16NarrowI16x8S) => ("i8x16.narrow_i16x8_s"); + (name I8x16NarrowI16x8U) => ("i8x16.narrow_i16x8_u"); + (name I8x16Shl) => ("i8x16.shl"); + (name I8x16ShrS) => ("i8x16.shr_s"); + (name I8x16ShrU) => ("i8x16.shr_u"); + (name I8x16Add) => ("i8x16.add"); + (name I8x16AddSatS) => ("i8x16.add_sat_s"); + (name I8x16AddSatU) => ("i8x16.add_sat_u"); + (name I8x16Sub) => ("i8x16.sub"); + (name I8x16SubSatS) => ("i8x16.sub_sat_s"); + (name I8x16SubSatU) => ("i8x16.sub_sat_u"); + (name I8x16MinS) => ("i8x16.min_s"); + (name I8x16MinU) => ("i8x16.min_u"); + (name I8x16MaxS) => ("i8x16.max_s"); + (name I8x16MaxU) => ("i8x16.max_u"); + (name I8x16AvgrU) => 
("i8x16.avgr_u"); + (name I16x8ExtAddPairwiseI8x16S) => ("i16x8.extadd_pairwise_i8x16_s"); + (name I16x8ExtAddPairwiseI8x16U) => ("i16x8.extadd_pairwise_i8x16_u"); + (name I16x8Abs) => ("i16x8.abs"); + (name I16x8Neg) => ("i16x8.neg"); + (name I16x8Q15MulrSatS) => ("i16x8.q15mulr_sat_s"); + (name I16x8AllTrue) => ("i16x8.all_true"); + (name I16x8Bitmask) => ("i16x8.bitmask"); + (name I16x8NarrowI32x4S) => ("i16x8.narrow_i32x4_s"); + (name I16x8NarrowI32x4U) => ("i16x8.narrow_i32x4_u"); + (name I16x8ExtendLowI8x16S) => ("i16x8.extend_low_i8x16_s"); + (name I16x8ExtendHighI8x16S) => ("i16x8.extend_high_i8x16_s"); + (name I16x8ExtendLowI8x16U) => ("i16x8.extend_low_i8x16_u"); + (name I16x8ExtendHighI8x16U) => ("i16x8.extend_high_i8x16_u"); + (name I16x8Shl) => ("i16x8.shl"); + (name I16x8ShrS) => ("i16x8.shr_s"); + (name I16x8ShrU) => ("i16x8.shr_u"); + (name I16x8Add) => ("i16x8.add"); + (name I16x8AddSatS) => ("i16x8.add_sat_s"); + (name I16x8AddSatU) => ("i16x8.add_sat_u"); + (name I16x8Sub) => ("i16x8.sub"); + (name I16x8SubSatS) => ("i16x8.sub_sat_s"); + (name I16x8SubSatU) => ("i16x8.sub_sat_u"); + (name I16x8Mul) => ("i16x8.mul"); + (name I16x8MinS) => ("i16x8.min_s"); + (name I16x8MinU) => ("i16x8.min_u"); + (name I16x8MaxS) => ("i16x8.max_s"); + (name I16x8MaxU) => ("i16x8.max_u"); + (name I16x8AvgrU) => ("i16x8.avgr_u"); + (name I16x8ExtMulLowI8x16S) => ("i16x8.extmul_low_i8x16_s"); + (name I16x8ExtMulHighI8x16S) => ("i16x8.extmul_high_i8x16_s"); + (name I16x8ExtMulLowI8x16U) => ("i16x8.extmul_low_i8x16_u"); + (name I16x8ExtMulHighI8x16U) => ("i16x8.extmul_high_i8x16_u"); + (name I32x4ExtAddPairwiseI16x8S) => ("i32x4.extadd_pairwise_i16x8_s"); + (name I32x4ExtAddPairwiseI16x8U) => ("i32x4.extadd_pairwise_i16x8_u"); + (name I32x4Abs) => ("i32x4.abs"); + (name I32x4Neg) => ("i32x4.neg"); + (name I32x4AllTrue) => ("i32x4.all_true"); + (name I32x4Bitmask) => ("i32x4.bitmask"); + (name I32x4ExtendLowI16x8S) => ("i32x4.extend_low_i16x8_s"); + (name 
I32x4ExtendHighI16x8S) => ("i32x4.extend_high_i16x8_s"); + (name I32x4ExtendLowI16x8U) => ("i32x4.extend_low_i16x8_u"); + (name I32x4ExtendHighI16x8U) => ("i32x4.extend_high_i16x8_u"); + (name I32x4Shl) => ("i32x4.shl"); + (name I32x4ShrS) => ("i32x4.shr_s"); + (name I32x4ShrU) => ("i32x4.shr_u"); + (name I32x4Add) => ("i32x4.add"); + (name I32x4Sub) => ("i32x4.sub"); + (name I32x4Mul) => ("i32x4.mul"); + (name I32x4MinS) => ("i32x4.min_s"); + (name I32x4MinU) => ("i32x4.min_u"); + (name I32x4MaxS) => ("i32x4.max_s"); + (name I32x4MaxU) => ("i32x4.max_u"); + (name I32x4DotI16x8S) => ("i32x4.dot_i16x8_s"); + (name I32x4ExtMulLowI16x8S) => ("i32x4.extmul_low_i16x8_s"); + (name I32x4ExtMulHighI16x8S) => ("i32x4.extmul_high_i16x8_s"); + (name I32x4ExtMulLowI16x8U) => ("i32x4.extmul_low_i16x8_u"); + (name I32x4ExtMulHighI16x8U) => ("i32x4.extmul_high_i16x8_u"); + (name I64x2Abs) => ("i64x2.abs"); + (name I64x2Neg) => ("i64x2.neg"); + (name I64x2AllTrue) => ("i64x2.all_true"); + (name I64x2Bitmask) => ("i64x2.bitmask"); + (name I64x2ExtendLowI32x4S) => ("i64x2.extend_low_i32x4_s"); + (name I64x2ExtendHighI32x4S) => ("i64x2.extend_high_i32x4_s"); + (name I64x2ExtendLowI32x4U) => ("i64x2.extend_low_i32x4_u"); + (name I64x2ExtendHighI32x4U) => ("i64x2.extend_high_i32x4_u"); + (name I64x2Shl) => ("i64x2.shl"); + (name I64x2ShrS) => ("i64x2.shr_s"); + (name I64x2ShrU) => ("i64x2.shr_u"); + (name I64x2Add) => ("i64x2.add"); + (name I64x2Sub) => ("i64x2.sub"); + (name I64x2Mul) => ("i64x2.mul"); + (name I64x2ExtMulLowI32x4S) => ("i64x2.extmul_low_i32x4_s"); + (name I64x2ExtMulHighI32x4S) => ("i64x2.extmul_high_i32x4_s"); + (name I64x2ExtMulLowI32x4U) => ("i64x2.extmul_low_i32x4_u"); + (name I64x2ExtMulHighI32x4U) => ("i64x2.extmul_high_i32x4_u"); + (name F32x4Ceil) => ("f32x4.ceil"); + (name F32x4Floor) => ("f32x4.floor"); + (name F32x4Trunc) => ("f32x4.trunc"); + (name F32x4Nearest) => ("f32x4.nearest"); + (name F32x4Abs) => ("f32x4.abs"); + (name F32x4Neg) => ("f32x4.neg"); + 
(name F32x4Sqrt) => ("f32x4.sqrt"); + (name F32x4Add) => ("f32x4.add"); + (name F32x4Sub) => ("f32x4.sub"); + (name F32x4Mul) => ("f32x4.mul"); + (name F32x4Div) => ("f32x4.div"); + (name F32x4Min) => ("f32x4.min"); + (name F32x4Max) => ("f32x4.max"); + (name F32x4PMin) => ("f32x4.pmin"); + (name F32x4PMax) => ("f32x4.pmax"); + (name F64x2Ceil) => ("f64x2.ceil"); + (name F64x2Floor) => ("f64x2.floor"); + (name F64x2Trunc) => ("f64x2.trunc"); + (name F64x2Nearest) => ("f64x2.nearest"); + (name F64x2Abs) => ("f64x2.abs"); + (name F64x2Neg) => ("f64x2.neg"); + (name F64x2Sqrt) => ("f64x2.sqrt"); + (name F64x2Add) => ("f64x2.add"); + (name F64x2Sub) => ("f64x2.sub"); + (name F64x2Mul) => ("f64x2.mul"); + (name F64x2Div) => ("f64x2.div"); + (name F64x2Min) => ("f64x2.min"); + (name F64x2Max) => ("f64x2.max"); + (name F64x2PMin) => ("f64x2.pmin"); + (name F64x2PMax) => ("f64x2.pmax"); + (name I32x4TruncSatF32x4S) => ("i32x4.trunc_sat_f32x4_s"); + (name I32x4TruncSatF32x4U) => ("i32x4.trunc_sat_f32x4_u"); + (name F32x4ConvertI32x4S) => ("f32x4.convert_i32x4_s"); + (name F32x4ConvertI32x4U) => ("f32x4.convert_i32x4_u"); + (name I32x4TruncSatF64x2SZero) => ("i32x4.trunc_sat_f64x2_s_zero"); + (name I32x4TruncSatF64x2UZero) => ("i32x4.trunc_sat_f64x2_u_zero"); + (name F64x2ConvertLowI32x4S) => ("f64x2.convert_low_i32x4_s"); + (name F64x2ConvertLowI32x4U) => ("f64x2.convert_low_i32x4_u"); + (name F32x4DemoteF64x2Zero) => ("f32x4.demote_f64x2_zero"); + (name F64x2PromoteLowF32x4) => ("f64x2.promote_low_f32x4"); + (name Try) => ("try"); + (name Catch) => ("catch"); + (name Throw) => ("throw"); + (name Rethrow) => ("rethrow"); + (name Delegate) => ("delegate"); + (name CatchAll) => ("catch_all"); + (name I8x16RelaxedSwizzle) => ("i8x16.relaxed_swizzle"); + (name I32x4RelaxedTruncSatF32x4S) => ("i32x4.relaxed_trunc_sat_f32x4_s"); + (name I32x4RelaxedTruncSatF32x4U) => ("i32x4.relaxed_trunc_sat_f32x4_u"); + (name I32x4RelaxedTruncSatF64x2SZero) => 
("i32x4.relaxed_trunc_sat_f64x2_s_zero"); + (name I32x4RelaxedTruncSatF64x2UZero) => ("i32x4.relaxed_trunc_sat_f64x2_u_zero"); + (name F32x4RelaxedFma) => ("f32x4.relaxed_fma"); + (name F32x4RelaxedFnma) => ("f32x4.relaxed_fnma"); + (name F64x2RelaxedFma) => ("f64x2.relaxed_fma"); + (name F64x2RelaxedFnma) => ("f64x2.relaxed_fnma"); + (name I8x16RelaxedLaneselect) => ("i8x16.relaxed_laneselect"); + (name I16x8RelaxedLaneselect) => ("i16x8.relaxed_laneselect"); + (name I32x4RelaxedLaneselect) => ("i32x4.relaxed_laneselect"); + (name I64x2RelaxedLaneselect) => ("i64x2.relaxed_laneselect"); + (name F32x4RelaxedMin) => ("f32x4.relaxed_min"); + (name F32x4RelaxedMax) => ("f32x4.relaxed_max"); + (name F64x2RelaxedMin) => ("f64x2.relaxed_min"); + (name F64x2RelaxedMax) => ("f64x2.relaxed_max"); + (name I16x8RelaxedQ15mulrS) => ("i16x8.relaxed_q15mulr_s"); + (name I16x8DotI8x16I7x16S) => ("i16x8.dot_i8x16_i7x16_s"); + (name I32x4DotI8x16I7x16AddS) => ("i32x4.dot_i8x16_i7x16_add_s"); + (name F32x4RelaxedDotBf16x8AddF32x4) => ("f32x4.relaxed_dot_bf16x8_add_f32x4"); +} - fn visit_f32_convert_i32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.convert_i32_s") - } - fn visit_f32_convert_i32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.convert_i32_u") - } - fn visit_f32_convert_i64_s(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.convert_i64_s") - } - fn visit_f32_convert_i64_u(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.convert_i64_u") - } - fn visit_f32_demote_f64(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.demote_f64") - } - fn visit_f64_convert_i32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.convert_i32_s") - } - fn visit_f64_convert_i32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.convert_i32_u") - } - fn visit_f64_convert_i64_s(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.convert_i64_s") - } - fn visit_f64_convert_i64_u(&mut self, _pos: usize) -> 
Self::Output { - self.instr("f64.convert_i64_u") - } - fn visit_f64_promote_f32(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.promote_f32") - } - fn visit_i32_reinterpret_f32(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.reinterpret_f32") - } - fn visit_i64_reinterpret_f64(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.reinterpret_f64") - } - fn visit_f32_reinterpret_i32(&mut self, _pos: usize) -> Self::Output { - self.instr("f32.reinterpret_i32") - } - fn visit_f64_reinterpret_i64(&mut self, _pos: usize) -> Self::Output { - self.instr("f64.reinterpret_i64") - } - fn visit_i32_extend8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.extend8_s") - } - fn visit_i32_extend16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.extend16_s") - } - fn visit_i64_extend8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.extend8_s") - } - fn visit_i64_extend16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.extend16_s") - } - fn visit_i64_extend32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.extend32_s") - } +impl<'a> VisitOperator<'a> for PrintOperator<'_, '_> { + type Output = Result; - fn visit_i32_trunc_sat_f32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_sat_f32_s") - } - fn visit_i32_trunc_sat_f32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_sat_f32_u") - } - fn visit_i32_trunc_sat_f64_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_sat_f64_s") - } - fn visit_i32_trunc_sat_f64_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32.trunc_sat_f64_u") - } - fn visit_i64_trunc_sat_f32_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_sat_f32_s") - } - fn visit_i64_trunc_sat_f32_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_sat_f32_u") - } - fn visit_i64_trunc_sat_f64_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_sat_f64_s") - } - fn 
visit_i64_trunc_sat_f64_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64.trunc_sat_f64_u") - } - - fn visit_memory_atomic_notify(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("memory.atomic.notify", &memarg, 4) - } - fn visit_memory_atomic_wait32(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("memory.atomic.wait32", &memarg, 4) - } - fn visit_memory_atomic_wait64(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("memory.atomic.wait64", &memarg, 8) - } - - fn visit_i32_atomic_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.load", &memarg, 4) - } - fn visit_i64_atomic_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.load", &memarg, 8) - } - fn visit_i32_atomic_load8_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.load8_u", &memarg, 1) - } - fn visit_i32_atomic_load16_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.load16_u", &memarg, 2) - } - fn visit_i64_atomic_load8_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.load8_u", &memarg, 1) - } - fn visit_i64_atomic_load16_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.load16_u", &memarg, 2) - } - fn visit_i64_atomic_load32_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.load32_u", &memarg, 4) - } - fn visit_i32_atomic_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.store", &memarg, 4) - } - fn visit_i32_atomic_store8(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.store8", &memarg, 1) - } - fn visit_i32_atomic_store16(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.store16", &memarg, 2) - } - fn 
visit_i64_atomic_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.store", &memarg, 8) - } - fn visit_i64_atomic_store8(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.store8", &memarg, 1) - } - fn visit_i64_atomic_store16(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.store16", &memarg, 2) - } - fn visit_i64_atomic_store32(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.store32", &memarg, 4) - } - - fn visit_i32_atomic_rmw_add(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.add", &memarg, 4) - } - fn visit_i32_atomic_rmw8_add_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw8.add_u", &memarg, 1) - } - fn visit_i32_atomic_rmw16_add_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw16.add_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_add(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.add", &memarg, 8) - } - fn visit_i64_atomic_rmw8_add_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.add_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_add_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.add_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_add_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.add_u", &memarg, 4) - } - - fn visit_i32_atomic_rmw_sub(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.sub", &memarg, 4) - } - fn visit_i32_atomic_rmw8_sub_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw8.sub_u", &memarg, 1) - } - fn visit_i32_atomic_rmw16_sub_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - 
self.mem_instr("i32.atomic.rmw16.sub_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_sub(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.sub", &memarg, 8) - } - fn visit_i64_atomic_rmw8_sub_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.sub_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_sub_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.sub_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_sub_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.sub_u", &memarg, 4) - } - - fn visit_i32_atomic_rmw_and(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.and", &memarg, 4) - } - fn visit_i32_atomic_rmw8_and_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw8.and_u", &memarg, 1) - } - fn visit_i32_atomic_rmw16_and_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw16.and_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_and(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.and", &memarg, 8) - } - fn visit_i64_atomic_rmw8_and_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.and_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_and_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.and_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_and_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.and_u", &memarg, 4) - } - - fn visit_i32_atomic_rmw_or(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.or", &memarg, 4) - } - fn visit_i32_atomic_rmw8_or_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw8.or_u", &memarg, 1) - } - fn 
visit_i32_atomic_rmw16_or_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw16.or_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_or(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.or", &memarg, 8) - } - fn visit_i64_atomic_rmw8_or_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.or_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_or_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.or_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_or_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.or_u", &memarg, 4) - } - - fn visit_i32_atomic_rmw_xor(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.xor", &memarg, 4) - } - fn visit_i32_atomic_rmw8_xor_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw8.xor_u", &memarg, 1) - } - fn visit_i32_atomic_rmw16_xor_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw16.xor_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_xor(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.xor", &memarg, 8) - } - fn visit_i64_atomic_rmw8_xor_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.xor_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_xor_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.xor_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_xor_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.xor_u", &memarg, 4) - } - - fn visit_i32_atomic_rmw_xchg(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.xchg", &memarg, 4) - } - fn visit_i32_atomic_rmw8_xchg_u(&mut self, _pos: usize, memarg: MemArg) -> 
Self::Output { - self.mem_instr("i32.atomic.rmw8.xchg_u", &memarg, 1) - } - fn visit_i32_atomic_rmw16_xchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw16.xchg_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_xchg(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.xchg", &memarg, 8) - } - fn visit_i64_atomic_rmw8_xchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.xchg_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_xchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.xchg_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_xchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.xchg_u", &memarg, 4) - } - - fn visit_i32_atomic_rmw_cmpxchg(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw.cmpxchg", &memarg, 4) - } - fn visit_i32_atomic_rmw8_cmpxchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw8.cmpxchg_u", &memarg, 1) - } - fn visit_i32_atomic_rmw16_cmpxchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i32.atomic.rmw16.cmpxchg_u", &memarg, 2) - } - fn visit_i64_atomic_rmw_cmpxchg(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw.cmpxchg", &memarg, 8) - } - fn visit_i64_atomic_rmw8_cmpxchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw8.cmpxchg_u", &memarg, 1) - } - fn visit_i64_atomic_rmw16_cmpxchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw16.cmpxchg_u", &memarg, 2) - } - fn visit_i64_atomic_rmw32_cmpxchg_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("i64.atomic.rmw32.cmpxchg_u", &memarg, 4) - } - - fn visit_atomic_fence(&mut self, _pos: usize) -> Self::Output { - 
self.instr("atomic.fence") - } - - fn visit_v128_load(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load", &memarg, 16) - } - fn visit_v128_store(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.store", &memarg, 16) - } - fn visit_v128_const(&mut self, _pos: usize, value: V128) -> Self::Output { - write!(self.result(), "v128.const i32x4")?; - for chunk in value.bytes().chunks(4) { - write!( - self.result(), - " 0x{:02x}{:02x}{:02x}{:02x}", - chunk[3], - chunk[2], - chunk[1], - chunk[0], - )?; - } - Ok(OpKind::Normal) - } - fn visit_i8x16_splat(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.splat") - } - fn visit_i16x8_splat(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.splat") - } - fn visit_i32x4_splat(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.splat") - } - fn visit_i64x2_splat(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.splat") - } - fn visit_f32x4_splat(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.splat") - } - fn visit_f64x2_splat(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.splat") - } - fn visit_i8x16_extract_lane_s(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i8x16.extract_lane_s {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i8x16_extract_lane_u(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i8x16.extract_lane_u {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i16x8_extract_lane_s(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i16x8.extract_lane_s {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i16x8_extract_lane_u(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i16x8.extract_lane_u {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i32x4_extract_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i32x4.extract_lane {lane}")?; - Ok(OpKind::Normal) - 
} - fn visit_i64x2_extract_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i64x2.extract_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i8x16_replace_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i8x16.replace_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i16x8_replace_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i16x8.replace_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i32x4_replace_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i32x4.replace_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_i64x2_replace_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "i64x2.replace_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_f32x4_extract_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "f32x4.extract_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_f32x4_replace_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "f32x4.replace_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_f64x2_extract_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "f64x2.extract_lane {lane}")?; - Ok(OpKind::Normal) - } - fn visit_f64x2_replace_lane(&mut self, _pos: usize, lane: u8) -> Self::Output { - write!(self.result(), "f64x2.replace_lane {lane}")?; - Ok(OpKind::Normal) - } - - fn visit_f32x4_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.eq") - } - fn visit_f32x4_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.ne") - } - fn visit_f32x4_lt(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.lt") - } - fn visit_f32x4_gt(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.gt") - } - fn visit_f32x4_le(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.le") - } - fn visit_f32x4_ge(&mut self, _pos: usize) -> Self::Output { - 
self.instr("f32x4.ge") - } - - fn visit_f64x2_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.eq") - } - fn visit_f64x2_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.ne") - } - fn visit_f64x2_lt(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.lt") - } - fn visit_f64x2_gt(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.gt") - } - fn visit_f64x2_le(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.le") - } - fn visit_f64x2_ge(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.ge") - } - - fn visit_f32x4_add(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.add") - } - fn visit_f32x4_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.sub") - } - fn visit_f32x4_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.mul") - } - fn visit_f32x4_div(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.div") - } - fn visit_f32x4_min(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.min") - } - fn visit_f32x4_max(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.max") - } - fn visit_f32x4_pmin(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.pmin") - } - fn visit_f32x4_pmax(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.pmax") - } - - fn visit_f64x2_add(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.add") - } - fn visit_f64x2_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.sub") - } - fn visit_f64x2_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.mul") - } - fn visit_f64x2_div(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.div") - } - fn visit_f64x2_min(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.min") - } - fn visit_f64x2_max(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.max") - } - fn visit_f64x2_pmin(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.pmin") - } - fn 
visit_f64x2_pmax(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.pmax") - } - - fn visit_f32x4_relaxed_min(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.relaxed_min") - } - fn visit_f32x4_relaxed_max(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.relaxed_max") - } - fn visit_f64x2_relaxed_min(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.relaxed_min") - } - fn visit_f64x2_relaxed_max(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.relaxed_max") - } - fn visit_i16x8_relaxed_q15mulr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.relaxed_q15mulr_s") - } - fn visit_i16x8_dot_i8x16_i7x16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.dot_i8x16_i7x16_s") - } - - fn visit_i8x16_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.eq") - } - fn visit_i8x16_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.ne") - } - fn visit_i8x16_lt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.lt_s") - } - fn visit_i8x16_lt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.lt_u") - } - fn visit_i8x16_gt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.gt_s") - } - fn visit_i8x16_gt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.gt_u") - } - fn visit_i8x16_le_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.le_s") - } - fn visit_i8x16_le_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.le_u") - } - fn visit_i8x16_ge_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.ge_s") - } - fn visit_i8x16_ge_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.ge_u") - } - - fn visit_i16x8_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.eq") - } - fn visit_i16x8_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.ne") - } - fn visit_i16x8_lt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.lt_s") - } - fn 
visit_i16x8_lt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.lt_u") - } - fn visit_i16x8_gt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.gt_s") - } - fn visit_i16x8_gt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.gt_u") - } - fn visit_i16x8_le_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.le_s") - } - fn visit_i16x8_le_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.le_u") - } - fn visit_i16x8_ge_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.ge_s") - } - fn visit_i16x8_ge_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.ge_u") - } - - fn visit_i32x4_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.eq") - } - fn visit_i32x4_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.ne") - } - fn visit_i32x4_lt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.lt_s") - } - fn visit_i32x4_lt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.lt_u") - } - fn visit_i32x4_gt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.gt_s") - } - fn visit_i32x4_gt_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.gt_u") - } - fn visit_i32x4_le_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.le_s") - } - fn visit_i32x4_le_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.le_u") - } - fn visit_i32x4_ge_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.ge_s") - } - fn visit_i32x4_ge_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.ge_u") - } - - fn visit_i64x2_eq(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.eq") - } - fn visit_i64x2_ne(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.ne") - } - fn visit_i64x2_lt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.lt_s") - } - fn visit_i64x2_gt_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.gt_s") - } - fn 
visit_i64x2_le_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.le_s") - } - fn visit_i64x2_ge_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.ge_s") - } - - fn visit_v128_and(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.and") - } - fn visit_v128_andnot(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.andnot") - } - fn visit_v128_or(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.or") - } - fn visit_v128_xor(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.xor") - } - - fn visit_i8x16_add(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.add") - } - fn visit_i8x16_add_sat_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.add_sat_s") - } - fn visit_i8x16_add_sat_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.add_sat_u") - } - fn visit_i8x16_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.sub") - } - fn visit_i8x16_sub_sat_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.sub_sat_s") - } - fn visit_i8x16_sub_sat_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.sub_sat_u") - } - fn visit_i8x16_min_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.min_s") - } - fn visit_i8x16_min_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.min_u") - } - fn visit_i8x16_max_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.max_s") - } - fn visit_i8x16_max_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.max_u") - } - - fn visit_i16x8_add(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.add") - } - fn visit_i16x8_add_sat_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.add_sat_s") - } - fn visit_i16x8_add_sat_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.add_sat_u") - } - fn visit_i16x8_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.sub") - } - fn visit_i16x8_sub_sat_s(&mut self, _pos: usize) 
-> Self::Output { - self.instr("i16x8.sub_sat_s") - } - fn visit_i16x8_sub_sat_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.sub_sat_u") - } - fn visit_i16x8_min_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.min_s") - } - fn visit_i16x8_min_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.min_u") - } - fn visit_i16x8_max_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.max_s") - } - fn visit_i16x8_max_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.max_u") - } - fn visit_i16x8_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.mul") - } - - fn visit_i32x4_add(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.add") - } - fn visit_i32x4_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.sub") - } - fn visit_i32x4_min_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.min_s") - } - fn visit_i32x4_min_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.min_u") - } - fn visit_i32x4_max_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.max_s") - } - fn visit_i32x4_max_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.max_u") - } - fn visit_i32x4_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.mul") - } - fn visit_i32x4_dot_i16x8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.dot_i16x8_s") - } - - fn visit_i64x2_add(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.add") - } - fn visit_i64x2_sub(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.sub") - } - fn visit_i64x2_mul(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.mul") - } - - fn visit_i8x16_avgr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.avgr_u") - } - fn visit_i16x8_avgr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.avgr_u") - } - fn visit_i8x16_narrow_i16x8_s(&mut self, _pos: usize) -> Self::Output { - 
self.instr("i8x16.narrow_i16x8_s") - } - fn visit_i8x16_narrow_i16x8_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.narrow_i16x8_u") - } - fn visit_i16x8_narrow_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.narrow_i32x4_s") - } - fn visit_i16x8_narrow_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.narrow_i32x4_u") - } - fn visit_i16x8_extmul_low_i8x16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extmul_low_i8x16_s") - } - fn visit_i16x8_extmul_high_i8x16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extmul_high_i8x16_s") - } - fn visit_i16x8_extmul_low_i8x16_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extmul_low_i8x16_u") - } - fn visit_i16x8_extmul_high_i8x16_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extmul_high_i8x16_u") - } - fn visit_i32x4_extmul_low_i16x8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extmul_low_i16x8_s") - } - fn visit_i32x4_extmul_high_i16x8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extmul_high_i16x8_s") - } - fn visit_i32x4_extmul_low_i16x8_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extmul_low_i16x8_u") - } - fn visit_i32x4_extmul_high_i16x8_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extmul_high_i16x8_u") - } - fn visit_i64x2_extmul_low_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extmul_low_i32x4_s") - } - fn visit_i64x2_extmul_high_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extmul_high_i32x4_s") - } - fn visit_i64x2_extmul_low_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extmul_low_i32x4_u") - } - fn visit_i64x2_extmul_high_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extmul_high_i32x4_u") - } - fn visit_i16x8_q15mulr_sat_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.q15mulr_sat_s") - } - - fn 
visit_f32x4_ceil(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.ceil") - } - fn visit_f32x4_floor(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.floor") - } - fn visit_f32x4_trunc(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.trunc") - } - fn visit_f32x4_nearest(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.nearest") - } - - fn visit_f64x2_ceil(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.ceil") - } - fn visit_f64x2_floor(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.floor") - } - fn visit_f64x2_trunc(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.trunc") - } - fn visit_f64x2_nearest(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.nearest") - } - - fn visit_f32x4_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.abs") - } - fn visit_f32x4_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.neg") - } - fn visit_f32x4_sqrt(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.sqrt") - } - fn visit_f64x2_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.abs") - } - fn visit_f64x2_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.neg") - } - fn visit_f64x2_sqrt(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.sqrt") - } - - fn visit_f32x4_demote_f64x2_zero(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.demote_f64x2_zero") - } - fn visit_f64x2_promote_low_f32x4(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.promote_low_f32x4") - } - fn visit_f64x2_convert_low_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.convert_low_i32x4_s") - } - fn visit_f64x2_convert_low_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.convert_low_i32x4_u") - } - fn visit_i32x4_trunc_sat_f32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.trunc_sat_f32x4_s") - } - fn visit_i32x4_trunc_sat_f32x4_u(&mut 
self, _pos: usize) -> Self::Output { - self.instr("i32x4.trunc_sat_f32x4_u") - } - fn visit_i32x4_trunc_sat_f64x2_s_zero(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.trunc_sat_f64x2_s_zero") - } - fn visit_i32x4_trunc_sat_f64x2_u_zero(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.trunc_sat_f64x2_u_zero") - } - fn visit_f32x4_convert_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.convert_i32x4_s") - } - fn visit_f32x4_convert_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.convert_i32x4_u") - } - fn visit_v128_not(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.not") - } - fn visit_i8x16_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.abs") - } - fn visit_i8x16_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.neg") - } - fn visit_i8x16_popcnt(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.popcnt") - } - fn visit_i16x8_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.abs") - } - fn visit_i16x8_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.neg") - } - fn visit_i32x4_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.abs") - } - fn visit_i32x4_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.neg") - } - fn visit_i64x2_abs(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.abs") - } - fn visit_i64x2_neg(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.neg") - } - fn visit_i16x8_extend_low_i8x16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extend_low_i8x16_s") - } - fn visit_i16x8_extend_high_i8x16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extend_high_i8x16_s") - } - fn visit_i16x8_extend_low_i8x16_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extend_low_i8x16_u") - } - fn visit_i16x8_extend_high_i8x16_u(&mut self, _pos: usize) -> Self::Output { - 
self.instr("i16x8.extend_high_i8x16_u") - } - fn visit_i32x4_extend_low_i16x8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extend_low_i16x8_s") - } - fn visit_i32x4_extend_high_i16x8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extend_high_i16x8_s") - } - fn visit_i32x4_extend_low_i16x8_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extend_low_i16x8_u") - } - fn visit_i32x4_extend_high_i16x8_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extend_high_i16x8_u") - } - fn visit_i64x2_extend_low_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extend_low_i32x4_s") - } - fn visit_i64x2_extend_high_i32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extend_high_i32x4_s") - } - fn visit_i64x2_extend_low_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extend_low_i32x4_u") - } - fn visit_i64x2_extend_high_i32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.extend_high_i32x4_u") - } - fn visit_i16x8_extadd_pairwise_i8x16_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extadd_pairwise_i8x16_s") - } - fn visit_i16x8_extadd_pairwise_i8x16_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.extadd_pairwise_i8x16_u") - } - fn visit_i32x4_extadd_pairwise_i16x8_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extadd_pairwise_i16x8_s") - } - fn visit_i32x4_extadd_pairwise_i16x8_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.extadd_pairwise_i16x8_u") - } - fn visit_i32x4_relaxed_trunc_sat_f32x4_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.relaxed_trunc_f32x4_s") - } - fn visit_i32x4_relaxed_trunc_sat_f32x4_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.relaxed_trunc_f32x4_u") - } - fn visit_i32x4_relaxed_trunc_sat_f64x2_s_zero(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.relaxed_trunc_f64x2_s_zero") - } - fn 
visit_i32x4_relaxed_trunc_sat_f64x2_u_zero(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.relaxed_trunc_f64x2_u_zero") - } - fn visit_v128_bitselect(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.bitselect") - } - fn visit_f32x4_relaxed_fma(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.relaxed_fma") - } - fn visit_f32x4_relaxed_fnma(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.relaxed_fnma") - } - fn visit_f64x2_relaxed_fma(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.relaxed_fma") - } - fn visit_f64x2_relaxed_fnma(&mut self, _pos: usize) -> Self::Output { - self.instr("f64x2.relaxed_fnma") - } - fn visit_i8x16_relaxed_laneselect(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.relaxed_laneselect") - } - fn visit_i16x8_relaxed_laneselect(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.relaxed_laneselect") - } - fn visit_i32x4_relaxed_laneselect(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.relaxed_laneselect") - } - fn visit_i64x2_relaxed_laneselect(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.relaxed_laneselect") - } - fn visit_i32x4_dot_i8x16_i7x16_add_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.dot_i8x16_i7x16_add_s") - } - fn visit_f32x4_relaxed_dot_bf16x8_add_f32x4(&mut self, _pos: usize) -> Self::Output { - self.instr("f32x4.relaxed_dot_bf16x8_add_f32x4") - } - fn visit_v128_any_true(&mut self, _pos: usize) -> Self::Output { - self.instr("v128.any_true") - } - fn visit_i8x16_all_true(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.all_true") - } - fn visit_i8x16_bitmask(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.bitmask") - } - fn visit_i16x8_all_true(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.all_true") - } - fn visit_i16x8_bitmask(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.bitmask") - } - fn visit_i32x4_all_true(&mut self, _pos: 
usize) -> Self::Output { - self.instr("i32x4.all_true") - } - fn visit_i32x4_bitmask(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.bitmask") - } - fn visit_i64x2_all_true(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.all_true") - } - fn visit_i64x2_bitmask(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.bitmask") - } - fn visit_i8x16_shl(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.shl") - } - fn visit_i8x16_shr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.shr_s") - } - fn visit_i8x16_shr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.shr_u") - } - fn visit_i16x8_shl(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.shl") - } - fn visit_i16x8_shr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.shr_s") - } - fn visit_i16x8_shr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i16x8.shr_u") - } - fn visit_i32x4_shl(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.shl") - } - fn visit_i32x4_shr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.shr_s") - } - fn visit_i32x4_shr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i32x4.shr_u") - } - fn visit_i64x2_shl(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.shl") - } - fn visit_i64x2_shr_s(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.shr_s") - } - fn visit_i64x2_shr_u(&mut self, _pos: usize) -> Self::Output { - self.instr("i64x2.shr_u") - } - - fn visit_i8x16_swizzle(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.swizzle") - } - fn visit_i8x16_relaxed_swizzle(&mut self, _pos: usize) -> Self::Output { - self.instr("i8x16.relaxed_swizzle") - } - fn visit_i8x16_shuffle(&mut self, _pos: usize, lanes: [u8; 16]) -> Self::Output { - self.push_str("i8x16.shuffle"); - for lane in lanes { - write!(self.result(), " {}", lane)?; - } - Ok(OpKind::Normal) - } - fn visit_v128_load8_splat(&mut self, _pos: usize, 
memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load8_splat", &memarg, 1) - } - fn visit_v128_load16_splat(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load16_splat", &memarg, 2) - } - fn visit_v128_load32_splat(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load32_splat", &memarg, 4) - } - fn visit_v128_load32_zero(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load32_zero", &memarg, 4) - } - fn visit_v128_load64_splat(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load64_splat", &memarg, 8) - } - fn visit_v128_load64_zero(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load64_zero", &memarg, 8) - } - fn visit_v128_load8x8_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load8x8_s", &memarg, 8) - } - fn visit_v128_load8x8_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load8x8_u", &memarg, 8) - } - fn visit_v128_load16x4_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load16x4_s", &memarg, 8) - } - fn visit_v128_load16x4_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load16x4_u", &memarg, 8) - } - fn visit_v128_load32x2_s(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load32x2_s", &memarg, 8) - } - fn visit_v128_load32x2_u(&mut self, _pos: usize, memarg: MemArg) -> Self::Output { - self.mem_instr("v128.load32x2_u", &memarg, 8) - } - fn visit_v128_load8_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.load8_lane", &memarg, 1)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } - fn visit_v128_load16_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.load16_lane", &memarg, 2)?; - write!(self.result(), " {lane}")?; - 
Ok(OpKind::Normal) - } - fn visit_v128_load32_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.load32_lane", &memarg, 4)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } - fn visit_v128_load64_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.load64_lane", &memarg, 8)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } - fn visit_v128_store8_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.store8_lane", &memarg, 1)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } - fn visit_v128_store16_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.store16_lane", &memarg, 2)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } - fn visit_v128_store32_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.store32_lane", &memarg, 4)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } - fn visit_v128_store64_lane(&mut self, _pos: usize, memarg: MemArg, lane: u8) -> Self::Output { - self.mem_instr("v128.store64_lane", &memarg, 8)?; - write!(self.result(), " {lane}")?; - Ok(OpKind::Normal) - } + wasmparser::for_each_operator!(define_visit); } diff --git a/crates/wast/src/core/expr.rs b/crates/wast/src/core/expr.rs index 9d358373e6..f4afde2a97 100644 --- a/crates/wast/src/core/expr.rs +++ b/crates/wast/src/core/expr.rs @@ -1140,10 +1140,10 @@ instructions! 
{ // Relaxed SIMD proposal I8x16RelaxedSwizzle : [0xfd, 0x100]: "i8x16.relaxed_swizzle", - I32x4RelaxedTruncF32x4S : [0xfd, 0x101]: "i32x4.relaxed_trunc_f32x4_s", - I32x4RelaxedTruncF32x4U : [0xfd, 0x102]: "i32x4.relaxed_trunc_f32x4_u", - I32x4RelaxedTruncF64x2SZero : [0xfd, 0x103]: "i32x4.relaxed_trunc_f64x2_s_zero", - I32x4RelaxedTruncF64x2UZero : [0xfd, 0x104]: "i32x4.relaxed_trunc_f64x2_u_zero", + I32x4RelaxedTruncSatF32x4S : [0xfd, 0x101]: "i32x4.relaxed_trunc_sat_f32x4_s" | "i32x4.relaxed_trunc_f32x4_s", + I32x4RelaxedTruncSatF32x4U : [0xfd, 0x102]: "i32x4.relaxed_trunc_sat_f32x4_u" | "i32x4.relaxed_trunc_f32x4_u", + I32x4RelaxedTruncSatF64x2SZero : [0xfd, 0x103]: "i32x4.relaxed_trunc_sat_f64x2_s_zero" | "i32x4.relaxed_trunc_f64x2_s_zero", + I32x4RelaxedTruncSatF64x2UZero : [0xfd, 0x104]: "i32x4.relaxed_trunc_sat_f64x2_u_zero" | "i32x4.relaxed_trunc_f64x2_u_zero", F32x4RelaxedFma : [0xfd, 0x105]: "f32x4.relaxed_fma", F32x4RelaxedFnma : [0xfd, 0x106]: "f32x4.relaxed_fnma", F64x2RelaxedFma : [0xfd, 0x107]: "f64x2.relaxed_fma", diff --git a/src/bin/wasm-tools/strip.rs b/src/bin/wasm-tools/strip.rs index 274a2c4e0e..b862f2ce6a 100644 --- a/src/bin/wasm-tools/strip.rs +++ b/src/bin/wasm-tools/strip.rs @@ -1,6 +1,6 @@ -use anyhow::{bail, Result}; +use anyhow::Result; use wasm_encoder::RawSection; -use wasmparser::{Encoding, Parser, Payload::*}; +use wasmparser::{Parser, Payload::*}; /// Removes custom sections from an input WebAssembly file. 
/// diff --git a/tests/local/relaxed-simd.wast b/tests/local/relaxed-simd.wast index 55f1b9d072..5d7699e53f 100644 --- a/tests/local/relaxed-simd.wast +++ b/tests/local/relaxed-simd.wast @@ -7,19 +7,19 @@ (func $i32x4_trunc_f32x4_s (param v128) (result v128) local.get 0 - i32x4.relaxed_trunc_f32x4_s) + i32x4.relaxed_trunc_sat_f32x4_s) (func $i32x4_trunc_f32x4_u (param v128) (result v128) local.get 0 - i32x4.relaxed_trunc_f32x4_u) + i32x4.relaxed_trunc_sat_f32x4_u) (func $i32x4.trunc_f64x2_s_zero (param v128) (result v128) local.get 0 - i32x4.relaxed_trunc_f64x2_s_zero) + i32x4.relaxed_trunc_sat_f64x2_s_zero) (func $i32x4.trunc_f64x2_u_zero (param v128) (result v128) local.get 0 - i32x4.relaxed_trunc_f64x2_u_zero) + i32x4.relaxed_trunc_sat_f64x2_u_zero) (func $f32x4_fma (param v128 v128 v128) (result v128) local.get 0 diff --git a/tests/local/threads.wast b/tests/local/threads.wast new file mode 100644 index 0000000000..211ee5919f --- /dev/null +++ b/tests/local/threads.wast @@ -0,0 +1,9 @@ +(assert_invalid + (module + (memory 1) + (func (param i32) (result i32) + local.get 0 + i32.atomic.load align=1 + ) + ) + "must always specify maximum alignment")