diff --git a/Cargo.toml b/Cargo.toml index 91d36896..799fdb23 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,7 +29,7 @@ leb128 = "0.2.4" log = "0.4.8" rayon = { version = "1.1.0", optional = true } walrus-macro = { path = './crates/macro', version = '=0.14.0' } -wasmparser = "0.44.0" +wasmparser = "0.45.0" [features] parallel = ['rayon', 'id-arena/rayon'] diff --git a/crates/fuzz-utils/Cargo.toml b/crates/fuzz-utils/Cargo.toml index 8e998155..1d44712a 100644 --- a/crates/fuzz-utils/Cargo.toml +++ b/crates/fuzz-utils/Cargo.toml @@ -10,7 +10,7 @@ anyhow = "1.0" env_logger = "0.7.0" rand = { version = "0.7.0", features = ['small_rng'] } tempfile = "3.1.0" -wasmparser = "0.44" +wasmparser = "0.45" wat = "1.0" [dependencies.walrus] diff --git a/src/ir/mod.rs b/src/ir/mod.rs index edfc7c8c..304a97ef 100644 --- a/src/ir/mod.rs +++ b/src/ir/mod.rs @@ -540,13 +540,13 @@ pub enum Instr { indices: ShuffleIndices, }, - /// `iaaxbb.load_splat` - LoadSplat { + /// Various instructions to load a simd vector from memory + LoadSimd { /// The memory we're loading from. 
memory: MemoryId, /// The size of load this is performing #[walrus(skip_visit)] - kind: LoadSplatKind, + kind: LoadSimdKind, /// The alignment and offset of this memory load #[walrus(skip_visit)] arg: MemArg, @@ -757,6 +757,7 @@ pub enum BinaryOp { V128And, V128Or, V128Xor, + V128AndNot, I8x16Shl, I8x16ShrS, @@ -789,6 +790,7 @@ pub enum BinaryOp { I64x2ShrU, I64x2Add, I64x2Sub, + I64x2Mul, F32x4Add, F32x4Sub, @@ -802,6 +804,13 @@ pub enum BinaryOp { F64x2Div, F64x2Min, F64x2Max, + + I8x16NarrowI16x8S, + I8x16NarrowI16x8U, + I16x8NarrowI32x4S, + I16x8NarrowI32x4U, + I8x16RoundingAverageU, + I16x8RoundingAverageU, } /// Possible unary operations in wasm @@ -922,6 +931,15 @@ pub enum UnaryOp { I64TruncUSatF32, I64TruncSSatF64, I64TruncUSatF64, + + I16x8WidenLowI8x16S, + I16x8WidenLowI8x16U, + I16x8WidenHighI8x16S, + I16x8WidenHighI8x16U, + I32x4WidenLowI16x8S, + I32x4WidenLowI16x8U, + I32x4WidenHighI16x8S, + I32x4WidenHighI16x8U, } /// The different kinds of load instructions that are part of a `Load` IR node @@ -944,14 +962,20 @@ pub enum LoadKind { I64_32 { kind: ExtendedLoad }, } -/// The different kinds of load instructions that are part of a `LoadSplat` IR node +/// The different kinds of load instructions that are part of a `LoadSimd` IR node #[derive(Debug, Copy, Clone)] #[allow(missing_docs)] -pub enum LoadSplatKind { - I8, - I16, - I32, - I64, +pub enum LoadSimdKind { + Splat8, + Splat16, + Splat32, + Splat64, + I16x8Load8x8S, + I16x8Load8x8U, + I32x4Load16x4S, + I32x4Load16x4U, + I64x2Load32x2S, + I64x2Load32x2U, } /// The kinds of extended loads which can happen @@ -1146,7 +1170,7 @@ impl Instr { | Instr::V128Bitselect(..) | Instr::V128Swizzle(..) | Instr::V128Shuffle(..) - | Instr::LoadSplat(..) + | Instr::LoadSimd(..) | Instr::AtomicFence(..) | Instr::Drop(..) 
=> false, } diff --git a/src/module/functions/local_function/emit.rs b/src/module/functions/local_function/emit.rs index a61401ea..d8abe46c 100644 --- a/src/module/functions/local_function/emit.rs +++ b/src/module/functions/local_function/emit.rs @@ -316,6 +316,7 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> { F64x2Ge => self.simd(0x4b), V128And => self.simd(0x4d), + V128AndNot => self.simd(0xd8), V128Or => self.simd(0x4e), V128Xor => self.simd(0x4f), @@ -350,6 +351,7 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> { I64x2ShrU => self.simd(0x89), I64x2Add => self.simd(0x8a), I64x2Sub => self.simd(0x8d), + I64x2Mul => self.simd(0x90), F32x4Add => self.simd(0x9a), F32x4Sub => self.simd(0x9b), @@ -363,6 +365,14 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> { F64x2Div => self.simd(0xa8), F64x2Min => self.simd(0xa9), F64x2Max => self.simd(0xaa), + + I8x16NarrowI16x8S => self.simd(0xc6), + I8x16NarrowI16x8U => self.simd(0xc7), + I16x8NarrowI32x4S => self.simd(0xc8), + I16x8NarrowI32x4U => self.simd(0xc9), + + I8x16RoundingAverageU => self.simd(0xd9), + I16x8RoundingAverageU => self.simd(0xda), } } @@ -508,6 +518,15 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> { I64TruncUSatF32 => self.encoder.raw(&[0xfc, 0x05]), I64TruncSSatF64 => self.encoder.raw(&[0xfc, 0x06]), I64TruncUSatF64 => self.encoder.raw(&[0xfc, 0x07]), + + I16x8WidenLowI8x16S => self.simd(0xca), + I16x8WidenHighI8x16S => self.simd(0xcb), + I16x8WidenLowI8x16U => self.simd(0xcc), + I16x8WidenHighI8x16U => self.simd(0xcd), + I32x4WidenLowI16x8S => self.simd(0xce), + I32x4WidenHighI16x8S => self.simd(0xcf), + I32x4WidenLowI16x8U => self.simd(0xd0), + I32x4WidenHighI16x8U => self.simd(0xd1), } } @@ -779,14 +798,19 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> { self.simd(0xc1); self.encoder.raw(&e.indices); } - LoadSplat(e) => { + LoadSimd(e) => { match e.kind { - LoadSplatKind::I8 => self.simd(0xc2), - LoadSplatKind::I16 => self.simd(0xc3), - LoadSplatKind::I32 => self.simd(0xc4), - 
LoadSplatKind::I64 => self.simd(0xc5), + LoadSimdKind::Splat8 => self.simd(0xc2), + LoadSimdKind::Splat16 => self.simd(0xc3), + LoadSimdKind::Splat32 => self.simd(0xc4), + LoadSimdKind::Splat64 => self.simd(0xc5), + LoadSimdKind::I16x8Load8x8S => self.simd(0xd2), + LoadSimdKind::I16x8Load8x8U => self.simd(0xd3), + LoadSimdKind::I32x4Load16x4S => self.simd(0xd4), + LoadSimdKind::I32x4Load16x4U => self.simd(0xd5), + LoadSimdKind::I64x2Load32x2S => self.simd(0xd6), + LoadSimdKind::I64x2Load32x2U => self.simd(0xd7), } - self.simd(0xc1); self.memarg(e.memory, &e.arg); } } diff --git a/src/module/functions/local_function/mod.rs b/src/module/functions/local_function/mod.rs index 3361d19f..3b645bb4 100644 --- a/src/module/functions/local_function/mod.rs +++ b/src/module/functions/local_function/mod.rs @@ -374,11 +374,11 @@ fn validate_instruction<'context>( Ok(()) }; - let load_splat = |ctx: &mut ValidationContext, arg, kind| -> Result<()> { + let load_simd = |ctx: &mut ValidationContext, arg, kind| -> Result<()> { ctx.pop_operand_expected(Some(I32))?; let memory = ctx.indices.get_memory(0)?; let arg = mem_arg(&arg)?; - ctx.alloc_instr(LoadSplat { memory, arg, kind }, loc); + ctx.alloc_instr(LoadSimd { memory, arg, kind }, loc); ctx.push_operand(Some(V128)); Ok(()) }; @@ -1312,6 +1312,7 @@ fn validate_instruction<'context>( Operator::V128Not => unop(ctx, V128, UnaryOp::V128Not)?, Operator::V128And => binop(ctx, V128, BinaryOp::V128And)?, + Operator::V128AndNot => binop(ctx, V128, BinaryOp::V128AndNot)?, Operator::V128Or => binop(ctx, V128, BinaryOp::V128Or)?, Operator::V128Xor => binop(ctx, V128, BinaryOp::V128Xor)?, @@ -1369,6 +1370,7 @@ fn validate_instruction<'context>( Operator::I64x2ShrU => two_ops(ctx, V128, I32, V128, BinaryOp::I64x2ShrU)?, Operator::I64x2Add => binop(ctx, V128, BinaryOp::I64x2Add)?, Operator::I64x2Sub => binop(ctx, V128, BinaryOp::I64x2Sub)?, + Operator::I64x2Mul => binop(ctx, V128, BinaryOp::I64x2Mul)?, Operator::F32x4Abs => unop(ctx, V128, 
UnaryOp::F32x4Abs)?, Operator::F32x4Neg => unop(ctx, V128, UnaryOp::F32x4Neg)?, @@ -1408,10 +1410,39 @@ fn validate_instruction<'context>( Operator::I64TruncSatF64S => one_op(ctx, F64, I64, UnaryOp::I64TruncSSatF64)?, Operator::I64TruncSatF64U => one_op(ctx, F64, I64, UnaryOp::I64TruncUSatF64)?, - Operator::V8x16LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I8)?, - Operator::V16x8LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I16)?, - Operator::V32x4LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I32)?, - Operator::V64x2LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I64)?, + Operator::V8x16LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat8)?, + Operator::V16x8LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat16)?, + Operator::V32x4LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat32)?, + Operator::V64x2LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat64)?, + + Operator::I8x16NarrowI16x8S => binop(ctx, V128, BinaryOp::I8x16NarrowI16x8S)?, + Operator::I8x16NarrowI16x8U => binop(ctx, V128, BinaryOp::I8x16NarrowI16x8U)?, + Operator::I16x8NarrowI32x4S => binop(ctx, V128, BinaryOp::I16x8NarrowI32x4S)?, + Operator::I16x8NarrowI32x4U => binop(ctx, V128, BinaryOp::I16x8NarrowI32x4U)?, + Operator::I16x8WidenLowI8x16S => unop(ctx, V128, UnaryOp::I16x8WidenLowI8x16S)?, + Operator::I16x8WidenLowI8x16U => unop(ctx, V128, UnaryOp::I16x8WidenLowI8x16U)?, + Operator::I16x8WidenHighI8x16S => unop(ctx, V128, UnaryOp::I16x8WidenHighI8x16S)?, + Operator::I16x8WidenHighI8x16U => unop(ctx, V128, UnaryOp::I16x8WidenHighI8x16U)?, + Operator::I32x4WidenLowI16x8S => unop(ctx, V128, UnaryOp::I32x4WidenLowI16x8S)?, + Operator::I32x4WidenLowI16x8U => unop(ctx, V128, UnaryOp::I32x4WidenLowI16x8U)?, + Operator::I32x4WidenHighI16x8S => unop(ctx, V128, UnaryOp::I32x4WidenHighI16x8S)?, + Operator::I32x4WidenHighI16x8U => unop(ctx, V128, UnaryOp::I32x4WidenHighI16x8U)?, + 
Operator::I16x8Load8x8S { memarg } => load_simd(ctx, memarg, LoadSimdKind::I16x8Load8x8S)?, + Operator::I16x8Load8x8U { memarg } => load_simd(ctx, memarg, LoadSimdKind::I16x8Load8x8U)?, + Operator::I32x4Load16x4S { memarg } => { + load_simd(ctx, memarg, LoadSimdKind::I32x4Load16x4S)? + } + Operator::I32x4Load16x4U { memarg } => { + load_simd(ctx, memarg, LoadSimdKind::I32x4Load16x4U)? + } + Operator::I64x2Load32x2S { memarg } => { + load_simd(ctx, memarg, LoadSimdKind::I64x2Load32x2S)? + } + Operator::I64x2Load32x2U { memarg } => { + load_simd(ctx, memarg, LoadSimdKind::I64x2Load32x2U)? + } + Operator::I8x16RoundingAverageU => binop(ctx, V128, BinaryOp::I8x16RoundingAverageU)?, + Operator::I16x8RoundingAverageU => binop(ctx, V128, BinaryOp::I16x8RoundingAverageU)?, op @ Operator::TableInit { .. } | op @ Operator::ElemDrop { .. }