Update wasmparser, add new SIMD instructions #149

Merged (1 commit) on Dec 18, 2019
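
For consumers of the walrus IR, the most visible change in this PR is that the `LoadSplat` instruction and its `LoadSplatKind` enum are generalized into `LoadSimd` and `LoadSimdKind`, which also cover the new SIMD load-and-extend instructions. A minimal sketch of how code matching on `walrus::ir::Instr` might adapt; the `describe_simd_load` helper is hypothetical and not part of this PR:

use walrus::ir::{Instr, LoadSimdKind};

// Hypothetical helper: classify the SIMD load carried by the new `LoadSimd`
// IR node (the old `LoadSplat` variant no longer exists after this change).
fn describe_simd_load(instr: &Instr) -> Option<&'static str> {
    match instr {
        Instr::LoadSimd(load) => Some(match load.kind {
            LoadSimdKind::Splat8
            | LoadSimdKind::Splat16
            | LoadSimdKind::Splat32
            | LoadSimdKind::Splat64 => "load one value and splat it across all lanes",
            _ => "load a half-width vector and sign- or zero-extend each lane",
        }),
        _ => None,
    }
}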
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -29,7 +29,7 @@ leb128 = "0.2.4"
log = "0.4.8"
rayon = { version = "1.1.0", optional = true }
walrus-macro = { path = './crates/macro', version = '=0.14.0' }
wasmparser = "0.44.0"
wasmparser = "0.45.0"

[features]
parallel = ['rayon', 'id-arena/rayon']
2 changes: 1 addition & 1 deletion crates/fuzz-utils/Cargo.toml
@@ -10,7 +10,7 @@ anyhow = "1.0"
env_logger = "0.7.0"
rand = { version = "0.7.0", features = ['small_rng'] }
tempfile = "3.1.0"
wasmparser = "0.44"
wasmparser = "0.45"
wat = "1.0"

[dependencies.walrus]
44 changes: 34 additions & 10 deletions src/ir/mod.rs
@@ -540,13 +540,13 @@ pub enum Instr {
indices: ShuffleIndices,
},

/// `iaaxbb.load_splat`
LoadSplat {
/// Various instructions to load a simd vector from memory
LoadSimd {
/// The memory we're loading from.
memory: MemoryId,
/// The size of load this is performing
#[walrus(skip_visit)]
kind: LoadSplatKind,
kind: LoadSimdKind,
/// The alignment and offset of this memory load
#[walrus(skip_visit)]
arg: MemArg,
@@ -757,6 +757,7 @@ pub enum BinaryOp {
V128And,
V128Or,
V128Xor,
V128AndNot,

I8x16Shl,
I8x16ShrS,
@@ -789,6 +790,7 @@ pub enum BinaryOp {
I64x2ShrU,
I64x2Add,
I64x2Sub,
I64x2Mul,

F32x4Add,
F32x4Sub,
@@ -802,6 +804,13 @@
F64x2Div,
F64x2Min,
F64x2Max,

I8x16NarrowI16x8S,
I8x16NarrowI16x8U,
I16x8NarrowI32x4S,
I16x8NarrowI32x4U,
I8x16RoundingAverageU,
I16x8RoundingAverageU,
}
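
The new narrowing binary ops added above (`I8x16NarrowI16x8S` and friends) saturate each lane of their two vector inputs down to the smaller lane width and concatenate the results. A scalar model of one of them, shown only to clarify the semantics and not taken from this PR:

// Scalar model of `i8x16.narrow_i16x8_s`: each i16 lane of the two inputs is
// clamped to the i8 range (signed saturation); the 16 results are concatenated.
fn narrow_i16x8_s(a: [i16; 8], b: [i16; 8]) -> [i8; 16] {
    let saturate = |x: i16| x.clamp(i8::MIN as i16, i8::MAX as i16) as i8;
    let mut out = [0i8; 16];
    for (i, &lane) in a.iter().chain(b.iter()).enumerate() {
        out[i] = saturate(lane);
    }
    out
}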

/// Possible unary operations in wasm
@@ -922,6 +931,15 @@ pub enum UnaryOp {
I64TruncUSatF32,
I64TruncSSatF64,
I64TruncUSatF64,

I16x8WidenLowI8x16S,
I16x8WidenLowI8x16U,
I16x8WidenHighI8x16S,
I16x8WidenHighI8x16U,
I32x4WidenLowI16x8S,
I32x4WidenLowI16x8U,
I32x4WidenHighI16x8S,
I32x4WidenHighI16x8U,
}
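
Similarly, the widening unary ops added to `UnaryOp` above take either the low or the high half of the input lanes and extend each one to the wider lane type. A scalar model of `i16x8.widen_low_i8x16_s`, again purely illustrative and not part of the PR:

// Scalar model of `i16x8.widen_low_i8x16_s`: the low eight i8 lanes are
// sign-extended to i16. The `_high_` variants use lanes 8..16 instead, and
// the `_u` variants zero-extend.
fn widen_low_i8x16_s(a: [i8; 16]) -> [i16; 8] {
    let mut out = [0i16; 8];
    for i in 0..8 {
        out[i] = a[i] as i16; // `as` from i8 to i16 sign-extends
    }
    out
}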

/// The different kinds of load instructions that are part of a `Load` IR node
@@ -944,14 +962,20 @@ pub enum LoadKind {
I64_32 { kind: ExtendedLoad },
}

/// The different kinds of load instructions that are part of a `LoadSplat` IR node
/// The different kinds of load instructions that are part of a `LoadSimd` IR node
#[derive(Debug, Copy, Clone)]
#[allow(missing_docs)]
pub enum LoadSplatKind {
I8,
I16,
I32,
I64,
pub enum LoadSimdKind {
Splat8,
Splat16,
Splat32,
Splat64,
I16x8Load8x8S,
I16x8Load8x8U,
I32x4Load16x4S,
I32x4Load16x4U,
I64x2Load32x2S,
I64x2Load32x2U,
}

/// The kinds of extended loads which can happen
@@ -1146,7 +1170,7 @@ impl Instr {
| Instr::V128Bitselect(..)
| Instr::V128Swizzle(..)
| Instr::V128Shuffle(..)
| Instr::LoadSplat(..)
| Instr::LoadSimd(..)
| Instr::AtomicFence(..)
| Instr::Drop(..) => false,
}
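For reference, each `LoadSimdKind` variant corresponds to one wasm instruction; the text-format names below follow the SIMD proposal as of this PR (late 2019). The lookup function is hypothetical and exists only as an aside to this diff:

use walrus::ir::LoadSimdKind;

// Hypothetical lookup, not in walrus itself: the text-format spelling of each
// SIMD load covered by `LoadSimdKind` (proposal names as of late 2019).
fn wat_name(kind: LoadSimdKind) -> &'static str {
    match kind {
        LoadSimdKind::Splat8 => "v8x16.load_splat",
        LoadSimdKind::Splat16 => "v16x8.load_splat",
        LoadSimdKind::Splat32 => "v32x4.load_splat",
        LoadSimdKind::Splat64 => "v64x2.load_splat",
        LoadSimdKind::I16x8Load8x8S => "i16x8.load8x8_s",
        LoadSimdKind::I16x8Load8x8U => "i16x8.load8x8_u",
        LoadSimdKind::I32x4Load16x4S => "i32x4.load16x4_s",
        LoadSimdKind::I32x4Load16x4U => "i32x4.load16x4_u",
        LoadSimdKind::I64x2Load32x2S => "i64x2.load32x2_s",
        LoadSimdKind::I64x2Load32x2U => "i64x2.load32x2_u",
    }
}
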
36 changes: 30 additions & 6 deletions src/module/functions/local_function/emit.rs
@@ -316,6 +316,7 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> {
F64x2Ge => self.simd(0x4b),

V128And => self.simd(0x4d),
V128AndNot => self.simd(0xd8),
V128Or => self.simd(0x4e),
V128Xor => self.simd(0x4f),

@@ -350,6 +351,7 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> {
I64x2ShrU => self.simd(0x89),
I64x2Add => self.simd(0x8a),
I64x2Sub => self.simd(0x8d),
I64x2Mul => self.simd(0x90),

F32x4Add => self.simd(0x9a),
F32x4Sub => self.simd(0x9b),
@@ -363,6 +365,14 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> {
F64x2Div => self.simd(0xa8),
F64x2Min => self.simd(0xa9),
F64x2Max => self.simd(0xaa),

I8x16NarrowI16x8S => self.simd(0xc6),
I8x16NarrowI16x8U => self.simd(0xc7),
I16x8NarrowI32x4S => self.simd(0xc8),
I16x8NarrowI32x4U => self.simd(0xc9),

I8x16RoundingAverageU => self.simd(0xd9),
I16x8RoundingAverageU => self.simd(0xda),
}
}

@@ -508,6 +518,15 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> {
I64TruncUSatF32 => self.encoder.raw(&[0xfc, 0x05]),
I64TruncSSatF64 => self.encoder.raw(&[0xfc, 0x06]),
I64TruncUSatF64 => self.encoder.raw(&[0xfc, 0x07]),

I16x8WidenLowI8x16S => self.simd(0xca),
I16x8WidenHighI8x16S => self.simd(0xcb),
I16x8WidenLowI8x16U => self.simd(0xcc),
I16x8WidenHighI8x16U => self.simd(0xcd),
I32x4WidenLowI16x8S => self.simd(0xce),
I32x4WidenHighI16x8S => self.simd(0xcf),
I32x4WidenLowI16x8U => self.simd(0xd0),
I32x4WidenHighI16x8U => self.simd(0xd1),
}
}

@@ -779,14 +798,19 @@ impl<'instr> Visitor<'instr> for Emit<'_, '_> {
self.simd(0xc1);
self.encoder.raw(&e.indices);
}
LoadSplat(e) => {
LoadSimd(e) => {
match e.kind {
LoadSplatKind::I8 => self.simd(0xc2),
LoadSplatKind::I16 => self.simd(0xc3),
LoadSplatKind::I32 => self.simd(0xc4),
LoadSplatKind::I64 => self.simd(0xc5),
LoadSimdKind::Splat8 => self.simd(0xc2),
LoadSimdKind::Splat16 => self.simd(0xc3),
LoadSimdKind::Splat32 => self.simd(0xc4),
LoadSimdKind::Splat64 => self.simd(0xc5),
LoadSimdKind::I16x8Load8x8S => self.simd(0xd2),
LoadSimdKind::I16x8Load8x8U => self.simd(0xd3),
LoadSimdKind::I32x4Load16x4S => self.simd(0xd4),
LoadSimdKind::I32x4Load16x4U => self.simd(0xd5),
LoadSimdKind::I64x2Load32x2S => self.simd(0xd6),
LoadSimdKind::I64x2Load32x2U => self.simd(0xd7),
}
self.memarg(e.memory, &e.arg);
}
}
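As context for the `self.simd(..)` calls above: in the SIMD proposal's binary format every SIMD instruction is the prefix byte 0xfd followed by a LEB128-encoded sub-opcode, and the load instructions are followed by a memarg (alignment exponent and offset). A rough standalone sketch of the bytes a `v8x16.load_splat` becomes, assuming walrus's `simd` helper emits exactly that prefix-plus-LEB shape:

// Standalone illustration (not walrus code): encode `v8x16.load_splat` with a
// given alignment exponent and offset. Sub-opcode 0xc2 matches the emit arm
// above; as a LEB128 u32 it takes two bytes (0xc2 0x01).
fn encode_v8x16_load_splat(align_log2: u32, offset: u32) -> Vec<u8> {
    let mut bytes = vec![0xfd]; // SIMD prefix byte
    leb128_u32(&mut bytes, 0xc2); // sub-opcode for v8x16.load_splat
    leb128_u32(&mut bytes, align_log2); // memarg: alignment as a power of two
    leb128_u32(&mut bytes, offset); // memarg: offset
    bytes
}

fn leb128_u32(out: &mut Vec<u8>, mut value: u32) {
    // Unsigned LEB128: 7 bits per byte, high bit set on all but the last byte.
    loop {
        let mut byte = (value & 0x7f) as u8;
        value >>= 7;
        if value != 0 {
            byte |= 0x80;
        }
        out.push(byte);
        if value == 0 {
            break;
        }
    }
}
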
43 changes: 37 additions & 6 deletions src/module/functions/local_function/mod.rs
@@ -374,11 +374,11 @@ fn validate_instruction<'context>(
Ok(())
};

let load_splat = |ctx: &mut ValidationContext, arg, kind| -> Result<()> {
let load_simd = |ctx: &mut ValidationContext, arg, kind| -> Result<()> {
ctx.pop_operand_expected(Some(I32))?;
let memory = ctx.indices.get_memory(0)?;
let arg = mem_arg(&arg)?;
ctx.alloc_instr(LoadSplat { memory, arg, kind }, loc);
ctx.alloc_instr(LoadSimd { memory, arg, kind }, loc);
ctx.push_operand(Some(V128));
Ok(())
};
@@ -1312,6 +1312,7 @@

Operator::V128Not => unop(ctx, V128, UnaryOp::V128Not)?,
Operator::V128And => binop(ctx, V128, BinaryOp::V128And)?,
Operator::V128AndNot => binop(ctx, V128, BinaryOp::V128AndNot)?,
Operator::V128Or => binop(ctx, V128, BinaryOp::V128Or)?,
Operator::V128Xor => binop(ctx, V128, BinaryOp::V128Xor)?,

@@ -1369,6 +1370,7 @@
Operator::I64x2ShrU => two_ops(ctx, V128, I32, V128, BinaryOp::I64x2ShrU)?,
Operator::I64x2Add => binop(ctx, V128, BinaryOp::I64x2Add)?,
Operator::I64x2Sub => binop(ctx, V128, BinaryOp::I64x2Sub)?,
Operator::I64x2Mul => binop(ctx, V128, BinaryOp::I64x2Mul)?,

Operator::F32x4Abs => unop(ctx, V128, UnaryOp::F32x4Abs)?,
Operator::F32x4Neg => unop(ctx, V128, UnaryOp::F32x4Neg)?,
@@ -1408,10 +1410,39 @@
Operator::I64TruncSatF64S => one_op(ctx, F64, I64, UnaryOp::I64TruncSSatF64)?,
Operator::I64TruncSatF64U => one_op(ctx, F64, I64, UnaryOp::I64TruncUSatF64)?,

Operator::V8x16LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I8)?,
Operator::V16x8LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I16)?,
Operator::V32x4LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I32)?,
Operator::V64x2LoadSplat { memarg } => load_splat(ctx, memarg, LoadSplatKind::I64)?,
Operator::V8x16LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat8)?,
Operator::V16x8LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat16)?,
Operator::V32x4LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat32)?,
Operator::V64x2LoadSplat { memarg } => load_simd(ctx, memarg, LoadSimdKind::Splat64)?,

Operator::I8x16NarrowI16x8S => binop(ctx, V128, BinaryOp::I8x16NarrowI16x8S)?,
Operator::I8x16NarrowI16x8U => binop(ctx, V128, BinaryOp::I8x16NarrowI16x8U)?,
Operator::I16x8NarrowI32x4S => binop(ctx, V128, BinaryOp::I16x8NarrowI32x4S)?,
Operator::I16x8NarrowI32x4U => binop(ctx, V128, BinaryOp::I16x8NarrowI32x4U)?,
Operator::I16x8WidenLowI8x16S => unop(ctx, V128, UnaryOp::I16x8WidenLowI8x16S)?,
Operator::I16x8WidenLowI8x16U => unop(ctx, V128, UnaryOp::I16x8WidenLowI8x16U)?,
Operator::I16x8WidenHighI8x16S => unop(ctx, V128, UnaryOp::I16x8WidenHighI8x16S)?,
Operator::I16x8WidenHighI8x16U => unop(ctx, V128, UnaryOp::I16x8WidenHighI8x16U)?,
Operator::I32x4WidenLowI16x8S => unop(ctx, V128, UnaryOp::I32x4WidenLowI16x8S)?,
Operator::I32x4WidenLowI16x8U => unop(ctx, V128, UnaryOp::I32x4WidenLowI16x8U)?,
Operator::I32x4WidenHighI16x8S => unop(ctx, V128, UnaryOp::I32x4WidenHighI16x8S)?,
Operator::I32x4WidenHighI16x8U => unop(ctx, V128, UnaryOp::I32x4WidenHighI16x8U)?,
Operator::I16x8Load8x8S { memarg } => load_simd(ctx, memarg, LoadSimdKind::I16x8Load8x8S)?,
Operator::I16x8Load8x8U { memarg } => load_simd(ctx, memarg, LoadSimdKind::I16x8Load8x8U)?,
Operator::I32x4Load16x4S { memarg } => {
load_simd(ctx, memarg, LoadSimdKind::I32x4Load16x4S)?
}
Operator::I32x4Load16x4U { memarg } => {
load_simd(ctx, memarg, LoadSimdKind::I32x4Load16x4U)?
}
Operator::I64x2Load32x2S { memarg } => {
load_simd(ctx, memarg, LoadSimdKind::I64x2Load32x2S)?
}
Operator::I64x2Load32x2U { memarg } => {
load_simd(ctx, memarg, LoadSimdKind::I64x2Load32x2U)?
}
Operator::I8x16RoundingAverageU => binop(ctx, V128, BinaryOp::I8x16RoundingAverageU)?,
Operator::I16x8RoundingAverageU => binop(ctx, V128, BinaryOp::I16x8RoundingAverageU)?,

op @ Operator::TableInit { .. }
| op @ Operator::ElemDrop { .. }
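
A quick way to exercise both the new validation arms above and the new emit arms is to round-trip a module that uses one of the added instructions. A hedged sketch, assuming the `wat` crate from this repo's dev-dependencies accepts the late-2019 SIMD instruction names and that the module-level API (`walrus::Module::from_buffer`, `emit_wasm` returning bytes) matches this era of walrus:

// Illustrative only: parse a module using `i16x8.load8x8_s` through walrus and
// re-emit it, which runs the new `load_simd` validation path and the new
// `LoadSimdKind` emit path from this PR.
fn roundtrip_simd_load() -> anyhow::Result<Vec<u8>> {
    let wasm = wat::parse_str(
        r#"
        (module
            (memory 1)
            (func (param i32) (result v128)
                local.get 0
                i16x8.load8x8_s))
        "#,
    )?;
    let mut module = walrus::Module::from_buffer(&wasm)?;
    Ok(module.emit_wasm())
}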