wasmer_vm/trap/traphandlers.rs

// This file contains code from external sources.
// Attributions: https://github.com/wasmerio/wasmer/blob/main/docs/ATTRIBUTIONS.md

#![allow(static_mut_refs)]

//! WebAssembly trap handling, which is built on top of the lower-level
//! signal-handling mechanisms.

use crate::vmcontext::{VMFunctionContext, VMTrampoline};
use crate::{Trap, VMContext, VMFunctionBody};
use backtrace::Backtrace;
use core::ptr::{read, read_unaligned};
use corosensei::stack::DefaultStack;
use corosensei::trap::{CoroutineTrapHandler, TrapHandlerRegs};
use corosensei::{CoroutineResult, ScopedCoroutine, Yielder};
use scopeguard::defer;
use std::any::Any;
use std::cell::Cell;
use std::error::Error;
use std::io;
use std::mem;
#[cfg(unix)]
use std::mem::MaybeUninit;
use std::ptr::{self, NonNull};
use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering, compiler_fence};
use std::sync::{LazyLock, Once};
use wasmer_types::TrapCode;

/// Configuration for the runtime VM.
/// Currently only the stack size is configurable.
pub struct VMConfig {
    /// Optional stack size (in bytes) of the VM. Values lower than 8K will be rounded up to 8K.
    pub wasm_stack_size: Option<usize>,
}

// TrapInformation can be stored in the "Undefined Instruction" itself.
// On x86_64, 0xC? selects a "Register" for the Mod R/M part of "ud1" (so there are no other bytes after it).
// On Arm64, udf allows for a 16-bit value, so we use the same 0xC? pattern to store the trap info.
static MAGIC: u8 = 0xc0;

static DEFAULT_STACK_SIZE: AtomicUsize = AtomicUsize::new(1024 * 1024);

// The current definition of `ucontext_t` in the `libc` crate is incorrect
// on aarch64-apple-darwin, so it's defined here with a more accurate definition.
#[repr(C)]
#[cfg(all(target_arch = "aarch64", target_os = "macos"))]
#[allow(non_camel_case_types)]
struct ucontext_t {
    uc_onstack: libc::c_int,
    uc_sigmask: libc::sigset_t,
    uc_stack: libc::stack_t,
    uc_link: *mut libc::ucontext_t,
    uc_mcsize: usize,
    uc_mcontext: libc::mcontext_t,
}

#[cfg(all(unix, not(all(target_arch = "aarch64", target_os = "macos"))))]
use libc::ucontext_t;

/// Sets the default Wasm stack size (initially 1 MiB).
/// The value is clamped to the range 8 KiB..=100 MiB.
pub fn set_stack_size(size: usize) {
    DEFAULT_STACK_SIZE.store(size.clamp(8 * 1024, 100 * 1024 * 1024), Ordering::Relaxed);
}

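// Usage sketch (hypothetical embedder code; assumes this function is re-exported
// from the crate root): raise the default stack size before running any Wasm.
// The new value is picked up by `catch_traps` for every stack allocated afterwards.
//
//     wasmer_vm::set_stack_size(4 * 1024 * 1024); // clamped to 8 KiB..=100 MiB
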
cfg_if::cfg_if! {
    if #[cfg(unix)] {
        /// Function which may handle custom signals while processing traps.
        pub type TrapHandlerFn<'a> = dyn Fn(libc::c_int, *const libc::siginfo_t, *const libc::c_void) -> bool + Send + Sync + 'a;
    } else if #[cfg(target_os = "windows")] {
        /// Function which may handle custom signals while processing traps.
        pub type TrapHandlerFn<'a> = dyn Fn(*mut windows_sys::Win32::System::Diagnostics::Debug::EXCEPTION_POINTERS) -> bool + Send + Sync + 'a;
    }
}

// Process an IllegalOpcode to see if it has a TrapCode payload
unsafe fn process_illegal_op(addr: usize) -> Option<TrapCode> {
    let mut val: Option<u8> = None;
    unsafe {
        if cfg!(target_arch = "x86_64") {
            val = if read(addr as *mut u8) & 0xf0 == 0x40
                && read((addr + 1) as *mut u8) == 0x0f
                && read((addr + 2) as *mut u8) == 0xb9
            {
                Some(read((addr + 3) as *mut u8))
            } else if read(addr as *mut u8) == 0x0f && read((addr + 1) as *mut u8) == 0xb9 {
                Some(read((addr + 2) as *mut u8))
            } else {
                None
            }
        }
        if cfg!(target_arch = "aarch64") {
            val = if read_unaligned(addr as *mut u32) & 0xffff0000 == 0 {
                Some(read(addr as *mut u8))
            } else {
                None
            }
        }
        if cfg!(target_arch = "riscv64") {
            let addr = addr as *mut u32;
            // Check if 'unimp' instruction
            val = if read(addr) == 0xc0001073 {
                // Read from the instruction we emitted: 'addi a0, xzero, $payload'
                // and take the encoded immediate value (upper 12 bits).
                let prev_insn = read(addr.sub(1));
                if (prev_insn & 0xffff) == 0x0513 {
                    Some((prev_insn >> 20) as u8)
                } else {
                    None
                }
            } else {
                None
            };
        }
    }

    // The direct encoding of a trap into the instruction is unused on RISC-V:
    if cfg!(target_arch = "x86_64") || cfg!(target_arch = "aarch64") {
        val = val.and_then(|val| {
            if val & MAGIC == MAGIC {
                Some(val & 0xf)
            } else {
                None
            }
        });
    }

    match val {
        None => None,
        Some(val) => match val {
            0 => Some(TrapCode::StackOverflow),
            1 => Some(TrapCode::HeapAccessOutOfBounds),
            2 => Some(TrapCode::HeapMisaligned),
            3 => Some(TrapCode::TableAccessOutOfBounds),
            4 => Some(TrapCode::IndirectCallToNull),
            5 => Some(TrapCode::BadSignature),
            6 => Some(TrapCode::IntegerOverflow),
            7 => Some(TrapCode::IntegerDivisionByZero),
            8 => Some(TrapCode::BadConversionToInteger),
            9 => Some(TrapCode::UnreachableCodeReached),
            10 => Some(TrapCode::UnalignedAtomic),
            _ => None,
        },
    }
}

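// Worked example for the x86_64 branch of `process_illegal_op` above: for the
// byte sequence `0x0f 0xb9 0xc6` (a `ud1` whose Mod R/M byte is 0xc6), the
// payload read is 0xc6, which passes the MAGIC check (0xc6 & 0xc0 == 0xc0)
// and decodes to 0xc6 & 0xf == 6, i.e. `TrapCode::IntegerOverflow`.
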
cfg_if::cfg_if! {
    if #[cfg(unix)] {
        static mut PREV_SIGSEGV: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
        static mut PREV_SIGBUS: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
        static mut PREV_SIGILL: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();
        static mut PREV_SIGFPE: MaybeUninit<libc::sigaction> = MaybeUninit::uninit();

        unsafe fn platform_init() { unsafe {
            let register = |slot: &mut MaybeUninit<libc::sigaction>, signal: i32| {
                let mut handler: libc::sigaction = mem::zeroed();
                // The flags here are relatively careful, and they are...
                //
                // SA_SIGINFO gives us access to information like the program
                // counter from where the fault happened.
                //
                // SA_ONSTACK allows us to handle signals on an alternate stack,
                // so that the handler can run in response to running out of
                // stack space on the main stack. Rust installs an alternate
                // stack with sigaltstack, so we rely on that.
                //
                // SA_NODEFER allows us to reenter the signal handler if we
                // crash while handling the signal, and fall through to the
                // Breakpad handler by testing handlingSegFault.
                handler.sa_flags = libc::SA_SIGINFO | libc::SA_NODEFER | libc::SA_ONSTACK;
                handler.sa_sigaction = trap_handler as *const () as usize;
                libc::sigemptyset(&mut handler.sa_mask);
                if libc::sigaction(signal, &handler, slot.as_mut_ptr()) != 0 {
                    panic!(
                        "unable to install signal handler: {}",
                        io::Error::last_os_error(),
                    );
                }
            };

            // Allow handling OOB with signals on all architectures
            register(&mut PREV_SIGSEGV, libc::SIGSEGV);

            // Handle `unreachable` instructions which execute `ud2` right now
            register(&mut PREV_SIGILL, libc::SIGILL);

            // x86 uses SIGFPE to report division by zero
            if cfg!(target_arch = "x86") || cfg!(target_arch = "x86_64") {
                register(&mut PREV_SIGFPE, libc::SIGFPE);
            }

            // On ARM, handle Unaligned Accesses.
            // On Darwin, guard page accesses are raised as SIGBUS.
            if cfg!(target_arch = "arm") || cfg!(target_vendor = "apple") {
                register(&mut PREV_SIGBUS, libc::SIGBUS);
            }

            // This is necessary to support debugging under LLDB on Darwin.
            // For more details see https://github.com/mono/mono/commit/8e75f5a28e6537e56ad70bf870b86e22539c2fb7
            #[cfg(target_vendor = "apple")]
            {
                use mach2::exception_types::*;
                use mach2::kern_return::*;
                use mach2::port::*;
                use mach2::thread_status::*;
                use mach2::traps::*;
                use mach2::mach_types::*;

                unsafe extern "C" {
                    fn task_set_exception_ports(
                        task: task_t,
                        exception_mask: exception_mask_t,
                        new_port: mach_port_t,
                        behavior: exception_behavior_t,
                        new_flavor: thread_state_flavor_t,
                    ) -> kern_return_t;
                }

                #[allow(non_snake_case)]
                #[cfg(target_arch = "x86_64")]
                let MACHINE_THREAD_STATE = x86_THREAD_STATE64;
                #[allow(non_snake_case)]
                #[cfg(target_arch = "aarch64")]
                let MACHINE_THREAD_STATE = 6;

                task_set_exception_ports(
                    mach_task_self(),
                    EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC | EXC_MASK_BAD_INSTRUCTION,
                    MACH_PORT_NULL,
                    EXCEPTION_STATE_IDENTITY as exception_behavior_t,
                    MACHINE_THREAD_STATE,
                );
            }
        }}

        unsafe extern "C" fn trap_handler(
            signum: libc::c_int,
            siginfo: *mut libc::siginfo_t,
            context: *mut libc::c_void,
        ) { unsafe {
            let previous = match signum {
                libc::SIGSEGV => &PREV_SIGSEGV,
                libc::SIGBUS => &PREV_SIGBUS,
                libc::SIGFPE => &PREV_SIGFPE,
                libc::SIGILL => &PREV_SIGILL,
                _ => panic!("unknown signal: {signum}"),
            };
            // We try to get the fault address associated with this signal
            let maybe_fault_address = match signum {
                libc::SIGSEGV | libc::SIGBUS => {
                    Some((*siginfo).si_addr() as usize)
                }
                _ => None,
            };
            let trap_code = match signum {
                // Check if it was caused by a UD and whether the trap info is encoded as its payload
                libc::SIGILL => {
                    let addr = (*siginfo).si_addr() as usize;
                    process_illegal_op(addr)
                }
                _ => None,
            };
            let ucontext = &mut *(context as *mut ucontext_t);
            let (pc, sp) = get_pc_sp(ucontext);
            let handled = TrapHandlerContext::handle_trap(
                pc,
                sp,
                maybe_fault_address,
                trap_code,
                |regs| update_context(ucontext, regs),
                |handler| handler(signum, siginfo, context),
            );

            if handled {
                return;
            }

            // This signal is not for any compiled wasm code we expect, so we
            // need to forward the signal to the next handler. If there is no
            // next handler (SIG_IGN or SIG_DFL), then it's time to crash. To do
            // this, we set the signal back to its original disposition and
            // return. This will cause the faulting op to be re-executed which
            // will crash in the normal way. If there is a next handler, call
            // it. It will either crash synchronously, fix up the instruction
            // so that execution can continue and return, or trigger a crash by
            // returning the signal to its original disposition and returning.
            let previous = &*previous.as_ptr();
            if previous.sa_flags & libc::SA_SIGINFO != 0 {
                mem::transmute::<
                    usize,
                    extern "C" fn(libc::c_int, *mut libc::siginfo_t, *mut libc::c_void),
                >(previous.sa_sigaction)(signum, siginfo, context)
            } else if previous.sa_sigaction == libc::SIG_DFL {
                libc::sigaction(signum, previous, ptr::null_mut());
            } else if previous.sa_sigaction != libc::SIG_IGN {
                mem::transmute::<usize, extern "C" fn(libc::c_int)>(
                    previous.sa_sigaction
                )(signum)
            }
        }}

        unsafe fn get_pc_sp(context: &ucontext_t) -> (usize, usize) {
            let (pc, sp);
            cfg_if::cfg_if! {
                if #[cfg(all(
                    any(target_os = "linux", target_os = "android"),
                    target_arch = "x86_64",
                ))] {
                    pc = context.uc_mcontext.gregs[libc::REG_RIP as usize] as usize;
                    sp = context.uc_mcontext.gregs[libc::REG_RSP as usize] as usize;
                } else if #[cfg(all(
                    any(target_os = "linux", target_os = "android"),
                    target_arch = "x86",
                ))] {
                    pc = context.uc_mcontext.gregs[libc::REG_EIP as usize] as usize;
                    sp = context.uc_mcontext.gregs[libc::REG_ESP as usize] as usize;
                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86"))] {
                    pc = context.uc_mcontext.mc_eip as usize;
                    sp = context.uc_mcontext.mc_esp as usize;
                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
                    pc = context.uc_mcontext.mc_rip as usize;
                    sp = context.uc_mcontext.mc_rsp as usize;
                } else if #[cfg(all(target_vendor = "apple", target_arch = "x86_64"))] {
                    let mcontext = unsafe { &*context.uc_mcontext };
                    pc = mcontext.__ss.__rip as usize;
                    sp = mcontext.__ss.__rsp as usize;
                } else if #[cfg(all(
                        any(target_os = "linux", target_os = "android"),
                        target_arch = "aarch64",
                    ))] {
                    pc = context.uc_mcontext.pc as usize;
                    sp = context.uc_mcontext.sp as usize;
                } else if #[cfg(all(
                    any(target_os = "linux", target_os = "android"),
                    target_arch = "arm",
                ))] {
                    pc = context.uc_mcontext.arm_pc as usize;
                    sp = context.uc_mcontext.arm_sp as usize;
                } else if #[cfg(all(
                    any(target_os = "linux", target_os = "android"),
                    any(target_arch = "riscv64", target_arch = "riscv32"),
                ))] {
                    pc = context.uc_mcontext.__gregs[libc::REG_PC] as usize;
                    sp = context.uc_mcontext.__gregs[libc::REG_SP] as usize;
                } else if #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] {
                    let mcontext = unsafe { &*context.uc_mcontext };
                    pc = mcontext.__ss.__pc as usize;
                    sp = mcontext.__ss.__sp as usize;
                } else if #[cfg(all(target_os = "freebsd", target_arch = "aarch64"))] {
                    pc = context.uc_mcontext.mc_gpregs.gp_elr as usize;
                    sp = context.uc_mcontext.mc_gpregs.gp_sp as usize;
                } else if #[cfg(all(target_os = "linux", target_arch = "loongarch64"))] {
                    pc = context.uc_mcontext.__gregs[1] as usize;
                    sp = context.uc_mcontext.__gregs[3] as usize;
                } else if #[cfg(all(target_os = "linux", target_arch = "powerpc64"))] {
                    pc = (*context.uc_mcontext.regs).nip as usize;
                    sp = (*context.uc_mcontext.regs).gpr[1] as usize;
                } else {
                    compile_error!("Unsupported platform");
                }
            };
            (pc, sp)
        }

        unsafe fn update_context(context: &mut ucontext_t, regs: TrapHandlerRegs) {
            cfg_if::cfg_if! {
                if #[cfg(all(
                        any(target_os = "linux", target_os = "android"),
                        target_arch = "x86_64",
                    ))] {
                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
                    context.uc_mcontext.gregs[libc::REG_RIP as usize] = rip as i64;
                    context.uc_mcontext.gregs[libc::REG_RSP as usize] = rsp as i64;
                    context.uc_mcontext.gregs[libc::REG_RBP as usize] = rbp as i64;
                    context.uc_mcontext.gregs[libc::REG_RDI as usize] = rdi as i64;
                    context.uc_mcontext.gregs[libc::REG_RSI as usize] = rsi as i64;
                } else if #[cfg(all(
                    any(target_os = "linux", target_os = "android"),
                    target_arch = "x86",
                ))] {
                    let TrapHandlerRegs { eip, esp, ebp, ecx, edx } = regs;
                    context.uc_mcontext.gregs[libc::REG_EIP as usize] = eip as i32;
                    context.uc_mcontext.gregs[libc::REG_ESP as usize] = esp as i32;
                    context.uc_mcontext.gregs[libc::REG_EBP as usize] = ebp as i32;
                    context.uc_mcontext.gregs[libc::REG_ECX as usize] = ecx as i32;
                    context.uc_mcontext.gregs[libc::REG_EDX as usize] = edx as i32;
                } else if #[cfg(all(target_vendor = "apple", target_arch = "x86_64"))] {
                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
                    let mcontext = unsafe { &mut *context.uc_mcontext };
                    mcontext.__ss.__rip = rip;
                    mcontext.__ss.__rsp = rsp;
                    mcontext.__ss.__rbp = rbp;
                    mcontext.__ss.__rdi = rdi;
                    mcontext.__ss.__rsi = rsi;
                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86"))] {
                    let TrapHandlerRegs { eip, esp, ebp, ecx, edx } = regs;
                    context.uc_mcontext.mc_eip = eip as libc::register_t;
                    context.uc_mcontext.mc_esp = esp as libc::register_t;
                    context.uc_mcontext.mc_ebp = ebp as libc::register_t;
                    context.uc_mcontext.mc_ecx = ecx as libc::register_t;
                    context.uc_mcontext.mc_edx = edx as libc::register_t;
                } else if #[cfg(all(target_os = "freebsd", target_arch = "x86_64"))] {
                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
                    context.uc_mcontext.mc_rip = rip as libc::register_t;
                    context.uc_mcontext.mc_rsp = rsp as libc::register_t;
                    context.uc_mcontext.mc_rbp = rbp as libc::register_t;
                    context.uc_mcontext.mc_rdi = rdi as libc::register_t;
                    context.uc_mcontext.mc_rsi = rsi as libc::register_t;
                } else if #[cfg(all(
                        any(target_os = "linux", target_os = "android"),
                        target_arch = "aarch64",
                    ))] {
                    let TrapHandlerRegs { pc, sp, x0, x1, x29, lr } = regs;
                    context.uc_mcontext.pc = pc;
                    context.uc_mcontext.sp = sp;
                    context.uc_mcontext.regs[0] = x0;
                    context.uc_mcontext.regs[1] = x1;
                    context.uc_mcontext.regs[29] = x29;
                    context.uc_mcontext.regs[30] = lr;
                } else if #[cfg(all(
                        any(target_os = "linux", target_os = "android"),
                        target_arch = "arm",
                    ))] {
                    let TrapHandlerRegs {
                        pc,
                        r0,
                        r1,
                        r7,
                        r11,
                        r13,
                        r14,
                        cpsr_thumb,
                        cpsr_endian,
                    } = regs;
                    context.uc_mcontext.arm_pc = pc;
                    context.uc_mcontext.arm_r0 = r0;
                    context.uc_mcontext.arm_r1 = r1;
                    context.uc_mcontext.arm_r7 = r7;
                    context.uc_mcontext.arm_fp = r11;
                    context.uc_mcontext.arm_sp = r13;
                    context.uc_mcontext.arm_lr = r14;
                    if cpsr_thumb {
                        context.uc_mcontext.arm_cpsr |= 0x20;
                    } else {
                        context.uc_mcontext.arm_cpsr &= !0x20;
                    }
                    if cpsr_endian {
                        context.uc_mcontext.arm_cpsr |= 0x200;
                    } else {
                        context.uc_mcontext.arm_cpsr &= !0x200;
                    }
                } else if #[cfg(all(
                    any(target_os = "linux", target_os = "android"),
                    any(target_arch = "riscv64", target_arch = "riscv32"),
                ))] {
                    let TrapHandlerRegs { pc, ra, sp, a0, a1, s0 } = regs;
                    context.uc_mcontext.__gregs[libc::REG_PC] = pc as libc::c_ulong;
                    context.uc_mcontext.__gregs[libc::REG_RA] = ra as libc::c_ulong;
                    context.uc_mcontext.__gregs[libc::REG_SP] = sp as libc::c_ulong;
                    context.uc_mcontext.__gregs[libc::REG_A0] = a0 as libc::c_ulong;
                    context.uc_mcontext.__gregs[libc::REG_A0 + 1] = a1 as libc::c_ulong;
                    context.uc_mcontext.__gregs[libc::REG_S0] = s0 as libc::c_ulong;
                } else if #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] {
                    let TrapHandlerRegs { pc, sp, x0, x1, x29, lr } = regs;
                    let mcontext = unsafe { &mut *context.uc_mcontext };
                    mcontext.__ss.__pc = pc;
                    mcontext.__ss.__sp = sp;
                    mcontext.__ss.__x[0] = x0;
                    mcontext.__ss.__x[1] = x1;
                    mcontext.__ss.__fp = x29;
                    mcontext.__ss.__lr = lr;
                } else if #[cfg(all(target_os = "freebsd", target_arch = "aarch64"))] {
                    let TrapHandlerRegs { pc, sp, x0, x1, x29, lr } = regs;
                    context.uc_mcontext.mc_gpregs.gp_elr = pc as libc::register_t;
                    context.uc_mcontext.mc_gpregs.gp_sp = sp as libc::register_t;
                    context.uc_mcontext.mc_gpregs.gp_x[0] = x0 as libc::register_t;
                    context.uc_mcontext.mc_gpregs.gp_x[1] = x1 as libc::register_t;
                    context.uc_mcontext.mc_gpregs.gp_x[29] = x29 as libc::register_t;
                    context.uc_mcontext.mc_gpregs.gp_lr = lr as libc::register_t;
                } else if #[cfg(all(target_os = "linux", target_arch = "loongarch64"))] {
                    let TrapHandlerRegs { pc, sp, a0, a1, fp, ra } = regs;
                    context.uc_mcontext.__pc = pc;
                    context.uc_mcontext.__gregs[1] = ra;
                    context.uc_mcontext.__gregs[3] = sp;
                    context.uc_mcontext.__gregs[4] = a0;
                    context.uc_mcontext.__gregs[5] = a1;
                    context.uc_mcontext.__gregs[22] = fp;
                } else if #[cfg(all(target_os = "linux", target_arch = "powerpc64"))] {
                    let TrapHandlerRegs { pc, sp, r3, r4, r31, lr } = regs;
                    (*context.uc_mcontext.regs).nip = pc;
                    (*context.uc_mcontext.regs).gpr[1] = sp;
                    (*context.uc_mcontext.regs).gpr[3] = r3;
                    (*context.uc_mcontext.regs).gpr[4] = r4;
                    (*context.uc_mcontext.regs).gpr[31] = r31;
                    (*context.uc_mcontext.regs).link = lr;
                } else {
                    compile_error!("Unsupported platform");
                }
            };
        }
    } else if #[cfg(target_os = "windows")] {
        use windows_sys::Win32::System::Diagnostics::Debug::{
            AddVectoredExceptionHandler,
            CONTEXT,
            EXCEPTION_CONTINUE_EXECUTION,
            EXCEPTION_CONTINUE_SEARCH,
            EXCEPTION_POINTERS,
        };
        use windows_sys::Win32::Foundation::{
            EXCEPTION_ACCESS_VIOLATION,
            EXCEPTION_ILLEGAL_INSTRUCTION,
            EXCEPTION_INT_DIVIDE_BY_ZERO,
            EXCEPTION_INT_OVERFLOW,
            EXCEPTION_STACK_OVERFLOW,
        };

        unsafe fn platform_init() {
            unsafe {
                // our trap handler needs to go first, so that we can recover from
                // wasm faults and continue execution, so pass `1` as a true value
                // here.
                let handler = AddVectoredExceptionHandler(1, Some(exception_handler));
                if handler.is_null() {
                    panic!("failed to add exception handler: {}", io::Error::last_os_error());
                }
            }
        }

        unsafe extern "system" fn exception_handler(
            exception_info: *mut EXCEPTION_POINTERS
        ) -> i32 {
            unsafe {
                // Check the kind of exception, since we only handle a subset within
                // wasm code. If anything else happens we want to defer to whatever
                // the rest of the system wants to do for this exception.
                let record = &*(*exception_info).ExceptionRecord;
                if record.ExceptionCode != EXCEPTION_ACCESS_VIOLATION &&
                    record.ExceptionCode != EXCEPTION_ILLEGAL_INSTRUCTION &&
                    record.ExceptionCode != EXCEPTION_STACK_OVERFLOW &&
                    record.ExceptionCode != EXCEPTION_INT_DIVIDE_BY_ZERO &&
                    record.ExceptionCode != EXCEPTION_INT_OVERFLOW
                {
                    return EXCEPTION_CONTINUE_SEARCH;
                }

                // FIXME: this is what the previous C++ did to make sure that TLS
                // works by the time we execute this trap handling code. This isn't
                // exactly super easy to call from Rust though and it's not clear we
                // necessarily need to do so. Leaving this here in case we need this
                // in the future, but for now we can probably wait until we see a
                // strange fault before figuring out how to reimplement this in
                // Rust.
                //
                // if (!NtCurrentTeb()->Reserved1[sThreadLocalArrayPointerIndex]) {
                //     return EXCEPTION_CONTINUE_SEARCH;
                // }

                let context = &mut *(*exception_info).ContextRecord;
                let (pc, sp) = get_pc_sp(context);

                // We try to get the fault address associated with this exception.
                let maybe_fault_address = match record.ExceptionCode {
                    EXCEPTION_ACCESS_VIOLATION => Some(record.ExceptionInformation[1]),
                    EXCEPTION_STACK_OVERFLOW => Some(sp),
                    _ => None,
                };
                let trap_code = match record.ExceptionCode {
                    // Check if it was caused by a UD and whether the trap info is encoded as its payload
                    EXCEPTION_ILLEGAL_INSTRUCTION => {
                        process_illegal_op(pc)
                    }
                    _ => None,
                };
                // This is basically the same as the unix version above, only with a
                // few parameters tweaked here and there.
                let handled = TrapHandlerContext::handle_trap(
                    pc,
                    sp,
                    maybe_fault_address,
                    trap_code,
                    |regs| update_context(context, regs),
                    |handler| handler(exception_info),
                );

                if handled {
                    EXCEPTION_CONTINUE_EXECUTION
                } else {
                    EXCEPTION_CONTINUE_SEARCH
                }
            }
        }

        unsafe fn get_pc_sp(context: &CONTEXT) -> (usize, usize) {
            let (pc, sp);
            cfg_if::cfg_if! {
                if #[cfg(target_arch = "x86_64")] {
                    pc = context.Rip as usize;
                    sp = context.Rsp as usize;
                } else if #[cfg(target_arch = "x86")] {
                    pc = context.Eip as usize;
                    sp = context.Esp as usize;
                } else {
                    compile_error!("Unsupported platform");
                }
            };
            (pc, sp)
        }

        unsafe fn update_context(context: &mut CONTEXT, regs: TrapHandlerRegs) {
            cfg_if::cfg_if! {
                if #[cfg(target_arch = "x86_64")] {
                    let TrapHandlerRegs { rip, rsp, rbp, rdi, rsi } = regs;
                    context.Rip = rip;
                    context.Rsp = rsp;
                    context.Rbp = rbp;
                    context.Rdi = rdi;
                    context.Rsi = rsi;
                } else if #[cfg(target_arch = "x86")] {
                    let TrapHandlerRegs { eip, esp, ebp, ecx, edx } = regs;
                    context.Eip = eip;
                    context.Esp = esp;
                    context.Ebp = ebp;
                    context.Ecx = ecx;
                    context.Edx = edx;
                } else {
                    compile_error!("Unsupported platform");
                }
            };
        }
    }
}

/// This function is required to be called before any WebAssembly is entered.
/// This will configure global state such as signal handlers to prepare the
/// process to receive wasm traps.
///
/// This function must not only be called globally once before entering
/// WebAssembly but it must also be called once-per-thread that enters
/// WebAssembly. Currently in wasmer's integration this function is called on
/// creation of a `Store`.
pub fn init_traps() {
    static INIT: Once = Once::new();
    INIT.call_once(|| unsafe {
        platform_init();
    });
}

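// Usage sketch (hypothetical embedder code; assumes `init_traps` is re-exported
// from the crate root): install the process-wide handlers before the first call
// into Wasm. Repeated calls are cheap thanks to the `Once` guard.
//
//     wasmer_vm::init_traps();
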
/// Raises a user-defined trap immediately.
///
/// This function performs as-if a wasm trap was just executed, except the trap
/// has a dynamic, user-provided payload associated with it. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called and not yet returned.
/// Additionally no Rust destructors may be on the stack.
/// They will be skipped and not executed.
pub unsafe fn raise_user_trap(data: Box<dyn Error + Send + Sync>) -> ! {
    unsafe { unwind_with(UnwindReason::UserTrap(data)) }
}

/// Raises a trap from inside library code immediately.
///
/// This function performs as-if a wasm trap was just executed. This trap
/// payload is then returned from `catch_traps` below.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called and not yet returned.
/// Additionally no Rust destructors may be on the stack.
/// They will be skipped and not executed.
pub unsafe fn raise_lib_trap(trap: Trap) -> ! {
    unsafe { unwind_with(UnwindReason::LibTrap(trap)) }
}

/// Carries a Rust panic across wasm code and resumes the panic on the other
/// side.
///
/// # Safety
///
/// Only safe to call when wasm code is on the stack, aka `catch_traps` must
/// have been previously called and not returned. Additionally no Rust destructors may be on the
/// stack. They will be skipped and not executed.
pub unsafe fn resume_panic(payload: Box<dyn Any + Send>) -> ! {
    unsafe { unwind_with(UnwindReason::Panic(payload)) }
}

/// Call the wasm function pointed to by `callee`.
///
/// * `vmctx` - the callee vmctx argument
/// * `caller_vmctx` - the caller vmctx argument
/// * `trampoline` - the jit-generated trampoline whose ABI takes 4 values, the
///   callee vmctx, the caller vmctx, the `callee` argument below, and then the
///   `values_vec` argument.
/// * `callee` - the third argument to the `trampoline` function
/// * `values_vec` - points to a buffer which holds the incoming arguments, and to
///   which the outgoing return values will be written.
///
/// # Safety
///
/// Wildly unsafe because it calls raw function pointers and reads/writes raw
/// function pointers.
pub unsafe fn wasmer_call_trampoline(
    trap_handler: Option<*const TrapHandlerFn<'static>>,
    config: &VMConfig,
    vmctx: VMFunctionContext,
    trampoline: VMTrampoline,
    callee: *const VMFunctionBody,
    values_vec: *mut u8,
) -> Result<(), Trap> {
    unsafe {
        catch_traps(trap_handler, config, move || {
            mem::transmute::<
                unsafe extern "C" fn(
                    *mut VMContext,
                    *const VMFunctionBody,
                    *mut wasmer_types::RawValue,
                ),
                extern "C" fn(VMFunctionContext, *const VMFunctionBody, *mut u8),
            >(trampoline)(vmctx, callee, values_vec);
        })
    }
}

/// Catches any wasm traps that happen within the execution of `closure`,
/// returning them as a `Result`.
///
/// # Safety
///
/// Highly unsafe since `closure` won't have any dtors run.
pub unsafe fn catch_traps<F, R: 'static>(
    trap_handler: Option<*const TrapHandlerFn<'static>>,
    config: &VMConfig,
    closure: F,
) -> Result<R, Trap>
where
    F: FnOnce() -> R + 'static,
{
    // Ensure that per-thread initialization is done.
    lazy_per_thread_init()?;
    let stack_size = config
        .wasm_stack_size
        .unwrap_or_else(|| DEFAULT_STACK_SIZE.load(Ordering::Relaxed));
    on_wasm_stack(stack_size, trap_handler, closure).map_err(UnwindReason::into_trap)
}

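// Usage sketch (hypothetical caller; real callers go through
// `wasmer_call_trampoline`): run a closure on a bounded Wasm stack and turn any
// trap raised while it runs into a `Trap` value.
//
//     let config = VMConfig { wasm_stack_size: Some(1024 * 1024) };
//     let result: Result<u32, Trap> = unsafe { catch_traps(None, &config, || 42u32) };
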
// We need two separate thread-local variables here:
// - YIELDER is set within the new stack and is used to unwind back to the root
//   of the stack from inside it.
// - TRAP_HANDLER is set from outside the new stack and is solely used from
//   signal handlers. It must be atomic since it is used by signal handlers.
//
// We also do per-thread signal stack initialization on the first time
// TRAP_HANDLER is accessed.
thread_local! {
    static YIELDER: Cell<Option<NonNull<Yielder<(), UnwindReason>>>> = const { Cell::new(None) };
    static TRAP_HANDLER: AtomicPtr<TrapHandlerContext> = const { AtomicPtr::new(ptr::null_mut()) };
}

/// Read-only information that is used by signal handlers to handle and recover
/// from traps.
#[allow(clippy::type_complexity)]
struct TrapHandlerContext {
    inner: *const u8,
    handle_trap: fn(
        *const u8,
        usize,
        usize,
        Option<usize>,
        Option<TrapCode>,
        &mut dyn FnMut(TrapHandlerRegs),
    ) -> bool,
    custom_trap: Option<*const TrapHandlerFn<'static>>,
}
struct TrapHandlerContextInner<T> {
    /// Information about the currently running coroutine. This is used to
    /// reset execution to the root of the coroutine when a trap is handled.
    coro_trap_handler: CoroutineTrapHandler<Result<T, UnwindReason>>,
}

impl TrapHandlerContext {
    /// Runs the given function with a trap handler context. The previous
    /// trap handler context is preserved and restored afterwards.
    fn install<T, R>(
        custom_trap: Option<*const TrapHandlerFn<'static>>,
        coro_trap_handler: CoroutineTrapHandler<Result<T, UnwindReason>>,
        f: impl FnOnce() -> R,
    ) -> R {
        // Type-erase the trap handler function so that it can be placed in TLS.
        fn func<T>(
            ptr: *const u8,
            pc: usize,
            sp: usize,
            maybe_fault_address: Option<usize>,
            trap_code: Option<TrapCode>,
            update_regs: &mut dyn FnMut(TrapHandlerRegs),
        ) -> bool {
            unsafe {
                (*(ptr as *const TrapHandlerContextInner<T>)).handle_trap(
                    pc,
                    sp,
                    maybe_fault_address,
                    trap_code,
                    update_regs,
                )
            }
        }
        let inner = TrapHandlerContextInner { coro_trap_handler };
        let ctx = Self {
            inner: &inner as *const _ as *const u8,
            handle_trap: func::<T>,
            custom_trap,
        };

        compiler_fence(Ordering::Release);
        let prev = TRAP_HANDLER.with(|ptr| {
            let prev = ptr.load(Ordering::Relaxed);
            ptr.store(&ctx as *const Self as *mut Self, Ordering::Relaxed);
            prev
        });

        defer! {
            TRAP_HANDLER.with(|ptr| ptr.store(prev, Ordering::Relaxed));
            compiler_fence(Ordering::Acquire);
        }

        f()
    }

    /// Attempts to handle the trap if it's a wasm trap.
    unsafe fn handle_trap(
        pc: usize,
        sp: usize,
        maybe_fault_address: Option<usize>,
        trap_code: Option<TrapCode>,
        mut update_regs: impl FnMut(TrapHandlerRegs),
        call_handler: impl Fn(&TrapHandlerFn<'static>) -> bool,
    ) -> bool {
        unsafe {
            let ptr = TRAP_HANDLER.with(|ptr| ptr.load(Ordering::Relaxed));
            if ptr.is_null() {
                return false;
            }

            let ctx = &*ptr;

            // Check if this trap is handled by a custom trap handler.
            if let Some(trap_handler) = ctx.custom_trap
                && call_handler(&*trap_handler)
            {
                return true;
            }

            (ctx.handle_trap)(
                ctx.inner,
                pc,
                sp,
                maybe_fault_address,
                trap_code,
                &mut update_regs,
            )
        }
    }
}

impl<T> TrapHandlerContextInner<T> {
    unsafe fn handle_trap(
        &self,
        pc: usize,
        sp: usize,
        maybe_fault_address: Option<usize>,
        trap_code: Option<TrapCode>,
        update_regs: &mut dyn FnMut(TrapHandlerRegs),
    ) -> bool {
        unsafe {
            // Check if this trap occurred while executing on the Wasm stack. We can
            // only recover from traps if that is the case.
            if !self.coro_trap_handler.stack_ptr_in_bounds(sp) {
                return false;
            }

            let signal_trap = trap_code.or_else(|| {
                maybe_fault_address.map(|addr| {
                    if self.coro_trap_handler.stack_ptr_in_bounds(addr) {
                        TrapCode::StackOverflow
                    } else {
                        TrapCode::HeapAccessOutOfBounds
                    }
                })
            });

            // Don't try to generate a backtrace for stack overflows: unwinding
            // information is often not precise enough to properly describe what is
            // happening during a function prologue, which can lead the unwinder to
            // read invalid memory addresses.
            //
            // See: https://github.com/rust-lang/backtrace-rs/pull/357
            let backtrace = if signal_trap == Some(TrapCode::StackOverflow) {
                Backtrace::from(vec![])
            } else {
                Backtrace::new_unresolved()
            };

            // Set up the register state for exception return to force the
            // coroutine to return to its caller with UnwindReason::WasmTrap.
            let unwind = UnwindReason::WasmTrap {
                backtrace,
                signal_trap,
                pc,
            };
            let regs = self
                .coro_trap_handler
                .setup_trap_handler(move || Err(unwind));
            update_regs(regs);
            true
        }
    }
}

enum UnwindReason {
    /// A panic caused by the host
    Panic(Box<dyn Any + Send>),
    /// A custom error triggered by the user
    UserTrap(Box<dyn Error + Send + Sync>),
    /// A Trap triggered by a wasm libcall
    LibTrap(Trap),
    /// A trap caused by the Wasm generated code
    WasmTrap {
        backtrace: Backtrace,
        pc: usize,
        signal_trap: Option<TrapCode>,
    },
}

impl UnwindReason {
    fn into_trap(self) -> Trap {
        match self {
            Self::UserTrap(data) => Trap::User(data),
            Self::LibTrap(trap) => trap,
            Self::WasmTrap {
                backtrace,
                pc,
                signal_trap,
            } => Trap::wasm(pc, backtrace, signal_trap),
            Self::Panic(panic) => std::panic::resume_unwind(panic),
        }
    }
}

unsafe fn unwind_with(reason: UnwindReason) -> ! {
    unsafe {
        let yielder = YIELDER
            .with(|cell| cell.replace(None))
            .expect("not running on Wasm stack");

        yielder.as_ref().suspend(reason);

        // on_wasm_stack will forcibly reset the coroutine stack after yielding.
        unreachable!();
    }
}

/// Runs the given function on a separate stack so that its stack usage can be
/// bounded. Stack overflows and other traps can be caught and execution
/// returned to the root of the stack.
fn on_wasm_stack<F: FnOnce() -> T + 'static, T: 'static>(
    stack_size: usize,
    trap_handler: Option<*const TrapHandlerFn<'static>>,
    f: F,
) -> Result<T, UnwindReason> {
    // Allocating a new stack is pretty expensive since it involves several
    // system calls. We therefore keep a cache of pre-allocated stacks which
    // allows them to be reused multiple times.
    // FIXME(Amanieu): We should refactor this to avoid the lock.
    static STACK_POOL: LazyLock<crossbeam_queue::SegQueue<DefaultStack>> =
        LazyLock::new(crossbeam_queue::SegQueue::new);

    let stack = STACK_POOL
        .pop()
        .unwrap_or_else(|| DefaultStack::new(stack_size).unwrap());
    let mut stack = scopeguard::guard(stack, |stack| STACK_POOL.push(stack));

    // Create a coroutine with a new stack to run the function on.
    let coro = ScopedCoroutine::with_stack(&mut *stack, move |yielder, ()| {
        // Save the yielder to TLS so that it can be used later.
        YIELDER.with(|cell| cell.set(Some(yielder.into())));

        Ok(f())
    });

    // Ensure that YIELDER is reset on exit, even if the coroutine panics.
    defer! {
        YIELDER.with(|cell| cell.set(None));
    }

    coro.scope(|mut coro_ref| {
        // Set up metadata for the trap handler for the duration of the coroutine
        // execution. This is restored to its previous value afterwards.
        TrapHandlerContext::install(trap_handler, coro_ref.trap_handler(), || {
            match coro_ref.resume(()) {
                CoroutineResult::Yield(trap) => {
                    // This came from unwind_with, which requires that there be only
                    // Wasm code on the stack.
                    unsafe {
                        coro_ref.force_reset();
                    }
                    Err(trap)
                }
                CoroutineResult::Return(result) => result,
            }
        })
    })
}

/// When executing on the Wasm stack, temporarily switch back to the host stack
/// to perform an operation that should not be constrained by the Wasm stack
/// limits.
///
/// This is particularly important since the usage of the Wasm stack is under
/// the control of untrusted code. Malicious code could artificially induce a
/// stack overflow in the middle of a sensitive host operation (e.g. growing
/// a memory) which would be hard to recover from.
pub fn on_host_stack<F: FnOnce() -> T, T>(f: F) -> T {
    // Reset YIELDER to None for the duration of this call to indicate that we
    // are no longer on the Wasm stack.
    let yielder_ptr = YIELDER.with(|cell| cell.replace(None));

    // If we are already on the host stack, execute the function directly. This
    // happens if a host function is called directly from the API.
    let yielder = match yielder_ptr {
        Some(ptr) => unsafe { ptr.as_ref() },
        None => return f(),
    };

    // Restore YIELDER upon exiting normally or unwinding.
    defer! {
        YIELDER.with(|cell| cell.set(yielder_ptr));
    }

    // on_parent_stack requires the closure to be Send so that the Yielder
    // cannot be called from the parent stack. This is not a problem for us
    // since we don't expose the Yielder.
    struct SendWrapper<T>(T);
    unsafe impl<T> Send for SendWrapper<T> {}
    let wrapped = SendWrapper(f);
    yielder.on_parent_stack(move || {
        let wrapped = wrapped;
        (wrapped.0)()
    })
}

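// Usage sketch (hypothetical libcall body; `expensive_host_operation` is a
// made-up placeholder): run an allocation-heavy host operation without being
// constrained by the guest-controlled Wasm stack.
//
//     let result = on_host_stack(|| expensive_host_operation());
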
#[cfg(windows)]
pub fn lazy_per_thread_init() -> Result<(), Trap> {
    // We need additional space on the stack to handle stack overflow
    // exceptions. Rust's initialization code sets this to 0x5000 but this
    // seems to be insufficient in practice.
    use windows_sys::Win32::System::Threading::SetThreadStackGuarantee;
    if unsafe { SetThreadStackGuarantee(&mut 0x10000) } == 0 {
        panic!("failed to set thread stack guarantee");
    }

    Ok(())
}

/// Registers a custom alternate signal stack (sigaltstack) for the current
/// thread, if the existing one is not large enough.
///
/// Rust's libstd installs an alternate stack with size `SIGSTKSZ`, which is not
/// always large enough for our signal handling code. Override it by creating
/// and registering our own alternate stack that is large enough and has a guard
/// page.
#[cfg(unix)]
pub fn lazy_per_thread_init() -> Result<(), Trap> {
    use std::ptr::null_mut;

    thread_local! {
        /// Thread-local state is lazy-initialized on the first time it's used,
        /// and dropped when the thread exits.
        static TLS: Tls = unsafe { init_sigstack() };
    }

    /// The size of the sigaltstack (not including the guard, which will be
    /// added). Make this large enough to run our signal handlers.
    const MIN_STACK_SIZE: usize = 16 * 4096;

    enum Tls {
        OutOfMemory,
        Allocated {
            mmap_ptr: *mut libc::c_void,
            mmap_size: usize,
        },
        BigEnough,
    }

    unsafe fn init_sigstack() -> Tls {
        unsafe {
            // Check to see if the existing sigaltstack, if it exists, is big
            // enough. If so we don't need to allocate our own.
            let mut old_stack = mem::zeroed();
            let r = libc::sigaltstack(ptr::null(), &mut old_stack);
            assert_eq!(r, 0, "learning about sigaltstack failed");
            if old_stack.ss_flags & libc::SS_DISABLE == 0 && old_stack.ss_size >= MIN_STACK_SIZE {
                return Tls::BigEnough;
            }

            // ... but failing that we need to allocate our own, so do all that
            // here.
            let page_size: usize = region::page::size();
            let guard_size = page_size;
            let alloc_size = guard_size + MIN_STACK_SIZE;

            let ptr = libc::mmap(
                null_mut(),
                alloc_size,
                libc::PROT_NONE,
                libc::MAP_PRIVATE | libc::MAP_ANON,
                -1,
                0,
            );
            if ptr == libc::MAP_FAILED {
                return Tls::OutOfMemory;
            }

            // Prepare the stack with readable/writable memory and then register it
            // with `sigaltstack`.
            let stack_ptr = (ptr as usize + guard_size) as *mut libc::c_void;
            let r = libc::mprotect(
                stack_ptr,
                MIN_STACK_SIZE,
                libc::PROT_READ | libc::PROT_WRITE,
            );
            assert_eq!(r, 0, "mprotect to configure memory for sigaltstack failed");
            let new_stack = libc::stack_t {
                ss_sp: stack_ptr,
                ss_flags: 0,
                ss_size: MIN_STACK_SIZE,
            };
            let r = libc::sigaltstack(&new_stack, ptr::null_mut());
            assert_eq!(r, 0, "registering new sigaltstack failed");

            Tls::Allocated {
                mmap_ptr: ptr,
                mmap_size: alloc_size,
            }
        }
    }

    // Ensure TLS runs its initializer and return an error if it failed to
    // set up a separate stack for signal handlers.
    return TLS.with(|tls| {
        if let Tls::OutOfMemory = tls {
            Err(Trap::oom())
        } else {
            Ok(())
        }
    });

    impl Drop for Tls {
        fn drop(&mut self) {
            let (ptr, size) = match self {
                Self::Allocated {
                    mmap_ptr,
                    mmap_size,
                } => (*mmap_ptr, *mmap_size),
                _ => return,
            };
            unsafe {
                // Deallocate the stack memory.
                let r = libc::munmap(ptr, size);
                debug_assert_eq!(r, 0, "munmap failed during thread shutdown");
            }
        }
    }
}