// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

namespace runtime {
extern runtime TierUpWasmToJSWrapper(NoContext, WasmImportData): JSAny;
extern runtime WasmThrowJSTypeError(Context): never;
}  // namespace runtime

namespace wasm {
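// Aggregate result handed back to the platform-specific WasmToJSWrapper stub.
// A sketch of the contract as implied by the code below: popCount is the
// number of stack slots the stub has to pop after the call, and
// result0/result1 and result2/result3 carry the contents of the two GP and
// two FP return-register slots filled in at the end of WasmToJSWrapper.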
@export
struct WasmToJSResult {
  popCount: intptr;
  result0: intptr;
  result1: intptr;
  result2: float64;
  result3: float64;
}

extern builtin IterableToFixedArrayForWasm(Context, JSAny, Smi): FixedArray;

extern macro StackAlignmentInBytes(): intptr;

const kSignatureOffset: constexpr intptr
    generates 'WasmToJSWrapperConstants::kSignatureOffset';

// Macro for handling platform-specific f32 returns: where FP registers always
// hold doubles (kIsFpAlwaysDouble), an f32 returned in a register is widened
// to float64; on big-endian targets (and big-endian simulators) the f32 bits
// are stored in the upper half of the 64-bit slot.
macro HandleF32Returns(
    context: NativeContext, locationAllocator: LocationAllocator,
    toRef: &intptr, retVal: JSAny): void {
  if constexpr (kIsFpAlwaysDouble) {
    if (locationAllocator.GetRemainingFPRegs() >= 0) {
      *RefCast<float64>(toRef) = ChangeTaggedToFloat64(retVal);
    } else {
      *RefCast<float32>(toRef) = WasmTaggedToFloat32(retVal);
    }
  } else if constexpr (kIsBigEndian) {
    *toRef = Convert<intptr>(Bitcast<uint32>(WasmTaggedToFloat32(retVal)))
        << 32;
  } else if constexpr (kIsBigEndianOnSim) {
    if (locationAllocator.GetRemainingFPRegs() >= 0) {
      *toRef = Convert<intptr>(Bitcast<uint32>(WasmTaggedToFloat32(retVal)))
          << 32;
    } else {
      *toRef = Convert<intptr>(Bitcast<uint32>(WasmTaggedToFloat32(retVal)));
    }
  }
}

@export
transitioning macro WasmToJSWrapper(data: WasmImportData): WasmToJSResult {
  const oldSP = SwitchToTheCentralStackIfNeeded();
  dcheck(Is<WasmImportData>(data));
  // Spill the signature on the stack so that it can be read by the GC. This
  // happens at the very beginning, before anything here can trigger a GC.
  // The signature slot sits above the caller FP and the return address.
  const sigSlot = LoadFramePointer() + kSignatureOffset;
  *GetRefAt<RawPtr>(sigSlot, 0) = data.sig;
  const alignment: intptr =
      StackAlignmentInBytes() / torque_internal::SizeOf<intptr>();
  // 1 fixed slot, rounded up to stack alignment.
  const numFixedSlots = alignment;
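  // Sketch of the wrapper frame assumed by the offsets in this macro (the
  // authoritative layout is defined by WasmToJSWrapperConstants and the
  // platform-specific stub):
  //   fp + 0 words                   : caller FP
  //   fp + 1 word                    : return address
  //   fp + kSignatureOffset          : the spilled signature (fixed slot)
  //   fp + (2 + numFixedSlots) words : first incoming stack parameter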

  ModifyThreadInWasmFlag(0);
  // Trigger a wrapper tier-up when this function has been called often enough.
  dcheck(data.wrapper_budget > 0);
  data.wrapper_budget = data.wrapper_budget - 1;
  if (data.wrapper_budget == 0) {
    runtime::TierUpWasmToJSWrapper(kNoContext, data);
  }

  const returnCount = *torque_internal::unsafe::NewOffHeapReference(
      %RawDownCast<RawPtr<intptr>>(data.sig + 0));
  const paramCount = *torque_internal::unsafe::NewOffHeapReference(
      %RawDownCast<RawPtr<intptr>>(
          data.sig + torque_internal::SizeOf<intptr>()));
  const valueTypesStorage = *torque_internal::unsafe::NewOffHeapReference(
      %RawDownCast<RawPtr<RawPtr<int32>>>(
          data.sig + 2 * torque_internal::SizeOf<intptr>()));
  const signatureValueTypes =
      torque_internal::unsafe::NewOffHeapConstSlice<int32>(
          valueTypesStorage, paramCount + returnCount);
  const returnTypes =
      Subslice(signatureValueTypes, 0, returnCount) otherwise unreachable;
  const paramTypes = Subslice(signatureValueTypes, returnCount, paramCount)
      otherwise unreachable;
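  // Layout of the raw signature referenced by data.sig, as implied by the
  // loads above (a sketch, not an authoritative definition):
  //   data.sig + 0 words : intptr  return count
  //   data.sig + 1 word  : intptr  parameter count
  //   data.sig + 2 words : int32*  value types, all return types first,
  //                                followed by all parameter types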

  // The number of parameters that get pushed on the stack is (at least) the
  // number of incoming parameters plus the receiver.
  const numStackParams = paramCount + 1;
  const outParams = WasmAllocateZeroedFixedArray(numStackParams);
  let nextIndex: intptr = 0;
  // Set the receiver to `Undefined` as the default. If the receiver needs to
  // be different, e.g. the global proxy for sloppy functions, then the
  // CallVarargs builtin takes care of it automatically.
  outParams.objects[nextIndex++] = Undefined;

  // Caller FP + return address + fixed slots.
  const stackParamStart = LoadFramePointer() +
      (2 + numFixedSlots) * torque_internal::SizeOf<intptr>();
  const inParams = torque_internal::unsafe::NewOffHeapReference(
      %RawDownCast<RawPtr<intptr>>(stackParamStart));

  let locationAllocator = LocationAllocatorForParams(inParams, 0);
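  // Assumption (a sketch of the calling contract): the stub has spilled all
  // incoming parameter registers into the area at stackParamStart, in
  // calling-convention order, so the allocator can replay the signature over
  // this flat area to locate each parameter's slot.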

  let paramIt = paramTypes.Iterator();

  let hasTaggedParams: bool = false;
  while (!paramIt.Empty()) {
    const paramType = paramIt.NextNotEmpty();
    if (paramType == kWasmI32Type) {
      const slot = locationAllocator.GetGPSlot();
      let val: int32;
      if constexpr (kIsBigEndian) {
        val = TruncateInt64ToInt32(*RefCast<int64>(slot));
      } else {
        val = *RefCast<int32>(slot);
      }
      outParams.objects[nextIndex++] = Convert<Number>(val);
    } else if (paramType == kWasmF32Type) {
      const slot = locationAllocator.GetFP32Slot();
      let val: float32;
      if constexpr (kIsFpAlwaysDouble) {
        if (locationAllocator.GetRemainingFPRegs() >= 0) {
          val = TruncateFloat64ToFloat32(*RefCast<float64>(slot));
        } else {
          val = *RefCast<float32>(slot);
        }
      } else if constexpr (kIsBigEndianOnSim) {
        if (locationAllocator.GetRemainingFPRegs() >= 0) {
          val = BitcastInt32ToFloat32(
              TruncateInt64ToInt32(*RefCast<int64>(slot) >> 32));
        } else {
          val = *RefCast<float32>(slot);
        }
      } else {
        val = *RefCast<float32>(slot);
      }
      outParams.objects[nextIndex++] = Convert<Number>(val);
    } else if (paramType == kWasmI64Type) {
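      // An i64 arrives in a single GP slot on 64-bit targets, and as a
      // low/high word pair in two GP slots on 32-bit targets; either way it
      // is converted to a BigInt.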
      if constexpr (Is64()) {
        const slot = locationAllocator.GetGPSlot();
        const val = *slot;
        outParams.objects[nextIndex++] = I64ToBigInt(val);
      } else {
        const lowWordSlot = locationAllocator.GetGPSlot();
        const highWordSlot = locationAllocator.GetGPSlot();
        const lowWord = *lowWordSlot;
        const highWord = *highWordSlot;
        outParams.objects[nextIndex++] = I32PairToBigInt(lowWord, highWord);
      }
    } else if (paramType == kWasmF64Type) {
      const slot = locationAllocator.GetFP64Slot();
      const val = *RefCast<float64>(slot);
      outParams.objects[nextIndex++] = Convert<Number>(val);
    } else {
      const paramKind = paramType & kValueTypeKindBitsMask;
      check(paramKind == ValueKind::kRef || paramKind == ValueKind::kRefNull);
      nextIndex++;
      hasTaggedParams = true;
    }
  }

  // Second loop for tagged parameters: reference parameters occupy a
  // contiguous block of GP locations that begins only after all untagged
  // parameters (see StartRefs below), so their slots can be assigned only in
  // a second pass.
  if (hasTaggedParams) {
    locationAllocator.StartRefs();
    nextIndex = 1;
    paramIt = paramTypes.Iterator();
    while (!paramIt.Empty()) {
      const paramType = paramIt.NextNotEmpty();
      const paramKind = paramType & kValueTypeKindBitsMask;
      if (paramKind == ValueKind::kRef || paramKind == ValueKind::kRefNull) {
        const slot = locationAllocator.GetGPSlot();
        const rawRef = *slot;
        const value = BitcastWordToTagged(rawRef);
        outParams.objects[nextIndex] =
            WasmToJSObject(data.native_context, value, paramType);
      }
      nextIndex++;
    }
  }
  const target = data.callable;

  const context = data.native_context;
  // Reset the signature on the stack, so that incoming parameters don't get
  // scanned anymore.
  *GetRefAt<intptr>(sigSlot, 0) = 0;

  const result = CallVarargs(
      context, target, 0, Convert<int32>(numStackParams), outParams);

  // Put a marker on the stack to indicate to the frame iterator that the call
  // to JavaScript is finished. For asm.js source positions it is important to
  // know if an exception happened in the call to JS, or in the ToNumber
  // conversion afterwards.
  *GetRefAt<intptr>(sigSlot, 0) = -1;
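  // With more than one return value, the JS result is expected to be an
  // iterable with returnCount elements, which IterableToFixedArrayForWasm
  // materializes as a FixedArray. A single return value is consumed directly
  // from `result` in the loop below.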
  let resultFixedArray: FixedArray;
  if (returnCount > 1) {
    resultFixedArray =
        IterableToFixedArrayForWasm(context, result, Convert<Smi>(returnCount));
  } else {
    resultFixedArray = kEmptyFixedArray;
  }

  const gpRegSlots = %RawDownCast<RawPtr<intptr>>(StackSlotPtr(
      2 * torque_internal::SizeOf<intptr>(),
      torque_internal::SizeOf<intptr>()));
  const fpRegSlots = %RawDownCast<RawPtr<float64>>(StackSlotPtr(
      2 * torque_internal::SizeOf<float64>(),
      torque_internal::SizeOf<float64>()));
  // The stack return area starts right after the stack parameter area.
  const stackSlots =
      locationAllocator.GetAlignedStackEnd(StackAlignmentInBytes());
  locationAllocator =
      LocationAllocatorForReturns(gpRegSlots, fpRegSlots, stackSlots);
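  // Register returns go into the scratch areas allocated above (two GP-sized
  // slots and two float64 slots, copied into the WasmToJSResult at the end);
  // any further returns are placed in the stack return area.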

  let returnIt = returnTypes.Iterator();
  nextIndex = 0;
  let hasTagged: bool = false;
  while (!returnIt.Empty()) {
    let retVal: JSAny;
    if (returnCount == 1) {
      retVal = result;
    } else {
      retVal = UnsafeCast<JSAny>(resultFixedArray.objects[nextIndex]);
    }
    const retType = returnIt.NextNotEmpty();
    if (retType == kWasmI32Type) {
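      // The i32 result is zero-extended to a full GP word. Smis convert
      // directly; other heap values go through the generic tagged-to-int32
      // conversion, which may call back into JS (e.g. valueOf).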
      let toRef = locationAllocator.GetGPSlot();
      typeswitch (retVal) {
        case (smiVal: Smi): {
          *toRef = Convert<intptr>(Unsigned(SmiToInt32(smiVal)));
        }
        case (heapVal: JSAnyNotSmi): {
          *toRef = Convert<intptr>(Unsigned(WasmTaggedNonSmiToInt32(heapVal)));
        }
      }
    } else if (retType == kWasmF32Type) {
      let toRef = locationAllocator.GetFP32Slot();
      if constexpr (kIsFpAlwaysDouble || kIsBigEndian || kIsBigEndianOnSim) {
        HandleF32Returns(context, locationAllocator, toRef, retVal);
      } else {
        *toRef = Convert<intptr>(Bitcast<uint32>(WasmTaggedToFloat32(retVal)));
      }
    } else if (retType == kWasmF64Type) {
      let toRef = locationAllocator.GetFP64Slot();
      *RefCast<float64>(toRef) = ChangeTaggedToFloat64(retVal);
    } else if (retType == kWasmI64Type) {
      if constexpr (Is64()) {
        let toRef = locationAllocator.GetGPSlot();
        const v = TruncateBigIntToI64(context, retVal);
        *toRef = v;
      } else {
        let toLowRef = locationAllocator.GetGPSlot();
        let toHighRef = locationAllocator.GetGPSlot();
        const bigIntVal = ToBigInt(context, retVal);
        const pair = BigIntToRawBytes(bigIntVal);
        *toLowRef = Signed(pair.low);
        *toHighRef = Signed(pair.high);
      }
    } else {
      const retKind = retType & kValueTypeKindBitsMask;
      check(retKind == ValueKind::kRef || retKind == ValueKind::kRefNull);
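      // A Smi in data.instance_data signals that no WasmTrustedInstanceData
      // is attached to this import; JSToWasmObject then receives Undefined
      // instead of an instance.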
      const trustedData = TaggedIsSmi(data.instance_data) ?
          Undefined :
          UnsafeCast<WasmTrustedInstanceData>(data.instance_data);
      const converted = JSToWasmObject(context, trustedData, retType, retVal);
      if (returnCount == 1) {
        // There are no other values, so we can write the object directly into
        // the result buffer.
        let toRef = locationAllocator.GetGPSlot();
        *toRef = BitcastTaggedToWord(converted);
      } else {
        // Storing the converted value back in the FixedArray serves two
        // purposes:
        // (1) There may be other return values whose conversion can still
        //     trigger a GC.
        // (2) Tagged values are reordered to the end, so we can't assign
        //     their locations yet.
        hasTagged = true;
        resultFixedArray.objects[nextIndex] = converted;
      }
    }
    nextIndex++;
  }
  if (hasTagged) {
    locationAllocator.StartRefs();
    returnIt = returnTypes.Iterator();
    nextIndex = 0;
    while (!returnIt.Empty()) {
      const retType = returnIt.NextNotEmpty();
      const retKind = retType & kValueTypeKindBitsMask;
      if (retKind == ValueKind::kRef || retKind == ValueKind::kRefNull) {
        let toRef = locationAllocator.GetGPSlot();
        const value = resultFixedArray.objects[nextIndex];
        *toRef = BitcastTaggedToWord(value);
      }
      nextIndex++;
    }
  }

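  // Number of stack slots the stub has to pop after the call: the (aligned)
  // incoming stack parameter area plus the fixed frame slots. The stack
  // return area beyond stackSlots, if any, is presumably owned by the wasm
  // caller and is not popped here.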
  const popCount =
      (Convert<intptr>(stackSlots) - Convert<intptr>(stackParamStart)) /
      torque_internal::SizeOf<intptr>() +
      numFixedSlots;

  ModifyThreadInWasmFlag(1);
  const wasmToJSResult = WasmToJSResult{
    popCount: popCount,
    result0: *GetRefAt<intptr>(gpRegSlots, 0),
    result1: *GetRefAt<intptr>(gpRegSlots, torque_internal::SizeOf<intptr>()),
    result2: *GetRefAt<float64>(fpRegSlots, 0),
    result3: *GetRefAt<float64>(fpRegSlots, torque_internal::SizeOf<float64>())
  };
  SwitchFromTheCentralStack(oldSP);
  return wasmToJSResult;
}
}  // namespace wasm