v8: update to v10.7.193.13. (#310)
Signed-off-by: Dhi Aurrahman <dio@rockybars.com>
dio authored Oct 9, 2022
1 parent 25d6a99 commit b0a0594
Showing 4 changed files with 10 additions and 294 deletions.
263 changes: 0 additions & 263 deletions bazel/external/v8.patch
@@ -1,7 +1,5 @@
# 1. Disable pointer compression (limits the maximum number of WasmVMs).
# 2. Don't expose Wasm C API (only Wasm C++ API).
# 3. Fix cross-compilation (https://crrev.com/c/3735165).
# 4. Fix build errors in SIMD IndexOf/includes (https://crrev.com/c/3749192).

diff --git a/BUILD.bazel b/BUILD.bazel
index 5fb10d3940..a19930d36e 100644
@@ -35,264 +33,3 @@ index ce3f569fd5..dc8a4c4f6a 100644
} // extern "C"
+
+#endif
diff --git a/src/execution/clobber-registers.cc b/src/execution/clobber-registers.cc
index 8f7fba765f..a7f5bf80cf 100644
--- a/src/execution/clobber-registers.cc
+++ b/src/execution/clobber-registers.cc
@@ -5,19 +5,22 @@

#include "src/base/build_config.h"

-#if V8_HOST_ARCH_ARM
+// Check both {HOST_ARCH} and {TARGET_ARCH} to disable the functionality of this
+// file for cross-compilation. The reason is that the inline assembly code below
+// does not work for cross-compilation.
+#if V8_HOST_ARCH_ARM && V8_TARGET_ARCH_ARM
#include "src/codegen/arm/register-arm.h"
-#elif V8_HOST_ARCH_ARM64
+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
#include "src/codegen/arm64/register-arm64.h"
-#elif V8_HOST_ARCH_IA32
+#elif V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
-#elif V8_HOST_ARCH_X64
+#elif V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64
#include "src/codegen/x64/register-x64.h"
-#elif V8_HOST_ARCH_LOONG64
+#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/register-loong64.h"
-#elif V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/register-mips.h"
-#elif V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
#endif

@@ -26,14 +29,15 @@ namespace internal {

#if V8_CC_MSVC
// msvc only support inline assembly on x86
-#if V8_HOST_ARCH_IA32
+#if V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32
#define CLOBBER_REGISTER(R) __asm xorps R, R

#endif

#else // !V8_CC_MSVC

-#if V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32
+#if (V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64) || \
+ (V8_HOST_ARCH_IA32 && V8_TARGET_ARCH_IA32)
#define CLOBBER_REGISTER(R) \
__asm__ volatile( \
"xorps " \
@@ -42,20 +46,19 @@ namespace internal {
"%%" #R :: \
:);

-#elif V8_HOST_ARCH_ARM64
+#elif V8_HOST_ARCH_ARM64 && V8_TARGET_ARCH_ARM64
#define CLOBBER_REGISTER(R) __asm__ volatile("fmov " #R ",xzr" :::);

-#elif V8_HOST_ARCH_LOONG64
+#elif V8_HOST_ARCH_LOONG64 && V8_TARGET_ARCH_LOONG64
#define CLOBBER_REGISTER(R) __asm__ volatile("movgr2fr.d $" #R ",$zero" :::);

-#elif V8_HOST_ARCH_MIPS
+#elif V8_HOST_ARCH_MIPS && V8_TARGET_ARCH_MIPS
#define CLOBBER_USE_REGISTER(R) __asm__ volatile("mtc1 $zero,$" #R :::);

-#elif V8_HOST_ARCH_MIPS64
+#elif V8_HOST_ARCH_MIPS64 && V8_TARGET_ARCH_MIPS64
#define CLOBBER_USE_REGISTER(R) __asm__ volatile("dmtc1 $zero,$" #R :::);

-#endif // V8_HOST_ARCH_X64 || V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM64 ||
- // V8_HOST_ARCH_LOONG64 || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+#endif // V8_HOST_ARCH_XXX && V8_TARGET_ARCH_XXX

#endif // V8_CC_MSVC
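
The clobber-registers hunk above (tracked upstream as https://crrev.com/c/3735165, per the patch header) makes the register-clobbering inline assembly conditional on the host and the target architecture together, because inline assembly written for the host cannot be assembled when cross-compiling for a different target. A minimal sketch of the guard pattern, assuming an x64 host and target; the single-register body is illustrative, not V8's full register list:

#include "src/base/build_config.h"  // defines V8_HOST_ARCH_* / V8_TARGET_ARCH_*

// Define the clobber macro only when host and target agree; e.g. an x64 host
// cross-compiling for arm64 could not assemble this "xorps".
#if V8_HOST_ARCH_X64 && V8_TARGET_ARCH_X64
#define CLOBBER_REGISTER(R) __asm__ volatile("xorps %%" #R ",%%" #R :::);
#endif

void ClobberDoubleRegisters() {
#ifdef CLOBBER_REGISTER
  CLOBBER_REGISTER(xmm0)  // illustrative; the real function clobbers xmm0..xmm15
#endif
  // When cross-compiling, CLOBBER_REGISTER stays undefined and this is a no-op.
}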

diff --git a/src/objects/simd.cc b/src/objects/simd.cc
index 0a73b9c686..be6b72d157 100644
--- a/src/objects/simd.cc
+++ b/src/objects/simd.cc
@@ -354,8 +354,13 @@ Address ArrayIndexOfIncludes(Address array_start, uintptr_t array_len,
if (reinterpret_cast<uintptr_t>(array) % sizeof(double) != 0) {
// Slow scalar search for unaligned double array.
for (; from_index < array_len; from_index++) {
- if (fixed_array.get_representation(static_cast<int>(from_index)) ==
- *reinterpret_cast<uint64_t*>(&search_num)) {
+ if (fixed_array.is_the_hole(static_cast<int>(from_index))) {
+ // |search_num| cannot be NaN, so there is no need to check against
+ // holes.
+ continue;
+ }
+ if (fixed_array.get_scalar(static_cast<int>(from_index)) ==
+ search_num) {
return from_index;
}
}
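
The hunk above replaces a raw bit comparison (get_representation(...) against the bits of search_num) with an explicit hole check followed by a scalar ==. A hedged sketch of the same idea over a plain array of 64-bit slots; kHoleBits is an illustrative stand-in for V8's hole NaN pattern, not the actual constant:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative stand-in for V8's hole sentinel (a reserved NaN bit pattern).
constexpr uint64_t kHoleBits = 0xFFF7FFFFFFF7FFFFULL;

int64_t IndexOfDouble(const uint64_t* slots, size_t len, double search_num) {
  for (size_t i = 0; i < len; i++) {
    // |search_num| is never NaN on this path, and a hole reads back as NaN,
    // so a hole can never match; skip it explicitly.
    if (slots[i] == kHoleBits) continue;
    double value;
    std::memcpy(&value, &slots[i], sizeof value);
    // Compare values, not bit patterns: 0.0 must match -0.0, which a
    // representation-based comparison gets wrong.
    if (value == search_num) return static_cast<int64_t>(i);
  }
  return -1;
}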
diff --git a/src/objects/simd.cc b/src/objects/simd.cc
index d3cedfe330..0a73b9c686 100644
--- a/src/objects/simd.cc
+++ b/src/objects/simd.cc
@@ -95,24 +95,21 @@ inline int extract_first_nonzero_index(T v) {
}

template <>
-inline int extract_first_nonzero_index(int32x4_t v) {
- int32x4_t mask = {4, 3, 2, 1};
+inline int extract_first_nonzero_index(uint32x4_t v) {
+ uint32x4_t mask = {4, 3, 2, 1};
mask = vandq_u32(mask, v);
return 4 - vmaxvq_u32(mask);
}

template <>
-inline int extract_first_nonzero_index(int64x2_t v) {
- int32x4_t mask = {2, 0, 1, 0}; // Could also be {2,2,1,1} or {0,2,0,1}
- mask = vandq_u32(mask, vreinterpretq_s32_s64(v));
+inline int extract_first_nonzero_index(uint64x2_t v) {
+ uint32x4_t mask = {2, 0, 1, 0}; // Could also be {2,2,1,1} or {0,2,0,1}
+ mask = vandq_u32(mask, vreinterpretq_u32_u64(v));
return 2 - vmaxvq_u32(mask);
}

-template <>
-inline int extract_first_nonzero_index(float64x2_t v) {
- int32x4_t mask = {2, 0, 1, 0}; // Could also be {2,2,1,1} or {0,2,0,1}
- mask = vandq_u32(mask, vreinterpretq_s32_f64(v));
- return 2 - vmaxvq_u32(mask);
+inline int32_t reinterpret_vmaxvq_u64(uint64x2_t v) {
+ return vmaxvq_u32(vreinterpretq_u32_u64(v));
}
#endif

@@ -204,14 +201,14 @@ inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
}
#elif defined(NEON64)
if constexpr (std::is_same<T, uint32_t>::value) {
- VECTORIZED_LOOP_Neon(int32x4_t, int32x4_t, vdupq_n_u32, vceqq_u32,
+ VECTORIZED_LOOP_Neon(uint32x4_t, uint32x4_t, vdupq_n_u32, vceqq_u32,
vmaxvq_u32)
} else if constexpr (std::is_same<T, uint64_t>::value) {
- VECTORIZED_LOOP_Neon(int64x2_t, int64x2_t, vdupq_n_u64, vceqq_u64,
- vmaxvq_u32)
+ VECTORIZED_LOOP_Neon(uint64x2_t, uint64x2_t, vdupq_n_u64, vceqq_u64,
+ reinterpret_vmaxvq_u64)
} else if constexpr (std::is_same<T, double>::value) {
- VECTORIZED_LOOP_Neon(float64x2_t, float64x2_t, vdupq_n_f64, vceqq_f64,
- vmaxvq_f64)
+ VECTORIZED_LOOP_Neon(float64x2_t, uint64x2_t, vdupq_n_f64, vceqq_f64,
+ reinterpret_vmaxvq_u64)
}
#else
UNREACHABLE();
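
The hunk above fixes NEON build errors caused by signed/unsigned vector mismatches: vceqq_u32 and vceqq_u64 operate on and return unsigned vectors, so the specializations must take uint32x4_t/uint64x2_t, and 64-bit comparison masks are reduced through a uint32 reinterpretation. A self-contained sketch of that reduction, assuming an AArch64 toolchain; AnyLaneEquals is an illustrative wrapper, not a function from the patch:

#include <arm_neon.h>
#include <cstdint>

// Horizontal max over the u32 view of a 64-bit comparison mask; nonzero
// iff at least one 64-bit lane compared equal (all-ones).
inline int32_t reinterpret_vmaxvq_u64(uint64x2_t v) {
  return vmaxvq_u32(vreinterpretq_u32_u64(v));
}

bool AnyLaneEquals(const uint64_t* p, uint64_t needle) {
  uint64x2_t lanes = vld1q_u64(p);                        // load two lanes
  uint64x2_t eq = vceqq_u64(lanes, vdupq_n_u64(needle));  // per-lane mask
  return reinterpret_vmaxvq_u64(eq) != 0;
}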
diff --git a/src/objects/simd.cc b/src/objects/simd.cc
index be6b72d157..a71968fd10 100644
--- a/src/objects/simd.cc
+++ b/src/objects/simd.cc
@@ -148,9 +148,14 @@ inline int32_t reinterpret_vmaxvq_u64(uint64x2_t v) {
template <typename T>
inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
uintptr_t index, T search_element) {
- static_assert(std::is_same<T, uint32_t>::value ||
- std::is_same<T, uint64_t>::value ||
- std::is_same<T, double>::value);
+ static constexpr bool is_uint32 =
+ sizeof(T) == sizeof(uint32_t) && std::is_integral<T>::value;
+ static constexpr bool is_uint64 =
+ sizeof(T) == sizeof(uint64_t) && std::is_integral<T>::value;
+ static constexpr bool is_double =
+ sizeof(T) == sizeof(double) && std::is_floating_point<T>::value;
+
+ static_assert(is_uint32 || is_uint64 || is_double);

#if !(defined(__SSE3__) || defined(NEON64))
// No SIMD available.
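
The relaxed static_assert above matters when the caller's 64-bit type is spelled differently from uint64_t: on LP64 platforms size_t can be unsigned long while uint64_t is unsigned long long (same size, both integral), but std::is_same rejects the pair. A hedged sketch of the trait in isolation (the is_u64_like name is illustrative):

#include <cstdint>
#include <type_traits>

template <typename T>
constexpr bool is_u64_like =
    sizeof(T) == sizeof(uint64_t) && std::is_integral<T>::value;

static_assert(is_u64_like<uint64_t>, "the exact type still passes");
static_assert(is_u64_like<unsigned long long>, "same-sized integral passes");
// On LP64 targets unsigned long is 8 bytes and integral, so it passes too,
// even where std::is_same<unsigned long, uint64_t>::value is false.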
@@ -178,14 +183,14 @@ inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,

// Inserting one of the vectorized loop
#ifdef __SSE3__
- if constexpr (std::is_same<T, uint32_t>::value) {
+ if constexpr (is_uint32) {
#define MOVEMASK(x) _mm_movemask_ps(_mm_castsi128_ps(x))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
VECTORIZED_LOOP_x86(__m128i, __m128i, _mm_set1_epi32, _mm_cmpeq_epi32,
MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
- } else if constexpr (std::is_same<T, uint64_t>::value) {
+ } else if constexpr (is_uint64) {
#define SET1(x) _mm_castsi128_ps(_mm_set1_epi64x(x))
#define CMP(a, b) _mm_cmpeq_pd(_mm_castps_pd(a), _mm_castps_pd(b))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
@@ -193,20 +198,20 @@ inline uintptr_t fast_search_noavx(T* array, uintptr_t array_len,
#undef SET1
#undef CMP
#undef EXTRACT
- } else if constexpr (std::is_same<T, double>::value) {
+ } else if constexpr (is_double) {
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
VECTORIZED_LOOP_x86(__m128d, __m128d, _mm_set1_pd, _mm_cmpeq_pd,
_mm_movemask_pd, EXTRACT)
#undef EXTRACT
}
#elif defined(NEON64)
- if constexpr (std::is_same<T, uint32_t>::value) {
+ if constexpr (is_uint32) {
VECTORIZED_LOOP_Neon(uint32x4_t, uint32x4_t, vdupq_n_u32, vceqq_u32,
vmaxvq_u32)
- } else if constexpr (std::is_same<T, uint64_t>::value) {
+ } else if constexpr (is_uint64) {
VECTORIZED_LOOP_Neon(uint64x2_t, uint64x2_t, vdupq_n_u64, vceqq_u64,
reinterpret_vmaxvq_u64)
- } else if constexpr (std::is_same<T, double>::value) {
+ } else if constexpr (is_double) {
VECTORIZED_LOOP_Neon(float64x2_t, uint64x2_t, vdupq_n_f64, vceqq_f64,
reinterpret_vmaxvq_u64)
}
@@ -240,9 +245,14 @@ template <typename T>
TARGET_AVX2 inline uintptr_t fast_search_avx(T* array, uintptr_t array_len,
uintptr_t index,
T search_element) {
- static_assert(std::is_same<T, uint32_t>::value ||
- std::is_same<T, uint64_t>::value ||
- std::is_same<T, double>::value);
+ static constexpr bool is_uint32 =
+ sizeof(T) == sizeof(uint32_t) && std::is_integral<T>::value;
+ static constexpr bool is_uint64 =
+ sizeof(T) == sizeof(uint64_t) && std::is_integral<T>::value;
+ static constexpr bool is_double =
+ sizeof(T) == sizeof(double) && std::is_floating_point<T>::value;
+
+ static_assert(is_uint32 || is_uint64 || is_double);

const int target_align = 32;
// Scalar loop to reach desired alignment
@@ -256,21 +266,21 @@ TARGET_AVX2 inline uintptr_t fast_search_avx(T* array, uintptr_t array_len,
}

// Generating vectorized loop
- if constexpr (std::is_same<T, uint32_t>::value) {
+ if constexpr (is_uint32) {
#define MOVEMASK(x) _mm256_movemask_ps(_mm256_castsi256_ps(x))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
VECTORIZED_LOOP_x86(__m256i, __m256i, _mm256_set1_epi32, _mm256_cmpeq_epi32,
MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
- } else if constexpr (std::is_same<T, uint64_t>::value) {
+ } else if constexpr (is_uint64) {
#define MOVEMASK(x) _mm256_movemask_pd(_mm256_castsi256_pd(x))
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
VECTORIZED_LOOP_x86(__m256i, __m256i, _mm256_set1_epi64x,
_mm256_cmpeq_epi64, MOVEMASK, EXTRACT)
#undef MOVEMASK
#undef EXTRACT
- } else if constexpr (std::is_same<T, double>::value) {
+ } else if constexpr (is_double) {
#define CMP(a, b) _mm256_cmp_pd(a, b, _CMP_EQ_OQ)
#define EXTRACT(x) base::bits::CountTrailingZeros32(x)
VECTORIZED_LOOP_x86(__m256d, __m256d, _mm256_set1_pd, CMP,
29 changes: 5 additions & 24 deletions bazel/repositories.bzl
@@ -126,10 +126,10 @@ def proxy_wasm_cpp_host_repositories():
maybe(
git_repository,
name = "v8",
# 10.4.132.18
commit = "ce33dd2c08521fbe7f616bcd5941f2f388338030",
# 10.7.193.13
commit = "6c8b357a84847a479cd329478522feefc1c3195a",
remote = "https://chromium.googlesource.com/v8/v8",
shallow_since = "1657561920 +0000",
shallow_since = "1664374400 +0000",
patches = ["@proxy_wasm_cpp_host//bazel/external:v8.patch"],
patch_args = ["-p1"],
)
@@ -143,35 +143,16 @@ def proxy_wasm_cpp_host_repositories():
new_git_repository,
name = "com_googlesource_chromium_base_trace_event_common",
build_file = "@v8//:bazel/BUILD.trace_event_common",
commit = "d115b033c4e53666b535cbd1985ffe60badad082",
commit = "521ac34ebd795939c7e16b37d9d3ddb40e8ed556",
remote = "https://chromium.googlesource.com/chromium/src/base/trace_event/common.git",
shallow_since = "1642576054 -0800",
shallow_since = "1662508800 +0000",
)

native.bind(
name = "base_trace_event_common",
actual = "@com_googlesource_chromium_base_trace_event_common//:trace_event_common",
)

maybe(
new_git_repository,
name = "com_googlesource_chromium_zlib",
build_file = "@v8//:bazel/BUILD.zlib",
commit = "64bbf988543996eb8df9a86877b32917187eba8f",
remote = "https://chromium.googlesource.com/chromium/src/third_party/zlib.git",
shallow_since = "1653988038 -0700",
)

native.bind(
name = "zlib",
actual = "@com_googlesource_chromium_zlib//:zlib",
)

native.bind(
name = "zlib_compression_utils",
actual = "@com_googlesource_chromium_zlib//:zlib_compression_utils",
)

# WAMR with dependencies.

maybe(
10 changes: 3 additions & 7 deletions src/v8/v8.cc
@@ -30,14 +30,10 @@

#include "include/v8-version.h"
#include "include/v8.h"
#include "src/flags/flags.h"
#include "src/wasm/c-api.h"
#include "wasm-api/wasm.hh"

namespace v8::internal {
extern bool FLAG_liftoff;
extern unsigned int FLAG_wasm_max_mem_pages;
} // namespace v8::internal

namespace proxy_wasm {
namespace v8 {

@@ -46,8 +42,8 @@ wasm::Engine *engine() {
static wasm::own<wasm::Engine> engine;

std::call_once(init, []() {
::v8::internal::FLAG_liftoff = false;
::v8::internal::FLAG_wasm_max_mem_pages =
::v8::internal::v8_flags.liftoff = false;
::v8::internal::v8_flags.wasm_max_mem_pages =
PROXY_WASM_HOST_MAX_WASM_MEMORY_SIZE_BYTES / PROXY_WASM_HOST_WASM_MEMORY_PAGE_SIZE_BYTES;
::v8::V8::EnableWebAssemblyTrapHandler(true);
engine = wasm::Engine::make();
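
The v8.cc hunks track V8 10.7's flags refactor: the per-flag FLAG_* globals, previously reachable only through ad-hoc extern declarations, moved into a single v8_flags struct declared in src/flags/flags.h. A minimal sketch of the new pattern, assuming V8 >= 10.7 internal headers on the include path; the function name and the page count are illustrative:

#include "src/flags/flags.h"  // declares v8::internal::v8_flags

void ConfigureV8Flags() {
  // Disable the Liftoff baseline compiler so Wasm goes through TurboFan.
  ::v8::internal::v8_flags.liftoff = false;
  // Wasm pages are 64 KiB, so 128 pages caps memory at 8 MiB (illustrative).
  ::v8::internal::v8_flags.wasm_max_mem_pages = 128;
}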
2 changes: 2 additions & 0 deletions test/BUILD
@@ -52,6 +52,7 @@ cc_test(

cc_test(
name = "runtime_test",
timeout = "long",
srcs = ["runtime_test.cc"],
data = [
"//test/test_data:callback.wasm",
@@ -152,6 +153,7 @@ cc_test(

cc_test(
name = "wasm_vm_test",
timeout = "long",
srcs = ["wasm_vm_test.cc"],
data = [
"//test/test_data:abi_export.wasm",
