Merge branch 'main' into aaron.stgeorge/upgrade_01_24
aaron-stgeorge committed Jan 17, 2024
2 parents bf6191f + 9c46aeb commit 33846e9
Showing 5 changed files with 97 additions and 20 deletions.
7 changes: 7 additions & 0 deletions .bazelrc
@@ -34,3 +34,10 @@ build:clang_linux --linkopt=-fuse-ld=lld --host_linkopt=-fuse-ld=lld
build:clang_linux --config=generic_clang

build:clang_osx --config=generic_clang

# Other compilation modes
build:opt --compilation_mode=opt
build:dbg --compilation_mode=dbg

# GDB builds in dbg mode
build:gdb --config=dbg
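
These new configs compose with the existing toolchain configs; a minimal usage sketch (assuming the `clang_linux` config defined above):
```shell
# Optimized build
bazel build --config=clang_linux --config=opt //:tcp-opt

# Debug build (used by the gdb workflow described in the README below)
bazel build --config=clang_linux --config=gdb //:tcp-opt
```
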
62 changes: 62 additions & 0 deletions README.md
@@ -52,3 +52,65 @@ The following CI workflows are automatically triggered anytime upstream dependen
- [![Bazel Build and Test (llvm-project)](https://github.com/cruise-automation/mlir-tcp/actions/workflows/bazelBuildAndTestLlvm.yml/badge.svg)](https://github.com/cruise-automation/mlir-tcp/actions/workflows/bazelBuildAndTestLlvm.yml)
- [![Bazel Build and Test (torch-mlir)](https://github.com/cruise-automation/mlir-tcp/actions/workflows/bazelBuildAndTestTorchmlir.yml/badge.svg)](https://github.com/cruise-automation/mlir-tcp/actions/workflows/bazelBuildAndTestTorchmlir.yml)
- [![Bazel Build and Test (stablehlo)](https://github.com/cruise-automation/mlir-tcp/actions/workflows/bazelBuildAndTestStablehlo.yml/badge.svg)](https://github.com/cruise-automation/mlir-tcp/actions/workflows/bazelBuildAndTestStablehlo.yml)


## Debugging Guide

Below are some standard techniques for debugging your compilation process, assuming you've reduced it to a form that can be reproduced with `tcp-opt`. For MLIR-specific debugging tips, refer to the [MLIR debugging guide](https://mlir.llvm.org/getting_started/Debugging/).

### `printf` debugging

Printing to stdout/stderr works as usual:
```C++
op.emitWarning() << "HERE: " << myVariable; // preferred for op/loc diagnostics

llvm::errs() << "HERE: " << myVariable << "\n"; // alternative
```
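
When it helps to see the surrounding IR, operations can also be dumped directly to stderr (a quick sketch; `op` stands in for whatever op handle you already have):
```C++
op->dump();                                   // print just this op
op->getParentOfType<mlir::ModuleOp>().dump(); // print the enclosing module
```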

You can also hook into the [LLVM_DEBUG](https://llvm.org/docs/ProgrammersManual.html#the-llvm-debug-macro-and-debug-option) macro:
```C++
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "foo"
LLVM_DEBUG(llvm::dbgs() << "This only shows up when -debug or -debug-only=foo is provided.\n");
#undef DEBUG_TYPE

#define DEBUG_TYPE "bar"
LLVM_DEBUG(llvm::dbgs() << "This only shows up when -debug or -debug-only=bar is provided.\n");
#undef DEBUG_TYPE
```
Then run with the `-debug-only=foo,bar` flag to filter out messages that aren't associated with the passed `DEBUG_TYPE`s.
```shell
bazel run --config=clang_linux //:tcp-opt -- --some-pass `pwd`/test.mlir -debug-only=foo,bar
```

### `gdb` debugging

To debug `tcp-opt` with [gdb](https://www.sourceware.org/gdb/):
```shell
bazel build --config=clang_linux --config=gdb //:tcp-opt

gdb --args bazel-bin/tcp-opt -h
```

For help with gdb commands, refer to this [gdb cheat sheet](https://gist.github.com/rkubik/b96c23bd8ed58333de37f2b8cd052c30).
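
A typical session might look like the following sketch (the pass name and input file are placeholders, and `mlir::PassManager::run` is just one convenient place to break):
```shell
gdb --args bazel-bin/tcp-opt --some-pass test.mlir
(gdb) break mlir::PassManager::run
(gdb) run
(gdb) bt
(gdb) continue
```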

### Enable `llvm-symbolizer`

If you get a stack dump without any symbol names:
```shell
Stack dump without symbol names (ensure you have llvm-symbolizer in your PATH or set the environment var `LLVM_SYMBOLIZER_PATH` to point to it):
0 tcp-opt 0x000055ac1c9c0c1d
1 tcp-opt 0x000055ac1c9c110b
2 tcp-opt 0x000055ac1c9be846
3 tcp-opt 0x000055ac1c9c1855
4 libc.so.6 0x00007f7011c6a520
...
```

Build `llvm-symbolizer` and point `LLVM_SYMBOLIZER_PATH` at it, then re-run:
```shell
bazel build --config=clang_linux @llvm-project//llvm:llvm-symbolizer
export LLVM_SYMBOLIZER_PATH=`pwd`/bazel-bin/external/llvm-project/llvm/llvm-symbolizer
```
12 changes: 6 additions & 6 deletions deps.bzl
@@ -22,8 +22,8 @@ def third_party_deps():
path = local_llvm_repo_path(),
)
else:
LLVM_COMMIT = "5e5a22caf88ac1ccfa8dc5720295fdeba0ad9372"
LLVM_SHA256 = "9d9ae8ae30f6262ca0823493893398ea2ab6fbd49027e338e06ac7c25bb8caf4"
LLVM_COMMIT = "6b65d79fbb4682468333cea42b62f15c2dffd8f3"
LLVM_SHA256 = "99278b9422998e248703c348565ca973b10678b65697dcf4dcc80acb9c174c2a"
http_archive(
name = "llvm-raw",
build_file_content = "# empty",
@@ -39,8 +39,8 @@ def third_party_deps():
path = local_torch_mlir_repo_path(),
)
else:
TORCH_MLIR_COMMIT = "44f6942796536a7cf2eee37ec383c9db74fa853f"
TORCH_MLIR_SHA256 = "168b9eebeee2754d7a804eae9465c84adf7b4019344e2a4a2594799ecbfff4f0"
TORCH_MLIR_COMMIT = "670a99ae196da892310776f110cfe29dfb68a174"
TORCH_MLIR_SHA256 = "7bec6b45d848718e255a3f59fe41a96126db25df84e69b3974d81f4ac0d0a65e"
http_archive(
name = "torch-mlir-raw",
sha256 = TORCH_MLIR_SHA256,
@@ -55,8 +55,8 @@ def third_party_deps():
path = local_stablehlo_repo_path(),
)
else:
STABLEHLO_COMMIT = "83f095e7217c897f1eccac5652600ceb944cb0e0"
STABLEHLO_SHA256 = "bd31b22048ce214d191678d294a05495071167abea60f89e0578d4db346aa0fd"
STABLEHLO_COMMIT = "3260a31f09744419377dae409043f12bb7418c38"
STABLEHLO_SHA256 = "41cacc34cb591fbc73674f135475928987a1e8ff929ad1329b17bd75a449d102"
http_archive(
name = "stablehlo",
sha256 = STABLEHLO_SHA256,
3 changes: 2 additions & 1 deletion docker/Dockerfile
@@ -12,7 +12,8 @@ RUN apt-get update && \
wget \
lld \
clang \
clang-format
clang-format \
gdb

# Install bazel
ARG ARCH="x86_64"
33 changes: 20 additions & 13 deletions test/Conversion/TorchToTcp/tcp_custom_ops.mlir
@@ -1,4 +1,4 @@
// RUN: tcp-opt <%s -convert-torch-to-tcp-custom-op -canonicalize -split-input-file -verify-diagnostics | FileCheck %s
// RUN: tcp-opt <%s -convert-torch-to-tcp-custom-op -canonicalize -split-input-file | FileCheck %s

// CHECK-LABEL: func.func @torch.aten.gather_op(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[2,2],si64>
@@ -58,18 +58,27 @@ func.func @torch.aten.index_put_impl_op(%arg0: !torch.vtensor<[25],f32>, %arg1:
return %1 : !torch.vtensor<[25],f32>
}


// -----

// CHECK: tcp.custom_op("torch.aten.convolution") %{{.*}}, %{{.*}}, %{{.*}} {
// CHECK-SAME: dilation = [1 : index, 1 : index],
// CHECK-SAME: groups = 1 : i64,
// CHECK-SAME: output_padding = [1 : index, 1 : index],
// CHECK-SAME: padding = [1 : index, 1 : index],
// CHECK-SAME: stride = [2 : index, 2 : index],
// CHECK-SAME: torch_operand_names = ["input", "weight", "bias"],
// CHECK-SAME: transposed = true} : tensor<1x64x1x100xf32>, tensor<64x64x3x3xf32>, tensor<64xf32> -> tensor<1x64x2x200xf32>
func.func @torcn.aten.transposed_convolution(%input: !torch.vtensor<[1,64,1,100],f32>) -> !torch.vtensor<[1,64,2,200],f32> {
// CHECK-LABEL: func.func @torch.aten.transposed_convolution(
// CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[1,64,1,100],f32>) -> !torch.vtensor<[1,64,2,200],f32>
// CHECK: %[[T0:.*]] = torch.vtensor.literal(dense<0.000000e+00> : tensor<64xf32>) : !torch.vtensor<[64],f32>
// CHECK: %[[T1:.*]] = torch.vtensor.literal(dense<0.000000e+00> : tensor<64x64x3x3xf32>) : !torch.vtensor<[64,64,3,3],f32>
// CHECK: %[[T2:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[1,64,1,100],f32> -> tensor<1x64x1x100xf32>
// CHECK: %[[T3:.*]] = torch_c.to_builtin_tensor %[[T1]] : !torch.vtensor<[64,64,3,3],f32> -> tensor<64x64x3x3xf32>
// CHECK: %[[T4:.*]] = torch_c.to_builtin_tensor %[[T0]] : !torch.vtensor<[64],f32> -> tensor<64xf32>
// CHECK: %[[CUSTOM:.*]] = tcp.custom_op("torch.aten.convolution") %[[T2]], %[[T3]], %[[T4]] {
// CHECK-SAME: dilation = [1 : index, 1 : index],
// CHECK-SAME: groups = 1 : i64,
// CHECK-SAME: output_padding = [1 : index, 1 : index],
// CHECK-SAME: padding = [1 : index, 1 : index],
// CHECK-SAME: stride = [2 : index, 2 : index],
// CHECK-SAME: torch_operand_names = ["input", "weight", "bias"],
// CHECK-SAME: transposed = true}
// CHECK-SAME: tensor<1x64x1x100xf32>, tensor<64x64x3x3xf32>, tensor<64xf32> -> tensor<1x64x2x200xf32>
// CHECK: %[[RES:.*]] = torch_c.from_builtin_tensor %[[CUSTOM]] : tensor<1x64x2x200xf32> -> !torch.vtensor<[1,64,2,200],f32>
// CHECK: return %[[RES]] : !torch.vtensor<[1,64,2,200],f32>
func.func @torch.aten.transposed_convolution(%input: !torch.vtensor<[1,64,1,100],f32>) -> !torch.vtensor<[1,64,2,200],f32> {
%true = torch.constant.bool true
%int1 = torch.constant.int 1
%int2 = torch.constant.int 2
@@ -78,7 +78,6 @@ func.func @torcn.aten.transposed_convolution(%input: !torch.vtensor<[1,64,1,100]
%stride = torch.prim.ListConstruct %int2, %int2 : (!torch.int, !torch.int) -> !torch.list<int>
%int1x1 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%output = torch.aten.convolution %input, %weight, %bias, %stride, %int1x1, %int1x1, %true, %int1x1, %int1 : !torch.vtensor<[1,64,1,100],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,2,200],f32>

return %output : !torch.vtensor<[1,64,2,200],f32>
}

@@ -95,6 +103,5 @@ func.func @torch.aten.regular_convolution() -> !torch.vtensor<[1,32,16,1600],f32
%int1x1 = torch.prim.ListConstruct %int1, %int1 : (!torch.int, !torch.int) -> !torch.list<int>
%none = torch.constant.none
%output = torch.aten.convolution %input, %weights, %none, %int1x1, %int1x1, %int1x1, %false, %int0x0, %int1 : !torch.vtensor<[1,9,16,1600],f32>, !torch.vtensor<[32,9,3,3],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,32,16,1600],f32>

return %output : !torch.vtensor<[1,32,16,1600],f32>
}
