diff --git a/3rdparty/tvm b/3rdparty/tvm
index c7a8c4eef..08cce5bb7 160000
--- a/3rdparty/tvm
+++ b/3rdparty/tvm
@@ -1 +1 @@
-Subproject commit c7a8c4eef38315f9e1796818b1c3d4c68f85a8af
+Subproject commit 08cce5bb7443deedb7992a5f1c643f6e1838e8bf
diff --git a/testing/python/tilelang/test_tilelang_dyanmic_symbolic.py b/testing/python/tilelang/test_tilelang_dyanmic_symbolic.py
new file mode 100644
index 000000000..9af34e037
--- /dev/null
+++ b/testing/python/tilelang/test_tilelang_dyanmic_symbolic.py
@@ -0,0 +1,422 @@
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+import torch
+import torch.backends
+from bitblas import tvm as tvm
+import bitblas.testing
+from tvm import DataType
+from tvm import tl as TL
+import tvm.tl.language as T
+from bitblas.tl.utils import get_swizzle_layout
+from bitblas.tl.macro_generator import (TensorCoreIntrinEmitter)
+
+torch.manual_seed(0)
+
+
+def make_swizzle_layout(shared_buf):
+    dtype = shared_buf.dtype
+    shape = shared_buf.shape
+
+    can_swizzle = shape[-1] * DataType(dtype).bits == 512
+    if not can_swizzle:
+        return T.Layout(shape, lambda *args: args)
+
+    def transform_func(i, j):
+        new_warp_i, new_warp_j = get_swizzle_layout(i, j, shape[-1], dtype)
+        return [new_warp_i, new_warp_j]
+
+    return T.Layout(shape, transform_func)
+
+
+def tl_matmul_macro(
+    N,
+    K,
+    dtypeAB,
+    dtypeC,
+    accum_dtype,
+):
+    assert dtypeAB in [
+        "float16",
+        "int8",
+    ], "Currently only float16 and int8 are supported"
+    assert dtypeC in [
+        "float16",
+        "float32",
+        "int32",
+    ], "Currently only float16, float32 and int32 are supported"
+
+    micro_size_x = micro_size_y = micro_size_k = 16
+
+    if dtypeC == "int32":
+        micro_size_k = 32
+
+    # This is a debug config
+    block_row_warps = 1
+    block_col_warps = 1
+    warp_row_tiles = 16
+    warp_col_tiles = 16
+    chunk = 32 if dtypeAB == "float16" else 64
+    shared_scope = "shared.dyn"
+
+    # Pipeline Stage
+    stage = 2
+
+    block_M = block_row_warps * warp_row_tiles
+    block_N = block_col_warps * warp_col_tiles
+    block_K = chunk
+
+    M = tvm.te.var("m")
+    A_shape = (M, K)
+    B_shape = (N, K)
+    A_shared_shape = (block_M, block_K)
+    B_shared_shape = (block_N, block_K)
+    C_shared_shape = (
+        block_M // micro_size_x,
+        block_N // micro_size_y,
+        micro_size_x,
+        micro_size_y,
+    )
+
+    warp_size = 32
+    threads = warp_size * (block_row_warps * block_col_warps)
+    local_size = (micro_size_x * micro_size_y) // warp_size
+    warp_rows = warp_row_tiles // micro_size_x
+    warp_cols = warp_col_tiles // micro_size_y
+
+    # MMA Wrapper to Auto Generate Code for MMA
+    mma_emitter = TensorCoreIntrinEmitter(
+        a_dtype=dtypeAB,
+        b_dtype=dtypeAB,
+        accum_dtype=accum_dtype,
+        a_transposed=False,
+        b_transposed=True,
+        block_row_warps=block_row_warps,
+        block_col_warps=block_col_warps,
+        warp_row_tiles=warp_row_tiles,
+        warp_col_tiles=warp_col_tiles,
+        chunk=chunk,
+    )
+
+    @T.prim_func
+    def main(
+            A: T.Buffer(A_shape, dtypeAB),
+            B: T.Buffer(B_shape, dtypeAB),
+            C: T.Buffer((M, N), dtypeC),
+    ):
+        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=threads) as (bx, by):
+
+            A_shared = T.alloc_shared(A_shared_shape, dtypeAB, scope=shared_scope)
+            B_shared = T.alloc_shared(B_shared_shape, dtypeAB, scope=shared_scope)
+            C_shared = T.alloc_shared(C_shared_shape, dtypeC, scope=shared_scope)
+            A_local = T.alloc_local((warp_rows * local_size), dtypeAB)
+            B_local = T.alloc_local((warp_cols * local_size), dtypeAB)
+            C_local = T.alloc_local((warp_rows * warp_cols * local_size), accum_dtype)
+
+            thread_bindings = T.thread_binding(0, threads, "threadIdx.x")
+
+            T.annotate_layout({
+                A_shared: make_swizzle_layout(A_shared),
+                B_shared: make_swizzle_layout(B_shared),
+            })
+
+            # Improve L2 cache locality
+            T.use_swizzle(panel_size=10)
+
+            T.clear(C_local)
+
+            for ko in T.Pipelined((K // block_K), num_stages=stage):
+
+                # Load A into shared memory
+                for i, k in T.Parallel(block_M, block_K):
+                    A_shared[i, k] = A[by * block_M + i, ko * block_K + k]
+
+                # Load B into shared memory
+                for j, k in T.Parallel(block_N, block_K):
+                    B_shared[j, k] = B[bx * block_N + j, ko * block_K + k]
+
+                for ki in T.serial(0, (block_K // micro_size_k)):
+
+                    # Load A into fragment
+                    mma_emitter.ldmatrix_a(
+                        A_local,
+                        A_shared,
+                        ki,
+                        thread_bindings=thread_bindings,
+                    )
+
+                    # Load B into fragment
+                    mma_emitter.ldmatrix_b(
+                        B_local,
+                        B_shared,
+                        ki,
+                        thread_bindings=thread_bindings,
+                    )
+
+                    # Perform Matrix Multiplication
+                    mma_emitter.mma(A_local, B_local, C_local)
+
+            # Perform STMatrix
+            mma_emitter.stmatrix(
+                C_local,
+                C_shared,
+                thread_bindings=thread_bindings,
+            )
+
+            # Store shared into global
+            for i, j in T.Parallel(block_M, block_N):
+                C[by * block_M + i,
+                  bx * block_N + j] = C_shared[i // micro_size_x, j // micro_size_y,
+                                               i % micro_size_x, j % micro_size_y,]
+
+    return main
+
+
+def assert_tl_matmul_macro_correctness(M, N, K, in_dtype, dtypeC, accum_dtype):
+    matmul = tl_matmul_macro(N, K, in_dtype, dtypeC, accum_dtype)
+
+    mod, params = TL.lower(matmul)
+    src_code = mod.imported_modules[0].get_source()
+
+    # src_code is the generated CUDA source
+    assert src_code is not None
+
+    A = torch.rand(M, K, device="cuda", dtype=getattr(torch, in_dtype))
+    B = torch.rand(N, K, device="cuda", dtype=getattr(torch, in_dtype))
+    C = torch.zeros(M, N, device="cuda", dtype=getattr(torch, accum_dtype))
+
+    mod = TL.Profiler(mod, params, [], TL.TensorSupplyType.Integer)
+
+    mod(A, B, C)
+
+    # Get Reference Result
+    ref_c = torch.matmul(A, B.T).to(getattr(torch, accum_dtype))
+    torch.testing.assert_close(C, ref_c, rtol=1e-2, atol=1e-2)
+
+
+def tl_matmul_block(
+    N,
+    K,
+    block_M,
+    block_N,
+    block_K,
+    trans_A,
+    trans_B,
+    dtypeAB,
+    dtypeC,
+    accum_dtype,
+    num_stages,
+    threads,
+):
+    M = tvm.te.var("m")
+    A_shape = (K, M) if trans_A else (M, K)
+    B_shape = (N, K) if trans_B else (K, N)
+    A_shared_shape = (block_K, block_M) if trans_A else (block_M, block_K)
+    B_shared_shape = (block_N, block_K) if trans_B else (block_K, block_N)
+
+    import tvm.tl.language as T
+
+    @T.prim_func
+    def main(A: T.Buffer(A_shape, dtypeAB), B: T.Buffer(B_shape, dtypeAB), C: T.Buffer((M, N),
+                                                                                       dtypeC)):
+        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=threads) as (bx, by):
+            A_shared = T.alloc_shared(A_shared_shape, dtypeAB)
+            B_shared = T.alloc_shared(B_shared_shape, dtypeAB)
+            C_local = T.alloc_fragment((block_M, block_N), accum_dtype)
+            T.clear(C_local)
+            for k in T.Pipelined(T.ceildiv(K, block_K), num_stages=num_stages):
+                if trans_A:
+                    T.copy(A[k * block_K, by * block_M], A_shared)
+                else:
+                    T.copy(A[by * block_M, k * block_K], A_shared)
+                if trans_B:
+                    T.copy(B[bx * block_N, k * block_K], B_shared)
+                else:
+                    T.copy(B[k * block_K, bx * block_N], B_shared)
+                T.gemm(A_shared, B_shared, C_local, trans_A, trans_B)
+            T.copy(C_local, C[by * block_M, bx * block_N])
+
+    return main
+
+
+def assert_tl_matmul_block_correctness(
+    M,
+    N,
+    K,
+    trans_A,
+    trans_B,
+    dtypeAB,
+    dtypeC,
+    dtypeAccum,
+    block_M,
+    block_N,
+    block_K,
+    num_stages=3,
+    num_threads=128,
+):
+    program = tl_matmul_block(
+        N,
+        K,
+        block_M,
+        block_N,
+        block_K,
+        trans_A,
+        trans_B,
+        dtypeAB,
+        dtypeC,
+        dtypeAccum,
+        num_stages,
+        num_threads,
+    )
+    mod, params = TL.lower(program)
+
+    A = torch.rand(M, K, device="cuda", dtype=getattr(torch, dtypeAB))
+    B = torch.rand(N, K, device="cuda", dtype=getattr(torch, dtypeAB))
+    C = torch.zeros(M, N, device="cuda", dtype=getattr(torch, dtypeC))
+
+    mod = TL.Profiler(mod, params, [], TL.TensorSupplyType.Integer)
+    mod(A, B, C)
+
+    def ref_program(A, B):
+        import torch
+
+        if trans_A:
+            A = A.T
+        if trans_B:
+            B = B.T
+        C = torch.matmul(A.to(torch.float), B.to(torch.float))
+        C = C.to(getattr(torch, dtypeC))
+        return C
+
+    # Get Reference Result
+    ref_c = ref_program(A, B)
+
+    torch.testing.assert_close(C, ref_c, rtol=1e-2, atol=1e-2)
+
+
+def tl_matmul_block_all_dynamic(
+    block_M,
+    block_N,
+    block_K,
+    trans_A,
+    trans_B,
+    dtypeAB,
+    dtypeC,
+    accum_dtype,
+    num_stages,
+    threads,
+):
+    M = tvm.te.var("m")
+    N = tvm.te.var("n")
+    K = tvm.te.var("k")
+
+    A_shape = (K, M) if trans_A else (M, K)
+    B_shape = (N, K) if trans_B else (K, N)
+    A_shared_shape = (block_K, block_M) if trans_A else (block_M, block_K)
+    B_shared_shape = (block_N, block_K) if trans_B else (block_K, block_N)
+
+    import tvm.tl.language as T
+
+    @T.prim_func
+    def main(A: T.Buffer(A_shape, dtypeAB), B: T.Buffer(B_shape, dtypeAB), C: T.Buffer((M, N),
+                                                                                       dtypeC)):
+        with T.Kernel(T.ceildiv(N, block_N), T.ceildiv(M, block_M), threads=threads) as (bx, by):
+            A_shared = T.alloc_shared(A_shared_shape, dtypeAB)
+            B_shared = T.alloc_shared(B_shared_shape, dtypeAB)
+            C_local = T.alloc_fragment((block_M, block_N), accum_dtype)
+            T.clear(C_local)
+            for k in T.Pipelined(T.ceildiv(K, block_K), num_stages=num_stages):
+                if trans_A:
+                    T.copy(A[k * block_K, by * block_M], A_shared)
+                else:
+                    T.copy(A[by * block_M, k * block_K], A_shared)
+                if trans_B:
+                    T.copy(B[bx * block_N, k * block_K], B_shared)
+                else:
+                    T.copy(B[k * block_K, bx * block_N], B_shared)
+                T.gemm(A_shared, B_shared, C_local, trans_A, trans_B)
+            T.copy(C_local, C[by * block_M, bx * block_N])
+
+    return main
+
+
+def assert_tl_matmul_block_all_dynamic_correctness(
+    M,
+    N,
+    K,
+    trans_A,
+    trans_B,
+    dtypeAB,
+    dtypeC,
+    dtypeAccum,
+    block_M,
+    block_N,
+    block_K,
+    num_stages=3,
+    num_threads=128,
+):
+    program = tl_matmul_block_all_dynamic(
+        block_M,
+        block_N,
+        block_K,
+        trans_A,
+        trans_B,
+        dtypeAB,
+        dtypeC,
+        dtypeAccum,
+        num_stages,
+        num_threads,
+    )
+    mod, params = TL.lower(program)
+
+    A = torch.rand(M, K, device="cuda", dtype=getattr(torch, dtypeAB))
+    B = torch.rand(N, K, device="cuda", dtype=getattr(torch, dtypeAB))
+    C = torch.zeros(M, N, device="cuda", dtype=getattr(torch, dtypeC))
+
+    mod = TL.Profiler(mod, params, [], TL.TensorSupplyType.Integer)
+    mod(A, B, C)
+    print(mod.mod.imported_modules[0].get_source())  # debug: dump the generated CUDA source
+
+    def ref_program(A, B):
+        import torch
+
+        if trans_A:
+            A = A.T
+        if trans_B:
+            B = B.T
+        C = torch.matmul(A.to(torch.float), B.to(torch.float))
+        C = C.to(getattr(torch, dtypeC))
+        return C
+
+    # Get Reference Result
+    ref_c = ref_program(A, B)
+
+    torch.testing.assert_close(C, ref_c, rtol=1e-2, atol=1e-2)
+
+
+def test_assert_tl_matmul_macro():
+    assert_tl_matmul_macro_correctness(128, 128, 128, "float16", "float16", "float16")
+    assert_tl_matmul_macro_correctness(66, 128, 128, "float16", "float16", "float16")
+    assert_tl_matmul_macro_correctness(32, 128, 128, "float16", "float16", "float16")
+
+
+def test_assert_tl_matmul_block():
+    assert_tl_matmul_block_correctness(128, 128, 128, False, False, "float16", "float16", "float16",
+                                       64, 64, 32)
+    assert_tl_matmul_block_correctness(67, 128, 128, False, False, "float16", "float16", "float16",
+                                       64, 64, 32)
+    assert_tl_matmul_block_correctness(36, 128, 128, False, False, "float16", "float16", "float16",
+                                       64, 64, 32)
+
+
+def test_assert_tl_matmul_block_all_dynamic():
+    assert_tl_matmul_block_all_dynamic_correctness(128, 128, 128, False, False, "float16",
+                                                   "float16", "float16", 64, 64, 32)
+    assert_tl_matmul_block_all_dynamic_correctness(67, 128, 128, False, False, "float16", "float16",
+                                                   "float16", 64, 64, 32)
+    assert_tl_matmul_block_all_dynamic_correctness(36, 128, 128, False, False, "float16", "float16",
+                                                   "float16", 64, 64, 32)
+
+
+if __name__ == "__main__":
+    bitblas.testing.main()
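
Note on the dynamic-shape pattern these tests exercise: the kernel is lowered once with symbolic dimensions (`tvm.te.var`), and the same compiled module is then run at several concrete shapes, including shapes that are not multiples of the tile sizes. Below is a minimal sketch of that flow, condensed from the calls in the test file above; it reuses the `tl_matmul_block_all_dynamic` definition from this patch and the same `TL.lower` / `TL.Profiler` APIs, while the helper name `run_at_shape` is purely illustrative and not part of the patch.

    import torch
    from tvm import tl as TL

    # Build a block GEMM where M, N and K are all symbolic (tvm.te.var),
    # using the tl_matmul_block_all_dynamic defined in the patch above.
    program = tl_matmul_block_all_dynamic(
        64, 64, 32,  # block_M, block_N, block_K
        False, False,  # trans_A, trans_B
        "float16", "float16", "float16",  # dtypeAB, dtypeC, accum_dtype
        3, 128)  # num_stages, threads

    # Lower and compile once; the dynamic dims stay symbolic in the kernel.
    mod, params = TL.lower(program)
    mod = TL.Profiler(mod, params, [], TL.TensorSupplyType.Integer)

    def run_at_shape(M, N, K):  # hypothetical helper, for illustration only
        # With trans_A=False / trans_B=False the declared buffer shapes
        # are A: (M, K) and B: (K, N).
        A = torch.rand(M, K, device="cuda", dtype=torch.float16)
        B = torch.rand(K, N, device="cuda", dtype=torch.float16)
        C = torch.zeros(M, N, device="cuda", dtype=torch.float16)
        mod(A, B, C)  # one compiled module serves every concrete shape
        ref_c = torch.matmul(A.float(), B.float()).half()
        torch.testing.assert_close(C, ref_c, rtol=1e-2, atol=1e-2)

    for m in (128, 67, 36):  # the same M values the tests above use
        run_at_shape(m, 128, 128)

This is why the tests cover M = 128, 67 and 36 against a single lowered program: one symbolic-shape kernel replaces per-shape recompilation, and the odd sizes check the boundary handling of partially filled tiles.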