[Packing Refactor] Move all Blockwise Packing to pack_weights_and_bias #7544

Open · wants to merge 3 commits into base: master
9 changes: 5 additions & 4 deletions bench/gemm-benchmark.cc
@@ -735,7 +735,7 @@ void GEMMBenchmark(benchmark::State& state,
gemm_config.log2_sr = static_cast<uint8_t>(31 - math_clz_nonzero_u32(sr));

const size_t packed_w_stride =
-     packed_stride(&gemm_config, kc, /*k_stride=*/kc, /*extra_bytes=*/0);
+     packed_stride(&gemm_config, kc, /*unused_block_size=*/0, /*k_stride=*/kc, /*extra_bytes=*/0);
const size_t packed_w_size = packed_w_stride * round_up(nc, nr);

const size_t c_elements = mc * nc;
@@ -760,7 +760,7 @@ void GEMMBenchmark(benchmark::State& state,
const xnn_qs8_qc4w_packing_params packing_params = {/*input_zero_point=*/1,
/*kernel_zero_point=*/8};
pack_weights(/*flags=*/0, &gemm_config, kc, nc,
-              /*groups=*/1, /*k_stride=*/kc,
+              /*groups=*/1, /*unused_block_size=*/0, /*k_stride=*/kc,
/*accumulator_init=*/nullptr,
/*weights=*/k.data(),
/*int_extra_data0_fn=*/nullptr,
@@ -852,7 +852,7 @@ void GEMMBenchmark(benchmark::State& state,
gemm_config.log2_sr = static_cast<uint8_t>(31 - math_clz_nonzero_u32(sr));

const size_t packed_w_stride =
-     packed_stride(&gemm_config, k2, /*k_stride=*/bl, /*extra_bytes=*/0);
+     packed_stride(&gemm_config, k2, /*block_size=*/bl, /*k_stride=*/kc, /*extra_bytes=*/0);
const size_t packed_w_size = packed_w_stride * round_up(nc, nr);

const size_t c_elements = mc * nc;
@@ -879,7 +879,8 @@ void GEMMBenchmark(benchmark::State& state,
const xnn_qs8_qc4w_packing_params packing_params = {/*input_zero_point=*/1,
/*kernel_zero_point=*/8};
pack_weights(/*flags=*/0, &gemm_config, k2, nc,
-              /*groups=*/1, /*k_stride=*/bl,
+              /*groups=*/1, /*block_size=*/bl,
+              /*k_stride=*/kc,
/*accumulator_init=*/nullptr,
/*weights=*/k.data(),
/*int_extra_data0_fn=*/nullptr,
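The two benchmark hunks above capture the new calling convention: the generic packing helpers now take a block-size argument in every case, with 0 standing in for "no blockwise quantization". A minimal sketch of the two call shapes, reusing the benchmark's own names (gemm_config, kc, k2, bl); anything beyond the argument lists visible in the diff is an assumption:

// Channelwise (qc4w) path: no blocks, so the new slot is simply 0.
const size_t packed_w_stride_qc4w = packed_stride(
    &gemm_config, kc, /*unused_block_size=*/0, /*k_stride=*/kc, /*extra_bytes=*/0);

// Blockwise (qb4w) path: the real block size goes in the new slot, and
// k_stride goes back to being the channel count rather than the block size.
const size_t packed_w_stride_qb4w = packed_stride(
    &gemm_config, k2, /*block_size=*/bl, /*k_stride=*/kc, /*extra_bytes=*/0);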
9 changes: 6 additions & 3 deletions src/configs/gemm-config.c
@@ -1539,7 +1539,8 @@ static void init_qd8_f16_qb4w_gemm_config(void) {
}

static void init_qd8_f16_qb4w_gemm_config(void) {
- qd8_f16_qb4w_gemm_config.pack_gemm_goi_bl = (xnn_packw_gemm_goi_bl_ukernel_fn) xnn_pack_qs8_qb4w_gemm_goi_w;
+ qd8_f16_qb4w_gemm_config.packed_stride_weights_and_biases = xnn_packed_stride_qb4_weights_and_biases;
+ qd8_f16_qb4w_gemm_config.pack_weights_and_biases = xnn_pack_qb4_weights_and_biases;

#if XNN_ARCH_ARM && XNN_ENABLE_ARM_FP16_VECTOR && XNN_ENABLE_ARM_FP16_SCALAR
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
@@ -1845,7 +1846,8 @@ static void init_qp8_f32_qb4w_gemm_config(void) {
}

static void init_qdu8_f32_qb4w_gemm_config(void) {
- qdu8_f32_qb4w_gemm_config.pack_gemm_goi_bl = (xnn_packw_gemm_goi_bl_ukernel_fn) xnn_pack_qs8_qb4w_gemm_goi_w;
+ qdu8_f32_qb4w_gemm_config.packed_stride_weights_and_biases = xnn_packed_stride_qb4_weights_and_biases;
+ qdu8_f32_qb4w_gemm_config.pack_weights_and_biases = xnn_pack_qb4_weights_and_biases;
#if XNN_ARCH_X86 || XNN_ARCH_X86_64
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
assert(hardware_config != NULL);
@@ -1884,7 +1886,8 @@ static void init_qdu8_f32_qb4w_gemm_config(void) {
}

static void init_qd8_f32_qb4w_gemm_config(void) {
- qd8_f32_qb4w_gemm_config.pack_gemm_goi_bl = (xnn_packw_gemm_goi_bl_ukernel_fn) xnn_pack_qs8_qb4w_gemm_goi_w;
+ qd8_f32_qb4w_gemm_config.packed_stride_weights_and_biases = xnn_packed_stride_qb4_weights_and_biases;
+ qd8_f32_qb4w_gemm_config.pack_weights_and_biases = xnn_pack_qb4_weights_and_biases;

#if XNN_ARCH_ARM
const struct xnn_hardware_config* hardware_config = xnn_init_hardware_config();
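All three config hunks apply the same substitution: each qb4w GEMM config drops the blockwise-specific pack_gemm_goi_bl ukernel and installs the generic packed-stride/pack pair instead. A sketch of the pattern, with `config` as a hypothetical stand-in for qd8_f16_qb4w_gemm_config, qdu8_f32_qb4w_gemm_config, or qd8_f32_qb4w_gemm_config:

// Old wiring (removed): blockwise packing went through its own ukernel hook.
// config.pack_gemm_goi_bl =
//     (xnn_packw_gemm_goi_bl_ukernel_fn) xnn_pack_qs8_qb4w_gemm_goi_w;

// New wiring: the same two hooks that non-blockwise GEMM configs already use.
config.packed_stride_weights_and_biases = xnn_packed_stride_qb4_weights_and_biases;
config.pack_weights_and_biases = xnn_pack_qb4_weights_and_biases;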
10 changes: 7 additions & 3 deletions src/operators/batch-matrix-multiply-nc.c
@@ -182,7 +182,9 @@ enum xnn_status xnn_create_batch_matrix_multiply_nc_f32_const_weights(
// Pack the weights.
if (gemm_config->pack_weights_and_biases) {
gemm_config->pack_weights_and_biases(flags, gemm_config, k, n,
-        /*groups=*/batch_size_b, k_stride,
+        /*groups=*/batch_size_b,
+        /*unused_block_size=*/0,
+        /*k_stride=*/k_stride,
/*accumulator_init=*/NULL,
/*weights=*/data_b,
/*int_extra_data0_fn=*/NULL,
@@ -311,7 +313,7 @@ enum xnn_status create_batch_matrix_multiply_nc_qx8_f32_qc8w(
const size_t weights_stride =
gemm_config->packed_stride_weights_and_biases
? gemm_config->packed_stride_weights_and_biases(
-                  gemm_config, k, k_stride, extra_bytes)
+                  gemm_config, k, /*unused_block_size=*/0, k_stride, extra_bytes)
: (k_stride << XNN_LOG2_SIZEOF_INT8_T) + extra_bytes +
sizeof(int32_t);
batch_matrix_multiply_op->weights_stride = weights_stride;
@@ -347,7 +349,9 @@ enum xnn_status create_batch_matrix_multiply_nc_qx8_f32_qc8w(
batch_matrix_multiply_op->flags ^ XNN_FLAG_TRANSPOSE_WEIGHTS,
gemm_config, /*input_channels=*/k,
/*output_channels=*/n,
-        /*groups=*/batch_size_b, k_stride,
+        /*groups=*/batch_size_b,
+        /*unused_block_size=*/0,
+        /*k_stride=*/k_stride,
/*accumulator_init=*/NULL,
/*weights=*/data_b,
/*int_extra_data0_fn=*/
1 change: 1 addition & 0 deletions src/operators/convolution-nhwc.c
@@ -372,6 +372,7 @@ static enum xnn_status create_gemm_or_igemm(
gemm_config->pack_weights_and_biases(
flags, gemm_config, group_input_channels, group_output_channels,
groups,
+        /*unused_block_size=*/0,
k_stride,
/*accumulator_init=*/bias,
/*weights=*/kernel,
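Taken together, the operator-side hunks show the design choice being made here: one packing entry point serves blockwise and non-blockwise kernels alike, and a block size of 0 acts as the "not blockwise" sentinel. The toy below is only a self-contained model of that dispatch idea, not XNNPACK's actual API or layout; every name and the stride formula in it are invented for illustration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

// Toy stride helper: one function covers both cases, keyed on block_size.
static size_t toy_packed_stride(size_t k, size_t block_size, size_t k_stride,
                                size_t extra_bytes) {
  if (block_size == 0) {
    // Non-blockwise: weights for one output channel plus a single bias.
    return k_stride + extra_bytes + sizeof(int32_t);
  }
  // Blockwise: additionally reserve one scale per block of k (modeled as a float).
  const size_t num_blocks = (k + block_size - 1) / block_size;
  return k_stride + num_blocks * sizeof(float) + extra_bytes + sizeof(int32_t);
}

int main(void) {
  // Callers that are not blockwise simply pass 0, as in the hunks above.
  printf("channelwise stride: %zu\n", toy_packed_stride(256, /*block_size=*/0, 256, 0));
  printf("blockwise stride:   %zu\n", toy_packed_stride(256, /*block_size=*/32, 256, 0));
  return 0;
}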