Commit

[CodeStyle][Typos][S-[34-35]] Fix typos (`spliting`, `Spliting`, `splited`, `splitted`, `Splited`) (#71008)
MrXnneHang authored Feb 5, 2025
1 parent 073b788 commit 1190d8a
Showing 5 changed files with 11 additions and 14 deletions.
7 changes: 2 additions & 5 deletions _typos.toml
@@ -44,6 +44,8 @@ kinf = 'kinf'
Optin = 'Optin'
padd = 'padd'
pash = 'pash'
+splited = 'splited'
+splitted = 'splitted'
statis = 'statis'
UNEXPECT = 'UNEXPECT'
tood = 'tood'
@@ -84,11 +86,6 @@ Patial = 'Patial'
specificed = 'specificed'
splite = 'splite'
spliter = 'spliter'
-spliting = 'spliting'
-Spliting = 'Spliting'
-splited = 'splited'
-splitted = 'splitted'
-Splited = 'Splited'
sqaure = 'sqaure'
sequeze = 'sequeze'
starup = 'starup'
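Note on the config semantics: in `_typos.toml`, an entry of the form `word = 'word'` maps a flagged spelling to itself, which tells the typos checker to accept it, i.e. a temporary allowlist of known typos. These two hunks drop `spliting`, `Spliting`, and `Splited` from the allowlist entirely, so the checker will flag them from now on, while `splited` and `splitted` are only moved to the earlier block and remain accepted for the moment.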
2 changes: 1 addition & 1 deletion paddle/cinn/backends/codegen_device_util.h
@@ -118,7 +118,7 @@ struct CollectHostFunctionVisitor : public ir::IRMutator<> {
ir::Var kernel_stream(KERNEL_STREAM, type_of<void*>());

// shared_mem_bytes Can be calculated after codegen_cuda_dev buffer creation
-// however, this make CodeGenCudaDev before spliting the host and device
+// however, this make CodeGenCudaDev before splitting the host and device
// module Maybe we could reorder the process.
std::optional<Expr> shared_mem_bytes;
cinn::common::DefaultDeviceTarget().arch.Match(
2 changes: 1 addition & 1 deletion paddle/cinn/operator_fusion/graph_transformer/operation.h
@@ -49,7 +49,7 @@ struct MergeTrivialPatternOperation {
graph->iters_fusion_policy()->SingleDownstreamItersFusion(
upstream, downstream));
graph->RemoveNode(downstream);
VLOG(4) << "Spliting trivial pattern: \nupstream "
VLOG(4) << "Splitting trivial pattern: \nupstream "
<< upstream->DebugStr() << "\ndownstream "
<< downstream->DebugStr() << "\nmerged "
<< merged_node->DebugStr();
6 changes: 3 additions & 3 deletions paddle/phi/infermeta/unary.cc
@@ -4314,7 +4314,7 @@ void FillSplitOutDims(const MetaTensor& x,
}
for (size_t i = 0; i < sections_vec.size(); ++i) {
if (axis_value != 0) {
-// Only pass LoD when not spliting along the first dim.
+// Only pass LoD when not splitting along the first dim.
(*out)[i]->set_dtype(x.dtype());
(*out)[i]->set_dims(out_dims[i]);
(*out)[i]->set_layout(x.layout());
@@ -4364,7 +4364,7 @@ void SplitInferMeta(const MetaTensor& x,
}
for (size_t i = 0; i < sections_data.size(); ++i) {
if (axis_value != 0) {
-// Only pass LoD when not spliting along the first dim.
+// Only pass LoD when not splitting along the first dim.
out[i]->set_dtype(x.dtype());
out[i]->set_dims(out_dims[i]);
out[i]->set_layout(x.layout());
@@ -4455,7 +4455,7 @@ void SplitWithNumInferMeta(const MetaTensor& x,
}
for (int i = 0; i < num; ++i) {
if (axis_value != 0) {
-// Only pass LoD when not spliting along the first dim.
+// Only pass LoD when not splitting along the first dim.
out[i]->set_dtype(x.dtype());
out[i]->set_dims(out_dims[i]);
out[i]->set_layout(x.layout());
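For context on the comment these hunks touch: LoD (Paddle's "level of detail" metadata) describes how rows along the first dimension group into sequences, so it only survives a split that leaves dim 0 untouched. A minimal sketch with the public paddle.split API (shapes are illustrative, not from the source):

import paddle

x = paddle.arange(12, dtype='float32').reshape([3, 4])

# Splitting along axis 1 keeps the first dimension intact, so any
# sequence (LoD) information attached to dim 0 still applies.
parts = paddle.split(x, num_or_sections=[1, 3], axis=1)
print([tuple(p.shape) for p in parts])  # [(3, 1), (3, 3)]

# Splitting along axis 0 changes the first dimension, so the input's
# LoD no longer describes the outputs and is not passed through.
rows = paddle.split(x, num_or_sections=[1, 2], axis=0)
print([tuple(r.shape) for r in rows])  # [(1, 4), (2, 4)]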
8 changes: 4 additions & 4 deletions python/paddle/distributed/passes/pass_utils.py
@@ -1788,11 +1788,11 @@ def split_matmul_grad_to_matmul(
tran_x = matmul_grad_op.attr("trans_x")
assert (
not tran_x
), f"matmul_grad(id={matmul_grad_id}) with tran_x == True is not supported for spliting matmul_grad to matmul"
), f"matmul_grad(id={matmul_grad_id}) with tran_x == True is not supported for splitting matmul_grad to matmul"
tran_y = matmul_grad_op.attr("trans_y")
assert (
not tran_y
), f"matmul_grad(id={matmul_grad_id}) with tran_y == True is not supported for spliting matmul_grad to matmul"
), f"matmul_grad(id={matmul_grad_id}) with tran_y == True is not supported for splitting matmul_grad to matmul"

x = matmul_grad_op.input("X")
y = matmul_grad_op.input("Y")
@@ -1927,11 +1927,11 @@ def _pir_split_matmul_grad_to_matmul(block, matmul_grad_id):

assert not matmul_grad_op.has_attr(
"trans_x"
), f"matmul_grad(id={matmul_grad_id}) with tran_x == True is not supported for spliting matmul_grad to matmul"
), f"matmul_grad(id={matmul_grad_id}) with tran_x == True is not supported for splitting matmul_grad to matmul"

assert not matmul_grad_op.has_attr(
"trans_y"
), f"matmul_grad(id={matmul_grad_id}) with tran_y == True is not supported for spliting matmul_grad to matmul"
), f"matmul_grad(id={matmul_grad_id}) with tran_y == True is not supported for splitting matmul_grad to matmul"

x = matmul_grad_op.operand_source(0)
y = matmul_grad_op.operand_source(1)
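Background on why these asserts require the transpose attributes to be off: with trans_x == trans_y == False, the gradient of out = x @ y decomposes into two ordinary matmuls, which is what lets this pass replace one matmul_grad with two matmul ops. A minimal NumPy sketch of that identity (shapes are illustrative):

import numpy as np

# Forward: out = x @ y
x = np.random.rand(2, 3)
y = np.random.rand(3, 4)
dout = np.random.rand(2, 4)  # upstream gradient w.r.t. out

# matmul_grad with trans_x == trans_y == False is equivalent to:
dx = dout @ y.T  # gradient w.r.t. x, shape (2, 3)
dy = x.T @ dout  # gradient w.r.t. y, shape (3, 4)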
