Commit 98bd481: resolve conflicts

changeyoung98 committed Apr 25, 2024
2 parents e595ba4 + 2c9dd33
Showing 587 changed files with 13,708 additions and 10,489 deletions.
1 change: 1 addition & 0 deletions .github/CODEOWNERS
@@ -0,0 +1 @@
python/requirements.txt @phlrain @jzhang533 @kolinwei
15 changes: 0 additions & 15 deletions cmake/external/pybind11.cmake
@@ -17,25 +17,11 @@ include(ExternalProject)
set(PYBIND_PREFIX_DIR ${THIRD_PARTY_PATH}/pybind)
set(PYBIND_SOURCE_DIR ${PYBIND_PREFIX_DIR}/src/extern_pybind)
set(PYBIND_INCLUDE_DIR ${PYBIND_SOURCE_DIR}/include)
set(PYBIND_TAG v2.10.3)
set(SOURCE_DIR ${PADDLE_SOURCE_DIR}/third_party/pybind)
set(SOURCE_INCLUDE_DIR ${SOURCE_DIR}/include)

include_directories(${PYBIND_INCLUDE_DIR})

set(PYBIND_PATCH_COMMAND "")
if(NOT WIN32)
file(TO_NATIVE_PATH ${PADDLE_SOURCE_DIR}/patches/pybind/cast.h.patch
native_dst)
# Note: [Why call some `git` commands before `patch`?]
# Paddle's CI uses a cache to accelerate the build. However, patching can fail in two scenarios:
# 1. Patching the wrong version: the cached tag falls behind PYBIND_TAG; `git checkout ${PYBIND_TAG}` fixes this.
# 2. Patching twice: the cached tag == PYBIND_TAG, but the patch has already been applied to the cache.
set(PYBIND_PATCH_COMMAND
git checkout -- . && git checkout ${PYBIND_TAG} && patch -Nd
${SOURCE_INCLUDE_DIR}/pybind11 < ${native_dst})
endif()

ExternalProject_Add(
extern_pybind
${EXTERNAL_PROJECT_LOG_ARGS} ${SHALLOW_CLONE}
@@ -47,7 +33,6 @@ ExternalProject_Add(
# third-party library version changes cannot be incorporated.
# reference: https://cmake.org/cmake/help/latest/module/ExternalProject.html
UPDATE_COMMAND ""
PATCH_COMMAND ${PYBIND_PATCH_COMMAND}
CONFIGURE_COMMAND ""
# I intentionally preserved an extern_pybind/include/pybind11 directory
# to site-packages, so that you could discern that you intended to
2 changes: 1 addition & 1 deletion cmake/external/xpu.cmake
@@ -32,7 +32,7 @@ if(NOT DEFINED XPU_XDNN_BASE_DATE)
set(XPU_XDNN_BASE_DATE "20240327")
endif()
if(NOT DEFINED XPU_XHPC_BASE_DATE)
set(XPU_XHPC_BASE_DATE "20240413")
set(XPU_XHPC_BASE_DATE "20240422")
endif()
set(XPU_XCCL_BASE_VERSION "1.2.0.5")
if(NOT DEFINED XPU_XFT_BASE_VERSION)
3 changes: 3 additions & 0 deletions cmake/simd.cmake
@@ -123,6 +123,9 @@ int main()
return 0;
}"
AVX512F_FOUND)
if(AVX512F_FOUND)
add_definitions(-DPADDLE_WITH_AVX512F)
endif()

set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_RETAINED})
mark_as_advanced(MMX_FOUND SSE2_FOUND SSE3_FOUND AVX_FOUND AVX2_FOUND
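For context, a sketch of how a feature macro like PADDLE_WITH_AVX512F is typically consumed on the C++ side. The kernel below is illustrative only (AddInPlace is a hypothetical name; only the macro itself comes from this diff), and the vector path assumes compilation with -mavx512f:

// Illustrative sketch: dispatch on the PADDLE_WITH_AVX512F macro that the
// CMake check above defines when AVX-512F is available.
#if defined(PADDLE_WITH_AVX512F)
#include <immintrin.h>
#endif

void AddInPlace(float* x, const float* y, int n) {
#if defined(PADDLE_WITH_AVX512F)
  int i = 0;
  for (; i + 16 <= n; i += 16) {  // AVX-512: 16 floats per iteration
    __m512 a = _mm512_loadu_ps(x + i);
    __m512 b = _mm512_loadu_ps(y + i);
    _mm512_storeu_ps(x + i, _mm512_add_ps(a, b));
  }
  for (; i < n; ++i) x[i] += y[i];  // scalar tail
#else
  for (int i = 0; i < n; ++i) x[i] += y[i];  // portable fallback
#endif
}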
12 changes: 12 additions & 0 deletions paddle/cinn/ast_gen_ius/ast_gen.cc
@@ -28,6 +28,10 @@ PD_DECLARE_bool(cinn_bucket_compile);
namespace cinn {
namespace ast_gen_ius {

bool IsReduceBool(const ir::Expr& lhs, const ir::Expr& rhs) {
return lhs.type().is_bool() || rhs.type().is_bool();
}

ir::Expr ConvertReduceBody(ir::Expr body,
ir::Tensor tensor,
const std::vector<Expr>& axis_exprs) {
@@ -38,9 +42,17 @@ ir::Expr ConvertReduceBody(ir::Expr body,

switch (reduce_node->reduce_type) {
case ir::Reduce::kSum:
if (IsReduceBool(tensor(axis_exprs), reduce_node->body)) {
return ir::Store::Make(
tensor, tensor(axis_exprs) || reduce_node->body, axis_exprs);
}
return ir::Store::Make(
tensor, tensor(axis_exprs) + reduce_node->body, axis_exprs);
case ir::Reduce::kMul:
if (IsReduceBool(tensor(axis_exprs), reduce_node->body)) {
return ir::Store::Make(
tensor, tensor(axis_exprs) && reduce_node->body, axis_exprs);
}
return ir::Store::Make(
tensor, tensor(axis_exprs) * reduce_node->body, axis_exprs);
case ir::Reduce::kMax:
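The new kSum/kMul branches handle bool reductions, where arithmetic accumulation degenerates: a running sum of bools saturates to logical OR, and a running product to logical AND. A minimal standalone illustration of that equivalence (not CINN code; all names hypothetical):

#include <cassert>
#include <vector>

int main() {
  std::vector<bool> vals = {true, false, true};
  bool sum_as_or = false;   // bool analogue of "acc += v" under kSum
  bool mul_as_and = true;   // bool analogue of "acc *= v" under kMul
  for (bool v : vals) {
    sum_as_or = sum_as_or || v;
    mul_as_and = mul_as_and && v;
  }
  assert(sum_as_or);    // OR of {1, 0, 1} is true
  assert(!mul_as_and);  // AND is false once any element is false
  return 0;
}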
29 changes: 25 additions & 4 deletions paddle/cinn/frontend/paddle/cpp/block_desc.cc
@@ -13,18 +13,29 @@
// limitations under the License.

#include "paddle/cinn/frontend/paddle/cpp/block_desc.h"
#include "paddle/common/enforce.h"

namespace cinn::frontend::paddle::cpp {

template <>
VarDesc* BlockDesc::GetVar<VarDesc>(int32_t idx) {
CHECK_LT(idx, VarsSize()) << "idx >= vars.size()";
PADDLE_ENFORCE_LT(
idx,
VarsSize(),
phi::errors::InvalidArgument(
"The value of idx and vars.size() is incorrect."
"Expected idx < vars.size(), but receive idx >= vars.size()."));
return &vars_[idx];
}

template <>
const VarDesc& BlockDesc::GetConstVar<VarDesc>(int32_t idx) const {
CHECK_LT(idx, static_cast<int32_t>(VarsSize())) << "idx >= vars.size()";
PADDLE_ENFORCE_LT(
idx,
static_cast<int32_t>(VarsSize()),
phi::errors::InvalidArgument(
"The value of idx and vars.size() is incorrect."
"Expected idx < vars.size(), but receive idx >= vars.size()."));
return vars_[idx];
}

@@ -36,13 +47,23 @@ VarDesc* BlockDesc::AddVar<VarDesc>() {

template <>
OpDesc* BlockDesc::GetOp<OpDesc>(int32_t idx) {
CHECK_LT(idx, OpsSize()) << "idx >= ops.size()";
PADDLE_ENFORCE_LT(
idx,
OpsSize(),
phi::errors::InvalidArgument(
"The value of idx and ops.size() is incorrect."
"Expected idx < ops.size(), but receive idx >= ops.size()."));
return &ops_[idx];
}

template <>
const OpDesc& BlockDesc::GetConstOp<OpDesc>(int32_t idx) const {
CHECK_LT(idx, static_cast<int32_t>(OpsSize())) << "idx >= ops.size()";
PADDLE_ENFORCE_LT(
idx,
static_cast<int32_t>(OpsSize()),
phi::errors::InvalidArgument(
"The value of idx and ops.size() is incorrect."
"Expected idx < ops.size(), but receive idx >= ops.size()."));
return ops_[idx];
}

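The same CHECK_LT-to-PADDLE_ENFORCE_LT migration repeats across the descriptor classes in this commit. A rough standalone analogue of the pattern, simplified under the assumption that a descriptive error beats a bare CHECK crash (not Paddle's actual enforcement machinery; GetAtChecked is a hypothetical helper):

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

// Hypothetical bounds-checked accessor mirroring the new checks: report a
// descriptive message instead of aborting on a bare CHECK_LT.
template <typename T>
T& GetAtChecked(std::vector<T>& items, int32_t idx, const std::string& name) {
  if (idx >= static_cast<int32_t>(items.size())) {
    throw std::invalid_argument(
        "The value of idx and " + name + ".size() is incorrect. Expected "
        "idx < " + name + ".size(), but received idx >= " + name + ".size().");
  }
  return items[idx];
}

int main() {
  std::vector<int> vars = {1, 2, 3};
  return GetAtChecked(vars, 1, "vars");  // in range; idx >= 3 would throw
}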
15 changes: 13 additions & 2 deletions paddle/cinn/frontend/paddle/cpp/program_desc.cc
@@ -13,18 +13,29 @@
// limitations under the License.

#include "paddle/cinn/frontend/paddle/cpp/program_desc.h"
#include "paddle/common/enforce.h"

namespace cinn::frontend::paddle::cpp {

template <>
BlockDesc* ProgramDesc::GetBlock<BlockDesc>(int32_t idx) {
CHECK_LT(idx, BlocksSize()) << "idx >= blocks.size()";
PADDLE_ENFORCE_LT(
idx,
BlocksSize(),
phi::errors::InvalidArgument(
"The value of idx and blocks.size() is incorrect."
"Expected idx < blocks.size(), but receive idx >= blocks.size()."));
return &blocks_[idx];
}

template <>
const BlockDesc& ProgramDesc::GetConstBlock<BlockDesc>(int32_t idx) const {
CHECK_LT(idx, static_cast<int32_t>(BlocksSize())) << "idx >= blocks.size()";
PADDLE_ENFORCE_LT(
idx,
static_cast<int32_t>(BlocksSize()),
phi::errors::InvalidArgument(
"The value of idx and blocks.size() is incorrect."
"Expected idx < blocks.size(), but receive idx >= blocks.size()."));
return blocks_[idx];
}

4 changes: 3 additions & 1 deletion paddle/cinn/frontend/paddle/model_parser.cc
@@ -23,6 +23,7 @@
#include "paddle/cinn/backends/cuda_util.h"
#include "paddle/cinn/common/common.h"
#include "paddle/cinn/frontend/paddle/compatible_pb.h"
#include "paddle/common/enforce.h"

namespace cinn::frontend::paddle {

@@ -55,7 +56,8 @@ void TensorFromStream(std::istream &is,
using Type = framework_proto::VarType::Type;
uint32_t version;
is.read(reinterpret_cast<char *>(&version), sizeof(version));
CHECK_EQ(version, 0U) << "Only version 0 is supported";
PADDLE_ENFORCE_EQ(
version, 0U, phi::errors::InvalidArgument("Only version 0 is supported"));
// read tensor desc
framework_proto::VarType::TensorDesc desc;
{
15 changes: 13 additions & 2 deletions paddle/cinn/frontend/paddle/pb/block_desc.cc
@@ -13,13 +13,19 @@
// limitations under the License.

#include "paddle/cinn/frontend/paddle/pb/block_desc.h"
#include "paddle/common/enforce.h"

namespace cinn::frontend::paddle::pb {

template <>
framework_proto::VarDesc* BlockDesc::GetVar<framework_proto::VarDesc>(
int32_t idx) {
CHECK_LT(idx, VarsSize()) << "idx >= vars.size()";
PADDLE_ENFORCE_LT(
idx,
VarsSize(),
phi::errors::InvalidArgument(
"The value of idx and vars.size() is incorrect."
"Expected idx < vars.size(), but receive idx >= vars.size()."));
return desc_->mutable_vars(idx);
}

@@ -31,7 +37,12 @@ framework_proto::VarDesc* BlockDesc::AddVar<framework_proto::VarDesc>() {
template <>
framework_proto::OpDesc* BlockDesc::GetOp<framework_proto::OpDesc>(
int32_t idx) {
CHECK_LT(idx, OpsSize()) << "idx >= ops.size()";
PADDLE_ENFORCE_LT(
idx,
OpsSize(),
phi::errors::InvalidArgument(
"The value of idx and ops.size() is incorrect."
"Expected idx < ops.size(), but receive idx >= ops.size()."));
return desc_->mutable_ops(idx);
}

9 changes: 8 additions & 1 deletion paddle/cinn/frontend/paddle/pb/program_desc.cc
@@ -17,12 +17,19 @@
#include <algorithm>
#include <limits>

#include "paddle/common/enforce.h"

namespace cinn::frontend::paddle::pb {

template <>
framework_proto::BlockDesc* ProgramDesc::GetBlock<framework_proto::BlockDesc>(
int32_t idx) {
CHECK_LT(idx, BlocksSize()) << "idx >= blocks.size()";
PADDLE_ENFORCE_LT(
idx,
BlocksSize(),
phi::errors::InvalidArgument(
"The value of idx and blocks.size() is incorrect."
"Expected idx < blocks.size(), but receive idx >= blocks.size()."));
return desc_->mutable_blocks(idx);
}

10 changes: 10 additions & 0 deletions paddle/cinn/hlir/dialect/operator/ir/ops.yaml
@@ -9,6 +9,15 @@
param : [x, broadcast_axes]
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : gather
args : (Tensor x, Tensor index, int axis)
output : Tensor
infer_meta :
func : GatherInferMeta
kernel :
func : gather
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : isclose
args : (Tensor x, Tensor y, float rtol=1e-5, float atol=1e-8, bool equal_nan=false)
output : Tensor(out)
@@ -18,6 +27,7 @@
kernel :
func : isclose
data_type : x
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pool2d
args : (Tensor x, int[] kernel_size, int[] stride_size, int[] padding_size, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
@@ -107,6 +107,14 @@ bool ProcessOp(pir::Operation* op, pir::PatternRewriter* rewriter) {

if (x_dims != y_dims) {
auto output_shape = GetOutputShape(x_dims, y_dims);
pir::ShapeConstraintIRAnalysis& shape_analysis =
pir::ShapeAnalysisManager::Instance().Get(op->GetParentProgram());
std::vector<symbol::DimExpr> out_dim;
out_dim.reserve(output_shape.size());
for (auto d : output_shape) {
out_dim.emplace_back(d);
}

if (!IsSameDim(x_dims, output_shape)) {
// add broadcast to input 0
if (auto full_op = op->operand_source(0)
@@ -122,13 +130,18 @@ bool ProcessOp(pir::Operation* op, pir::PatternRewriter* rewriter) {
.dyn_cast<paddle::dialect::PlaceAttribute>()
.data());
op->operand(0).set_source(new_full->result(0));
shape_analysis.SetShapeOrDataForValue(
new_full.result(0), symbol::TensorShapeOrDataDimExprs(out_dim));
} else {
auto new_transpose_op = rewriter->Build<cinn::dialect::BroadcastOp>(
op->operand_source(0),
cinn::hlir::framework::pir::GetBroadcastAxis(x_dims, output_shape),
output_shape);

op->operand(0).set_source(new_transpose_op->result(0));
shape_analysis.SetShapeOrDataForValue(
new_transpose_op.result(0),
symbol::TensorShapeOrDataDimExprs(out_dim));
}
}

@@ -147,13 +160,18 @@ bool ProcessOp(pir::Operation* op, pir::PatternRewriter* rewriter) {
.data());

op->operand(1).set_source(new_full->result(0));
shape_analysis.SetShapeOrDataForValue(
new_full.result(0), symbol::TensorShapeOrDataDimExprs(out_dim));
} else {
auto new_transpose_op = rewriter->Build<cinn::dialect::BroadcastOp>(
op->operand_source(1),
cinn::hlir::framework::pir::GetBroadcastAxis(y_dims, output_shape),
output_shape);

op->operand(1).set_source(new_transpose_op->result(0));
shape_analysis.SetShapeOrDataForValue(
new_transpose_op.result(0),
symbol::TensorShapeOrDataDimExprs(out_dim));
}
}

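The SetShapeOrDataForValue calls added above maintain an invariant of the symbolic-shape pass: every value a rewrite creates must have its shape registered with the analysis, or later queries will miss it. A toy standalone model of that invariant (assumed names throughout; not the real pir API):

#include <cassert>
#include <cstdint>
#include <map>
#include <utility>
#include <vector>

using ValueId = int;
using Shape = std::vector<int64_t>;

struct ToyShapeAnalysis {
  std::map<ValueId, Shape> shapes;
  void SetShapeForValue(ValueId v, Shape s) { shapes[v] = std::move(s); }
  bool HasShape(ValueId v) const { return shapes.count(v) != 0; }
};

int main() {
  ToyShapeAnalysis analysis;
  ValueId new_broadcast = 42;  // stand-in for a newly built op's result
  Shape out_dim = {8, 16};     // stand-in for the computed output shape
  // As in the pass above: register the new value's shape immediately...
  analysis.SetShapeForValue(new_broadcast, out_dim);
  // ...so downstream queries succeed.
  assert(analysis.HasShape(new_broadcast));
  return 0;
}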