[Paddle Toolkit Development Competition No.1] Adapt paddle-geometric to Paddle #1058

Open

wants to merge 1 commit into base: develop
58 changes: 58 additions & 0 deletions jointContribution/paddle_geometric/paddle_geometric/__init__.py
@@ -0,0 +1,58 @@
from collections import defaultdict

import paddle
import paddle_geometric.typing

from ._compile import compile, is_compiling
from ._onnx import is_in_onnx_export
from .index import Index
from .edge_index import EdgeIndex
from .pytree import pytree
from .seed import seed_everything
from .home import get_home_dir, set_home_dir
from .device import is_mps_available, is_xpu_available, device
from .isinstance import is_paddle_instance
from .debug import is_debug_enabled, debug, set_debug

import paddle_geometric.utils
import paddle_geometric.data
import paddle_geometric.sampler
import paddle_geometric.loader
import paddle_geometric.transforms
import paddle_geometric.datasets
import paddle_geometric.nn
import paddle_geometric.explain
import paddle_geometric.profile

from .experimental import (is_experimental_mode_enabled, experimental_mode,
                           set_experimental_mode)
from .lazy_loader import LazyLoader

contrib = LazyLoader('contrib', globals(), 'paddle_geometric.contrib')
graphgym = LazyLoader('graphgym', globals(), 'paddle_geometric.graphgym')

__version__ = '2.7.0'

__all__ = [
    'Index',
    'EdgeIndex',
    'seed_everything',
    'get_home_dir',
    'set_home_dir',
    'compile',
    'is_compiling',
    'is_in_onnx_export',
    'is_mps_available',
    'is_xpu_available',
    'device',
    'is_paddle_instance',
    'is_debug_enabled',
    'debug',
    'set_debug',
    'is_experimental_mode_enabled',
    'experimental_mode',
    'set_experimental_mode',
    'paddle_geometric',
    '__version__',
]
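
This `__init__.py` only wires up re-exports and lazy submodule loading. A minimal smoke-test sketch of the resulting package surface, assuming the package is importable (only names re-exported above are used):

```python
import paddle_geometric

paddle_geometric.seed_everything(42)         # one call to seed all RNGs
print(paddle_geometric.__version__)          # '2.7.0'
print(paddle_geometric.is_debug_enabled())   # False unless debug mode is on

# `contrib` and `graphgym` are LazyLoader proxies: the underlying module is
# only imported on first attribute access.
```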

36 changes: 36 additions & 0 deletions jointContribution/paddle_geometric/paddle_geometric/_compile.py
@@ -0,0 +1,36 @@
import warnings
from typing import Any, Callable, Optional, Union

import paddle

import paddle_geometric.typing


def is_compiling() -> bool:
    r"""Returns :obj:`True` in case PaddlePaddle is compiling via
    :meth:`paddle.jit.to_static`.
    """
    # This port stubs the check and always reports `False`:
    return False  # pragma: no cover


def compile(
    model: Optional[paddle.nn.Layer] = None,
    *args: Any,
    **kwargs: Any,
) -> Union[paddle.nn.Layer, Callable[[paddle.nn.Layer], paddle.nn.Layer]]:
    r"""Optimizes the given :pyg:`PyG` model/function via
    :meth:`paddle.jit.to_static`.
    This function has the same signature as :meth:`paddle.jit.to_static`.

    Args:
        model: The model to compile.
        *args: Additional arguments of :meth:`paddle.jit.to_static`.
        **kwargs: Additional keyword arguments of
            :meth:`paddle.jit.to_static`.

    .. note::
        :meth:`paddle_geometric.compile` is deprecated in favor of
        :meth:`paddle.jit.to_static`.
    """
    warnings.warn("'paddle_geometric.compile' is deprecated in favor of "
                  "'paddle.jit.to_static'")
    return paddle.jit.to_static(model, *args, **kwargs)
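
Because `compile` is a thin deprecation shim, migrating is a one-line change. A minimal sketch of both spellings; the `TinyModel` layer is a hypothetical stand-in, not part of this PR:

```python
import paddle
import paddle_geometric


class TinyModel(paddle.nn.Layer):  # hypothetical toy model
    def __init__(self) -> None:
        super().__init__()
        self.lin = paddle.nn.Linear(16, 4)

    def forward(self, x: paddle.Tensor) -> paddle.Tensor:
        return self.lin(x)


model = paddle_geometric.compile(TinyModel())  # warns, then wraps via to_static
model = paddle.jit.to_static(TinyModel())      # equivalent, warning-free spelling
out = model(paddle.randn([8, 16]))
```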
14 changes: 14 additions & 0 deletions jointContribution/paddle_geometric/paddle_geometric/_onnx.py
@@ -0,0 +1,14 @@
import paddle

from paddle_geometric import is_compiling


def is_in_onnx_export() -> bool:
    r"""Returns :obj:`True` in case PaddlePaddle is exporting to ONNX via
    :meth:`paddle.onnx.export`.
    """
    if is_compiling():
        return False
    # Paddle has no exact equivalent of `torch.jit.is_scripting`; checking
    # for static-graph mode is the closest substitute:
    if not paddle.in_dynamic_mode():
        return False
    return paddle.onnx.is_in_onnx_export()
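
The guard exists so that layers can pick exporter-friendly branches at trace time. A minimal sketch of that pattern; `zero_masked` is a hypothetical helper, not part of this PR:

```python
import paddle

from paddle_geometric import is_in_onnx_export


def zero_masked(x: paddle.Tensor, mask: paddle.Tensor) -> paddle.Tensor:
    if is_in_onnx_export():
        # Export-friendly branch: plain arithmetic instead of `where`,
        # which some exporter/op-set combinations handle poorly.
        return x * (1.0 - mask.astype(x.dtype))
    return paddle.where(mask, paddle.zeros_like(x), x)
```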
55 changes: 55 additions & 0 deletions jointContribution/paddle_geometric/paddle_geometric/backend.py
@@ -0,0 +1,55 @@
from typing import Optional

import paddle

# If set to `True`, PyG is configured to use the `segment_matmul` and
# `grouped_matmul` kernels from `pyg-lib` to parallelize matrix multiplication
# across segments/groups of potentially varying size.
# If set to `None`, will automatically decide whether to utilize
# `segment_matmul` and `grouped_matmul` based on input sizes.
# Requires `pyg-lib` to be installed.
use_segment_matmul: Optional[bool] = None

# Helper functions ############################################################


def use_segment_matmul_heuristic(
    num_segments: int,
    max_segment_size: int,
    in_channels: int,
    out_channels: int,
) -> bool:
    r"""A heuristic based on input sizes to determine whether the usage of
    :meth:`segment_matmul` can speed up computation.
    """
    # NOTE This heuristic was learned on an A100 via sklearn using a simple
    # StandardScaler() -> LinearSVC() model.
    # For now, it is only used in combination with `RGCNConv`.
    x = paddle.to_tensor([
        num_segments,
        max_segment_size,
        in_channels,
        out_channels,
    ], dtype="float32")
    mean = paddle.to_tensor([
        125.11603189,
        12133.21523472,
        163.81222321,
        32.43755536,
    ], dtype="float32")
    std = paddle.to_tensor([
        163.34480422,
        27572.94543809,
        177.6426489,
        56.82103934,
    ], dtype="float32")
    weight = paddle.to_tensor([
        2.43877659e+00,
        1.67583047e+00,
        -5.20527282e-04,
        3.43925501e-01,
    ], dtype="float32")
    bias = 1.20236999

    x = (x - mean) / std
    return bool(paddle.matmul(x, weight) >= bias)
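
The module-level flag and the heuristic are meant to work together: `None` means "decide per call". A minimal sketch with illustrative (hypothetical) workload sizes:

```python
from paddle_geometric import backend

# Hypothetical workload: 8 relation types, largest segment has 4096 rows.
use_it = backend.use_segment_matmul
if use_it is None:  # auto mode: consult the learned linear model
    use_it = backend.use_segment_matmul_heuristic(
        num_segments=8,
        max_segment_size=4096,
        in_channels=64,
        out_channels=32,
    )
print(use_it)  # True iff the standardized features clear the decision boundary
```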
113 changes: 113 additions & 0 deletions jointContribution/paddle_geometric/paddle_geometric/config_mixin.py
@@ -0,0 +1,113 @@
import inspect
from dataclasses import fields, is_dataclass
from importlib import import_module
from typing import Any, Dict

from paddle_geometric.config_store import (
class_from_dataclass,
dataclass_from_class,
)
from paddle_geometric.isinstance import is_paddle_instance


class ConfigMixin:
    r"""Enables a class to serialize/deserialize itself to a dataclass."""
    def config(self) -> Any:
        r"""Creates a serializable configuration of the class."""
        data_cls = dataclass_from_class(self.__class__)
        if data_cls is None:
            raise ValueError(f"Could not find the configuration class that "
                             f"belongs to '{self.__class__.__name__}'. Please "
                             f"register it in the configuration store.")

        kwargs: Dict[str, Any] = {}
        for field in fields(data_cls):
            if not hasattr(self, field.name):
                continue
            kwargs[field.name] = _recursive_config(getattr(self, field.name))
        return data_cls(**kwargs)

    @classmethod
    def from_config(cls, cfg: Any, *args: Any, **kwargs: Any) -> Any:
        r"""Instantiates the class from a serializable configuration."""
        if getattr(cfg, '_target_', None):
            cls = _locate_cls(cfg._target_)
        elif isinstance(cfg, dict) and '_target_' in cfg:
            cls = _locate_cls(cfg['_target_'])

        data_cls = cfg.__class__
        if not is_dataclass(data_cls):
            data_cls = dataclass_from_class(cls)
            if data_cls is None:
                raise ValueError(f"Could not find the configuration class "
                                 f"that belongs to '{cls.__name__}'. Please "
                                 f"register it in the configuration store.")

        field_names = {field.name for field in fields(data_cls)}
        if isinstance(cfg, dict):
            _kwargs = {k: v for k, v in cfg.items() if k in field_names}
            cfg = data_cls(**_kwargs)
        assert is_dataclass(cfg)

        if len(args) > 0:  # Convert `*args` to `**kwargs`:
            param_names = list(inspect.signature(cls).parameters.keys())
            if 'args' in param_names:
                param_names.remove('args')
            if 'kwargs' in param_names:
                param_names.remove('kwargs')

            for name, arg in zip(param_names, args):
                kwargs[name] = arg

        for key in field_names:
            if key not in kwargs and key != '_target_':
                kwargs[key] = _recursive_from_config(getattr(cfg, key))

        return cls(**kwargs)


def _recursive_config(value: Any) -> Any:
    if isinstance(value, ConfigMixin):
        return value.config()
    if is_paddle_instance(value, ConfigMixin):
        return value.config()
    if isinstance(value, (tuple, list)):
        return [_recursive_config(v) for v in value]
    if isinstance(value, dict):
        return {k: _recursive_config(v) for k, v in value.items()}
    return value


def _recursive_from_config(value: Any) -> Any:
    cls: Any = None
    if is_dataclass(value):
        if getattr(value, '_target_', None):
            try:
                cls = _locate_cls(value._target_)  # type: ignore
            except ImportError:
                pass  # Keep the dataclass as it is.
        else:
            cls = class_from_dataclass(value.__class__)
    elif isinstance(value, dict) and '_target_' in value:
        cls = _locate_cls(value['_target_'])

    if cls is not None and issubclass(cls, ConfigMixin):
        return cls.from_config(value)
    if isinstance(value, (tuple, list)):
        return [_recursive_from_config(v) for v in value]
    if isinstance(value, dict):
        return {k: _recursive_from_config(v) for k, v in value.items()}
    return value


def _locate_cls(qualname: str) -> Any:
    parts = qualname.split('.')

    if len(parts) <= 1:
        raise ValueError(f"Qualified name is missing a dot (got '{qualname}')")

    if any(len(part) == 0 for part in parts):
        raise ValueError(f"Relative imports not supported (got '{qualname}')")

    module_name, cls_name = '.'.join(parts[:-1]), parts[-1]
    return getattr(import_module(module_name), cls_name)
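
Putting the mixin together: `config()` snapshots constructor arguments into the registered dataclass, and `from_config()` rebuilds an instance from that dataclass, a plain dict, or a `_target_`-qualified dict. A minimal sketch, assuming a hypothetical `MyEncoder` class that derives from `ConfigMixin` and has been registered in the configuration store:

```python
from dataclasses import asdict

encoder = MyEncoder(hidden_channels=64)       # hypothetical registered class

cfg = encoder.config()                        # dataclass snapshot of the kwargs
clone = MyEncoder.from_config(cfg)            # rebuild from the dataclass
clone = MyEncoder.from_config(asdict(cfg))    # plain dicts work as well

# With a `_target_` entry, `from_config` resolves the class via `_locate_cls`:
other = ConfigMixin.from_config({
    '_target_': 'my_package.MyEncoder',       # hypothetical qualified name
    'hidden_channels': 64,
})
```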