pin update (pytorch#7273)
* Bump pytorch pin to 20241206

* remove args

---------

Co-authored-by: angelayi <[email protected]>
lucylq and angelayi authored Dec 20, 2024
1 parent b2a680b commit 59a2594
Showing 12 changed files with 44 additions and 47 deletions.
2 changes: 1 addition & 1 deletion .ci/docker/ci_commit_pins/pytorch.txt
@@ -1 +1 @@
-19eff28ff3f19b50da46f5a9ff5f4d4d213806fe
+2ea4b56ec872424e486c4fe2d55da061067a2ed3
2 changes: 1 addition & 1 deletion .ci/docker/common/install_pytorch.sh
@@ -26,7 +26,7 @@ install_pytorch_and_domains() {

chown -R ci-user .

-export _GLIBCXX_USE_CXX11_ABI=0
+export _GLIBCXX_USE_CXX11_ABI=1
# Then build and install PyTorch
conda_run python setup.py bdist_wheel
pip_install "$(echo dist/*.whl)"
1 change: 0 additions & 1 deletion .ci/scripts/test_llava.sh
@@ -41,7 +41,6 @@ EXECUTORCH_COMMON_CMAKE_ARGS=" \
-DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
-DEXECUTORCH_BUILD_XNNPACK=ON \
--DEXECUTORCH_DO_NOT_USE_CXX11_ABI=ON \
-DEXECUTORCH_XNNPACK_SHARED_WORKSPACE=ON"

cmake_install_executorch_libraries() {
13 changes: 0 additions & 13 deletions CMakeLists.txt
@@ -145,12 +145,6 @@ if(EXECUTORCH_ENABLE_EVENT_TRACER)
add_definitions(-DET_EVENT_TRACER_ENABLED)
endif()

-option(EXECUTORCH_DO_NOT_USE_CXX11_ABI "Define _GLIBCXX_USE_CXX11_ABI=0 if ON"
-OFF
-)
-if(EXECUTORCH_DO_NOT_USE_CXX11_ABI)
-add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
-endif()
# -ffunction-sections -fdata-sections: breaks function and data into sections so
# they can be properly gc'd. -s: strip symbol. -fno-exceptions -fno-rtti:
# disables exceptions and runtime type.
@@ -772,13 +766,6 @@ if(EXECUTORCH_BUILD_PYBIND)
-frtti
-fexceptions
)
-if(EXECUTORCH_DO_NOT_USE_CXX11_ABI)
-# libtorch is built with the old ABI, so we need to do the same for any
-# .cpp files that include torch, c10, or ATen targets. Note that PyTorch
-# nightly binary is built with _GLIBCXX_USE_CXX11_ABI set to 0 while its
-# CI build sets this to 1 (default)
-list(APPEND _pybind_compile_options -D_GLIBCXX_USE_CXX11_ABI=0)
-endif()

# util lib
add_library(
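Note: the CMake block deleted above existed because earlier PyTorch nightlies were built with the pre-C++11 libstdc++ ABI; with this pin bump, nightlies ship with `_GLIBCXX_USE_CXX11_ABI=1`, so the escape hatch can go. As a minimal sketch (not part of this diff), a downstream native build could derive the flag from the installed wheel instead of hardcoding it:

```python
# Minimal sketch, not part of this diff: derive the _GLIBCXX_USE_CXX11_ABI
# value from the installed PyTorch instead of hardcoding it. Any .cpp file
# that includes torch/c10/ATen headers must compile with the same value.
import torch

def cxx11_abi_flag() -> str:
    # torch.compiled_with_cxx11_abi() reports whether libtorch was built
    # with _GLIBCXX_USE_CXX11_ABI=1 (True on nightlies after this bump).
    abi = 1 if torch.compiled_with_cxx11_abi() else 0
    return f"-D_GLIBCXX_USE_CXX11_ABI={abi}"

print(cxx11_abi_flag())  # expected: -D_GLIBCXX_USE_CXX11_ABI=1
```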
@@ -74,8 +74,6 @@ def test_llama3_2_text_decoder_aoti(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            path = torch._inductor.aoti_compile_and_package(
                ep,
-                model.get_example_inputs(),
-                kwargs=model.get_example_kwarg_inputs(),
                package_path=os.path.join(tmpdir, "text_decoder.pt2"),
            )
            encoder_aoti = torch._inductor.aoti_load_package(path)
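Note: the "remove args" half of this commit tracks an upstream signature change: `torch._inductor.aoti_compile_and_package` now pulls example inputs from the `ExportedProgram` itself, so they are no longer passed explicitly. A hedged sketch of the updated call pattern (`TinyModel` and the package path are illustrative, not from the repo):

```python
# Sketch of the post-bump AOTI flow: example inputs live inside the
# ExportedProgram, so aoti_compile_and_package only needs the program and
# a package path. TinyModel is a hypothetical stand-in for the real models.
import os
import tempfile

import torch

class TinyModel(torch.nn.Module):
    def forward(self, x):
        return torch.relu(x)

ep = torch.export.export(TinyModel(), (torch.randn(2, 8),))

with tempfile.TemporaryDirectory() as tmpdir:
    path = torch._inductor.aoti_compile_and_package(
        ep,  # no args/kwargs here anymore; they come from `ep`
        package_path=os.path.join(tmpdir, "tiny.pt2"),
    )
    runner = torch._inductor.aoti_load_package(path)
    print(runner(torch.randn(2, 8)))
```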
@@ -36,7 +36,6 @@ def test_flamingo_vision_encoder(self) -> None:
        with tempfile.TemporaryDirectory() as tmpdir:
            path = torch._inductor.aoti_compile_and_package(
                ep,
-                model.get_example_inputs(),
                package_path=os.path.join(tmpdir, "vision_encoder.pt2"),
            )
            print(path)
1 change: 0 additions & 1 deletion examples/models/llava/CMakeLists.txt
@@ -81,7 +81,6 @@ if(LLAVA_RUNNER_NO_TORCH_DUMMY_IMAGE)
else()
find_package(Torch CONFIG REQUIRED)
endif()
-add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)

#
# llava_main: test binary to run llava, with tokenizer and sampler integrated
56 changes: 38 additions & 18 deletions extension/llm/export/builder.py
@@ -10,9 +10,11 @@

# pyre-unsafe

+import contextlib
import logging
from enum import Enum
from typing import Any, Callable, Dict, List, Optional
+from unittest.mock import patch

import torch
from executorch.backends.transforms.duplicate_dynamic_quant_chain import (
@@ -184,15 +186,23 @@ def export(self) -> "LLMEdgeManager":
        # 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
        with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
            if hasattr(self.args, "qnn") and self.args.qnn:
-                # TODO: this is temporary and export_for_training doesn't work with qnn either. We need a
-                # functional graph. See issue https://github.com/pytorch/executorch/pull/4627 for more details
-                exported_module = torch.export.export(
-                    self.model,
-                    self.example_inputs,
-                    self.example_kwarg_inputs,
-                    dynamic_shapes=dynamic_shape,
-                    strict=True,
-                )
+                # TODO: this is temporary, as qnn flow does not work with new, non-functional export IR.
+                # See issue: https://github.com/pytorch/executorch/issues/7373
+
+                with patch.object(
+                    torch._utils_internal,
+                    "export_training_ir_rollout_check",
+                    return_value=False,
+                ):
+                    # TODO: this is temporary and export_for_training doesn't work with qnn either. We need a
+                    # functional graph. See issue https://github.com/pytorch/executorch/pull/4627 for more details
+                    exported_module = torch.export.export(
+                        self.model,
+                        self.example_inputs,
+                        self.example_kwarg_inputs,
+                        dynamic_shapes=dynamic_shape,
+                        strict=True,
+                    )
            else:
                logging.info("Exporting with:")
                logging.info(f"inputs: {self.example_inputs}")
@@ -354,15 +364,25 @@ def export_to_edge(self) -> "LLMEdgeManager":
        if self.pre_autograd_graph_module is None:
            # Run export() if it didn't run
            self.export()
-        self.edge_manager = export_to_edge(
-            self.pre_autograd_graph_module,  # pyre-fixme[6]
-            self.example_inputs,
-            example_kwarg_inputs=self.example_kwarg_inputs,
-            dynamic_shapes=dynamic_shape,
-            edge_constant_methods=self.metadata,
-            edge_compile_config=edge_config,
-            verbose=self.verbose,
-        )
+
+        override_export_behaviour = contextlib.nullcontext()
+        if hasattr(self.args, "qnn") and self.args.qnn:
+            override_export_behaviour = patch.object(
+                torch._utils_internal,
+                "export_training_ir_rollout_check",
+                return_value=False,
+            )
+
+        with override_export_behaviour:
+            self.edge_manager = export_to_edge(
+                self.pre_autograd_graph_module,  # pyre-fixme[6]
+                self.example_inputs,
+                example_kwarg_inputs=self.example_kwarg_inputs,
+                dynamic_shapes=dynamic_shape,
+                edge_constant_methods=self.metadata,
+                edge_compile_config=edge_config,
+                verbose=self.verbose,
+            )
        return self

    def to_backend(self, partitioners: Optional[List[Partitioner]]) -> "LLMEdgeManager":
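Note: the builder change is the same trick in both hunks: force `torch._utils_internal.export_training_ir_rollout_check` to return False while exporting for QNN, and fall back to `contextlib.nullcontext()` so every other path runs untouched. A distilled, self-contained sketch of that conditional-context-manager pattern (the `settings` object and `strict_checks` below are made-up names, not ExecuTorch or PyTorch APIs):

```python
# Distilled sketch of the pattern above: select a real patch or a no-op
# context at runtime, then run identical code under either.
import contextlib
import types
from unittest.mock import patch

settings = types.SimpleNamespace(strict_checks=lambda: True)

def run_export(use_qnn: bool) -> bool:
    override = contextlib.nullcontext()
    if use_qnn:
        # patch.object swaps the attribute for a mock returning False, and
        # restores the original when the `with` block exits.
        override = patch.object(settings, "strict_checks", return_value=False)
    with override:
        return settings.strict_checks()

print(run_export(use_qnn=False))  # True: real check still in place
print(run_export(use_qnn=True))   # False: patched only inside the context
```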
1 change: 0 additions & 1 deletion extension/llm/modules/test/test_position_embeddings.py
@@ -177,7 +177,6 @@ def test_tiled_token_positional_embedding_aoti(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            path = torch._inductor.aoti_compile_and_package(
                tpe_ep,
-                (self.x, self.aspect_ratio),
                package_path=os.path.join(tmpdir, "tpe.pt2"),
            )
            tpe_aoti = load_package(path)
6 changes: 3 additions & 3 deletions install_requirements.py
@@ -112,7 +112,7 @@ def python_is_compatible():
# NOTE: If a newly-fetched version of the executorch repo changes the value of
# NIGHTLY_VERSION, you should re-run this script to install the necessary
# package versions.
NIGHTLY_VERSION = "dev20241112"
NIGHTLY_VERSION = "dev20241218"

# The pip repository that hosts nightly torch packages.
TORCH_NIGHTLY_URL = "https://download.pytorch.org/whl/nightly/cpu"
@@ -124,7 +124,7 @@ def python_is_compatible():
    # been installed on CI before this step, so pip won't reinstall them
    f"torch==2.6.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torch",
    (
-        f"torchvision==0.20.0.{NIGHTLY_VERSION}"
+        f"torchvision==0.22.0.{NIGHTLY_VERSION}"
        if USE_PYTORCH_NIGHTLY
        else "torchvision"
    ),  # For testing.
@@ -135,7 +135,7 @@ def python_is_compatible():
# TODO: Make each example publish its own requirements.txt
EXAMPLES_REQUIREMENTS = [
    "timm==1.0.7",
-    f"torchaudio==2.5.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torchaudio",
+    f"torchaudio==2.6.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torchaudio",
    "torchsr==1.0.4",
    "transformers==4.46.1",
]
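Note: with three packages pinned to the same nightly date, a drifted environment is the usual failure mode after a bump like this. A small sanity-check sketch (not part of install_requirements.py) that confirms the installed wheels carry the pinned suffix:

```python
# Sanity-check sketch, not part of this diff: confirm the installed
# torch-family wheels carry the pinned nightly suffix.
from importlib import metadata

NIGHTLY_VERSION = "dev20241218"  # mirrors the pin above

for pkg in ("torch", "torchvision", "torchaudio"):
    try:
        version = metadata.version(pkg)
    except metadata.PackageNotFoundError:
        print(f"{pkg}: not installed")
        continue
    status = "ok" if NIGHTLY_VERSION in version else "MISMATCH"
    print(f"{pkg} {version}: {status}")
```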
2 changes: 0 additions & 2 deletions third-party/TARGETS
@@ -298,7 +298,6 @@ prebuilt_cxx_library(
name = "libtorch",
shared_lib = ":libtorch_gen[libtorch]",
exported_preprocessor_flags = [
"-D_GLIBCXX_USE_CXX11_ABI=0", # `libtorch` is built without CXX11_ABI so any target depends on it need to use the same build config.
"-I$(location :libtorch_gen[include])", # include header directories
"-I$(location :libtorch_gen[include])/torch/csrc/api/include", # include header directories
],
@@ -318,7 +317,6 @@ prebuilt_cxx_library(
name = "libtorch_python",
shared_lib = ":libtorch_gen[libtorch_python]",
exported_preprocessor_flags = [
"-D_GLIBCXX_USE_CXX11_ABI=0", # `libtorch` is built without CXX11_ABI so any target depends on it need to use the same build config.
"-I$(location :libtorch_gen[include])", # include header directories
"-I$(location :libtorch_gen[include])/torch/csrc/api/include", # include header directories
],
4 changes: 1 addition & 3 deletions third-party/gtest_defs.bzl
@@ -4,9 +4,7 @@ COMPILER_FLAGS = [
"-std=c++17",
]
COMPILER_FLAGS_ATEN = [
"-std=c++17",
"-D_GLIBCXX_USE_CXX11_ABI=0", # `libtorch` is built without CXX11_ABI so gtest needs to be compiled in the same way
]
"-std=c++17",]

# define_gtest_targets
def define_gtest_targets():
