From 59a2594b9ef34b34930a76db3989dd209bb0e4c3 Mon Sep 17 00:00:00 2001
From: lucylq
Date: Thu, 19 Dec 2024 18:11:46 -0800
Subject: [PATCH] pin update (#7273)

* Bump pytorch pin to 20241206

* remove args

---------

Co-authored-by: angelayi
---
 .ci/docker/ci_commit_pins/pytorch.txt         |  2 +-
 .ci/docker/common/install_pytorch.sh          |  2 +-
 .ci/scripts/test_llava.sh                     |  1 -
 CMakeLists.txt                                | 13 -----
 .../text_decoder/test/test_text_decoder.py    |  2 -
 .../test/test_vision_encoder.py               |  1 -
 examples/models/llava/CMakeLists.txt          |  1 -
 extension/llm/export/builder.py               | 56 +++++++++++++------
 .../modules/test/test_position_embeddings.py  |  1 -
 install_requirements.py                       |  6 +-
 third-party/TARGETS                           |  2 -
 third-party/gtest_defs.bzl                    |  4 +-
 12 files changed, 44 insertions(+), 47 deletions(-)

diff --git a/.ci/docker/ci_commit_pins/pytorch.txt b/.ci/docker/ci_commit_pins/pytorch.txt
index d1e1e4843b..9c24945f7d 100644
--- a/.ci/docker/ci_commit_pins/pytorch.txt
+++ b/.ci/docker/ci_commit_pins/pytorch.txt
@@ -1 +1 @@
-19eff28ff3f19b50da46f5a9ff5f4d4d213806fe
+2ea4b56ec872424e486c4fe2d55da061067a2ed3
diff --git a/.ci/docker/common/install_pytorch.sh b/.ci/docker/common/install_pytorch.sh
index 5d271b2fd9..4bf3334868 100755
--- a/.ci/docker/common/install_pytorch.sh
+++ b/.ci/docker/common/install_pytorch.sh
@@ -26,7 +26,7 @@ install_pytorch_and_domains() {
   chown -R ci-user .

-  export _GLIBCXX_USE_CXX11_ABI=0
+  export _GLIBCXX_USE_CXX11_ABI=1
   # Then build and install PyTorch
   conda_run python setup.py bdist_wheel
   pip_install "$(echo dist/*.whl)"
diff --git a/.ci/scripts/test_llava.sh b/.ci/scripts/test_llava.sh
index a30143d895..a9e1313756 100644
--- a/.ci/scripts/test_llava.sh
+++ b/.ci/scripts/test_llava.sh
@@ -41,7 +41,6 @@ EXECUTORCH_COMMON_CMAKE_ARGS=" \
   -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \
   -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \
   -DEXECUTORCH_BUILD_XNNPACK=ON \
-  -DEXECUTORCH_DO_NOT_USE_CXX11_ABI=ON \
   -DEXECUTORCH_XNNPACK_SHARED_WORKSPACE=ON"

 cmake_install_executorch_libraries() {
diff --git a/CMakeLists.txt b/CMakeLists.txt
index ac8950bc1f..8a9102848d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -145,12 +145,6 @@ if(EXECUTORCH_ENABLE_EVENT_TRACER)
   add_definitions(-DET_EVENT_TRACER_ENABLED)
 endif()

-option(EXECUTORCH_DO_NOT_USE_CXX11_ABI "Define _GLIBCXX_USE_CXX11_ABI=0 if ON"
-       OFF
-)
-if(EXECUTORCH_DO_NOT_USE_CXX11_ABI)
-  add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
-endif()
 # -ffunction-sections -fdata-sections: breaks function and data into sections so
 # they can be properly gc'd. -s: strip symbol. -fno-exceptions -fno-rtti:
 # disables exceptions and runtime type.
@@ -772,13 +766,6 @@ if(EXECUTORCH_BUILD_PYBIND)
       -frtti
       -fexceptions
   )
-  if(EXECUTORCH_DO_NOT_USE_CXX11_ABI)
-    # libtorch is built with the old ABI, so we need to do the same for any
-    # .cpp files that include torch, c10, or ATen targets. Note that PyTorch
-    # nightly binary is built with _GLIBCXX_USE_CXX11_ABI set to 0 while its
-    # CI build sets this to 1 (default)
-    list(APPEND _pybind_compile_options -D_GLIBCXX_USE_CXX11_ABI=0)
-  endif()

   # util lib
   add_library(
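The ABI flip above reflects that pinned torch nightlies are now built with the new libstdc++ ABI, so ExecuTorch no longer needs to force `_GLIBCXX_USE_CXX11_ABI=0` on targets that link against libtorch. A minimal sketch (not part of this patch) for checking which ABI an installed wheel was built with, using the stock `torch.compiled_with_cxx11_abi()` helper:

    import torch

    # True means libtorch was built with _GLIBCXX_USE_CXX11_ABI=1 (the new
    # ABI), so C++ code including torch/c10/ATen headers must not define
    # _GLIBCXX_USE_CXX11_ABI=0; False means the opposite.
    print(torch.__version__)
    print("built with CXX11 ABI:", torch.compiled_with_cxx11_abi())
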
diff --git a/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py b/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py
index 8e678801b8..e6bf2ddd31 100644
--- a/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py
+++ b/examples/models/llama3_2_vision/text_decoder/test/test_text_decoder.py
@@ -74,8 +74,6 @@ def test_llama3_2_text_decoder_aoti(self) -> None:
         with tempfile.TemporaryDirectory() as tmpdir:
             path = torch._inductor.aoti_compile_and_package(
                 ep,
-                model.get_example_inputs(),
-                kwargs=model.get_example_kwarg_inputs(),
                 package_path=os.path.join(tmpdir, "text_decoder.pt2"),
             )
             encoder_aoti = torch._inductor.aoti_load_package(path)
diff --git a/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py b/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py
index c2f1e77cee..7721350014 100644
--- a/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py
+++ b/examples/models/llama3_2_vision/vision_encoder/test/test_vision_encoder.py
@@ -36,7 +36,6 @@ def test_flamingo_vision_encoder(self) -> None:
         with tempfile.TemporaryDirectory() as tmpdir:
             path = torch._inductor.aoti_compile_and_package(
                 ep,
-                model.get_example_inputs(),
                 package_path=os.path.join(tmpdir, "vision_encoder.pt2"),
             )
             print(path)
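With the newer pin, `torch._inductor.aoti_compile_and_package` takes the example inputs from the `ExportedProgram` itself, which is why the extra input arguments are dropped in the two tests above. A minimal end-to-end sketch of the updated flow, assuming a toy module in place of the real encoder/decoder models:

    import os
    import tempfile

    import torch

    # Toy module standing in for the models exercised by these tests.
    model = torch.nn.Linear(4, 4)
    example_inputs = (torch.randn(2, 4),)

    # Example inputs are captured inside the ExportedProgram at export time...
    ep = torch.export.export(model, example_inputs)

    with tempfile.TemporaryDirectory() as tmpdir:
        # ...so aoti_compile_and_package only needs the program and a destination.
        path = torch._inductor.aoti_compile_and_package(
            ep,
            package_path=os.path.join(tmpdir, "linear.pt2"),
        )
        compiled = torch._inductor.aoti_load_package(path)
        print(compiled(*example_inputs).shape)  # torch.Size([2, 4])
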
diff --git a/examples/models/llava/CMakeLists.txt b/examples/models/llava/CMakeLists.txt
index ed4cbc4634..f22b447153 100644
--- a/examples/models/llava/CMakeLists.txt
+++ b/examples/models/llava/CMakeLists.txt
@@ -81,7 +81,6 @@ if(LLAVA_RUNNER_NO_TORCH_DUMMY_IMAGE)
 else()
   find_package(Torch CONFIG REQUIRED)
 endif()
-add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)

 #
 # llava_main: test binary to run llava, with tokenizer and sampler integrated
diff --git a/extension/llm/export/builder.py b/extension/llm/export/builder.py
index ebc7f02ee1..7cab3c77b8 100644
--- a/extension/llm/export/builder.py
+++ b/extension/llm/export/builder.py
@@ -10,9 +10,11 @@

 # pyre-unsafe

+import contextlib
 import logging
 from enum import Enum
 from typing import Any, Callable, Dict, List, Optional
+from unittest.mock import patch

 import torch
 from executorch.backends.transforms.duplicate_dynamic_quant_chain import (
@@ -184,15 +186,23 @@ def export(self) -> "LLMEdgeManager":
         # 2. torch.no_grad() is for getting rid of the dropout (not sure why training ops will show up)
         with torch.nn.attention.sdpa_kernel([SDPBackend.MATH]), torch.no_grad():
             if hasattr(self.args, "qnn") and self.args.qnn:
-                # TODO: this is temporary and export_for_training doesn't work with qnn either. We need a
-                # functional graph. See issue https://github.com/pytorch/executorch/pull/4627 for more details
-                exported_module = torch.export.export(
-                    self.model,
-                    self.example_inputs,
-                    self.example_kwarg_inputs,
-                    dynamic_shapes=dynamic_shape,
-                    strict=True,
-                )
+                # TODO: this is temporary, as the qnn flow does not work with the new,
+                # non-functional export IR.
+                # See issue: https://github.com/pytorch/executorch/issues/7373
+
+                with patch.object(
+                    torch._utils_internal,
+                    "export_training_ir_rollout_check",
+                    return_value=False,
+                ):
+                    # TODO: this is temporary and export_for_training doesn't work with qnn either.
+                    # We need a functional graph.
+                    # See issue https://github.com/pytorch/executorch/pull/4627 for more details
+                    exported_module = torch.export.export(
+                        self.model,
+                        self.example_inputs,
+                        self.example_kwarg_inputs,
+                        dynamic_shapes=dynamic_shape,
+                        strict=True,
+                    )
             else:
                 logging.info("Exporting with:")
                 logging.info(f"inputs: {self.example_inputs}")
@@ -354,15 +364,25 @@ def export_to_edge(self) -> "LLMEdgeManager":
         if self.pre_autograd_graph_module is None:
             # Run export() if it didn't run
             self.export()
-        self.edge_manager = export_to_edge(
-            self.pre_autograd_graph_module,  # pyre-fixme[6]
-            self.example_inputs,
-            example_kwarg_inputs=self.example_kwarg_inputs,
-            dynamic_shapes=dynamic_shape,
-            edge_constant_methods=self.metadata,
-            edge_compile_config=edge_config,
-            verbose=self.verbose,
-        )
+
+        override_export_behaviour = contextlib.nullcontext()
+        if hasattr(self.args, "qnn") and self.args.qnn:
+            override_export_behaviour = patch.object(
+                torch._utils_internal,
+                "export_training_ir_rollout_check",
+                return_value=False,
+            )
+
+        with override_export_behaviour:
+            self.edge_manager = export_to_edge(
+                self.pre_autograd_graph_module,  # pyre-fixme[6]
+                self.example_inputs,
+                example_kwarg_inputs=self.example_kwarg_inputs,
+                dynamic_shapes=dynamic_shape,
+                edge_constant_methods=self.metadata,
+                edge_compile_config=edge_config,
+                verbose=self.verbose,
+            )
         return self

     def to_backend(self, partitioners: Optional[List[Partitioner]]) -> "LLMEdgeManager":
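Both builder.py hunks lean on the same idiom: bind a no-op `contextlib.nullcontext()` by default and swap in a `patch.object(...)` override only on the qnn path, so the `with` block itself stays unconditional. A self-contained sketch of the pattern, using `os.path.exists` purely as a stand-in patch target:

    import contextlib
    import os
    from unittest.mock import patch

    def path_exists(path: str, force_missing: bool) -> bool:
        # Default: a no-op context manager, so the with-block runs unchanged.
        override = contextlib.nullcontext()
        if force_missing:
            # Only this branch swaps in a temporary override, the same shape
            # as the qnn-only patch.object(...) in export_to_edge().
            override = patch.object(os.path, "exists", return_value=False)
        with override:
            return os.path.exists(path)

    print(path_exists(".", force_missing=False))  # True
    print(path_exists(".", force_missing=True))   # False
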
diff --git a/extension/llm/modules/test/test_position_embeddings.py b/extension/llm/modules/test/test_position_embeddings.py
index 039cc798b1..5e92f92df6 100644
--- a/extension/llm/modules/test/test_position_embeddings.py
+++ b/extension/llm/modules/test/test_position_embeddings.py
@@ -177,7 +177,6 @@ def test_tiled_token_positional_embedding_aoti(self):
         with tempfile.TemporaryDirectory() as tmpdir:
             path = torch._inductor.aoti_compile_and_package(
                 tpe_ep,
-                (self.x, self.aspect_ratio),
                 package_path=os.path.join(tmpdir, "tpe.pt2"),
             )
             tpe_aoti = load_package(path)
diff --git a/install_requirements.py b/install_requirements.py
index ace2f34b70..18b5d6f9b9 100644
--- a/install_requirements.py
+++ b/install_requirements.py
@@ -112,7 +112,7 @@ def python_is_compatible():
 # NOTE: If a newly-fetched version of the executorch repo changes the value of
 # NIGHTLY_VERSION, you should re-run this script to install the necessary
 # package versions.
-NIGHTLY_VERSION = "dev20241112"
+NIGHTLY_VERSION = "dev20241218"

 # The pip repository that hosts nightly torch packages.
 TORCH_NIGHTLY_URL = "https://download.pytorch.org/whl/nightly/cpu"
@@ -124,7 +124,7 @@ def python_is_compatible():
     # been installed on CI before this step, so pip won't reinstall them
     f"torch==2.6.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torch",
     (
-        f"torchvision==0.20.0.{NIGHTLY_VERSION}"
+        f"torchvision==0.22.0.{NIGHTLY_VERSION}"
         if USE_PYTORCH_NIGHTLY
         else "torchvision"
     ),  # For testing.
@@ -135,7 +135,7 @@ def python_is_compatible():
 # TODO: Make each example publish its own requirements.txt
 EXAMPLES_REQUIREMENTS = [
     "timm==1.0.7",
-    f"torchaudio==2.5.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torchaudio",
+    f"torchaudio==2.6.0.{NIGHTLY_VERSION}" if USE_PYTORCH_NIGHTLY else "torchaudio",
     "torchsr==1.0.4",
     "transformers==4.46.1",
 ]
diff --git a/third-party/TARGETS b/third-party/TARGETS
index 7474eb0007..5f31ef13c5 100644
--- a/third-party/TARGETS
+++ b/third-party/TARGETS
@@ -298,7 +298,6 @@ prebuilt_cxx_library(
     name = "libtorch",
     shared_lib = ":libtorch_gen[libtorch]",
     exported_preprocessor_flags = [
-        "-D_GLIBCXX_USE_CXX11_ABI=0",  # `libtorch` is built without CXX11_ABI so any target depends on it need to use the same build config.
         "-I$(location :libtorch_gen[include])",  # include header directories
         "-I$(location :libtorch_gen[include])/torch/csrc/api/include",  # include header directories
     ],
@@ -318,7 +317,6 @@ prebuilt_cxx_library(
     name = "libtorch_python",
     shared_lib = ":libtorch_gen[libtorch_python]",
     exported_preprocessor_flags = [
-        "-D_GLIBCXX_USE_CXX11_ABI=0",  # `libtorch` is built without CXX11_ABI so any target depends on it need to use the same build config.
         "-I$(location :libtorch_gen[include])",  # include header directories
         "-I$(location :libtorch_gen[include])/torch/csrc/api/include",  # include header directories
     ],
diff --git a/third-party/gtest_defs.bzl b/third-party/gtest_defs.bzl
index 5e51949b11..c1f4778b80 100644
--- a/third-party/gtest_defs.bzl
+++ b/third-party/gtest_defs.bzl
@@ -4,9 +4,7 @@
 COMPILER_FLAGS = [
     "-std=c++17",
 ]
 COMPILER_FLAGS_ATEN = [
-    "-std=c++17",
-    "-D_GLIBCXX_USE_CXX11_ABI=0",  # `libtorch` is built without CXX11_ABI so gtest needs to be compiled in the same way
-]
+    "-std=c++17",]

 # define_gtest_targets
 def define_gtest_targets():
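The torch, torchvision, and torchaudio pins above must move in lockstep: each nightly domain wheel is built against the torch nightly of the same date. A small illustration (not part of install_requirements.py) of how a single NIGHTLY_VERSION fans out into the pinned requirement strings:

    # Mirrors the pinned versions in this patch; the date suffix must be
    # identical across torch and the domain libraries.
    NIGHTLY_VERSION = "dev20241218"

    requirements = [
        f"torch==2.6.0.{NIGHTLY_VERSION}",
        f"torchvision==0.22.0.{NIGHTLY_VERSION}",
        f"torchaudio==2.6.0.{NIGHTLY_VERSION}",
    ]
    # Installed from the nightly CPU index, e.g.:
    #   pip install --pre <requirements...> \
    #       --extra-index-url https://download.pytorch.org/whl/nightly/cpu
    print(" ".join(requirements))
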