From 8f653fbb7401775f87eaf3f36f83e1ab33be3bcb Mon Sep 17 00:00:00 2001 From: FrankLeeeee Date: Thu, 11 Jan 2024 12:12:04 +0000 Subject: [PATCH] deploy: 0802e9595244c517c61c45f1d9a62c223368253d --- 404.html | 4 ++-- assets/js/{0a41237c.52cf170e.js => 0a41237c.91708dd2.js} | 2 +- ...runtime~main.9eb13b79.js => runtime~main.a42c3c69.js} | 2 +- blog/archive/index.html | 4 ++-- blog/first-blog-post/index.html | 4 ++-- blog/index.html | 4 ++-- blog/tags/docusaurus/index.html | 4 ++-- blog/tags/facebook/index.html | 4 ++-- blog/tags/hello/index.html | 4 ++-- blog/tags/hola/index.html | 4 ++-- blog/tags/index.html | 4 ++-- blog/welcome/index.html | 4 ++-- docs/Colossal-Auto/feature/auto_checkpoint/index.html | 4 ++-- docs/Colossal-Auto/feature/device_mesh/index.html | 4 ++-- .../feature/layout_converting_management/index.html | 4 ++-- docs/Colossal-Auto/feature/tracer/index.html | 4 ++-- docs/Colossal-Auto/get_started/installation/index.html | 4 ++-- docs/Colossal-Auto/get_started/introduction/index.html | 4 ++-- docs/Colossal-Auto/get_started/run_demo/index.html | 4 ++-- .../index.html | 4 ++-- docs/advanced_tutorials/meet_gemini/index.html | 4 ++-- docs/advanced_tutorials/opt_service/index.html | 4 ++-- .../train_gpt_using_hybrid_parallelism/index.html | 4 ++-- .../train_vit_with_hybrid_parallelism/index.html | 4 ++-- docs/basics/booster_api/index.html | 4 ++-- docs/basics/booster_checkpoint/index.html | 4 ++-- docs/basics/booster_plugins/index.html | 9 +++++---- docs/basics/command_line_tool/index.html | 4 ++-- docs/basics/launch_colossalai/index.html | 4 ++-- docs/concepts/colossalai_overview/index.html | 4 ++-- docs/concepts/distributed_training/index.html | 4 ++-- docs/concepts/paradigms_of_parallelism/index.html | 4 ++-- docs/features/1D_tensor_parallel/index.html | 4 ++-- docs/features/2D_tensor_parallel/index.html | 4 ++-- docs/features/2p5D_tensor_parallel/index.html | 4 ++-- docs/features/3D_tensor_parallel/index.html | 4 ++-- docs/features/cluster_utils/index.html | 4 ++-- .../gradient_accumulation_with_booster/index.html | 4 ++-- docs/features/gradient_clipping_with_booster/index.html | 4 ++-- docs/features/lazy_init/index.html | 4 ++-- .../mixed_precision_training_with_booster/index.html | 4 ++-- docs/features/nvme_offload/index.html | 4 ++-- docs/features/pipeline_parallel/index.html | 4 ++-- docs/features/shardformer/index.html | 4 ++-- docs/features/zero_with_chunk/index.html | 4 ++-- docs/get_started/installation/index.html | 4 ++-- docs/get_started/reading_roadmap/index.html | 4 ++-- docs/get_started/run_demo/index.html | 4 ++-- index.html | 4 ++-- markdown-page/index.html | 4 ++-- search/index.html | 4 ++-- zh-Hans/404.html | 4 ++-- .../js/{d742ffe2.3a4495dd.js => d742ffe2.bfcf124a.js} | 2 +- ...runtime~main.58d27fdd.js => runtime~main.9a4f2bfa.js} | 2 +- zh-Hans/blog/archive/index.html | 4 ++-- zh-Hans/blog/first-blog-post/index.html | 4 ++-- zh-Hans/blog/index.html | 4 ++-- zh-Hans/blog/tags/docusaurus/index.html | 4 ++-- zh-Hans/blog/tags/facebook/index.html | 4 ++-- zh-Hans/blog/tags/hello/index.html | 4 ++-- zh-Hans/blog/tags/hola/index.html | 4 ++-- zh-Hans/blog/tags/index.html | 4 ++-- zh-Hans/blog/welcome/index.html | 4 ++-- .../Colossal-Auto/feature/auto_checkpoint/index.html | 4 ++-- .../docs/Colossal-Auto/feature/device_mesh/index.html | 4 ++-- .../feature/layout_converting_management/index.html | 4 ++-- zh-Hans/docs/Colossal-Auto/feature/tracer/index.html | 4 ++-- .../Colossal-Auto/get_started/installation/index.html | 4 ++-- 
.../Colossal-Auto/get_started/introduction/index.html | 4 ++-- .../docs/Colossal-Auto/get_started/run_demo/index.html | 4 ++-- .../index.html | 4 ++-- zh-Hans/docs/advanced_tutorials/meet_gemini/index.html | 4 ++-- zh-Hans/docs/advanced_tutorials/opt_service/index.html | 4 ++-- .../train_gpt_using_hybrid_parallelism/index.html | 4 ++-- .../train_vit_with_hybrid_parallelism/index.html | 4 ++-- zh-Hans/docs/basics/booster_api/index.html | 4 ++-- zh-Hans/docs/basics/booster_checkpoint/index.html | 4 ++-- zh-Hans/docs/basics/booster_plugins/index.html | 9 +++++---- zh-Hans/docs/basics/command_line_tool/index.html | 4 ++-- zh-Hans/docs/basics/launch_colossalai/index.html | 4 ++-- zh-Hans/docs/concepts/colossalai_overview/index.html | 4 ++-- zh-Hans/docs/concepts/distributed_training/index.html | 4 ++-- .../docs/concepts/paradigms_of_parallelism/index.html | 4 ++-- zh-Hans/docs/features/1D_tensor_parallel/index.html | 4 ++-- zh-Hans/docs/features/2D_tensor_parallel/index.html | 4 ++-- zh-Hans/docs/features/2p5D_tensor_parallel/index.html | 4 ++-- zh-Hans/docs/features/3D_tensor_parallel/index.html | 4 ++-- zh-Hans/docs/features/cluster_utils/index.html | 4 ++-- .../gradient_accumulation_with_booster/index.html | 4 ++-- .../features/gradient_clipping_with_booster/index.html | 4 ++-- zh-Hans/docs/features/lazy_init/index.html | 4 ++-- .../mixed_precision_training_with_booster/index.html | 4 ++-- zh-Hans/docs/features/nvme_offload/index.html | 4 ++-- zh-Hans/docs/features/pipeline_parallel/index.html | 4 ++-- zh-Hans/docs/features/shardformer/index.html | 4 ++-- zh-Hans/docs/features/zero_with_chunk/index.html | 4 ++-- zh-Hans/docs/get_started/installation/index.html | 4 ++-- zh-Hans/docs/get_started/reading_roadmap/index.html | 4 ++-- zh-Hans/docs/get_started/run_demo/index.html | 4 ++-- zh-Hans/index.html | 4 ++-- zh-Hans/markdown-page/index.html | 4 ++-- zh-Hans/search/index.html | 4 ++-- 102 files changed, 206 insertions(+), 204 deletions(-) rename assets/js/{0a41237c.52cf170e.js => 0a41237c.91708dd2.js} (63%) rename assets/js/{runtime~main.9eb13b79.js => runtime~main.a42c3c69.js} (98%) rename zh-Hans/assets/js/{d742ffe2.3a4495dd.js => d742ffe2.bfcf124a.js} (65%) rename zh-Hans/assets/js/{runtime~main.58d27fdd.js => runtime~main.9a4f2bfa.js} (99%) diff --git a/404.html b/404.html index 713f4186..dc52df87 100644 --- a/404.html +++ b/404.html @@ -16,13 +16,13 @@ - +
- + \ No newline at end of file diff --git a/assets/js/0a41237c.52cf170e.js b/assets/js/0a41237c.91708dd2.js similarity index 63% rename from assets/js/0a41237c.52cf170e.js rename to assets/js/0a41237c.91708dd2.js index 73040edf..85e69bbb 100644 --- a/assets/js/0a41237c.52cf170e.js +++ b/assets/js/0a41237c.91708dd2.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdemo=self.webpackChunkdemo||[]).push([[4634],{6999:(e,t,o)=>{o.d(t,{Cl:()=>n,Dx:()=>u,Pc:()=>l,aE:()=>s,e_:()=>d,iz:()=>r,nT:()=>p});var a=o(7294),i=o(398);o(814);function n(e){return a.createElement("div",{className:"docstring-container"},e.children)}function l(e){return a.createElement("div",{className:"signature"},"(",e.children,")")}function r(e){return a.createElement("div",{class:"divider"},a.createElement("span",{class:"divider-text"},e.name))}function s(e){return a.createElement("div",null,a.createElement(r,{name:"Parameters"}),a.createElement(i.D,null,e.children))}function p(e){return a.createElement("div",null,a.createElement(r,{name:"Returns"}),a.createElement(i.D,null,`${e.name}: ${e.desc}`))}function u(e){return a.createElement("div",{className:"title-container"},a.createElement("div",{className:"title-module"},a.createElement("h5",null,e.type),"\xa0 ",a.createElement("h3",null,e.name)),a.createElement("div",{className:"title-source"},"<",a.createElement("a",{href:e.source,className:"title-source"},"source"),">"))}function d(e){return a.createElement("div",null,a.createElement(r,{name:"Example"}),a.createElement(i.D,null,e.code))}},5099:(e,t,o)=>{o.r(t),o.d(t,{assets:()=>p,contentTitle:()=>r,default:()=>m,frontMatter:()=>l,metadata:()=>s,toc:()=>u});var a=o(7462),i=(o(7294),o(3905)),n=o(6999);const l={},r="Booster Plugins",s={unversionedId:"basics/booster_plugins",id:"basics/booster_plugins",title:"Booster Plugins",description:"Author: Hongxin Liu, Baizhou Zhang, Pengtai Xu",source:"@site/i18n/en/docusaurus-plugin-content-docs/current/basics/booster_plugins.md",sourceDirName:"basics",slug:"/basics/booster_plugins",permalink:"/docs/basics/booster_plugins",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/basics/booster_plugins.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"Booster API",permalink:"/docs/basics/booster_api"},next:{title:"Booster Checkpoint",permalink:"/docs/basics/booster_checkpoint"}},p={},u=[{value:"Introduction",id:"introduction",level:2},{value:"Choosing Your Plugin",id:"choosing-your-plugin",level:2},{value:"Plugins",id:"plugins",level:2},{value:"Low Level Zero Plugin",id:"low-level-zero-plugin",level:3},{value:"Gemini Plugin",id:"gemini-plugin",level:3},{value:"Hybrid Parallel Plugin",id:"hybrid-parallel-plugin",level:3},{value:"Torch DDP Plugin",id:"torch-ddp-plugin",level:3},{value:"Torch FSDP Plugin",id:"torch-fsdp-plugin",level:3}],d={toc:u},c="wrapper";function m(e){let{components:t,...o}=e;return(0,i.kt)(c,(0,a.Z)({},d,o,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("h1",{id:"booster-plugins"},"Booster Plugins"),(0,i.kt)("p",null,"Author: ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/ver217"},"Hongxin Liu"),", ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/Fridge003"},"Baizhou Zhang"),", ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/ppt0011"},"Pengtai Xu")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Prerequisite:")),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"/docs/basics/booster_api"},"Booster 
API"))),(0,i.kt)("h2",{id:"introduction"},"Introduction"),(0,i.kt)("p",null,"As mentioned in ",(0,i.kt)("a",{parentName:"p",href:"/docs/basics/booster_api"},"Booster API"),", we can use booster plugins to customize the parallel training. In this tutorial, we will introduce how to use booster plugins."),(0,i.kt)("p",null,"We currently provide the following plugins:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-ddp-plugin"},"Torch DDP Plugin"),": It is a wrapper of ",(0,i.kt)("inlineCode",{parentName:"li"},"torch.nn.parallel.DistributedDataParallel")," and can be used to train models with data parallelism."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-fsdp-plugin"},"Torch FSDP Plugin"),": It is a wrapper of ",(0,i.kt)("inlineCode",{parentName:"li"},"torch.distributed.fsdp.FullyShardedDataParallel")," and can be used to train models with zero-dp."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#low-level-zero-plugin"},"Low Level Zero Plugin"),": It wraps the ",(0,i.kt)("inlineCode",{parentName:"li"},"colossalai.zero.low_level.LowLevelZeroOptimizer")," and can be used to train models with zero-dp. It only supports zero stage-1 and stage-2."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#gemini-plugin"},"Gemini Plugin"),": It wraps the ",(0,i.kt)("a",{parentName:"li",href:"/docs/features/zero_with_chunk"},"Gemini")," which implements Zero-3 with chunk-based and heterogeneous memory management."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#hybrid-parallel-plugin"},"Hybrid Parallel Plugin"),": It provides a tidy interface that integrates the power of Shardformer, pipeline manager, mixied precision training, TorchDDP and Zero stage 1/2 feature. With this plugin, transformer models can be easily trained with any combination of tensor parallel, pipeline parallel and data parallel (DDP/Zero) efficiently, along with various kinds of optimization tools for acceleration and memory saving. Detailed information about supported parallel strategies and optimization tools is explained in the section below.")),(0,i.kt)("p",null,"More plugins are coming soon."),(0,i.kt)("h2",{id:"choosing-your-plugin"},"Choosing Your Plugin"),(0,i.kt)("p",null,"Generally only one plugin is used to train a model. Our recommended use case for each plugin is as follows."),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-ddp-plugin"},"Torch DDP Plugin"),": It is suitable for models with less than 2 billion parameters (e.g. Bert-3m, GPT2-1.5b)."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-fsdp-plugin"},"Torch FSDP Plugin")," / ",(0,i.kt)("a",{parentName:"li",href:"#low-level-zero-plugin"},"Low Level Zero Plugin"),": It is suitable for models with less than 10 billion parameters (e.g. GPTJ-6b, MegatronLM-8b)."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#gemini-plugin"},"Gemini Plugin"),": It is suitable for models with more than 10 billion parameters (e.g. TuringNLG-17b) and is ideal for scenarios with ",(0,i.kt)("strong",{parentName:"li"},"high cross-node bandwidth and medium to small-scale clusters (below a thousand cards)")," (e.g. 
Llama2-70b)."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#hybrid-parallel-plugin"},"Hybrid Parallel Plugin"),": It is suitable for models with more than 60 billion parameters, or special models such as those with exceptionally long sequences, very large vocabularies, and is best suited for scenarios with ",(0,i.kt)("strong",{parentName:"li"},"low cross-node bandwidth and large-scale clusters (a thousand cards or more)")," (e.g. GPT3-175b, Bloom-176b).")),(0,i.kt)("h2",{id:"plugins"},"Plugins"),(0,i.kt)("h3",{id:"low-level-zero-plugin"},"Low Level Zero Plugin"),(0,i.kt)("p",null,"This plugin implements Zero-1 and Zero-2 (w/wo CPU offload), using ",(0,i.kt)("inlineCode",{parentName:"p"},"reduce")," and ",(0,i.kt)("inlineCode",{parentName:"p"},"gather")," to synchronize gradients and weights."),(0,i.kt)("p",null,"Zero-1 can be regarded as a better substitute of Torch DDP, which is more memory efficient and faster. It can be easily used in hybrid parallelism."),(0,i.kt)("p",null,"Zero-2 does not support local gradient accumulation. Though you can accumulate gradient if you insist, it cannot reduce communication cost. That is to say, it's not a good idea to use Zero-2 with pipeline parallelism."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.LowLevelZeroPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/low_level_zero_plugin.py#L213",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"stage: int = 1, precision: str = 'fp16', initial_scale: float = 4294967296, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, reduce_bucket_size_in_m: int = 12, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, cpu_offload: bool = False, master_weights: bool = True, verbose: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **stage** (int, optional) -- ZeRO stage. Defaults to 1.\n- **precision** (str, optional) -- precision. Support 'fp16', 'bf16' and 'fp32'. Defaults to 'fp16'.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**32.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **reduce_bucket_size_in_m** (int, optional) -- grad reduce bucket size in M. Defaults to 12.\n- **communication_dtype** (torch.dtype, optional) -- communication dtype. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- whether to overlap communication and computation. 
Defaults to True.\n- **cpu_offload** (bool, optional) -- whether to offload grad, master weight and optimizer state to cpu. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including grad overflow will be printed. Defaults to False.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for low level zero."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import LowLevelZeroPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = LowLevelZeroPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,i.kt)("p",null,"We've tested compatibility on some famous models, following models may not be supported:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"timm.models.convit_base")),(0,i.kt)("li",{parentName:"ul"},"dlrm and deepfm models in ",(0,i.kt)("inlineCode",{parentName:"li"},"torchrec"))),(0,i.kt)("p",null,"Compatibility problems will be fixed in the future."),(0,i.kt)("h3",{id:"gemini-plugin"},"Gemini Plugin"),(0,i.kt)("p",null,"This plugin implements Zero-3 with chunk-based and heterogeneous memory management. It can train large models without much loss in speed. It also does not support local gradient accumulation. More details can be found in ",(0,i.kt)("a",{parentName:"p",href:"/docs/features/zero_with_chunk"},"Gemini Doc"),"."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.GeminiPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/gemini_plugin.py#L255",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"chunk_config_dict: typing.Optional[dict] = None, chunk_init_device: typing.Optional[torch.device] = None, placement_policy: str = 'static', enable_gradient_accumulation: bool = False, shard_param_frac: float = 1.0, offload_optim_frac: float = 0.0, offload_param_frac: float = 0.0, warmup_non_model_data_ratio: float = 0.8, steady_cuda_cap_ratio: float = 0.9, precision: str = 'fp16', master_weights: bool = True, pin_memory: bool = False, force_outputs_fp32: bool = False, strict_ddp_mode: bool = False, search_range_m: int = 32, hidden_dim: typing.Optional[int] = None, min_chunk_size_m: float = 32, memstats: typing.Optional[colossalai.zero.gemini.memory_tracer.memory_stats.MemStats] = None, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, tp_size: int = 1, extra_dp_size: int = 1, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_sequence_parallelism: bool = False, enable_jit_fused: bool = False, enable_sequence_overlap: bool = False, verbose: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},'- **chunk_config_dict** (dict, optional) -- chunk configuration dictionary.\n- **chunk_init_device** (torch.device, optional) -- device to initialize the chunk.\n- **placement_policy** (str, optional) -- "static" and "auto". 
Defaults to "static".\n- **enable_gradient_accumulation** (bool, optional) -- Whether to enable gradient accumulation. When set to True, gradient will be stored after doing backward pass. Defaults to False.\n- **shard_param_frac** (float, optional) -- fraction of parameters to be sharded. Only for "static" placement.\n If `shard_param_frac` is 1.0, it\'s equal to zero-3. If `shard_param_frac` is 0.0, it\'s equal to zero-2. Defaults to 1.0.\n- **offload_optim_frac** (float, optional) -- fraction of optimizer states to be offloaded. Only for "static" placement.\n If `shard_param_frac` is 1.0 and `offload_optim_frac` is 0.0, it\'s equal to old "cuda" placement. Defaults to 0.0.\n- **offload_param_frac** (float, optional) -- fraction of parameters to be offloaded. Only for "static" placement.\n For efficiency, this argument is useful only when `shard_param_frac` is 1.0 and `offload_optim_frac` is 1.0.\n If `shard_param_frac` is 1.0, `offload_optim_frac` is 1.0 and `offload_param_frac` is 1.0, it\'s equal to old "cpu" placement.\n When using static placement, we recommend users to tune `shard_param_frac` first and then `offload_optim_frac`.\n Defaults to 0.0.\n- **warmup_non_model_data_ratio** (float, optional) -- ratio of expected non-model data memory during warmup. Only for "auto" placement. Defaults to 0.8.\n- **steady_cuda_cap_ratio** (float, optional) -- ratio of allowed cuda capacity for model data during steady state. Only for "auto" placement. Defaults to 0.9.\n- **precision** (str, optional) -- precision. Support \'fp16\' and \'bf16\'. Defaults to \'fp16\'.\n- **master_weights** (bool, optional) -- Whether to keep fp32 master parameter weights in optimizer. Defaults to True.\n- **pin_memory** (bool, optional) -- use pin memory on CPU. Defaults to False.\n- **force_outputs_fp32** (bool, optional) -- force outputs are fp32. Defaults to False.\n- **strict_ddp_mode** (bool, optional) -- use strict ddp mode (only use dp without other parallelism). Defaults to False.\n- **search_range_m** (int, optional) -- chunk size searching range divided by 2^20. Defaults to 32.\n- **hidden_dim** (int, optional) -- the hidden dimension of DNN.\n Users can provide this argument to speed up searching.\n If users do not know this argument before training, it is ok. We will use a default value 1024.\n- **min_chunk_size_m** (float, optional) -- the minimum chunk size divided by 2^20.\n If the aggregate size of parameters is still smaller than the minimum chunk size,\n all parameters will be compacted into one small chunk.\n- **memstats** (MemStats, optional) the memory statistics collector by a runtime memory tracer. --\n- **gpu_margin_mem_ratio** (float, optional) -- The ratio of GPU remaining memory (after the first forward-backward)\n which will be used when using hybrid CPU optimizer.\n This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto".\n Defaults to 0.0.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**16.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. 
Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **tp_size** (int, optional) -- If \'tp_size\' is set to be greater than 1, it means using tensor parallelism strategy, which is implemented in Shardformer, \'tp_size\' determines the size of the tensor parallel process group. Default to 1.\n- **extra_dp_size** (int, optional) -- If \'extra_dp_size\' is set to be greater than 1, it means creating another group to run with a ddp-like strategy. Default to 1.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including chunk search result will be printed. Defaults to False.')),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for Gemini."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = GeminiPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,i.kt)("h3",{id:"hybrid-parallel-plugin"},"Hybrid Parallel Plugin"),(0,i.kt)("p",null,"This plugin implements the combination of various parallel training strategies and optimization tools. The features of HybridParallelPlugin can be generally divided into four parts:"),(0,i.kt)("ol",null,(0,i.kt)("li",{parentName:"ol"},"Shardformer: This plugin provides an entrance to Shardformer, which controls model sharding under tensor parallel and pipeline parallel setting. Shardformer also overloads the logic of model's forward/backward process to ensure the smooth working of tp/pp. Also, optimization tools including fused normalization, flash attention (xformers), JIT and sequence parallel are injected into the overloaded forward/backward method by Shardformer. More details can be found in chapter ",(0,i.kt)("a",{parentName:"li",href:"/docs/features/shardformer"},"Shardformer Doc"),". 
The diagram below shows the features supported by shardformer together with hybrid parallel plugin.")),(0,i.kt)("div",{align:"center"},(0,i.kt)("img",{src:"https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/shardformer/shardformer_and_hybridparallel.png",width:"500"})),(0,i.kt)("ol",{start:2},(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("p",{parentName:"li"},"Mixed Precision Training: Support for fp16/bf16 mixed precision training. More details about its arguments configuration can be found in ",(0,i.kt)("a",{parentName:"p",href:"/docs/features/mixed_precision_training_with_booster"},"Mixed Precision Training Doc"),".")),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("p",{parentName:"li"},"Torch DDP: This plugin will automatically adopt Pytorch DDP as data parallel strategy when pipeline parallel and Zero is not used. More details about its arguments configuration can be found in ",(0,i.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch DDP Docs"),".")),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("p",{parentName:"li"},"Zero: This plugin can adopt Zero 1/2 as data parallel strategy through setting the ",(0,i.kt)("inlineCode",{parentName:"p"},"zero_stage")," argument as 1 or 2 when initializing plugin. Zero 1 is compatible with pipeline parallel strategy, while Zero 2 is not. More details about its argument configuration can be found in ",(0,i.kt)("a",{parentName:"p",href:"#low-level-zero-plugin"},"Low Level Zero Plugin"),"."))),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 When using this plugin, only the subset of Huggingface transformers supported by Shardformer are compatible with tensor parallel, pipeline parallel and optimization tools. Mainstream transformers such as Llama 1, Llama 2, OPT, Bloom, Bert and GPT2 etc. are all supported by Shardformer.")),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.HybridParallelPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L863",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **tp_size** (int) -- The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.\n- **pp_size** (int) -- The number of pipeline stages in pipeline parallelism. 
Pipeline parallelism will not be used when pp_size is set to 1.\n- **precision** (str, optional) -- Specifies the precision of parameters during training.\n Auto-mixied precision will be used when this argument is set to 'fp16' or 'bf16', otherwise model is trained with 'fp32'.\n Defaults to 'fp16'.\n- **zero_stage** (int, optional) -- The stage of ZeRO for data parallelism. Can only be choosed from [0, 1, 2].\n When set to 0, ZeRO will not be used. Defaults to 0.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **num_microbatches** (int, optional) -- Number of microbatches when using pipeline parallelism. Defaults to None.\n- **microbatch_size** (int, optional) -- Microbatch size when using pipeline parallelism.\n Either `num_microbatches` or `microbatch_size` should be provided if using pipeline.\n If `num_microbatches` is provided, this will be ignored. Defaults to None.\n- **initial_scale** (float, optional) -- The initial loss scale of AMP. Defaults to 2**16.\n- **min_scale** (float, optional) -- The minimum loss scale of AMP. Defaults to 1.\n- **growth_factor** (float, optional) -- The multiplication factor for increasing loss scale when using AMP. Defaults to 2.\n- **backoff_factor** (float, optional) -- The multiplication factor for decreasing loss scale when using AMP. Defaults to 0.5.\n- **growth_interval** (int, optional) -- The number of steps to increase loss scale when no overflow occurs when using AMP. Defaults to 1000.\n- **hysteresis** (int, optional) -- The number of overflows before decreasing loss scale when using AMP. Defaults to 2.\n- **max_scale** (float, optional) -- The maximum loss scale of AMP. Defaults to 2**32.\n- **max_norm** (float, optional) -- Maximum norm for gradient clipping. Defaults to 0.\n- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training when using DDP. Defaults to True.\n- **ddp_bucket_cap_mb** (int, optional) -- The bucket size in MB when using DDP. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters when using DDP. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction when using DDP. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view when using DDP. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph when using DDP. Defaults to False.\n- **zero_bucket_size_in_m** (int, optional) -- Gradient reduce bucket size in million elements when using ZeRO. Defaults to 12.\n- **cpu_offload** (bool, optional) -- Whether to open cpu_offload when using ZeRO. Defaults to False.\n- **communication_dtype** (torch.dtype, optional) -- Communication dtype when using ZeRO. 
If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- Whether to overlap communication and computation when using ZeRO. Defaults to True.\n- **custom_policy** (Policy, optional) -- Custom policy for Shardformer. Defaults to None.\n- **pp_style** (str, optional) -- The style for pipeline parallelism. Defaults to '1f1b'.\n- **num_model_chunks** (int, optional) -- The number of model chunks for interleaved pipeline parallelism. Defaults to 1.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for Hybrid Parallel Training.\nTensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin.\nThe size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size)."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import HybridParallelPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = HybridParallelPlugin(tp_size=2, pp_size=2)\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader)\n```",mdxType:"ExampleCode"})),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"function",name:"prepare_dataloader",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L1188",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **dataset** (*torch.utils.data.Dataset*) -- The dataset to be loaded.\n- **shuffle** (bool, optional) -- Whether to shuffle the dataset. Defaults to False.\n- **seed** (int, optional) -- Random worker seed for sampling, defaults to 1024.\n add_sampler -- Whether to add `DistributedDataParallelSampler` to the dataset. Defaults to True.\n- **drop_last** (bool, optional) -- Set to True to drop the last incomplete batch, if the dataset size\n is not divisible by the batch size. If False and the size of dataset is not divisible by\n the batch size, then the last batch will be smaller, defaults to False.\n- **pin_memory** (bool, optional) -- Whether to pin memory address in CPU memory. Defaults to False.\n- **num_workers** (int, optional) -- Number of worker threads for this dataloader. Defaults to 0.\n- **kwargs** (dict) -- optional parameters for `torch.utils.data.DataLoader`, more details could be found in\n [DataLoader](https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader)."),(0,i.kt)(n.nT,{name:"[`torch.utils.data.DataLoader`]",desc:"A DataLoader used for training or testing.",mdxType:"Returns"})),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Prepare a dataloader for distributed training. 
The dataloader will be wrapped by\n",(0,i.kt)("em",{parentName:"p"},"torch.utils.data.DataLoader")," and ",(0,i.kt)("em",{parentName:"p"},"torch.utils.data.DistributedSampler"),".")))),(0,i.kt)("h3",{id:"torch-ddp-plugin"},"Torch DDP Plugin"),(0,i.kt)("p",null,"More details can be found in ",(0,i.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch Docs"),"."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.TorchDDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_ddp_plugin.py#L129",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"broadcast_buffers: bool = True, bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training. Defaults to True.\n- **bucket_cap_mb** (int, optional) -- The bucket size in MB. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph. Defaults to False.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for PyTorch DDP."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchDDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchDDPPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,i.kt)("h3",{id:"torch-fsdp-plugin"},"Torch FSDP Plugin"),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 This plugin is not available when torch version is lower than 1.12.0.")),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 This plugin does not support save/load sharded model checkpoint now.")),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 This plugin does not support optimizer that use multi params group.")),(0,i.kt)("p",null,"More details can be found in ",(0,i.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/fsdp.html"},"Pytorch Docs"),"."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.TorchFSDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_fsdp_plugin.py#L142",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"process_group: typing.Optional[torch.distributed.distributed_c10d.ProcessGroup] = None, sharding_strategy: typing.Optional[torch.distributed.fsdp.api.ShardingStrategy] = None, cpu_offload: typing.Optional[torch.distributed.fsdp.api.CPUOffload] = None, auto_wrap_policy: typing.Optional[typing.Callable] = None, backward_prefetch: typing.Optional[torch.distributed.fsdp.api.BackwardPrefetch] = None, 
mixed_precision: typing.Optional[torch.distributed.fsdp.api.MixedPrecision] = None, ignored_modules: typing.Optional[typing.Iterable[torch.nn.modules.module.Module]] = None, param_init_fn: typing.Optional[typing.Callable[[torch.nn.modules.module.Module]], NoneType] = None, sync_module_states: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **See** https --//pytorch.org/docs/stable/fsdp.html for details.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for PyTorch FSDP."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchFSDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchFSDPPlugin()\n\ntrain_dataloader = plugin.prepare_train_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdemo=self.webpackChunkdemo||[]).push([[4634],{6999:(e,t,a)=>{a.d(t,{Cl:()=>n,Dx:()=>u,Pc:()=>l,aE:()=>s,e_:()=>d,iz:()=>r,nT:()=>p});var o=a(7294),i=a(398);a(814);function n(e){return o.createElement("div",{className:"docstring-container"},e.children)}function l(e){return o.createElement("div",{className:"signature"},"(",e.children,")")}function r(e){return o.createElement("div",{class:"divider"},o.createElement("span",{class:"divider-text"},e.name))}function s(e){return o.createElement("div",null,o.createElement(r,{name:"Parameters"}),o.createElement(i.D,null,e.children))}function p(e){return o.createElement("div",null,o.createElement(r,{name:"Returns"}),o.createElement(i.D,null,`${e.name}: ${e.desc}`))}function u(e){return o.createElement("div",{className:"title-container"},o.createElement("div",{className:"title-module"},o.createElement("h5",null,e.type),"\xa0 ",o.createElement("h3",null,e.name)),o.createElement("div",{className:"title-source"},"<",o.createElement("a",{href:e.source,className:"title-source"},"source"),">"))}function d(e){return o.createElement("div",null,o.createElement(r,{name:"Example"}),o.createElement(i.D,null,e.code))}},5099:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>r,default:()=>m,frontMatter:()=>l,metadata:()=>s,toc:()=>u});var o=a(7462),i=(a(7294),a(3905)),n=a(6999);const l={},r="Booster Plugins",s={unversionedId:"basics/booster_plugins",id:"basics/booster_plugins",title:"Booster Plugins",description:"Author: Hongxin Liu, Baizhou Zhang, Pengtai Xu",source:"@site/i18n/en/docusaurus-plugin-content-docs/current/basics/booster_plugins.md",sourceDirName:"basics",slug:"/basics/booster_plugins",permalink:"/docs/basics/booster_plugins",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/basics/booster_plugins.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"Booster API",permalink:"/docs/basics/booster_api"},next:{title:"Booster Checkpoint",permalink:"/docs/basics/booster_checkpoint"}},p={},u=[{value:"Introduction",id:"introduction",level:2},{value:"Choosing Your Plugin",id:"choosing-your-plugin",level:2},{value:"Plugins",id:"plugins",level:2},{value:"Low Level Zero Plugin",id:"low-level-zero-plugin",level:3},{value:"Gemini Plugin",id:"gemini-plugin",level:3},{value:"Hybrid Parallel Plugin",id:"hybrid-parallel-plugin",level:3},{value:"Torch DDP 
Plugin",id:"torch-ddp-plugin",level:3},{value:"Torch FSDP Plugin",id:"torch-fsdp-plugin",level:3}],d={toc:u},c="wrapper";function m(e){let{components:t,...a}=e;return(0,i.kt)(c,(0,o.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,i.kt)("h1",{id:"booster-plugins"},"Booster Plugins"),(0,i.kt)("p",null,"Author: ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/ver217"},"Hongxin Liu"),", ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/Fridge003"},"Baizhou Zhang"),", ",(0,i.kt)("a",{parentName:"p",href:"https://github.com/ppt0011"},"Pengtai Xu")),(0,i.kt)("p",null,(0,i.kt)("strong",{parentName:"p"},"Prerequisite:")),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"/docs/basics/booster_api"},"Booster API"))),(0,i.kt)("h2",{id:"introduction"},"Introduction"),(0,i.kt)("p",null,"As mentioned in ",(0,i.kt)("a",{parentName:"p",href:"/docs/basics/booster_api"},"Booster API"),", we can use booster plugins to customize the parallel training. In this tutorial, we will introduce how to use booster plugins."),(0,i.kt)("p",null,"We currently provide the following plugins:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-ddp-plugin"},"Torch DDP Plugin"),": It is a wrapper of ",(0,i.kt)("inlineCode",{parentName:"li"},"torch.nn.parallel.DistributedDataParallel")," and can be used to train models with data parallelism."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-fsdp-plugin"},"Torch FSDP Plugin"),": It is a wrapper of ",(0,i.kt)("inlineCode",{parentName:"li"},"torch.distributed.fsdp.FullyShardedDataParallel")," and can be used to train models with zero-dp."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#low-level-zero-plugin"},"Low Level Zero Plugin"),": It wraps the ",(0,i.kt)("inlineCode",{parentName:"li"},"colossalai.zero.low_level.LowLevelZeroOptimizer")," and can be used to train models with zero-dp. It only supports zero stage-1 and stage-2."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#gemini-plugin"},"Gemini Plugin"),": It wraps the ",(0,i.kt)("a",{parentName:"li",href:"/docs/features/zero_with_chunk"},"Gemini")," which implements Zero-3 with chunk-based and heterogeneous memory management."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#hybrid-parallel-plugin"},"Hybrid Parallel Plugin"),": It provides a tidy interface that integrates the power of Shardformer, pipeline manager, mixied precision training, TorchDDP and Zero stage 1/2 feature. With this plugin, transformer models can be easily trained with any combination of tensor parallel, pipeline parallel and data parallel (DDP/Zero) efficiently, along with various kinds of optimization tools for acceleration and memory saving. Detailed information about supported parallel strategies and optimization tools is explained in the section below.")),(0,i.kt)("p",null,"More plugins are coming soon."),(0,i.kt)("h2",{id:"choosing-your-plugin"},"Choosing Your Plugin"),(0,i.kt)("p",null,"Generally only one plugin is used to train a model. Our recommended use case for each plugin is as follows."),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-ddp-plugin"},"Torch DDP Plugin"),": It is suitable for models with less than 2 billion parameters (e.g. 
Bert-3m, GPT2-1.5b)."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#torch-fsdp-plugin"},"Torch FSDP Plugin")," / ",(0,i.kt)("a",{parentName:"li",href:"#low-level-zero-plugin"},"Low Level Zero Plugin"),": It is suitable for models with less than 10 billion parameters (e.g. GPTJ-6b, MegatronLM-8b)."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#gemini-plugin"},"Gemini Plugin"),": It is suitable for models with more than 10 billion parameters (e.g. TuringNLG-17b) and is ideal for scenarios with ",(0,i.kt)("strong",{parentName:"li"},"high cross-node bandwidth and medium to small-scale clusters (below a thousand cards)")," (e.g. Llama2-70b)."),(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("a",{parentName:"li",href:"#hybrid-parallel-plugin"},"Hybrid Parallel Plugin"),": It is suitable for models with more than 60 billion parameters, or special models such as those with exceptionally long sequences, very large vocabularies, and is best suited for scenarios with ",(0,i.kt)("strong",{parentName:"li"},"low cross-node bandwidth and large-scale clusters (a thousand cards or more)")," (e.g. GPT3-175b, Bloom-176b).")),(0,i.kt)("h2",{id:"plugins"},"Plugins"),(0,i.kt)("h3",{id:"low-level-zero-plugin"},"Low Level Zero Plugin"),(0,i.kt)("p",null,"This plugin implements Zero-1 and Zero-2 (w/wo CPU offload), using ",(0,i.kt)("inlineCode",{parentName:"p"},"reduce")," and ",(0,i.kt)("inlineCode",{parentName:"p"},"gather")," to synchronize gradients and weights."),(0,i.kt)("p",null,"Zero-1 can be regarded as a better substitute of Torch DDP, which is more memory efficient and faster. It can be easily used in hybrid parallelism."),(0,i.kt)("p",null,"Zero-2 does not support local gradient accumulation. Though you can accumulate gradient if you insist, it cannot reduce communication cost. That is to say, it's not a good idea to use Zero-2 with pipeline parallelism."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.LowLevelZeroPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/low_level_zero_plugin.py#L213",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"stage: int = 1, precision: str = 'fp16', initial_scale: float = 4294967296, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, reduce_bucket_size_in_m: int = 12, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, cpu_offload: bool = False, master_weights: bool = True, verbose: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **stage** (int, optional) -- ZeRO stage. Defaults to 1.\n- **precision** (str, optional) -- precision. Support 'fp16', 'bf16' and 'fp32'. Defaults to 'fp16'.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**32.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. 
Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **reduce_bucket_size_in_m** (int, optional) -- grad reduce bucket size in M. Defaults to 12.\n- **communication_dtype** (torch.dtype, optional) -- communication dtype. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- whether to overlap communication and computation. Defaults to True.\n- **cpu_offload** (bool, optional) -- whether to offload grad, master weight and optimizer state to cpu. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including grad overflow will be printed. Defaults to False.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for low level zero."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import LowLevelZeroPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = LowLevelZeroPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,i.kt)("p",null,"We've tested compatibility on some famous models, following models may not be supported:"),(0,i.kt)("ul",null,(0,i.kt)("li",{parentName:"ul"},(0,i.kt)("inlineCode",{parentName:"li"},"timm.models.convit_base")),(0,i.kt)("li",{parentName:"ul"},"dlrm and deepfm models in ",(0,i.kt)("inlineCode",{parentName:"li"},"torchrec"))),(0,i.kt)("p",null,"Compatibility problems will be fixed in the future."),(0,i.kt)("h3",{id:"gemini-plugin"},"Gemini Plugin"),(0,i.kt)("p",null,"This plugin implements Zero-3 with chunk-based and heterogeneous memory management. It can train large models without much loss in speed. It also does not support local gradient accumulation. 
More details can be found in ",(0,i.kt)("a",{parentName:"p",href:"/docs/features/zero_with_chunk"},"Gemini Doc"),"."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.GeminiPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/gemini_plugin.py#L255",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"chunk_config_dict: typing.Optional[dict] = None, chunk_init_device: typing.Optional[torch.device] = None, placement_policy: str = 'static', enable_gradient_accumulation: bool = False, shard_param_frac: float = 1.0, offload_optim_frac: float = 0.0, offload_param_frac: float = 0.0, warmup_non_model_data_ratio: float = 0.8, steady_cuda_cap_ratio: float = 0.9, precision: str = 'fp16', master_weights: bool = True, pin_memory: bool = False, force_outputs_fp32: bool = False, strict_ddp_mode: bool = False, search_range_m: int = 32, hidden_dim: typing.Optional[int] = None, min_chunk_size_m: float = 32, memstats: typing.Optional[colossalai.zero.gemini.memory_tracer.memory_stats.MemStats] = None, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, tp_size: int = 1, extra_dp_size: int = 1, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_sequence_parallelism: bool = False, enable_jit_fused: bool = False, enable_sequence_overlap: bool = False, verbose: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},'- **chunk_config_dict** (dict, optional) -- chunk configuration dictionary.\n- **chunk_init_device** (torch.device, optional) -- device to initialize the chunk.\n- **placement_policy** (str, optional) -- "static" and "auto". Defaults to "static".\n- **enable_gradient_accumulation** (bool, optional) -- Whether to enable gradient accumulation. When set to True, gradient will be stored after doing backward pass. Defaults to False.\n- **shard_param_frac** (float, optional) -- fraction of parameters to be sharded. Only for "static" placement.\n If `shard_param_frac` is 1.0, it\'s equal to zero-3. If `shard_param_frac` is 0.0, it\'s equal to zero-2. Defaults to 1.0.\n- **offload_optim_frac** (float, optional) -- fraction of optimizer states to be offloaded. Only for "static" placement.\n If `shard_param_frac` is 1.0 and `offload_optim_frac` is 0.0, it\'s equal to old "cuda" placement. Defaults to 0.0.\n- **offload_param_frac** (float, optional) -- fraction of parameters to be offloaded. Only for "static" placement.\n For efficiency, this argument is useful only when `shard_param_frac` is 1.0 and `offload_optim_frac` is 1.0.\n If `shard_param_frac` is 1.0, `offload_optim_frac` is 1.0 and `offload_param_frac` is 1.0, it\'s equal to old "cpu" placement.\n When using static placement, we recommend users to tune `shard_param_frac` first and then `offload_optim_frac`.\n Defaults to 0.0.\n- **warmup_non_model_data_ratio** (float, optional) -- ratio of expected non-model data memory during warmup. Only for "auto" placement. Defaults to 0.8.\n- **steady_cuda_cap_ratio** (float, optional) -- ratio of allowed cuda capacity for model data during steady state. Only for "auto" placement. Defaults to 0.9.\n- **precision** (str, optional) -- precision. Support \'fp16\' and \'bf16\'. 
Defaults to \'fp16\'.\n- **master_weights** (bool, optional) -- Whether to keep fp32 master parameter weights in optimizer. Defaults to True.\n- **pin_memory** (bool, optional) -- use pin memory on CPU. Defaults to False.\n- **force_outputs_fp32** (bool, optional) -- force outputs are fp32. Defaults to False.\n- **strict_ddp_mode** (bool, optional) -- use strict ddp mode (only use dp without other parallelism). Defaults to False.\n- **search_range_m** (int, optional) -- chunk size searching range divided by 2^20. Defaults to 32.\n- **hidden_dim** (int, optional) -- the hidden dimension of DNN.\n Users can provide this argument to speed up searching.\n If users do not know this argument before training, it is ok. We will use a default value 1024.\n- **min_chunk_size_m** (float, optional) -- the minimum chunk size divided by 2^20.\n If the aggregate size of parameters is still smaller than the minimum chunk size,\n all parameters will be compacted into one small chunk.\n- **memstats** (MemStats, optional) the memory statistics collector by a runtime memory tracer. --\n- **gpu_margin_mem_ratio** (float, optional) -- The ratio of GPU remaining memory (after the first forward-backward)\n which will be used when using hybrid CPU optimizer.\n This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto".\n Defaults to 0.0.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**16.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **tp_size** (int, optional) -- If \'tp_size\' is set to be greater than 1, it means using tensor parallelism strategy, which is implemented in Shardformer, \'tp_size\' determines the size of the tensor parallel process group. Default to 1.\n- **extra_dp_size** (int, optional) -- If \'extra_dp_size\' is set to be greater than 1, it means creating another group to run with a ddp-like strategy. Default to 1.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. 
Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including chunk search result will be printed. Defaults to False.')),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for Gemini."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = GeminiPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,i.kt)("h3",{id:"hybrid-parallel-plugin"},"Hybrid Parallel Plugin"),(0,i.kt)("p",null,"This plugin implements the combination of various parallel training strategies and optimization tools. The features of HybridParallelPlugin can be generally divided into four parts:"),(0,i.kt)("ol",null,(0,i.kt)("li",{parentName:"ol"},"Shardformer: This plugin provides an entrance to Shardformer, which controls model sharding under tensor parallel and pipeline parallel setting. Shardformer also overloads the logic of model's forward/backward process to ensure the smooth working of tp/pp. Also, optimization tools including fused normalization, flash attention (xformers), JIT and sequence parallel are injected into the overloaded forward/backward method by Shardformer. More details can be found in chapter ",(0,i.kt)("a",{parentName:"li",href:"/docs/features/shardformer"},"Shardformer Doc"),". The diagram below shows the features supported by shardformer together with hybrid parallel plugin.")),(0,i.kt)("div",{align:"center"},(0,i.kt)("img",{src:"https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/shardformer/shardformer_and_hybridparallel.png",width:"500"})),(0,i.kt)("ol",{start:2},(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("p",{parentName:"li"},"Mixed Precision Training: Support for fp16/bf16 mixed precision training. More details about its arguments configuration can be found in ",(0,i.kt)("a",{parentName:"p",href:"/docs/features/mixed_precision_training_with_booster"},"Mixed Precision Training Doc"),".")),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("p",{parentName:"li"},"Torch DDP: This plugin will automatically adopt Pytorch DDP as data parallel strategy when pipeline parallel and Zero is not used. More details about its arguments configuration can be found in ",(0,i.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch DDP Docs"),".")),(0,i.kt)("li",{parentName:"ol"},(0,i.kt)("p",{parentName:"li"},"Zero: This plugin can adopt Zero 1/2 as data parallel strategy through setting the ",(0,i.kt)("inlineCode",{parentName:"p"},"zero_stage")," argument as 1 or 2 when initializing plugin. Zero 1 is compatible with pipeline parallel strategy, while Zero 2 is not. More details about its argument configuration can be found in ",(0,i.kt)("a",{parentName:"p",href:"#low-level-zero-plugin"},"Low Level Zero Plugin"),"."))),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 When using this plugin, only the subset of Huggingface transformers supported by Shardformer are compatible with tensor parallel, pipeline parallel and optimization tools. Mainstream transformers such as Llama 1, Llama 2, OPT, Bloom, Bert and GPT2 etc. 
are all supported by Shardformer.")),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.HybridParallelPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L863",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1, enable_metadata_cache: bool = True"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **tp_size** (int) -- The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.\n- **pp_size** (int) -- The number of pipeline stages in pipeline parallelism. Pipeline parallelism will not be used when pp_size is set to 1.\n- **precision** (str, optional) -- Specifies the precision of parameters during training.\n Auto-mixied precision will be used when this argument is set to 'fp16' or 'bf16', otherwise model is trained with 'fp32'.\n Defaults to 'fp16'.\n- **zero_stage** (int, optional) -- The stage of ZeRO for data parallelism. Can only be choosed from [0, 1, 2].\n When set to 0, ZeRO will not be used. Defaults to 0.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **num_microbatches** (int, optional) -- Number of microbatches when using pipeline parallelism. Defaults to None.\n- **microbatch_size** (int, optional) -- Microbatch size when using pipeline parallelism.\n Either `num_microbatches` or `microbatch_size` should be provided if using pipeline.\n If `num_microbatches` is provided, this will be ignored. Defaults to None.\n- **initial_scale** (float, optional) -- The initial loss scale of AMP. Defaults to 2**16.\n- **min_scale** (float, optional) -- The minimum loss scale of AMP. 
Defaults to 1.\n- **growth_factor** (float, optional) -- The multiplication factor for increasing loss scale when using AMP. Defaults to 2.\n- **backoff_factor** (float, optional) -- The multiplication factor for decreasing loss scale when using AMP. Defaults to 0.5.\n- **growth_interval** (int, optional) -- The number of steps to increase loss scale when no overflow occurs when using AMP. Defaults to 1000.\n- **hysteresis** (int, optional) -- The number of overflows before decreasing loss scale when using AMP. Defaults to 2.\n- **max_scale** (float, optional) -- The maximum loss scale of AMP. Defaults to 2**32.\n- **max_norm** (float, optional) -- Maximum norm for gradient clipping. Defaults to 0.\n- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training when using DDP. Defaults to True.\n- **ddp_bucket_cap_mb** (int, optional) -- The bucket size in MB when using DDP. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters when using DDP. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction when using DDP. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view when using DDP. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph when using DDP. Defaults to False.\n- **zero_bucket_size_in_m** (int, optional) -- Gradient reduce bucket size in million elements when using ZeRO. Defaults to 12.\n- **cpu_offload** (bool, optional) -- Whether to open cpu_offload when using ZeRO. Defaults to False.\n- **communication_dtype** (torch.dtype, optional) -- Communication dtype when using ZeRO. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- Whether to overlap communication and computation when using ZeRO. Defaults to True.\n- **custom_policy** (Policy, optional) -- Custom policy for Shardformer. Defaults to None.\n- **pp_style** (str, optional) -- The style for pipeline parallelism. Defaults to '1f1b'.\n- **num_model_chunks** (int, optional) -- The number of model chunks for interleaved pipeline parallelism. Defaults to 1.\n- **enable_metadata_cache** (bool, optional) -- Whether to enable metadata cache for pipeline parallelism. 
Defaults to True.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for Hybrid Parallel Training.\nTensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin.\nThe size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size)."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import HybridParallelPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = HybridParallelPlugin(tp_size=2, pp_size=2)\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader)\n```",mdxType:"ExampleCode"})),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"function",name:"prepare_dataloader",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L1194",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **dataset** (*torch.utils.data.Dataset*) -- The dataset to be loaded.\n- **shuffle** (bool, optional) -- Whether to shuffle the dataset. Defaults to False.\n- **seed** (int, optional) -- Random worker seed for sampling, defaults to 1024.\n add_sampler -- Whether to add `DistributedDataParallelSampler` to the dataset. Defaults to True.\n- **drop_last** (bool, optional) -- Set to True to drop the last incomplete batch, if the dataset size\n is not divisible by the batch size. If False and the size of dataset is not divisible by\n the batch size, then the last batch will be smaller, defaults to False.\n- **pin_memory** (bool, optional) -- Whether to pin memory address in CPU memory. Defaults to False.\n- **num_workers** (int, optional) -- Number of worker threads for this dataloader. Defaults to 0.\n- **kwargs** (dict) -- optional parameters for `torch.utils.data.DataLoader`, more details could be found in\n [DataLoader](https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader)."),(0,i.kt)(n.nT,{name:"[`torch.utils.data.DataLoader`]",desc:"A DataLoader used for training or testing.",mdxType:"Returns"})),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Prepare a dataloader for distributed training. 
The dataloader will be wrapped by\n",(0,i.kt)("em",{parentName:"p"},"torch.utils.data.DataLoader")," and ",(0,i.kt)("em",{parentName:"p"},"torch.utils.data.DistributedSampler"),".")))),(0,i.kt)("h3",{id:"torch-ddp-plugin"},"Torch DDP Plugin"),(0,i.kt)("p",null,"More details can be found in ",(0,i.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch Docs"),"."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.TorchDDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_ddp_plugin.py#L129",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"broadcast_buffers: bool = True, bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training. Defaults to True.\n- **bucket_cap_mb** (int, optional) -- The bucket size in MB. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph. Defaults to False.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for PyTorch DDP."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchDDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchDDPPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,i.kt)("h3",{id:"torch-fsdp-plugin"},"Torch FSDP Plugin"),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 This plugin is not available when torch version is lower than 1.12.0.")),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 This plugin does not support save/load sharded model checkpoint now.")),(0,i.kt)("blockquote",null,(0,i.kt)("p",{parentName:"blockquote"},"\u26a0 This plugin does not support optimizer that use multi params group.")),(0,i.kt)("p",null,"More details can be found in ",(0,i.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/fsdp.html"},"Pytorch Docs"),"."),(0,i.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,i.kt)("div",null,(0,i.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.TorchFSDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_fsdp_plugin.py#L142",mdxType:"Title"}),(0,i.kt)(n.Pc,{mdxType:"Signature"},"process_group: typing.Optional[torch.distributed.distributed_c10d.ProcessGroup] = None, sharding_strategy: typing.Optional[torch.distributed.fsdp.api.ShardingStrategy] = None, cpu_offload: typing.Optional[torch.distributed.fsdp.api.CPUOffload] = None, auto_wrap_policy: typing.Optional[typing.Callable] = None, backward_prefetch: typing.Optional[torch.distributed.fsdp.api.BackwardPrefetch] = None, 
mixed_precision: typing.Optional[torch.distributed.fsdp.api.MixedPrecision] = None, ignored_modules: typing.Optional[typing.Iterable[torch.nn.modules.module.Module]] = None, param_init_fn: typing.Optional[typing.Callable[[torch.nn.modules.module.Module]], NoneType] = None, sync_module_states: bool = False"),(0,i.kt)(n.aE,{mdxType:"Parameters"},"- **See** https --//pytorch.org/docs/stable/fsdp.html for details.")),(0,i.kt)("div",null,(0,i.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,i.kt)("p",null,"Plugin for PyTorch FSDP."),(0,i.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchFSDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchFSDPPlugin()\n\ntrain_dataloader = plugin.prepare_train_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.9eb13b79.js b/assets/js/runtime~main.a42c3c69.js similarity index 98% rename from assets/js/runtime~main.9eb13b79.js rename to assets/js/runtime~main.a42c3c69.js index 1f7f13a8..deb759da 100644 --- a/assets/js/runtime~main.9eb13b79.js +++ b/assets/js/runtime~main.a42c3c69.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,c,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=b,r.c=t,e=[],r.O=(a,f,c,d)=>{if(!f){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,f({}),f([]),f(f)];for(var t=2&c&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var f in 
a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",110:"66406991",453:"30a24c52",510:"4bb11789",533:"b2b675dd",618:"b0443bc9",820:"912a9b5d",933:"648eb177",1155:"67b08d09",1477:"b2f554cd",1633:"031793e1",1713:"a7023ddc",1906:"6854244a",1914:"d9f32620",2111:"8ead34be",2189:"461c4fde",2362:"e273c56f",2476:"ca294e11",2535:"814f3328",3085:"1f391b9e",3089:"a6aa9e1f",3182:"558dbece",3205:"a80da1cf",3515:"0f6a2247",3602:"e6529910",3608:"9e4087bc",3716:"f65f5e37",4013:"01a85c17",4195:"c4f5d8e4",4327:"aae8e891",4634:"0a41237c",4913:"e08b29aa",4915:"eb377b60",4932:"78eb35df",4958:"2ddc79bc",5247:"635db010",5416:"ff24d540",5870:"6d2b80e7",6103:"ccc49370",6224:"6c830cbd",6271:"0f71a4c4",6584:"41e90a9f",6869:"8872db8b",6938:"608ae6a4",7178:"096bfee4",7414:"393be207",7546:"3bbf8e36",7707:"ebcd4f00",7918:"17896441",7920:"1a4e3797",8001:"d322d808",8185:"24f42c0d",8219:"869830b1",8480:"d5af1612",8610:"6875c492",8658:"f652595b",8675:"e21e4dc9",8801:"252d437a",9003:"925b3f96",9035:"4c9e35b1",9215:"a2f2046f",9514:"1be78505",9642:"7661071f",9700:"e16015ca",9723:"4cb1017f",9793:"a6f6cfa4",9823:"d2f38757",9833:"03101e41"}[e]||e)+"."+{53:"61fa04c9",110:"20eb67b2",398:"586825c7",453:"049da2af",510:"c2eac360",533:"87c7428d",618:"229deafb",820:"e0866ef6",933:"eecc3573",1155:"7850f485",1477:"360fc37d",1633:"9246fd83",1713:"302d47eb",1906:"34ee5c62",1914:"6f3a6b21",2111:"f22cfa03",2189:"a9405cf7",2362:"fe6345ea",2403:"1d371fac",2476:"3a36894c",2535:"1e11420f",3085:"e29bf671",3089:"a491e875",3182:"a8dc49a3",3205:"1fd277fe",3515:"aee38f59",3602:"3c694766",3608:"c41bc3c4",3716:"73cf254b",4013:"ab557569",4195:"b35af3ae",4327:"f265e9cd",4634:"52cf170e",4913:"4f1df8b7",4915:"e38c5fa5",4932:"b267bf39",4958:"1a80822e",4972:"cb4f21fb",4989:"f2816fc4",5247:"ed737b98",5416:"864cf262",5870:"7cdc7487",6048:"acc25360",6103:"e7ee1570",6224:"808567ad",6271:"f661fcae",6584:"8298813b",6780:"19ab39fb",6869:"aae580d4",6938:"3cfd8025",6945:"166dadd9",7178:"b37159c0",7414:"2868a30c",7546:"3cc1c6e3",7707:"ed5f554a",7918:"8d8b61ca",7920:"11339e64",8001:"e9bd0aea",8185:"fe6f5616",8219:"19e0e2fc",8480:"591df0ba",8610:"ce8f6fa8",8658:"ad5b8b16",8675:"5ee4fd2c",8801:"467f3827",8894:"74389eef",9003:"f6448505",9035:"9bd25f1c",9056:"40c86f0d",9215:"5c0631d2",9514:"5c12b6b2",9642:"1187c89c",9700:"02c8357c",9723:"3efdd9dd",9793:"d5d99cf4",9823:"8661eed1",9833:"b4813c32"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="demo:",r.l=(e,a,f,b)=>{if(c[e])c[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/",r.gca=function(e){return 
e={17896441:"7918",66406991:"110","935f2afb":"53","30a24c52":"453","4bb11789":"510",b2b675dd:"533",b0443bc9:"618","912a9b5d":"820","648eb177":"933","67b08d09":"1155",b2f554cd:"1477","031793e1":"1633",a7023ddc:"1713","6854244a":"1906",d9f32620:"1914","8ead34be":"2111","461c4fde":"2189",e273c56f:"2362",ca294e11:"2476","814f3328":"2535","1f391b9e":"3085",a6aa9e1f:"3089","558dbece":"3182",a80da1cf:"3205","0f6a2247":"3515",e6529910:"3602","9e4087bc":"3608",f65f5e37:"3716","01a85c17":"4013",c4f5d8e4:"4195",aae8e891:"4327","0a41237c":"4634",e08b29aa:"4913",eb377b60:"4915","78eb35df":"4932","2ddc79bc":"4958","635db010":"5247",ff24d540:"5416","6d2b80e7":"5870",ccc49370:"6103","6c830cbd":"6224","0f71a4c4":"6271","41e90a9f":"6584","8872db8b":"6869","608ae6a4":"6938","096bfee4":"7178","393be207":"7414","3bbf8e36":"7546",ebcd4f00:"7707","1a4e3797":"7920",d322d808:"8001","24f42c0d":"8185","869830b1":"8219",d5af1612:"8480","6875c492":"8610",f652595b:"8658",e21e4dc9:"8675","252d437a":"8801","925b3f96":"9003","4c9e35b1":"9035",a2f2046f:"9215","1be78505":"9514","7661071f":"9642",e16015ca:"9700","4cb1017f":"9723",a6f6cfa4:"9793",d2f38757:"9823","03101e41":"9833"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var c=r.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(f=>{if(r.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),b=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,c[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,b=f[0],t=f[1],o=f[2],n=0;if(b.some((a=>0!==e[a]))){for(c in t)r.o(t,c)&&(r.m[c]=t[c]);if(o)var i=o(r)}for(a&&a(f);n{"use strict";var e,a,f,c,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var f=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(f.exports,f,f.exports,r),f.loaded=!0,f.exports}r.m=b,r.c=t,e=[],r.O=(a,f,c,d)=>{if(!f){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](f[o])))?f.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[f,c,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,c){if(1&c&&(e=this(e)),8&c)return e;if("object"==typeof e&&e){if(4&c&&e.__esModule)return e;if(16&c&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,f({}),f([]),f(f)];for(var t=2&c&&e;"object"==typeof t&&!~a.indexOf(t);t=f(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var f in 
a)r.o(a,f)&&!r.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,f)=>(r.f[f](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",110:"66406991",453:"30a24c52",510:"4bb11789",533:"b2b675dd",618:"b0443bc9",820:"912a9b5d",933:"648eb177",1155:"67b08d09",1477:"b2f554cd",1633:"031793e1",1713:"a7023ddc",1906:"6854244a",1914:"d9f32620",2111:"8ead34be",2189:"461c4fde",2362:"e273c56f",2476:"ca294e11",2535:"814f3328",3085:"1f391b9e",3089:"a6aa9e1f",3182:"558dbece",3205:"a80da1cf",3515:"0f6a2247",3602:"e6529910",3608:"9e4087bc",3716:"f65f5e37",4013:"01a85c17",4195:"c4f5d8e4",4327:"aae8e891",4634:"0a41237c",4913:"e08b29aa",4915:"eb377b60",4932:"78eb35df",4958:"2ddc79bc",5247:"635db010",5416:"ff24d540",5870:"6d2b80e7",6103:"ccc49370",6224:"6c830cbd",6271:"0f71a4c4",6584:"41e90a9f",6869:"8872db8b",6938:"608ae6a4",7178:"096bfee4",7414:"393be207",7546:"3bbf8e36",7707:"ebcd4f00",7918:"17896441",7920:"1a4e3797",8001:"d322d808",8185:"24f42c0d",8219:"869830b1",8480:"d5af1612",8610:"6875c492",8658:"f652595b",8675:"e21e4dc9",8801:"252d437a",9003:"925b3f96",9035:"4c9e35b1",9215:"a2f2046f",9514:"1be78505",9642:"7661071f",9700:"e16015ca",9723:"4cb1017f",9793:"a6f6cfa4",9823:"d2f38757",9833:"03101e41"}[e]||e)+"."+{53:"61fa04c9",110:"20eb67b2",398:"586825c7",453:"049da2af",510:"c2eac360",533:"87c7428d",618:"229deafb",820:"e0866ef6",933:"eecc3573",1155:"7850f485",1477:"360fc37d",1633:"9246fd83",1713:"302d47eb",1906:"34ee5c62",1914:"6f3a6b21",2111:"f22cfa03",2189:"a9405cf7",2362:"fe6345ea",2403:"1d371fac",2476:"3a36894c",2535:"1e11420f",3085:"e29bf671",3089:"a491e875",3182:"a8dc49a3",3205:"1fd277fe",3515:"aee38f59",3602:"3c694766",3608:"c41bc3c4",3716:"73cf254b",4013:"ab557569",4195:"b35af3ae",4327:"f265e9cd",4634:"91708dd2",4913:"4f1df8b7",4915:"e38c5fa5",4932:"b267bf39",4958:"1a80822e",4972:"cb4f21fb",4989:"f2816fc4",5247:"ed737b98",5416:"864cf262",5870:"7cdc7487",6048:"acc25360",6103:"e7ee1570",6224:"808567ad",6271:"f661fcae",6584:"8298813b",6780:"19ab39fb",6869:"aae580d4",6938:"3cfd8025",6945:"166dadd9",7178:"b37159c0",7414:"2868a30c",7546:"3cc1c6e3",7707:"ed5f554a",7918:"8d8b61ca",7920:"11339e64",8001:"e9bd0aea",8185:"fe6f5616",8219:"19e0e2fc",8480:"591df0ba",8610:"ce8f6fa8",8658:"ad5b8b16",8675:"5ee4fd2c",8801:"467f3827",8894:"74389eef",9003:"f6448505",9035:"9bd25f1c",9056:"40c86f0d",9215:"5c0631d2",9514:"5c12b6b2",9642:"1187c89c",9700:"02c8357c",9723:"3efdd9dd",9793:"d5d99cf4",9823:"8661eed1",9833:"b4813c32"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),c={},d="demo:",r.l=(e,a,f,b)=>{if(c[e])c[e].push(a);else{var t,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=c[e];if(delete c[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/",r.gca=function(e){return 
e={17896441:"7918",66406991:"110","935f2afb":"53","30a24c52":"453","4bb11789":"510",b2b675dd:"533",b0443bc9:"618","912a9b5d":"820","648eb177":"933","67b08d09":"1155",b2f554cd:"1477","031793e1":"1633",a7023ddc:"1713","6854244a":"1906",d9f32620:"1914","8ead34be":"2111","461c4fde":"2189",e273c56f:"2362",ca294e11:"2476","814f3328":"2535","1f391b9e":"3085",a6aa9e1f:"3089","558dbece":"3182",a80da1cf:"3205","0f6a2247":"3515",e6529910:"3602","9e4087bc":"3608",f65f5e37:"3716","01a85c17":"4013",c4f5d8e4:"4195",aae8e891:"4327","0a41237c":"4634",e08b29aa:"4913",eb377b60:"4915","78eb35df":"4932","2ddc79bc":"4958","635db010":"5247",ff24d540:"5416","6d2b80e7":"5870",ccc49370:"6103","6c830cbd":"6224","0f71a4c4":"6271","41e90a9f":"6584","8872db8b":"6869","608ae6a4":"6938","096bfee4":"7178","393be207":"7414","3bbf8e36":"7546",ebcd4f00:"7707","1a4e3797":"7920",d322d808:"8001","24f42c0d":"8185","869830b1":"8219",d5af1612:"8480","6875c492":"8610",f652595b:"8658",e21e4dc9:"8675","252d437a":"8801","925b3f96":"9003","4c9e35b1":"9035",a2f2046f:"9215","1be78505":"9514","7661071f":"9642",e16015ca:"9700","4cb1017f":"9723",a6f6cfa4:"9793",d2f38757:"9823","03101e41":"9833"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,f)=>{var c=r.o(e,a)?e[a]:void 0;if(0!==c)if(c)f.push(c[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((f,d)=>c=e[a]=[f,d]));f.push(c[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(f=>{if(r.o(e,a)&&(0!==(c=e[a])&&(e[a]=void 0),c)){var d=f&&("load"===f.type?"missing":f.type),b=f&&f.target&&f.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": "+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,c[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,f)=>{var c,d,b=f[0],t=f[1],o=f[2],n=0;if(b.some((a=>0!==e[a]))){for(c in t)r.o(t,c)&&(r.m[c]=t[c]);if(o)var i=o(r)}for(a&&a(f);n - + - + \ No newline at end of file diff --git a/blog/first-blog-post/index.html b/blog/first-blog-post/index.html index 522c9253..6e9dde75 100644 --- a/blog/first-blog-post/index.html +++ b/blog/first-blog-post/index.html @@ -16,13 +16,13 @@ - +

First Blog Post

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 851afb9a..ab733835 100644 --- a/blog/index.html +++ b/blog/index.html @@ -16,13 +16,13 @@ - +

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/docusaurus/index.html b/blog/tags/docusaurus/index.html index 39c2d454..5e81b325 100644 --- a/blog/tags/docusaurus/index.html +++ b/blog/tags/docusaurus/index.html @@ -16,13 +16,13 @@ - +

2 posts tagged with "docusaurus"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/facebook/index.html b/blog/tags/facebook/index.html index a203c728..14953b73 100644 --- a/blog/tags/facebook/index.html +++ b/blog/tags/facebook/index.html @@ -16,13 +16,13 @@ - +

One post tagged with "facebook"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hello/index.html b/blog/tags/hello/index.html index e470d146..2bb1d38a 100644 --- a/blog/tags/hello/index.html +++ b/blog/tags/hello/index.html @@ -16,13 +16,13 @@ - +

One post tagged with "hello"

View All Tags

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/blog/tags/hola/index.html b/blog/tags/hola/index.html index 75eafc9e..13aeae4a 100644 --- a/blog/tags/hola/index.html +++ b/blog/tags/hola/index.html @@ -16,13 +16,13 @@ - +

One post tagged with "hola"

View All Tags

· One min read
Gao Wei

Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

- + \ No newline at end of file diff --git a/blog/tags/index.html b/blog/tags/index.html index 69fe44c3..ad136e34 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/blog/welcome/index.html b/blog/welcome/index.html index 391ed764..0e56f686 100644 --- a/blog/welcome/index.html +++ b/blog/welcome/index.html @@ -16,13 +16,13 @@ - +

Welcome

· One min read
Sébastien Lorber
Yangshun Tay

Docusaurus blogging features are powered by the blog plugin.

Simply add Markdown files (or folders) to the blog directory.

Regular blog authors can be added to authors.yml.

The blog post date can be extracted from filenames, such as:

  • 2019-05-30-welcome.md
  • 2019-05-30-welcome/index.md

A blog post folder can be convenient to co-locate blog post images:

Docusaurus Plushie

The blog supports tags as well!

And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

- + \ No newline at end of file diff --git a/docs/Colossal-Auto/feature/auto_checkpoint/index.html b/docs/Colossal-Auto/feature/auto_checkpoint/index.html index 67edf544..c0160c2a 100644 --- a/docs/Colossal-Auto/feature/auto_checkpoint/index.html +++ b/docs/Colossal-Auto/feature/auto_checkpoint/index.html @@ -16,13 +16,13 @@ - +
- + \ No newline at end of file diff --git a/docs/Colossal-Auto/feature/device_mesh/index.html b/docs/Colossal-Auto/feature/device_mesh/index.html index 028697c6..3c67f9b2 100644 --- a/docs/Colossal-Auto/feature/device_mesh/index.html +++ b/docs/Colossal-Auto/feature/device_mesh/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/docs/Colossal-Auto/feature/layout_converting_management/index.html b/docs/Colossal-Auto/feature/layout_converting_management/index.html index 72e900a9..bd15c9e5 100644 --- a/docs/Colossal-Auto/feature/layout_converting_management/index.html +++ b/docs/Colossal-Auto/feature/layout_converting_management/index.html @@ -16,13 +16,13 @@ - +

layout_converting_management

When a tensor is required to have different sharding specs in upstream and downstream operators, we need to perform layout conversion, which can also be called redistribution. There are currently two mainstream methods: enumeration conversion and dimension-by-dimension conversion. Enumeration conversion enumerates all possible cases in advance and looks up the corresponding conversion scheme in a table whenever a conversion is required. Its major problem is that as the dimension of the device mesh increases, the number of cases explodes, so the lookup table becomes impossible to build. Dimension-by-dimension conversion takes the sharding spec of an N-D tensor, X0X1...Xn-1, and converts it dimension by dimension from 0 to n-1; no matter how many dimensions the device mesh and the tensor have, a feasible conversion operation sequence is generated in a single scan, but the resulting conversion can be very inefficient.

Therefore, we propose a novel heuristic-search algorithm to solve the sharding-spec conversion problem. It can be described as follows:

  1. Generate all one-step transform sharding specs from the source spec.
  2. Among these one-step transform sharding specs, use the similarity function to select the one with the "least difference" from the target as the next source sharding spec, and record it in the transform path. If one of the one-step transforms equals the target sharding spec, the algorithm ends.
  3. Repeat steps 1 and 2 until the target sharding spec is reached.
Source/target sharding spec pairs | All gather | Shard | All to All | One step transform | Best sharding spec | Transform path
S01RR, RS01R | S0RR | - | S0RS1, S0S1R | S0RR, S0RS1, S0S1R | S0RR | S0RR
S0RR, RS01R | RRR | S0S1R, S0RS1 | RS0R, RRS0 | RRR, S0S1R, S0RS1, RS0R, RRS0 | RS0R | S0RR -> RS0R
RS0R, RS01R | RRR | RS01R, S1S0R, RS0S1 | S0RR, RRS0 | RRR, RS01R, S1S0R, RS0S1, S0RR, RRS0 | RS01R | S0RR -> RS0R -> RS01R
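To make the procedure concrete, below is a small, self-contained sketch of the greedy search. It is illustrative only: the tuple-based spec encoding, the toy one-step transform generator (which ignores multi-axis shards such as S01 for brevity) and the similarity function are simplified stand-ins, not Colossal-AI's actual shape-consistency manager.

# A sharding spec is modeled as a tuple of per-dimension shardings, e.g. ("S0", "R", "R") for S0RR.
def one_step_transforms(spec):
    """All specs reachable from `spec` by one all-gather, shard, or all-to-all."""
    results, dims = set(), len(spec)
    for i in range(dims):
        if spec[i] != "R":                              # all-gather dimension i
            results.add(spec[:i] + ("R",) + spec[i + 1:])
        if spec[i] == "R":                              # shard dimension i along a free mesh axis
            for axis in ("S0", "S1"):
                if axis not in spec:
                    results.add(spec[:i] + (axis,) + spec[i + 1:])
        for j in range(dims):                           # all-to-all: move the shard from dim i to dim j
            if i != j and spec[i] != "R" and spec[j] == "R":
                moved = list(spec)
                moved[i], moved[j] = "R", spec[i]
                results.add(tuple(moved))
    return results

def similarity(spec, target):
    """Count dimensions already sharded like the target; 'least difference' = highest similarity."""
    return sum(a == b for a, b in zip(spec, target))

def convert_layout(source, target, max_steps=10):
    path, current = [source], source
    for _ in range(max_steps):
        if current == target:
            break
        candidates = one_step_transforms(current)       # step 1
        current = target if target in candidates else max(candidates, key=lambda s: similarity(s, target))  # step 2
        path.append(current)                            # step 3: repeat
    return path

print(convert_layout(("S0", "R", "R"), ("R", "S0", "R")))   # e.g. S0RR -> RS0R in one step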
- + \ No newline at end of file diff --git a/docs/Colossal-Auto/feature/tracer/index.html b/docs/Colossal-Auto/feature/tracer/index.html index b7bff36a..b6ed246d 100644 --- a/docs/Colossal-Auto/feature/tracer/index.html +++ b/docs/Colossal-Auto/feature/tracer/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/docs/Colossal-Auto/get_started/installation/index.html b/docs/Colossal-Auto/get_started/installation/index.html index 2bf3be9d..b98de7d4 100644 --- a/docs/Colossal-Auto/get_started/installation/index.html +++ b/docs/Colossal-Auto/get_started/installation/index.html @@ -16,13 +16,13 @@ - +

Setup

Announcement

Our auto-parallel feature is an alpha version and is still under development. We will keep updating it to make it more stable. If you encounter any problem, please feel free to raise an issue.

Requirements

We need some extra dependencies to support auto-parallel. Please install them before using auto-parallel.

Install PyTorch

We only support PyTorch 1.12 for now; other versions have not been tested. We will support more versions in the future.

#conda
conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch
#pip
pip install torch==1.12.0+cu113 torchvision==0.13.0+cu113 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu113

Install pulp and coin-or-cbc

pip install pulp
conda install -c conda-forge coin-or-cbc
- + \ No newline at end of file diff --git a/docs/Colossal-Auto/get_started/introduction/index.html b/docs/Colossal-Auto/get_started/introduction/index.html index fc6892d9..0aeda721 100644 --- a/docs/Colossal-Auto/get_started/introduction/index.html +++ b/docs/Colossal-Auto/get_started/introduction/index.html @@ -16,7 +16,7 @@ - + @@ -25,7 +25,7 @@ Colossal-AI's device-mesh uses cluster performance metrics and profiling results to estimate the time consumption of different communication operators. This helps Colossal-AI optimize communication between nodes and improve overall system efficiency. Colossal-AI's shape-consistency manager uses a greedy search algorithm to find relatively efficient ways to transform tensors between different sharding-specs, rather than simply transforming dimensions one by one. This can lead to more efficient and effective transformations. The integration of all-to-all operations in Colossal-AI increases the scalability of the system by enabling more efficient communication between nodes. This is especially useful for large-scale machine learning tasks that require the transfer of large amounts of data between nodes.

- + \ No newline at end of file diff --git a/docs/Colossal-Auto/get_started/run_demo/index.html b/docs/Colossal-Auto/get_started/run_demo/index.html index 96d5a7d7..58d1ccbf 100644 --- a/docs/Colossal-Auto/get_started/run_demo/index.html +++ b/docs/Colossal-Auto/get_started/run_demo/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

Quick Demo

Colossal-Auto simplifies the process of deploying large-scale machine learning models for AI developers. Compared to other solutions that require manual configuration of complex parallel policies and model modification, Colossal-Auto only requires one line of code from the user, along with cluster information and model configurations, to enable distributed training. Quick demos showing how to use Colossal-Auto are given below.

1. Basic usage

Colossal-Auto can be used to find a hybrid SPMD parallel strategy, including data and tensor parallelism (i.e., 1D, 2D, sequential), for each operation. You can follow the GPT example. Detailed instructions can be found in its README.md.

2. Integration with activation checkpoint

Colossal-Auto's automatic search function for activation checkpointing finds the most efficient checkpoint within a given memory budget, rather than just aiming for maximum memory compression. To avoid a lengthy search process for an optimal activation checkpoint, Colossal-Auto has implemented a two-stage search process. This allows the system to find a feasible distributed training solution in a reasonable amount of time while still benefiting from activation checkpointing for memory management. The integration of activation checkpointing in Colossal-AI improves the efficiency and effectiveness of large model training. You can follow the Resnet example. Detailed instructions can be found in its README.md.

- + \ No newline at end of file diff --git a/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html b/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html index 3cadd29f..33bb9434 100644 --- a/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html +++ b/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html @@ -16,7 +16,7 @@ - + @@ -49,7 +49,7 @@ In colossalai.initialize, we will automatically create a MoeGradientHandler object to process gradients. You can find more information about the handler MoeGradientHandler in colossal directory.

The loss criterion should be wrapped by MoeLoss to add the auxiliary loss of MoE, as in the example below.

criterion = MoeLoss(
    aux_weight=0.01,
    loss_fn=nn.CrossEntropyLoss,
    label_smoothing=0.1
)

Finally, just use the trainer or engine in Colossal-AI to run your training, as sketched below. Otherwise, you will have to take care of the gradients yourself.
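For reference, here is a minimal sketch of such a loop built on the legacy colossalai.initialize / engine interface mentioned above. Treat it as an illustration rather than the definitive API: the exact signatures may differ between Colossal-AI versions, and it assumes the model, optimizer, criterion and train_dataloader defined earlier in this tutorial.

# Illustrative only: the engine created by colossalai.initialize applies the MoeGradientHandler
# during the backward pass, so no manual gradient handling is required.
engine, train_dataloader, _, _ = colossalai.initialize(model=model,
                                                       optimizer=optimizer,
                                                       criterion=criterion,
                                                       train_dataloader=train_dataloader)
engine.train()
for data, label in train_dataloader:
    data, label = data.cuda(), label.cuda()
    engine.zero_grad()
    output = engine(data)
    loss = engine.criterion(output, label)   # MoeLoss adds the MoE auxiliary loss here
    engine.backward(loss)
    engine.step()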

- + \ No newline at end of file diff --git a/docs/advanced_tutorials/meet_gemini/index.html b/docs/advanced_tutorials/meet_gemini/index.html index 4c94fd35..0f51e9cb 100644 --- a/docs/advanced_tutorials/meet_gemini/index.html +++ b/docs/advanced_tutorials/meet_gemini/index.html @@ -16,14 +16,14 @@ - +

Meet Gemini: The Heterogeneous Memory Manager of Colossal-AI

Author: Jiarui Fang, Yang You

Brief

When you only have a few GPUs for large model training tasks, heterogeneous training is the most effective approach. By accommodating model data in both CPU and GPU and moving the data to the computing device only when necessary, it can break through the GPU memory wall by using GPU memory and CPU memory (composed of CPU DRAM or NVMe SSD) together. Moreover, the model scale can be further increased by combining heterogeneous training with other parallel approaches, such as data parallelism, tensor parallelism and pipeline parallelism. We now describe the design details of Gemini, the heterogeneous memory space manager of Colossal-AI. Its idea comes from PatrickStar, which has been adapted to Colossal-AI.

Usage

At present, Gemini is compatible with the ZeRO parallel mode, and using it is really simple: inject the features of GeminiPlugin into your training components with the booster. For more instructions on the booster, please refer to the usage of booster.

from torchvision.models import resnet18
from colossalai.booster import Booster
from colossalai.zero import ColoInitContext
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, max_norm=1.0, initial_scale=2**5)
booster = Booster(plugin=plugin)

ctx = ColoInitContext()
with ctx:
    model = resnet18()
optimizer = HybridAdam(model.parameters(), lr=1e-3)
criterion = lambda x: x.mean()
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

Note that Gemini should, in principle, be decoupled from parallel strategies such as tensor parallelism, data parallelism, pipeline parallelism and ZeRO. However, Colossal-AI currently requires Gemini to be used together with ZeRO. Although the two are not inherently coupled, we will improve this in the near future.

Concepts

OP (OPerator): an operation of a neural network layer, such as linear or LayerNorm. An operator can be a forward-propagation calculation or a back-propagation calculation.

Neural networks must manage two types of data during training. Model data consists of parameters, gradients and optimizer states; its scale is determined by the definition of the model structure.

Non-model data mainly consists of the intermediate tensors generated by operators and the operators' temporary variables. Non-model data changes dynamically with the training configuration, such as the batch size. Model data and non-model data compete with each other for GPU memory.
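As a rough, back-of-the-envelope illustration of how model data scales with the model definition, consider fp16 mixed-precision training with an Adam-style optimizer that keeps fp32 master weights, momentum and variance. This is generic accounting under that assumption, not a Gemini-specific formula.

def model_data_bytes(n_params: int) -> int:
    # fp16 parameters + fp16 gradients + fp32 master weights, momentum and variance
    return n_params * (2 + 2 + 4 + 4 + 4)

print(model_data_bytes(10_000_000_000) / 1024**3)   # roughly 149 GiB of model data for a 10B-parameter model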

Design Details

In some solutions, such as the ZeRO-Offload adopted by DeepSpeed, model data is statically partitioned between CPU and GPU memory, and the memory layout stays fixed across different training configurations. As shown on the left of the figure below, when the GPU memory is insufficient to meet its corresponding model data requirement, the system crashes even if there is still available memory on the CPU. Colossal-AI, in contrast, can complete the training by moving part of the model data to the CPU.

Comparison of the memory management of Zero-Offload and Gemini

Colossal-AI designed Gemini, named after the twin stars, to manage the memory space of CPU and GPU efficiently. It makes tensors dynamically distributed across the CPU-GPU storage space during training, so that model training can break through the GPU memory wall. The memory manager consists of two parts: MemStatsCollector (MSC) and StatefulTensorMgr (STM).

We take advantage of the iterative characteristics of the deep learning network training process. We divide iterations into two stages: warmup and non-warmup. One or several iterative steps at the beginning belong to the warmup stage, and the other iterative steps belong to the non-warmup stage. In the warmup stage, we collect information for the MSC, while in the non-warmup stage, STM gets the information collected by the MSC to move the tensor, so as to minimize the CPU-GPU data movement volume.

The workflow of Gemini during warmup and non-warmup phase

StatefulTensorMgr

STM manages the information of all model data tensors. In the process of model construction, Colossal-AI registers all model data tensors with STM. The memory manager marks each tensor with state information. The state set includes three types: HOLD, COMPUTE and FREE. The functions of STM are as follows:

Query memory usage: by traversing the locations of all tensors in the heterogeneous space, obtain the CPU and GPU memory occupied by model data.

Transition tensor state: the manager marks a tensor as COMPUTE before it participates in an operator's calculation, and as HOLD after the calculation. A tensor is marked FREE when it is no longer in use.

Adjust tensor position: the tensor manager ensures that tensors in the COMPUTE state are placed on the computing device. If the storage space of the computing device is insufficient, some tensors in the HOLD state must be moved to other devices for storage. The tensor eviction strategy relies on information from the MSC, which is introduced later.
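The three states and the transitions around an operator can be pictured with a small sketch. It is purely illustrative; the names and structure below are simplified stand-ins, not Colossal-AI's actual StatefulTensorMgr.

from enum import Enum, auto

class TensorState(Enum):
    HOLD = auto()     # materialized but not needed by the operator that is about to run
    COMPUTE = auto()  # needed by the upcoming operator; must sit on the computing device
    FREE = auto()     # no longer in use

class StatefulTensor:
    def __init__(self, payload, device="cpu"):
        self.payload, self.device, self.state = payload, device, TensorState.HOLD

def before_op(op_inputs, compute_device="cuda"):
    for t in op_inputs:
        t.state = TensorState.COMPUTE
        t.device = compute_device   # a real manager would first evict HOLD tensors if space runs out

def after_op(op_inputs, no_longer_used=()):
    for t in op_inputs:
        t.state = TensorState.FREE if t in no_longer_used else TensorState.HOLD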

MemStatsCollector

In the warmup stage, the memory statistics collector monitors the memory usage of model data and non-model data on CPU and GPU for reference in the non-warmup stage. We can obtain the memory usage of model data at a given moment by querying STM. However, the memory usage of non-model data is difficult to obtain: because the life cycle of non-model data is not managed by users, existing deep learning frameworks do not expose an interface for tracking it. MSC therefore obtains the CPU and GPU memory used by non-model data through sampling during the warmup stage. The specific method is as follows:

We trigger the memory sampling operation at the beginning and end of each operator. We call such a time point a sampling moment, and the time between two sampling moments a period. The calculation process inside a period is a black box: due to the possible allocation of temporary buffers, the memory usage is very complex. However, we can accurately obtain the maximum memory usage of the system during a period. The non-model data usage can then be obtained as the maximum system memory usage between two sampling moments minus the model data memory usage.
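In other words, the accounting is a single subtraction (variable names below are illustrative):

def non_model_data_peak(max_system_mem_in_period: int, model_data_mem_at_sampling: int) -> int:
    # peak non-model usage in a period = peak system usage in that period minus model data usage
    return max_system_mem_in_period - model_data_mem_at_sampling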

How do we choose the sampling moment? We sample before the model data layout adjustment of preOp, as shown in the figure below: we use the system memory usage of the previous period and the model data memory usage of the next period. Parallel strategies complicate the work of MSC. For example, for ZeRO or tensor parallelism, model data needs to be gathered before the OP calculation, which brings additional memory requirements. Therefore, we sample the system memory before the model data changes, so that within one period MSC captures the model data memory change introduced by preOp. For example, in period 2-3, we account for the memory changes brought by tensor gather and shard.

Although the sampling moment could be placed elsewhere, for example to exclude the memory change of the gather buffer, doing so causes trouble. Op implementations differ across parallel modes: for a Linear Op, the gather buffer in tensor parallelism is allocated inside the Op, whereas for ZeRO it is allocated in preOp. Sampling at the beginning of preOp helps to unify the two situations.

workflow

Tensor Eviction Strategy

The important duty of MSC is to adjust the tensor layout position. For example, at S2 in the figure above, we reduce the model data on the device so as to meet the peak memory requirement calculated for period 2-3.

In the warmup stage, since we have not yet completed a full iteration, we do not know the actual memory usage. At this time, we limit the upper bound of the model data memory usage; for example, only 30% of the GPU memory can be used. This ensures that we can successfully complete the warmup stage.

In the non-warmup stage, we use the non-model data memory information collected during warmup to reserve the peak memory required by the computing device for the next period, which requires moving some model tensors out. To avoid frequently moving the same tensor back and forth between CPU and GPU, which causes a phenomenon similar to cache thrashing, we exploit the iterative nature of DNN training and design an OPT-style cache eviction strategy. Specifically, during the warmup stage we record the sampling moment at which each tensor is required on a computing device. When we need to evict some HOLD tensors, we choose as the victim the tensor that will be needed on this device at the latest moment.
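A minimal sketch of that eviction rule is shown below. It is illustrative only, with simplified bookkeeping; the real tensor manager tracks this information per device inside Gemini.

from collections import defaultdict

next_use = defaultdict(list)   # (tensor_id, device) -> sampling moments recorded during warmup

def record_use(tensor_id, device, moment):
    next_use[(tensor_id, device)].append(moment)

def pick_victim(hold_tensor_ids, device, current_moment):
    """Evict the HOLD tensor that will be needed on this device at the latest moment (OPT-style)."""
    def next_needed(tensor_id):
        future = [m for m in next_use[(tensor_id, device)] if m >= current_moment]
        return min(future) if future else float("inf")   # never needed again -> ideal victim
    return max(hold_tensor_ids, key=next_needed)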

- + \ No newline at end of file diff --git a/docs/advanced_tutorials/opt_service/index.html b/docs/advanced_tutorials/opt_service/index.html index 906a280d..94384d47 100644 --- a/docs/advanced_tutorials/opt_service/index.html +++ b/docs/advanced_tutorials/opt_service/index.html @@ -16,7 +16,7 @@ - + @@ -26,7 +26,7 @@ The entrance of the service is a bash script server.sh. The config of the service is at opt_config.py, which defines the model type, the checkpoint file path, the parallel strategy, and http settings. You can adapt it for your own case. For example, set the model class as opt_125M and set the correct checkpoint path as follows.

model_class = opt_125M
checkpoint = 'your_file_path'

Set the tensor parallelism degree to the number of your GPUs.

tp_init_size = #gpu

Now, we can launch a service using docker. You can map the path of the checkpoint and directory containing configs to local disk path /model_checkpoint and /config.

export CHECKPOINT_DIR="your_opt_checkpoint_path"
# the ${CONFIG_DIR} must contain a server.sh file as the entry of service
export CONFIG_DIR="config_file_path"

docker run --gpus all --rm -it -p 8020:8020 -v ${CHECKPOINT_DIR}:/model_checkpoint -v ${CONFIG_DIR}:/config --ipc=host energonai:latest

Then open https://[IP-ADDRESS]:8020/docs# in your browser to try it out!

Advanced Features Usage:

  1. Batching Optimization

To use our advanced batching technique, which collects multiple queries into batches for serving, set executor_max_batch_size to the maximum batch size. Note that only decoding tasks with the same top_k, top_p and temperature can be batched together.

executor_max_batch_size = 16

All queries are submitted to a FIFO queue. All consecutive queries whose number of decoding steps is less than or equal to that of the head of the queue can be batched together, and left padding is applied to ensure correctness, as sketched below. executor_max_batch_size should not be too large, so that batching does not increase latency. For opt-30b, executor_max_batch_size=16 may be a good choice, while for opt-175b, executor_max_batch_size=4 may be better.
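A simplified sketch of that batching rule is given below. It is illustrative pseudocode rather than the actual serving scheduler; the Query fields are hypothetical names for the per-request bookkeeping.

from collections import deque
from dataclasses import dataclass

@dataclass
class Query:
    decode_steps: int      # remaining decoding steps for this request
    sampling_cfg: tuple    # (top_k, top_p, temperature)

def form_batch(queue: deque, max_batch_size: int):
    """Greedy FIFO batching: the head of the queue sets the step budget for the batch."""
    head = queue.popleft()
    batch = [head]
    while queue and len(batch) < max_batch_size:
        nxt = queue[0]
        # only consecutive queries needing no more decoding steps than the head,
        # with the same top_k / top_p / temperature, may join the batch
        if nxt.decode_steps <= head.decode_steps and nxt.sampling_cfg == head.sampling_cfg:
            batch.append(queue.popleft())
        else:
            break
    return batch   # inputs are left-padded to a common length before the forward pass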

  2. Cache Optimization

You can cache several recently served query results for each independent serving process. Set cache_size and cache_list_size in config.py. cache_size is the number of queries cached, and cache_list_size is the number of results stored for each query; a random cached result is returned. When the cache is full, LRU is applied to evict cached queries. cache_size=0 means no cache is applied.

cache_size = 50
cache_list_size = 2
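A toy illustration of the cache behaviour described above (not the actual serving implementation; it simply mirrors the cache_size / cache_list_size semantics, random result selection and LRU eviction):

import random
from collections import OrderedDict

class QueryCache:
    def __init__(self, cache_size=50, cache_list_size=2):
        self.cache_size, self.cache_list_size = cache_size, cache_list_size
        self._store = OrderedDict()              # query -> list of cached results

    def put(self, query, result):
        if self.cache_size == 0:                 # cache_size=0 means no cache is applied
            return
        results = self._store.setdefault(query, [])
        if len(results) < self.cache_list_size:
            results.append(result)
        self._store.move_to_end(query)           # mark as most recently used
        if len(self._store) > self.cache_size:
            self._store.popitem(last=False)      # evict the least recently used query

    def get(self, query):
        if query not in self._store:
            return None
        self._store.move_to_end(query)
        return random.choice(self._store[query]) # a random cached result is returned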
- + \ No newline at end of file diff --git a/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html b/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html index 34dc8cb8..0c41fd53 100644 --- a/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html +++ b/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html @@ -16,14 +16,14 @@ - +

Fine-tune GPT-2 Using Hybrid Parallelism

Author: Hongxin Liu, Yongbin Li, Mingyan Jiang

Prerequisite:

Example Code

Related Paper

Introduction

In the previous tutorial, we introduced how to train ViT with pipeline parallelism. In this tutorial, you will learn a more complex scenario: fine-tuning GPT-2 with hybrid parallelism. In this case, GPT-2 is so large that even CPU memory cannot hold it. Therefore, you must split the model.

Table of contents

In this tutorial we will cover:

  1. Initialize the hybrid parallelism plugin.
  2. Define the training components of the GPT-2 model.
  3. Boost the GPT-2 model with HybridParallelPlugin.
  4. Train GPT-2 using hybrid parallelism.

Import libraries

from functools import partial
from typing import Callable, List, Optional, Union

import datasets
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer, GPT2ForSequenceClassification, get_linear_schedule_with_warmup

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
from colossalai.cluster import DistCoordinator
from colossalai.nn.optimizer import HybridAdam
from colossalai.utils import get_current_device

Define Plugin

Create a HybridParallelPlugin object and specify the desired parallelism strategies to be used. In this example, both pipeline parallelism and ZeRO-1 are used simultaneously.

plugin = HybridParallelPlugin(
tp_size=1,
pp_size=2,
num_microbatches=None,
microbatch_size=1,
enable_all_optimization=True,
zero_stage=1,
precision="fp16",
initial_scale=1,
)

Define GPT-2's Training Components

Before using hybrid parallelism, you need to define the components used for training.

Define hyperparameters

NUM_EPOCHS = 3
BATCH_SIZE = 32
LEARNING_RATE = 2.4e-5
WEIGHT_DECAY = 0.01
WARMUP_FRACTION = 0.1

Next, we create a distributed environment.

# Launch ColossalAI
colossalai.launch_from_torch(config={}, seed=42)
coordinator = DistCoordinator()

Then prepare the dataset. You can use plugin.prepare_dataloader to generate a dataloader, or customize your own dataloader.

def tokenize_batch(batch, tokenizer: Optional[AutoTokenizer] = None, max_length: int = 2048):
    texts = [sample["sentence1"] + sample["sentence2"] for sample in batch]
    data = tokenizer(texts, return_tensors="pt", padding="max_length", truncation=True, max_length=max_length)
    data = {k: v.cuda() for k, v in data.items()}
    data["labels"] = data["input_ids"].clone()
    return data

tokenizer = AutoTokenizer.from_pretrained("gpt2")
dataset = datasets.load_dataset("glue", "mrpc")
train_dataloader = plugin.prepare_dataloader(
dataset["train"],
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
collate_fn=partial(tokenize_batch, tokenizer=tokenizer, max_length=512),
)

Prepare gpt-2 model

cfg = AutoConfig.from_pretrained("gpt2", num_labels=2)
model = GPT2ForSequenceClassification.from_pretrained("gpt2", config=cfg).cuda()

prepare optimizer

lr = LEARNING_RATE * coordinator.world_size
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": WEIGHT_DECAY,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)

Prepare the lr_scheduler and criterion. It's important to note that when hybrid parallelism with pipeline parallelism is used, a criterion function must also be defined. This function should take the input and output of the model's forward pass as parameters and return the loss.

# lr scheduler
total_steps = len(train_dataloader) * NUM_EPOCHS
num_warmup_steps = int(WARMUP_FRACTION * total_steps)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=num_warmup_steps,
num_training_steps=total_steps,
)

def _criterion(outputs, inputs):
    return outputs.loss

Boost the GPT-2 Model

Define a booster with HybridParallelPlugin. Based on the configured plugin parameters, the booster will inject one or more parallel strategies into the model. In this example, pipeline parallelism, zero1, and mixed-precision training optimizations are utilized.

booster = Booster(plugin=plugin)

Boost these components with the defined booster.

model, optimizer, _criterion, _, lr_scheduler = booster.boost(
model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler
)

Training GPT-2 using hybrid parallelism

We've explained above how to inject various parallelism features into the model and its training components using the Booster and HybridParallelPlugin. Now we can start model training. Define a training function; when pipeline parallelism is used, you need to call booster.execute_pipeline to schedule the stages of model training.

def move_to_cuda(batch):
    # small helper used below to move every tensor in a batch dict to the GPU
    return {k: v.cuda() for k, v in batch.items()}


def train_epoch(
    epoch: int,
    model: nn.Module,
    optimizer: Optimizer,
    _criterion: Callable,
    lr_scheduler: LRScheduler,
    train_dataloader: DataLoader,
    booster: Booster,
    coordinator: DistCoordinator,
):
    use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
    is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
    print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)
    total_step = len(train_dataloader)

    model.train()
    optimizer.zero_grad()
    train_dataloader_iter = iter(train_dataloader)
    with tqdm(
        range(total_step),
        desc=f"Epoch [{epoch + 1}/{NUM_EPOCHS}]",
        disable=not print_flag,
    ) as pbar:
        for _ in pbar:
            if use_pipeline:
                # Forward and backward passes are scheduled together by the pipeline scheduler
                outputs = booster.execute_pipeline(
                    train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True
                )
                if is_pp_last_stage:
                    loss = outputs["loss"]
                    pbar.set_postfix({"loss": loss.item()})
            else:
                data = next(train_dataloader_iter)
                data = move_to_cuda(data)
                outputs = model(**data)
                loss = _criterion(outputs, None)
                # Backward
                booster.backward(loss, optimizer)
                pbar.set_postfix({"loss": loss.item()})

            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()

Training the gpt-2 model

for epoch in range(NUM_EPOCHS):
train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)
• communication_dtype (torch.dtype, optional): The data type for communication when using ZeRO. If not specified, the data type of the parameters will be used. Defaults to None.
• overlap_communication (bool, optional): Whether to overlap communication and computation when using ZeRO. Defaults to True.

Example of a plugin for ZeRO-1:

plugin = HybridParallelPlugin(
tp_size=1,
pp_size=1,
zero_stage=1,
cpu_offload=True,
precision="fp16",
initial_scale=1,
)

Hybrid Parallelism

You can refer to the above-mentioned strategies to customize an appropriate hybrid parallelism strategy, and use this plugin to define a booster.

plugin = HybridParallelPlugin(
tp_size=TP_SIZE,
pp_size=PP_SIZE,
num_microbatches=None,
microbatch_size=1,
enable_all_optimization=True,
precision="fp16",
initial_scale=1,
)
booster = Booster(plugin=plugin)

Next, we use booster.boost to inject the features encapsulated by the plugin into the model training components.

model, optimizer, _criterion, train_dataloader, lr_scheduler = booster.boost(
model=model, optimizer=optimizer, criterion=criterion, dataloader=train_dataloader, lr_scheduler=lr_scheduler
)

Train ViT using hybrid parallelism.

Finally, we can use the hybrid parallelism strategy to train the model. Let's first define a training function that describes the training process. It's important to note that if the pipeline parallelism strategy is used, you should call booster.execute_pipeline to perform model training; this function invokes the scheduler to manage the model's forward and backward operations.

def run_forward_backward(
    model: nn.Module,
    optimizer: Optimizer,
    criterion: Callable[[Any, Any], torch.Tensor],
    data_iter: Iterator,
    booster: Booster,
):
    if optimizer is not None:
        optimizer.zero_grad()
    if isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1:
        # run pipeline forward backward when enabling pp in hybrid parallel plugin
        output_dict = booster.execute_pipeline(
            data_iter, model, criterion, optimizer, return_loss=True, return_outputs=True
        )
        loss, outputs = output_dict["loss"], output_dict["outputs"]
    else:
        batch = next(data_iter)
        batch = move_to_cuda(batch, torch.cuda.current_device())  # helper that moves the batch dict to the given device
        outputs = model(**batch)
        loss = criterion(outputs, None)
        if optimizer is not None:
            booster.backward(loss, optimizer)
    return loss, outputs

def train_epoch(
    epoch: int,
    model: nn.Module,
    optimizer: Optimizer,
    criterion: Callable[[Any, Any], torch.Tensor],
    lr_scheduler: LRScheduler,
    dataloader: DataLoader,
    booster: Booster,
    coordinator: DistCoordinator,
):
    torch.cuda.synchronize()

    num_steps = len(dataloader)
    data_iter = iter(dataloader)
    enable_pbar = coordinator.is_master()
    if isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1:
        # when using pp, only the last stage of master pipeline (dp_rank and tp_rank are both zero) shows pbar
        tp_rank = dist.get_rank(booster.plugin.tp_group)
        dp_rank = dist.get_rank(booster.plugin.dp_group)
        enable_pbar = tp_rank == 0 and dp_rank == 0 and booster.plugin.stage_manager.is_last_stage()
    model.train()

    with tqdm(range(num_steps), desc=f"Epoch [{epoch + 1}]", disable=not enable_pbar) as pbar:
        for _ in pbar:
            loss, _ = run_forward_backward(model, optimizer, criterion, data_iter, booster)
            optimizer.step()
            lr_scheduler.step()

            # Print batch loss
            if enable_pbar:
                pbar.set_postfix({"loss": loss.item()})

Start training the model.

for epoch in range(NUM_EPOCH):
train_epoch(epoch, model, optimizer, criterion, lr_scheduler, train_dataloader, booster, coordinator)
  • … names to compose the keys in state_dict. Defaults to None.
  • size_per_shard (int, optional) -- Maximum size of checkpoint shard file in MB. This is useful only when shard=True. Defaults to 1024.
  • Description

    Save optimizer to checkpoint.

    Usage

    In a typical workflow, you should launch the distributed environment at the beginning of the training script and first create the objects needed (such as models, optimizers, loss functions, data loaders, etc.), then call booster.boost to inject features into these objects. After that, you can use our booster APIs and these returned objects to continue the rest of your training process.

    A pseudo-code example is like below:

    import torch
    from torch.optim import SGD
    from torchvision.models import resnet18

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin

    def train():
        # launch colossalai (rank, world_size and port are assumed to be provided by the launcher)
        colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')

        # create plugin and objects for training
        plugin = TorchDDPPlugin()
        booster = Booster(plugin=plugin)
        model = resnet18()
        criterion = lambda x: x.mean()
        optimizer = SGD((model.parameters()), lr=0.001)
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)

        # use booster.boost to wrap the training objects
        model, optimizer, criterion, _, scheduler = booster.boost(model, optimizer, criterion, lr_scheduler=scheduler)

        # do training as normal, except that the backward should be called by booster
        x = torch.randn(4, 3, 224, 224)
        x = x.to('cuda')
        output = model(x)
        loss = criterion(output)
        booster.backward(loss, optimizer)
        optimizer.clip_grad_by_norm(1.0)
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

        # checkpointing using booster api
        save_path = "./model"
        booster.save_model(model, save_path, shard=True, size_per_shard=10, use_safetensors=True)

        new_model = resnet18()
        booster.load_model(new_model, save_path)

    For more design details please see this page.

  • lr_scheduler (LRScheduler) -- A lr scheduler boosted by Booster.
  • checkpoint (str) -- Path to the checkpoint. It must be a local file path.
  • Description
    Load lr scheduler from checkpoint.

    The LR scheduler must be boosted by colossalai.booster.Booster before loading, and checkpoint is the local path to the checkpoint file.
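
    A minimal sketch of the corresponding save/load calls (the checkpoint path is illustrative; booster and lr_scheduler come from the usual Booster workflow):

    # save the boosted lr scheduler ...
    booster.save_lr_scheduler(lr_scheduler, "./checkpoints/lr_scheduler.pt")

    # ... and load it back into a scheduler boosted in the same way
    booster.load_lr_scheduler(lr_scheduler, "./checkpoints/lr_scheduler.pt")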

    Checkpoint design

    More details about checkpoint design can be found in our discussion A Unified Checkpoint System Design.



    Hybrid Parallel Plugin

    This plugin implements the combination of various parallel training strategies and optimization tools. The features of HybridParallelPlugin can be generally divided into four parts:

    1. Shardformer: This plugin provides an entrance to Shardformer, which controls model sharding under tensor parallel and pipeline parallel settings. Shardformer also overloads the logic of the model's forward/backward process to ensure the smooth working of tp/pp. In addition, optimization tools including fused normalization, flash attention (xformers), JIT and sequence parallelism are injected into the overloaded forward/backward method by Shardformer. More details can be found in the chapter Shardformer Doc. The diagram below shows the features supported by Shardformer together with the hybrid parallel plugin.

    2. Mixed Precision Training: Support for fp16/bf16 mixed precision training. More details about its arguments configuration can be found in Mixed Precision Training Doc.

    3. Torch DDP: This plugin will automatically adopt PyTorch DDP as the data parallel strategy when pipeline parallelism and ZeRO are not used. More details about its arguments configuration can be found in Pytorch DDP Docs.

    4. Zero: This plugin can adopt ZeRO 1/2 as the data parallel strategy by setting the zero_stage argument to 1 or 2 when initializing the plugin. ZeRO-1 is compatible with the pipeline parallel strategy, while ZeRO-2 is not. More details about its argument configuration can be found in Low Level Zero Plugin.

    ⚠ When using this plugin, only the subset of Huggingface transformers supported by Shardformer are compatible with tensor parallel, pipeline parallel and optimization tools. Mainstream transformers such as Llama 1, Llama 2, OPT, Bloom, Bert and GPT2 etc. are all supported by Shardformer.

    class
     

    colossalai.booster.plugin.HybridParallelPlugin

    (tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1, enable_metadata_cache: bool = True)
    Parameters
    • tp_size (int) -- The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.
    • pp_size (int) -- The number of pipeline stages in pipeline parallelism. Pipeline parallelism will not be used when pp_size is set to 1.
    • precision (str, optional) -- Specifies the precision of parameters during training.
    • custom_policy (Policy, optional) -- Custom policy for Shardformer. Defaults to None.
    • pp_style (str, optional) -- The style for pipeline parallelism. Defaults to '1f1b'.
    • num_model_chunks (int, optional) -- The number of model chunks for interleaved pipeline parallelism. Defaults to 1.
    • enable_metadata_cache (bool, optional) -- Whether to enable metadata cache for pipeline parallelism. Defaults to True.
    Description

    Plugin for Hybrid Parallel Training. Tensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin. The size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size).

    Example
    from colossalai.booster import Booster
     train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)
     booster = Booster(plugin=plugin)
     model, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader)
    function
     

    prepare_dataloader

    (dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs)
    Parameters
    • dataset (torch.utils.data.Dataset) -- The dataset to be loaded.
    • shuffle (bool, optional) -- Whether to shuffle the dataset. Defaults to False.
    • seed (int, optional) -- Random worker seed for sampling, defaults to 1024.
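
    A typical call mirrors the plugin example above (dataset name and batch size are illustrative):

    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8, shuffle=True, drop_last=True)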
    Command Line Tool

    The current command line tools support the following features.

    • verify Colossal-AI build
    • launch distributed jobs
    • tensor parallel micro-benchmarking

    Check Installation

    To verify whether your Colossal-AI is built correctly, you can use the command colossalai check -i. This command will show you information regarding version compatibility and the CUDA extension.

    Check Installation Demo

    Launcher

    To launch distributed jobs on single or multiple nodes, the command colossalai run can be used for process launching. You may refer to Launch Colossal-AI for more details.
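
    For example, a single-node launch might look like the following (script name and GPU count are illustrative):

    # run train.py with 4 processes (GPUs) on the current node
    colossalai run --nproc_per_node 4 train.py

    # run across the nodes listed in a hostfile
    colossalai run --nproc_per_node 4 --hostfile ./hostfile train.py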

    launch_from_openmpi will automatically read the local rank, global rank and world size from the environment variables OMPI_COMM_WORLD_LOCAL_RANK, OMPI_COMM_WORLD_RANK and OMPI_COMM_WORLD_SIZE respectively and use them to start the distributed backend.

    Do this in your train.py:

    colossalai.launch_from_openmpi(
    config=<CONFIG>,
    host=args.host,
    port=args.port
    )

    A sample command to launch multiple processes with OpenMPI would be:

    mpirun --hostfile <my_hostfile> -np <num_process> python train.py --host <node name or ip> --port 29500
    • --hostfile: use this option to specify a list of hosts on which to run
    • --np: set the number of processes (GPUs) to launch in total. For example, if --np 4, 4 python processes will be initialized to run train.py.

    Colossal-AI Overview

    Author: Shenggui Li, Siqi Mai

    About Colossal-AI

    As deep learning models have grown in size, it has become important to shift to a new training paradigm. The traditional training method with no parallelism and optimization has become a thing of the past, and new training methods are the key to making training large-scale models efficient and cost-effective.

    Colossal-AI is designed to be a unified system that provides an integrated set of training skills and utilities to the user. You can find common training utilities such as mixed precision training and gradient accumulation. Besides, we provide an array of parallelism techniques including data, tensor and pipeline parallelism. We optimize tensor parallelism with different multi-dimensional distributed matrix-matrix multiplication algorithms. We also provide different pipeline parallelism methods to allow the user to scale their model across nodes efficiently. More advanced features such as offloading are also covered in detail in this tutorial documentation.

    General Usage

    We aim to make Colossal-AI easy to use and non-intrusive to user code. There is a simple general workflow if you want to use Colossal-AI.

    Workflow
    1. Prepare a configuration file that specifies the features you want to use and your parameters.
    2. Initialize distributed backend with colossalai.launch
    3. Inject the training features into your training components (e.g. model, optimizer) with colossalai.booster.
    4. Run training and testing

    We will cover the whole workflow in the basic tutorials section.

    Future Development

    The Colossal-AI system will be expanded to include more training skills, these new developments may include but are not limited to:

    1. optimization of distributed operations
    2. optimization of training on heterogeneous systems
    3. implementation of training utilities to reduce model size and speed up training while preserving model performance
    4. expansion of existing parallelism methods

    We welcome ideas and contributions from the community, and you can post your ideas for future development in our forum.

    The default process group will then be created. The default process group has a world size of 8 and details are as follows:

    process ID    rank    Node index    GPU index
    0             0       0             0
    1             1       0             1
    2             2       0             2
    3             3       0             3
    4             4       1             0
    5             5       1             1
    6             6       1             2
    7             7       1             3

    We can also create a new process group. This new process group can contain any subset of the processes. For example, we can create one containing only even-number processes, and the details of this new group will be:

    process ID    rank    Node index    GPU index
    0             0       0             0
    2             1       0             2
    4             2       1             0
    6             3       1             2

    Please note that the rank is relative to the process group, and one process can have different ranks in different process groups. The max rank is always the world size of the process group minus 1.

    In the process group, the processes can communicate in two ways:

    1. peer-to-peer: one process sends data to another process
    2. collective: a group of processes perform operations such as scatter, gather, all-reduce, and broadcast together.
    Collective communication, source: PyTorch distributed tutorial
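
    The sketch below uses plain torch.distributed (not a Colossal-AI-specific API) to create the even-rank subgroup from the example above and run a collective all-reduce inside it; it assumes the default process group of world size 8 has already been initialized.

    import torch
    import torch.distributed as dist

    # every process must call new_group, even processes that are not members of it
    even_group = dist.new_group(ranks=[0, 2, 4, 6])

    tensor = torch.ones(1).cuda()
    if dist.get_rank() % 2 == 0:
        # collective communication restricted to the even-rank group
        dist.all_reduce(tensor, op=dist.ReduceOp.SUM, group=even_group)  # tensor becomes 4 on these ranks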
    … typically only has 16 or 32 GB of RAM. This prompts the community to think about why CPU memory is not utilized for distributed training.

    Recent advances rely on CPU and even NVMe disk to train large models. The main idea is to offload tensors back to CPU memory or NVMe disk when they are not used. By using the heterogeneous system architecture, it is possible to accommodate a huge model on a single machine.

    Heterogenous system illustration

    Related paper:

    To calculate

    $$Z = [Y_1 ~ Y_2] \left[\begin{matrix} B_1 \\ B_2 \end{matrix} \right]$$

    we first calculate $Y_iB_i$ on each processor, then use an all-reduce to aggregate the results as $Z=Y_1B_1+Y_2B_2$.

    We also need to note that in the backward pass, the column-parallel linear layer needs to aggregate the gradients of the input tensor $X$, because on each processor $i$ we only have $\dot{X_i}=\dot{Y_i}A_i^T$. Thus, we apply an all-reduce across the processors to get $\dot{X}=\dot{Y}A^T=\dot{Y_1}A_1^T+\dot{Y_2}A_2^T$.
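
    As a rough PyTorch sketch of the row-parallel case above (our own illustration with made-up names, not Colossal-AI's internal implementation): each rank holds one weight slice $B_i$, multiplies it with its local input slice $Y_i$, and all-reduces the partial results.

    import torch
    import torch.distributed as dist
    import torch.nn as nn

    class RowParallelLinear(nn.Module):
        def __init__(self, in_features_per_rank: int, out_features: int):
            super().__init__()
            # each rank stores only its slice B_i of the full weight
            self.weight = nn.Parameter(torch.empty(in_features_per_rank, out_features))
            nn.init.normal_(self.weight, std=0.02)

        def forward(self, y_local: torch.Tensor) -> torch.Tensor:
            z_partial = y_local @ self.weight                 # Y_i B_i on this rank
            dist.all_reduce(z_partial, op=dist.ReduceOp.SUM)  # Z = Y_1 B_1 + Y_2 B_2
            return z_partial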

    Efficiency

    Given $P$ processors, we present the theoretical computation and memory cost, as well as the communication cost based on the ring algorithm, in both the forward and backward pass of 1D tensor parallelism.

    Computation    Memory (parameters)    Memory (activations)    Communication (bandwidth)    Communication (latency)
    $O(1/P)$       $O(1/P)$               $O(1)$                  $O(2(P-1)/P)$                $O(2(P-1))$

    Usage

    1D tensor parallelism is implemented by Shardformer feature in the newest version of ColossalAI. For more details about ideas and usages of Shardformer, please refer to Shardformer Doc.
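
    As a rough sketch, enabling 1D tensor parallelism through the plugin looks like the following (building on the HybridParallelPlugin usage shown elsewhere in this document; the sizes and the pre-built model/optimizer/criterion are illustrative):

    from colossalai.booster import Booster
    from colossalai.booster.plugin import HybridParallelPlugin

    # tp_size=2 shards supported layers across 2 devices (1D tensor parallelism)
    plugin = HybridParallelPlugin(tp_size=2, pp_size=1, precision="fp16")
    booster = Booster(plugin=plugin)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)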

    To evenly distribute the computation and memory load, an efficient 2D tensor parallelism algorithm was introduced based on SUMMA (Scalable Universal Matrix Multiplication Algorithm).

    Let's still take a linear layer $Y = XA$ as an example. Given $P=q\times q$ processors (necessary condition), e.g. $q=2$, we split both the input $X$ and weight $A$ into

    $$\left[\begin{matrix} X_{00} & X_{01} \\ X_{10} & X_{11} \end{matrix} \right] \text{~and~} \left[\begin{matrix} A_{00} & A_{01} \\ A_{10} & A_{11} \end{matrix} \right].$$

    The calculation includes $q$ steps. When $t=1$, $X_{i0}$ is broadcasted in its row, and $A_{0j}$ is broadcasted in its column. So, we have

    $$\left[\begin{matrix} X_{00},A_{00} & X_{00},A_{01} \\ X_{10},A_{00} & X_{10},A_{01} \end{matrix} \right].$$

    Then we multiply $X_{i0}$ and $A_{0j}$ on each processor $(i, j)$ as

    $$\left[\begin{matrix} X_{00}A_{00} & X_{00}A_{01} \\ X_{10}A_{00} & X_{10}A_{01} \end{matrix} \right] \quad (1).$$

    Similarly, when $t=2$, $X_{i1}$ is broadcasted in its row, $A_{1j}$ is broadcasted in its column, and we multiply them as

    $$\left[\begin{matrix} X_{01}A_{10} & X_{01}A_{11} \\ X_{11}A_{10} & X_{11}A_{11} \end{matrix} \right] \quad (2).$$

    By adding $(1)$ and $(2)$ up, we have

    $$Y = XA = \left[\begin{matrix} X_{00}A_{00}+X_{01}A_{10} & X_{00}A_{01}+X_{01}A_{11} \\ X_{10}A_{00}+X_{11}A_{10} & X_{10}A_{01}+X_{11}A_{11} \end{matrix} \right].$$

    Efficiency

    Given $P=q\times q$ processors, we present the theoretical computation and memory cost, as well as the communication cost based on the ring algorithm, in both the forward and backward pass of 2D tensor parallelism.

    Computation    Memory (parameters)    Memory (activations)    Communication (bandwidth)    Communication (latency)
    $O(1/q^2)$     $O(1/q^2)$             $O(1/q^2)$              $O(6(q-1)/q)$                $O(6(q-1))$

    Usage

    Currently the newest version of ColossalAI doesn't support 2D tensor parallelism, but this feature will be integrated into Shardformer in future releases. For more details about ideas and usages of Shardformer, please refer to Shardformer Doc.

    For users of older version of ColossalAI, please refer to ColossalAI-Examples - 2D Tensor Parallelism.

    Given $P=q \times q \times d$ processors (necessary condition), e.g. $q=d=2$, we split the input $X$ into $d\times q$ rows and $q$ columns as

    $$\left[\begin{matrix} X_{00} & X_{01} \\ X_{10} & X_{11} \\ X_{20} & X_{21} \\ X_{30} & X_{31} \end{matrix} \right],$$

    which can be reshaped into $d$ layers as

    $$\left[\begin{matrix} X_{00} & X_{01} \\ X_{10} & X_{11} \end{matrix} \right] \text{~and~} \left[\begin{matrix} X_{20} & X_{21} \\ X_{30} & X_{31} \end{matrix} \right].$$

    Also, the weight $A$ is split into

    $$\left[\begin{matrix} A_{00} & A_{01} \\ A_{10} & A_{11} \end{matrix} \right].$$

    For each layer of $X$, we use the SUMMA algorithm to multiply $X$ and $A$. Then, we have the output

    $$\left[\begin{matrix} Y_{00}=X_{00}A_{00}+X_{01}A_{10} & Y_{01}=X_{00}A_{01}+X_{01}A_{11} \\ Y_{10}=X_{10}A_{00}+X_{11}A_{10} & Y_{11}=X_{10}A_{01}+X_{11}A_{11} \end{matrix} \right] \text{~and~}$$
    $$\left[\begin{matrix} Y_{20}=X_{20}A_{00}+X_{21}A_{10} & Y_{21}=X_{20}A_{01}+X_{21}A_{11} \\ Y_{30}=X_{30}A_{00}+X_{31}A_{10} & Y_{31}=X_{30}A_{01}+X_{31}A_{11} \end{matrix} \right].$$

    Efficiency

    Given $P=q \times q \times d$ processors, we present the theoretical computation and memory cost, as well as the communication cost based on the ring algorithm, in both the forward and backward pass of 2.5D tensor parallelism.

    Computation    Memory (parameters)    Memory (activations)    Communication (bandwidth)    Communication (latency)
    $O(1/dq^2)$    $O(1/q^2)$             $O(1/dq^2)$             $O(3(q-1)(d+1)/dq)$          $O(6(q-1))$

    Usage

    Currently the newest version of ColossalAI doesn't support 2.5D tensor parallelism, but this feature will be integrated into Shardformer in future releases. For more details about ideas and usages of Shardformer, please refer to Shardformer Doc.

    For users of older version of ColossalAI, please refer to ColossalAI-Examples - 2.5D Tensor Parallelism.

    So, we have $X_{il}$ and $A_{lj}$ on each processor $(i,j,l)$ to get $X_{il}A_{lj}$. Finally, we reduce-scatter the results across $(i, j, 0 \ldots q)$ to get $Y_{ijl}$, which forms

    $$Y = \left[\begin{matrix} Y_{000} & Y_{001} \\ Y_{010} & Y_{011} \\ Y_{100} & Y_{101} \\ Y_{110} & Y_{111} \end{matrix} \right].$$

    We also need to note that in the backward pass, we need to all-gather the gradient $\dot{Y_{ijl}}$, and then reduce-scatter the gradients $\dot{X_{il}}=\dot{Y_{ij}}A_{lj}^T$ and $\dot{A_{lj}}=X_{il}^T\dot{Y_{ij}}$.

    Efficiency

    Given $P=q \times q \times q$ processors, we present the theoretical computation and memory cost, as well as the communication cost based on the ring algorithm, in both the forward and backward pass of 3D tensor parallelism.

    Computation    Memory (parameters)    Memory (activations)    Communication (bandwidth)    Communication (latency)
    $O(1/q^3)$     $O(1/q^3)$             $O(1/q^3)$              $O(6(q-1)/q^3)$              $O(6(q-1))$

    Usage

    Currently the newest version of ColossalAI doesn't support 3D tensor parallelism, but this feature will be integrated into Shardformer in future releases. For more details about ideas and usages of Shardformer, please refer to Shardformer Doc.

    For users of older version of ColossalAI, please refer to ColossalAI-Examples - 3D Tensor Parallelism.


    Gradient Accumulation

    Author: Mingyan Jiang, Baizhou Zhang

    Prerequisite

    Introduction

    Gradient accumulation is a common way to enlarge your effective batch size for training. When training large-scale models, memory can easily become the bottleneck and the batch size can be very small (e.g. 2), leading to unsatisfactory convergence. Gradient accumulation works by adding up the gradients calculated in multiple iterations and only updating the parameters at the preset iteration.

    Usage

    It is simple to use gradient accumulation in Colossal-AI. Just call booster.no_sync(), which returns a context manager that accumulates gradients without synchronization; meanwhile, you should not update the weights.
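
    In sketch form (variable names follow the hands-on example below):

    # accumulation steps: skip gradient synchronization
    with booster.no_sync(model):
        output = model(inputs)
        train_loss = criterion(output, labels) / GRADIENT_ACCUMULATION
        booster.backward(train_loss, optimizer)
    # on the final accumulation step, run backward outside no_sync(), then call optimizer.step()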

    Hands-on Practice

    We now demonstrate gradient accumulation. In this example, we set the gradient accumulation size to 4.

    Step 1. Import libraries in train.py

    Create a train.py and import the necessary dependencies. The version of torch should not be lower than 1.8.1.

    import os
    from pathlib import Path

    import torch
    from torchvision import transforms
    from torchvision.datasets import CIFAR10
    from torchvision.models import resnet18
    from torch.utils.data import DataLoader

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin
    from colossalai.logging import get_dist_logger
    from colossalai.cluster.dist_coordinator import priority_execution

    Step 2. Initialize Distributed Environment

    We then need to initialize the distributed environment. For demo purposes, we use launch_from_torch. You can refer to Launch Colossal-AI for other initialization methods.

    # initialize distributed setting
    parser = colossalai.get_default_parser()
    args = parser.parse_args()
    # launch from torch
    colossalai.launch_from_torch(config=dict())

    Step 3. Create training components

    Build your model, optimizer, loss function, lr scheduler and dataloaders. Note that the root path of the dataset is obtained from the environment variable DATA. You may export DATA=/path/to/data or change Path(os.environ['DATA']) to a path on your machine. Data will be automatically downloaded to the root path.

    # define the training hyperparameters
    BATCH_SIZE = 128
    GRADIENT_ACCUMULATION = 4

    # build resnet
    model = resnet18(num_classes=10)

    # build dataloaders
    with priority_execution():
        train_dataset = CIFAR10(root=Path(os.environ.get('DATA', './data')),
                                download=True,
                                transform=transforms.Compose([
                                    transforms.RandomCrop(size=32, padding=4),
                                    transforms.RandomHorizontalFlip(),
                                    transforms.ToTensor(),
                                    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
                                ]))

    # build criterion
    criterion = torch.nn.CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    Step 4. Inject Feature

    Create a TorchDDPPlugin object to instantiate a Booster, and boost these training components.

    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    model, optimizer, criterion, train_dataloader, _ = booster.boost(model=model,
    optimizer=optimizer,
    criterion=criterion,
    dataloader=train_dataloader)

    Step 5. Train with Booster

    Use the booster in a normal training loop and verify gradient accumulation. param_by_iter records the parameter values observed at each iteration.

    optimizer.zero_grad()
    param_by_iter = []  # records the first parameter element at each iteration
    for idx, (img, label) in enumerate(train_dataloader):
        sync_context = booster.no_sync(model)
        img = img.cuda()
        label = label.cuda()
        if idx % (GRADIENT_ACCUMULATION - 1) != 0:
            with sync_context:
                output = model(img)
                train_loss = criterion(output, label)
                train_loss = train_loss / GRADIENT_ACCUMULATION
                booster.backward(train_loss, optimizer)
        else:
            output = model(img)
            train_loss = criterion(output, label)
            train_loss = train_loss / GRADIENT_ACCUMULATION
            booster.backward(train_loss, optimizer)
            optimizer.step()
            optimizer.zero_grad()

        ele_1st = next(model.parameters()).flatten()[0]
        param_by_iter.append(str(ele_1st.item()))

        if idx != 0 and idx % (GRADIENT_ACCUMULATION - 1) == 0:
            break

    for iteration, val in enumerate(param_by_iter):
        print(f'iteration {iteration} - value: {val}')

    if param_by_iter[-1] != param_by_iter[0]:
        print('The parameter is only updated in the last iteration')

    Step 6. Invoke Training Scripts

    To verify gradient accumulation, we can just check the change of parameter values. When gradient accumulation is set, parameters are only updated in the last step. You can run the script using this command:

    colossalai run --nproc_per_node 1 train.py

    You will see output similar to the text below. This shows the gradient is indeed accumulated, as the parameter is not updated in the first 3 iterations and only updated in the last one.

    iteration 0, first 10 elements of param: tensor([-0.0208,  0.0189,  0.0234,  0.0047,  0.0116, -0.0283,  0.0071, -0.0359, -0.0267, -0.0006], device='cuda:0', grad_fn=<SliceBackward0>)
    iteration 1, first 10 elements of param: tensor([-0.0208, 0.0189, 0.0234, 0.0047, 0.0116, -0.0283, 0.0071, -0.0359, -0.0267, -0.0006], device='cuda:0', grad_fn=<SliceBackward0>)
    iteration 2, first 10 elements of param: tensor([-0.0208, 0.0189, 0.0234, 0.0047, 0.0116, -0.0283, 0.0071, -0.0359, -0.0267, -0.0006], device='cuda:0', grad_fn=<SliceBackward0>)
    iteration 3, first 10 elements of param: tensor([-0.0141, 0.0464, 0.0507, 0.0321, 0.0356, -0.0150, 0.0172, -0.0118, 0.0222, 0.0473], device='cuda:0', grad_fn=<SliceBackward0>)

    Gradient Accumulation on GeminiPlugin

    Currently, the plugins supporting the no_sync() method are TorchDDPPlugin and LowLevelZeroPlugin with stage set to 1. GeminiPlugin doesn't support the no_sync() method, but it can enable synchronized gradient accumulation in a torch-like way.

    To enable gradient accumulation feature, the argument enable_gradient_accumulation should be set to True when initializing GeminiPlugin. Following is the pseudocode snippet of enabling gradient accumulation for GeminiPlugin:

    ...
    plugin = GeminiPlugin(..., enable_gradient_accumulation=True)
    booster = Booster(plugin=plugin)
    ...

    ...
    for idx, (input, label) in enumerate(train_dataloader):
        output = gemini_model(input.cuda())
        train_loss = criterion(output, label.cuda())
        train_loss = train_loss / GRADIENT_ACCUMULATION
        booster.backward(train_loss, gemini_optimizer)

        if idx % (GRADIENT_ACCUMULATION - 1) == 0:
            gemini_optimizer.step()  # zero_grad is automatically done
    ...

    Gradient Clipping

    Author: Mingyan Jiang

    Prerequisite

    Related Paper

    Introduction

    In order to speed up the training process and seek a global optimum for better performance, more and more learning rate schedulers have been proposed. People control the learning rate to adjust the descent pace during training, which helps keep the gradient vector uniform at every step, so the descent pace can be controlled as expected. As a result, gradient clipping, a technique that normalizes the gradient vector to constrain it to a uniform length, becomes indispensable for those who want better performance from their models.

    You do not have to worry about implementing gradient clipping when using Colossal-AI; we support gradient clipping in a powerful and convenient way. All you need is a small addition to your training script.

    Why you should use gradient clipping provided by Colossal-AI

    The reason why we do not recommend users to write gradient clipping by themselves is that naive gradient clipping may fail when tensor parallelism, pipeline parallelism or MoE is applied.

    According to the illustration below, each GPU only owns a portion of the parameters of the weight in a linear layer. To get the correct norm of the gradient vector of the weight of the linear layer, the norm of every gradient vector on each GPU should be summed together. What is more complicated is that the distribution of the bias is different from the distribution of the weight, so the communication group differs for the sum operation.

    (PS: This situation is an old version of 2D parallelism, the implementation in the code is not the same. But it is a good example about the difficulty to unify all communication in gradient clipping.)

    Layout of parameters

    Do not worry about it, since Colossal-AI has handled it for you.

    Usage

    To use gradient clipping, no extra configuration is needed. After the components are boosted, simply call the optimizer's clip_grad_by_norm or clip_grad_by_value method, provided the optimizer supports gradient clipping.
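
    In short, the pattern looks like this (mirroring the hands-on code below):

    booster.backward(train_loss, optimizer)
    optimizer.clip_grad_by_norm(max_norm=GRADIENT_CLIPPING)  # clip before the update
    optimizer.step()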

    Hands-On Practice

    We now demonstrate how to use gradient clipping. In this example, we set the gradient clipping vector norm to be 1.0.

    step 1. Import libraries in train.py

    Create a train.py and import the necessary dependencies.

    import os
    from pathlib import Path

    import torch
    from torchvision import transforms
    from torchvision.datasets import CIFAR10
    from torchvision.models import resnet34
    from tqdm import tqdm

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin
    from colossalai.logging import get_dist_logger
    from colossalai.nn.lr_scheduler import CosineAnnealingLR

    Step 2. Initialize Distributed Environment

    We then need to initialize the distributed environment. For demo purposes, we use launch_from_torch. You can refer to Launch Colossal-AI for other initialization methods.

    colossalai.launch_from_torch(config=dict())
    logger = get_dist_logger()

    Step 3. Create training components

    Build your model, optimizer, loss function, lr scheduler and dataloaders. Note that the root path of the dataset is obtained from the environment variable DATA. You may export DATA=/path/to/data or change Path(os.environ['DATA']) to a path on your machine. Data will be automatically downloaded to the root path.

    # define training hyperparameters
    NUM_EPOCHS = 200
    BATCH_SIZE = 128
    GRADIENT_CLIPPING = 0.1
    # build resnet
    model = resnet34(num_classes=10)
    # build dataloaders
    train_dataset = CIFAR10(root=Path(os.environ.get('DATA', './data')),
    download=True,
    transform=transforms.Compose([
    transforms.RandomCrop(size=32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
    ]))
    # build criterion
    criterion = torch.nn.CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    # lr_scheduler
    lr_scheduler = CosineAnnealingLR(optimizer, total_steps=NUM_EPOCHS)

    Step 4. Inject Gradient Clipping Feature

    Create a TorchDDPPlugin object and Booster object, get a data loader from plugin, then boost all training components.

    plugin = TorchDDPPlugin()
    booster = Booster(mixed_precision='fp16', plugin=plugin)
    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    model, optimizer, criterion, train_dataloader, lr_scheduler = booster.boost(model,optimizer, criterion,train_dataloader, lr_scheduler)

    Step 5. Train with Booster

    Use booster in a normal training loops.

    # verify gradient clipping
    model.train()
    for idx, (img, label) in enumerate(train_dataloader):
        img = img.cuda()
        label = label.cuda()

        model.zero_grad()
        output = model(img)
        train_loss = criterion(output, label)
        booster.backward(train_loss, optimizer)
        optimizer.clip_grad_by_norm(max_norm=GRADIENT_CLIPPING)
        optimizer.step()
        lr_scheduler.step()

        ele_1st = next(model.parameters()).flatten()[0]
        logger.info(f'iteration {idx}, loss: {train_loss}, 1st element of parameters: {ele_1st.item()}')

        # only run for 4 iterations
        if idx == 3:
            break

    Step 6. Invoke Training Scripts

    You can run the script using this command:

    colossalai run --nproc_per_node 1 train.py
  • module (nn.Module) -- Target nn.Module
  • verbose (bool) -- Whether to print lazy initialization rate. Defaults to False.
  • Description
    Initialize all `Parameter` from `LazyTensor`. This function will modify the module in-place.

    Example

    import colossalai
    from colossalai.lazy import LazyInitContext
    from colossalai.booster import Booster
    from colossalai.booster.plugin import GeminiPlugin

    from transformers import LlamaForCausalLM, LlamaConfig, BertForPreTraining

    colossalai.launch({})
    plugin = GeminiPlugin()
    booster = Booster(plugin)

    # 1. Initialize model from scratch
    # Initialization on cuda will accelerate the initialization process but take more GPU memory.
    with LazyInitContext(default_device="cuda"):
        model = LlamaForCausalLM(LlamaConfig(hidden_size=64, intermediate_size=172, num_hidden_layers=4, num_attention_heads=4))
    model, *_ = booster.boost(model)

    # 2. Initialize model from pretrained
    with LazyInitContext():
        model = BertForPreTraining.from_pretrained("prajjwal1/bert-tiny")
    model, *_ = booster.boost(model)

    ⚠️ Lazy initialization from pretrained is supported for colossalai>0.3.3 or main branch.

    Limitations

    As we claimed, lazy initialization must be used with booster. And only several plugins support it.

    Plugin            Supported    Remarks
    Gemini            Yes
    Hybrid Parallel   Yes
    Low Level Zero    No           No need
    Torch DDP         No           Incompatible
    Torch FSDP        No           Incompatible

    Not all models can be lazily initialized. In some cases, a part of parameters/buffers may be early initialized. But don't worry, this part usually takes a small proportion of the whole model.

    And some models are not supported at all which will raise an error. We tested models in torchvision, diffusers, timm, transformers, torchaudio and torchrec. Below models are not supported:

    Model                            Category
    wav2vec2_base                    torchaudio
    hubert_base                      torchaudio
    ViTModel                         transformers
    ViTForMaskedImageModeling        transformers
    ViTForImageClassification        transformers
    Blip2Model                       transformers
    Blip2ForConditionalGeneration    transformers
    We then need to initialize the distributed environment. For demo purposes, we use launch_from_torch. You can refer to Launch Colossal-AI for other initialization methods.

    # initialize distributed setting
    parser = colossalai.get_default_parser()
    args = parser.parse_args()

    # launch from torch
    colossalai.launch_from_torch(config=dict())

    Step 3. Create training components

    Build your model, optimizer, loss function, lr scheduler and dataloaders. Note that the root path of the dataset is obtained from the environment variable DATA. You may export DATA=/path/to/data or change Path(os.environ['DATA']) to a path on your machine. Data will be automatically downloaded to the root path.

    # define the constants
    NUM_EPOCHS = 2
    BATCH_SIZE = 128

    # build model
    model = vit_base_patch16_224(drop_rate=0.1)

    # build dataloader
    train_dataset = datasets.Caltech101(
    root=Path(os.environ['DATA']),
    download=True,
    transform=transforms.Compose([
    transforms.Resize(256),
    transforms.RandomResizedCrop(224),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    Gray2RGB(),
    transforms.Normalize([0.5, 0.5, 0.5],
    [0.5, 0.5, 0.5])
    ]))

    # build optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, weight_decay=0.1)

    # build loss
    criterion = torch.nn.CrossEntropyLoss()

    # lr_scheduler
    lr_scheduler = LinearWarmupLR(optimizer, warmup_steps=50, total_steps=NUM_EPOCHS)

    Step 4. Inject AMP Feature

    Create a MixedPrecision object (if needed) and a TorchDDPPlugin object, and call booster.boost to convert the training components to run with FP16.

    plugin = TorchDDPPlugin()
    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    booster = Booster(mixed_precision='fp16', plugin=plugin)

    # if you need to customize the config, do like this
    # >>> from colossalai.mixed_precision import FP16TorchMixedPrecision
    # >>> mixed_precision = FP16TorchMixedPrecision(
    # >>> init_scale=2.**16,
    # >>> growth_factor=2.0,
    # >>> backoff_factor=0.5,
    # >>> growth_interval=2000)
    # >>> plugin = TorchDDPPlugin()
    # >>> booster = Booster(mixed_precision=mixed_precision, plugin=plugin)

    # boost model, optimizer, criterion, dataloader, lr_scheduler
    model, optimizer, criterion, train_dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, train_dataloader, lr_scheduler)

    Step 5. Train with Booster

    Use booster in a normal training loops.

    model.train()
    for epoch in range(NUM_EPOCHS):
        for img, label in train_dataloader:
            img = img.cuda()
            label = label.cuda()
            optimizer.zero_grad()
            output = model(img)
            loss = criterion(output, label)
            booster.backward(loss, optimizer)
            optimizer.step()
            lr_scheduler.step()

    Step 6. Invoke Training Scripts

    Use the following command to start the training scripts. You can change --nproc_per_node to use a different number of GPUs.

    colossalai run --nproc_per_node 1 train.py


    Pipeline Parallel

    Author: Guangyang Lu, Hongxin Liu, Yongbin Li, Mingyan Jiang

    Prerequisite

    Example Code

    Related Paper

    Quick introduction

    In this tutorial, you will learn how to use pipeline parallelism. In Colossal-AI, we use the 1F1B pipeline introduced by NVIDIA. Since ViT and ImageNet are too large to use in this case, we use a BERT model and the GLUE dataset as an example.

    Table Of Content

    In this tutorial we will cover:

    1. Introduction of 1F1B pipeline.
    2. Usage of non-interleaved and interleaved schedule.
    3. Finetune Bert with pipeline.

    Introduction of 1F1B pipeline

    First of all, we will introduce GPipe for a better understanding.

    Figure1: GPipe. This figure is from Megatron-LM paper.

    As you can see, for GPipe, the backward passes are executed only after the forward passes of all microbatches in a batch have finished.

    In general, 1F1B (one forward pass followed by one backward pass) is more efficient than GPipe (in memory, or in both memory and time). There are two schedules of the 1F1B pipeline, the non-interleaved and the interleaved; the figures are shown below.

    Figure2: This figure is from Megatron-LM paper. The top part shows the default non-interleaved schedule. And the bottom part shows the interleaved schedule.

    Non-interleaved Schedule

The non-interleaved schedule can be divided into three stages. The first stage is the warm-up stage, where workers perform differing numbers of forward passes. In the following stage, workers perform one forward pass followed by one backward pass. In the last stage, workers finish the remaining backward passes.

This mode is more memory-efficient than GPipe. However, it takes about the same amount of time as GPipe to complete a round of passes.
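
To make the three phases concrete, here is a toy sketch of the per-stage execution order under the non-interleaved 1F1B schedule. This is our own illustration with a hypothetical function name, not Colossal-AI's actual OneForwardOneBackwardSchedule implementation; it assumes the common warm-up rule of (number of stages - stage id - 1) forward passes.

def one_f_one_b_order(num_stages: int, num_microbatches: int, stage_id: int) -> list:
    """Order of microbatch forward (F) / backward (B) passes for one pipeline stage."""
    # Warm-up phase: earlier stages run more forward passes before their first backward arrives.
    num_warmup = min(num_stages - stage_id - 1, num_microbatches)
    order = []
    fwd = 0
    bwd = 0
    for _ in range(num_warmup):
        order.append(f"F{fwd}")
        fwd += 1
    # Steady phase: one forward pass followed by one backward pass.
    for _ in range(num_microbatches - num_warmup):
        order.append(f"F{fwd}")
        fwd += 1
        order.append(f"B{bwd}")
        bwd += 1
    # Cool-down phase: drain the remaining backward passes.
    while bwd < num_microbatches:
        order.append(f"B{bwd}")
        bwd += 1
    return order

# For example, with 4 stages and 8 microbatches, stage 0 executes:
# F0 F1 F2 F3 B0 F4 B1 F5 B2 F6 B3 F7 B4 B5 B6 B7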

    Interleaved Schedule

This schedule requires the number of microbatches to be an integer multiple of the number of pipeline stages.

In this schedule, each device can perform computation for multiple subsets of layers (called model chunks) instead of a single contiguous set of layers. For example, where device 1 previously held layers 1-4 and device 2 held layers 5-8, now device 1 holds layers 1, 2, 9, 10 and device 2 holds layers 3, 4, 11, 12, and so on. With this scheme, each device in the pipeline is assigned multiple pipeline stages, and each pipeline stage involves less computation.

    This mode is both memory-efficient and time-efficient.
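
As an illustration of the layer assignment described above, the following sketch (ours; the function name is hypothetical, and this is not Shardformer's actual splitting logic) computes which layers each stage holds when every stage is given several model chunks. With 16 layers, 4 devices and 2 chunks it reproduces the assignment from the example: the first device gets layers 1, 2, 9, 10, the second gets layers 3, 4, 11, 12, and so on.

def interleaved_layer_assignment(num_layers: int, num_stages: int, num_model_chunks: int) -> dict:
    """Map each pipeline stage to the (1-based) layer ids it holds under the interleaved schedule."""
    chunk_size = num_layers // (num_stages * num_model_chunks)
    assignment = {stage: [] for stage in range(num_stages)}
    for chunk in range(num_model_chunks):
        for stage in range(num_stages):
            start = (chunk * num_stages + stage) * chunk_size
            assignment[stage].extend(range(start + 1, start + chunk_size + 1))
    return assignment

# interleaved_layer_assignment(16, 4, 2)
# -> {0: [1, 2, 9, 10], 1: [3, 4, 11, 12], 2: [5, 6, 13, 14], 3: [7, 8, 15, 16]}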

    Colossal-AI's Implementation

In Colossal-AI, pipeline parallelism relies on the scheduler and Shardformer. We provide both a non-interleaved (OneForwardOneBackwardSchedule) and an interleaved (InterleavedSchedule) schedule. Shardformer performs the layer splitting for the model and replaces the model's forward function so that it is compatible with the scheduler.

In Colossal-AI, the HybridParallelPlugin encapsulates the pipeline execution strategy. It manages the pipeline-parallel communication groups and a scheduler. When a model is boosted with this plugin, its layers are split by calling the shardformer.optimize function, and execute_pipeline is then used to run the model in segments with OneForwardOneBackwardSchedule, the default scheduler of HybridParallelPlugin; InterleavedSchedule will be integrated later.

    You can customize your parallel strategy by setting parameters for the HybridParallelPlugin.

    For more usage details, please refer to the documentation for HybridParallelPlugin.

    Fine-tune Bert with pipeline

    First, we define the necessary training components, including model, dataloader, optimizer, lr_scheduler, criterion:

    import argparse
    from typing import Callable, List, Union

    import torch
    import torch.nn as nn
    from data import GLUEDataBuilder
    from torch.optim import Adam, Optimizer
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
    from torch.utils.data import DataLoader
    from tqdm import tqdm
from transformers import (
    AlbertForSequenceClassification,
    AutoConfig,
    BertForSequenceClassification,
    get_linear_schedule_with_warmup,
)

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import HybridParallelPlugin
    from colossalai.cluster import DistCoordinator
    from colossalai.nn.optimizer import HybridAdam

    # Define some config
    NUM_EPOCHS = 3
    BATCH_SIZE = 32
    LEARNING_RATE = 2.4e-5
    WEIGHT_DECAY = 0.01
    WARMUP_FRACTION = 0.1

    coordinator = DistCoordinator()

def move_to_cuda(batch):
    return {k: v.cuda() for k, v in batch.items()}


    # Define 'criterion' function with two inputs, which will be passed to 'execute_pipeline'.
def _criterion(outputs, inputs):
    return outputs.loss

    # Define optimizer
    lr = LEARNING_RATE
    no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": WEIGHT_DECAY,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]

    optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)


    # Define lr_scheduler
    total_steps = len(train_dataloader) * NUM_EPOCHS
    num_warmup_steps = int(WARMUP_FRACTION * total_steps)
lr_scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=num_warmup_steps,
    num_training_steps=total_steps,
)


    # Define Bert model
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased", config=cfg).cuda()

    # Define a dataloader
data_builder = GLUEDataBuilder(model_name,
                               plugin,
                               args.task,
                               train_batch_size=BATCH_SIZE,
                               eval_batch_size=BATCH_SIZE)
    train_dataloader = data_builder.train_dataloader()

    Define a booster with the HybridParallelPlugin.

plugin = HybridParallelPlugin(tp_size=1,
                              pp_size=2,
                              num_microbatches=None,
                              microbatch_size=1,
                              enable_all_optimization=True,
                              zero_stage=1,
                              precision='fp16',
                              initial_scale=1)
    booster = Booster(plugin=plugin)

Boost these training components with the booster we created.

model, optimizer, _criterion, _, lr_scheduler = booster.boost(model,
                                                              optimizer,
                                                              criterion=_criterion,
                                                              lr_scheduler=lr_scheduler)

Finally, train the model.

# Define a train function
def train_epoch(epoch: int, model: nn.Module, optimizer: Optimizer, _criterion: Callable, lr_scheduler: LRScheduler,
                train_dataloader: DataLoader, booster: Booster, coordinator: DistCoordinator):

    is_pp_last_stage = booster.plugin.stage_manager.is_last_stage()
    total_step = len(train_dataloader)

    model.train()
    optimizer.zero_grad()
    # convert train_dataloader to an iterator
    train_dataloader_iter = iter(train_dataloader)
    with tqdm(range(total_step),
              desc=f'Epoch [{epoch + 1}/{NUM_EPOCHS}]',
              disable=not (is_pp_last_stage)) as pbar:
        # Forward pass
        for _ in pbar:
            outputs = booster.execute_pipeline(train_dataloader_iter,
                                               model,
                                               _criterion,
                                               optimizer,
                                               return_loss=True,
                                               return_outputs=True)
            # Backward and optimize
            if is_pp_last_stage:
                loss = outputs['loss']
                pbar.set_postfix({'loss': loss.item()})

            optimizer.step()
            optimizer.zero_grad()
            lr_scheduler.step()


# Train model
for epoch in range(NUM_EPOCHS):
    train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)

We use 2 pipeline stages and a microbatch size of 1 (these parameters can be configured to appropriate values).

diff --git a/docs/features/shardformer/index.html b/docs/features/shardformer/index.html

  • ...needs to do Reduce-Scatter to split the output of Row Linear layer of tensor parallel to all devices along sequence dimension, and All-Gather to get the whole gradient during backward.

  • NCCL's implementation of All-Reduce adopts the Ring All-Reduce approach, which consists of a Reduce-Scatter operation and an All-Gather operation of equal cost. Therefore, compared with using tensor parallelism alone, adding sequence parallelism does not introduce additional communication overhead (a small sketch of this decomposition follows this list).

  • One important thing to note is that when using sequence parallelism along with the Column Linear module of tensor parallelism, the complete input needs to be obtained during the backward computation of gradients. During the forward pass, only the portion of the input that is split along the sequence dimension is retained, in the shape of (batch, sequence_len/k, hidden_states). Therefore, an additional All-Gather operation is required to obtain the complete input for gradient computation. However, it is possible to overlap the gradient computation with the All-Gather communication operation in our implementation, which would not introduce additional communication overhead (corresponding to the enable_sequence_overlap parameter in Shardformer).
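
To make the cost argument above concrete, here is a minimal sketch (ours, not Colossal-AI code; the function name is hypothetical) showing that an all-reduce can be decomposed into a reduce-scatter followed by an all-gather with plain torch.distributed collectives. It assumes the tensor length is divisible by the world size and that a process group has already been initialized.

import torch
import torch.distributed as dist

def all_reduce_via_reduce_scatter_all_gather(x: torch.Tensor) -> torch.Tensor:
    """Sum a 1-D tensor across ranks with reduce-scatter + all-gather instead of one all-reduce."""
    world_size = dist.get_world_size()
    shards = [s.contiguous() for s in x.chunk(world_size)]   # one equally sized shard per rank
    reduced_shard = torch.empty_like(shards[0])
    dist.reduce_scatter(reduced_shard, shards)                # each rank sums one shard
    gathered = [torch.empty_like(reduced_shard) for _ in range(world_size)]
    dist.all_gather(gathered, reduced_shard)                  # collect the reduced shards
    return torch.cat(gathered)                                # same result as dist.all_reduce(x)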

diff --git a/docs/features/zero_with_chunk/index.html b/docs/features/zero_with_chunk/index.html

    ...and communication efficiency is retained.

1. Shard Optimizer States: the optimizer states (e.g., for the Adam optimizer, the 32-bit weights and the first and second momentum estimates) are partitioned across the processes, so that each process updates only its own partition.

2. Shard Gradient: after reduction inside the data parallel process group, gradient tensors are also partitioned such that each process only stores the gradients corresponding to its partition of the optimizer states. Note that Colossal-AI converts gradients to fp32 format to participate in parameter updating.

3. Shard Parameter: the 16-bit model parameters are partitioned across the processes of a data parallel group.

4. Gemini: a dynamic heterogeneous memory space manager for parameters, gradients and optimizer states.

In addition, this article will introduce the Zero Redundancy Optimizer with chunk-based memory management.

When using ZeRO, we distribute the model by sharding the parameters. The advantage of this method is that the memory load of each node is balanced. But this approach has two significant disadvantages. First, during communication, a temporary memory buffer needs to be allocated and released afterwards, which leads to memory fragmentation. Second, using the tensor as the granularity of communication leaves the network bandwidth underutilized: generally, the longer the transmitted message, the higher the bandwidth utilization.

Using the Chunk mechanism introduced in ColossalAI v0.1.8, we can improve the efficiency of ZeRO. We store a consecutive set of parameters, in initialization order, into a chunk (a chunk is a continuous memory space), and each chunk has the same size. Organizing memory in chunks leads to efficient use of the network bandwidth between PCI-e and GPU-GPU, reduces the number of communications, and avoids potential memory fragmentation.

Before v0.1.8, ZeRO had a high communication cost for parameter communication. If a parameter was used multiple times in several consecutive operators, there would be repeated communication operations, which hurt efficiency significantly. This situation is very common when using the gradient checkpoint technique, where the forward propagation of a parameter is recomputed during backward propagation.

    Taking GPT as an example, its Checkpoint will be applied to each GPT Block, and each GPT Block contains a Self-Attention layer and an MLP layer. During the backward pass, the forward of the Self-Attention layer and the MLP layer will be computed in turn, and then the backward of the MLP layer and the Self-Attention layer will be computed in turn.

In addition, due to the communication and memory movement of small tensors, the bandwidth of NVLink and PCI-e cannot be fully utilized, and every communication and memory movement carries kernel launch overhead. With chunks, many small tensor communications and memory movements are combined into one large tensor communication and memory movement, which both improves bandwidth utilization and reduces kernel launch overhead.

    We also provide a lightweight chunk search mechanism to help users automatically find the chunk size with the smallest memory fragmentation.
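
The following toy sketch (ours; the function name is hypothetical, and this is not Gemini's actual chunk manager) illustrates the core idea: parameters are packed into equal-capacity chunks in initialization order, so that one communication moves a single large chunk instead of many small tensors.

from typing import List

def pack_into_chunks(param_numels: List[int], chunk_numel: int) -> List[List[int]]:
    """Greedily pack parameter sizes (in elements) into chunks of capacity chunk_numel, preserving order."""
    chunks: List[List[int]] = []
    current: List[int] = []
    used = 0
    for numel in param_numels:
        if used + numel > chunk_numel and current:
            chunks.append(current)    # current chunk is full, start a new one
            current = []
            used = 0
        current.append(numel)
        used += numel
    if current:
        chunks.append(current)
    return chunks

# e.g. pack_into_chunks([3, 5, 2, 7, 1], chunk_numel=8) -> [[3, 5], [2], [7, 1]]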

    Usage

    GeminiDDP

We will use GeminiDDP to apply ZeRO with chunk-based memory management. This is our new torch.Module wrapper that uses ZeRO-DP and Gemini: ZeRO is for parallelism, and Gemini is for memory management.

Gemini allows LazyInitContext, which can save memory when initializing large models on multiple GPUs.

    If your model has N billion parameters and your GPU memory is M GB, we recommend you use LazyInitContext when 4N >= M. Otherwise, LazyInitContext is optional.

    with LazyInitContext(default_device=torch.device('cuda')):
    model = gpt2_medium(checkpoint=True)
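
To make the rule of thumb above easy to apply, here is a tiny helper (hypothetical, not part of Colossal-AI):

def should_use_lazy_init(num_params_in_billions: float, gpu_memory_in_gb: float) -> bool:
    # Rule of thumb from above: LazyInitContext is recommended when 4 * N >= M.
    return 4 * num_params_in_billions >= gpu_memory_in_gb

# e.g. a 10-billion-parameter model on a 40 GB GPU: 4 * 10 >= 40 -> True, so use LazyInitContext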

We provide the user-friendly Booster API and recommend that you use it. However, if you still want to use the low-level API, you can read the rest of this section.

    Wrap the model with GeminiDDP.

    model = GeminiDDP(model, hidden_dim=hidden_dim, min_chunk_size_m=min_chunk_size_m)

hidden_dim is the hidden dimension of the DNN. Users can provide this argument to speed up searching. If users do not know this argument before training, that is OK; we will use a default value of 1024. min_chunk_size_m is a float giving the minimum chunk size divided by 2^20 (e.g., if min_chunk_size_m=2.5, then the minimum chunk size is 2.5*(2^20)). If the aggregate size of parameters is still smaller than the minimum chunk size, all parameters will be compacted into one small chunk.

    Initialization of the optimizer.

    optimizer = GeminiAdamOptimizer(model, lr=1e-3, initial_scale=2**5)

    Training

    optimizer.zero_grad()
    outputs = model(input_ids, attn_mask)
    loss = criterion(outputs, input_ids)
    optimizer.backward(loss)
    optimizer.step()

⚠️ Note: Please do not use loss.backward(); the standard way of writing it is optimizer.backward(loss).

    Train GPT

    In this example, we use Hugging Face Transformers. You have to install transformers before running this example. We will take GPT2 Medium as an example here.

    For simplicity, we just use randomly generated data here.

First, we only need to import GPT2LMHeadModel from Hugging Face Transformers to define our model; users do not need to define or modify the model themselves, which makes it more convenient to use.

    Define a GPT model:

import torch
import torch.nn as nn
from transformers import GPT2Config, GPT2LMHeadModel


class GPTLMModel(nn.Module):

    def __init__(self,
                 hidden_size=768,
                 num_layers=12,
                 num_attention_heads=12,
                 max_seq_len=1024,
                 vocab_size=50257,
                 checkpoint=False):
        super().__init__()
        self.checkpoint = checkpoint
        self.model = GPT2LMHeadModel(
            GPT2Config(n_embd=hidden_size,
                       n_layer=num_layers,
                       n_head=num_attention_heads,
                       n_positions=max_seq_len,
                       n_ctx=max_seq_len,
                       vocab_size=vocab_size))
        if checkpoint:
            self.model.gradient_checkpointing_enable()

    def forward(self, input_ids, attention_mask):
        # only return the language model logits
        return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0]


def gpt2_medium(checkpoint=False):
    return GPTLMModel(hidden_size=1024, num_layers=24, num_attention_heads=16, checkpoint=checkpoint)

    Define our loss function:

class GPTLMLoss(nn.Module):

    def __init__(self):
        super().__init__()
        self.loss_fn = nn.CrossEntropyLoss()

    def forward(self, logits, labels):
        shift_logits = logits[..., :-1, :].contiguous()
        shift_labels = labels[..., 1:].contiguous()
        return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

    Write a function to get random inputs:

def get_data(batch_size, seq_len, vocab_size):
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device())
    attention_mask = torch.ones_like(input_ids)
    return input_ids, attention_mask

Finally, we define the model with Gemini + ZeRO DDP and define our training loop. As we pre-train GPT in this example, we just use a simple language model loss:

import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.lazy import LazyInitContext
from colossalai.nn.optimizer import HybridAdam


def main():
    args = parse_args()
    BATCH_SIZE = 8
    SEQ_LEN = 1024
    VOCAB_SIZE = 50257
    NUM_STEPS = 10
    colossalai.launch_from_torch(config={})

    # build criterion
    criterion = GPTLMLoss()

    torch.manual_seed(123)
    # build GPT model with lazy initialization
    with LazyInitContext(default_device=torch.device('cuda')):
        model = gpt2_medium(checkpoint=True)

    # build optimizer after the model is created
    optimizer = HybridAdam(model.parameters(), lr=0.001)

    # Gemini + ZeRO DP
    plugin = GeminiPlugin(max_norm=1.0, initial_scale=2**5)
    booster = Booster(plugin=plugin)
    model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    torch.cuda.synchronize()
    model.train()
    for n in range(NUM_STEPS):
        # we just use randomly generated data here
        input_ids, attn_mask = get_data(BATCH_SIZE, SEQ_LEN, VOCAB_SIZE)
        optimizer.zero_grad()
        outputs = model(input_ids, attn_mask)
        loss = criterion(outputs, input_ids)
        booster.backward(loss, optimizer)
        optimizer.step()

    torch.cuda.synchronize()

⚠️ Note: If you want to use the Gemini module, please do not use the gradient accumulation feature we mentioned before. The complete example can be found on Train GPT with Colossal-AI.

diff --git a/docs/get_started/installation/index.html b/docs/get_started/installation/index.html

    Setup

    Requirements:

If you encounter any problems during installation, you may want to raise an issue in this repository.

    Download From PyPI

    You can install Colossal-AI with

    pip install colossalai

    Note: only Linux is supported for now

If you want to build PyTorch extensions during installation, you can use the command below. Otherwise, the PyTorch extensions will be built at runtime.

    CUDA_EXT=1 pip install colossalai

    Download From Source

    The version of Colossal-AI will be in line with the main branch of the repository. Feel free to raise an issue if you encounter any problem.

    git clone https://github.com/hpcaitech/ColossalAI.git
    cd ColossalAI

    # install dependency
    pip install -r requirements/requirements.txt

    # install colossalai
    CUDA_EXT=1 pip install .

If you don't want to install and enable CUDA kernel fusion (which is compulsory when using fused optimizers), just don't set CUDA_EXT:

    pip install .

For users with CUDA 10.2, you can still build ColossalAI from source. However, you need to manually download the cub library and copy it to the corresponding directory.

    # clone the repository
    git clone https://github.com/hpcaitech/ColossalAI.git
    cd ColossalAI

    # download the cub library
    wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
    unzip 1.8.0.zip
    cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/

    # install
    CUDA_EXT=1 pip install .
diff --git a/docs/get_started/reading_roadmap/index.html b/docs/get_started/reading_roadmap/index.html

    ...advanced tutorials section is the place to go!

    We always welcome suggestions and discussions from the community, and we would be more than willing to help you if you encounter any issue. You can raise an issue here or create a discussion topic in the forum.

diff --git a/docs/get_started/run_demo/index.html b/docs/get_started/run_demo/index.html

    Tutorial: Integrate Mixture-of-Experts Into Your Model

    4. sequence parallel

Sequence parallelism is designed to tackle memory efficiency and sequence length limit problems in NLP tasks. We provide an example of BERT in ColossalAI-Examples. You can follow the README.md to run the code.

diff --git a/index.html b/index.html

    Unmatched Speed and Scale

    Learn about the distributed techniques of Colossal-AI to maximize the runtime performance of your large neural networks.

    Need professional help? Talk to our experts

    All about Colossal-AI

    Get started

    Start your first Colossal-AI project.

    Concepts

    Understand how Colossal-AI works.

    Command Line Interface (CLI)

    The Colossal-AI Command Line Interface is a unified tool to manage your Colossal-AI projects.

    Configuration

    Define your Colossal-AI project configuration as per your needs.

    Do you use Colossal-AI?

    If you are a happy user of our open source Colossal-AI software and implemented a deep learning project with it, please let us know.

diff --git a/markdown-page/index.html b/markdown-page/index.html

    Markdown page example

    You don't need React to write simple standalone pages.

diff --git a/search/index.html b/search/index.html

    Search the documentation

diff --git a/zh-Hans/404.html b/zh-Hans/404.html

Page Not Found

We could not find the page you were looking for.

Please contact the owner of the site that linked to the original URL and let them know the link is broken.

    - + \ No newline at end of file diff --git a/zh-Hans/assets/js/d742ffe2.3a4495dd.js b/zh-Hans/assets/js/d742ffe2.bfcf124a.js similarity index 65% rename from zh-Hans/assets/js/d742ffe2.3a4495dd.js rename to zh-Hans/assets/js/d742ffe2.bfcf124a.js index 3bab8791..2583c83c 100644 --- a/zh-Hans/assets/js/d742ffe2.3a4495dd.js +++ b/zh-Hans/assets/js/d742ffe2.bfcf124a.js @@ -1 +1 @@ -"use strict";(self.webpackChunkdemo=self.webpackChunkdemo||[]).push([[9314],{6999:(e,t,a)=>{a.d(t,{Cl:()=>i,Dx:()=>c,Pc:()=>n,aE:()=>s,e_:()=>d,iz:()=>r,nT:()=>p});var o=a(7294),l=a(398);a(814);function i(e){return o.createElement("div",{className:"docstring-container"},e.children)}function n(e){return o.createElement("div",{className:"signature"},"(",e.children,")")}function r(e){return o.createElement("div",{class:"divider"},o.createElement("span",{class:"divider-text"},e.name))}function s(e){return o.createElement("div",null,o.createElement(r,{name:"Parameters"}),o.createElement(l.D,null,e.children))}function p(e){return o.createElement("div",null,o.createElement(r,{name:"Returns"}),o.createElement(l.D,null,`${e.name}: ${e.desc}`))}function c(e){return o.createElement("div",{className:"title-container"},o.createElement("div",{className:"title-module"},o.createElement("h5",null,e.type),"\xa0 ",o.createElement("h3",null,e.name)),o.createElement("div",{className:"title-source"},"<",o.createElement("a",{href:e.source,className:"title-source"},"source"),">"))}function d(e){return o.createElement("div",null,o.createElement(r,{name:"Example"}),o.createElement(l.D,null,e.code))}},189:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>r,default:()=>m,frontMatter:()=>n,metadata:()=>s,toc:()=>c});var o=a(7462),l=(a(7294),a(3905)),i=a(6999);const n={},r="Booster \u63d2\u4ef6",s={unversionedId:"basics/booster_plugins",id:"basics/booster_plugins",title:"Booster \u63d2\u4ef6",description:"\u4f5c\u8005: Hongxin Liu, Baizhou Zhang, Pengtai Xu",source:"@site/i18n/zh-Hans/docusaurus-plugin-content-docs/current/basics/booster_plugins.md",sourceDirName:"basics",slug:"/basics/booster_plugins",permalink:"/zh-Hans/docs/basics/booster_plugins",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/basics/booster_plugins.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"Booster API",permalink:"/zh-Hans/docs/basics/booster_api"},next:{title:"Booster Checkpoint",permalink:"/zh-Hans/docs/basics/booster_checkpoint"}},p={},c=[{value:"\u5f15\u8a00",id:"\u5f15\u8a00",level:2},{value:"\u63d2\u4ef6\u9009\u62e9",id:"\u63d2\u4ef6\u9009\u62e9",level:2},{value:"\u63d2\u4ef6",id:"\u63d2\u4ef6",level:2},{value:"Low Level Zero \u63d2\u4ef6",id:"low-level-zero-\u63d2\u4ef6",level:3},{value:"Gemini \u63d2\u4ef6",id:"gemini-\u63d2\u4ef6",level:3},{value:"Hybrid Parallel \u63d2\u4ef6",id:"hybrid-parallel-\u63d2\u4ef6",level:3},{value:"Torch DDP \u63d2\u4ef6",id:"torch-ddp-\u63d2\u4ef6",level:3},{value:"Torch FSDP \u63d2\u4ef6",id:"torch-fsdp-\u63d2\u4ef6",level:3}],d={toc:c},u="wrapper";function m(e){let{components:t,...a}=e;return(0,l.kt)(u,(0,o.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,l.kt)("h1",{id:"booster-\u63d2\u4ef6"},"Booster \u63d2\u4ef6"),(0,l.kt)("p",null,"\u4f5c\u8005: ",(0,l.kt)("a",{parentName:"p",href:"https://github.com/ver217"},"Hongxin Liu"),", ",(0,l.kt)("a",{parentName:"p",href:"https://github.com/Fridge003"},"Baizhou Zhang"),", ",(0,l.kt)("a",{parentName:"p",href:"https://github.com/ppt0011"},"Pengtai 
Xu")),(0,l.kt)("p",null,(0,l.kt)("strong",{parentName:"p"},"\u524d\u7f6e\u6559\u7a0b:")),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"/zh-Hans/docs/basics/booster_api"},"Booster API"))),(0,l.kt)("h2",{id:"\u5f15\u8a00"},"\u5f15\u8a00"),(0,l.kt)("p",null,"\u6b63\u5982 ",(0,l.kt)("a",{parentName:"p",href:"/zh-Hans/docs/basics/booster_api"},"Booster API")," \u4e2d\u63d0\u5230\u7684\uff0c\u6211\u4eec\u53ef\u4ee5\u4f7f\u7528 booster \u63d2\u4ef6\u6765\u81ea\u5b9a\u4e49\u5e76\u884c\u8bad\u7ec3\u3002\u5728\u672c\u6559\u7a0b\u4e2d\uff0c\u6211\u4eec\u5c06\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 booster \u63d2\u4ef6\u3002"),(0,l.kt)("p",null,"\u6211\u4eec\u73b0\u5728\u63d0\u4f9b\u4ee5\u4e0b\u63d2\u4ef6:"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-ddp-%E6%8F%92%E4%BB%B6"},"Torch DDP \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("inlineCode",{parentName:"li"},"torch.nn.parallel.DistributedDataParallel")," \u5e76\u4e14\u53ef\u7528\u4e8e\u4f7f\u7528\u6570\u636e\u5e76\u884c\u8bad\u7ec3\u6a21\u578b\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-fsdp-%E6%8F%92%E4%BB%B6"},"Torch FSDP \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("inlineCode",{parentName:"li"},"torch.distributed.fsdp.FullyShardedDataParallel")," \u5e76\u4e14\u53ef\u7528\u4e8e\u4f7f\u7528 Zero-dp \u8bad\u7ec3\u6a21\u578b\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#low-level-zero-%E6%8F%92%E4%BB%B6"},"Low Level Zero \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("inlineCode",{parentName:"li"},"colossalai.zero.low_level.LowLevelZeroOptimizer"),"\uff0c\u53ef\u7528\u4e8e\u4f7f\u7528 Zero-dp \u8bad\u7ec3\u6a21\u578b\u3002\u5b83\u4ec5\u652f\u6301 Zero \u9636\u6bb51\u548c\u9636\u6bb52\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#gemini-%E6%8F%92%E4%BB%B6"},"Gemini \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("a",{parentName:"li",href:"/zh-Hans/docs/features/zero_with_chunk"},"Gemini"),"\uff0cGemini \u5b9e\u73b0\u4e86\u57fa\u4e8eChunk\u5185\u5b58\u7ba1\u7406\u548c\u5f02\u6784\u5185\u5b58\u7ba1\u7406\u7684 Zero-3\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#hybrid-parallel-%E6%8F%92%E4%BB%B6"},"Hybrid Parallel \u63d2\u4ef6"),": \u5b83\u4e3aShardformer\uff0c\u6d41\u6c34\u7ebf\u7ba1\u7406\u5668\uff0c\u6df7\u5408\u7cbe\u5ea6\u8fd0\u7b97\uff0cTorchDDP\u4ee5\u53caZero-1/Zero-2\u529f\u80fd\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7edf\u4e00\u4e14\u7b80\u6d01\u7684\u63a5\u53e3\u3002\u4f7f\u7528\u8be5\u63d2\u4ef6\u53ef\u4ee5\u7b80\u5355\u9ad8\u6548\u5730\u5b9e\u73b0transformer\u6a21\u578b\u5728\u5f20\u91cf\u5e76\u884c\uff0c\u6d41\u6c34\u7ebf\u5e76\u884c\u4ee5\u53ca\u6570\u636e\u5e76\u884c\uff08DDP, Zero\uff09\u95f4\u4efb\u610f\u7ec4\u5408\u5e76\u884c\u8bad\u7ec3\u7b56\u7565\uff0c\u540c\u65f6\u652f\u6301\u591a\u79cd\u8bad\u7ec3\u901f\u5ea6\u548c\u5185\u5b58\u7684\u4f18\u5316\u5de5\u5177\u3002\u6709\u5173\u8fd9\u4e9b\u8bad\u7ec3\u7b56\u7565\u548c\u4f18\u5316\u5de5\u5177\u7684\u5177\u4f53\u4fe1\u606f\u5c06\u5728\u4e0b\u4e00\u7ae0\u4e2d\u9610\u8ff0\u3002")),(0,l.kt)("p",null,"\u66f4\u591a\u63d2\u4ef6\u5373\u5c06\u63a8\u51fa\u3002"),(0,l.kt)("h2",{id:"\u63d2\u4ef6\u9009\u62e9"},"\u63d2\u4ef6\u9009\u62e9"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-ddp-%E6%8F%92%E4%BB%B6"},"Torch DDP \u63d2\u4ef6"),": \u9002\u7528\u4e8e\u53c2\u6570\u5c11\u4e8e 20 
\u4ebf\u7684\u6a21\u578b\uff08\u4f8b\u5982 Bert-3m\u3001GPT2-1.5b\uff09\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-fsdp-%E6%8F%92%E4%BB%B6"},"Torch FSDP \u63d2\u4ef6")," / ",(0,l.kt)("a",{parentName:"li",href:"#low-level-zero-%E6%8F%92%E4%BB%B6"},"Low Level Zero \u63d2\u4ef6"),": \u9002\u7528\u4e8e\u53c2\u6570\u5c11\u4e8e 100 \u4ebf\u7684\u6a21\u578b\uff08\u4f8b\u5982 GPTJ-6b\u3001MegatronLM-8b\uff09\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#gemini-%E6%8F%92%E4%BB%B6"},"Gemini \u63d2\u4ef6"),": \u9002\u5408\u53c2\u6570\u8d85\u8fc7 100 \u4ebf\u7684\u6a21\u578b\uff08\u4f8b\u5982 TuringNLG-17b\uff09\uff0c\u4e14",(0,l.kt)("strong",{parentName:"li"},"\u8de8\u8282\u70b9\u5e26\u5bbd\u9ad8\u3001\u4e2d\u5c0f\u89c4\u6a21\u96c6\u7fa4\uff08\u5343\u5361\u4ee5\u4e0b\uff09"),"\u7684\u573a\u666f\uff08\u4f8b\u5982 Llama2-70b\uff09\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#hybrid-parallel-%E6%8F%92%E4%BB%B6"},"Hybrid Parallel \u63d2\u4ef6"),": \u9002\u5408\u53c2\u6570\u8d85\u8fc7 600 \u4ebf\u7684\u6a21\u578b\u3001\u8d85\u957f\u5e8f\u5217\u3001\u8d85\u5927\u8bcd\u8868\u7b49\u7279\u6b8a\u6a21\u578b\uff0c\u4e14",(0,l.kt)("strong",{parentName:"li"},"\u8de8\u8282\u70b9\u5e26\u5bbd\u4f4e\u3001\u5927\u89c4\u6a21\u96c6\u7fa4\uff08\u5343\u5361\u4ee5\u4e0a\uff09"),"\u7684\u573a\u666f\uff08\u4f8b\u5982 GPT3-175b\u3001Bloom-176b\uff09\u3002")),(0,l.kt)("h2",{id:"\u63d2\u4ef6"},"\u63d2\u4ef6"),(0,l.kt)("h3",{id:"low-level-zero-\u63d2\u4ef6"},"Low Level Zero \u63d2\u4ef6"),(0,l.kt)("p",null,"\u8be5\u63d2\u4ef6\u5b9e\u73b0\u4e86 Zero-1 \u548c Zero-2\uff08\u4f7f\u7528/\u4e0d\u4f7f\u7528 CPU \u5378\u8f7d\uff09\uff0c\u4f7f\u7528",(0,l.kt)("inlineCode",{parentName:"p"},"reduce"),"\u548c",(0,l.kt)("inlineCode",{parentName:"p"},"gather"),"\u6765\u540c\u6b65\u68af\u5ea6\u548c\u6743\u91cd\u3002"),(0,l.kt)("p",null,"Zero-1 \u53ef\u4ee5\u770b\u4f5c\u662f Torch DDP \u66f4\u597d\u7684\u66ff\u4ee3\u54c1\uff0c\u5185\u5b58\u6548\u7387\u66f4\u9ad8\uff0c\u901f\u5ea6\u66f4\u5feb\u3002\u5b83\u53ef\u4ee5\u5f88\u5bb9\u6613\u5730\u7528\u4e8e\u6df7\u5408\u5e76\u884c\u3002"),(0,l.kt)("p",null,"Zero-2 \u4e0d\u652f\u6301\u5c40\u90e8\u68af\u5ea6\u7d2f\u79ef\u3002\u5982\u679c\u60a8\u575a\u6301\u4f7f\u7528\uff0c\u867d\u7136\u53ef\u4ee5\u79ef\u7d2f\u68af\u5ea6\uff0c\u4f46\u4e0d\u80fd\u964d\u4f4e\u901a\u4fe1\u6210\u672c\u3002\u4e5f\u5c31\u662f\u8bf4\uff0c\u540c\u65f6\u4f7f\u7528\u6d41\u6c34\u7ebf\u5e76\u884c\u548c Zero-2 \u5e76\u4e0d\u662f\u4e00\u4e2a\u597d\u4e3b\u610f\u3002"),(0,l.kt)(i.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(i.Dx,{type:"class",name:"colossalai.booster.plugin.LowLevelZeroPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/low_level_zero_plugin.py#L213",mdxType:"Title"}),(0,l.kt)(i.Pc,{mdxType:"Signature"},"stage: int = 1, precision: str = 'fp16', initial_scale: float = 4294967296, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, reduce_bucket_size_in_m: int = 12, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, cpu_offload: bool = False, master_weights: bool = True, verbose: bool = False"),(0,l.kt)(i.aE,{mdxType:"Parameters"},"- **stage** (int, optional) -- ZeRO stage. Defaults to 1.\n- **precision** (str, optional) -- precision. Support 'fp16', 'bf16' and 'fp32'. 
Defaults to 'fp16'.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**32.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **reduce_bucket_size_in_m** (int, optional) -- grad reduce bucket size in M. Defaults to 12.\n- **communication_dtype** (torch.dtype, optional) -- communication dtype. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- whether to overlap communication and computation. Defaults to True.\n- **cpu_offload** (bool, optional) -- whether to offload grad, master weight and optimizer state to cpu. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including grad overflow will be printed. Defaults to False.")),(0,l.kt)("div",null,(0,l.kt)(i.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for low level zero."),(0,l.kt)(i.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import LowLevelZeroPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = LowLevelZeroPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,l.kt)("p",null,"\u6211\u4eec\u5df2\u7ecf\u6d4b\u8bd5\u4e86\u4e00\u4e9b\u4e3b\u6d41\u6a21\u578b\u7684\u517c\u5bb9\u6027\uff0c\u53ef\u80fd\u4e0d\u652f\u6301\u4ee5\u4e0b\u6a21\u578b\uff1a"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("inlineCode",{parentName:"li"},"timm.models.convit_base")),(0,l.kt)("li",{parentName:"ul"},"dlrm and deepfm models in ",(0,l.kt)("inlineCode",{parentName:"li"},"torchrec"))),(0,l.kt)("p",null,"\u517c\u5bb9\u6027\u95ee\u9898\u5c06\u5728\u672a\u6765\u4fee\u590d\u3002"),(0,l.kt)("h3",{id:"gemini-\u63d2\u4ef6"},"Gemini \u63d2\u4ef6"),(0,l.kt)("p",null,"\u8fd9\u4e2a\u63d2\u4ef6\u5b9e\u73b0\u4e86\u57fa\u4e8eChunk\u5185\u5b58\u7ba1\u7406\u548c\u5f02\u6784\u5185\u5b58\u7ba1\u7406\u7684 Zero-3\u3002\u5b83\u53ef\u4ee5\u8bad\u7ec3\u5927\u578b\u6a21\u578b\u800c\u4e0d\u4f1a\u635f\u5931\u592a\u591a\u901f\u5ea6\u3002\u5b83\u4e5f\u4e0d\u652f\u6301\u5c40\u90e8\u68af\u5ea6\u7d2f\u79ef\u3002\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 ",(0,l.kt)("a",{parentName:"p",href:"/zh-Hans/docs/features/zero_with_chunk"},"Gemini 
\u6587\u6863"),"."),(0,l.kt)(i.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(i.Dx,{type:"class",name:"colossalai.booster.plugin.GeminiPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/gemini_plugin.py#L255",mdxType:"Title"}),(0,l.kt)(i.Pc,{mdxType:"Signature"},"chunk_config_dict: typing.Optional[dict] = None, chunk_init_device: typing.Optional[torch.device] = None, placement_policy: str = 'static', enable_gradient_accumulation: bool = False, shard_param_frac: float = 1.0, offload_optim_frac: float = 0.0, offload_param_frac: float = 0.0, warmup_non_model_data_ratio: float = 0.8, steady_cuda_cap_ratio: float = 0.9, precision: str = 'fp16', master_weights: bool = True, pin_memory: bool = False, force_outputs_fp32: bool = False, strict_ddp_mode: bool = False, search_range_m: int = 32, hidden_dim: typing.Optional[int] = None, min_chunk_size_m: float = 32, memstats: typing.Optional[colossalai.zero.gemini.memory_tracer.memory_stats.MemStats] = None, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, tp_size: int = 1, extra_dp_size: int = 1, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_sequence_parallelism: bool = False, enable_jit_fused: bool = False, enable_sequence_overlap: bool = False, verbose: bool = False"),(0,l.kt)(i.aE,{mdxType:"Parameters"},'- **chunk_config_dict** (dict, optional) -- chunk configuration dictionary.\n- **chunk_init_device** (torch.device, optional) -- device to initialize the chunk.\n- **placement_policy** (str, optional) -- "static" and "auto". Defaults to "static".\n- **enable_gradient_accumulation** (bool, optional) -- Whether to enable gradient accumulation. When set to True, gradient will be stored after doing backward pass. Defaults to False.\n- **shard_param_frac** (float, optional) -- fraction of parameters to be sharded. Only for "static" placement.\n If `shard_param_frac` is 1.0, it\'s equal to zero-3. If `shard_param_frac` is 0.0, it\'s equal to zero-2. Defaults to 1.0.\n- **offload_optim_frac** (float, optional) -- fraction of optimizer states to be offloaded. Only for "static" placement.\n If `shard_param_frac` is 1.0 and `offload_optim_frac` is 0.0, it\'s equal to old "cuda" placement. Defaults to 0.0.\n- **offload_param_frac** (float, optional) -- fraction of parameters to be offloaded. Only for "static" placement.\n For efficiency, this argument is useful only when `shard_param_frac` is 1.0 and `offload_optim_frac` is 1.0.\n If `shard_param_frac` is 1.0, `offload_optim_frac` is 1.0 and `offload_param_frac` is 1.0, it\'s equal to old "cpu" placement.\n When using static placement, we recommend users to tune `shard_param_frac` first and then `offload_optim_frac`.\n Defaults to 0.0.\n- **warmup_non_model_data_ratio** (float, optional) -- ratio of expected non-model data memory during warmup. Only for "auto" placement. Defaults to 0.8.\n- **steady_cuda_cap_ratio** (float, optional) -- ratio of allowed cuda capacity for model data during steady state. Only for "auto" placement. Defaults to 0.9.\n- **precision** (str, optional) -- precision. Support \'fp16\' and \'bf16\'. Defaults to \'fp16\'.\n- **master_weights** (bool, optional) -- Whether to keep fp32 master parameter weights in optimizer. 
Defaults to True.\n- **pin_memory** (bool, optional) -- use pin memory on CPU. Defaults to False.\n- **force_outputs_fp32** (bool, optional) -- force outputs are fp32. Defaults to False.\n- **strict_ddp_mode** (bool, optional) -- use strict ddp mode (only use dp without other parallelism). Defaults to False.\n- **search_range_m** (int, optional) -- chunk size searching range divided by 2^20. Defaults to 32.\n- **hidden_dim** (int, optional) -- the hidden dimension of DNN.\n Users can provide this argument to speed up searching.\n If users do not know this argument before training, it is ok. We will use a default value 1024.\n- **min_chunk_size_m** (float, optional) -- the minimum chunk size divided by 2^20.\n If the aggregate size of parameters is still smaller than the minimum chunk size,\n all parameters will be compacted into one small chunk.\n- **memstats** (MemStats, optional) the memory statistics collector by a runtime memory tracer. --\n- **gpu_margin_mem_ratio** (float, optional) -- The ratio of GPU remaining memory (after the first forward-backward)\n which will be used when using hybrid CPU optimizer.\n This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto".\n Defaults to 0.0.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**16.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **tp_size** (int, optional) -- If \'tp_size\' is set to be greater than 1, it means using tensor parallelism strategy, which is implemented in Shardformer, \'tp_size\' determines the size of the tensor parallel process group. Default to 1.\n- **extra_dp_size** (int, optional) -- If \'extra_dp_size\' is set to be greater than 1, it means creating another group to run with a ddp-like strategy. Default to 1.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including chunk search result will be printed. 
Defaults to False.')),(0,l.kt)("div",null,(0,l.kt)(i.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for Gemini."),(0,l.kt)(i.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = GeminiPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,l.kt)("h3",{id:"hybrid-parallel-\u63d2\u4ef6"},"Hybrid Parallel \u63d2\u4ef6"),(0,l.kt)("p",null,"\u8fd9\u4e2a\u63d2\u4ef6\u5b9e\u73b0\u4e86\u591a\u79cd\u5e76\u884c\u8bad\u7ec3\u7b56\u7565\u548c\u4f18\u5316\u5de5\u5177\u7684\u7ec4\u5408\u3002Hybrid Parallel\u63d2\u4ef6\u652f\u6301\u7684\u529f\u80fd\u5927\u81f4\u53ef\u4ee5\u88ab\u5206\u4e3a\u4ee5\u4e0b\u56db\u4e2a\u90e8\u5206\uff1a"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Shardformer: Shardformer\u8d1f\u8d23\u5728\u5f20\u91cf\u5e76\u884c\u4ee5\u53ca\u6d41\u6c34\u7ebf\u5e76\u884c\u4e0b\u5207\u5206\u6a21\u578b\u7684\u903b\u8f91\uff0c\u4ee5\u53ca\u524d\u5411/\u540e\u5411\u65b9\u6cd5\u7684\u91cd\u8f7d\uff0c\u8fd9\u4e2a\u63d2\u4ef6\u4e3aShardformer\u529f\u80fd\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7b80\u5355\u6613\u7528\u7684\u63a5\u53e3\u3002\u4e0e\u6b64\u540c\u65f6\uff0cShardformer\u8fd8\u8d1f\u8d23\u5c06\u5305\u62ecfused normalization, flash attention (xformers), JIT\u548c\u5e8f\u5217\u5e76\u884c\u5728\u5185\u7684\u5404\u7c7b\u4f18\u5316\u5de5\u5177\u878d\u5165\u91cd\u8f7d\u540e\u7684\u524d\u5411/\u540e\u5411\u65b9\u6cd5\u3002\u66f4\u591a\u5173\u4e8eShardformer\u7684\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"li",href:"/zh-Hans/docs/features/shardformer"},"Shardformer\u6587\u6863"),"\u3002\u4e0b\u56fe\u5c55\u793a\u4e86Shardformer\u4e0eHybrid Parallel\u63d2\u4ef6\u6240\u652f\u6301\u7684\u529f\u80fd\u3002")),(0,l.kt)("div",{align:"center"},(0,l.kt)("img",{src:"https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/shardformer/shardformer_and_hybridparallel.png",width:"500"})),(0,l.kt)("ol",{start:2},(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\uff1a\u63d2\u4ef6\u652f\u6301fp16/bf16\u7684\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u3002\u66f4\u591a\u5173\u4e8e\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u7684\u53c2\u6570\u914d\u7f6e\u7684\u8be6\u7ec6\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-Hans/docs/features/mixed_precision_training_with_booster"},"\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u6587\u6863"),"\u3002")),(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"Torch DDP: \u5f53\u6d41\u6c34\u7ebf\u5e76\u884c\u548cZero\u4e0d\u88ab\u4f7f\u7528\u7684\u65f6\u5019\uff0c\u63d2\u4ef6\u4f1a\u81ea\u52a8\u91c7\u7528Pytorch DDP\u4f5c\u4e3a\u6570\u636e\u5e76\u884c\u7684\u7b56\u7565\u3002\u66f4\u591a\u5173\u4e8eTorch DDP\u7684\u53c2\u6570\u914d\u7f6e\u7684\u8be6\u7ec6\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch DDP \u6587\u6863"),"\u3002")),(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"Zero: 
\u5728\u521d\u59cb\u5316\u63d2\u4ef6\u7684\u65f6\u5019\uff0c\u53ef\u4ee5\u901a\u8fc7\u5c06",(0,l.kt)("inlineCode",{parentName:"p"},"zero_stage"),"\u53c2\u6570\u8bbe\u7f6e\u4e3a1\u62162\u6765\u8ba9\u63d2\u4ef6\u91c7\u7528Zero 1/2\u4f5c\u4e3a\u6570\u636e\u5e76\u884c\u7684\u7b56\u7565\u3002Zero 1\u53ef\u4ee5\u548c\u6d41\u6c34\u7ebf\u5e76\u884c\u7b56\u7565\u540c\u65f6\u4f7f\u7528, \u800cZero 2\u5219\u4e0d\u53ef\u4ee5\u548c\u6d41\u6c34\u7ebf\u5e76\u884c\u7b56\u7565\u540c\u65f6\u4f7f\u7528\u3002\u66f4\u591a\u5173\u4e8eZero\u7684\u53c2\u6570\u914d\u7f6e\u7684\u8be6\u7ec6\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"#low-level-zero-%E6%8F%92%E4%BB%B6"},"Low Level Zero \u63d2\u4ef6"),"."))),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u5728\u4f7f\u7528\u8be5\u63d2\u4ef6\u7684\u65f6\u5019, \u53ea\u6709\u652f\u6301Shardformer\u7684\u90e8\u5206Huggingface transformers\u6a21\u578b\u624d\u80fd\u591f\u4f7f\u7528\u5f20\u91cf\u5e76\u884c\u3001\u6d41\u6c34\u7ebf\u5e76\u884c\u4ee5\u53ca\u4f18\u5316\u5de5\u5177\u3002Llama 1\u3001Llama 2\u3001OPT\u3001Bloom\u3001Bert\u4ee5\u53caGPT2\u7b49\u4e3b\u6d41transformers\u6a21\u578b\u5747\u5df2\u652f\u6301Shardformer\u3002")),(0,l.kt)(i.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(i.Dx,{type:"class",name:"colossalai.booster.plugin.HybridParallelPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L863",mdxType:"Title"}),(0,l.kt)(i.Pc,{mdxType:"Signature"},"tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1"),(0,l.kt)(i.aE,{mdxType:"Parameters"},"- **tp_size** (int) -- The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.\n- **pp_size** (int) -- The number of pipeline stages in pipeline parallelism. Pipeline parallelism will not be used when pp_size is set to 1.\n- **precision** (str, optional) -- Specifies the precision of parameters during training.\n Auto-mixied precision will be used when this argument is set to 'fp16' or 'bf16', otherwise model is trained with 'fp32'.\n Defaults to 'fp16'.\n- **zero_stage** (int, optional) -- The stage of ZeRO for data parallelism. Can only be choosed from [0, 1, 2].\n When set to 0, ZeRO will not be used. 
Defaults to 0.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **num_microbatches** (int, optional) -- Number of microbatches when using pipeline parallelism. Defaults to None.\n- **microbatch_size** (int, optional) -- Microbatch size when using pipeline parallelism.\n Either `num_microbatches` or `microbatch_size` should be provided if using pipeline.\n If `num_microbatches` is provided, this will be ignored. Defaults to None.\n- **initial_scale** (float, optional) -- The initial loss scale of AMP. Defaults to 2**16.\n- **min_scale** (float, optional) -- The minimum loss scale of AMP. Defaults to 1.\n- **growth_factor** (float, optional) -- The multiplication factor for increasing loss scale when using AMP. Defaults to 2.\n- **backoff_factor** (float, optional) -- The multiplication factor for decreasing loss scale when using AMP. Defaults to 0.5.\n- **growth_interval** (int, optional) -- The number of steps to increase loss scale when no overflow occurs when using AMP. Defaults to 1000.\n- **hysteresis** (int, optional) -- The number of overflows before decreasing loss scale when using AMP. Defaults to 2.\n- **max_scale** (float, optional) -- The maximum loss scale of AMP. Defaults to 2**32.\n- **max_norm** (float, optional) -- Maximum norm for gradient clipping. Defaults to 0.\n- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training when using DDP. Defaults to True.\n- **ddp_bucket_cap_mb** (int, optional) -- The bucket size in MB when using DDP. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters when using DDP. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction when using DDP. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view when using DDP. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph when using DDP. Defaults to False.\n- **zero_bucket_size_in_m** (int, optional) -- Gradient reduce bucket size in million elements when using ZeRO. Defaults to 12.\n- **cpu_offload** (bool, optional) -- Whether to open cpu_offload when using ZeRO. Defaults to False.\n- **communication_dtype** (torch.dtype, optional) -- Communication dtype when using ZeRO. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- Whether to overlap communication and computation when using ZeRO. Defaults to True.\n- **custom_policy** (Policy, optional) -- Custom policy for Shardformer. Defaults to None.\n- **pp_style** (str, optional) -- The style for pipeline parallelism. 
Defaults to '1f1b'.\n- **num_model_chunks** (int, optional) -- The number of model chunks for interleaved pipeline parallelism. Defaults to 1.")),(0,l.kt)("div",null,(0,l.kt)(i.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for Hybrid Parallel Training.\nTensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin.\nThe size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size)."),(0,l.kt)(i.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import HybridParallelPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = HybridParallelPlugin(tp_size=2, pp_size=2)\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader)\n```",mdxType:"ExampleCode"})),(0,l.kt)(i.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(i.Dx,{type:"function",name:"prepare_dataloader",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L1188",mdxType:"Title"}),(0,l.kt)(i.Pc,{mdxType:"Signature"},"dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs"),(0,l.kt)(i.aE,{mdxType:"Parameters"},"- **dataset** (*torch.utils.data.Dataset*) -- The dataset to be loaded.\n- **shuffle** (bool, optional) -- Whether to shuffle the dataset. Defaults to False.\n- **seed** (int, optional) -- Random worker seed for sampling, defaults to 1024.\n add_sampler -- Whether to add `DistributedDataParallelSampler` to the dataset. Defaults to True.\n- **drop_last** (bool, optional) -- Set to True to drop the last incomplete batch, if the dataset size\n is not divisible by the batch size. If False and the size of dataset is not divisible by\n the batch size, then the last batch will be smaller, defaults to False.\n- **pin_memory** (bool, optional) -- Whether to pin memory address in CPU memory. Defaults to False.\n- **num_workers** (int, optional) -- Number of worker threads for this dataloader. Defaults to 0.\n- **kwargs** (dict) -- optional parameters for `torch.utils.data.DataLoader`, more details could be found in\n [DataLoader](https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader)."),(0,l.kt)(i.nT,{name:"[`torch.utils.data.DataLoader`]",desc:"A DataLoader used for training or testing.",mdxType:"Returns"})),(0,l.kt)("div",null,(0,l.kt)(i.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Prepare a dataloader for distributed training. 
The dataloader will be wrapped by\n",(0,l.kt)("em",{parentName:"p"},"torch.utils.data.DataLoader")," and ",(0,l.kt)("em",{parentName:"p"},"torch.utils.data.DistributedSampler"),".")))),(0,l.kt)("h3",{id:"torch-ddp-\u63d2\u4ef6"},"Torch DDP \u63d2\u4ef6"),(0,l.kt)("p",null,"\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 ",(0,l.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch \u6587\u6863"),"."),(0,l.kt)(i.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(i.Dx,{type:"class",name:"colossalai.booster.plugin.TorchDDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_ddp_plugin.py#L129",mdxType:"Title"}),(0,l.kt)(i.Pc,{mdxType:"Signature"},"broadcast_buffers: bool = True, bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False"),(0,l.kt)(i.aE,{mdxType:"Parameters"},"- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training. Defaults to True.\n- **bucket_cap_mb** (int, optional) -- The bucket size in MB. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph. Defaults to False.")),(0,l.kt)("div",null,(0,l.kt)(i.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for PyTorch DDP."),(0,l.kt)(i.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchDDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchDDPPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,l.kt)("h3",{id:"torch-fsdp-\u63d2\u4ef6"},"Torch FSDP \u63d2\u4ef6"),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u5982\u679c torch \u7248\u672c\u4f4e\u4e8e 1.12.0\uff0c\u6b64\u63d2\u4ef6\u5c06\u4e0d\u53ef\u7528\u3002")),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u8be5\u63d2\u4ef6\u73b0\u5728\u8fd8\u4e0d\u652f\u6301\u4fdd\u5b58/\u52a0\u8f7d\u5206\u7247\u7684\u6a21\u578b checkpoint\u3002")),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u8be5\u63d2\u4ef6\u73b0\u5728\u8fd8\u4e0d\u652f\u6301\u4f7f\u7528\u4e86multi params group\u7684optimizer\u3002")),(0,l.kt)("p",null,"\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 ",(0,l.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/fsdp.html"},"Pytorch \u6587\u6863"),"."),(0,l.kt)(i.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(i.Dx,{type:"class",name:"colossalai.booster.plugin.TorchFSDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_fsdp_plugin.py#L142",mdxType:"Title"}),(0,l.kt)(i.Pc,{mdxType:"Signature"},"process_group: typing.Optional[torch.distributed.distributed_c10d.ProcessGroup] = None, sharding_strategy: typing.Optional[torch.distributed.fsdp.api.ShardingStrategy] = 
None, cpu_offload: typing.Optional[torch.distributed.fsdp.api.CPUOffload] = None, auto_wrap_policy: typing.Optional[typing.Callable] = None, backward_prefetch: typing.Optional[torch.distributed.fsdp.api.BackwardPrefetch] = None, mixed_precision: typing.Optional[torch.distributed.fsdp.api.MixedPrecision] = None, ignored_modules: typing.Optional[typing.Iterable[torch.nn.modules.module.Module]] = None, param_init_fn: typing.Optional[typing.Callable[[torch.nn.modules.module.Module]], NoneType] = None, sync_module_states: bool = False"),(0,l.kt)(i.aE,{mdxType:"Parameters"},"- **See** https --//pytorch.org/docs/stable/fsdp.html for details.")),(0,l.kt)("div",null,(0,l.kt)(i.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for PyTorch FSDP."),(0,l.kt)(i.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchFSDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchFSDPPlugin()\n\ntrain_dataloader = plugin.prepare_train_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))))}m.isMDXComponent=!0}}]); \ No newline at end of file +"use strict";(self.webpackChunkdemo=self.webpackChunkdemo||[]).push([[9314],{6999:(e,t,a)=>{a.d(t,{Cl:()=>n,Dx:()=>c,Pc:()=>i,aE:()=>s,e_:()=>d,iz:()=>r,nT:()=>p});var o=a(7294),l=a(398);a(814);function n(e){return o.createElement("div",{className:"docstring-container"},e.children)}function i(e){return o.createElement("div",{className:"signature"},"(",e.children,")")}function r(e){return o.createElement("div",{class:"divider"},o.createElement("span",{class:"divider-text"},e.name))}function s(e){return o.createElement("div",null,o.createElement(r,{name:"Parameters"}),o.createElement(l.D,null,e.children))}function p(e){return o.createElement("div",null,o.createElement(r,{name:"Returns"}),o.createElement(l.D,null,`${e.name}: ${e.desc}`))}function c(e){return o.createElement("div",{className:"title-container"},o.createElement("div",{className:"title-module"},o.createElement("h5",null,e.type),"\xa0 ",o.createElement("h3",null,e.name)),o.createElement("div",{className:"title-source"},"<",o.createElement("a",{href:e.source,className:"title-source"},"source"),">"))}function d(e){return o.createElement("div",null,o.createElement(r,{name:"Example"}),o.createElement(l.D,null,e.code))}},189:(e,t,a)=>{a.r(t),a.d(t,{assets:()=>p,contentTitle:()=>r,default:()=>m,frontMatter:()=>i,metadata:()=>s,toc:()=>c});var o=a(7462),l=(a(7294),a(3905)),n=a(6999);const i={},r="Booster \u63d2\u4ef6",s={unversionedId:"basics/booster_plugins",id:"basics/booster_plugins",title:"Booster \u63d2\u4ef6",description:"\u4f5c\u8005: Hongxin Liu, Baizhou Zhang, Pengtai Xu",source:"@site/i18n/zh-Hans/docusaurus-plugin-content-docs/current/basics/booster_plugins.md",sourceDirName:"basics",slug:"/basics/booster_plugins",permalink:"/zh-Hans/docs/basics/booster_plugins",draft:!1,editUrl:"https://github.com/facebook/docusaurus/tree/main/packages/create-docusaurus/templates/shared/docs/basics/booster_plugins.md",tags:[],version:"current",frontMatter:{},sidebar:"tutorialSidebar",previous:{title:"Booster API",permalink:"/zh-Hans/docs/basics/booster_api"},next:{title:"Booster 
Checkpoint",permalink:"/zh-Hans/docs/basics/booster_checkpoint"}},p={},c=[{value:"\u5f15\u8a00",id:"\u5f15\u8a00",level:2},{value:"\u63d2\u4ef6\u9009\u62e9",id:"\u63d2\u4ef6\u9009\u62e9",level:2},{value:"\u63d2\u4ef6",id:"\u63d2\u4ef6",level:2},{value:"Low Level Zero \u63d2\u4ef6",id:"low-level-zero-\u63d2\u4ef6",level:3},{value:"Gemini \u63d2\u4ef6",id:"gemini-\u63d2\u4ef6",level:3},{value:"Hybrid Parallel \u63d2\u4ef6",id:"hybrid-parallel-\u63d2\u4ef6",level:3},{value:"Torch DDP \u63d2\u4ef6",id:"torch-ddp-\u63d2\u4ef6",level:3},{value:"Torch FSDP \u63d2\u4ef6",id:"torch-fsdp-\u63d2\u4ef6",level:3}],d={toc:c},u="wrapper";function m(e){let{components:t,...a}=e;return(0,l.kt)(u,(0,o.Z)({},d,a,{components:t,mdxType:"MDXLayout"}),(0,l.kt)("h1",{id:"booster-\u63d2\u4ef6"},"Booster \u63d2\u4ef6"),(0,l.kt)("p",null,"\u4f5c\u8005: ",(0,l.kt)("a",{parentName:"p",href:"https://github.com/ver217"},"Hongxin Liu"),", ",(0,l.kt)("a",{parentName:"p",href:"https://github.com/Fridge003"},"Baizhou Zhang"),", ",(0,l.kt)("a",{parentName:"p",href:"https://github.com/ppt0011"},"Pengtai Xu")),(0,l.kt)("p",null,(0,l.kt)("strong",{parentName:"p"},"\u524d\u7f6e\u6559\u7a0b:")),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"/zh-Hans/docs/basics/booster_api"},"Booster API"))),(0,l.kt)("h2",{id:"\u5f15\u8a00"},"\u5f15\u8a00"),(0,l.kt)("p",null,"\u6b63\u5982 ",(0,l.kt)("a",{parentName:"p",href:"/zh-Hans/docs/basics/booster_api"},"Booster API")," \u4e2d\u63d0\u5230\u7684\uff0c\u6211\u4eec\u53ef\u4ee5\u4f7f\u7528 booster \u63d2\u4ef6\u6765\u81ea\u5b9a\u4e49\u5e76\u884c\u8bad\u7ec3\u3002\u5728\u672c\u6559\u7a0b\u4e2d\uff0c\u6211\u4eec\u5c06\u4ecb\u7ecd\u5982\u4f55\u4f7f\u7528 booster \u63d2\u4ef6\u3002"),(0,l.kt)("p",null,"\u6211\u4eec\u73b0\u5728\u63d0\u4f9b\u4ee5\u4e0b\u63d2\u4ef6:"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-ddp-%E6%8F%92%E4%BB%B6"},"Torch DDP \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("inlineCode",{parentName:"li"},"torch.nn.parallel.DistributedDataParallel")," \u5e76\u4e14\u53ef\u7528\u4e8e\u4f7f\u7528\u6570\u636e\u5e76\u884c\u8bad\u7ec3\u6a21\u578b\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-fsdp-%E6%8F%92%E4%BB%B6"},"Torch FSDP \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("inlineCode",{parentName:"li"},"torch.distributed.fsdp.FullyShardedDataParallel")," \u5e76\u4e14\u53ef\u7528\u4e8e\u4f7f\u7528 Zero-dp \u8bad\u7ec3\u6a21\u578b\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#low-level-zero-%E6%8F%92%E4%BB%B6"},"Low Level Zero \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("inlineCode",{parentName:"li"},"colossalai.zero.low_level.LowLevelZeroOptimizer"),"\uff0c\u53ef\u7528\u4e8e\u4f7f\u7528 Zero-dp \u8bad\u7ec3\u6a21\u578b\u3002\u5b83\u4ec5\u652f\u6301 Zero \u9636\u6bb51\u548c\u9636\u6bb52\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#gemini-%E6%8F%92%E4%BB%B6"},"Gemini \u63d2\u4ef6"),": \u5b83\u5305\u88c5\u4e86 ",(0,l.kt)("a",{parentName:"li",href:"/zh-Hans/docs/features/zero_with_chunk"},"Gemini"),"\uff0cGemini \u5b9e\u73b0\u4e86\u57fa\u4e8eChunk\u5185\u5b58\u7ba1\u7406\u548c\u5f02\u6784\u5185\u5b58\u7ba1\u7406\u7684 Zero-3\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#hybrid-parallel-%E6%8F%92%E4%BB%B6"},"Hybrid Parallel \u63d2\u4ef6"),": 
\u5b83\u4e3aShardformer\uff0c\u6d41\u6c34\u7ebf\u7ba1\u7406\u5668\uff0c\u6df7\u5408\u7cbe\u5ea6\u8fd0\u7b97\uff0cTorchDDP\u4ee5\u53caZero-1/Zero-2\u529f\u80fd\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7edf\u4e00\u4e14\u7b80\u6d01\u7684\u63a5\u53e3\u3002\u4f7f\u7528\u8be5\u63d2\u4ef6\u53ef\u4ee5\u7b80\u5355\u9ad8\u6548\u5730\u5b9e\u73b0transformer\u6a21\u578b\u5728\u5f20\u91cf\u5e76\u884c\uff0c\u6d41\u6c34\u7ebf\u5e76\u884c\u4ee5\u53ca\u6570\u636e\u5e76\u884c\uff08DDP, Zero\uff09\u95f4\u4efb\u610f\u7ec4\u5408\u5e76\u884c\u8bad\u7ec3\u7b56\u7565\uff0c\u540c\u65f6\u652f\u6301\u591a\u79cd\u8bad\u7ec3\u901f\u5ea6\u548c\u5185\u5b58\u7684\u4f18\u5316\u5de5\u5177\u3002\u6709\u5173\u8fd9\u4e9b\u8bad\u7ec3\u7b56\u7565\u548c\u4f18\u5316\u5de5\u5177\u7684\u5177\u4f53\u4fe1\u606f\u5c06\u5728\u4e0b\u4e00\u7ae0\u4e2d\u9610\u8ff0\u3002")),(0,l.kt)("p",null,"\u66f4\u591a\u63d2\u4ef6\u5373\u5c06\u63a8\u51fa\u3002"),(0,l.kt)("h2",{id:"\u63d2\u4ef6\u9009\u62e9"},"\u63d2\u4ef6\u9009\u62e9"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-ddp-%E6%8F%92%E4%BB%B6"},"Torch DDP \u63d2\u4ef6"),": \u9002\u7528\u4e8e\u53c2\u6570\u5c11\u4e8e 20 \u4ebf\u7684\u6a21\u578b\uff08\u4f8b\u5982 Bert-3m\u3001GPT2-1.5b\uff09\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#torch-fsdp-%E6%8F%92%E4%BB%B6"},"Torch FSDP \u63d2\u4ef6")," / ",(0,l.kt)("a",{parentName:"li",href:"#low-level-zero-%E6%8F%92%E4%BB%B6"},"Low Level Zero \u63d2\u4ef6"),": \u9002\u7528\u4e8e\u53c2\u6570\u5c11\u4e8e 100 \u4ebf\u7684\u6a21\u578b\uff08\u4f8b\u5982 GPTJ-6b\u3001MegatronLM-8b\uff09\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#gemini-%E6%8F%92%E4%BB%B6"},"Gemini \u63d2\u4ef6"),": \u9002\u5408\u53c2\u6570\u8d85\u8fc7 100 \u4ebf\u7684\u6a21\u578b\uff08\u4f8b\u5982 TuringNLG-17b\uff09\uff0c\u4e14",(0,l.kt)("strong",{parentName:"li"},"\u8de8\u8282\u70b9\u5e26\u5bbd\u9ad8\u3001\u4e2d\u5c0f\u89c4\u6a21\u96c6\u7fa4\uff08\u5343\u5361\u4ee5\u4e0b\uff09"),"\u7684\u573a\u666f\uff08\u4f8b\u5982 Llama2-70b\uff09\u3002"),(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("a",{parentName:"li",href:"#hybrid-parallel-%E6%8F%92%E4%BB%B6"},"Hybrid Parallel \u63d2\u4ef6"),": \u9002\u5408\u53c2\u6570\u8d85\u8fc7 600 \u4ebf\u7684\u6a21\u578b\u3001\u8d85\u957f\u5e8f\u5217\u3001\u8d85\u5927\u8bcd\u8868\u7b49\u7279\u6b8a\u6a21\u578b\uff0c\u4e14",(0,l.kt)("strong",{parentName:"li"},"\u8de8\u8282\u70b9\u5e26\u5bbd\u4f4e\u3001\u5927\u89c4\u6a21\u96c6\u7fa4\uff08\u5343\u5361\u4ee5\u4e0a\uff09"),"\u7684\u573a\u666f\uff08\u4f8b\u5982 GPT3-175b\u3001Bloom-176b\uff09\u3002")),(0,l.kt)("h2",{id:"\u63d2\u4ef6"},"\u63d2\u4ef6"),(0,l.kt)("h3",{id:"low-level-zero-\u63d2\u4ef6"},"Low Level Zero \u63d2\u4ef6"),(0,l.kt)("p",null,"\u8be5\u63d2\u4ef6\u5b9e\u73b0\u4e86 Zero-1 \u548c Zero-2\uff08\u4f7f\u7528/\u4e0d\u4f7f\u7528 CPU \u5378\u8f7d\uff09\uff0c\u4f7f\u7528",(0,l.kt)("inlineCode",{parentName:"p"},"reduce"),"\u548c",(0,l.kt)("inlineCode",{parentName:"p"},"gather"),"\u6765\u540c\u6b65\u68af\u5ea6\u548c\u6743\u91cd\u3002"),(0,l.kt)("p",null,"Zero-1 \u53ef\u4ee5\u770b\u4f5c\u662f Torch DDP \u66f4\u597d\u7684\u66ff\u4ee3\u54c1\uff0c\u5185\u5b58\u6548\u7387\u66f4\u9ad8\uff0c\u901f\u5ea6\u66f4\u5feb\u3002\u5b83\u53ef\u4ee5\u5f88\u5bb9\u6613\u5730\u7528\u4e8e\u6df7\u5408\u5e76\u884c\u3002"),(0,l.kt)("p",null,"Zero-2 
\u4e0d\u652f\u6301\u5c40\u90e8\u68af\u5ea6\u7d2f\u79ef\u3002\u5982\u679c\u60a8\u575a\u6301\u4f7f\u7528\uff0c\u867d\u7136\u53ef\u4ee5\u79ef\u7d2f\u68af\u5ea6\uff0c\u4f46\u4e0d\u80fd\u964d\u4f4e\u901a\u4fe1\u6210\u672c\u3002\u4e5f\u5c31\u662f\u8bf4\uff0c\u540c\u65f6\u4f7f\u7528\u6d41\u6c34\u7ebf\u5e76\u884c\u548c Zero-2 \u5e76\u4e0d\u662f\u4e00\u4e2a\u597d\u4e3b\u610f\u3002"),(0,l.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.LowLevelZeroPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/low_level_zero_plugin.py#L213",mdxType:"Title"}),(0,l.kt)(n.Pc,{mdxType:"Signature"},"stage: int = 1, precision: str = 'fp16', initial_scale: float = 4294967296, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, reduce_bucket_size_in_m: int = 12, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, cpu_offload: bool = False, master_weights: bool = True, verbose: bool = False"),(0,l.kt)(n.aE,{mdxType:"Parameters"},"- **stage** (int, optional) -- ZeRO stage. Defaults to 1.\n- **precision** (str, optional) -- precision. Support 'fp16', 'bf16' and 'fp32'. Defaults to 'fp16'.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**32.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **reduce_bucket_size_in_m** (int, optional) -- grad reduce bucket size in M. Defaults to 12.\n- **communication_dtype** (torch.dtype, optional) -- communication dtype. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- whether to overlap communication and computation. Defaults to True.\n- **cpu_offload** (bool, optional) -- whether to offload grad, master weight and optimizer state to cpu. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including grad overflow will be printed. 
Defaults to False.")),(0,l.kt)("div",null,(0,l.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for low level zero."),(0,l.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import LowLevelZeroPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = LowLevelZeroPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,l.kt)("p",null,"\u6211\u4eec\u5df2\u7ecf\u6d4b\u8bd5\u4e86\u4e00\u4e9b\u4e3b\u6d41\u6a21\u578b\u7684\u517c\u5bb9\u6027\uff0c\u53ef\u80fd\u4e0d\u652f\u6301\u4ee5\u4e0b\u6a21\u578b\uff1a"),(0,l.kt)("ul",null,(0,l.kt)("li",{parentName:"ul"},(0,l.kt)("inlineCode",{parentName:"li"},"timm.models.convit_base")),(0,l.kt)("li",{parentName:"ul"},"dlrm and deepfm models in ",(0,l.kt)("inlineCode",{parentName:"li"},"torchrec"))),(0,l.kt)("p",null,"\u517c\u5bb9\u6027\u95ee\u9898\u5c06\u5728\u672a\u6765\u4fee\u590d\u3002"),(0,l.kt)("h3",{id:"gemini-\u63d2\u4ef6"},"Gemini \u63d2\u4ef6"),(0,l.kt)("p",null,"\u8fd9\u4e2a\u63d2\u4ef6\u5b9e\u73b0\u4e86\u57fa\u4e8eChunk\u5185\u5b58\u7ba1\u7406\u548c\u5f02\u6784\u5185\u5b58\u7ba1\u7406\u7684 Zero-3\u3002\u5b83\u53ef\u4ee5\u8bad\u7ec3\u5927\u578b\u6a21\u578b\u800c\u4e0d\u4f1a\u635f\u5931\u592a\u591a\u901f\u5ea6\u3002\u5b83\u4e5f\u4e0d\u652f\u6301\u5c40\u90e8\u68af\u5ea6\u7d2f\u79ef\u3002\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 ",(0,l.kt)("a",{parentName:"p",href:"/zh-Hans/docs/features/zero_with_chunk"},"Gemini \u6587\u6863"),"."),(0,l.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.GeminiPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/gemini_plugin.py#L255",mdxType:"Title"}),(0,l.kt)(n.Pc,{mdxType:"Signature"},"chunk_config_dict: typing.Optional[dict] = None, chunk_init_device: typing.Optional[torch.device] = None, placement_policy: str = 'static', enable_gradient_accumulation: bool = False, shard_param_frac: float = 1.0, offload_optim_frac: float = 0.0, offload_param_frac: float = 0.0, warmup_non_model_data_ratio: float = 0.8, steady_cuda_cap_ratio: float = 0.9, precision: str = 'fp16', master_weights: bool = True, pin_memory: bool = False, force_outputs_fp32: bool = False, strict_ddp_mode: bool = False, search_range_m: int = 32, hidden_dim: typing.Optional[int] = None, min_chunk_size_m: float = 32, memstats: typing.Optional[colossalai.zero.gemini.memory_tracer.memory_stats.MemStats] = None, gpu_margin_mem_ratio: float = 0.0, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0.0, norm_type: float = 2.0, tp_size: int = 1, extra_dp_size: int = 1, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_sequence_parallelism: bool = False, enable_jit_fused: bool = False, enable_sequence_overlap: bool = False, verbose: bool = False"),(0,l.kt)(n.aE,{mdxType:"Parameters"},'- **chunk_config_dict** (dict, optional) -- chunk configuration dictionary.\n- **chunk_init_device** (torch.device, optional) -- device to initialize the chunk.\n- **placement_policy** (str, optional) -- "static" and "auto". 
Defaults to "static".\n- **enable_gradient_accumulation** (bool, optional) -- Whether to enable gradient accumulation. When set to True, gradient will be stored after doing backward pass. Defaults to False.\n- **shard_param_frac** (float, optional) -- fraction of parameters to be sharded. Only for "static" placement.\n If `shard_param_frac` is 1.0, it\'s equal to zero-3. If `shard_param_frac` is 0.0, it\'s equal to zero-2. Defaults to 1.0.\n- **offload_optim_frac** (float, optional) -- fraction of optimizer states to be offloaded. Only for "static" placement.\n If `shard_param_frac` is 1.0 and `offload_optim_frac` is 0.0, it\'s equal to old "cuda" placement. Defaults to 0.0.\n- **offload_param_frac** (float, optional) -- fraction of parameters to be offloaded. Only for "static" placement.\n For efficiency, this argument is useful only when `shard_param_frac` is 1.0 and `offload_optim_frac` is 1.0.\n If `shard_param_frac` is 1.0, `offload_optim_frac` is 1.0 and `offload_param_frac` is 1.0, it\'s equal to old "cpu" placement.\n When using static placement, we recommend users to tune `shard_param_frac` first and then `offload_optim_frac`.\n Defaults to 0.0.\n- **warmup_non_model_data_ratio** (float, optional) -- ratio of expected non-model data memory during warmup. Only for "auto" placement. Defaults to 0.8.\n- **steady_cuda_cap_ratio** (float, optional) -- ratio of allowed cuda capacity for model data during steady state. Only for "auto" placement. Defaults to 0.9.\n- **precision** (str, optional) -- precision. Support \'fp16\' and \'bf16\'. Defaults to \'fp16\'.\n- **master_weights** (bool, optional) -- Whether to keep fp32 master parameter weights in optimizer. Defaults to True.\n- **pin_memory** (bool, optional) -- use pin memory on CPU. Defaults to False.\n- **force_outputs_fp32** (bool, optional) -- force outputs are fp32. Defaults to False.\n- **strict_ddp_mode** (bool, optional) -- use strict ddp mode (only use dp without other parallelism). Defaults to False.\n- **search_range_m** (int, optional) -- chunk size searching range divided by 2^20. Defaults to 32.\n- **hidden_dim** (int, optional) -- the hidden dimension of DNN.\n Users can provide this argument to speed up searching.\n If users do not know this argument before training, it is ok. We will use a default value 1024.\n- **min_chunk_size_m** (float, optional) -- the minimum chunk size divided by 2^20.\n If the aggregate size of parameters is still smaller than the minimum chunk size,\n all parameters will be compacted into one small chunk.\n- **memstats** (MemStats, optional) the memory statistics collector by a runtime memory tracer. --\n- **gpu_margin_mem_ratio** (float, optional) -- The ratio of GPU remaining memory (after the first forward-backward)\n which will be used when using hybrid CPU optimizer.\n This argument is meaningless when `placement_policy` of `GeminiManager` is not "auto".\n Defaults to 0.0.\n- **initial_scale** (float, optional) -- Initial scale used by DynamicGradScaler. Defaults to 2**16.\n- **min_scale** (float, optional) -- Min scale used by DynamicGradScaler. Defaults to 1.\n- **growth_factor** (float, optional) -- growth_factor used by DynamicGradScaler. Defaults to 2.\n- **backoff_factor** (float, optional) -- backoff_factor used by DynamicGradScaler. Defaults to 0.5.\n- **growth_interval** (float, optional) -- growth_interval used by DynamicGradScaler. Defaults to 1000.\n- **hysteresis** (float, optional) -- hysteresis used by DynamicGradScaler. 
Defaults to 2.\n- **max_scale** (int, optional) -- max_scale used by DynamicGradScaler. Defaults to 2**32.\n- **max_norm** (float, optional) -- max_norm used for `clip_grad_norm`. You should notice that you shall not do\n clip_grad_norm by yourself when using ZeRO DDP. The ZeRO optimizer will take care of clip_grad_norm.\n- **norm_type** (float, optional) -- norm_type used for `clip_grad_norm`.\n- **tp_size** (int, optional) -- If \'tp_size\' is set to be greater than 1, it means using tensor parallelism strategy, which is implemented in Shardformer, \'tp_size\' determines the size of the tensor parallel process group. Default to 1.\n- **extra_dp_size** (int, optional) -- If \'extra_dp_size\' is set to be greater than 1, it means creating another group to run with a ddp-like strategy. Default to 1.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **verbose** (bool, optional) -- verbose mode. Debug info including chunk search result will be printed. Defaults to False.')),(0,l.kt)("div",null,(0,l.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for Gemini."),(0,l.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import GeminiPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = GeminiPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,l.kt)("h3",{id:"hybrid-parallel-\u63d2\u4ef6"},"Hybrid Parallel \u63d2\u4ef6"),(0,l.kt)("p",null,"\u8fd9\u4e2a\u63d2\u4ef6\u5b9e\u73b0\u4e86\u591a\u79cd\u5e76\u884c\u8bad\u7ec3\u7b56\u7565\u548c\u4f18\u5316\u5de5\u5177\u7684\u7ec4\u5408\u3002Hybrid Parallel\u63d2\u4ef6\u652f\u6301\u7684\u529f\u80fd\u5927\u81f4\u53ef\u4ee5\u88ab\u5206\u4e3a\u4ee5\u4e0b\u56db\u4e2a\u90e8\u5206\uff1a"),(0,l.kt)("ol",null,(0,l.kt)("li",{parentName:"ol"},"Shardformer: Shardformer\u8d1f\u8d23\u5728\u5f20\u91cf\u5e76\u884c\u4ee5\u53ca\u6d41\u6c34\u7ebf\u5e76\u884c\u4e0b\u5207\u5206\u6a21\u578b\u7684\u903b\u8f91\uff0c\u4ee5\u53ca\u524d\u5411/\u540e\u5411\u65b9\u6cd5\u7684\u91cd\u8f7d\uff0c\u8fd9\u4e2a\u63d2\u4ef6\u4e3aShardformer\u529f\u80fd\u63d0\u4f9b\u4e86\u4e00\u4e2a\u7b80\u5355\u6613\u7528\u7684\u63a5\u53e3\u3002\u4e0e\u6b64\u540c\u65f6\uff0cShardformer\u8fd8\u8d1f\u8d23\u5c06\u5305\u62ecfused normalization, flash attention (xformers), JIT\u548c\u5e8f\u5217\u5e76\u884c\u5728\u5185\u7684\u5404\u7c7b\u4f18\u5316\u5de5\u5177\u878d\u5165\u91cd\u8f7d\u540e\u7684\u524d\u5411/\u540e\u5411\u65b9\u6cd5\u3002\u66f4\u591a\u5173\u4e8eShardformer\u7684\u4fe1\u606f\u8bf7\u53c2\u8003 
",(0,l.kt)("a",{parentName:"li",href:"/zh-Hans/docs/features/shardformer"},"Shardformer\u6587\u6863"),"\u3002\u4e0b\u56fe\u5c55\u793a\u4e86Shardformer\u4e0eHybrid Parallel\u63d2\u4ef6\u6240\u652f\u6301\u7684\u529f\u80fd\u3002")),(0,l.kt)("div",{align:"center"},(0,l.kt)("img",{src:"https://raw.githubusercontent.com/hpcaitech/public_assets/main/colossalai/img/shardformer/shardformer_and_hybridparallel.png",width:"500"})),(0,l.kt)("ol",{start:2},(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\uff1a\u63d2\u4ef6\u652f\u6301fp16/bf16\u7684\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u3002\u66f4\u591a\u5173\u4e8e\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u7684\u53c2\u6570\u914d\u7f6e\u7684\u8be6\u7ec6\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-Hans/docs/features/mixed_precision_training_with_booster"},"\u6df7\u5408\u7cbe\u5ea6\u8bad\u7ec3\u6587\u6863"),"\u3002")),(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"Torch DDP: \u5f53\u6d41\u6c34\u7ebf\u5e76\u884c\u548cZero\u4e0d\u88ab\u4f7f\u7528\u7684\u65f6\u5019\uff0c\u63d2\u4ef6\u4f1a\u81ea\u52a8\u91c7\u7528Pytorch DDP\u4f5c\u4e3a\u6570\u636e\u5e76\u884c\u7684\u7b56\u7565\u3002\u66f4\u591a\u5173\u4e8eTorch DDP\u7684\u53c2\u6570\u914d\u7f6e\u7684\u8be6\u7ec6\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch DDP \u6587\u6863"),"\u3002")),(0,l.kt)("li",{parentName:"ol"},(0,l.kt)("p",{parentName:"li"},"Zero: \u5728\u521d\u59cb\u5316\u63d2\u4ef6\u7684\u65f6\u5019\uff0c\u53ef\u4ee5\u901a\u8fc7\u5c06",(0,l.kt)("inlineCode",{parentName:"p"},"zero_stage"),"\u53c2\u6570\u8bbe\u7f6e\u4e3a1\u62162\u6765\u8ba9\u63d2\u4ef6\u91c7\u7528Zero 1/2\u4f5c\u4e3a\u6570\u636e\u5e76\u884c\u7684\u7b56\u7565\u3002Zero 1\u53ef\u4ee5\u548c\u6d41\u6c34\u7ebf\u5e76\u884c\u7b56\u7565\u540c\u65f6\u4f7f\u7528, \u800cZero 2\u5219\u4e0d\u53ef\u4ee5\u548c\u6d41\u6c34\u7ebf\u5e76\u884c\u7b56\u7565\u540c\u65f6\u4f7f\u7528\u3002\u66f4\u591a\u5173\u4e8eZero\u7684\u53c2\u6570\u914d\u7f6e\u7684\u8be6\u7ec6\u4fe1\u606f\u8bf7\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"#low-level-zero-%E6%8F%92%E4%BB%B6"},"Low Level Zero \u63d2\u4ef6"),"."))),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u5728\u4f7f\u7528\u8be5\u63d2\u4ef6\u7684\u65f6\u5019, \u53ea\u6709\u652f\u6301Shardformer\u7684\u90e8\u5206Huggingface transformers\u6a21\u578b\u624d\u80fd\u591f\u4f7f\u7528\u5f20\u91cf\u5e76\u884c\u3001\u6d41\u6c34\u7ebf\u5e76\u884c\u4ee5\u53ca\u4f18\u5316\u5de5\u5177\u3002Llama 1\u3001Llama 2\u3001OPT\u3001Bloom\u3001Bert\u4ee5\u53caGPT2\u7b49\u4e3b\u6d41transformers\u6a21\u578b\u5747\u5df2\u652f\u6301Shardformer\u3002")),(0,l.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.HybridParallelPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L863",mdxType:"Title"}),(0,l.kt)(n.Pc,{mdxType:"Signature"},"tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, 
initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1, enable_metadata_cache: bool = True"),(0,l.kt)(n.aE,{mdxType:"Parameters"},"- **tp_size** (int) -- The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.\n- **pp_size** (int) -- The number of pipeline stages in pipeline parallelism. Pipeline parallelism will not be used when pp_size is set to 1.\n- **precision** (str, optional) -- Specifies the precision of parameters during training.\n Auto-mixied precision will be used when this argument is set to 'fp16' or 'bf16', otherwise model is trained with 'fp32'.\n Defaults to 'fp16'.\n- **zero_stage** (int, optional) -- The stage of ZeRO for data parallelism. Can only be choosed from [0, 1, 2].\n When set to 0, ZeRO will not be used. Defaults to 0.\n- **enable_all_optimization** (bool, optional) -- Whether to switch on all the optimizations supported by Shardformer.\n Currently all the optimization methods include fused normalization, flash attention and JIT.\n Defaults to False.\n- **enable_fused_normalization** (bool, optional) -- Whether to switch on fused normalization in Shardformer. Defaults to False.\n- **enable_flash_attention** (bool, optional) -- Whether to switch on flash attention in Shardformer. Defaults to False.\n- **enable_jit_fused** (bool, optional) -- Whether to switch on JIT in Shardformer. Default to False.\n- **enable_sequence_parallelism** (bool) -- Whether to turn on sequence parallelism in Shardformer. Defaults to False.\n- **enable_sequence_overlap** (bool) -- Whether to turn on sequence overlap in Shardformer. Defaults to False.\n- **num_microbatches** (int, optional) -- Number of microbatches when using pipeline parallelism. Defaults to None.\n- **microbatch_size** (int, optional) -- Microbatch size when using pipeline parallelism.\n Either `num_microbatches` or `microbatch_size` should be provided if using pipeline.\n If `num_microbatches` is provided, this will be ignored. Defaults to None.\n- **initial_scale** (float, optional) -- The initial loss scale of AMP. Defaults to 2**16.\n- **min_scale** (float, optional) -- The minimum loss scale of AMP. Defaults to 1.\n- **growth_factor** (float, optional) -- The multiplication factor for increasing loss scale when using AMP. Defaults to 2.\n- **backoff_factor** (float, optional) -- The multiplication factor for decreasing loss scale when using AMP. Defaults to 0.5.\n- **growth_interval** (int, optional) -- The number of steps to increase loss scale when no overflow occurs when using AMP. Defaults to 1000.\n- **hysteresis** (int, optional) -- The number of overflows before decreasing loss scale when using AMP. Defaults to 2.\n- **max_scale** (float, optional) -- The maximum loss scale of AMP. Defaults to 2**32.\n- **max_norm** (float, optional) -- Maximum norm for gradient clipping. 
Defaults to 0.\n- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training when using DDP. Defaults to True.\n- **ddp_bucket_cap_mb** (int, optional) -- The bucket size in MB when using DDP. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters when using DDP. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction when using DDP. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view when using DDP. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph when using DDP. Defaults to False.\n- **zero_bucket_size_in_m** (int, optional) -- Gradient reduce bucket size in million elements when using ZeRO. Defaults to 12.\n- **cpu_offload** (bool, optional) -- Whether to open cpu_offload when using ZeRO. Defaults to False.\n- **communication_dtype** (torch.dtype, optional) -- Communication dtype when using ZeRO. If not specified, the dtype of param will be used. Defaults to None.\n- **overlap_communication** (bool, optional) -- Whether to overlap communication and computation when using ZeRO. Defaults to True.\n- **custom_policy** (Policy, optional) -- Custom policy for Shardformer. Defaults to None.\n- **pp_style** (str, optional) -- The style for pipeline parallelism. Defaults to '1f1b'.\n- **num_model_chunks** (int, optional) -- The number of model chunks for interleaved pipeline parallelism. Defaults to 1.\n- **enable_metadata_cache** (bool, optional) -- Whether to enable metadata cache for pipeline parallelism. Defaults to True.")),(0,l.kt)("div",null,(0,l.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for Hybrid Parallel Training.\nTensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin.\nThe size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size)."),(0,l.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import HybridParallelPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = HybridParallelPlugin(tp_size=2, pp_size=2)\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader)\n```",mdxType:"ExampleCode"})),(0,l.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(n.Dx,{type:"function",name:"prepare_dataloader",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/hybrid_parallel_plugin.py#L1194",mdxType:"Title"}),(0,l.kt)(n.Pc,{mdxType:"Signature"},"dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs"),(0,l.kt)(n.aE,{mdxType:"Parameters"},"- **dataset** (*torch.utils.data.Dataset*) -- The dataset to be loaded.\n- **shuffle** (bool, optional) -- Whether to shuffle the dataset. Defaults to False.\n- **seed** (int, optional) -- Random worker seed for sampling, defaults to 1024.\n add_sampler -- Whether to add `DistributedDataParallelSampler` to the dataset. Defaults to True.\n- **drop_last** (bool, optional) -- Set to True to drop the last incomplete batch, if the dataset size\n is not divisible by the batch size. 
If False and the size of dataset is not divisible by\n the batch size, then the last batch will be smaller, defaults to False.\n- **pin_memory** (bool, optional) -- Whether to pin memory address in CPU memory. Defaults to False.\n- **num_workers** (int, optional) -- Number of worker threads for this dataloader. Defaults to 0.\n- **kwargs** (dict) -- optional parameters for `torch.utils.data.DataLoader`, more details could be found in\n [DataLoader](https://pytorch.org/docs/stable/_modules/torch/utils/data/dataloader.html#DataLoader)."),(0,l.kt)(n.nT,{name:"[`torch.utils.data.DataLoader`]",desc:"A DataLoader used for training or testing.",mdxType:"Returns"})),(0,l.kt)("div",null,(0,l.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Prepare a dataloader for distributed training. The dataloader will be wrapped by\n",(0,l.kt)("em",{parentName:"p"},"torch.utils.data.DataLoader")," and ",(0,l.kt)("em",{parentName:"p"},"torch.utils.data.DistributedSampler"),".")))),(0,l.kt)("h3",{id:"torch-ddp-\u63d2\u4ef6"},"Torch DDP \u63d2\u4ef6"),(0,l.kt)("p",null,"\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 ",(0,l.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/generated/torch.nn.parallel.DistributedDataParallel.html#torch.nn.parallel.DistributedDataParallel"},"Pytorch \u6587\u6863"),"."),(0,l.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.TorchDDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_ddp_plugin.py#L129",mdxType:"Title"}),(0,l.kt)(n.Pc,{mdxType:"Signature"},"broadcast_buffers: bool = True, bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False"),(0,l.kt)(n.aE,{mdxType:"Parameters"},"- **broadcast_buffers** (bool, optional) -- Whether to broadcast buffers in the beginning of training. Defaults to True.\n- **bucket_cap_mb** (int, optional) -- The bucket size in MB. Defaults to 25.\n- **find_unused_parameters** (bool, optional) -- Whether to find unused parameters. Defaults to False.\n- **check_reduction** (bool, optional) -- Whether to check reduction. Defaults to False.\n- **gradient_as_bucket_view** (bool, optional) -- Whether to use gradient as bucket view. Defaults to False.\n- **static_graph** (bool, optional) -- Whether to use static graph. 
Defaults to False.")),(0,l.kt)("div",null,(0,l.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for PyTorch DDP."),(0,l.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchDDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchDDPPlugin()\n\ntrain_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))),(0,l.kt)("h3",{id:"torch-fsdp-\u63d2\u4ef6"},"Torch FSDP \u63d2\u4ef6"),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u5982\u679c torch \u7248\u672c\u4f4e\u4e8e 1.12.0\uff0c\u6b64\u63d2\u4ef6\u5c06\u4e0d\u53ef\u7528\u3002")),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u8be5\u63d2\u4ef6\u73b0\u5728\u8fd8\u4e0d\u652f\u6301\u4fdd\u5b58/\u52a0\u8f7d\u5206\u7247\u7684\u6a21\u578b checkpoint\u3002")),(0,l.kt)("blockquote",null,(0,l.kt)("p",{parentName:"blockquote"},"\u26a0 \u8be5\u63d2\u4ef6\u73b0\u5728\u8fd8\u4e0d\u652f\u6301\u4f7f\u7528\u4e86multi params group\u7684optimizer\u3002")),(0,l.kt)("p",null,"\u66f4\u591a\u8be6\u7ec6\u4fe1\u606f\uff0c\u8bf7\u53c2\u9605 ",(0,l.kt)("a",{parentName:"p",href:"https://pytorch.org/docs/main/fsdp.html"},"Pytorch \u6587\u6863"),"."),(0,l.kt)(n.Cl,{mdxType:"DocStringContainer"},(0,l.kt)("div",null,(0,l.kt)(n.Dx,{type:"class",name:"colossalai.booster.plugin.TorchFSDPPlugin",source:"https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/booster/plugin/torch_fsdp_plugin.py#L142",mdxType:"Title"}),(0,l.kt)(n.Pc,{mdxType:"Signature"},"process_group: typing.Optional[torch.distributed.distributed_c10d.ProcessGroup] = None, sharding_strategy: typing.Optional[torch.distributed.fsdp.api.ShardingStrategy] = None, cpu_offload: typing.Optional[torch.distributed.fsdp.api.CPUOffload] = None, auto_wrap_policy: typing.Optional[typing.Callable] = None, backward_prefetch: typing.Optional[torch.distributed.fsdp.api.BackwardPrefetch] = None, mixed_precision: typing.Optional[torch.distributed.fsdp.api.MixedPrecision] = None, ignored_modules: typing.Optional[typing.Iterable[torch.nn.modules.module.Module]] = None, param_init_fn: typing.Optional[typing.Callable[[torch.nn.modules.module.Module]], NoneType] = None, sync_module_states: bool = False"),(0,l.kt)(n.aE,{mdxType:"Parameters"},"- **See** https --//pytorch.org/docs/stable/fsdp.html for details.")),(0,l.kt)("div",null,(0,l.kt)(n.iz,{name:"Description",mdxType:"Divider"}),(0,l.kt)("p",null,"Plugin for PyTorch FSDP."),(0,l.kt)(n.e_,{code:"```python\nfrom colossalai.booster import Booster\nfrom colossalai.booster.plugin import TorchFSDPPlugin\n\nmodel, train_dataset, optimizer, criterion = ...\nplugin = TorchFSDPPlugin()\n\ntrain_dataloader = plugin.prepare_train_dataloader(train_dataset, batch_size=8)\nbooster = Booster(plugin=plugin)\nmodel, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)\n```",mdxType:"ExampleCode"}))))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/zh-Hans/assets/js/runtime~main.58d27fdd.js b/zh-Hans/assets/js/runtime~main.9a4f2bfa.js similarity index 99% rename from zh-Hans/assets/js/runtime~main.58d27fdd.js rename to zh-Hans/assets/js/runtime~main.9a4f2bfa.js index b65074da..e516fe28 100644 --- a/zh-Hans/assets/js/runtime~main.58d27fdd.js +++ 
b/zh-Hans/assets/js/runtime~main.9a4f2bfa.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,f,d,c,t={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={id:e,loaded:!1,exports:{}};return t[e].call(f.exports,f,f.exports,b),f.loaded=!0,f.exports}b.m=t,b.c=r,e=[],b.O=(a,f,d,c)=>{if(!f){var t=1/0;for(i=0;i=c)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[f,d,c]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,d){if(1&d&&(e=this(e)),8&d)return e;if("object"==typeof e&&e){if(4&d&&e.__esModule)return e;if(16&d&&"function"==typeof e.then)return e}var c=Object.create(null);b.r(c);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&d&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return t.default=()=>e,b.d(c,t),c},b.d=(e,a)=>{for(var f in a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",257:"3c4a8a83",1131:"55566578",1692:"a1b70643",1776:"24fa5c90",1867:"472f7e38",1913:"7c3c73c3",1914:"d9f32620",2109:"68403855",2315:"7321f337",2362:"e273c56f",2535:"814f3328",3085:"1f391b9e",3089:"a6aa9e1f",3362:"6e94bde7",3380:"f5998a11",3404:"4069fdb2",3608:"9e4087bc",3906:"3e45fbbc",4013:"01a85c17",4195:"c4f5d8e4",4410:"774d8ffb",4439:"b8d15835",4569:"39478845",4617:"032fc316",4699:"2d2fd80b",4768:"60446705",4841:"10ac9080",5032:"99fb5e39",5119:"d18a71e0",5140:"b5fea3fe",5151:"48c7a9d6",5345:"7284ea81",5496:"e1be4094",5537:"85145bcd",5573:"a54e725d",5804:"f2542c28",6019:"a6d057f0",6103:"ccc49370",6197:"3941bf17",6271:"0f71a4c4",6457:"52d0f5d7",6649:"6731b6dc",6724:"36d7a7fb",6869:"8872db8b",7067:"1cebf892",7414:"393be207",7564:"2bf541da",7605:"53de9a8e",7842:"a254dda1",7918:"17896441",7920:"1a4e3797",8145:"a4a9e0d9",8388:"918dc965",8430:"ed503baa",8480:"d5af1612",8511:"67d0af1c",8576:"ce999370",8610:"6875c492",9003:"925b3f96",9314:"d742ffe2",9423:"769a252b",9514:"1be78505",9532:"46d4bf1e",9551:"f1de92c9",9642:"7661071f",9812:"63ba90e1",9823:"d2f38757"}[e]||e)+"."+{53:"7776f813",257:"0256933d",398:"586825c7",1131:"7e2bd7bd",1692:"00864984",1776:"3ad3b4ba",1867:"209f9b6e",1913:"97c9f574",1914:"e20e0cdf",2109:"2c0fd18d",2315:"fb68d2aa",2362:"c703ec62",2403:"1d371fac",2535:"a268073f",3085:"e29bf671",3089:"a491e875",3362:"7eb71f1e",3380:"9a3d86f9",3404:"519be2ca",3608:"c41bc3c4",3906:"6587d4de",4013:"ab557569",4195:"b35af3ae",4410:"0c936919",4439:"4040ba02",4569:"108f4a68",4617:"33bd756d",4699:"ea7c7096",4768:"c906966a",4841:"f1c85158",4972:"cb4f21fb",4989:"f2816fc4",5032:"0a48a182",5119:"e96eed3e",5140:"191f1164",5151:"e7d6437e",5345:"b3efcbd9",5496:"8c950d4d",5537:"600186fc",5573:"404ce031",5804:"148b15a8",6019:"ecf0d4bf",6048:"acc25360",6103:"e7ee1570",6197:"30e391db",6271:"f661fcae",6457:"a523dbe7",6649:"16fff71d",6724:"d9f858b3",6780:"19ab39fb",6869:"aae580d4",6945:"166dadd9",7067:"5f4e6541",7414:"89d32549",7564:"e6949578",7605:"a16e915c",7842:"de4be984",7918:"8d8b61ca",7920:"11339e64",8145:"2f563003",8388:"e3ac1518",8430:"f3b241c4",8480:"591df0ba",8511:"ee53c869",8576:"ebf5ce04",8610:"ce8f6fa8",8894:"74389eef",9003:"c4ca9980",9056:"40c86f0d",9314:"3a4495dd",9423:"55ab967e",9514:"5c12b6b2",9532:"8d4a6594",9551:"565ac184",9642:"b38fbac0",9812:"d5e47827",9823:"8661eed1"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof 
globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),d={},c="demo:",b.l=(e,a,f,t)=>{if(d[e])d[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var c=d[e];if(delete d[e],r.parentNode&&r.parentNode.removeChild(r),c&&c.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/zh-Hans/",b.gca=function(e){return e={17896441:"7918",39478845:"4569",55566578:"1131",60446705:"4768",68403855:"2109","935f2afb":"53","3c4a8a83":"257",a1b70643:"1692","24fa5c90":"1776","472f7e38":"1867","7c3c73c3":"1913",d9f32620:"1914","7321f337":"2315",e273c56f:"2362","814f3328":"2535","1f391b9e":"3085",a6aa9e1f:"3089","6e94bde7":"3362",f5998a11:"3380","4069fdb2":"3404","9e4087bc":"3608","3e45fbbc":"3906","01a85c17":"4013",c4f5d8e4:"4195","774d8ffb":"4410",b8d15835:"4439","032fc316":"4617","2d2fd80b":"4699","10ac9080":"4841","99fb5e39":"5032",d18a71e0:"5119",b5fea3fe:"5140","48c7a9d6":"5151","7284ea81":"5345",e1be4094:"5496","85145bcd":"5537",a54e725d:"5573",f2542c28:"5804",a6d057f0:"6019",ccc49370:"6103","3941bf17":"6197","0f71a4c4":"6271","52d0f5d7":"6457","6731b6dc":"6649","36d7a7fb":"6724","8872db8b":"6869","1cebf892":"7067","393be207":"7414","2bf541da":"7564","53de9a8e":"7605",a254dda1:"7842","1a4e3797":"7920",a4a9e0d9:"8145","918dc965":"8388",ed503baa:"8430",d5af1612:"8480","67d0af1c":"8511",ce999370:"8576","6875c492":"8610","925b3f96":"9003",d742ffe2:"9314","769a252b":"9423","1be78505":"9514","46d4bf1e":"9532",f1de92c9:"9551","7661071f":"9642","63ba90e1":"9812",d2f38757:"9823"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var d=b.o(e,a)?e[a]:void 0;if(0!==d)if(d)f.push(d[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var c=new Promise(((f,c)=>d=e[a]=[f,c]));f.push(d[2]=c);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(d=e[a])&&(e[a]=void 0),d)){var c=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+c+": "+t+")",r.name="ChunkLoadError",r.type=c,r.request=t,d[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var d,c,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(d in r)b.o(r,d)&&(b.m[d]=r[d]);if(o)var i=o(b)}for(a&&a(f);n{"use strict";var e,a,f,d,c,t={},r={};function b(e){var a=r[e];if(void 0!==a)return a.exports;var f=r[e]={id:e,loaded:!1,exports:{}};return t[e].call(f.exports,f,f.exports,b),f.loaded=!0,f.exports}b.m=t,b.c=r,e=[],b.O=(a,f,d,c)=>{if(!f){var t=1/0;for(i=0;i=c)&&Object.keys(b.O).every((e=>b.O[e](f[o])))?f.splice(o--,1):(r=!1,c0&&e[i-1][2]>c;i--)e[i]=e[i-1];e[i]=[f,d,c]},b.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return b.d(a,{a:a}),a},f=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,b.t=function(e,d){if(1&d&&(e=this(e)),8&d)return e;if("object"==typeof e&&e){if(4&d&&e.__esModule)return e;if(16&d&&"function"==typeof e.then)return e}var c=Object.create(null);b.r(c);var t={};a=a||[null,f({}),f([]),f(f)];for(var r=2&d&&e;"object"==typeof r&&!~a.indexOf(r);r=f(r))Object.getOwnPropertyNames(r).forEach((a=>t[a]=()=>e[a]));return 
t.default=()=>e,b.d(c,t),c},b.d=(e,a)=>{for(var f in a)b.o(a,f)&&!b.o(e,f)&&Object.defineProperty(e,f,{enumerable:!0,get:a[f]})},b.f={},b.e=e=>Promise.all(Object.keys(b.f).reduce(((a,f)=>(b.f[f](e,a),a)),[])),b.u=e=>"assets/js/"+({53:"935f2afb",257:"3c4a8a83",1131:"55566578",1692:"a1b70643",1776:"24fa5c90",1867:"472f7e38",1913:"7c3c73c3",1914:"d9f32620",2109:"68403855",2315:"7321f337",2362:"e273c56f",2535:"814f3328",3085:"1f391b9e",3089:"a6aa9e1f",3362:"6e94bde7",3380:"f5998a11",3404:"4069fdb2",3608:"9e4087bc",3906:"3e45fbbc",4013:"01a85c17",4195:"c4f5d8e4",4410:"774d8ffb",4439:"b8d15835",4569:"39478845",4617:"032fc316",4699:"2d2fd80b",4768:"60446705",4841:"10ac9080",5032:"99fb5e39",5119:"d18a71e0",5140:"b5fea3fe",5151:"48c7a9d6",5345:"7284ea81",5496:"e1be4094",5537:"85145bcd",5573:"a54e725d",5804:"f2542c28",6019:"a6d057f0",6103:"ccc49370",6197:"3941bf17",6271:"0f71a4c4",6457:"52d0f5d7",6649:"6731b6dc",6724:"36d7a7fb",6869:"8872db8b",7067:"1cebf892",7414:"393be207",7564:"2bf541da",7605:"53de9a8e",7842:"a254dda1",7918:"17896441",7920:"1a4e3797",8145:"a4a9e0d9",8388:"918dc965",8430:"ed503baa",8480:"d5af1612",8511:"67d0af1c",8576:"ce999370",8610:"6875c492",9003:"925b3f96",9314:"d742ffe2",9423:"769a252b",9514:"1be78505",9532:"46d4bf1e",9551:"f1de92c9",9642:"7661071f",9812:"63ba90e1",9823:"d2f38757"}[e]||e)+"."+{53:"7776f813",257:"0256933d",398:"586825c7",1131:"7e2bd7bd",1692:"00864984",1776:"3ad3b4ba",1867:"209f9b6e",1913:"97c9f574",1914:"e20e0cdf",2109:"2c0fd18d",2315:"fb68d2aa",2362:"c703ec62",2403:"1d371fac",2535:"a268073f",3085:"e29bf671",3089:"a491e875",3362:"7eb71f1e",3380:"9a3d86f9",3404:"519be2ca",3608:"c41bc3c4",3906:"6587d4de",4013:"ab557569",4195:"b35af3ae",4410:"0c936919",4439:"4040ba02",4569:"108f4a68",4617:"33bd756d",4699:"ea7c7096",4768:"c906966a",4841:"f1c85158",4972:"cb4f21fb",4989:"f2816fc4",5032:"0a48a182",5119:"e96eed3e",5140:"191f1164",5151:"e7d6437e",5345:"b3efcbd9",5496:"8c950d4d",5537:"600186fc",5573:"404ce031",5804:"148b15a8",6019:"ecf0d4bf",6048:"acc25360",6103:"e7ee1570",6197:"30e391db",6271:"f661fcae",6457:"a523dbe7",6649:"16fff71d",6724:"d9f858b3",6780:"19ab39fb",6869:"aae580d4",6945:"166dadd9",7067:"5f4e6541",7414:"89d32549",7564:"e6949578",7605:"a16e915c",7842:"de4be984",7918:"8d8b61ca",7920:"11339e64",8145:"2f563003",8388:"e3ac1518",8430:"f3b241c4",8480:"591df0ba",8511:"ee53c869",8576:"ebf5ce04",8610:"ce8f6fa8",8894:"74389eef",9003:"c4ca9980",9056:"40c86f0d",9314:"bfcf124a",9423:"55ab967e",9514:"5c12b6b2",9532:"8d4a6594",9551:"565ac184",9642:"b38fbac0",9812:"d5e47827",9823:"8661eed1"}[e]+".js",b.miniCssF=e=>{},b.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),b.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),d={},c="demo:",b.l=(e,a,f,t)=>{if(d[e])d[e].push(a);else{var r,o;if(void 0!==f)for(var n=document.getElementsByTagName("script"),i=0;i{r.onerror=r.onload=null,clearTimeout(s);var c=d[e];if(delete d[e],r.parentNode&&r.parentNode.removeChild(r),c&&c.forEach((e=>e(f))),a)return a(f)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:r}),12e4);r.onerror=l.bind(null,r.onerror),r.onload=l.bind(null,r.onload),o&&document.head.appendChild(r)}},b.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},b.p="/zh-Hans/",b.gca=function(e){return 
e={17896441:"7918",39478845:"4569",55566578:"1131",60446705:"4768",68403855:"2109","935f2afb":"53","3c4a8a83":"257",a1b70643:"1692","24fa5c90":"1776","472f7e38":"1867","7c3c73c3":"1913",d9f32620:"1914","7321f337":"2315",e273c56f:"2362","814f3328":"2535","1f391b9e":"3085",a6aa9e1f:"3089","6e94bde7":"3362",f5998a11:"3380","4069fdb2":"3404","9e4087bc":"3608","3e45fbbc":"3906","01a85c17":"4013",c4f5d8e4:"4195","774d8ffb":"4410",b8d15835:"4439","032fc316":"4617","2d2fd80b":"4699","10ac9080":"4841","99fb5e39":"5032",d18a71e0:"5119",b5fea3fe:"5140","48c7a9d6":"5151","7284ea81":"5345",e1be4094:"5496","85145bcd":"5537",a54e725d:"5573",f2542c28:"5804",a6d057f0:"6019",ccc49370:"6103","3941bf17":"6197","0f71a4c4":"6271","52d0f5d7":"6457","6731b6dc":"6649","36d7a7fb":"6724","8872db8b":"6869","1cebf892":"7067","393be207":"7414","2bf541da":"7564","53de9a8e":"7605",a254dda1:"7842","1a4e3797":"7920",a4a9e0d9:"8145","918dc965":"8388",ed503baa:"8430",d5af1612:"8480","67d0af1c":"8511",ce999370:"8576","6875c492":"8610","925b3f96":"9003",d742ffe2:"9314","769a252b":"9423","1be78505":"9514","46d4bf1e":"9532",f1de92c9:"9551","7661071f":"9642","63ba90e1":"9812",d2f38757:"9823"}[e]||e,b.p+b.u(e)},(()=>{var e={1303:0,532:0};b.f.j=(a,f)=>{var d=b.o(e,a)?e[a]:void 0;if(0!==d)if(d)f.push(d[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var c=new Promise(((f,c)=>d=e[a]=[f,c]));f.push(d[2]=c);var t=b.p+b.u(a),r=new Error;b.l(t,(f=>{if(b.o(e,a)&&(0!==(d=e[a])&&(e[a]=void 0),d)){var c=f&&("load"===f.type?"missing":f.type),t=f&&f.target&&f.target.src;r.message="Loading chunk "+a+" failed.\n("+c+": "+t+")",r.name="ChunkLoadError",r.type=c,r.request=t,d[1](r)}}),"chunk-"+a,a)}},b.O.j=a=>0===e[a];var a=(a,f)=>{var d,c,t=f[0],r=f[1],o=f[2],n=0;if(t.some((a=>0!==e[a]))){for(d in r)b.o(r,d)&&(b.m[d]=r[d]);if(o)var i=o(b)}for(a&&a(f);n - + - + \ No newline at end of file diff --git a/zh-Hans/blog/first-blog-post/index.html b/zh-Hans/blog/first-blog-post/index.html index 64ea4d8a..8d4b1889 100644 --- a/zh-Hans/blog/first-blog-post/index.html +++ b/zh-Hans/blog/first-blog-post/index.html @@ -16,13 +16,13 @@ - +

    First Blog Post

    · 1 分钟阅读
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/zh-Hans/blog/index.html b/zh-Hans/blog/index.html index b6b8fd67..20383f20 100644 --- a/zh-Hans/blog/index.html +++ b/zh-Hans/blog/index.html @@ -16,13 +16,13 @@ - +

    · 1 分钟阅读
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    · 1 分钟阅读
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/zh-Hans/blog/tags/docusaurus/index.html b/zh-Hans/blog/tags/docusaurus/index.html index c4eb628e..00da26ea 100644 --- a/zh-Hans/blog/tags/docusaurus/index.html +++ b/zh-Hans/blog/tags/docusaurus/index.html @@ -16,13 +16,13 @@ - +

    2 篇博文 含有标签「docusaurus」

    查看所有标签

    · 1 分钟阅读
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    · 1 分钟阅读
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/zh-Hans/blog/tags/facebook/index.html b/zh-Hans/blog/tags/facebook/index.html index 18b946df..da1926d8 100644 --- a/zh-Hans/blog/tags/facebook/index.html +++ b/zh-Hans/blog/tags/facebook/index.html @@ -16,13 +16,13 @@ - +

    1 篇博文 含有标签「facebook」

    查看所有标签

    · 1 分钟阅读
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    - + \ No newline at end of file diff --git a/zh-Hans/blog/tags/hello/index.html b/zh-Hans/blog/tags/hello/index.html index 0fa7f069..b1a89db1 100644 --- a/zh-Hans/blog/tags/hello/index.html +++ b/zh-Hans/blog/tags/hello/index.html @@ -16,13 +16,13 @@ - +

    1 篇博文 含有标签「hello」

    查看所有标签

    · 1 分钟阅读
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    - + \ No newline at end of file diff --git a/zh-Hans/blog/tags/hola/index.html b/zh-Hans/blog/tags/hola/index.html index 9877b0f8..f2e616e2 100644 --- a/zh-Hans/blog/tags/hola/index.html +++ b/zh-Hans/blog/tags/hola/index.html @@ -16,13 +16,13 @@ - +

    1 篇博文 含有标签「hola」

    查看所有标签

    · 1 分钟阅读
    Gao Wei

    Lorem ipsum dolor sit amet, consectetur adipiscing elit. Pellentesque elementum dignissim ultricies. Fusce rhoncus ipsum tempor eros aliquam consequat. Lorem ipsum dolor sit amet

    - + \ No newline at end of file diff --git a/zh-Hans/blog/tags/index.html b/zh-Hans/blog/tags/index.html index 5999c542..42aa45ec 100644 --- a/zh-Hans/blog/tags/index.html +++ b/zh-Hans/blog/tags/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/zh-Hans/blog/welcome/index.html b/zh-Hans/blog/welcome/index.html index 5ff3ef4f..168dd975 100644 --- a/zh-Hans/blog/welcome/index.html +++ b/zh-Hans/blog/welcome/index.html @@ -16,13 +16,13 @@ - +

    Welcome

    · 1 分钟阅读
    Sébastien Lorber
    Yangshun Tay

    Docusaurus blogging features are powered by the blog plugin.

    Simply add Markdown files (or folders) to the blog directory.

    Regular blog authors can be added to authors.yml.

    The blog post date can be extracted from filenames, such as:

    • 2019-05-30-welcome.md
    • 2019-05-30-welcome/index.md

    A blog post folder can be convenient to co-locate blog post images:

    Docusaurus Plushie

    The blog supports tags as well!

    And if you don't want a blog: just delete this directory, and use blog: false in your Docusaurus config.

    - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/feature/auto_checkpoint/index.html b/zh-Hans/docs/Colossal-Auto/feature/auto_checkpoint/index.html index c0c05308..08d57c03 100644 --- a/zh-Hans/docs/Colossal-Auto/feature/auto_checkpoint/index.html +++ b/zh-Hans/docs/Colossal-Auto/feature/auto_checkpoint/index.html @@ -16,13 +16,13 @@ - +
    - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/feature/device_mesh/index.html b/zh-Hans/docs/Colossal-Auto/feature/device_mesh/index.html index ea08b47a..92f8ef2d 100644 --- a/zh-Hans/docs/Colossal-Auto/feature/device_mesh/index.html +++ b/zh-Hans/docs/Colossal-Auto/feature/device_mesh/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/feature/layout_converting_management/index.html b/zh-Hans/docs/Colossal-Auto/feature/layout_converting_management/index.html index 090847b9..844f5a24 100644 --- a/zh-Hans/docs/Colossal-Auto/feature/layout_converting_management/index.html +++ b/zh-Hans/docs/Colossal-Auto/feature/layout_converting_management/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    layout_converting_management

当一个张量在上下游算子中被要求的sharding spec不同时,我们需要进行分布转换处理(Layout Conversion)。目前主流的方式有两种:打表转换和逐维度转换。打表转换是将所有可能的情况枚举出来,在遇到需要转换的情况时,去表格中找到对应的转换方案。然而它有一个很大的问题:随着设备网格(Device Mesh)的维度增加,问题的规模会急剧膨胀,以至于无法通过这种枚举打表的方式来解决。逐维度转换是对于一个N-d tensor的sharding spec X0X1...Xn-1,让i从0到n-1逐维度地进行转换。这样不管设备网格和张量的维度是多少,都只需要一次扫描就可以得到一个可行的转换操作序列,然而这样的转换效率会很差。为了解决这个问题,我们提出一个新的想法:使用启发式搜索来解决sharding spec的转换问题。这个算法可以描述为:

    1. 从source spec生成所有的one-step transform sharding specs
2. 在one-step transform sharding specs中,根据相似度函数,挑选一个“区别最小”的sharding spec作为后续的source sharding spec,并将该sharding spec记录在transform path中。如果one-step transform sharding specs中有与target sharding spec相同的sharding spec,则算法结束。
3. 重复步骤1和2,直到算法结束。
Source/target sharding spec pairs | All gather | Shard | All to All | One step transform | Best sharding spec | Transform path
S_{01}RR, RS_{01}R | S_0RR | - | S_0RS_1, S_0S_1R | S_0RR, S_0RS_1, S_0S_1R | S_0RR | S_0RR
S_0RR, RS_{01}R | RRR | S_0S_1R, S_0RS_1 | RS_0R, RRS_0 | RRR, S_0S_1R, S_0RS_1, RS_0R, RRS_0 | RS_0R | S_0RR -> RS_0R
RS_0R, RS_{01}R | RRR | RS_{01}R, S_1S_0R, RS_0S_1 | S_0RR, RRS_0 | RRR, RS_{01}R, S_1S_0R, RS_0S_1, S_0RR, RRS_0 | RS_{01}R | S_0RR -> RS_0R -> RS_{01}R
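下面给出一段示意性的 Python 伪实现,用来说明上述启发式搜索的流程。其中 get_one_step_transforms 与 similarity 均为假设的接口,仅用于说明算法,并非 ColossalAI 的实际 API:

# 示意性代码:贪心启发式搜索 sharding spec 的转换路径
# 假设 get_one_step_transforms(spec) 返回 spec 经过一次 all-gather/shard/all-to-all 能得到的所有 sharding spec,
# similarity(a, b) 返回两个 spec 的相似度(越大表示“区别越小”)。两者均为假设接口,仅作示意。
def heuristic_layout_conversion(source_spec, target_spec, get_one_step_transforms, similarity):
    transform_path = []
    current = source_spec
    while current != target_spec:
        candidates = get_one_step_transforms(current)        # 步骤1:生成所有 one-step transform
        if target_spec in candidates:                        # 若其中已包含 target,则算法结束
            transform_path.append(target_spec)
            break
        # 步骤2:挑选与 target “区别最小”(相似度最高)的 spec 作为新的 source,并记录到 transform path
        current = max(candidates, key=lambda spec: similarity(spec, target_spec))
        transform_path.append(current)                       # 步骤3:重复,直到到达 target
    return transform_path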
    - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/feature/tracer/index.html b/zh-Hans/docs/Colossal-Auto/feature/tracer/index.html index f25f5d21..32c3b6bb 100644 --- a/zh-Hans/docs/Colossal-Auto/feature/tracer/index.html +++ b/zh-Hans/docs/Colossal-Auto/feature/tracer/index.html @@ -16,13 +16,13 @@ - + - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/get_started/installation/index.html b/zh-Hans/docs/Colossal-Auto/get_started/installation/index.html index 1ea496f3..241d7268 100644 --- a/zh-Hans/docs/Colossal-Auto/get_started/installation/index.html +++ b/zh-Hans/docs/Colossal-Auto/get_started/installation/index.html @@ -16,13 +16,13 @@ - +

    安装

    声明

    我们的自动并行功能处于alpha版本,仍在快速的开发迭代中。我们会在兼容性和稳定性上做持续地改进。如果您遇到任何问题,欢迎随时提issue给我们。

    要求

我们需要一些额外的依赖来支持自动并行功能。请在使用自动并行之前安装它们。

    安装PyTorch

我们目前仅支持PyTorch 1.12,尚未测试其他版本。将来我们将支持更多版本。

    #conda
    conda install pytorch==1.12.0 torchvision==0.13.0 torchaudio==0.12.0 cudatoolkit=11.3 -c pytorch
    #pip
    pip install torch==1.12.0+cu113 torchvision==0.13.0+cu113 torchaudio==0.12.0 --extra-index-url https://download.pytorch.org/whl/cu113

    安装pulp和coin-or-cbc

    pip install pulp
    conda install -c conda-forge coin-or-cbc
    - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/get_started/introduction/index.html b/zh-Hans/docs/Colossal-Auto/get_started/introduction/index.html index 7d17b69f..35c632d9 100644 --- a/zh-Hans/docs/Colossal-Auto/get_started/introduction/index.html +++ b/zh-Hans/docs/Colossal-Auto/get_started/introduction/index.html @@ -16,14 +16,14 @@ - +

    介绍

    近年来,大规模机器学习模型的部署受到越来越多的重视。然而,目前常见的分布式大模型训练方案,都依赖用户人工反复尝试和系统专家的经验来进行配置部署。这对绝大多数AI开发者来说十分不友好,因为他们不希望将时间精力花费在研究分布式系统和试错上。 Colossal-AI的Colossal-Auto 帮助AI开发者简化了大规模机器学习模型的部署过程。相比现有其他手动配置复杂并行策略和修改模型的解决方案,Colossal-Auto 仅需增加一行代码,提供 cluster 信息以及单机训练模型即可获得分布式训练能力,并且原生支持包括 Hugging Face,Timm 等热门 AI 模型库

    概览

    用法

    # wrap the model using auto_engine
    model = autoparallelize(model, meta_input_samples)
    # normal training loop
    ...

    图追踪

    Colossal-Auto 是首个基于 PyTorch 框架使用静态图分析的自动并行系统。PyTorch 作为一个动态图框架,获取其静态的执行计划是机器学习系统领域被长期研究的问题。Colossal-Auto 使用基于 torch.FX Tracer 的 ColoTracer 来完成对于最优并行策略的搜索。在 tracing 过程中推导并记录了每个 tensor 的元信息,例如 tensor shape,dims,dtype 等。因此 Colossal-AI 具有更好的模型泛化能力,而不是依靠模型名或手动修改来适配并行策略。

    细粒度分布式训练策略搜索

    我们调研了很多现有的自动并行系统( Tofu , Flexflow , Alpa ),以及自动激活值检查点算法( Rotor , Sublinear ),在他们的启发下,我们开发一个基于PyTorch框架的自动并行系统Colossal-Auto。Colossal-Auto会在满足内存预算的限制下,以最快运行时间为目标,为每个 op 进行策略搜索,最终得到真实训练时的策略,包括每个 tensor 的切分策略,不同计算节点间需要插入的通信算子类型,是否要进行算子替换等。现有系统中的张量并行,数据并行,NVIDIA 在 Megatron-LM 等并行系统中使用的 column 切分和 row 切分并行等混合并行,都是自动并行可以搜索到的策略的子集。除了这些可以手动指定的并行方式外,Colossal-AI 有能力为每个 op 指定独特的并行方式,因此有可能找到比依赖专家经验和试错配置的手动切分更好的并行策略。

    分布式 tensor 与 shape consistency 系统

    与 PyTorch 最新发布的 DTensor 类似,Colossal-AI 也使用了 device mesh 对集群进行了抽象管理。具体来说,Colossal-AI 使用 sharding spec 对 tensor 的分布式存储状态进行标注,使用 shape consistency manager 自动地对同一 tensor 在不同 sharding spec 间进行转换。这让 Colossal-AI 的通用性和易用性极大地提升,借助 shape consistency manager 可以没有负担地切分 tensor,而不用担心上游 op 的 output 与下游的 input 在集群中的存储方式不同。

    相较于 PyTorch DTensor,Colossal-AI 有以下优势:

    • Colossal-AI 的 device mesh 可以 profiling 到集群性能指标,对不同的通信算子进行耗时估算。
    • Colossal-AI 的 shape consistency 会贪心地搜索 sharding spec 间的转换方式,而不是朴素地逐 dimension 进行转换,这样能找到更高效的转换路径,进而使得 sharding spec 间的转换通信开销更小。
    • 加入了 all_to_all 操作,使得 Colossal-AI 的扩展性更强,这在大规模集群上进行训练时,可以展现出很大的优势。
    - + \ No newline at end of file diff --git a/zh-Hans/docs/Colossal-Auto/get_started/run_demo/index.html b/zh-Hans/docs/Colossal-Auto/get_started/run_demo/index.html index bfe3911c..262aabf1 100644 --- a/zh-Hans/docs/Colossal-Auto/get_started/run_demo/index.html +++ b/zh-Hans/docs/Colossal-Auto/get_started/run_demo/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    快速上手

    Colossal-AI 提供了业界急需的一套高效易用自动并行系统。相比现有其他手动配置复杂并行策略和修改模型的解决方案,Colossal-AI 仅需增加一行代码,提供 cluster 信息以及单机训练模型即可获得分布式训练能力。Colossal-Auto的快速上手示例如下。

    1. 基本用法

Colossal-Auto 可被用于为每一次操作寻找一个包含数据、张量(如1D、2D、序列并行)的混合SPMD并行策略。您可参考GPT 示例。 详细的操作指引见其 README.md。

    2. 与 activation checkpoint 结合

    作为大模型训练中必不可少的显存压缩技术,Colossal-AI 也提供了对于 activation checkpoint 的自动搜索功能。相比于大部分将最大显存压缩作为目标的技术方案,Colossal-AI 的搜索目标是在显存预算以内,找到最快的 activation checkpoint 方案。同时,为了避免将 activation checkpoint 的搜索一起建模到 SPMD solver 中导致搜索时间爆炸,Colossal-AI 做了 2-stage search 的设计,因此可以在合理的时间内搜索到有效可行的分布式训练方案。 您可参考 Resnet 示例。 详细的操作指引见其 README.md

    - + \ No newline at end of file diff --git a/zh-Hans/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html b/zh-Hans/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html index d978fc17..0f145a8a 100644 --- a/zh-Hans/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html +++ b/zh-Hans/docs/advanced_tutorials/integrate_mixture_of_experts_into_your_model/index.html @@ -16,7 +16,7 @@ - + @@ -26,7 +26,7 @@ 但是,当前的实施现在有两个主要缺点。第一个缺点是它在大批量和长序列长度训练中效率低下。第二个缺点是与张量并行性不兼容。我们正在致力于系统优化,以克服训练效率问题。与张量并行的兼容性问题需要更多的适应,我们将在未来解决这个问题。 在这里,我们将介绍如何使用具有模型并行性和数据并行性的 MoE。

    目录

    在本教程中,我们将介绍:

    1. 搭建MoE运行环境
    2. 创建MoE层
    3. 定义训练模型

    我们提供示例, 详细介绍请参考 ColossalAI-Examples. 该示例使用 WideNet 作为基于 MoE 的模型的示例.

    搭建MoE运行环境

在您的项目文件夹中,创建config.py文件。在该文件中,您可以指定希望用于训练模型的一些功能。为了启用 MoE,您需要在config.py中定义parallel字段,并指定moe的值。moe表示一组moe并行化训练组的并行大小。例如,将moe设置为4,则4个进程将分配给4个连续的GPU,这4个进程组成一个moe模型并行组。每个进程只会得到一部分专家。增加moe并行的大小将降低通信成本,但会增加每个GPU的计算成本和内存中activation的存储成本。总的数据并行的大小是自动检测的,默认情况下设置为GPU的数量。

    MOE_MODEL_PARALLEL_SIZE = ...
    parallel = dict(
    moe=dict(size=MOE_MODEL_PARALLEL_SIZE)
    )

如果MOE_MODEL_PARALLEL_SIZE = E,即设置专家的总数为E(E为一个常数)。在模型并行中,transformer编码器中前向部分的处理流程如下图所示。

    MoE Transformer, image source: GShard

    所有专家都分配给模型并行组中的GPU,每一个GPU只拥有一部分专家,原始数据并行组在反向传递的梯度处理期间不再适用于专家参数。所以我们创建了一个新的并行组,叫做moe数据并行组。当配置设置为WORLD_SIZE=4MOE_MODEL_PARALLEL_SIZE=2时,两个并行组的区别如下图所示。

    MoE并行处理

    至于梯度处理,我们提供了MoeGradientHandler来all-reduce模型的每个参数。如果您使用colossalai.initialize函数创建您的训练引擎,MoE梯度处理程序将自动添加到您的引擎中。否则,你应该自己处理梯度。MoE运行环境的所有参数都保存在colossalai.global_variables.moe_env中。您可以访问您的配置参数来检查您的设置是否正确。

    from colossalai.global_variables import moe_env

    创建MoE层

    您可以从colossalai.nn.moe创建MoE层。但在此之前,您应该为所有进程设置随机种子。

    from colossalai.context.random import moe_set_seed
    from model_zoo.moe.models import Widenet

    moe_set_seed(42)
    model = Widenet(num_experts=4, capacity_factor=1.2)

    moe_set_seed 会为一个moe模型并行组中的不同进程设置不同的种子(这有助于在专家中初始化参数),创建一个专家实例和一个路由器实例,示例如下。

    from colossalai.nn.layer.moe import Experts, MoeLayer, Top2Router, NormalNoiseGenerator


    noisy_func = NormalNoiseGenerator(num_experts)
    shared_router = Top2Router(capacity_factor,
    noisy_func=noisy_func)
    shared_experts = Experts(expert=VanillaFFN,
    num_experts=num_experts,
    **moe_mlp_args(
    d_model=d_model,
    d_ff=d_ff,
    drop_rate=drop_rate
    ))
    ffn=MoeLayer(dim_model=d_model, num_experts=num_experts,
    router=shared_router, experts=shared_experts)

    在Experts的初始化中,会自动计算每个GPU的本地expert数量,您只需指定每个专家的类型及其在初始化时使用的参数。此外,我们提供了Top1RouterTop2Router,您可以在colossalai.nn.layer.moe 找到它们。在创建experts和router的实例时,Moelayer只初始化了gate模块,类型的更多详细信息您可以参考我们的API文档和代码。

    定义训练模型

    使用colossalai中的colossalai.initialize函数为引擎添加梯度处理程序以处理 MoE模型的反向传播。在 colossalai.initialize 中,我们会自动创建一个MoeGradientHandler对象来处理梯度。您可以在colossal目录中找到有关MoeGradientHandler的更多信息。为了添加MoE的相关损失处理,损失函数应使用Moeloss封装,示例如下。

    criterion = MoeLoss(
    aux_weight=0.01,
    loss_fn=nn.CrossEntropyLoss,
    label_smoothing=0.1
    )

    最后,您只需使用 colossalai 中的trainerengine进行训练即可。

    - + \ No newline at end of file diff --git a/zh-Hans/docs/advanced_tutorials/meet_gemini/index.html b/zh-Hans/docs/advanced_tutorials/meet_gemini/index.html index 67bcea2c..70c6ccf6 100644 --- a/zh-Hans/docs/advanced_tutorials/meet_gemini/index.html +++ b/zh-Hans/docs/advanced_tutorials/meet_gemini/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    认识Gemini:ColossalAI的异构内存空间管理器

    作者: Jiarui Fang

    简介

在GPU数量不足情况下,想要增加模型规模,异构训练是最有效的手段。它通过在 CPU 和 GPU 中容纳模型数据,并仅在必要时将数据移动到当前设备,可以同时利用 GPU 内存、CPU 内存(由 CPU DRAM 或 NVMe SSD内存组成)来突破单GPU内存墙的限制。同时,在大规模训练下,其他方案如数据并行、模型并行、流水线并行都可以在异构训练基础上进一步扩展GPU规模。这篇文章描述ColossalAI的异构内存空间管理模块Gemini的设计细节,它的思想来源于PatrickStar,ColossalAI根据自身情况进行了重新实现。

    用法

    目前Gemini支持和ZeRO并行方式兼容,它的使用方法很简单:使用booster将GeminiPlugin中的特性注入到训练组件中。更多booster介绍请参考booster使用

from torchvision.models import resnet18
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam
from colossalai.zero import ColoInitContext

plugin = GeminiPlugin(placement_policy='cuda', strict_ddp_mode=True, max_norm=1.0, initial_scale=2**5)
booster = Booster(plugin=plugin)
ctx = ColoInitContext()
with ctx:
    model = resnet18()
optimizer = HybridAdam(model.parameters(), lr=1e-3)
criterion = lambda x: x.mean()
model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

    注意,Gemini和并行策略,如Tensor Parallelism,Data Parallelism,Pipeline Parallelism,ZeRO是解耦合的。对TP,PP的支持还在开发中。

    术语

    算子(OPerator):一个神经网络层的计算操作,比如Linear,LayerNorm等。算子可以是正向传播的计算,也可以是反向传播的计算。

神经网络在训练期间必须管理两种类型的训练数据:

    模型数据(model data): 由参数、梯度和优化器状态组成,其规模与模型结构定义相关

    非模型数据(non-model data): 主要由算子生成的中间张量和算子的临时变量组成。非模型数据根据训练任务的配置动态变化,例如批量大小。模型数据和非模型数据相互竞争 GPU 内存。

    设计

    目前的一些解决方案,DeepSpeed采用的Zero-offload在CPU和GPU内存之间静态划分模型数据,并且它们的内存布局对于不同的训练配置是恒定的。如下图左边所示,当 GPU 内存不足以满足其相应的模型数据要求时,即使当时CPU上仍有可用内存,系统也会崩溃。而ColossalAI可以通过将一部分模型数据换出到CPU上来完成训练。

    比较Zero-Offload和Gemini的内存管理方案

    ColossalAI设计了Gemini,就像双子星一样,它管理CPU和GPU二者内存空间。它可以让张量在训练过程中动态分布在CPU-GPU的存储空间内,从而让模型训练突破GPU的内存墙。内存管理器由两部分组成,分别是MemStatsCollector(MSC)和StatefulTensorMgr(STM)。

我们利用了深度学习网络训练过程的迭代特性。我们将迭代分为warmup和non-warmup两个阶段,开始时的一个或若干迭代步属于预热阶段,其余的迭代步属于正式阶段。在warmup阶段我们为MSC收集信息,而在non-warmup阶段,STM会利用MSC收集到的信息来移动tensor,以达到最小化CPU-GPU数据移动volume的目的。

    Gemini在不同训练阶段的运行流程

    StatefulTensorMgr

    STM管理所有model data tensor的信息。在模型的构造过程中,ColossalAI把所有model data张量注册给STM。内存管理器给每个张量标记一个状态信息。状态集合包括HOLD,COMPUTE,FREE三种状态。STM的功能如下:

    查询内存使用:通过遍历所有tensor的在异构空间的位置,获取模型数据对CPU和GPU的内存占用。

转换张量状态:它在每个模型数据张量参与算子计算之前,将张量标记为COMPUTE状态,在计算之后标记为HOLD状态。如果张量不再使用,则标记为FREE状态。

    调整张量位置:张量管理器保证COMPUTE状态的张量被放置在计算设备上,如果计算设备的存储空间不足,则需要移动出一些HOLD状态的张量到其他设备上存储。Tensor eviction strategy需要MSC的信息,我们将在后面介绍。

    MemStatsCollector

在预热阶段,内存信息统计器监测CPU和GPU中模型数据和非模型数据的内存使用情况,供正式训练阶段参考。我们通过查询STM可以获得模型数据在某个时刻的内存使用。但是非模型数据的内存使用却难以获取,因为非模型数据的生存周期并不归用户管理,现有的深度学习框架没有暴露非模型数据的追踪接口给用户。MSC通过采样方式,在预热阶段获得非模型数据对CPU和GPU内存的使用情况。具体方法如下:

我们在算子开始和结束计算时触发内存采样操作,并称这个时间点为采样时刻(sampling moment),两个采样时刻之间的时间称为period。计算过程是一个黑盒,由于可能分配临时buffer,内存使用情况很复杂,但我们可以较准确地获取一个period内系统的最大内存使用。非模型数据的内存使用量,可以通过“两个采样时刻之间系统的最大内存使用”减去“模型数据的内存使用”获得。
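下面用一小段示意性的 Python 代码表达这一估算关系(参数均为假设的统计量,仅用于说明 MSC 的计算方式):

# 示意性代码:非模型数据内存 = 两个采样时刻之间系统的最大内存使用 - 模型数据的内存使用
# 参数均为假设的统计量(可理解为字节数),仅用于说明估算关系
def estimate_non_model_data_mem(max_system_mem_in_period: int, model_data_mem: int) -> int:
    return max_system_mem_in_period - model_data_mem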

我们如何设计采样时刻呢?我们选择在preOp的model data layout adjust之前采样,如下图所示:我们采样获得上一个period的system memory used,和下一个period的model data memory used。并行策略会给MSC的工作造成障碍。如图所示,比如对于ZeRO或者Tensor Parallel,由于Op计算前需要gather模型数据,会带来额外的内存需求。因此,我们要求在模型数据变化前采样系统内存,这样在一个period内,MSC能够捕捉到preOp带来的模型数据内存变化。比如在period 2-3内,我们考虑tensor gather和shard带来的内存变化。

尽管可以将采样时刻放在其他位置,比如排除gather buffer的变动信息,但这会给实现造成麻烦。不同并行方式下Op的实现有差异:比如对于Linear Op,Tensor Parallel中gather buffer的分配在Op中;而对于ZeRO,gather buffer的分配是在PreOp中。将采样放在PreOp开始时,有利于将两种情况统一。

    Sampling based MemStatsCollector

    Tensor Eviction Strategy

MSC的重要职责是调整tensor layout的位置。比如在上图S2时刻,我们减少设备上的model data数据,使得Period 2-3计算的峰值内存需求得到满足。

在warmup阶段,由于还没有执行完一个完整的迭代,我们对内存的真实使用情况尚一无所知。此时我们限制模型数据的内存使用上限,比如只使用30%的GPU内存,以保证预热阶段可以顺利完成。

在non-warmup阶段,我们需要利用预热阶段采集的非模型数据内存信息,预留出下一个Period在计算设备上需要的峰值内存,这需要我们移动出一些模型张量。为了避免频繁在CPU-GPU间换入换出相同的tensor、引起类似cache thrashing的现象,我们利用DNN训练的迭代特性,设计了OPT cache换出策略。具体来说,在warmup阶段,我们记录每个tensor被计算设备需要的采样时刻。如果我们需要驱逐一些HOLD tensor,那么我们选择在本设备上最晚被需要的tensor作为受害者。
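下面用一段示意性的 Python 代码说明这种 OPT 换出策略的选择逻辑(数据结构与函数名均为假设,并非 Gemini 的实际实现):

# 示意性代码:OPT 换出策略——在需要腾出显存时,
# 选择“在本设备上下一次被需要的时刻最晚”的 HOLD 状态张量作为驱逐对象。
# hold_tensors 为假设的列表,每个元素带有 warmup 阶段记录的 needed_moments(该张量被计算设备需要的全部采样时刻)。
def choose_victim(hold_tensors, current_moment):
    def next_needed(t):
        # 取当前时刻之后该张量下一次被需要的采样时刻;若之后不再被需要,视为无穷远
        future = [m for m in t.needed_moments if m > current_moment]
        return min(future) if future else float("inf")
    return max(hold_tensors, key=next_needed)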

    - + \ No newline at end of file diff --git a/zh-Hans/docs/advanced_tutorials/opt_service/index.html b/zh-Hans/docs/advanced_tutorials/opt_service/index.html index b7ecc473..978b2a85 100644 --- a/zh-Hans/docs/advanced_tutorials/opt_service/index.html +++ b/zh-Hans/docs/advanced_tutorials/opt_service/index.html @@ -16,7 +16,7 @@ - + @@ -25,7 +25,7 @@ 服务的入口是一个bash脚本 server.sh。 本服务的配置文件参考 opt_config.py,该文件定义了模型的类型、 检查点文件路径、并行策略和http设置。您能按照您的需求来修改这些设置。 例如,将模型的大小设置为opt_125M,将正确的检查点路径按照如下设置:

    model_class = opt_125M
    checkpoint = 'your_file_path'

    将张量并行度设置为您的gpu数量。

    tp_init_size = #gpu

    现在,我们就能利用docker发布一个服务。您能在/model_checkpoint/config路径下找到检查点文件和配置文件。

    export CHECKPOINT_DIR="your_opt_checkpoint_path"
    # the ${CONFIG_DIR} must contain a server.sh file as the entry of service
    export CONFIG_DIR="config_file_path"

    docker run --gpus all --rm -it -p 8020:8020 -v ${CHECKPOINT_DIR}:/model_checkpoint -v ${CONFIG_DIR}:/config --ipc=host energonai:latest

    接下来,您就可以在您的浏览器中打开 https://[IP-ADDRESS]:8020/docs# 进行测试。

    高级特性用法

    1. 批处理优化

    若想使用我们的高级批处理技术来批量收集多个查询,您可以将executor_max_batch_size设置为最大批处理大小。 请注意,只有具有相同 top_k、top_p 和温度的解码任务才能一起批处理。

    executor_max_batch_size = 16

    所有的查询将进入FIFO队列。解码步数小于或等于队列头部解码步数的所有连续查询可以一起批处理。 应用左填充以确保正确性。 executor_max_batch_size 不应该过大,从而确保批处理不会增加延迟。 以opt-30b为例, executor_max_batch_size=16 合适,但对于opt-175b而言, executor_max_batch_size=4 更合适。
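下面是一段示意性的 Python 代码,说明上述 FIFO 队列中的批处理收集条件(字段名均为假设,并非实际服务实现):

# 示意性代码:从 FIFO 队列头部开始,收集“解码步数 <= 队首解码步数”
# 且采样参数(top_k/top_p/temperature)相同的连续请求,组成一个 batch
def collect_batch(queue, max_batch_size):
    if not queue:
        return []
    head = queue[0]
    batch = []
    for req in queue:
        same_sampling = (req.top_k, req.top_p, req.temperature) == (head.top_k, head.top_p, head.temperature)
        if len(batch) < max_batch_size and same_sampling and req.decode_steps <= head.decode_steps:
            batch.append(req)   # 之后对 batch 内的输入做左填充(left padding)以保证正确性
        else:
            break
    return batch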

2. 缓存优化

    对于每一个独立的服务过程,您能将最近的多个查询结果缓存在一起。在config.py中设置 cache_size 和 cache_list_size。缓存的大小应为缓存的查询数目。cache_list_size 应为每次查询存储的结果数。一个随机缓存的结果将会被返回。当缓存已满,LRU策略被用于清理缓存过的查询。cache_size=0意味着不缓存。

    cache_size = 50
    cache_list_size = 2
    - + \ No newline at end of file diff --git a/zh-Hans/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html b/zh-Hans/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html index 269cdfde..70926f22 100644 --- a/zh-Hans/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html +++ b/zh-Hans/docs/advanced_tutorials/train_gpt_using_hybrid_parallelism/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    使用混合并行训练 GPT-2

    作者: Hongxin Liu, Yongbin Li, Mingyan Jiang

    前置教程

    示例代码

    相关论文

    引言

    在上一篇教程中,我们介绍了如何用流水并行训练 ViT。在本教程中,你将学习一个更复杂的场景--用混合并行方式训练GPT-2。在这种情况下,由于GPT-2过大,即使CPU内存也无法容纳它。因此,该模型必须被分割。

    目录

    在本教程中,我们将介绍:

    1. 初始化混合并行插件
    2. 定义 GPT-2 模型的训练组件
    3. 使用 HybridParallelPlugin 增强GPT-2模型
    4. 使用混合并行训练 GPT-2

    导入依赖库

from functools import partial
from typing import Callable, List, Optional, Union

import datasets
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.optim import Optimizer
from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoConfig, AutoTokenizer, GPT2ForSequenceClassification, get_linear_schedule_with_warmup

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import GeminiPlugin, HybridParallelPlugin, LowLevelZeroPlugin, TorchDDPPlugin
    from colossalai.cluster import DistCoordinator
    from colossalai.nn.optimizer import HybridAdam
    from colossalai.utils import get_current_device

    定义plugin

定义一个HybridParallelPlugin对象,指定所需要使用的并行策略。在该例子中,同时使用了流水线并行和zero1。

    plugin = HybridParallelPlugin(
    tp_size=1,
    pp_size=2,
    num_microbatches=None,
    microbatch_size=1,
    enable_all_optimization=True,
    zero_stage=1,
    precision="fp16",
    initial_scale=1,
    )

    创建分布式环境.

    # Launch ColossalAI
    colossalai.launch_from_torch(config={}, seed=42)
    coordinator = DistCoordinator()

    定义GPT-2模型的训练组件

    在使用混合并行之前,您需要定义训练所使用的组件。 定义超参数。

    NUM_EPOCHS = 3
    BATCH_SIZE = 32
    LEARNING_RATE = 2.4e-5
    WEIGHT_DECAY = 0.01
    WARMUP_FRACTION = 0.1

    获取数据集。您可以使用plugin.prepare_dataloader生成dataloader,也可以自定义您的dataloader。

    def tokenize_batch(batch, tokenizer: Optional[AutoTokenizer] = None, max_length: int = 2048):
    texts = [sample["sentence1"] + sample["sentence2"] for sample in batch]
    data = tokenizer(texts, return_tensors="pt", padding="max_length", truncation=True, max_length=max_length)
    data = {k: v.cuda() for k, v in data.items()}
    data["labels"] = data["input_ids"].clone()
    return data

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    dataset = datasets.load_dataset("glue", "mrpc")
    train_dataloader = plugin.prepare_dataloader(
    dataset["train"],
    batch_size=BATCH_SIZE,
    shuffle=True,
    drop_last=True,
    collate_fn=partial(tokenize_batch, tokenizer=tokenizer, max_length=512),
    )

    定义GPT-2模型。

    cfg = AutoConfig.from_pretrained("gpt2", num_labels=2)
    model = GPT2ForSequenceClassification.from_pretrained("gpt2", config=cfg).cuda()

    准备优化器

    lr = LEARNING_RATE * coordinator.world_size
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
    {
    "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
    "weight_decay": WEIGHT_DECAY,
    },
    {
    "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
    "weight_decay": 0.0,
    },
    ]

    optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)

    准备 lr_schedulercriterion,需要注意的是,当混合并行使用了管道并行时,还需定义criterion函数。这个函数应该以模型前后向的输入和输出作为参数,并返回loss。

    # lr scheduler
    total_steps = len(train_dataloader) * NUM_EPOCHS
    num_warmup_steps = int(WARMUP_FRACTION * total_steps)
    lr_scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=num_warmup_steps,
    num_training_steps=total_steps,
    )

    def _criterion(outputs, inputs):
    return outputs.loss

    增强GPT-2模型

    使用 HybridParallelPlugin 定义一个 booster(增强器)。根据设置的插件参数,booster会将一种或者多种并行策略注入到模型中。该例子中使用了管道并行,zero1,及半精度训练等优化。

    booster = Booster(plugin=plugin)

    使用定义的 booster 来增强这些组件。

    model, optimizer, _criterion, _, lr_scheduler = booster.boost(
    model, optimizer, criterion=_criterion, lr_scheduler=lr_scheduler
    )

    使用混合并行训练 GPT-2

    在前面的教程中,我们已经解释了如何使用 Booster 和 HybridParallelPlugin 将各种并行特性注入到模型及其训练组件中。现在我们可以开始模型训练。 定义一个训练函数。当使用了管道并行时,需要调用booster.execute_pipeline进行模型训练的阶段调度。

    def train_epoch(
    epoch: int,
    model: nn.Module,
    optimizer: Optimizer,
    _criterion: Callable,
    lr_scheduler: LRScheduler,
    train_dataloader: DataLoader,
    booster: Booster,
    coordinator: DistCoordinator,
    ):
    use_pipeline = isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1
    is_pp_last_stage = use_pipeline and booster.plugin.stage_manager.is_last_stage()
    print_flag = (not use_pipeline and coordinator.is_master()) or (use_pipeline and is_pp_last_stage)
    total_step = len(train_dataloader)

    model.train()
    optimizer.zero_grad()
    train_dataloader_iter = iter(train_dataloader)
    with tqdm(
    range(total_step),
    desc=f"Epoch [{epoch + 1}/{NUM_EPOCHS}]",
    disable=not print_flag,
    ) as pbar:
    # Forward pass
    for _ in pbar:
    if use_pipeline:
    outputs = booster.execute_pipeline(
    train_dataloader_iter, model, _criterion, optimizer, return_loss=True, return_outputs=True
    )
    # Backward and optimize
    if is_pp_last_stage:
    loss = outputs["loss"]
    pbar.set_postfix({"loss": loss.item()})
    else:
    data = next(train_dataloader_iter)
    data = move_to_cuda(data)
    outputs = model(**data)
    loss = _criterion(outputs, None)
    # Backward
    booster.backward(loss, optimizer)
    pbar.set_postfix({"loss": loss.item()})

    optimizer.step()
    optimizer.zero_grad()
    lr_scheduler.step()

    训练 GPT-2 模型。

    for epoch in range(NUM_EPOCHS):
    train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)
    - + \ No newline at end of file diff --git a/zh-Hans/docs/advanced_tutorials/train_vit_with_hybrid_parallelism/index.html b/zh-Hans/docs/advanced_tutorials/train_vit_with_hybrid_parallelism/index.html index feb1c3fc..e0255e41 100644 --- a/zh-Hans/docs/advanced_tutorials/train_vit_with_hybrid_parallelism/index.html +++ b/zh-Hans/docs/advanced_tutorials/train_vit_with_hybrid_parallelism/index.html @@ -16,7 +16,7 @@ - + @@ -48,7 +48,7 @@ cpu_offload(布尔值,可选项):在使用ZeRO时是否打开cpu_offload。默认为False。 communication_dtype(torch数据类型,可选项):在使用ZeRO时的通信数据类型。如果未指定,则将使用参数的数据类型。默认为None。 overlap_communication(布尔值,可选项):在使用ZeRO时是否重叠通信和计算。默认为True。

    zero1的plugin示例

    plugin = HybridParallelPlugin(
    tp_size=1,
    pp_size=1,
    zero_stage=1,
    cpu_offload=True,
    precision="fp16",
    initial_scale=1,
    )

    混合并行

    可参考上述的策略自定义合适的混合并行策略。定义混合并行的插件,并使用该插件定义一个booster:

    plugin = HybridParallelPlugin(
    tp_size=TP_SIZE,
    pp_size=PP_SIZE,
    num_microbatches=None,
    microbatch_size=1,
    enable_all_optimization=True,
    precision="fp16",
    initial_scale=1,
    )
    booster = Booster(plugin=plugin)

    接着我们使用booster.boost来将plugin所封装的特性注入到模型训练组件中。

    model, optimizer, _criterion, train_dataloader, lr_scheduler = booster.boost(
    model=model, optimizer=optimizer, criterion=criterion, dataloader=train_dataloader, lr_scheduler=lr_scheduler
    )

    使用混合并行训练 ViT

    最后就可以使用混合并行策略来训练模型了。我们先定义一个训练函数,描述训练过程。需要注意的是,如果使用了管道并行策略,需要调用booster.execute_pipeline来执行模型的训练,它会调用scheduler管理模型的前后向操作。

    def run_forward_backward(
    model: nn.Module,
    optimizer: Optimizer,
    criterion: Callable[[Any, Any], torch.Tensor],
    data_iter: Iterator,
    booster: Booster,
    ):
    if optimizer is not None:
    optimizer.zero_grad()
    if isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1:
    # run pipeline forward backward when enabling pp in hybrid parallel plugin
    output_dict = booster.execute_pipeline(
    data_iter, model, criterion, optimizer, return_loss=True, return_outputs=True
    )
    loss, outputs = output_dict["loss"], output_dict["outputs"]
    else:
    batch = next(data_iter)
    batch = move_to_cuda(batch, torch.cuda.current_device())
    outputs = model(**batch)
    loss = criterion(outputs, None)
    if optimizer is not None:
    booster.backward(loss, optimizer)

    def train_epoch(
    epoch: int,
    model: nn.Module,
    optimizer: Optimizer,
    criterion: Callable[[Any, Any], torch.Tensor],
    lr_scheduler: LRScheduler,
    dataloader: DataLoader,
    booster: Booster,
    coordinator: DistCoordinator,
    ):
    torch.cuda.synchronize()

    num_steps = len(dataloader)
    data_iter = iter(dataloader)
    enable_pbar = coordinator.is_master()
    if isinstance(booster.plugin, HybridParallelPlugin) and booster.plugin.pp_size > 1:
    # when using pp, only the last stage of master pipeline (dp_rank and tp_rank are both zero) shows pbar
    tp_rank = dist.get_rank(booster.plugin.tp_group)
    dp_rank = dist.get_rank(booster.plugin.dp_group)
    enable_pbar = tp_rank == 0 and dp_rank == 0 and booster.plugin.stage_manager.is_last_stage()
    model.train()

    with tqdm(range(num_steps), desc=f"Epoch [{epoch + 1}]", disable=not enable_pbar) as pbar:
    for _ in pbar:
    loss, _ = run_forward_backward(model, optimizer, criterion, data_iter, booster)
    optimizer.step()
    lr_scheduler.step()

    # Print batch loss
    if enable_pbar:
    pbar.set_postfix({"loss": loss.item()})

    开始训练模型

    for epoch in range(NUM_EPOCH):
    train_epoch(epoch, model, optimizer, criterion, lr_scheduler, train_dataloader, booster, coordinator)
    - + \ No newline at end of file diff --git a/zh-Hans/docs/basics/booster_api/index.html b/zh-Hans/docs/basics/booster_api/index.html index 23d11366..17dbfb8e 100644 --- a/zh-Hans/docs/basics/booster_api/index.html +++ b/zh-Hans/docs/basics/booster_api/index.html @@ -16,7 +16,7 @@ - + @@ -129,7 +129,7 @@ names to compose the keys in state_dict. Defaults to None.
  • size_per_shard (int, optional) -- Maximum size of checkpoint shard file in MB. This is useful only when shard=True. Defaults to 1024.
  • Description

    Save optimizer to checkpoint.

    使用方法及示例

    在使用 colossalai 训练时,首先需要在训练脚本的开头启动分布式环境,并创建需要使用的模型、优化器、损失函数、数据加载器等对象。之后,调用booster.boost 将特征注入到这些对象中,您就可以使用我们的 booster API 去进行您接下来的训练流程。

    以下是一个伪代码示例,将展示如何使用我们的 booster API 进行模型训练:

    import torch
    from torch.optim import SGD
    from torchvision.models import resnet18

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin

def train(rank, world_size, port):
    # launch colossalai
    colossalai.launch(config=dict(), rank=rank, world_size=world_size, port=port, host='localhost')

    # create plugin and objects for training
    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    model = resnet18()
    criterion = lambda x: x.mean()
    optimizer = SGD((model.parameters()), lr=0.001)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)

    # use booster.boost to wrap the training objects
    model, optimizer, criterion, _, scheduler = booster.boost(model, optimizer, criterion, lr_scheduler=scheduler)

    # do training as normal, except that the backward should be called by booster
    x = torch.randn(4, 3, 224, 224)
    x = x.to('cuda')
    output = model(x)
    loss = criterion(output)
    booster.backward(loss, optimizer)
    optimizer.clip_grad_by_norm(1.0)
    optimizer.step()
    scheduler.step()
    optimizer.zero_grad()

    # checkpointing using booster api
    save_path = "./model"
    booster.save_model(model, save_path, shard=True, size_per_shard=10, use_safetensors=True)

    new_model = resnet18()
    booster.load_model(new_model, save_path)

    更多的Booster设计细节请参考这一页面

    - + \ No newline at end of file diff --git a/zh-Hans/docs/basics/booster_checkpoint/index.html b/zh-Hans/docs/basics/booster_checkpoint/index.html index 8cabb1ed..afffa82d 100644 --- a/zh-Hans/docs/basics/booster_checkpoint/index.html +++ b/zh-Hans/docs/basics/booster_checkpoint/index.html @@ -16,7 +16,7 @@ - + @@ -63,7 +63,7 @@
  • lr_scheduler (LRScheduler) -- A lr scheduler boosted by Booster.
  • checkpoint (str) -- Path to the checkpoint. It must be a local file path.
  • Description
    Load lr scheduler from checkpoint.

    学习率调度器在加载前必须被 colossalai.booster.Booster 封装。 checkpoint 是 checkpoint 文件的本地路径.

    Checkpoint 设计

    有关 Checkpoint 设计的更多详细信息,请参见我们的讨论 A Unified Checkpoint System Design.

    - + \ No newline at end of file diff --git a/zh-Hans/docs/basics/booster_plugins/index.html b/zh-Hans/docs/basics/booster_plugins/index.html index 0903221e..2dc64f7a 100644 --- a/zh-Hans/docs/basics/booster_plugins/index.html +++ b/zh-Hans/docs/basics/booster_plugins/index.html @@ -16,7 +16,7 @@ - + @@ -111,7 +111,7 @@ train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8) booster = Booster(plugin=plugin) model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion) -

    Hybrid Parallel 插件

    这个插件实现了多种并行训练策略和优化工具的组合。Hybrid Parallel插件支持的功能大致可以被分为以下四个部分:

    1. Shardformer: Shardformer负责在张量并行以及流水线并行下切分模型的逻辑,以及前向/后向方法的重载,这个插件为Shardformer功能提供了一个简单易用的接口。与此同时,Shardformer还负责将包括fused normalization, flash attention (xformers), JIT和序列并行在内的各类优化工具融入重载后的前向/后向方法。更多关于Shardformer的信息请参考 Shardformer文档。下图展示了Shardformer与Hybrid Parallel插件所支持的功能。
2. 混合精度训练:插件支持fp16/bf16的混合精度训练。更多关于混合精度训练的参数配置的详细信息请参考 混合精度训练文档。

3. Torch DDP: 当流水线并行和Zero不被使用的时候,插件会自动采用Pytorch DDP作为数据并行的策略。更多关于Torch DDP的参数配置的详细信息请参考 Pytorch DDP 文档。

4. Zero: 在初始化插件的时候,可以通过将zero_stage参数设置为1或2来让插件采用Zero 1/2作为数据并行的策略。Zero 1可以和流水线并行策略同时使用, 而Zero 2则不可以和流水线并行策略同时使用。更多关于Zero的参数配置的详细信息请参考 Low Level Zero 插件。

    ⚠ 在使用该插件的时候, 只有支持Shardformer的部分Huggingface transformers模型才能够使用张量并行、流水线并行以及优化工具。Llama 1、Llama 2、OPT、Bloom、Bert以及GPT2等主流transformers模型均已支持Shardformer。

    class
     

    colossalai.booster.plugin.HybridParallelPlugin

    (tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1)
    Parameters
      +

    Hybrid Parallel 插件

    这个插件实现了多种并行训练策略和优化工具的组合。Hybrid Parallel插件支持的功能大致可以被分为以下四个部分:

    1. Shardformer: Shardformer负责在张量并行以及流水线并行下切分模型的逻辑,以及前向/后向方法的重载,这个插件为Shardformer功能提供了一个简单易用的接口。与此同时,Shardformer还负责将包括fused normalization, flash attention (xformers), JIT和序列并行在内的各类优化工具融入重载后的前向/后向方法。更多关于Shardformer的信息请参考 Shardformer文档。下图展示了Shardformer与Hybrid Parallel插件所支持的功能。
2. 混合精度训练:插件支持fp16/bf16的混合精度训练。更多关于混合精度训练的参数配置的详细信息请参考 混合精度训练文档。

3. Torch DDP: 当流水线并行和Zero不被使用的时候,插件会自动采用Pytorch DDP作为数据并行的策略。更多关于Torch DDP的参数配置的详细信息请参考 Pytorch DDP 文档。

4. Zero: 在初始化插件的时候,可以通过将zero_stage参数设置为1或2来让插件采用Zero 1/2作为数据并行的策略。Zero 1可以和流水线并行策略同时使用, 而Zero 2则不可以和流水线并行策略同时使用。更多关于Zero的参数配置的详细信息请参考 Low Level Zero 插件。

    ⚠ 在使用该插件的时候, 只有支持Shardformer的部分Huggingface transformers模型才能够使用张量并行、流水线并行以及优化工具。Llama 1、Llama 2、OPT、Bloom、Bert以及GPT2等主流transformers模型均已支持Shardformer。

    class
     

    colossalai.booster.plugin.HybridParallelPlugin

    (tp_size: int, pp_size: int, precision: str = 'fp16', zero_stage: int = 0, enable_all_optimization: bool = False, enable_fused_normalization: bool = False, enable_flash_attention: bool = False, enable_jit_fused: bool = False, enable_sequence_parallelism: bool = False, enable_sequence_overlap: bool = False, num_microbatches: typing.Optional[int] = None, microbatch_size: typing.Optional[int] = None, initial_scale: float = 65536, min_scale: float = 1, growth_factor: float = 2, backoff_factor: float = 0.5, growth_interval: int = 1000, hysteresis: int = 2, max_scale: float = 4294967296, max_norm: float = 0, broadcast_buffers: bool = True, ddp_bucket_cap_mb: int = 25, find_unused_parameters: bool = False, check_reduction: bool = False, gradient_as_bucket_view: bool = False, static_graph: bool = False, zero_bucket_size_in_m: int = 12, cpu_offload: bool = False, communication_dtype: typing.Optional[torch.dtype] = None, overlap_communication: bool = True, custom_policy: Policy = None, pp_style: str = '1f1b', num_model_chunks: int = 1, enable_metadata_cache: bool = True)
    Parameters
    • tp_size (int) -- The size of tensor parallelism. Tensor parallelism will not be used when tp_size is set to 1.
    • pp_size (int) -- The number of pipeline stages in pipeline parallelism. Pipeline parallelism will not be used when pp_size is set to 1.
    • precision (str, optional) -- Specifies the precision of parameters during training. @@ -152,6 +152,7 @@
    • custom_policy (Policy, optional) -- Custom policy for Shardformer. Defaults to None.
    • pp_style (str, optional) -- The style for pipeline parallelism. Defaults to '1f1b'.
    • num_model_chunks (int, optional) -- The number of model chunks for interleaved pipeline parallelism. Defaults to 1.
    • +
    • enable_metadata_cache (bool, optional) -- Whether to enable metadata cache for pipeline parallelism. Defaults to True.
    Description

    Plugin for Hybrid Parallel Training. Tensor parallel, pipeline parallel and data parallel(DDP/ZeRO) can be picked and combined in this plugin. The size of tp and pp should be passed in by user, then the size of dp is automatically calculated from dp_size = world_size / (tp_size * pp_size).

    Example
    from colossalai.booster import Booster
    @@ -163,7 +164,7 @@
     train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=8)
     booster = Booster(plugin=plugin)
     model, optimizer, criterion, train_dataloader, _ = booster.boost(model, optimizer, criterion, train_dataloader)
    -
    function
     

    prepare_dataloader

    (dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs)
    Parameters
      +
    function
     

    prepare_dataloader

    (dataset, batch_size, shuffle = False, seed = 1024, drop_last = False, pin_memory = False, num_workers = 0, **kwargs)
    Parameters
    • dataset (torch.utils.data.Dataset) -- The dataset to be loaded.
    • shuffle (bool, optional) -- Whether to shuffle the dataset. Defaults to False.
    • seed (int, optional) -- Random worker seed for sampling, defaults to 1024. @@ -204,7 +205,7 @@ booster = Booster(plugin=plugin) model, optimizer, train_dataloader, criterion = booster.boost(model, optimizer, train_dataloader, criterion)
    - + \ No newline at end of file diff --git a/zh-Hans/docs/basics/command_line_tool/index.html b/zh-Hans/docs/basics/command_line_tool/index.html index 7fe67ace..cf19fc60 100644 --- a/zh-Hans/docs/basics/command_line_tool/index.html +++ b/zh-Hans/docs/basics/command_line_tool/index.html @@ -16,13 +16,13 @@ - +

    命令行工具

    作者: Shenggui Li

    预备知识:

    简介

    Colossal-AI给用户提供了命令行工具,目前命令行工具可以用来支持以下功能。

    • 检查Colossal-AI是否安装正确
    • 启动分布式训练
    • 张量并行基准测试

    安装检查

    用户可以使用colossalai check -i这个命令来检查目前环境里的版本兼容性以及CUDA Extension的状态。

    Check Installation Demo

    启动分布式训练

    在分布式训练时,我们可以使用colossalai run来启动单节点或者多节点的多进程,详细的内容可以参考启动 Colossal-AI

    - + \ No newline at end of file diff --git a/zh-Hans/docs/basics/launch_colossalai/index.html b/zh-Hans/docs/basics/launch_colossalai/index.html index a53916b7..b869ef17 100644 --- a/zh-Hans/docs/basics/launch_colossalai/index.html +++ b/zh-Hans/docs/basics/launch_colossalai/index.html @@ -16,7 +16,7 @@ - + @@ -38,7 +38,7 @@ launch_from_slurm 会自动从环境变量 SLURM_PROCIDSLURM_NPROCS 中分别读取 rank 和 world size ,并使用它们来启动分布式后端。

    您可以在您的训练脚本中尝试以下操作。

    import colossalai

    colossalai.launch_from_slurm(
    config=<CONFIG>,
    host=args.host,
    port=args.port
    )

    您可以通过在终端使用这个命令来初始化分布式环境。

    srun python train.py --host <master_node> --port 29500

    用 OpenMPI 启动

    如果您对OpenMPI比较熟悉,您也可以使用 launch_from_openmpilaunch_from_openmpi 会自动从环境变量 OMPI_COMM_WORLD_LOCAL_RANKMPI_COMM_WORLD_RANKOMPI_COMM_WORLD_SIZE 中分别读取local rank、global rank 和 world size,并利用它们来启动分布式后端。

    您可以在您的训练脚本中尝试以下操作。

    colossalai.launch_from_openmpi(
    config=<CONFIG>,
    host=args.host,
    port=args.port
    )

    以下是用 OpenMPI 启动多个进程的示例命令。

    mpirun --hostfile <my_hostfile> -np <num_process> python train.py --host <node name or ip> --port 29500
    • --hostfile: 指定一个要运行的主机列表。
    • --np: 设置总共要启动的进程(GPU)的数量。例如,如果 --np 4,4个 python 进程将被初始化以运行 train.py。
    - + \ No newline at end of file diff --git a/zh-Hans/docs/concepts/colossalai_overview/index.html b/zh-Hans/docs/concepts/colossalai_overview/index.html index da64f15e..21f5b4c2 100644 --- a/zh-Hans/docs/concepts/colossalai_overview/index.html +++ b/zh-Hans/docs/concepts/colossalai_overview/index.html @@ -16,13 +16,13 @@ - +

    Colossal-AI 总览

    作者: Shenggui Li, Siqi Mai

    关于 Colossal-AI

    随着深度学习模型规模的发展,向新的训练模式转变是非常重要的。没有并行和优化的传统训练方法将成为过去,新的训练方法是使训练大规模模型高效和节省成本的关键。

    Colossal-AI 是一个集成的系统,为用户提供一套综合的训练方法。您可以找到常见的训练方法,如混合精度训练和梯度累积。此外,我们提供了一系列的并行技术,包括数据并行、张量并行和流水线并行。我们通过不同的多维分布式矩阵乘法算法来优化张量并行。我们还提供了不同的流水线并行方法,使用户能够有效地跨节点扩展他们的模型。更多的高级功能,如卸载,也可以在这个教程文档中找到详细的内容。

    Colossal-AI 的使用

    我们的目标是使 Colossal-AI 易于使用,并且对用户的代码不产生干扰。如果您想使用Colossal-AI,这里有一个简单的一般工作流程。

    Workflow
    1. 准备一个配置文件,指定您要使用的功能和参数。
    2. colossalai.launch 初始化分布式后端。
    3. colossalai.booster 将训练特征注入您的训练组件(如模型、优化器)中。
    4. 进行训练和测试.

    我们将在基本教程部分介绍整个工作流程。

    未来计划

    Colossal-AI 系统将会进一步拓展和优化,包括但不限于:

    1. 分布式操作的优化
    2. 异构系统训练的优化
    3. 从模型大小的维度切入,提升训练速度并维持精度
    4. 拓展现有的并行方法

    我们始终欢迎社区的建议和讨论,如果您遇到任何问题,我们将非常愿意帮助您。您可以在GitHub 提 issue ,或在论坛上创建一个讨论主题。

    - + \ No newline at end of file diff --git a/zh-Hans/docs/concepts/distributed_training/index.html b/zh-Hans/docs/concepts/distributed_training/index.html index 0371a780..b3bd795d 100644 --- a/zh-Hans/docs/concepts/distributed_training/index.html +++ b/zh-Hans/docs/concepts/distributed_training/index.html @@ -16,7 +16,7 @@ - + @@ -25,7 +25,7 @@ 2018年的 BERT-Large有3.45亿的参数,2018年的 GPT-2 有15亿的参数,而2020年的 GPT-3 有1750亿个参数。很明显,模型规模随着时间的推移呈指数级增长。目前最大的模型已经超过了1000多亿个参数。而与较小的模型相比,超大型模型通常能提供更优越的性能。
    图片来源: HuggingFace
2. 数据集规模迅速增加。对于大多数机器学习开发者来说,MNIST 和 CIFAR10 数据集往往是他们训练模型的前几个数据集。然而,与著名的 ImageNet 数据集相比,这些数据集非常小。谷歌甚至有自己的(未公布的)JFT-300M 数据集,它有大约3亿张图片,这比 ImageNet-1k 数据集大了近300倍。
3. 计算能力越来越强。随着半导体行业的进步,显卡变得越来越强大。由于核的数量增多,GPU是深度学习最常见的算力资源。从2012年的 K10 GPU 到2020年的 A100 GPU,计算能力已经增加了几百倍。这使我们能够更快地执行计算密集型任务,而深度学习正是这样一项任务。

    如今,我们接触到的模型可能太大,以致于无法装入一个GPU,而数据集也可能大到足以在一个GPU上训练一百天。这时,只有用不同的并行化技术在多个GPU上训练我们的模型,我们才能完成并加快模型训练,以追求在合理的时间内获得想要的结果。

    分布式训练的基本概念

    分布式训练需要多台机器/GPU。在训练期间,这些设备之间会有通信。为了更好地理解分布式训练,有几个重要的术语需要我们了解清楚。

    • host: 主机(host)是通信网络中的主要设备。在初始化分布式环境时,经常需要它作为一个参数。
    • port: 这里的端口(port)主要是指主机上用于通信的主端口。
    • rank: 在网络中赋予设备的唯一ID。
    • world size: 网络中设备的数量。
    • process group: 进程组(process group)是一个通信网络,包括设备的一个子集。总是有一个默认的进程组,它包含所有的设备。一个子集的设备可以形成一个进程组,以便它们只在组内的设备之间进行通信。
    一个分布式系统的例子

    为了说明这些概念,让我们假设我们有2台机器(也称为节点),每台机器有4个 GPU。当我们在这两台机器上初始化分布式环境时,我们基本上启动了8个进程(每台机器上有4个进程),每个进程被绑定到一个 GPU 上。

    在初始化分布式环境之前,我们需要指定主机(主地址)和端口(主端口)。在这个例子中,我们可以让主机为节点0,端口为一个数字,如29500。所有的8个进程将寻找地址和端口并相互连接,默认的进程组将被创建。默认进程组的 world size 为8,细节如下。

process ID | rank | Node index | GPU index
0 | 0 | 0 | 0
1 | 1 | 0 | 1
2 | 2 | 0 | 2
3 | 3 | 0 | 3
4 | 4 | 1 | 0
5 | 5 | 1 | 1
6 | 6 | 1 | 2
7 | 7 | 1 | 3

    我们还可以创建一个新的进程组。这个新的进程组可以包含任何进程的子集。例如,我们可以创建一个只包含偶数进程的组:

process ID | rank | Node index | GPU index
0 | 0 | 0 | 0
2 | 1 | 0 | 2
4 | 2 | 1 | 0
6 | 3 | 1 | 2

    请注意,rank 是相对于进程组而言的,一个进程在不同的进程组中可以有不同的 rank。最大的 rank 始终是 world size of the process group - 1
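以 PyTorch 的 torch.distributed 为例,下面是一段创建上述“偶数进程组”的示意代码(假设默认进程组已初始化、world size 为 8,仅作说明):

import torch.distributed as dist

# 假设默认进程组已通过 dist.init_process_group(...) 初始化,world size 为 8
even_group = dist.new_group(ranks=[0, 2, 4, 6])   # 创建只包含偶数全局 rank 的新进程组
# 在该进程组内,全局 rank 0/2/4/6 对应的组内 rank 分别为 0/1/2/3
group_rank = dist.get_rank(group=even_group)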

    在进程组中,各进程可以通过两种方式进行通信。

    1. peer-to-peer: 一个进程向另一个进程发送数据。
    2. collective: 一组进程一起执行分散、聚集、all-reduce、广播等操作。
    Collective communication, 来源: PyTorch distributed tutorial
    - + \ No newline at end of file diff --git a/zh-Hans/docs/concepts/paradigms_of_parallelism/index.html b/zh-Hans/docs/concepts/paradigms_of_parallelism/index.html index a03cbde6..a220ce87 100644 --- a/zh-Hans/docs/concepts/paradigms_of_parallelism/index.html +++ b/zh-Hans/docs/concepts/paradigms_of_parallelism/index.html @@ -16,13 +16,13 @@ - +

    并行技术

    作者: Shenggui Li, Siqi Mai

    简介

    随着深度学习的发展,对并行训练的需求越来越大。这是因为模型和数据集越来越大,如果我们坚持使用单 GPU 训练,训练过程的等待将会成为一场噩梦。在本节中,我们将对现有的并行训练方法进行简要介绍。如果您想对这篇文章进行补充,欢迎在GitHub论坛上进行讨论。

    数据并行

    数据并行是最常见的并行形式,因为它很简单。在数据并行训练中,数据集被分割成几个碎片,每个碎片被分配到一个设备上。这相当于沿批次维度对训练过程进行并行化。每个设备将持有一个完整的模型副本,并在分配的数据集碎片上进行训练。在反向传播之后,模型的梯度将被全部减少,以便在不同设备上的模型参数能够保持同步。

    数据并行

    模型并行

    在数据并行训练中,一个明显的特点是每个 GPU 持有整个模型权重的副本。这就带来了冗余问题。另一种并行模式是模型并行,即模型被分割并分布在一个设备阵列上。通常有两种类型的并行:张量并行和流水线并行。张量并行是在一个操作中进行并行计算,如矩阵-矩阵乘法。流水线并行是在各层之间进行并行计算。因此,从另一个角度来看,张量并行可以被看作是层内并行,流水线并行可以被看作是层间并行。

    张量并行

    张量并行训练是将一个张量沿特定维度分成 N 块,每个设备只持有整个张量的 1/N,同时不影响计算图的正确性。这需要额外的通信来确保结果的正确性。

    以一般的矩阵乘法为例,假设我们有 C = AB。我们可以将B沿着列分割成 [B0 B1 B2 ... Bn],每个设备持有一列。然后我们将 A 与每个设备上 B 中的每一列相乘,我们将得到 [AB0 AB1 AB2 ... ABn] 。此刻,每个设备仍然持有一部分的结果,例如,设备(rank=0)持有 AB0。为了确保结果的正确性,我们需要收集全部的结果,并沿列维串联张量。通过这种方式,我们能够将张量分布在设备上,同时确保计算流程保持正确。
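下面用一小段 PyTorch 代码在单进程内模拟上述按列切分的矩阵乘法(仅作示意,并非分布式实现):

import torch

# 单进程内模拟:把 B 沿列切成若干份,分别计算 A @ B_i,再沿列维拼接,结果与直接计算 A @ B 相同
A = torch.randn(4, 8)
B = torch.randn(8, 6)
B_chunks = torch.chunk(B, chunks=2, dim=1)     # [B0, B1],真实场景中分别放在不同设备上
partials = [A @ Bi for Bi in B_chunks]         # 每个“设备”持有一部分结果 A @ Bi
C = torch.cat(partials, dim=1)                 # 收集全部结果并沿列维串联
assert torch.allclose(C, A @ B, atol=1e-5)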

    张量并行

    在 Colossal-AI 中,我们提供了一系列的张量并行方法,即 1D、2D、2.5D 和 3D 张量并行。我们将在高级教程中详细讨论它们。

    相关文章:

    流水线并行

    流水线并行一般来说很容易理解。请您回忆一下您的计算机结构课程,这确实存在于 CPU 设计中。

    流水线并行

流水线并行的核心思想是,模型按层分割成若干块,每块都交给一个设备。在前向传递过程中,每个设备将中间的激活传递给下一个阶段。在后向传递过程中,每个设备将输入张量的梯度传回给前一个流水线阶段。这允许设备同时进行计算,并增加了训练的吞吐量。流水线并行训练的一个缺点是,会存在一些设备空闲等待的气泡(bubble)时间,导致计算资源的浪费。

    Source: GPipe

    相关文章:

    优化器相关的并行

    另一种并行方法和优化器相关,目前这种并行最流行的方法是 ZeRO,即零冗余优化器。 ZeRO 在三个层面上工作,以消除内存冗余(ZeRO需要进行fp16训练)。

    • Level 1: 优化器状态在各进程中被划分。
    • Level 2: 用于更新模型权重的32位梯度也被划分,因此每个进程只存储与其优化器状态划分相对应的梯度。
    • Level 3: 16位模型参数在各进程中被划分。

    相关文章:

    异构系统的并行

    上述方法通常需要大量的 GPU 来训练一个大型模型。然而,人们常常忽略的是,与 GPU 相比,CPU 的内存要大得多。在一个典型的服务器上,CPU 可以轻松拥有几百GB的内存,而每个 GPU 通常只有16或32GB的内存。这促使人们思考为什么 CPU 内存没有被用于分布式训练。

    最近的进展是依靠 CPU 甚至是 NVMe 磁盘来训练大型模型。主要的想法是,在不使用张量时,将其卸载回 CPU 内存或 NVMe 磁盘。通过使用异构系统架构,有可能在一台机器上容纳一个巨大的模型。

    异构系统

    相关文章:

    - + \ No newline at end of file diff --git a/zh-Hans/docs/features/1D_tensor_parallel/index.html b/zh-Hans/docs/features/1D_tensor_parallel/index.html index 5202ee6a..1edd300e 100644 --- a/zh-Hans/docs/features/1D_tensor_parallel/index.html +++ b/zh-Hans/docs/features/1D_tensor_parallel/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    1D 张量并行

    作者: Zhengda Bian, Yongbin Li

    示例代码

    相关论文

    引言

    张量并行将模型参数划分到多个设备上,以减少内存负荷。 Megatron-LM 介绍了一种高效的一维张量并行化实现。

让我们以一个线性层为例,它包括一个 GEMM $Y = XA$。给定 2 个处理器,我们把 $A$ 按列划分为 $[A_1 ~ A_2]$,并在每个处理器上计算 $Y_i = XA_i$,然后形成 $[Y_1 ~ Y_2] = [XA_1 ~ XA_2]$。这被称为列并行方式。

当第二个线性层 $Z=YB$ 跟随上述列并行层的时候,我们把 $B$ 划分为

$$\left[\begin{matrix} B_1 \\ B_2 \end{matrix} \right],$$

这就是所谓的行并行方式。

为了计算

$$Z = [Y_1 ~ Y_2] \left[\begin{matrix} B_1 \\ B_2 \end{matrix} \right],$$

我们首先在每个处理器上计算 $Y_iB_i$,然后使用一个all-reduce操作将结果汇总为 $Z=Y_1B_1+Y_2B_2$。

我们还需要注意,在后向计算中,列并行线性层需要聚合输入张量 $X$,因为在每个处理器 $i$ 上,我们只有 $\dot{X_i}=\dot{Y_i}A_i^T$。因此,我们在各处理器之间进行all-reduce,得到 $\dot{X}=\dot{Y}A^T=\dot{Y_1}A_1^T+\dot{Y_2}A_2^T$。
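下面用一小段单进程的 PyTorch 代码模拟上述“列并行 + 行并行”两个线性层的前向计算,其中的求和对应真实分布式实现中的 all-reduce(仅作示意,并非 ColossalAI 的实际实现):

import torch

X = torch.randn(2, 8)
A = torch.randn(8, 6)
B = torch.randn(6, 4)

# 列并行:A 按列切分,两个“处理器”分别计算 Y_i = X @ A_i
A1, A2 = torch.chunk(A, 2, dim=1)
Y1, Y2 = X @ A1, X @ A2

# 行并行:B 按行切分,每个“处理器”计算 Y_i @ B_i,再求和(对应 all-reduce)得到 Z
B1, B2 = torch.chunk(B, 2, dim=0)
Z = Y1 @ B1 + Y2 @ B2
assert torch.allclose(Z, X @ A @ B, atol=1e-4)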

    效率

给定 $P$ 个处理器,我们展现理论上的计算和内存成本,以及基于环形算法的1D张量并行的前向和后向的通信成本。

计算 | 内存 (参数) | 内存 (activations) | 通信 (带宽) | 通信 (时延)
$O(1/P)$ | $O(1/P)$ | $O(1)$ | $O(2(P-1)/P)$ | $O(2(P-1))$

    使用

    在ColossalAI最新的版本中,1D张量并行由Shardformer功能实现。 关于Shardformer的原理和用法细节请参考当前目录下的Shardformer文档。

    - + \ No newline at end of file diff --git a/zh-Hans/docs/features/2D_tensor_parallel/index.html b/zh-Hans/docs/features/2D_tensor_parallel/index.html index 73df8415..a55215b9 100644 --- a/zh-Hans/docs/features/2D_tensor_parallel/index.html +++ b/zh-Hans/docs/features/2D_tensor_parallel/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    2D 张量并行

    作者: Zhengda Bian, Yongbin Li

    前置教程

    示例代码

    相关论文

    引言

    1D张量并行没有对 activations 进行划分,就大规模模型而言,这也会消耗大量的内存。 为了平均分配计算和内存负荷,在 SUMMA(可扩展的通用矩阵乘法算法)的基础上, 2D张量并行 被引入。

我们还是以线性层 $Y = XA$ 为例。给定 $P=q\times q$ 个处理器(必要条件),如 $q=2$,我们把输入 $X$ 和权重 $A$ 都划分为

$$\left[\begin{matrix} X_{00} & X_{01} \\ X_{10} & X_{11} \end{matrix} \right] \text{~and~} \left[\begin{matrix} A_{00} & A_{01} \\ A_{10} & A_{11} \end{matrix} \right].$$

该计算包括 $q$ 步。当 $t=1$ 时,$X_{i0}$ 在其行中被广播,而 $A_{0j}$ 在其列中被广播。因此,我们有

$$\left[\begin{matrix} X_{00},A_{00} & X_{00},A_{01} \\ X_{10},A_{00} & X_{10},A_{01} \end{matrix} \right].$$

然后我们在每个处理器 $(i, j)$ 上将 $X_{i0}$ 和 $A_{0j}$ 相乘为

$$\left[\begin{matrix} X_{00}A_{00} & X_{00}A_{01} \\ X_{10}A_{00} & X_{10}A_{01} \end{matrix} \right] \quad (1).$$

同样,当 $t=2$ 时,$X_{i1}$ 在其行中被广播,$A_{1j}$ 在其列中被广播,我们将它们相乘为

$$\left[\begin{matrix} X_{01}A_{10} & X_{01}A_{11} \\ X_{11}A_{10} & X_{11}A_{11} \end{matrix} \right] \quad (2).$$

通过将 $(1)$ 和 $(2)$ 相加,我们有

$$Y = XA = \left[\begin{matrix} X_{00}A_{00}+X_{01}A_{10} & X_{00}A_{01}+X_{01}A_{11} \\ X_{10}A_{00}+X_{11}A_{10} & X_{10}A_{01}+X_{11}A_{11} \end{matrix} \right].$$
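下面用一小段单进程的 PyTorch 代码,按上述 2×2 分块方式验证两步计算相加后的结果与直接相乘一致(仅作示意,真实实现中各分块位于不同处理器,并通过广播完成):

import torch

X = torch.randn(4, 4)
A = torch.randn(4, 4)
# 按 2x2 分块,模拟 q=2 的处理器网格
X00, X01, X10, X11 = X[:2, :2], X[:2, 2:], X[2:, :2], X[2:, 2:]
A00, A01, A10, A11 = A[:2, :2], A[:2, 2:], A[2:, :2], A[2:, 2:]

# t=1:广播 X_i0 与 A_0j 并相乘;t=2:广播 X_i1 与 A_1j 并相乘;两步结果相加
Y = torch.cat([
    torch.cat([X00 @ A00 + X01 @ A10, X00 @ A01 + X01 @ A11], dim=1),
    torch.cat([X10 @ A00 + X11 @ A10, X10 @ A01 + X11 @ A11], dim=1),
], dim=0)
assert torch.allclose(Y, X @ A, atol=1e-4)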

    效率

给定 $P=q\times q$ 个处理器,我们展现理论上的计算和内存成本,以及基于环形算法的2D张量并行的前向和后向的通信成本。

计算 | 内存 (参数) | 内存 (activations) | 通信 (带宽) | 通信 (时延)
$O(1/q^2)$ | $O(1/q^2)$ | $O(1/q^2)$ | $O(6(q-1)/q)$ | $O(6(q-1))$

    使用

    ColossalAI的最新版本还暂不支持2D张量并行,但2D张量并行的功能会在未来的版本被集成入Shardformer中。关于Shardformer的原理和用法细节请参考当前目录下的Shardformer文档。

    对于老版本ColossalAI的用户,2D张量并行的用法请参考ColossalAI-Examples - 2D Tensor Parallelism

    - + \ No newline at end of file diff --git a/zh-Hans/docs/features/2p5D_tensor_parallel/index.html b/zh-Hans/docs/features/2p5D_tensor_parallel/index.html index 7d8eee0a..4e86ca87 100644 --- a/zh-Hans/docs/features/2p5D_tensor_parallel/index.html +++ b/zh-Hans/docs/features/2p5D_tensor_parallel/index.html @@ -16,7 +16,7 @@ - + @@ -24,7 +24,7 @@

    2.5D 张量并行

    作者: Zhengda Bian, Yongbin Li

    前置教程

    示例代码

    相关论文

    引言

    与一维张量并行相比,二维并行降低了内存成本,但可能引入更多的通信。因此,2.5D张量并行 在 2.5D SUMMA 的基础上被提出,它通过使用更多的设备来减少通信。

我们还是以线性层 $Y = XA$ 为例。给定 $P=q \times q \times d$ 个处理器(必要条件),如 $q=d=2$,我们把输入 $X$ 划分为 $d\times q$ 行和 $q$ 列

$$\left[\begin{matrix} X_{00} & X_{01} \\ X_{10} & X_{11} \\ X_{20} & X_{21} \\ X_{30} & X_{31}\end{matrix} \right],$$

它可以被重塑为 $d$ 层

$$\left[\begin{matrix} X_{00} & X_{01} \\ X_{10} & X_{11} \end{matrix} \right] \text{~and~}\left[\begin{matrix} X_{20} & X_{21} \\ X_{30} & X_{31} \end{matrix} \right].$$

另外,权重 $A$ 被分割为

$$\left[\begin{matrix} A_{00} & A_{01} \\ A_{10} & A_{11} \end{matrix} \right].$$

对于 $X$ 相关的每一层,我们使用SUMMA算法将 $X$ 与 $A$ 相乘。然后,我们得到输出

$$\left[\begin{matrix} Y_{00}=X_{00}A_{00}+X_{01}A_{10} & Y_{01}=X_{00}A_{01}+X_{01}A_{11} \\ Y_{10}=X_{10}A_{00}+X_{11}A_{10} & Y_{11}=X_{10}A_{01}+X_{11}A_{11} \end{matrix} \right] \text{~and~}$$
$$\left[\begin{matrix} Y_{20}=X_{20}A_{00}+X_{21}A_{10} & Y_{21}=X_{20}A_{01}+X_{21}A_{11} \\ Y_{30}=X_{30}A_{00}+X_{31}A_{10} & Y_{31}=X_{30}A_{01}+X_{31}A_{11} \end{matrix} \right].$$

    效率

给定 $P=q \times q \times d$ 个处理器,我们展现理论上的计算和内存成本,以及基于环形算法的2.5D张量并行的前向和后向的通信成本。

计算 | 内存 (参数) | 内存 (activations) | 通信 (带宽) | 通信 (时延)
$O(1/dq^2)$ | $O(1/q^2)$ | $O(1/dq^2)$ | $O(3(q-1)(d+1)/dq)$ | $O(6(q-1))$

    使用

    ColossalAI的最新版本还暂不支持2.5D张量并行,但2.5D张量并行的功能会在未来的版本被集成入Shardformer中。关于Shardformer的原理和用法细节请参考当前目录下的Shardformer文档。

    对于老版本ColossalAI的用户,2.5D张量并行的用法请参考ColossalAI-Examples - 2.5D Tensor Parallelism

- + \ No newline at end of file diff --git a/zh-Hans/docs/features/3D_tensor_parallel/index.html b/zh-Hans/docs/features/3D_tensor_parallel/index.html index d1c057dc..588dbde6 100644 --- a/zh-Hans/docs/features/3D_tensor_parallel/index.html +++ b/zh-Hans/docs/features/3D_tensor_parallel/index.html @@ -16,7 +16,7 @@ - + @@ -25,7 +25,7 @@ 给定 $P=q \times q \times q$ 个处理器(必要条件),如 $q=2$,我们把输入 $X$ 和权重 $A$ 划分为

$$\left[\begin{matrix} X_{000} & X_{001} \\ X_{010} & X_{011} \\ X_{100} & X_{101} \\ X_{110} & X_{111} \end{matrix} \right] \text{~and~} \left[\begin{matrix} A_{000} & A_{001} & A_{010} & A_{011} \\ A_{100} & A_{101} & A_{110} & A_{111} \end{matrix} \right] \text{~respectively,}$$

其中每个 $X_{ijl}$ 和 $A_{lji}$ 都被存储在处理器 $(i,j,l)$ 上,如下图所示。

然后我们在 $(i, 0...q, l)$ 上收集 $X_{ijl}$,以及在 $(0...q, j, l)$ 上收集 $A_{lji}$。因此,我们在每个处理器 $(i,j,l)$ 上都有 $X_{il}$ 和 $A_{lj}$,以获得 $X_{il}A_{lj}$。最后,我们在 $(i, j, 0...q)$ 上对结果进行 reduce-scatter 得到 $Y_{ijl}$,形成

$$Y= \left[\begin{matrix} Y_{000} & Y_{001} \\ Y_{010} & Y_{011} \\ Y_{100} & Y_{101} \\ Y_{110} & Y_{111} \end{matrix} \right].$$

我们还需要注意,在后向传播中,我们需要 all-gather 梯度 $\dot{Y_{ijl}}$,然后 reduce-scatter 梯度 $\dot{X_{il}}=\dot{Y_{ij}}A_{lj}^T$ 和 $\dot{A_{lj}}=X_{il}^T\dot{Y_{ij}}$。

    效率

给定 $P=q \times q \times q$ 个处理器,我们展现理论上的计算和内存成本,以及基于环形算法的3D张量并行的前向和后向的通信成本。

计算 | 内存 (参数) | 内存 (activations) | 通信 (带宽) | 通信 (时延)
$O(1/q^3)$ | $O(1/q^3)$ | $O(1/q^3)$ | $O(6(q-1)/q^3)$ | $O(6(q-1))$

    使用

    ColossalAI的最新版本还暂不支持3D张量并行,但3D张量并行的功能会在未来的版本被集成入Shardformer中。关于Shardformer的原理和用法细节请参考当前目录下的Shardformer文档。

    对于老版本ColossalAI的用户,3D张量并行的用法请参考ColossalAI-Examples - 3D Tensor Parallelism

    - + \ No newline at end of file diff --git a/zh-Hans/docs/features/cluster_utils/index.html b/zh-Hans/docs/features/cluster_utils/index.html index 7ca3db99..18611c76 100644 --- a/zh-Hans/docs/features/cluster_utils/index.html +++ b/zh-Hans/docs/features/cluster_utils/index.html @@ -16,7 +16,7 @@ - + @@ -62,7 +62,7 @@ with dist_coordinator.priority_execution(): dataset = CIFAR10(root='./data', download=True) - + \ No newline at end of file diff --git a/zh-Hans/docs/features/gradient_accumulation_with_booster/index.html b/zh-Hans/docs/features/gradient_accumulation_with_booster/index.html index 2361780d..3344ef42 100644 --- a/zh-Hans/docs/features/gradient_accumulation_with_booster/index.html +++ b/zh-Hans/docs/features/gradient_accumulation_with_booster/index.html @@ -16,13 +16,13 @@ - +

    梯度累积

    作者: Mingyan Jiang, Baizhou Zhang

    前置教程

    引言

    梯度累积是一种常见的增大训练 batch size 的方式。 在训练大模型时,内存经常会成为瓶颈,并且 batch size 通常会很小(如2),这导致收敛性无法保证。梯度累积将多次迭代的梯度累加,并仅在达到预设迭代次数时更新参数。

    使用

在 Colossal-AI 中使用梯度累积非常简单:booster提供的no_sync方法会返回一个上下文管理器,在该上下文管理器中梯度同步会被取消,梯度将被累积。

    实例

    我们将介绍如何使用梯度累积。在这个例子中,梯度累积次数被设置为4。

    步骤 1. 在 train.py 导入相关库

    创建train.py并导入必要依赖。 torch 的版本应不低于1.8.1。

    import os
    from pathlib import Path

    import torch
    from torchvision import transforms
    from torchvision.datasets import CIFAR10
    from torchvision.models import resnet18

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin
    from colossalai.logging import get_dist_logger
    from colossalai.cluster.dist_coordinator import priority_execution

    步骤 2. 初始化分布式环境

    我们需要初始化分布式环境。为了快速演示,我们使用launch_from_torch。你可以参考 Launch Colossal-AI使用其他初始化方法。

    # initialize distributed setting
    parser = colossalai.get_default_parser()
    args = parser.parse_args()

    # launch from torch
    colossalai.launch_from_torch(config=dict())

    步骤 3. 创建训练组件

构建你的模型、优化器、损失函数、学习率调整器和数据加载器。注意数据集的路径从环境变量DATA获得。你可以通过 export DATA=/path/to/data 在你的机器上设置路径,代码中则通过 Path(os.environ['DATA']) 读取该路径。数据将会被自动下载到该路径。

    # define the training hyperparameters
    BATCH_SIZE = 128
    GRADIENT_ACCUMULATION = 4

    # build resnet
    model = resnet18(num_classes=10)

    # build dataloaders
    with priority_execution():
    train_dataset = CIFAR10(root=Path(os.environ.get('DATA', './data')),
    download=True,
    transform=transforms.Compose([
    transforms.RandomCrop(size=32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
    ]))

    # build criterion
    criterion = torch.nn.CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    步骤 4. 注入特性

创建一个TorchDDPPlugin对象,并将其作为参数实例化Booster,再调用booster.boost注入特性。

    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    model, optimizer, criterion, train_dataloader, _ = booster.boost(model=model,
    optimizer=optimizer,
    criterion=criterion,
    dataloader=train_dataloader)

    步骤 5. 使用booster训练

使用booster构建一个普通的训练循环,验证梯度累积。 param_by_iter 用于记录每次迭代后参数的取值。

param_by_iter = []
optimizer.zero_grad()
    for idx, (img, label) in enumerate(train_dataloader):
    sync_context = booster.no_sync(model)
    img = img.cuda()
    label = label.cuda()
    if idx % (GRADIENT_ACCUMULATION - 1) != 0:
    with sync_context:
    output = model(img)
    train_loss = criterion(output, label)
    train_loss = train_loss / GRADIENT_ACCUMULATION
    booster.backward(train_loss, optimizer)
    else:
    output = model(img)
    train_loss = criterion(output, label)
    train_loss = train_loss / GRADIENT_ACCUMULATION
    booster.backward(train_loss, optimizer)
    optimizer.step()
    optimizer.zero_grad()

    ele_1st = next(model.parameters()).flatten()[0]
    param_by_iter.append(str(ele_1st.item()))

    if idx != 0 and idx % (GRADIENT_ACCUMULATION - 1) == 0:
    break

    for iteration, val in enumerate(param_by_iter):
    print(f'iteration {iteration} - value: {val}')

    if param_by_iter[-1] != param_by_iter[0]:
    print('The parameter is only updated in the last iteration')

    步骤 6. 启动训练脚本

    为了验证梯度累积,我们可以只检查参数值的变化。当设置梯度累加时,仅在最后一步更新参数。您可以使用以下命令运行脚本:

    colossalai run --nproc_per_node 1 train.py

    你将会看到类似下方的文本输出。这展现了梯度虽然在前3个迭代中被计算,但直到最后一次迭代,参数才被更新。

    iteration 0, first 10 elements of param: tensor([-0.0208,  0.0189,  0.0234,  0.0047,  0.0116, -0.0283,  0.0071, -0.0359, -0.0267, -0.0006], device='cuda:0', grad_fn=<SliceBackward0>)
    iteration 1, first 10 elements of param: tensor([-0.0208, 0.0189, 0.0234, 0.0047, 0.0116, -0.0283, 0.0071, -0.0359, -0.0267, -0.0006], device='cuda:0', grad_fn=<SliceBackward0>)
    iteration 2, first 10 elements of param: tensor([-0.0208, 0.0189, 0.0234, 0.0047, 0.0116, -0.0283, 0.0071, -0.0359, -0.0267, -0.0006], device='cuda:0', grad_fn=<SliceBackward0>)
    iteration 3, first 10 elements of param: tensor([-0.0141, 0.0464, 0.0507, 0.0321, 0.0356, -0.0150, 0.0172, -0.0118, 0.0222, 0.0473], device='cuda:0', grad_fn=<SliceBackward0>)

    Gradient Accumulation with the Gemini Plugin

    The plugins that currently support the no_sync() method are TorchDDPPlugin and LowLevelZeroPlugin (with the stage argument set to 1). GeminiPlugin does not support no_sync(), but it supports synchronized gradient accumulation in a PyTorch-like way.

    To enable gradient accumulation, set enable_gradient_accumulation to True when initializing GeminiPlugin. Below is a pseudocode snippet of gradient accumulation with GeminiPlugin:

    ...
    plugin = GeminiPlugin(..., enable_gradient_accumulation=True)
    booster = Booster(plugin=plugin)
    ...

    ...
    for idx, (input, label) in enumerate(train_dataloader):
        output = gemini_model(input.cuda())
        train_loss = criterion(output, label.cuda())
        train_loss = train_loss / GRADIENT_ACCUMULATION
        booster.backward(train_loss, gemini_optimizer)

        if idx % (GRADIENT_ACCUMULATION - 1) == 0:
            gemini_optimizer.step()  # zero_grad is automatically done
    ...
    diff --git a/zh-Hans/docs/features/gradient_clipping_with_booster/index.html b/zh-Hans/docs/features/gradient_clipping_with_booster/index.html

    Gradient Clipping

    Author: Mingyan Jiang

    Prerequisite Tutorials

    Related Papers

    Introduction

    To speed up training and seek the global optimum for better performance, more and more learning rate schedulers have been proposed. By controlling the learning rate, they adjust the descent pace during training, which keeps the gradient step at each iteration at a more uniform scale and makes the descent pace controllable as expected. For this reason gradient clipping, a technique that normalizes the gradient vector so that its length stays within a bound, is indispensable for those who want better model performance.
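
    In the clip-by-norm variant used throughout this tutorial, the rule is: compute the total gradient norm ‖g‖₂ over all parameters, and if ‖g‖₂ > max_norm, rescale every gradient as g ← g · max_norm / ‖g‖₂; otherwise leave the gradients unchanged. Clip-by-value typically clamps each gradient element into a fixed interval instead.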

    When using Colossal-AI you do not have to worry about implementing gradient clipping; we support it in an efficient and convenient way. All you need is one extra call on the boosted optimizer.

    Why You Should Use Gradient Clipping Provided by Colossal-AI

    We do not recommend that users write their own gradient clipping, because naive gradient clipping can fail when features such as tensor parallelism, pipeline parallelism or MoE are applied.

    As illustrated in the figure below, each GPU only owns a portion of the weight parameters of a linear layer. To get the correct norm of the gradient of the linear layer's weight, the norms of the gradient shards on all GPUs must be combined. To make things more complicated, the bias is distributed differently from the weight, so the communication group involved in the summation differs as well.

    (Note: this situation is the old version of 2D parallelism, and the implementation in the code is different. But it is a good example to illustrate the difficulty of unifying all the communication in gradient clipping.)

    Figure: parameter distribution

    Do not worry about it; Colossal-AI has handled it for you.
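
    To make the difficulty concrete, here is a minimal sketch (not Colossal-AI's actual implementation) of what a correct clip-by-norm has to do when every rank only holds a shard of a parameter's gradient: the local squared norms must first be summed over the group that shares the parameter, and only then can the clipping coefficient be applied.

    import torch
    import torch.distributed as dist

    def clip_sharded_grad_norm_(local_params, max_norm: float, group=None):
        # each rank accumulates the squared norm of the gradient shards it owns
        device = next(p.grad.device for p in local_params if p.grad is not None)
        local_sq = torch.zeros((), device=device)
        for p in local_params:
            if p.grad is not None:
                local_sq += p.grad.pow(2).sum()
        # sum the squared norms across all ranks holding shards of these parameters
        dist.all_reduce(local_sq, op=dist.ReduceOp.SUM, group=group)
        total_norm = local_sq.sqrt()
        # every rank applies the same coefficient, so the shards stay consistent
        clip_coef = max_norm / (total_norm + 1e-6)
        if clip_coef < 1:
            for p in local_params:
                if p.grad is not None:
                    p.grad.mul_(clip_coef)
        return total_norm

    As the note above points out, weights and biases may even live in different communication groups, so group would have to be chosen per parameter; this bookkeeping is exactly what Colossal-AI hides from you.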

    Usage

    To use gradient clipping, simply call the optimizer's clip_grad_by_norm or clip_grad_by_value method after the features have been injected with the booster.
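
    In code, the call pattern looks roughly like the sketch below (the fully runnable version is built step by step in the example that follows); loss stands for whatever loss your training loop computes:

    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    model, optimizer, criterion, dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, dataloader, lr_scheduler)

    booster.backward(loss, optimizer)
    optimizer.clip_grad_by_norm(max_norm=0.1)   # clip_grad_by_value is available as well
    optimizer.step()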

    Example

    Below we show how to use gradient clipping. In this example, the gradient clipping norm is set to 0.1, matching the GRADIENT_CLIPPING constant in the code.

    Step 1. Import Libraries in train.py

    Create train.py and import the necessary dependencies.

    import os
    from pathlib import Path

    import torch
    from torchvision import transforms
    from torchvision.datasets import CIFAR10
    from torchvision.models import resnet34
    from tqdm import tqdm

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin
    from colossalai.logging import get_dist_logger
    from colossalai.nn.lr_scheduler import CosineAnnealingLR

    Step 2. Initialize the Distributed Environment

    We need to initialize the distributed environment. For a quick demonstration, we use launch_from_torch. You can refer to Launch Colossal-AI for other initialization methods.

    colossalai.launch_from_torch(config=dict())
    logger = get_dist_logger()

    Step 3. Create Training Components

    Build your model, optimizer, loss function, learning rate scheduler and dataloaders. Note that the root path of the dataset is read from the environment variable DATA. You may set it with export DATA=/path/to/data, and the code falls back to ./data via Path(os.environ.get('DATA', './data')); the dataset will be downloaded to that path automatically.

    # define training hyperparameters
    NUM_EPOCHS = 200
    BATCH_SIZE = 128
    GRADIENT_CLIPPING = 0.1
    # build resnet
    model = resnet34(num_classes=10)
    # build dataloaders
    train_dataset = CIFAR10(root=Path(os.environ.get('DATA', './data')),
                            download=True,
                            transform=transforms.Compose([
                                transforms.RandomCrop(size=32, padding=4),
                                transforms.RandomHorizontalFlip(),
                                transforms.ToTensor(),
                                transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.2023, 0.1994, 0.2010]),
                            ]))
    # build criterion
    criterion = torch.nn.CrossEntropyLoss()

    # optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)

    # lr_scheduler
    lr_scheduler = CosineAnnealingLR(optimizer, total_steps=NUM_EPOCHS)

    Step 4. Inject the Gradient Clipping Feature

    Create a TorchDDPPlugin object, initialize a Booster with it, and use the booster to inject the relevant features.

    plugin = TorchDDPPlugin()
    booster = Booster(plugin=plugin)
    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    model, optimizer, criterion, train_dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, train_dataloader, lr_scheduler)

    Step 5. Train with Booster

    Train the model with the booster. The loop below clips the gradients before every optimizer step.

    # verify gradient clipping
    model.train()
    for idx, (img, label) in enumerate(train_dataloader):
        img = img.cuda()
        label = label.cuda()

        model.zero_grad()
        output = model(img)
        train_loss = criterion(output, label)
        booster.backward(train_loss, optimizer)
        # clip the gradient norm before the optimizer step
        optimizer.clip_grad_by_norm(max_norm=GRADIENT_CLIPPING)
        optimizer.step()
        lr_scheduler.step()

        ele_1st = next(model.parameters()).flatten()[0]
        logger.info(f'iteration {idx}, loss: {train_loss}, 1st element of parameters: {ele_1st.item()}')

        # only run for 4 iterations
        if idx == 3:
            break

    Step 6. Launch the Training Script

    You can run the script with the following command:

    colossalai run --nproc_per_node 1 train.py
    diff --git a/zh-Hans/docs/features/lazy_init/index.html b/zh-Hans/docs/features/lazy_init/index.html
  • module (nn.Module) -- Target nn.Module
  • verbose (bool) -- Whether to print lazy initialization rate. Defaults to False.
  • Description
    Initialize all `Parameter` from `LazyTensor`. This function will modify the module in-place.

    Example

    import colossalai
    from colossalai.lazy import LazyInitContext
    from colossalai.booster import Booster
    from colossalai.booster.plugin import GeminiPlugin

    from transformers import LlamaForCausalLM, LlamaConfig, BertForPreTraining

    colossalai.launch({})
    plugin = GeminiPlugin()
    booster = Booster(plugin)

    # 1. Initialize model from scratch
    # Initialization on cuda will accelerate the initialization process but take more GPU memory.
    with LazyInitContext(default_device="cuda"):
        model = LlamaForCausalLM(LlamaConfig(hidden_size=64, intermediate_size=172, num_hidden_layers=4, num_attention_heads=4))
    model, *_ = booster.boost(model)

    # 2. Initialize model from pretrained
    with LazyInitContext():
        model = BertForPreTraining.from_pretrained("prajjwal1/bert-tiny")
    model, *_ = booster.boost(model)

    ⚠️ Loading pretrained models with lazy initialization is supported on colossalai>0.3.3 or the main branch.

    Limitations

    As mentioned, lazy initialization must be used together with the booster, and only a few plugins support it.

    Plugin            Supported    Remarks
    Gemini            Yes
    Hybrid Parallel   Yes
    Low Level Zero    No           Not needed
    Torch DDP         No           Incompatible
    Torch FSDP        No           Incompatible

    Not all models can be lazily initialized. In some cases a portion of the parameters/buffers may be initialized early. But don't worry: this portion is usually only a small fraction of the whole model.

    And some models are not supported at all and will raise an error. We have tested models in torchvision, diffusers, timm, transformers, torchaudio and torchrec. The following models are not supported:

    Model                          Category
    wav2vec2_base                  torchaudio
    hubert_base                    torchaudio
    ViTModel                       transformers
    ViTForMaskedImageModeling      transformers
    ViTForImageClassification      transformers
    Blip2Model                     transformers
    Blip2ForConditionalGeneration  transformers
    diff --git a/zh-Hans/docs/features/mixed_precision_training_with_booster/index.html b/zh-Hans/docs/features/mixed_precision_training_with_booster/index.html

    verbose (bool) -- if set to True, will print debug info.

    Description

    Precision for mixed precision training in FP16 using naive AMP.

    When using colossalai.booster, you first need to instantiate a model, an optimizer and a criterion. The output model is converted into an AMP model with lower memory consumption. If your input model is already too large to fit in GPU memory, please instantiate your model with dtype=torch.float16; otherwise, try a smaller model or more parallel training techniques!
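
    As a rough sketch of that last suggestion (this is a generic PyTorch/Transformers pattern, not a Colossal-AI requirement), a model can be materialized directly in half precision before being handed to the booster:

    import torch
    from transformers import GPT2Config, GPT2LMHeadModel

    # build the weights directly in FP16 so even the un-boosted copy fits in GPU memory
    model = GPT2LMHeadModel(GPT2Config()).to(dtype=torch.float16)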

    Example

    Below we show how to use AMP in Colossal-AI. In this example we use Torch AMP.

    Step 1. Import Libraries in train.py

    Create train.py and import the necessary dependencies. Remember to install scipy and timm with the command pip install timm scipy.

    import os
    from pathlib import Path

    import torch
    from timm.models import vit_base_patch16_224
    from titans.utils import barrier_context
    from torchvision import datasets, transforms

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import TorchDDPPlugin
    from colossalai.logging import get_dist_logger
    from colossalai.nn.lr_scheduler import LinearWarmupLR

    Step 2. Initialize the Distributed Environment

    We need to initialize the distributed environment. For a quick demonstration, we use launch_from_torch. You can refer to Launch Colossal-AI for other initialization methods.

    # initialize distributed setting
    parser = colossalai.get_default_parser()
    args = parser.parse_args()

    # launch from torch
    colossalai.launch_from_torch(config=dict())

    Step 3. Create Training Components

    Build your model, optimizer, loss function, learning rate scheduler and dataloaders. Note that the root path of the dataset is read from the environment variable DATA. You may set it with export DATA=/path/to/data or modify Path(os.environ['DATA']) to a path on your machine; the dataset will be downloaded to that path automatically.

    # define the constants
    NUM_EPOCHS = 2
    BATCH_SIZE = 128
    # build model
    model = vit_base_patch16_224(drop_rate=0.1)

    # build dataloader
    train_dataset = datasets.Caltech101(
        root=Path(os.environ['DATA']),
        download=True,
        transform=transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            Gray2RGB(),  # custom transform, not defined in this snippet, that expands grayscale images to 3 channels
            transforms.Normalize([0.5, 0.5, 0.5],
                                 [0.5, 0.5, 0.5])
        ]))

    # build optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, weight_decay=0.1)

    # build loss
    criterion = torch.nn.CrossEntropyLoss()

    # lr_scheduler
    lr_scheduler = LinearWarmupLR(optimizer, warmup_steps=50, total_steps=NUM_EPOCHS)

    Step 4. Insert AMP

    Create a MixedPrecision object (if needed) and a TorchDDPPlugin object, then call booster.boost to convert the training components to FP16 mode.

    plugin = TorchDDPPlugin()
    train_dataloader = plugin.prepare_dataloader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, drop_last=True)
    booster = Booster(mixed_precision='fp16', plugin=plugin)

    # if you need to customize the config, do like this
    # >>> from colossalai.mixed_precision import FP16TorchMixedPrecision
    # >>> mixed_precision = FP16TorchMixedPrecision(
    # >>> init_scale=2.**16,
    # >>> growth_factor=2.0,
    # >>> backoff_factor=0.5,
    # >>> growth_interval=2000)
    # >>> plugin = TorchDDPPlugin()
    # >>> booster = Booster(mixed_precision=mixed_precision, plugin=plugin)

    # boost model, optimizer, criterion, dataloader, lr_scheduler
    model, optimizer, criterion, train_dataloader, lr_scheduler = booster.boost(model, optimizer, criterion, train_dataloader, lr_scheduler)

    Step 5. Train with Booster

    Build an ordinary training loop with the booster.

    model.train()
    for epoch in range(NUM_EPOCHS):
        for img, label in train_dataloader:
            img = img.cuda()
            label = label.cuda()
            optimizer.zero_grad()
            output = model(img)
            loss = criterion(output, label)
            booster.backward(loss, optimizer)
            optimizer.step()
            lr_scheduler.step()

    Step 6. Launch the Training Script

    Use the following command to launch the training script. You can change --nproc_per_node to use a different number of GPUs.

    colossalai run --nproc_per_node 1 train.py
    diff --git a/zh-Hans/docs/features/nvme_offload/index.html b/zh-Hans/docs/features/nvme_offload/index.html

    Reference links from the page: Adam: A Method for Stochastic Optimization (https://arxiv.org/abs/1412.6980); On the Convergence of Adam and Beyond (https://openreview.net/forum?id=ryQu7f-RZ)

    diff --git a/zh-Hans/docs/features/pipeline_parallel/index.html b/zh-Hans/docs/features/pipeline_parallel/index.html

    Pipeline Parallelism

    Author: Guangyang Lu, Hongxin Liu, Yongbin Li, Mingyan Jiang

    Prerequisite Tutorials

    Example Code

    Related Papers

    Quick Introduction

    In this tutorial, you will learn how to use pipeline parallelism. In Colossal-AI, we use the 1F1B pipeline introduced by NVIDIA. Since ViT on ImageNet would be too large for this example, we use Bert on the GLUE dataset instead.

    Table of Contents

    In this tutorial, we will cover:

    1. Introduce the 1F1B pipeline;
    2. Use non-interleaved and interleaved schedules;
    3. Fine-tune Bert with pipeline parallelism.

    Understanding the 1F1B Pipeline

    First, we introduce GPipe to give you a better understanding.

    Figure 1: GPipe, from the Megatron-LM paper

    As you can see, with GPipe the backward computation of a batch only starts after the forward computation of all its microbatches has finished.

    In general, 1F1B (one forward pass followed by one backward pass) is more efficient than GPipe (in terms of memory, or of both memory and time). There are two schedules for the 1F1B pipeline, the non-interleaved and the interleaved one, illustrated below.

    Figure 2: image from the Megatron-LM paper. The upper part shows the default non-interleaved schedule, and the lower part shows the interleaved schedule.

    Non-interleaved Schedule

    The non-interleaved schedule can be divided into three stages. The first is the warm-up stage, in which the processors perform different numbers of forward passes. In the following stage, each processor performs one forward pass followed by one backward pass. The processors finish the remaining backward passes in the last stage.

    This schedule is more memory-efficient than GPipe. However, it takes the same amount of time as GPipe to finish a round of computation.

    Interleaved Schedule

    This schedule requires the number of microbatches to be an integer multiple of the number of pipeline stages (for example, with 4 pipeline stages you could use 4, 8 or 12 microbatches).

    In this schedule, each device can perform computation for several subsets of layers (called model chunks) instead of one contiguous set of layers. For instance, where device 1 previously held layers 1-4 and device 2 held layers 5-8 and so on, now device 1 holds layers 1, 2, 9, 10 and device 2 holds layers 3, 4, 11, 12 and so on. In this mode, each device in the pipeline is assigned multiple pipeline stages, and each pipeline stage involves less computation.

    This schedule saves both memory and time.

    Implementation in Colossal-AI

    In Colossal-AI, pipeline parallelism relies on the scheduler and Shardformer. We provide both a non-interleaved schedule (OneForwardOneBackwardSchedule) and an interleaved schedule (InterleavedSchedule). Shardformer performs the layer splitting of the model and replaces the model's forward function so that it is compatible with the scheduler.

    In Colossal-AI, HybridParallelPlugin encapsulates the pipeline execution strategy. It manages the pipeline-parallel communication groups and a scheduler. When a model is boosted with this plugin, its layers are split by calling the shardformer.optimize function, and execute_pipeline is then called to run the parts of the model with the scheduler. For now HybridParallelPlugin only supports OneForwardOneBackwardSchedule; InterleavedSchedule will be supported soon.

    You can customize your parallel strategy by setting the arguments of HybridParallelPlugin. For more usage details, please refer to the HybridParallelPlugin documentation.

    Fine-tune Bert with the Pipeline

    First we define the training components we need, including the model, dataloader, optimizer, lr_scheduler, criterion and so on:

    import argparse
    from typing import Callable, List, Union

    import torch
    import torch.nn as nn
    from data import GLUEDataBuilder
    from torch.optim import Adam, Optimizer
    from torch.optim.lr_scheduler import _LRScheduler as LRScheduler
    from torch.utils.data import DataLoader
    from tqdm import tqdm
    from transformers import (
    AlbertForSequenceClassification,
    AutoConfig,
    BertForSequenceClassification,
    get_linear_schedule_with_warmup,
    )

    import colossalai
    from colossalai.booster import Booster
    from colossalai.booster.plugin import HybridParallelPlugin
    from colossalai.cluster import DistCoordinator
    from colossalai.nn.optimizer import HybridAdam

    # Define some config
    NUM_EPOCHS = 3
    BATCH_SIZE = 32
    LEARNING_RATE = 2.4e-5
    WEIGHT_DECAY = 0.01
    WARMUP_FRACTION = 0.1

    coordinator = DistCoordinator()

    def move_to_cuda(batch):
        return {k: v.cuda() for k, v in batch.items()}

    # Define 'criterion' function with two inputs, which will be passed to 'execute_pipeline'.
    def _criterion(outputs, inputs):
        return outputs.loss

    # Define optimizer
    lr = LEARNING_RATE
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": WEIGHT_DECAY,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]

    optimizer = HybridAdam(optimizer_grouped_parameters, lr=lr, eps=1e-8)


    # Define lr_scheduler
    total_steps = len(train_dataloader) * NUM_EPOCHS
    num_warmup_steps = int(WARMUP_FRACTION * total_steps)
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_warmup_steps,
        num_training_steps=total_steps,
    )


    # Define Bert model
    model = BertForSequenceClassification.from_pretrained("bert-base-uncased", config=cfg).cuda()

    # Define a dataloader
    data_builder = GLUEDataBuilder(model_name,
                                   plugin,
                                   args.task,
                                   train_batch_size=BATCH_SIZE,
                                   eval_batch_size=BATCH_SIZE)
    train_dataloader = data_builder.train_dataloader()

    Initialize a booster with HybridParallelPlugin.

    plugin = HybridParallelPlugin(tp_size=1,
                                  pp_size=2,
                                  num_microbatches=None,
                                  microbatch_size=1,
                                  enable_all_optimization=True,
                                  zero_stage=1,
                                  precision='fp16',
                                  initial_scale=1)
    booster = Booster(plugin=plugin)

    Inject the optimization features into the training components with the booster.

    model, optimizer, _criterion, _, lr_scheduler = booster.boost(model,
                                                                  optimizer,
                                                                  criterion=_criterion,
                                                                  lr_scheduler=lr_scheduler)

    Finally, train the model.

    # Define a train function
    def train_epoch(epoch: int, model: nn.Module, optimizer: Optimizer, _criterion: Callable, lr_scheduler: LRScheduler,
                    train_dataloader: DataLoader, booster: Booster, coordinator: DistCoordinator):

        is_pp_last_stage = booster.plugin.stage_manager.is_last_stage()
        total_step = len(train_dataloader)

        model.train()
        optimizer.zero_grad()
        # convert train_dataloader to an iterator
        train_dataloader_iter = iter(train_dataloader)
        with tqdm(range(total_step),
                  desc=f'Epoch [{epoch + 1}/{NUM_EPOCHS}]',
                  disable=not (is_pp_last_stage)) as pbar:
            # Forward pass
            for _ in pbar:
                outputs = booster.execute_pipeline(train_dataloader_iter,
                                                   model,
                                                   _criterion,
                                                   optimizer,
                                                   return_loss=True,
                                                   return_outputs=True)
                # Backward and optimize
                if is_pp_last_stage:
                    loss = outputs['loss']
                    pbar.set_postfix({'loss': loss.item()})

                optimizer.step()
                optimizer.zero_grad()
                lr_scheduler.step()

    # Train model
    for epoch in range(NUM_EPOCHS):
        train_epoch(epoch, model, optimizer, _criterion, lr_scheduler, train_dataloader, booster, coordinator)

    We use 2 pipeline stages and the batch is split into microbatches of size 1 (all of these values can be set to whatever suits your actual scenario).

    diff --git a/zh-Hans/docs/features/shardformer/index.html b/zh-Hans/docs/features/shardformer/index.html

  • A Reduce-Scatter is needed to split the row-linear layer's output along the sequence dimension across all devices, and an All-Gather is needed to obtain the complete gradients.

  • The NCCL implementation of All-Reduce adopts the Ring All-Reduce method, which consists of one Reduce-Scatter and one All-Gather of equal cost. Therefore, compared with tensor parallelism alone, sequence parallelism does not introduce extra communication overhead (a self-contained sketch of this equivalence follows this list).

  • One point to note is that when sequence parallelism is applied to the Column Linear layer of tensor parallelism, the complete input is needed during the backward computation of the gradients. During the forward pass only the input split along the sequence dimension is kept, with a tensor shape such as (batch, sequence_len/k, hidden_states). Therefore, an extra All-Gather is required to obtain the complete input for the gradient computation. In the implementation, however, this gather can be overlapped with the gradient computation, so it does not introduce extra communication overhead (this corresponds to the enable_sequence_overlap parameter in Shardformer).
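
    The sketch below (assuming PyTorch >= 1.13 with an NCCL backend; it is not part of Shardformer itself) demonstrates the equivalence mentioned above: one Reduce-Scatter followed by one All-Gather reproduces exactly what a single All-Reduce computes, which is why sequence parallelism pays the same communication cost.

    import torch
    import torch.distributed as dist

    def main():
        dist.init_process_group(backend="nccl")
        rank, world_size = dist.get_rank(), dist.get_world_size()
        torch.cuda.set_device(rank)

        # a tensor with different contents on every rank
        x = torch.arange(world_size * 4, dtype=torch.float32, device="cuda") * (rank + 1)

        # path 1: a single all-reduce
        all_reduced = x.clone()
        dist.all_reduce(all_reduced)

        # path 2: reduce-scatter (each rank keeps one shard of the sum) ...
        shard = torch.empty(4, dtype=torch.float32, device="cuda")
        dist.reduce_scatter_tensor(shard, x.clone())
        # ... followed by all-gather to reassemble the full summed tensor
        gathered = torch.empty_like(x)
        dist.all_gather_into_tensor(gathered, shard)

        assert torch.allclose(all_reduced, gathered)
        if rank == 0:
            print("reduce-scatter + all-gather == all-reduce")

    if __name__ == "__main__":
        main()  # launch with: torchrun --nproc_per_node <k> this_script.py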

    diff --git a/zh-Hans/docs/features/zero_with_chunk/index.html b/zh-Hans/docs/features/zero_with_chunk/index.html

    Zero Redundancy Optimizer with Chunk-based Memory Management (ZeRO)

    Author: Hongxin Liu, Jiarui Fang, Zijian Ye

    Prerequisite Tutorials:

    Example Code

    Related Papers

    Introduction

    The Zero Redundancy Optimizer (ZeRO) removes the memory redundancy across data-parallel processes by partitioning the three model states (optimizer states, gradients and parameters) instead of replicating them. Compared with classic data parallelism, this greatly improves memory efficiency while keeping the computational granularity and communication efficiency. A worked memory estimate follows the list below.

    1. Sharded optimizer states: the optimizer states (e.g. for the Adam optimizer, the 32-bit weights and the first and second moment estimates) are partitioned across the processes, so that each process only updates its own partition.

    2. Sharded gradients: after the gradients are reduced within the data-parallel process group, the gradient tensors are also partitioned, so that each process only stores the gradients corresponding to its partition of the optimizer states. Note that Colossal-AI converts the gradients to FP32 format to participate in the parameter update.

    3. Sharded parameters: the 16-bit model parameters are partitioned across the processes of a data-parallel group.

    4. Gemini: a dynamic heterogeneous memory-space manager for parameters, gradients and optimizer states.
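
    As a back-of-the-envelope illustration (the per-byte accounting follows the ZeRO paper's mixed-precision Adam setup, not a Colossal-AI measurement): for a model with Ψ parameters, the FP16 parameters take 2Ψ bytes, the FP16 gradients 2Ψ bytes, and the optimizer states (FP32 parameters, momentum and variance) 12Ψ bytes, i.e. about 16Ψ bytes of model states per device under plain data parallelism. Sharding all three states across N devices brings this down to roughly 16Ψ/N bytes per device; for a 10-billion-parameter model on 64 GPUs, that is about 160 GB / 64 ≈ 2.5 GB of model states per GPU instead of 160 GB on every GPU.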

    In addition, we also introduce the Zero Redundancy Optimizer with chunk-based memory management.

    When using ZeRO, we store the model in a distributed manner by sharding the parameters, which has the advantage that the memory load of every node is perfectly balanced. But this approach has several drawbacks. First, a temporary memory buffer has to be allocated for each communication and released afterwards, which leads to memory fragmentation. Second, communicating at the granularity of individual tensors makes poor use of the network bandwidth; generally speaking, the longer the transmitted message, the higher the bandwidth utilization.

    Leveraging the Chunk mechanism introduced in ColossalAI v0.1.8, we can improve the performance of ZeRO. We store a group of parameters that are consecutive in execution order in a single chunk (a chunk is a contiguous block of memory), and every chunk has the same size. Organizing memory by chunks ensures efficient use of the network bandwidth between PCI-e and GPU-GPU, reduces the number of communications, and avoids potential memory fragmentation.

    Before v0.1.8, ZeRO had a high communication cost for parameter gathering: if a parameter was used several times in consecutive computations, it was communicated several times, which is inefficient. This situation is very common when activation checkpointing is used, because the forward pass of a checkpointed module is recomputed during the backward pass, and in that case ZeRO is not efficient.

    Taking GPT as an example, checkpointing is applied to every GPT Block, and each GPT Block contains a Self-Attention layer and an MLP layer. During the backward pass, the forward of the Self-Attention layer and the MLP layer is recomputed in order, and then the backward of the MLP layer and the Self-Attention layer is computed in order. With the chunk mechanism, we place the Self-Attention layer and the MLP layer in the same chunk, so no extra communication is needed within the backward of each GPT Block.

    Besides, the communication and memory movement of small tensors cannot fully utilize the NVLINK and PCIe bandwidth, and every communication or memory movement carries a kernel launch overhead. With chunks, many small-tensor communications and memory movements are turned into one large-tensor communication and memory movement, which both improves bandwidth utilization and reduces kernel launch overhead.

    We also provide a lightweight chunk search mechanism to help users automatically find the chunk size with the smallest memory fragmentation.

    Usage

    GeminiDDP

    We use GeminiDDP to adopt ZeRO with chunk-based memory management. It is our newly wrapped torch.Module that uses ZeRO-DP and Gemini, where ZeRO is used for parallelism and Gemini is used for memory management.

    Gemini supports lazy initialization, which can save GPU memory when initializing large models on multiple devices.

    If your model has N billion parameters and your GPU memory is M GB, we recommend using LazyInitContext when 4N >= M. Otherwise, LazyInitContext is optional.

    with LazyInitContext(default_device=torch.device('cuda')):
        model = gpt2_medium(checkpoint=True)

    We provide the user-friendly Booster API and recommend that you use it. If you still want to use the lower-level API, you can continue reading the rest of this section.

    Wrap the model with GeminiDDP.

    model = GeminiDDP(model, hidden_dim=hidden_dim, min_chunk_size_m=min_chunk_size_m)

    hidden_dim is the hidden dimension of the DNN. Users can provide this argument to speed up the chunk-size search; it is fine not to know it before training, in which case the default value 1024 is used. min_chunk_size_m is the minimum chunk size in mega (2^20) elements; if the total size of the parameters is still smaller than the minimum chunk size, all parameters are compressed into one small chunk.

    Initialize the optimizer.

    optimizer = GeminiAdamOptimizer(model, lr=1e-3, initial_scale=2**5)

    Training

    optimizer.zero_grad()
    outputs = model(input_ids, attn_mask)
    loss = criterion(outputs, input_ids)
    optimizer.backward(loss)
    optimizer.step()

    ⚠️ Note: do not use loss.backward(); the standard way to write it is optimizer.backward(loss).

    Training GPT

    In this example, we use Hugging Face Transformers and take GPT2 Medium as an example. You must install transformers before running this example.

    For simplicity, we only use randomly generated data here.

    We only need to import GPT2LMHeadModel from Hugging Face transformers to define our model; users do not need to define or modify the model themselves, which makes it convenient to use.

    Define the GPT model:

    class GPTLMModel(nn.Module):

        def __init__(self,
                     hidden_size=768,
                     num_layers=12,
                     num_attention_heads=12,
                     max_seq_len=1024,
                     vocab_size=50257,
                     checkpoint=False):
            super().__init__()
            self.checkpoint = checkpoint
            self.model = GPT2LMHeadModel(
                GPT2Config(n_embd=hidden_size,
                           n_layer=num_layers,
                           n_head=num_attention_heads,
                           n_positions=max_seq_len,
                           n_ctx=max_seq_len,
                           vocab_size=vocab_size))
            if checkpoint:
                self.model.gradient_checkpointing_enable()

        def forward(self, input_ids, attention_mask):
            return self.model(input_ids=input_ids, attention_mask=attention_mask, use_cache=not self.checkpoint)[0]

    def gpt2_medium(checkpoint=False):
        return GPTLMModel(hidden_size=1024, num_layers=24, num_attention_heads=16, checkpoint=checkpoint)

    Define the loss function:

    class GPTLMLoss(nn.Module):

        def __init__(self):
            super().__init__()
            self.loss_fn = nn.CrossEntropyLoss()

        def forward(self, logits, labels):
            shift_logits = logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            return self.loss_fn(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

    Write a function that produces random inputs:

    def get_data(batch_size, seq_len, vocab_size):
        input_ids = torch.randint(0, vocab_size, (batch_size, seq_len), device=torch.cuda.current_device())
        attention_mask = torch.ones_like(input_ids)
        return input_ids, attention_mask

    Finally, use the booster to inject the Gemini + ZeRO DDP feature and define the training loop. Since we are pre-training GPT in this example, only a simple language-model loss is used:

    from colossalai.nn.optimizer import HybridAdam

    from colossalai.booster import Booster
    from colossalai.lazy import LazyInitContext
    from colossalai.booster.plugin import GeminiPlugin

    def main():
        args = parse_args()
        BATCH_SIZE = 8
        SEQ_LEN = 1024
        VOCAB_SIZE = 50257
        NUM_STEPS = 10
        colossalai.launch_from_torch(config={})

        # build criterion
        criterion = GPTLMLoss()

        torch.manual_seed(123)
        # build GPT model lazily so that it can be materialized by the plugin
        with LazyInitContext(default_device=torch.device('cuda')):
            model = gpt2_medium(checkpoint=True)

        # build optimizer after the model so that model.parameters() is available
        optimizer = HybridAdam(model.parameters(), lr=0.001)

        # Gemini + ZeRO DP
        plugin = GeminiPlugin(max_norm=1.0, initial_scale=2**5)
        booster = Booster(plugin=plugin)
        model, optimizer, criterion, _, _ = booster.boost(model, optimizer, criterion)

        torch.cuda.synchronize()
        model.train()
        for n in range(NUM_STEPS):
            # we just use randomly generated data here
            input_ids, attn_mask = get_data(BATCH_SIZE, SEQ_LEN, VOCAB_SIZE)
            optimizer.zero_grad()
            outputs = model(input_ids, attn_mask)
            loss = criterion(outputs, input_ids)
            booster.backward(loss, optimizer)
            optimizer.step()

        torch.cuda.synchronize()

    ⚠️ Note: if you use the Gemini module, do not use the gradient accumulation approach we mentioned earlier. The complete example code is available in Train GPT with Colossal-AI.

    diff --git a/zh-Hans/docs/get_started/installation/index.html b/zh-Hans/docs/get_started/installation/index.html

    Installation

    Requirements:

    If you run into any installation problem, feel free to report it to this project.

    Install from PyPI

    You can install Colossal-AI directly from PyPI with the following command.

    pip install colossalai

    Note: only Linux is supported at the moment.

    If you want to build the PyTorch extensions during installation, you can add CUDA_EXT=1. Without it, the PyTorch extensions are built automatically at runtime.

    CUDA_EXT=1 pip install colossalai

    Install from Source

    This documentation is kept in line with the main branch of the repository. If you run into any problems, feel free to open an issue.

    git clone https://github.com/hpcaitech/ColossalAI.git
    cd ColossalAI

    # install dependency
    pip install -r requirements/requirements.txt

    # install colossalai
    CUDA_EXT=1 pip install .

    If you don't want to install and enable fused CUDA kernels (installation is mandatory when using fused optimizers), you can simply omit CUDA_EXT=1.

    pip install .

    If you are using CUDA 10.2, you can still install Colossal-AI from source, but you need to manually download the cub library and copy it to the corresponding directory.

    # clone the repository
    git clone https://github.com/hpcaitech/ColossalAI.git
    cd ColossalAI

    # download the cub library
    wget https://github.com/NVIDIA/cub/archive/refs/tags/1.8.0.zip
    unzip 1.8.0.zip
    cp -r cub-1.8.0/cub/ colossalai/kernel/cuda_native/csrc/kernels/include/

    # install
    CUDA_EXT=1 pip install .
    diff --git a/zh-Hans/docs/get_started/reading_roadmap/index.html b/zh-Hans/docs/get_started/reading_roadmap/index.html

    Reading Roadmap

    Colossal-AI provides you with a collection of parallel training components. Our goal is to let you develop distributed deep learning models just as easily as you would write a single-GPU model. ColossalAI offers easy-to-use APIs to help you kick off your training process. To better understand how ColossalAI works, we recommend reading this documentation in the following order.

    • If you are not familiar with distributed systems, or have never used Colossal-AI, you can first browse the Concepts section to learn what we are trying to achieve and pick up some background knowledge about distributed training.
    • Next, you can follow the Basics tutorials. This section covers the details of how to use Colossal-AI.
    • After that, you can try it out yourself! The Features section will help you use Colossal-AI to accelerate your model training. We provide a code repository for each tutorial. These tutorials cover the basic usage of Colossal-AI for simple features such as data parallelism and mixed precision training.
    • Finally, if you want to apply more advanced techniques, such as running hybrid parallelism on GPT-3, come to the Advanced Tutorials section and learn how to build your own model!

    We always welcome suggestions and discussions from the community. If you run into any problem, we will be very glad to help. You can open an issue on GitHub or create a discussion topic on the forum.

    diff --git a/zh-Hans/docs/get_started/run_demo/index.html b/zh-Hans/docs/get_started/run_demo/index.html

    Quick Demo

    Colossal-AI is an integrated large-scale deep learning system with efficient parallelization techniques. The system can accelerate model training on distributed systems with multiple GPUs by applying parallelization techniques, and it can also run on systems with only one GPU. Below are quick demos showing how to use Colossal-AI.

    Single GPU

    Colossal-AI can be used to train deep learning models on systems with only one GPU and achieve baseline performance. We provide an example of training ResNet on the CIFAR10 dataset that requires only one GPU. You can find the example in ColossalAI-Examples; detailed instructions are in its README.md.

    Multiple GPUs

    Colossal-AI can be used to train deep learning models on distributed systems with multiple GPUs and drastically accelerate the training process by applying efficient parallelization techniques, several of which are introduced below for you to try.

    1. Data Parallelism

    You can use the same ResNet example as in the single-GPU demo above. By setting --nproc_per_node to the number of GPUs on your machine, the example runs with data parallelism.
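
    For instance, assuming the example's entry script is named train.py (check the example's README.md for the exact file name), launching it on a 4-GPU machine looks like:

    colossalai run --nproc_per_node 4 train.py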

    2. Hybrid Parallelism

    Hybrid parallelism includes data, tensor and pipeline parallelism. In Colossal-AI, we support different types of tensor parallelism (namely 1D, 2D, 2.5D and 3D). You can switch between different tensor parallelism modes by simply changing the configuration in config.py. You can refer to the GPT example; more details can be found in its README.md.
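
    The exact configuration keys are defined by the GPT example itself, so the snippet below is only an illustrative guess at what such a config.py switch looks like, based on older Colossal-AI examples; treat every field name as an assumption and follow the example's README.md for the authoritative version.

    # config.py (hypothetical sketch)
    TENSOR_PARALLEL_SIZE = 4
    TENSOR_PARALLEL_MODE = '2d'   # one of '1d', '2d', '2.5d', '3d'

    parallel = dict(
        pipeline=1,
        tensor=dict(size=TENSOR_PARALLEL_SIZE, mode=TENSOR_PARALLEL_MODE),
    )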

    3. MoE Parallelism

    We provide a ViT-MoE example to verify MoE parallelism. WideNet uses Mixture of Experts (MoE) to achieve better performance. More details can be found in our tutorial: Integrate Mixture of Experts Into Your Model.

    4. Sequence Parallelism

    Sequence parallelism is designed to tackle memory efficiency and the sequence-length limit in NLP tasks. We provide a Sequence Parallelism example in ColossalAI-Examples. You can follow its README.md to run the code.

    diff --git a/zh-Hans/index.html b/zh-Hans/index.html

    Unparalleled Speed and Scale

    Learn about the distributed techniques built into Colossal-AI to fully optimize the runtime performance of your large neural networks.

    Need professional help? Contact our distributed-computing experts.

    Learn Colossal-AI

    Quick Start

    Start your first Colossal-AI project.

    Concepts

    Understand how Colossal-AI works.

    Command Line Tool

    The Colossal-AI command line tool manages your Colossal-AI projects.

    Configuration

    Define the configuration of your Colossal-AI project to fit your needs.

    Developing with Colossal-AI?

    If you like our open-source Colossal-AI software and use it to develop deep learning projects, please let us know.

    diff --git a/zh-Hans/markdown-page/index.html b/zh-Hans/markdown-page/index.html

    Markdown page example

    You don't need React to write simple standalone pages.

    diff --git a/zh-Hans/search/index.html b/zh-Hans/search/index.html

    Search the documentation
