convert : remove AWQ remnants (#8320)
ggerganov authored Jul 5, 2024
1 parent 2cccbaa commit 148ec97
Showing 1 changed file with 1 addition and 18 deletions.
19 changes: 1 addition & 18 deletions convert_hf_to_gguf.py
@@ -2445,7 +2445,7 @@ def set_gguf_parameters(self):
             raise ValueError("query_pre_attn_scalar must be equal to n_embd / n_head")
 
     def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]:
-        del bid  # unusem
+        del bid  # unused
 
         # lm_head is not used in llama.cpp, while autoawq will include this tensor in model
         # To prevent errors, skip loading lm_head.weight.
@@ -3225,10 +3225,6 @@ def parse_args() -> argparse.Namespace:
         "--vocab-only", action="store_true",
         help="extract only the vocab",
     )
-    parser.add_argument(
-        "--awq-path", type=Path, default=None,
-        help="Path to scale awq cache file",
-    )
     parser.add_argument(
         "--outfile", type=Path,
         help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
@@ -3306,19 +3302,6 @@ def main() -> None:
 
     dir_model = args.model
 
-    if args.awq_path:
-        sys.path.insert(1, str(Path(__file__).parent / 'awq-py'))
-        from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]
-        tmp_model_path = args.model / "weighted_model"
-        dir_model = tmp_model_path
-        if tmp_model_path.is_dir():
-            logger.info(f"{tmp_model_path} exists as a weighted model.")
-        else:
-            tmp_model_path.mkdir(parents=True, exist_ok=True)
-            logger.info("Saving new weighted model ...")
-            add_scale_weights(str(args.model), str(args.awq_path), str(tmp_model_path))
-            logger.info(f"Saved weighted model at {tmp_model_path}.")
-
     if not dir_model.is_dir():
         logger.error(f'Error: {args.model} is not a directory')
         sys.exit(1)
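The deleted block in main() was the only consumer of the removed --awq-path flag: it imported add_scale_weights from the old awq-py helper, wrote an AWQ-scaled copy of the model into <model>/weighted_model, and pointed the converter at that directory. After this commit the converter performs no such pre-processing. Below is a minimal sketch of doing that step manually before conversion; the paths, the continued availability of the old awq-py sources, and the final CLI invocation are assumptions for illustration, not part of this commit.

# Hypothetical standalone pre-processing step mirroring the block removed in this commit.
# Assumes a local copy of the old awq-py sources providing awq.apply_awq.add_scale_weights.
import sys
from pathlib import Path

model_dir    = Path("models/my-model")         # original HF model directory (assumed path)
awq_cache    = Path("models/my-model-awq.pt")  # AWQ scale cache file (assumed path)
weighted_dir = model_dir / "weighted_model"    # same layout the removed code used

sys.path.insert(1, "awq-py")  # location of the old awq-py sources (assumption)
from awq.apply_awq import add_scale_weights  # type: ignore[import-not-found]

if not weighted_dir.is_dir():
    weighted_dir.mkdir(parents=True, exist_ok=True)
    add_scale_weights(str(model_dir), str(awq_cache), str(weighted_dir))

# Then point the converter at the scaled copy, e.g.:
#   python convert_hf_to_gguf.py models/my-model/weighted_model --outfile model.gguf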
