fix: module 'llama_cpp.llama_cpp' has no attribute 'c_uint8'
abetlen committed Feb 23, 2024
1 parent 427d816 commit db776a8
Showing 1 changed file with 4 additions and 3 deletions.
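
Background: inside llama.py the FFI bindings module is imported under the name llama_cpp, and save_state() looked up c_uint8 on it; on affected versions that module presumably no longer re-exported ctypes names, so the lookup failed at runtime. A hypothetical minimal reproduction (module path and behavior inferred from the error message, not verified against a specific release):

    import llama_cpp.llama_cpp as llama_cpp

    # On affected versions this line raised:
    #   AttributeError: module 'llama_cpp.llama_cpp' has no attribute 'c_uint8'
    buf = (llama_cpp.c_uint8 * 16)()

The fix below takes c_uint8 from the standard-library ctypes module instead, which always provides it.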
7 changes: 4 additions & 3 deletions llama_cpp/llama.py
@@ -5,8 +5,10 @@
 import uuid
 import time
 import json
+import ctypes
 import fnmatch
 import multiprocessing
+
 from typing import (
     List,
     Optional,
@@ -20,7 +22,6 @@
 from collections import deque
 from pathlib import Path

-import ctypes

 from llama_cpp.llama_types import List

@@ -1789,15 +1790,15 @@ def save_state(self) -> LlamaState:
         state_size = llama_cpp.llama_get_state_size(self._ctx.ctx)
         if self.verbose:
             print(f"Llama.save_state: got state size: {state_size}", file=sys.stderr)
-        llama_state = (llama_cpp.c_uint8 * int(state_size))()
+        llama_state = (ctypes.c_uint8 * int(state_size))()
         if self.verbose:
             print("Llama.save_state: allocated state", file=sys.stderr)
         n_bytes = llama_cpp.llama_copy_state_data(self._ctx.ctx, llama_state)
         if self.verbose:
             print(f"Llama.save_state: copied llama state: {n_bytes}", file=sys.stderr)
         if int(n_bytes) > int(state_size):
             raise RuntimeError("Failed to copy llama state data")
-        llama_state_compact = (llama_cpp.c_uint8 * int(n_bytes))()
+        llama_state_compact = (ctypes.c_uint8 * int(n_bytes))()
         llama_cpp.ctypes.memmove(llama_state_compact, llama_state, int(n_bytes))
         if self.verbose:
             print(
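
The corrected lines use the standard ctypes buffer pattern directly: multiplying a ctypes scalar type by a length yields a fixed-size array type, instantiating it gives a zero-filled buffer, and ctypes.memmove copies raw bytes between buffers. A minimal standalone sketch of that pattern (independent of llama-cpp-python; the names and sizes here are illustrative):

    import ctypes

    n = 8
    src = (ctypes.c_uint8 * n)(*range(n))  # array type c_uint8[8], initialized 0..7
    dst = (ctypes.c_uint8 * n)()           # same array type, zero-initialized

    # memmove copies raw bytes between ctypes buffers, like C's memmove.
    ctypes.memmove(dst, src, n)
    assert bytes(dst) == bytes(range(n))

save_state applies the same idea twice: it first allocates state_size bytes, then, once llama_copy_state_data reports the actual size, compacts the state into a second n_bytes-long array.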
