tl;dr, a Go parser for GGUF files.
GGUF is a file format for storing models for inference with GGML and executors based on GGML. It is a binary format designed for fast loading and saving of models, and for ease of reading. Models are traditionally developed using PyTorch or another framework, and then converted to GGUF for use in GGML.
GGUF Parser provides functions to parse GGUF files in Go for the following purposes:
- Read metadata from a GGUF file without downloading the whole model from the remote host.
- Estimate the model's memory usage.
Import the package as below.

```shell
go get github.com/gpustack/gguf-parser-go
```
If you need a one-shot command line, try gguf-parser from the releases, or run `go install github.com/gpustack/gguf-parser-go/cmd/gguf-parser` to build from HEAD.
```mermaid
flowchart
    parseGGUFFileRemote[/parseGGUFFileRemote/]
    parseGGUFFile[/parseGGUFFile/]
    ParseGGUFFile -.-> parseGGUFFile
    ParseGGUFFileFromHuggingFace -.-> ParseGGUFFileRemote
    ParseGGUFFileFromModelScope -.-> ParseGGUFFileRemote
    ParseGGUFFileRemote -.-> parseGGUFFileRemote
    parseGGUFFileRemote -.-> parseGGUFFile
    ParseGGUFFileFromOllama -.-> ParseGGUFFileFromOllamaModel
    ParseGGUFFileFromOllamaModel -.-> parseGGUFFileRemote
```
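The entry points in the diagram all funnel into the same low-level parsing routines. As a minimal sketch of parsing straight from Hugging Face: the repository and file names below are placeholders, and the exact ParseGGUFFileFromHuggingFace signature (repository, file name, then options) is assumed here.

```go
import (
    "context"

    "github.com/davecgh/go-spew/spew"

    . "github.com/gpustack/gguf-parser-go"
)

// Parse a GGUF file hosted on Hugging Face without downloading the whole model.
// The repository and file names are placeholders; replace them with a real repo/file.
f, err := ParseGGUFFileFromHuggingFace(
    context.Background(),
    "example-org/example-model-GGUF",
    "example-model.Q4_K_M.gguf")
if err != nil {
    panic(err)
}

spew.Dump(f)
```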
```go
import (
    "github.com/davecgh/go-spew/spew"

    . "github.com/gpustack/gguf-parser-go"
)

f, err := ParseGGUFFile("path/to/model.gguf")
if err != nil {
    panic(err)
}

spew.Dump(f)
```
```go
// Parse the local file via mmap.
f, err := ParseGGUFFile("path/to/model.gguf", UseMMap())
if err != nil {
    panic(err)
}
```
```go
// Skip large metadata entries while parsing.
f, err := ParseGGUFFile("path/to/model.gguf", SkipLargeMetadata())
if err != nil {
    panic(err)
}
```
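Since the parse options are variadic, they can also be combined in one call; a small sketch using the two options shown above:

```go
// Memory-map the file and skip large metadata in a single call.
f, err := ParseGGUFFile("path/to/model.gguf", UseMMap(), SkipLargeMetadata())
if err != nil {
    panic(err)
}
```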
```go
import (
    "context"

    "github.com/davecgh/go-spew/spew"

    . "github.com/gpustack/gguf-parser-go"
)

f, err := ParseGGUFFileRemote(context.Background(), "https://example.com/model.gguf")
if err != nil {
    panic(err)
}

spew.Dump(f)
```
```go
// Adjust the buffer size used when reading the remote file.
f, err := ParseGGUFFileRemote(context.Background(), "https://example.com/model.gguf", UseBufferSize(1*1024*1024) /* 1 MiB */)
if err != nil {
    panic(err)
}
```
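Models from the Ollama library can be parsed remotely as well, via the ParseGGUFFileFromOllama entry point from the call graph above. A minimal sketch, assuming the function takes the model name as it appears in the Ollama library:

```go
// Parse a model from the Ollama library by name.
// "gemma2" is only an example model name; the exact signature is assumed.
f, err := ParseGGUFFileFromOllama(context.Background(), "gemma2")
if err != nil {
    panic(err)
}

spew.Dump(f)
```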
```go
// Model
spew.Dump(f.Model())

// Architecture
spew.Dump(f.Architecture())

// Tokenizer
spew.Dump(f.Tokenizer())
```
Estimate usage in llama.cpp

The evaluation result is close to that of running llama-cli (examples/main/main.cpp).
```go
es := f.EstimateLLaMACppUsage()
spew.Dump(es)

// Since the estimated result is detailed and lacks context,
// you can summarize it as below.
s := es.Summarize(true /* load via mmap */, 0, 0 /* no unified memory RAM, VRAM footprint */)
spew.Dump(s)
```
```go
es := f.EstimateLLaMACppUsage(WithContextSize(4096) /* use a 4K context */)
spew.Dump(es)

// Since the estimated result is detailed and lacks context,
// you can summarize it as below.
s := es.Summarize(true /* load via mmap */, 0, 0 /* no unified memory RAM, VRAM footprint */)
spew.Dump(s)
```
```go
es := f.EstimateLLaMACppUsage(WithOffloadLayers(10) /* offload the last 10 layers to GPU */)
spew.Dump(es)

// Since the estimated result is detailed and lacks context,
// you can summarize it as below.
s := es.Summarize(true /* load via mmap */, 0, 0 /* no unified memory RAM, VRAM footprint */)
spew.Dump(s)
```
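Since the estimate options are variadic, they can be passed together in one call; a small sketch combining the two options shown above:

```go
// Estimate with a 4K context while offloading the last 10 layers to GPU.
es := f.EstimateLLaMACppUsage(WithContextSize(4096), WithOffloadLayers(10))
s := es.Summarize(true /* load via mmap */, 0, 0 /* no unified memory RAM, VRAM footprint */)
spew.Dump(s)
```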
MIT