diff --git a/src/AutoGGUF.py b/src/AutoGGUF.py
index b9fcb63..c35a00f 100644
--- a/src/AutoGGUF.py
+++ b/src/AutoGGUF.py
@@ -31,7 +31,7 @@ def __init__(self):
self.logger.info(INITIALIZING_AUTOGGUF)
self.setWindowTitle(WINDOW_TITLE)
- self.setWindowIcon(QIcon(resource_path("assets/favicon.ico")))
+ self.setWindowIcon(QIcon(resource_path("assets/favicon.ico")))
self.setGeometry(100, 100, 1600, 1200)
ensure_directory(os.path.abspath("quantized_models"))
@@ -171,7 +171,7 @@ def __init__(self):
"Q5_K_S",
"Q5_K_M",
"Q6_K",
- "Q8_0",
+ "Q8_0",
"Q4_0",
"Q4_1",
"Q5_0",
@@ -180,7 +180,7 @@ def __init__(self):
"Q4_0_4_8",
"Q4_0_8_8",
"BF16",
- "F16",
+ "F16",
"F32",
"COPY",
]
@@ -452,8 +452,13 @@ def __init__(self):
# Output Type Dropdown
self.lora_output_type_combo = QComboBox()
self.lora_output_type_combo.addItems(["GGML", "GGUF"])
- self.lora_output_type_combo.currentIndexChanged.connect(self.update_base_model_visibility)
- lora_layout.addRow(self.create_label(OUTPUT_TYPE, SELECT_OUTPUT_TYPE), self.lora_output_type_combo)
+ self.lora_output_type_combo.currentIndexChanged.connect(
+ self.update_base_model_visibility
+ )
+ lora_layout.addRow(
+ self.create_label(OUTPUT_TYPE, SELECT_OUTPUT_TYPE),
+ self.lora_output_type_combo,
+ )
# Base Model Path (initially hidden)
self.base_model_label = self.create_label(BASE_MODEL, SELECT_BASE_MODEL_FILE)
@@ -471,7 +476,9 @@ def __init__(self):
wrapper_layout = QHBoxLayout(self.base_model_wrapper)
wrapper_layout.addWidget(self.base_model_label)
wrapper_layout.addWidget(self.base_model_widget, 1) # Give it a stretch factor
- wrapper_layout.setContentsMargins(0, 0, 0, 0) # Remove margins for better alignment
+ wrapper_layout.setContentsMargins(
+ 0, 0, 0, 0
+ ) # Remove margins for better alignment
# Add the wrapper to the layout
lora_layout.addRow(self.base_model_wrapper)
@@ -545,7 +552,7 @@ def __init__(self):
# Modify the task list to support right-click menu
self.task_list.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
self.task_list.customContextMenuRequested.connect(self.show_task_context_menu)
-
+
        # Set initial state
self.update_base_model_visibility(self.lora_output_type_combo.currentIndex())
@@ -1200,19 +1207,19 @@ def delete_task(self, item):
if reply == QMessageBox.StandardButton.Yes:
# Retrieve the task_item before removing it from the list
task_item = self.task_list.itemWidget(item)
-
+
# Remove the item from the list
row = self.task_list.row(item)
self.task_list.takeItem(row)
-
+
# If the task is still running, terminate it
if task_item and task_item.log_file:
for thread in self.quant_threads:
if thread.log_file == task_item.log_file:
thread.terminate()
self.quant_threads.remove(thread)
- break
-
+ break
+
# Delete the task_item widget
if task_item:
task_item.deleteLater()
@@ -1395,7 +1402,7 @@ def quantize_model(self):
override_string = entry.get_override_string(
model_name=model_name,
quant_type=quant_type,
- output_path=output_path
+ output_path=output_path,
)
if override_string:
command.extend(["--override-kv", override_string])
@@ -1413,7 +1420,7 @@ def quantize_model(self):
log_file = os.path.join(
logs_path, f"{model_name}_{timestamp}_{quant_type}.log"
)
-
+
# Log quant command
command_str = " ".join(command)
self.logger.info(f"{QUANTIZATION_COMMAND}: {command_str}")
@@ -1430,7 +1437,9 @@ def quantize_model(self):
self.task_list.setItemWidget(list_item, task_item)
# Connect the output signal to the new progress parsing function
- thread.output_signal.connect(lambda line: self.parse_progress(line, task_item))
+ thread.output_signal.connect(
+ lambda line: self.parse_progress(line, task_item)
+ )
thread.status_signal.connect(task_item.update_status)
thread.finished_signal.connect(lambda: self.task_finished(thread))
thread.error_signal.connect(lambda err: self.handle_error(err, task_item))
@@ -1556,7 +1565,7 @@ def generate_imatrix(self):
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
log_file = os.path.join(self.logs_input.text(), f"imatrix_{timestamp}.log")
-
+
# Log command
command_str = " ".join(command)
self.logger.info(f"{IMATRIX_GENERATION_COMMAND}: {command_str}")
@@ -1580,7 +1589,7 @@ def generate_imatrix(self):
except Exception as e:
self.show_error(ERROR_STARTING_IMATRIX_GENERATION.format(str(e)))
self.logger.info(IMATRIX_GENERATION_TASK_STARTED)
-
+
def show_error(self, message):
self.logger.error(ERROR_MESSAGE.format(message))
QMessageBox.critical(self, ERROR, message)
@@ -1617,4 +1626,4 @@ def closeEvent(self, event: QCloseEvent):
app = QApplication(sys.argv)
window = AutoGGUF()
window.show()
- sys.exit(app.exec())
\ No newline at end of file
+ sys.exit(app.exec())
diff --git a/src/DownloadThread.py b/src/DownloadThread.py
index fa9e8ed..fe926c9 100644
--- a/src/DownloadThread.py
+++ b/src/DownloadThread.py
@@ -1,54 +1,55 @@
-from PyQt6.QtWidgets import *
-from PyQt6.QtCore import *
-from PyQt6.QtGui import *
-import os
-import sys
-import psutil
-import subprocess
-import time
-import signal
-import json
-import platform
-import requests
-import zipfile
-from datetime import datetime
-
-class DownloadThread(QThread):
- progress_signal = pyqtSignal(int)
- finished_signal = pyqtSignal(str)
- error_signal = pyqtSignal(str)
-
- def __init__(self, url, save_path):
- super().__init__()
- self.url = url
- self.save_path = save_path
-
- def run(self):
- try:
- response = requests.get(self.url, stream=True)
- response.raise_for_status()
- total_size = int(response.headers.get('content-length', 0))
- block_size = 8192
- downloaded = 0
-
- with open(self.save_path, 'wb') as file:
- for data in response.iter_content(block_size):
- size = file.write(data)
- downloaded += size
- if total_size:
- progress = int((downloaded / total_size) * 100)
- self.progress_signal.emit(progress)
-
- # Extract the downloaded zip file
- extract_dir = os.path.splitext(self.save_path)[0]
- with zipfile.ZipFile(self.save_path, 'r') as zip_ref:
- zip_ref.extractall(extract_dir)
-
- # Remove the zip file after extraction
- os.remove(self.save_path)
-
- self.finished_signal.emit(extract_dir)
- except Exception as e:
- self.error_signal.emit(str(e))
- if os.path.exists(self.save_path):
- os.remove(self.save_path)
+from PyQt6.QtWidgets import *
+from PyQt6.QtCore import *
+from PyQt6.QtGui import *
+import os
+import sys
+import psutil
+import subprocess
+import time
+import signal
+import json
+import platform
+import requests
+import zipfile
+from datetime import datetime
+
+
+class DownloadThread(QThread):
+ progress_signal = pyqtSignal(int)
+ finished_signal = pyqtSignal(str)
+ error_signal = pyqtSignal(str)
+
+ def __init__(self, url, save_path):
+ super().__init__()
+ self.url = url
+ self.save_path = save_path
+
+ def run(self):
+ try:
+ response = requests.get(self.url, stream=True)
+ response.raise_for_status()
+ total_size = int(response.headers.get("content-length", 0))
+ block_size = 8192
+ downloaded = 0
+
+ with open(self.save_path, "wb") as file:
+ for data in response.iter_content(block_size):
+ size = file.write(data)
+ downloaded += size
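+                # Report percentage progress only when the server sent a Content-Length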
+ if total_size:
+ progress = int((downloaded / total_size) * 100)
+ self.progress_signal.emit(progress)
+
+ # Extract the downloaded zip file
+ extract_dir = os.path.splitext(self.save_path)[0]
+ with zipfile.ZipFile(self.save_path, "r") as zip_ref:
+ zip_ref.extractall(extract_dir)
+
+ # Remove the zip file after extraction
+ os.remove(self.save_path)
+
+ self.finished_signal.emit(extract_dir)
+ except Exception as e:
+ self.error_signal.emit(str(e))
+ if os.path.exists(self.save_path):
+ os.remove(self.save_path)
diff --git a/src/KVOverrideEntry.py b/src/KVOverrideEntry.py
index a382bd5..6d73c85 100644
--- a/src/KVOverrideEntry.py
+++ b/src/KVOverrideEntry.py
@@ -1,83 +1,92 @@
-from PyQt6.QtWidgets import QWidget, QHBoxLayout, QLineEdit, QComboBox, QPushButton
-from PyQt6.QtCore import pyqtSignal, QRegularExpression
-from PyQt6.QtGui import QDoubleValidator, QIntValidator, QRegularExpressionValidator
-from datetime import datetime
-import time
-import os
-import socket
-import platform
-
-class KVOverrideEntry(QWidget):
- deleted = pyqtSignal(QWidget)
-
- def __init__(self, parent=None):
- super().__init__(parent)
- layout = QHBoxLayout(self)
- layout.setContentsMargins(0, 0, 0, 0)
-
- self.key_input = QLineEdit()
- self.key_input.setPlaceholderText("Key")
- # Set validator for key input (letters and dots only)
- key_validator = QRegularExpressionValidator(QRegularExpression(r"[A-Za-z.]+"))
- self.key_input.setValidator(key_validator)
- layout.addWidget(self.key_input)
-
- self.type_combo = QComboBox()
- self.type_combo.addItems(["int", "str", "float"])
- layout.addWidget(self.type_combo)
-
- self.value_input = QLineEdit()
- self.value_input.setPlaceholderText("Value")
- layout.addWidget(self.value_input)
-
- delete_button = QPushButton("X")
- delete_button.setFixedSize(30, 30)
- delete_button.clicked.connect(self.delete_clicked)
- layout.addWidget(delete_button)
-
- # Connect type change to validator update
- self.type_combo.currentTextChanged.connect(self.update_validator)
-
- # Initialize validator
- self.update_validator(self.type_combo.currentText())
-
- def delete_clicked(self):
- self.deleted.emit(self)
-
- def get_override_string(self, model_name=None, quant_type=None, output_path=None): # Add arguments
- key = self.key_input.text()
- type_ = self.type_combo.currentText()
- value = self.value_input.text()
-
- dynamic_params = {
- "{system.time.milliseconds}": lambda: str(int(time.time() * 1000)),
- "{system.time.seconds}": lambda: str(int(time.time())),
- "{system.date.iso}": lambda: datetime.now().strftime("%Y-%m-%d"),
- "{system.datetime.iso}": lambda: datetime.now().isoformat(),
- "{system.username}": lambda: os.getlogin(),
- "{system.hostname}": lambda: socket.gethostname(),
- "{system.platform}": lambda: platform.system(),
- "{system.python.version}": lambda: platform.python_version(),
- "{system.time.milliseconds}": lambda: str(int(time.time() * 1000)),
- "{system.date}": lambda: datetime.now().strftime("%Y-%m-%d"),
- "{model.name}": lambda: model_name if model_name is not None else "Unknown Model",
- "{quant.type}": lambda: quant_type if quant_type is not None else "Unknown Quant",
- "{output.path}": lambda: output_path if output_path is not None else "Unknown Output Path",
- }
-
- for param, func in dynamic_params.items():
- value = value.replace(param, func())
-
- return f"{key}={type_}:{value}"
-
- def get_raw_override_string(self):
- # Return the raw override string with placeholders intact
- return f"{self.key_input.text()}={self.type_combo.currentText()}:{self.value_input.text()}"
-
- def update_validator(self, type_):
- if type_ == "int":
- self.value_input.setValidator(QIntValidator())
- elif type_ == "float":
- self.value_input.setValidator(QDoubleValidator())
- else: # str
- self.value_input.setValidator(None)
+from PyQt6.QtWidgets import QWidget, QHBoxLayout, QLineEdit, QComboBox, QPushButton
+from PyQt6.QtCore import pyqtSignal, QRegularExpression
+from PyQt6.QtGui import QDoubleValidator, QIntValidator, QRegularExpressionValidator
+from datetime import datetime
+import time
+import os
+import socket
+import platform
+
+
+class KVOverrideEntry(QWidget):
+ deleted = pyqtSignal(QWidget)
+
+ def __init__(self, parent=None):
+ super().__init__(parent)
+ layout = QHBoxLayout(self)
+ layout.setContentsMargins(0, 0, 0, 0)
+
+ self.key_input = QLineEdit()
+ self.key_input.setPlaceholderText("Key")
+ # Set validator for key input (letters and dots only)
+ key_validator = QRegularExpressionValidator(QRegularExpression(r"[A-Za-z.]+"))
+ self.key_input.setValidator(key_validator)
+ layout.addWidget(self.key_input)
+
+ self.type_combo = QComboBox()
+ self.type_combo.addItems(["int", "str", "float"])
+ layout.addWidget(self.type_combo)
+
+ self.value_input = QLineEdit()
+ self.value_input.setPlaceholderText("Value")
+ layout.addWidget(self.value_input)
+
+ delete_button = QPushButton("X")
+ delete_button.setFixedSize(30, 30)
+ delete_button.clicked.connect(self.delete_clicked)
+ layout.addWidget(delete_button)
+
+ # Connect type change to validator update
+ self.type_combo.currentTextChanged.connect(self.update_validator)
+
+ # Initialize validator
+ self.update_validator(self.type_combo.currentText())
+
+ def delete_clicked(self):
+ self.deleted.emit(self)
+
+ def get_override_string(
+ self, model_name=None, quant_type=None, output_path=None
+ ): # Add arguments
+ key = self.key_input.text()
+ type_ = self.type_combo.currentText()
+ value = self.value_input.text()
+
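+        # Each placeholder maps to a callable so its value is computed when the override string is built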
+ dynamic_params = {
+ "{system.time.milliseconds}": lambda: str(int(time.time() * 1000)),
+ "{system.time.seconds}": lambda: str(int(time.time())),
+ "{system.date.iso}": lambda: datetime.now().strftime("%Y-%m-%d"),
+ "{system.datetime.iso}": lambda: datetime.now().isoformat(),
+ "{system.username}": lambda: os.getlogin(),
+ "{system.hostname}": lambda: socket.gethostname(),
+ "{system.platform}": lambda: platform.system(),
+ "{system.python.version}": lambda: platform.python_version(),
+ "{system.date}": lambda: datetime.now().strftime("%Y-%m-%d"),
+ "{model.name}": lambda: (
+ model_name if model_name is not None else "Unknown Model"
+ ),
+ "{quant.type}": lambda: (
+ quant_type if quant_type is not None else "Unknown Quant"
+ ),
+ "{output.path}": lambda: (
+ output_path if output_path is not None else "Unknown Output Path"
+ ),
+ }
+
+ for param, func in dynamic_params.items():
+ value = value.replace(param, func())
+
+ return f"{key}={type_}:{value}"
+
+ def get_raw_override_string(self):
+ # Return the raw override string with placeholders intact
+ return f"{self.key_input.text()}={self.type_combo.currentText()}:{self.value_input.text()}"
+
+ def update_validator(self, type_):
+ if type_ == "int":
+ self.value_input.setValidator(QIntValidator())
+ elif type_ == "float":
+ self.value_input.setValidator(QDoubleValidator())
+ else: # str
+ self.value_input.setValidator(None)
diff --git a/src/Logger.py b/src/Logger.py
index aaa5bb9..4e77c80 100644
--- a/src/Logger.py
+++ b/src/Logger.py
@@ -1,46 +1,51 @@
-import logging
-from logging.handlers import RotatingFileHandler
-import os
-import sys
-from datetime import datetime
-
-class Logger:
- def __init__(self, name, log_dir):
- self.logger = logging.getLogger(name)
- self.logger.setLevel(logging.DEBUG)
-
- # Create logs directory if it doesn't exist
- os.makedirs(log_dir, exist_ok=True)
-
- # Console handler
- console_handler = logging.StreamHandler()
- console_handler.setLevel(logging.INFO)
- console_format = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
- console_handler.setFormatter(console_format)
-
- # File handler
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- log_file = os.path.join(log_dir, f"latest_{timestamp}.log")
- file_handler = RotatingFileHandler(log_file, maxBytes=10*1024*1024, backupCount=5, encoding='utf-8')
- file_handler.setLevel(logging.DEBUG)
- file_format = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s')
- file_handler.setFormatter(file_format)
-
- # Add handlers to logger
- self.logger.addHandler(console_handler)
- self.logger.addHandler(file_handler)
-
- def debug(self, message):
- self.logger.debug(message)
-
- def info(self, message):
- self.logger.info(message)
-
- def warning(self, message):
- self.logger.warning(message)
-
- def error(self, message):
- self.logger.error(message)
-
- def critical(self, message):
- self.logger.critical(message)
\ No newline at end of file
+import logging
+from logging.handlers import RotatingFileHandler
+import os
+import sys
+from datetime import datetime
+
+
+class Logger:
+ def __init__(self, name, log_dir):
+ self.logger = logging.getLogger(name)
+ self.logger.setLevel(logging.DEBUG)
+
+ # Create logs directory if it doesn't exist
+ os.makedirs(log_dir, exist_ok=True)
+
+ # Console handler
+ console_handler = logging.StreamHandler()
+ console_handler.setLevel(logging.INFO)
+ console_format = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
+ console_handler.setFormatter(console_format)
+
+ # File handler
+ timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+ log_file = os.path.join(log_dir, f"latest_{timestamp}.log")
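+        # Rotate the file log at 10 MB and keep up to 5 backups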
+ file_handler = RotatingFileHandler(
+ log_file, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8"
+ )
+ file_handler.setLevel(logging.DEBUG)
+ file_format = logging.Formatter(
+ "%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s"
+ )
+ file_handler.setFormatter(file_format)
+
+ # Add handlers to logger
+ self.logger.addHandler(console_handler)
+ self.logger.addHandler(file_handler)
+
+ def debug(self, message):
+ self.logger.debug(message)
+
+ def info(self, message):
+ self.logger.info(message)
+
+ def warning(self, message):
+ self.logger.warning(message)
+
+ def error(self, message):
+ self.logger.error(message)
+
+ def critical(self, message):
+ self.logger.critical(message)
diff --git a/src/ModelInfoDialog.py b/src/ModelInfoDialog.py
index 1f46897..6b0897f 100644
--- a/src/ModelInfoDialog.py
+++ b/src/ModelInfoDialog.py
@@ -1,48 +1,48 @@
-from PyQt6.QtWidgets import *
-from PyQt6.QtCore import *
-from PyQt6.QtGui import *
-import os
-import sys
-import psutil
-import subprocess
-import time
-import signal
-import json
-import platform
-import requests
-import zipfile
-from datetime import datetime
-
-class ModelInfoDialog(QDialog):
- def __init__(self, model_info, parent=None):
- super().__init__(parent)
- self.setWindowTitle("Model Information")
- self.setGeometry(200, 200, 600, 400)
-
- layout = QVBoxLayout()
-
- info_text = QTextEdit()
- info_text.setReadOnly(True)
- info_text.setHtml(self.format_model_info(model_info))
-
- layout.addWidget(info_text)
-
- close_button = QPushButton("Close")
- close_button.clicked.connect(self.accept)
- layout.addWidget(close_button)
-
- self.setLayout(layout)
-
- def format_model_info(self, model_info):
-        html = "<h2>Model Information</h2>"
-        html += f"<p><b>Architecture:</b> {model_info.get('architecture', 'N/A')}</p>"
-        html += f"<p><b>Quantization Type:</b> {model_info.get('quantization_type', 'N/A')}</p>"
-        html += f"<p><b>KV Pairs:</b> {model_info.get('kv_pairs', 'N/A')}</p>"
-        html += f"<p><b>Tensors:</b> {model_info.get('tensors', 'N/A')}</p>"
-
-        html += "<h3>Key-Value Pairs:</h3>"
-        for key, value in model_info.get('kv_data', {}).items():
-            html += f"<p><b>{key}:</b> {value}</p>"
-
- return html
-
+from PyQt6.QtWidgets import *
+from PyQt6.QtCore import *
+from PyQt6.QtGui import *
+import os
+import sys
+import psutil
+import subprocess
+import time
+import signal
+import json
+import platform
+import requests
+import zipfile
+from datetime import datetime
+
+
+class ModelInfoDialog(QDialog):
+ def __init__(self, model_info, parent=None):
+ super().__init__(parent)
+ self.setWindowTitle("Model Information")
+ self.setGeometry(200, 200, 600, 400)
+
+ layout = QVBoxLayout()
+
+ info_text = QTextEdit()
+ info_text.setReadOnly(True)
+ info_text.setHtml(self.format_model_info(model_info))
+
+ layout.addWidget(info_text)
+
+ close_button = QPushButton("Close")
+ close_button.clicked.connect(self.accept)
+ layout.addWidget(close_button)
+
+ self.setLayout(layout)
+
+ def format_model_info(self, model_info):
+        html = "<h2>Model Information</h2>"
+        html += f"<p><b>Architecture:</b> {model_info.get('architecture', 'N/A')}</p>"
+        html += f"<p><b>Quantization Type:</b> {model_info.get('quantization_type', 'N/A')}</p>"
+        html += f"<p><b>KV Pairs:</b> {model_info.get('kv_pairs', 'N/A')}</p>"
+        html += f"<p><b>Tensors:</b> {model_info.get('tensors', 'N/A')}</p>"
+
+        html += "<h3>Key-Value Pairs:</h3>"
+        for key, value in model_info.get("kv_data", {}).items():
+            html += f"<p><b>{key}:</b> {value}</p>"
+
+ return html
diff --git a/src/QuantizationThread.py b/src/QuantizationThread.py
index 712cac3..aa798f2 100644
--- a/src/QuantizationThread.py
+++ b/src/QuantizationThread.py
@@ -1,94 +1,95 @@
-from PyQt6.QtWidgets import *
-from PyQt6.QtCore import *
-from PyQt6.QtGui import *
-import os
-import sys
-import psutil
-import subprocess
-import time
-import signal
-import json
-import platform
-import requests
-import zipfile
-import traceback
-from datetime import datetime
-from imports_and_globals import open_file_safe
-
-class QuantizationThread(QThread):
- # Define custom signals for communication with the main thread
- output_signal = pyqtSignal(str)
- status_signal = pyqtSignal(str)
- finished_signal = pyqtSignal()
- error_signal = pyqtSignal(str)
- model_info_signal = pyqtSignal(dict)
-
- def __init__(self, command, cwd, log_file):
- super().__init__()
- self.command = command
- self.cwd = cwd
- self.log_file = log_file
- self.process = None
- self.model_info = {}
-
- def run(self):
- try:
- # Start the subprocess
- self.process = subprocess.Popen(
- self.command,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True,
- cwd=self.cwd,
- )
- # Open log file and process output
- with open_file_safe(self.log_file, "w") as log:
- for line in self.process.stdout:
- line = line.strip()
- self.output_signal.emit(line)
- log.write(line + "\n")
- log.flush()
- self.status_signal.emit("In Progress")
- self.parse_model_info(line)
-
- # Wait for process to complete
- self.process.wait()
- if self.process.returncode == 0:
- self.status_signal.emit("Completed")
- self.model_info_signal.emit(self.model_info)
- else:
- self.error_signal.emit(
- f"Process exited with code {self.process.returncode}"
- )
- self.finished_signal.emit()
- except Exception as e:
- self.error_signal.emit(str(e))
-
- def parse_model_info(self, line):
- # Parse output for model information
- if "llama_model_loader: loaded meta data with" in line:
- parts = line.split()
- self.model_info["kv_pairs"] = parts[6]
- self.model_info["tensors"] = parts[9]
- elif "general.architecture" in line:
- self.model_info["architecture"] = line.split("=")[-1].strip()
- elif line.startswith("llama_model_loader: - kv"):
- key = line.split(":")[2].strip()
- value = line.split("=")[-1].strip()
- self.model_info.setdefault("kv_data", {})[key] = value
- elif line.startswith("llama_model_loader: - type"):
- parts = line.split(":")
- if len(parts) > 1:
- quant_type = parts[1].strip()
- tensors = parts[2].strip().split()[0]
- self.model_info.setdefault("quantization_type", []).append(
- f"{quant_type}: {tensors} tensors"
- )
-
- def terminate(self):
- # Terminate the subprocess if it's still running
- if self.process:
- os.kill(self.process.pid, signal.SIGTERM)
- self.process.wait(timeout=5)
- if self.process.poll() is None:
- os.kill(self.process.pid, signal.SIGKILL)
\ No newline at end of file
+from PyQt6.QtWidgets import *
+from PyQt6.QtCore import *
+from PyQt6.QtGui import *
+import os
+import sys
+import psutil
+import subprocess
+import time
+import signal
+import json
+import platform
+import requests
+import zipfile
+import traceback
+from datetime import datetime
+from imports_and_globals import open_file_safe
+
+
+class QuantizationThread(QThread):
+ # Define custom signals for communication with the main thread
+ output_signal = pyqtSignal(str)
+ status_signal = pyqtSignal(str)
+ finished_signal = pyqtSignal()
+ error_signal = pyqtSignal(str)
+ model_info_signal = pyqtSignal(dict)
+
+ def __init__(self, command, cwd, log_file):
+ super().__init__()
+ self.command = command
+ self.cwd = cwd
+ self.log_file = log_file
+ self.process = None
+ self.model_info = {}
+
+ def run(self):
+ try:
+ # Start the subprocess
+ self.process = subprocess.Popen(
+ self.command,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ text=True,
+ cwd=self.cwd,
+ )
+ # Open log file and process output
+ with open_file_safe(self.log_file, "w") as log:
+ for line in self.process.stdout:
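+                # Mirror each output line to the UI, the log file, and the metadata parser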
+ line = line.strip()
+ self.output_signal.emit(line)
+ log.write(line + "\n")
+ log.flush()
+ self.status_signal.emit("In Progress")
+ self.parse_model_info(line)
+
+ # Wait for process to complete
+ self.process.wait()
+ if self.process.returncode == 0:
+ self.status_signal.emit("Completed")
+ self.model_info_signal.emit(self.model_info)
+ else:
+ self.error_signal.emit(
+ f"Process exited with code {self.process.returncode}"
+ )
+ self.finished_signal.emit()
+ except Exception as e:
+ self.error_signal.emit(str(e))
+
+ def parse_model_info(self, line):
+ # Parse output for model information
+ if "llama_model_loader: loaded meta data with" in line:
+ parts = line.split()
+ self.model_info["kv_pairs"] = parts[6]
+ self.model_info["tensors"] = parts[9]
+ elif "general.architecture" in line:
+ self.model_info["architecture"] = line.split("=")[-1].strip()
+ elif line.startswith("llama_model_loader: - kv"):
+ key = line.split(":")[2].strip()
+ value = line.split("=")[-1].strip()
+ self.model_info.setdefault("kv_data", {})[key] = value
+ elif line.startswith("llama_model_loader: - type"):
+ parts = line.split(":")
+ if len(parts) > 1:
+ quant_type = parts[1].strip()
+ tensors = parts[2].strip().split()[0]
+ self.model_info.setdefault("quantization_type", []).append(
+ f"{quant_type}: {tensors} tensors"
+ )
+
+ def terminate(self):
+ # Terminate the subprocess if it's still running
+ if self.process:
+ os.kill(self.process.pid, signal.SIGTERM)
+            try:
+                self.process.wait(timeout=5)
+            except subprocess.TimeoutExpired:
+                # Still running after 5 s; fall through so SIGKILL is sent below
+                pass
+ if self.process.poll() is None:
+ os.kill(self.process.pid, signal.SIGKILL)
diff --git a/src/TaskListItem.py b/src/TaskListItem.py
index 354d735..f97f08b 100644
--- a/src/TaskListItem.py
+++ b/src/TaskListItem.py
@@ -1,72 +1,73 @@
-from PyQt6.QtWidgets import *
-from PyQt6.QtCore import *
-from PyQt6.QtGui import *
-import os
-import sys
-import psutil
-import subprocess
-import time
-import signal
-import json
-import platform
-import requests
-import zipfile
-from datetime import datetime
-
-class TaskListItem(QWidget):
- def __init__(self, task_name, log_file, show_progress_bar=True, parent=None):
- super().__init__(parent)
- self.task_name = task_name
- self.log_file = log_file
- self.status = "Pending"
- layout = QHBoxLayout(self)
- self.task_label = QLabel(task_name)
- self.progress_bar = QProgressBar()
- self.progress_bar.setRange(0, 100)
- self.status_label = QLabel(self.status)
- layout.addWidget(self.task_label)
- layout.addWidget(self.progress_bar)
- layout.addWidget(self.status_label)
-
- # Hide progress bar if show_progress_bar is False
- self.progress_bar.setVisible(show_progress_bar)
-
- # Use indeterminate progress bar if not showing percentage
- if not show_progress_bar:
- self.progress_bar.setRange(0, 0)
-
- self.progress_timer = QTimer(self)
- self.progress_timer.timeout.connect(self.update_progress)
- self.progress_value = 0
-
- def update_status(self, status):
- self.status = status
- self.status_label.setText(status)
- if status == "In Progress":
- # Only start timer if showing percentage progress
- if self.progress_bar.isVisible():
- self.progress_bar.setRange(0, 100)
- self.progress_timer.start(100)
- elif status == "Completed":
- self.progress_timer.stop()
- self.progress_bar.setValue(100)
- elif status == "Canceled":
- self.progress_timer.stop()
- self.progress_bar.setValue(0)
-
- def set_error(self):
- self.status = "Error"
- self.status_label.setText("Error")
- self.status_label.setStyleSheet("color: red;")
- self.progress_bar.setRange(0, 100)
- self.progress_timer.stop()
-
- def update_progress(self, value=None):
- if value is not None:
- # Update progress bar with specific value
- self.progress_value = value
- self.progress_bar.setValue(self.progress_value)
- else:
- # Increment progress bar for indeterminate progress
- self.progress_value = (self.progress_value + 1) % 101
- self.progress_bar.setValue(self.progress_value)
\ No newline at end of file
+from PyQt6.QtWidgets import *
+from PyQt6.QtCore import *
+from PyQt6.QtGui import *
+import os
+import sys
+import psutil
+import subprocess
+import time
+import signal
+import json
+import platform
+import requests
+import zipfile
+from datetime import datetime
+
+
+class TaskListItem(QWidget):
+ def __init__(self, task_name, log_file, show_progress_bar=True, parent=None):
+ super().__init__(parent)
+ self.task_name = task_name
+ self.log_file = log_file
+ self.status = "Pending"
+ layout = QHBoxLayout(self)
+ self.task_label = QLabel(task_name)
+ self.progress_bar = QProgressBar()
+ self.progress_bar.setRange(0, 100)
+ self.status_label = QLabel(self.status)
+ layout.addWidget(self.task_label)
+ layout.addWidget(self.progress_bar)
+ layout.addWidget(self.status_label)
+
+ # Hide progress bar if show_progress_bar is False
+ self.progress_bar.setVisible(show_progress_bar)
+
+ # Use indeterminate progress bar if not showing percentage
+ if not show_progress_bar:
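+            # A 0-0 range switches QProgressBar into indeterminate (busy) mode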
+ self.progress_bar.setRange(0, 0)
+
+ self.progress_timer = QTimer(self)
+ self.progress_timer.timeout.connect(self.update_progress)
+ self.progress_value = 0
+
+ def update_status(self, status):
+ self.status = status
+ self.status_label.setText(status)
+ if status == "In Progress":
+ # Only start timer if showing percentage progress
+ if self.progress_bar.isVisible():
+ self.progress_bar.setRange(0, 100)
+ self.progress_timer.start(100)
+ elif status == "Completed":
+ self.progress_timer.stop()
+ self.progress_bar.setValue(100)
+ elif status == "Canceled":
+ self.progress_timer.stop()
+ self.progress_bar.setValue(0)
+
+ def set_error(self):
+ self.status = "Error"
+ self.status_label.setText("Error")
+ self.status_label.setStyleSheet("color: red;")
+ self.progress_bar.setRange(0, 100)
+ self.progress_timer.stop()
+
+ def update_progress(self, value=None):
+ if value is not None:
+ # Update progress bar with specific value
+ self.progress_value = value
+ self.progress_bar.setValue(self.progress_value)
+ else:
+ # Increment progress bar for indeterminate progress
+ self.progress_value = (self.progress_value + 1) % 101
+ self.progress_bar.setValue(self.progress_value)
diff --git a/src/convert_hf_to_gguf.py b/src/convert_hf_to_gguf.py
index 10416da..1ca7308 100644
--- a/src/convert_hf_to_gguf.py
+++ b/src/convert_hf_to_gguf.py
@@ -4392,4 +4392,4 @@ def main() -> None:
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
diff --git a/src/convert_lora_to_ggml.py b/src/convert_lora_to_ggml.py
index 5ed71a4..e6e152d 100644
--- a/src/convert_lora_to_ggml.py
+++ b/src/convert_lora_to_ggml.py
@@ -12,8 +12,8 @@
import numpy as np
import torch
-if 'NO_LOCAL_GGUF' not in os.environ:
- sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
+if "NO_LOCAL_GGUF" not in os.environ:
+ sys.path.insert(1, str(Path(__file__).parent / "gguf-py" / "gguf"))
import gguf
logging.basicConfig(level=logging.DEBUG)
@@ -35,7 +35,9 @@ def write_file_header(fout: BinaryIO, params: dict[str, Any]) -> None:
fout.write(struct.pack("i", int(params["lora_alpha"])))
-def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_type: np.dtype[Any]) -> None:
+def write_tensor_header(
+ fout: BinaryIO, name: str, shape: Sequence[int], data_type: np.dtype[Any]
+) -> None:
sname = name.encode("utf-8")
fout.write(
struct.pack(
@@ -49,15 +51,21 @@ def write_tensor_header(fout: BinaryIO, name: str, shape: Sequence[int], data_ty
fout.write(sname)
fout.seek((fout.tell() + 31) & -32)
+
def pyinstaller_include():
# PyInstaller import
pass
-if __name__ == '__main__':
+
+if __name__ == "__main__":
if len(sys.argv) < 2:
        logger.info(f"Usage: python {sys.argv[0]} <path> [arch]")
- logger.info("Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'")
- logger.info(f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)")
+ logger.info(
+ "Path must contain HuggingFace PEFT LoRA files 'adapter_config.json' and 'adapter_model.bin'"
+ )
+ logger.info(
+ f"Arch must be one of {list(gguf.MODEL_ARCH_NAMES.values())} (default: llama)"
+ )
sys.exit(1)
input_json = os.path.join(sys.argv[1], "adapter_config.json")
@@ -70,6 +78,7 @@ def pyinstaller_include():
input_model = os.path.join(sys.argv[1], "adapter_model.safetensors")
# lazy import load_file only if lora is in safetensors format.
from safetensors.torch import load_file
+
model = load_file(input_model, device="cpu")
arch_name = sys.argv[2] if len(sys.argv) == 3 else "llama"
@@ -78,14 +87,18 @@ def pyinstaller_include():
logger.error(f"Error: unsupported architecture {arch_name}")
sys.exit(1)
- arch = list(gguf.MODEL_ARCH_NAMES.keys())[list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)]
- name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
+ arch = list(gguf.MODEL_ARCH_NAMES.keys())[
+ list(gguf.MODEL_ARCH_NAMES.values()).index(arch_name)
+ ]
+ name_map = gguf.TensorNameMap(arch, 200) # 200 layers ought to be enough for anyone
with open(input_json, "r") as f:
params = json.load(f)
if params["peft_type"] != "LORA":
- logger.error(f"Error: unsupported adapter type {params['peft_type']}, expected LORA")
+ logger.error(
+ f"Error: unsupported adapter type {params['peft_type']}, expected LORA"
+ )
sys.exit(1)
if params["fan_in_fan_out"] is True:
@@ -127,7 +140,7 @@ def pyinstaller_include():
lora_suffixes = (".lora_A.weight", ".lora_B.weight")
if k.endswith(lora_suffixes):
- suffix = k[-len(lora_suffixes[0]):]
+ suffix = k[-len(lora_suffixes[0]) :]
k = k[: -len(lora_suffixes[0])]
else:
logger.error(f"Error: unrecognized tensor name {orig_k}")
@@ -136,7 +149,9 @@ def pyinstaller_include():
tname = name_map.get_name(k)
if tname is None:
logger.error(f"Error: could not map tensor name {orig_k}")
- logger.error(" Note: the arch parameter must be specified if the model is not llama")
+ logger.error(
+ " Note: the arch parameter must be specified if the model is not llama"
+ )
sys.exit(1)
if suffix == ".lora_A.weight":
@@ -146,7 +161,9 @@ def pyinstaller_include():
else:
assert False
- logger.info(f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB")
+ logger.info(
+ f"{k} => {tname} {t.shape} {t.dtype} {t.nbytes/1024/1024:.2f}MB"
+ )
write_tensor_header(fout, tname, t.shape, t.dtype)
t.tofile(fout)
diff --git a/src/gguf-py/gguf/constants.py b/src/gguf-py/gguf/constants.py
index e343c2e..87a9acd 100644
--- a/src/gguf-py/gguf/constants.py
+++ b/src/gguf-py/gguf/constants.py
@@ -7,10 +7,10 @@
# constants
#
-GGUF_MAGIC = 0x46554747 # "GGUF"
-GGUF_VERSION = 3
+GGUF_MAGIC = 0x46554747 # "GGUF"
+GGUF_VERSION = 3
GGUF_DEFAULT_ALIGNMENT = 32
-GGML_QUANT_VERSION = 2 # GGML_QNT_VERSION from ggml.h
+GGML_QUANT_VERSION = 2 # GGML_QNT_VERSION from ggml.h
#
# metadata keys
@@ -19,415 +19,420 @@
class Keys:
class General:
- TYPE = "general.type"
- ARCHITECTURE = "general.architecture"
- QUANTIZATION_VERSION = "general.quantization_version"
- ALIGNMENT = "general.alignment"
- FILE_TYPE = "general.file_type"
+ TYPE = "general.type"
+ ARCHITECTURE = "general.architecture"
+ QUANTIZATION_VERSION = "general.quantization_version"
+ ALIGNMENT = "general.alignment"
+ FILE_TYPE = "general.file_type"
# Authorship Metadata
- NAME = "general.name"
- AUTHOR = "general.author"
- VERSION = "general.version"
- ORGANIZATION = "general.organization"
+ NAME = "general.name"
+ AUTHOR = "general.author"
+ VERSION = "general.version"
+ ORGANIZATION = "general.organization"
- FINETUNE = "general.finetune"
- BASENAME = "general.basename"
+ FINETUNE = "general.finetune"
+ BASENAME = "general.basename"
- DESCRIPTION = "general.description"
- QUANTIZED_BY = "general.quantized_by"
+ DESCRIPTION = "general.description"
+ QUANTIZED_BY = "general.quantized_by"
- SIZE_LABEL = "general.size_label"
+ SIZE_LABEL = "general.size_label"
# Licensing details
- LICENSE = "general.license"
- LICENSE_NAME = "general.license.name"
- LICENSE_LINK = "general.license.link"
+ LICENSE = "general.license"
+ LICENSE_NAME = "general.license.name"
+ LICENSE_LINK = "general.license.link"
# Typically represents the converted GGUF repo (Unless native)
- URL = "general.url" # Model Website/Paper
- DOI = "general.doi"
- UUID = "general.uuid"
- REPO_URL = "general.repo_url" # Model Source Repository (git/svn/etc...)
+ URL = "general.url" # Model Website/Paper
+ DOI = "general.doi"
+ UUID = "general.uuid"
+ REPO_URL = "general.repo_url" # Model Source Repository (git/svn/etc...)
# Model Source during conversion
- SOURCE_URL = "general.source.url" # Model Website/Paper
- SOURCE_DOI = "general.source.doi"
- SOURCE_UUID = "general.source.uuid"
- SOURCE_REPO_URL = "general.source.repo_url" # Model Source Repository (git/svn/etc...)
+ SOURCE_URL = "general.source.url" # Model Website/Paper
+ SOURCE_DOI = "general.source.doi"
+ SOURCE_UUID = "general.source.uuid"
+ SOURCE_REPO_URL = (
+ "general.source.repo_url" # Model Source Repository (git/svn/etc...)
+ )
# Base Model Source. There can be more than one source if it's a merged
# model like with 'Mistral-7B-Merge-14-v0.1'. This will assist in
# tracing linage of models as it is finetuned or merged over time.
- BASE_MODEL_COUNT = "general.base_model.count"
- BASE_MODEL_NAME = "general.base_model.{id}.name"
- BASE_MODEL_AUTHOR = "general.base_model.{id}.author"
- BASE_MODEL_VERSION = "general.base_model.{id}.version"
- BASE_MODEL_ORGANIZATION = "general.base_model.{id}.organization"
- BASE_MODEL_URL = "general.base_model.{id}.url" # Model Website/Paper
- BASE_MODEL_DOI = "general.base_model.{id}.doi"
- BASE_MODEL_UUID = "general.base_model.{id}.uuid"
- BASE_MODEL_REPO_URL = "general.base_model.{id}.repo_url" # Model Source Repository (git/svn/etc...)
+ BASE_MODEL_COUNT = "general.base_model.count"
+ BASE_MODEL_NAME = "general.base_model.{id}.name"
+ BASE_MODEL_AUTHOR = "general.base_model.{id}.author"
+ BASE_MODEL_VERSION = "general.base_model.{id}.version"
+ BASE_MODEL_ORGANIZATION = "general.base_model.{id}.organization"
+ BASE_MODEL_URL = "general.base_model.{id}.url" # Model Website/Paper
+ BASE_MODEL_DOI = "general.base_model.{id}.doi"
+ BASE_MODEL_UUID = "general.base_model.{id}.uuid"
+ BASE_MODEL_REPO_URL = "general.base_model.{id}.repo_url" # Model Source Repository (git/svn/etc...)
# Array based KV stores
- TAGS = "general.tags"
- LANGUAGES = "general.languages"
- DATASETS = "general.datasets"
+ TAGS = "general.tags"
+ LANGUAGES = "general.languages"
+ DATASETS = "general.datasets"
class LLM:
- VOCAB_SIZE = "{arch}.vocab_size"
- CONTEXT_LENGTH = "{arch}.context_length"
- EMBEDDING_LENGTH = "{arch}.embedding_length"
- BLOCK_COUNT = "{arch}.block_count"
- LEADING_DENSE_BLOCK_COUNT = "{arch}.leading_dense_block_count"
- FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
- EXPERT_FEED_FORWARD_LENGTH = "{arch}.expert_feed_forward_length"
+ VOCAB_SIZE = "{arch}.vocab_size"
+ CONTEXT_LENGTH = "{arch}.context_length"
+ EMBEDDING_LENGTH = "{arch}.embedding_length"
+ BLOCK_COUNT = "{arch}.block_count"
+ LEADING_DENSE_BLOCK_COUNT = "{arch}.leading_dense_block_count"
+ FEED_FORWARD_LENGTH = "{arch}.feed_forward_length"
+ EXPERT_FEED_FORWARD_LENGTH = "{arch}.expert_feed_forward_length"
EXPERT_SHARED_FEED_FORWARD_LENGTH = "{arch}.expert_shared_feed_forward_length"
- USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
- TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
- EXPERT_COUNT = "{arch}.expert_count"
- EXPERT_USED_COUNT = "{arch}.expert_used_count"
- EXPERT_SHARED_COUNT = "{arch}.expert_shared_count"
- EXPERT_WEIGHTS_SCALE = "{arch}.expert_weights_scale"
- POOLING_TYPE = "{arch}.pooling_type"
- LOGIT_SCALE = "{arch}.logit_scale"
- DECODER_START_TOKEN_ID = "{arch}.decoder_start_token_id"
- ATTN_LOGIT_SOFTCAPPING = "{arch}.attn_logit_softcapping"
- FINAL_LOGIT_SOFTCAPPING = "{arch}.final_logit_softcapping"
+ USE_PARALLEL_RESIDUAL = "{arch}.use_parallel_residual"
+ TENSOR_DATA_LAYOUT = "{arch}.tensor_data_layout"
+ EXPERT_COUNT = "{arch}.expert_count"
+ EXPERT_USED_COUNT = "{arch}.expert_used_count"
+ EXPERT_SHARED_COUNT = "{arch}.expert_shared_count"
+ EXPERT_WEIGHTS_SCALE = "{arch}.expert_weights_scale"
+ POOLING_TYPE = "{arch}.pooling_type"
+ LOGIT_SCALE = "{arch}.logit_scale"
+ DECODER_START_TOKEN_ID = "{arch}.decoder_start_token_id"
+ ATTN_LOGIT_SOFTCAPPING = "{arch}.attn_logit_softcapping"
+ FINAL_LOGIT_SOFTCAPPING = "{arch}.final_logit_softcapping"
class Attention:
- HEAD_COUNT = "{arch}.attention.head_count"
- HEAD_COUNT_KV = "{arch}.attention.head_count_kv"
- MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias"
- CLAMP_KQV = "{arch}.attention.clamp_kqv"
- KEY_LENGTH = "{arch}.attention.key_length"
- VALUE_LENGTH = "{arch}.attention.value_length"
- LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon"
+ HEAD_COUNT = "{arch}.attention.head_count"
+ HEAD_COUNT_KV = "{arch}.attention.head_count_kv"
+ MAX_ALIBI_BIAS = "{arch}.attention.max_alibi_bias"
+ CLAMP_KQV = "{arch}.attention.clamp_kqv"
+ KEY_LENGTH = "{arch}.attention.key_length"
+ VALUE_LENGTH = "{arch}.attention.value_length"
+ LAYERNORM_EPS = "{arch}.attention.layer_norm_epsilon"
LAYERNORM_RMS_EPS = "{arch}.attention.layer_norm_rms_epsilon"
- CAUSAL = "{arch}.attention.causal"
- Q_LORA_RANK = "{arch}.attention.q_lora_rank"
- KV_LORA_RANK = "{arch}.attention.kv_lora_rank"
+ CAUSAL = "{arch}.attention.causal"
+ Q_LORA_RANK = "{arch}.attention.q_lora_rank"
+ KV_LORA_RANK = "{arch}.attention.kv_lora_rank"
REL_BUCKETS_COUNT = "{arch}.attention.relative_buckets_count"
- SLIDING_WINDOW = "{arch}.attention.sliding_window"
+ SLIDING_WINDOW = "{arch}.attention.sliding_window"
class Rope:
- DIMENSION_COUNT = "{arch}.rope.dimension_count"
- FREQ_BASE = "{arch}.rope.freq_base"
- SCALING_TYPE = "{arch}.rope.scaling.type"
- SCALING_FACTOR = "{arch}.rope.scaling.factor"
- SCALING_ATTN_FACTOR = "{arch}.rope.scaling.attn_factor"
- SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length"
- SCALING_FINETUNED = "{arch}.rope.scaling.finetuned"
- SCALING_YARN_LOG_MUL = "{arch}.rope.scaling.yarn_log_multiplier"
+ DIMENSION_COUNT = "{arch}.rope.dimension_count"
+ FREQ_BASE = "{arch}.rope.freq_base"
+ SCALING_TYPE = "{arch}.rope.scaling.type"
+ SCALING_FACTOR = "{arch}.rope.scaling.factor"
+ SCALING_ATTN_FACTOR = "{arch}.rope.scaling.attn_factor"
+ SCALING_ORIG_CTX_LEN = "{arch}.rope.scaling.original_context_length"
+ SCALING_FINETUNED = "{arch}.rope.scaling.finetuned"
+ SCALING_YARN_LOG_MUL = "{arch}.rope.scaling.yarn_log_multiplier"
class Split:
- LLM_KV_SPLIT_NO = "split.no"
- LLM_KV_SPLIT_COUNT = "split.count"
+ LLM_KV_SPLIT_NO = "split.no"
+ LLM_KV_SPLIT_COUNT = "split.count"
LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count"
class SSM:
- CONV_KERNEL = "{arch}.ssm.conv_kernel"
- INNER_SIZE = "{arch}.ssm.inner_size"
- STATE_SIZE = "{arch}.ssm.state_size"
+ CONV_KERNEL = "{arch}.ssm.conv_kernel"
+ INNER_SIZE = "{arch}.ssm.inner_size"
+ STATE_SIZE = "{arch}.ssm.state_size"
TIME_STEP_RANK = "{arch}.ssm.time_step_rank"
class Tokenizer:
- MODEL = "tokenizer.ggml.model"
- PRE = "tokenizer.ggml.pre"
- LIST = "tokenizer.ggml.tokens"
- TOKEN_TYPE = "tokenizer.ggml.token_type"
- TOKEN_TYPE_COUNT = "tokenizer.ggml.token_type_count" # for BERT-style token types
- SCORES = "tokenizer.ggml.scores"
- MERGES = "tokenizer.ggml.merges"
- BOS_ID = "tokenizer.ggml.bos_token_id"
- EOS_ID = "tokenizer.ggml.eos_token_id"
- UNK_ID = "tokenizer.ggml.unknown_token_id"
- SEP_ID = "tokenizer.ggml.seperator_token_id"
- PAD_ID = "tokenizer.ggml.padding_token_id"
- CLS_ID = "tokenizer.ggml.cls_token_id"
- MASK_ID = "tokenizer.ggml.mask_token_id"
- ADD_BOS = "tokenizer.ggml.add_bos_token"
- ADD_EOS = "tokenizer.ggml.add_eos_token"
- ADD_PREFIX = "tokenizer.ggml.add_space_prefix"
- REMOVE_EXTRA_WS = "tokenizer.ggml.remove_extra_whitespaces"
+ MODEL = "tokenizer.ggml.model"
+ PRE = "tokenizer.ggml.pre"
+ LIST = "tokenizer.ggml.tokens"
+ TOKEN_TYPE = "tokenizer.ggml.token_type"
+ TOKEN_TYPE_COUNT = (
+ "tokenizer.ggml.token_type_count" # for BERT-style token types
+ )
+ SCORES = "tokenizer.ggml.scores"
+ MERGES = "tokenizer.ggml.merges"
+ BOS_ID = "tokenizer.ggml.bos_token_id"
+ EOS_ID = "tokenizer.ggml.eos_token_id"
+ UNK_ID = "tokenizer.ggml.unknown_token_id"
+ SEP_ID = "tokenizer.ggml.seperator_token_id"
+ PAD_ID = "tokenizer.ggml.padding_token_id"
+ CLS_ID = "tokenizer.ggml.cls_token_id"
+ MASK_ID = "tokenizer.ggml.mask_token_id"
+ ADD_BOS = "tokenizer.ggml.add_bos_token"
+ ADD_EOS = "tokenizer.ggml.add_eos_token"
+ ADD_PREFIX = "tokenizer.ggml.add_space_prefix"
+ REMOVE_EXTRA_WS = "tokenizer.ggml.remove_extra_whitespaces"
PRECOMPILED_CHARSMAP = "tokenizer.ggml.precompiled_charsmap"
- HF_JSON = "tokenizer.huggingface.json"
- RWKV = "tokenizer.rwkv.world"
- CHAT_TEMPLATE = "tokenizer.chat_template"
- CHAT_TEMPLATE_N = "tokenizer.chat_template.{name}"
- CHAT_TEMPLATES = "tokenizer.chat_templates"
+ HF_JSON = "tokenizer.huggingface.json"
+ RWKV = "tokenizer.rwkv.world"
+ CHAT_TEMPLATE = "tokenizer.chat_template"
+ CHAT_TEMPLATE_N = "tokenizer.chat_template.{name}"
+ CHAT_TEMPLATES = "tokenizer.chat_templates"
# FIM/Infill special tokens constants
- PREFIX_ID = "tokenizer.ggml.prefix_token_id"
- SUFFIX_ID = "tokenizer.ggml.suffix_token_id"
- MIDDLE_ID = "tokenizer.ggml.middle_token_id"
- EOT_ID = "tokenizer.ggml.eot_token_id"
+ PREFIX_ID = "tokenizer.ggml.prefix_token_id"
+ SUFFIX_ID = "tokenizer.ggml.suffix_token_id"
+ MIDDLE_ID = "tokenizer.ggml.middle_token_id"
+ EOT_ID = "tokenizer.ggml.eot_token_id"
class Adapter:
- TYPE = "adapter.type"
+ TYPE = "adapter.type"
LORA_ALPHA = "adapter.lora.alpha"
+
#
# recommended mapping of model tensor names for storage in gguf
#
class GGUFType:
- MODEL = "model"
+ MODEL = "model"
ADAPTER = "adapter"
class MODEL_ARCH(IntEnum):
- LLAMA = auto()
- FALCON = auto()
- BAICHUAN = auto()
- GROK = auto()
- GPT2 = auto()
- GPTJ = auto()
- GPTNEOX = auto()
- MPT = auto()
- STARCODER = auto()
- REFACT = auto()
- BERT = auto()
- NOMIC_BERT = auto()
+ LLAMA = auto()
+ FALCON = auto()
+ BAICHUAN = auto()
+ GROK = auto()
+ GPT2 = auto()
+ GPTJ = auto()
+ GPTNEOX = auto()
+ MPT = auto()
+ STARCODER = auto()
+ REFACT = auto()
+ BERT = auto()
+ NOMIC_BERT = auto()
JINA_BERT_V2 = auto()
- BLOOM = auto()
- STABLELM = auto()
- QWEN = auto()
- QWEN2 = auto()
- QWEN2MOE = auto()
- PHI2 = auto()
- PHI3 = auto()
- PLAMO = auto()
- CODESHELL = auto()
- ORION = auto()
- INTERNLM2 = auto()
- MINICPM = auto()
- GEMMA = auto()
- GEMMA2 = auto()
- STARCODER2 = auto()
- MAMBA = auto()
- XVERSE = auto()
- COMMAND_R = auto()
- DBRX = auto()
- OLMO = auto()
- OPENELM = auto()
- ARCTIC = auto()
- DEEPSEEK2 = auto()
- CHATGLM = auto()
- BITNET = auto()
- T5 = auto()
- JAIS = auto()
+ BLOOM = auto()
+ STABLELM = auto()
+ QWEN = auto()
+ QWEN2 = auto()
+ QWEN2MOE = auto()
+ PHI2 = auto()
+ PHI3 = auto()
+ PLAMO = auto()
+ CODESHELL = auto()
+ ORION = auto()
+ INTERNLM2 = auto()
+ MINICPM = auto()
+ GEMMA = auto()
+ GEMMA2 = auto()
+ STARCODER2 = auto()
+ MAMBA = auto()
+ XVERSE = auto()
+ COMMAND_R = auto()
+ DBRX = auto()
+ OLMO = auto()
+ OPENELM = auto()
+ ARCTIC = auto()
+ DEEPSEEK2 = auto()
+ CHATGLM = auto()
+ BITNET = auto()
+ T5 = auto()
+ JAIS = auto()
class MODEL_TENSOR(IntEnum):
- TOKEN_EMBD = auto()
- TOKEN_EMBD_NORM = auto()
- TOKEN_TYPES = auto()
- POS_EMBD = auto()
- OUTPUT = auto()
- OUTPUT_NORM = auto()
- ROPE_FREQS = auto()
- ROPE_FACTORS_LONG = auto()
- ROPE_FACTORS_SHORT = auto()
- ATTN_Q = auto()
- ATTN_K = auto()
- ATTN_V = auto()
- ATTN_QKV = auto()
- ATTN_OUT = auto()
- ATTN_NORM = auto()
- ATTN_NORM_2 = auto()
- ATTN_OUT_NORM = auto()
- ATTN_POST_NORM = auto()
- ATTN_ROT_EMBD = auto()
- FFN_GATE_INP = auto()
- FFN_GATE_INP_SHEXP = auto()
- FFN_NORM = auto()
- FFN_PRE_NORM = auto()
- FFN_POST_NORM = auto()
- FFN_GATE = auto()
- FFN_DOWN = auto()
- FFN_UP = auto()
- FFN_ACT = auto()
- FFN_NORM_EXP = auto()
- FFN_GATE_EXP = auto()
- FFN_DOWN_EXP = auto()
- FFN_UP_EXP = auto()
- FFN_GATE_SHEXP = auto()
- FFN_DOWN_SHEXP = auto()
- FFN_UP_SHEXP = auto()
- ATTN_Q_NORM = auto()
- ATTN_K_NORM = auto()
- LAYER_OUT_NORM = auto()
- SSM_IN = auto()
- SSM_CONV1D = auto()
- SSM_X = auto()
- SSM_DT = auto()
- SSM_A = auto()
- SSM_D = auto()
- SSM_OUT = auto()
- ATTN_Q_A = auto()
- ATTN_Q_B = auto()
- ATTN_KV_A_MQA = auto()
- ATTN_KV_B = auto()
- ATTN_Q_A_NORM = auto()
- ATTN_KV_A_NORM = auto()
- FFN_SUB_NORM = auto()
- ATTN_SUB_NORM = auto()
- DEC_ATTN_NORM = auto()
- DEC_ATTN_Q = auto()
- DEC_ATTN_K = auto()
- DEC_ATTN_V = auto()
- DEC_ATTN_OUT = auto()
- DEC_ATTN_REL_B = auto()
- DEC_CROSS_ATTN_NORM = auto()
- DEC_CROSS_ATTN_Q = auto()
- DEC_CROSS_ATTN_K = auto()
- DEC_CROSS_ATTN_V = auto()
- DEC_CROSS_ATTN_OUT = auto()
+ TOKEN_EMBD = auto()
+ TOKEN_EMBD_NORM = auto()
+ TOKEN_TYPES = auto()
+ POS_EMBD = auto()
+ OUTPUT = auto()
+ OUTPUT_NORM = auto()
+ ROPE_FREQS = auto()
+ ROPE_FACTORS_LONG = auto()
+ ROPE_FACTORS_SHORT = auto()
+ ATTN_Q = auto()
+ ATTN_K = auto()
+ ATTN_V = auto()
+ ATTN_QKV = auto()
+ ATTN_OUT = auto()
+ ATTN_NORM = auto()
+ ATTN_NORM_2 = auto()
+ ATTN_OUT_NORM = auto()
+ ATTN_POST_NORM = auto()
+ ATTN_ROT_EMBD = auto()
+ FFN_GATE_INP = auto()
+ FFN_GATE_INP_SHEXP = auto()
+ FFN_NORM = auto()
+ FFN_PRE_NORM = auto()
+ FFN_POST_NORM = auto()
+ FFN_GATE = auto()
+ FFN_DOWN = auto()
+ FFN_UP = auto()
+ FFN_ACT = auto()
+ FFN_NORM_EXP = auto()
+ FFN_GATE_EXP = auto()
+ FFN_DOWN_EXP = auto()
+ FFN_UP_EXP = auto()
+ FFN_GATE_SHEXP = auto()
+ FFN_DOWN_SHEXP = auto()
+ FFN_UP_SHEXP = auto()
+ ATTN_Q_NORM = auto()
+ ATTN_K_NORM = auto()
+ LAYER_OUT_NORM = auto()
+ SSM_IN = auto()
+ SSM_CONV1D = auto()
+ SSM_X = auto()
+ SSM_DT = auto()
+ SSM_A = auto()
+ SSM_D = auto()
+ SSM_OUT = auto()
+ ATTN_Q_A = auto()
+ ATTN_Q_B = auto()
+ ATTN_KV_A_MQA = auto()
+ ATTN_KV_B = auto()
+ ATTN_Q_A_NORM = auto()
+ ATTN_KV_A_NORM = auto()
+ FFN_SUB_NORM = auto()
+ ATTN_SUB_NORM = auto()
+ DEC_ATTN_NORM = auto()
+ DEC_ATTN_Q = auto()
+ DEC_ATTN_K = auto()
+ DEC_ATTN_V = auto()
+ DEC_ATTN_OUT = auto()
+ DEC_ATTN_REL_B = auto()
+ DEC_CROSS_ATTN_NORM = auto()
+ DEC_CROSS_ATTN_Q = auto()
+ DEC_CROSS_ATTN_K = auto()
+ DEC_CROSS_ATTN_V = auto()
+ DEC_CROSS_ATTN_OUT = auto()
DEC_CROSS_ATTN_REL_B = auto()
- DEC_FFN_NORM = auto()
- DEC_FFN_GATE = auto()
- DEC_FFN_DOWN = auto()
- DEC_FFN_UP = auto()
- DEC_OUTPUT_NORM = auto()
- ENC_ATTN_NORM = auto()
- ENC_ATTN_Q = auto()
- ENC_ATTN_K = auto()
- ENC_ATTN_V = auto()
- ENC_ATTN_OUT = auto()
- ENC_ATTN_REL_B = auto()
- ENC_FFN_NORM = auto()
- ENC_FFN_GATE = auto()
- ENC_FFN_DOWN = auto()
- ENC_FFN_UP = auto()
- ENC_OUTPUT_NORM = auto()
+ DEC_FFN_NORM = auto()
+ DEC_FFN_GATE = auto()
+ DEC_FFN_DOWN = auto()
+ DEC_FFN_UP = auto()
+ DEC_OUTPUT_NORM = auto()
+ ENC_ATTN_NORM = auto()
+ ENC_ATTN_Q = auto()
+ ENC_ATTN_K = auto()
+ ENC_ATTN_V = auto()
+ ENC_ATTN_OUT = auto()
+ ENC_ATTN_REL_B = auto()
+ ENC_FFN_NORM = auto()
+ ENC_FFN_GATE = auto()
+ ENC_FFN_DOWN = auto()
+ ENC_FFN_UP = auto()
+ ENC_OUTPUT_NORM = auto()
MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
- MODEL_ARCH.LLAMA: "llama",
- MODEL_ARCH.FALCON: "falcon",
- MODEL_ARCH.BAICHUAN: "baichuan",
- MODEL_ARCH.GROK: "grok",
- MODEL_ARCH.GPT2: "gpt2",
- MODEL_ARCH.GPTJ: "gptj",
- MODEL_ARCH.GPTNEOX: "gptneox",
- MODEL_ARCH.MPT: "mpt",
- MODEL_ARCH.STARCODER: "starcoder",
- MODEL_ARCH.REFACT: "refact",
- MODEL_ARCH.BERT: "bert",
- MODEL_ARCH.NOMIC_BERT: "nomic-bert",
- MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2",
- MODEL_ARCH.BLOOM: "bloom",
- MODEL_ARCH.STABLELM: "stablelm",
- MODEL_ARCH.QWEN: "qwen",
- MODEL_ARCH.QWEN2: "qwen2",
- MODEL_ARCH.QWEN2MOE: "qwen2moe",
- MODEL_ARCH.PHI2: "phi2",
- MODEL_ARCH.PHI3: "phi3",
- MODEL_ARCH.PLAMO: "plamo",
- MODEL_ARCH.CODESHELL: "codeshell",
- MODEL_ARCH.ORION: "orion",
- MODEL_ARCH.INTERNLM2: "internlm2",
- MODEL_ARCH.MINICPM: "minicpm",
- MODEL_ARCH.GEMMA: "gemma",
- MODEL_ARCH.GEMMA2: "gemma2",
- MODEL_ARCH.STARCODER2: "starcoder2",
- MODEL_ARCH.MAMBA: "mamba",
- MODEL_ARCH.XVERSE: "xverse",
- MODEL_ARCH.COMMAND_R: "command-r",
- MODEL_ARCH.DBRX: "dbrx",
- MODEL_ARCH.OLMO: "olmo",
- MODEL_ARCH.OPENELM: "openelm",
- MODEL_ARCH.ARCTIC: "arctic",
- MODEL_ARCH.DEEPSEEK2: "deepseek2",
- MODEL_ARCH.CHATGLM: "chatglm",
- MODEL_ARCH.BITNET: "bitnet",
- MODEL_ARCH.T5: "t5",
- MODEL_ARCH.JAIS: "jais",
+ MODEL_ARCH.LLAMA: "llama",
+ MODEL_ARCH.FALCON: "falcon",
+ MODEL_ARCH.BAICHUAN: "baichuan",
+ MODEL_ARCH.GROK: "grok",
+ MODEL_ARCH.GPT2: "gpt2",
+ MODEL_ARCH.GPTJ: "gptj",
+ MODEL_ARCH.GPTNEOX: "gptneox",
+ MODEL_ARCH.MPT: "mpt",
+ MODEL_ARCH.STARCODER: "starcoder",
+ MODEL_ARCH.REFACT: "refact",
+ MODEL_ARCH.BERT: "bert",
+ MODEL_ARCH.NOMIC_BERT: "nomic-bert",
+ MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2",
+ MODEL_ARCH.BLOOM: "bloom",
+ MODEL_ARCH.STABLELM: "stablelm",
+ MODEL_ARCH.QWEN: "qwen",
+ MODEL_ARCH.QWEN2: "qwen2",
+ MODEL_ARCH.QWEN2MOE: "qwen2moe",
+ MODEL_ARCH.PHI2: "phi2",
+ MODEL_ARCH.PHI3: "phi3",
+ MODEL_ARCH.PLAMO: "plamo",
+ MODEL_ARCH.CODESHELL: "codeshell",
+ MODEL_ARCH.ORION: "orion",
+ MODEL_ARCH.INTERNLM2: "internlm2",
+ MODEL_ARCH.MINICPM: "minicpm",
+ MODEL_ARCH.GEMMA: "gemma",
+ MODEL_ARCH.GEMMA2: "gemma2",
+ MODEL_ARCH.STARCODER2: "starcoder2",
+ MODEL_ARCH.MAMBA: "mamba",
+ MODEL_ARCH.XVERSE: "xverse",
+ MODEL_ARCH.COMMAND_R: "command-r",
+ MODEL_ARCH.DBRX: "dbrx",
+ MODEL_ARCH.OLMO: "olmo",
+ MODEL_ARCH.OPENELM: "openelm",
+ MODEL_ARCH.ARCTIC: "arctic",
+ MODEL_ARCH.DEEPSEEK2: "deepseek2",
+ MODEL_ARCH.CHATGLM: "chatglm",
+ MODEL_ARCH.BITNET: "bitnet",
+ MODEL_ARCH.T5: "t5",
+ MODEL_ARCH.JAIS: "jais",
}
TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
- MODEL_TENSOR.TOKEN_EMBD: "token_embd",
- MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm",
- MODEL_TENSOR.TOKEN_TYPES: "token_types",
- MODEL_TENSOR.POS_EMBD: "position_embd",
- MODEL_TENSOR.OUTPUT_NORM: "output_norm",
- MODEL_TENSOR.OUTPUT: "output",
- MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
- MODEL_TENSOR.ROPE_FACTORS_LONG: "rope_factors_long",
- MODEL_TENSOR.ROPE_FACTORS_SHORT: "rope_factors_short",
- MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
- MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
- MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
- MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
- MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
- MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
- MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
- MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
- MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm",
- MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm",
- MODEL_TENSOR.ATTN_OUT_NORM: "blk.{bid}.attn_output_norm",
- MODEL_TENSOR.ATTN_POST_NORM: "blk.{bid}.post_attention_norm",
- MODEL_TENSOR.FFN_GATE_INP: "blk.{bid}.ffn_gate_inp",
- MODEL_TENSOR.FFN_GATE_INP_SHEXP: "blk.{bid}.ffn_gate_inp_shexp",
- MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
- MODEL_TENSOR.FFN_PRE_NORM: "blk.{bid}.ffn_norm",
- MODEL_TENSOR.FFN_POST_NORM: "blk.{bid}.post_ffw_norm",
- MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
- MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
- MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
- MODEL_TENSOR.FFN_GATE_SHEXP: "blk.{bid}.ffn_gate_shexp",
- MODEL_TENSOR.FFN_DOWN_SHEXP: "blk.{bid}.ffn_down_shexp",
- MODEL_TENSOR.FFN_UP_SHEXP: "blk.{bid}.ffn_up_shexp",
- MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn",
- MODEL_TENSOR.FFN_NORM_EXP: "blk.{bid}.ffn_norm_exps",
- MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate_exps",
- MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down_exps",
- MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps",
- MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm",
- MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in",
- MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d",
- MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x",
- MODEL_TENSOR.SSM_DT: "blk.{bid}.ssm_dt",
- MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a",
- MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d",
- MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out",
- MODEL_TENSOR.ATTN_Q_A: "blk.{bid}.attn_q_a",
- MODEL_TENSOR.ATTN_Q_B: "blk.{bid}.attn_q_b",
- MODEL_TENSOR.ATTN_KV_A_MQA: "blk.{bid}.attn_kv_a_mqa",
- MODEL_TENSOR.ATTN_KV_B: "blk.{bid}.attn_kv_b",
- MODEL_TENSOR.ATTN_Q_A_NORM: "blk.{bid}.attn_q_a_norm",
- MODEL_TENSOR.ATTN_KV_A_NORM: "blk.{bid}.attn_kv_a_norm",
- MODEL_TENSOR.ATTN_SUB_NORM: "blk.{bid}.attn_sub_norm",
- MODEL_TENSOR.FFN_SUB_NORM: "blk.{bid}.ffn_sub_norm",
- MODEL_TENSOR.DEC_ATTN_NORM: "dec.blk.{bid}.attn_norm",
- MODEL_TENSOR.DEC_ATTN_Q: "dec.blk.{bid}.attn_q",
- MODEL_TENSOR.DEC_ATTN_K: "dec.blk.{bid}.attn_k",
- MODEL_TENSOR.DEC_ATTN_V: "dec.blk.{bid}.attn_v",
- MODEL_TENSOR.DEC_ATTN_OUT: "dec.blk.{bid}.attn_o",
- MODEL_TENSOR.DEC_ATTN_REL_B: "dec.blk.{bid}.attn_rel_b",
- MODEL_TENSOR.DEC_CROSS_ATTN_NORM: "dec.blk.{bid}.cross_attn_norm",
- MODEL_TENSOR.DEC_CROSS_ATTN_Q: "dec.blk.{bid}.cross_attn_q",
- MODEL_TENSOR.DEC_CROSS_ATTN_K: "dec.blk.{bid}.cross_attn_k",
- MODEL_TENSOR.DEC_CROSS_ATTN_V: "dec.blk.{bid}.cross_attn_v",
- MODEL_TENSOR.DEC_CROSS_ATTN_OUT: "dec.blk.{bid}.cross_attn_o",
+ MODEL_TENSOR.TOKEN_EMBD: "token_embd",
+ MODEL_TENSOR.TOKEN_EMBD_NORM: "token_embd_norm",
+ MODEL_TENSOR.TOKEN_TYPES: "token_types",
+ MODEL_TENSOR.POS_EMBD: "position_embd",
+ MODEL_TENSOR.OUTPUT_NORM: "output_norm",
+ MODEL_TENSOR.OUTPUT: "output",
+ MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
+ MODEL_TENSOR.ROPE_FACTORS_LONG: "rope_factors_long",
+ MODEL_TENSOR.ROPE_FACTORS_SHORT: "rope_factors_short",
+ MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
+ MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
+ MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
+ MODEL_TENSOR.ATTN_Q: "blk.{bid}.attn_q",
+ MODEL_TENSOR.ATTN_K: "blk.{bid}.attn_k",
+ MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
+ MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
+ MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
+ MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm",
+ MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm",
+ MODEL_TENSOR.ATTN_OUT_NORM: "blk.{bid}.attn_output_norm",
+ MODEL_TENSOR.ATTN_POST_NORM: "blk.{bid}.post_attention_norm",
+ MODEL_TENSOR.FFN_GATE_INP: "blk.{bid}.ffn_gate_inp",
+ MODEL_TENSOR.FFN_GATE_INP_SHEXP: "blk.{bid}.ffn_gate_inp_shexp",
+ MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
+ MODEL_TENSOR.FFN_PRE_NORM: "blk.{bid}.ffn_norm",
+ MODEL_TENSOR.FFN_POST_NORM: "blk.{bid}.post_ffw_norm",
+ MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
+ MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
+ MODEL_TENSOR.FFN_UP: "blk.{bid}.ffn_up",
+ MODEL_TENSOR.FFN_GATE_SHEXP: "blk.{bid}.ffn_gate_shexp",
+ MODEL_TENSOR.FFN_DOWN_SHEXP: "blk.{bid}.ffn_down_shexp",
+ MODEL_TENSOR.FFN_UP_SHEXP: "blk.{bid}.ffn_up_shexp",
+ MODEL_TENSOR.FFN_ACT: "blk.{bid}.ffn",
+ MODEL_TENSOR.FFN_NORM_EXP: "blk.{bid}.ffn_norm_exps",
+ MODEL_TENSOR.FFN_GATE_EXP: "blk.{bid}.ffn_gate_exps",
+ MODEL_TENSOR.FFN_DOWN_EXP: "blk.{bid}.ffn_down_exps",
+ MODEL_TENSOR.FFN_UP_EXP: "blk.{bid}.ffn_up_exps",
+ MODEL_TENSOR.LAYER_OUT_NORM: "blk.{bid}.layer_output_norm",
+ MODEL_TENSOR.SSM_IN: "blk.{bid}.ssm_in",
+ MODEL_TENSOR.SSM_CONV1D: "blk.{bid}.ssm_conv1d",
+ MODEL_TENSOR.SSM_X: "blk.{bid}.ssm_x",
+ MODEL_TENSOR.SSM_DT: "blk.{bid}.ssm_dt",
+ MODEL_TENSOR.SSM_A: "blk.{bid}.ssm_a",
+ MODEL_TENSOR.SSM_D: "blk.{bid}.ssm_d",
+ MODEL_TENSOR.SSM_OUT: "blk.{bid}.ssm_out",
+ MODEL_TENSOR.ATTN_Q_A: "blk.{bid}.attn_q_a",
+ MODEL_TENSOR.ATTN_Q_B: "blk.{bid}.attn_q_b",
+ MODEL_TENSOR.ATTN_KV_A_MQA: "blk.{bid}.attn_kv_a_mqa",
+ MODEL_TENSOR.ATTN_KV_B: "blk.{bid}.attn_kv_b",
+ MODEL_TENSOR.ATTN_Q_A_NORM: "blk.{bid}.attn_q_a_norm",
+ MODEL_TENSOR.ATTN_KV_A_NORM: "blk.{bid}.attn_kv_a_norm",
+ MODEL_TENSOR.ATTN_SUB_NORM: "blk.{bid}.attn_sub_norm",
+ MODEL_TENSOR.FFN_SUB_NORM: "blk.{bid}.ffn_sub_norm",
+ MODEL_TENSOR.DEC_ATTN_NORM: "dec.blk.{bid}.attn_norm",
+ MODEL_TENSOR.DEC_ATTN_Q: "dec.blk.{bid}.attn_q",
+ MODEL_TENSOR.DEC_ATTN_K: "dec.blk.{bid}.attn_k",
+ MODEL_TENSOR.DEC_ATTN_V: "dec.blk.{bid}.attn_v",
+ MODEL_TENSOR.DEC_ATTN_OUT: "dec.blk.{bid}.attn_o",
+ MODEL_TENSOR.DEC_ATTN_REL_B: "dec.blk.{bid}.attn_rel_b",
+ MODEL_TENSOR.DEC_CROSS_ATTN_NORM: "dec.blk.{bid}.cross_attn_norm",
+ MODEL_TENSOR.DEC_CROSS_ATTN_Q: "dec.blk.{bid}.cross_attn_q",
+ MODEL_TENSOR.DEC_CROSS_ATTN_K: "dec.blk.{bid}.cross_attn_k",
+ MODEL_TENSOR.DEC_CROSS_ATTN_V: "dec.blk.{bid}.cross_attn_v",
+ MODEL_TENSOR.DEC_CROSS_ATTN_OUT: "dec.blk.{bid}.cross_attn_o",
MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: "dec.blk.{bid}.cross_attn_rel_b",
- MODEL_TENSOR.DEC_FFN_NORM: "dec.blk.{bid}.ffn_norm",
- MODEL_TENSOR.DEC_FFN_GATE: "dec.blk.{bid}.ffn_gate",
- MODEL_TENSOR.DEC_FFN_DOWN: "dec.blk.{bid}.ffn_down",
- MODEL_TENSOR.DEC_FFN_UP: "dec.blk.{bid}.ffn_up",
- MODEL_TENSOR.DEC_OUTPUT_NORM: "dec.output_norm",
- MODEL_TENSOR.ENC_ATTN_NORM: "enc.blk.{bid}.attn_norm",
- MODEL_TENSOR.ENC_ATTN_Q: "enc.blk.{bid}.attn_q",
- MODEL_TENSOR.ENC_ATTN_K: "enc.blk.{bid}.attn_k",
- MODEL_TENSOR.ENC_ATTN_V: "enc.blk.{bid}.attn_v",
- MODEL_TENSOR.ENC_ATTN_OUT: "enc.blk.{bid}.attn_o",
- MODEL_TENSOR.ENC_ATTN_REL_B: "enc.blk.{bid}.attn_rel_b",
- MODEL_TENSOR.ENC_FFN_NORM: "enc.blk.{bid}.ffn_norm",
- MODEL_TENSOR.ENC_FFN_GATE: "enc.blk.{bid}.ffn_gate",
- MODEL_TENSOR.ENC_FFN_DOWN: "enc.blk.{bid}.ffn_down",
- MODEL_TENSOR.ENC_FFN_UP: "enc.blk.{bid}.ffn_up",
- MODEL_TENSOR.ENC_OUTPUT_NORM: "enc.output_norm",
+ MODEL_TENSOR.DEC_FFN_NORM: "dec.blk.{bid}.ffn_norm",
+ MODEL_TENSOR.DEC_FFN_GATE: "dec.blk.{bid}.ffn_gate",
+ MODEL_TENSOR.DEC_FFN_DOWN: "dec.blk.{bid}.ffn_down",
+ MODEL_TENSOR.DEC_FFN_UP: "dec.blk.{bid}.ffn_up",
+ MODEL_TENSOR.DEC_OUTPUT_NORM: "dec.output_norm",
+ MODEL_TENSOR.ENC_ATTN_NORM: "enc.blk.{bid}.attn_norm",
+ MODEL_TENSOR.ENC_ATTN_Q: "enc.blk.{bid}.attn_q",
+ MODEL_TENSOR.ENC_ATTN_K: "enc.blk.{bid}.attn_k",
+ MODEL_TENSOR.ENC_ATTN_V: "enc.blk.{bid}.attn_v",
+ MODEL_TENSOR.ENC_ATTN_OUT: "enc.blk.{bid}.attn_o",
+ MODEL_TENSOR.ENC_ATTN_REL_B: "enc.blk.{bid}.attn_rel_b",
+ MODEL_TENSOR.ENC_FFN_NORM: "enc.blk.{bid}.ffn_norm",
+ MODEL_TENSOR.ENC_FFN_GATE: "enc.blk.{bid}.ffn_gate",
+ MODEL_TENSOR.ENC_FFN_DOWN: "enc.blk.{bid}.ffn_down",
+ MODEL_TENSOR.ENC_FFN_UP: "enc.blk.{bid}.ffn_up",
+ MODEL_TENSOR.ENC_OUTPUT_NORM: "enc.output_norm",
}
MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
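
For orientation, the mapping that closes above (TENSOR_NAMES) holds format strings keyed by block index. A minimal sketch of how such an entry is expanded, assuming the package is importable as gguf (true when src/gguf-py is on the path):

    from gguf.constants import MODEL_TENSOR, TENSOR_NAMES

    # "blk.{bid}.attn_q" -> "blk.0.attn_q" for the first transformer block
    print(TENSOR_NAMES[MODEL_TENSOR.ATTN_Q].format(bid=0))
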
@@ -976,7 +981,7 @@ class MODEL_TENSOR(IntEnum):
MODEL_TENSOR.FFN_DOWN_SHEXP,
MODEL_TENSOR.FFN_UP_SHEXP,
],
- MODEL_ARCH.CHATGLM : [
+ MODEL_ARCH.CHATGLM: [
MODEL_TENSOR.TOKEN_EMBD,
MODEL_TENSOR.ROPE_FREQS,
MODEL_TENSOR.OUTPUT_NORM,
@@ -1095,56 +1100,56 @@ class MODEL_TENSOR(IntEnum):
class TokenType(IntEnum):
- NORMAL = 1
- UNKNOWN = 2
- CONTROL = 3
+ NORMAL = 1
+ UNKNOWN = 2
+ CONTROL = 3
USER_DEFINED = 4
- UNUSED = 5
- BYTE = 6
+ UNUSED = 5
+ BYTE = 6
class RopeScalingType(Enum):
- NONE = 'none'
- LINEAR = 'linear'
- YARN = 'yarn'
+ NONE = "none"
+ LINEAR = "linear"
+ YARN = "yarn"
class PoolingType(IntEnum):
NONE = 0
MEAN = 1
- CLS = 2
+ CLS = 2
class GGMLQuantizationType(IntEnum):
- F32 = 0
- F16 = 1
- Q4_0 = 2
- Q4_1 = 3
- Q5_0 = 6
- Q5_1 = 7
- Q8_0 = 8
- Q8_1 = 9
- Q2_K = 10
- Q3_K = 11
- Q4_K = 12
- Q5_K = 13
- Q6_K = 14
- Q8_K = 15
+ F32 = 0
+ F16 = 1
+ Q4_0 = 2
+ Q4_1 = 3
+ Q5_0 = 6
+ Q5_1 = 7
+ Q8_0 = 8
+ Q8_1 = 9
+ Q2_K = 10
+ Q3_K = 11
+ Q4_K = 12
+ Q5_K = 13
+ Q6_K = 14
+ Q8_K = 15
IQ2_XXS = 16
- IQ2_XS = 17
+ IQ2_XS = 17
IQ3_XXS = 18
- IQ1_S = 19
- IQ4_NL = 20
- IQ3_S = 21
- IQ2_S = 22
- IQ4_XS = 23
- I8 = 24
- I16 = 25
- I32 = 26
- I64 = 27
- F64 = 28
- IQ1_M = 29
- BF16 = 30
+ IQ1_S = 19
+ IQ4_NL = 20
+ IQ3_S = 21
+ IQ2_S = 22
+ IQ4_XS = 23
+ I8 = 24
+ I16 = 25
+ I32 = 26
+ I64 = 27
+ F64 = 28
+ IQ1_M = 29
+ BF16 = 30
# TODO: add GGMLFileType from ggml_ftype in ggml.h
@@ -1153,41 +1158,41 @@ class GGMLQuantizationType(IntEnum):
# from llama_ftype in llama.h
# ALL VALUES SHOULD BE THE SAME HERE AS THEY ARE OVER THERE.
class LlamaFileType(IntEnum):
- ALL_F32 = 0
- MOSTLY_F16 = 1 # except 1d tensors
- MOSTLY_Q4_0 = 2 # except 1d tensors
- MOSTLY_Q4_1 = 3 # except 1d tensors
- MOSTLY_Q4_1_SOME_F16 = 4 # tok_embeddings.weight and output.weight are F16
+ ALL_F32 = 0
+ MOSTLY_F16 = 1 # except 1d tensors
+ MOSTLY_Q4_0 = 2 # except 1d tensors
+ MOSTLY_Q4_1 = 3 # except 1d tensors
+ MOSTLY_Q4_1_SOME_F16 = 4 # tok_embeddings.weight and output.weight are F16
# MOSTLY_Q4_2 = 5 # support has been removed
# MOSTLY_Q4_3 = 6 # support has been removed
- MOSTLY_Q8_0 = 7 # except 1d tensors
- MOSTLY_Q5_0 = 8 # except 1d tensors
- MOSTLY_Q5_1 = 9 # except 1d tensors
- MOSTLY_Q2_K = 10 # except 1d tensors
- MOSTLY_Q3_K_S = 11 # except 1d tensors
- MOSTLY_Q3_K_M = 12 # except 1d tensors
- MOSTLY_Q3_K_L = 13 # except 1d tensors
- MOSTLY_Q4_K_S = 14 # except 1d tensors
- MOSTLY_Q4_K_M = 15 # except 1d tensors
- MOSTLY_Q5_K_S = 16 # except 1d tensors
- MOSTLY_Q5_K_M = 17 # except 1d tensors
- MOSTLY_Q6_K = 18 # except 1d tensors
- MOSTLY_IQ2_XXS = 19 # except 1d tensors
- MOSTLY_IQ2_XS = 20 # except 1d tensors
- MOSTLY_Q2_K_S = 21 # except 1d tensors
- MOSTLY_IQ3_XS = 22 # except 1d tensors
- MOSTLY_IQ3_XXS = 23 # except 1d tensors
- MOSTLY_IQ1_S = 24 # except 1d tensors
- MOSTLY_IQ4_NL = 25 # except 1d tensors
- MOSTLY_IQ3_S = 26 # except 1d tensors
- MOSTLY_IQ3_M = 27 # except 1d tensors
- MOSTLY_IQ2_S = 28 # except 1d tensors
- MOSTLY_IQ2_M = 29 # except 1d tensors
- MOSTLY_IQ4_XS = 30 # except 1d tensors
- MOSTLY_IQ1_M = 31 # except 1d tensors
- MOSTLY_BF16 = 32 # except 1d tensors
-
- GUESSED = 1024 # not specified in the model file
+ MOSTLY_Q8_0 = 7 # except 1d tensors
+ MOSTLY_Q5_0 = 8 # except 1d tensors
+ MOSTLY_Q5_1 = 9 # except 1d tensors
+ MOSTLY_Q2_K = 10 # except 1d tensors
+ MOSTLY_Q3_K_S = 11 # except 1d tensors
+ MOSTLY_Q3_K_M = 12 # except 1d tensors
+ MOSTLY_Q3_K_L = 13 # except 1d tensors
+ MOSTLY_Q4_K_S = 14 # except 1d tensors
+ MOSTLY_Q4_K_M = 15 # except 1d tensors
+ MOSTLY_Q5_K_S = 16 # except 1d tensors
+ MOSTLY_Q5_K_M = 17 # except 1d tensors
+ MOSTLY_Q6_K = 18 # except 1d tensors
+ MOSTLY_IQ2_XXS = 19 # except 1d tensors
+ MOSTLY_IQ2_XS = 20 # except 1d tensors
+ MOSTLY_Q2_K_S = 21 # except 1d tensors
+ MOSTLY_IQ3_XS = 22 # except 1d tensors
+ MOSTLY_IQ3_XXS = 23 # except 1d tensors
+ MOSTLY_IQ1_S = 24 # except 1d tensors
+ MOSTLY_IQ4_NL = 25 # except 1d tensors
+ MOSTLY_IQ3_S = 26 # except 1d tensors
+ MOSTLY_IQ3_M = 27 # except 1d tensors
+ MOSTLY_IQ2_S = 28 # except 1d tensors
+ MOSTLY_IQ2_M = 29 # except 1d tensors
+ MOSTLY_IQ4_XS = 30 # except 1d tensors
+ MOSTLY_IQ1_M = 31 # except 1d tensors
+ MOSTLY_BF16 = 32 # except 1d tensors
+
+ GUESSED = 1024 # not specified in the model file
class GGUFEndian(IntEnum):
@@ -1196,18 +1201,18 @@ class GGUFEndian(IntEnum):
class GGUFValueType(IntEnum):
- UINT8 = 0
- INT8 = 1
- UINT16 = 2
- INT16 = 3
- UINT32 = 4
- INT32 = 5
+ UINT8 = 0
+ INT8 = 1
+ UINT16 = 2
+ INT16 = 3
+ UINT32 = 4
+ INT32 = 5
FLOAT32 = 6
- BOOL = 7
- STRING = 8
- ARRAY = 9
- UINT64 = 10
- INT64 = 11
+ BOOL = 7
+ STRING = 8
+ ARRAY = 9
+ UINT64 = 10
+ INT64 = 11
FLOAT64 = 12
@staticmethod
@@ -1230,100 +1235,100 @@ def get_type(val: Any) -> GGUFValueType:
# Items here are (block size, type size)
QK_K = 256
GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
- GGMLQuantizationType.F32: (1, 4),
- GGMLQuantizationType.F16: (1, 2),
- GGMLQuantizationType.Q4_0: (32, 2 + 16),
- GGMLQuantizationType.Q4_1: (32, 2 + 2 + 16),
- GGMLQuantizationType.Q5_0: (32, 2 + 4 + 16),
- GGMLQuantizationType.Q5_1: (32, 2 + 2 + 4 + 16),
- GGMLQuantizationType.Q8_0: (32, 2 + 32),
- GGMLQuantizationType.Q8_1: (32, 4 + 4 + 32),
- GGMLQuantizationType.Q2_K: (256, 2 + 2 + QK_K // 16 + QK_K // 4),
- GGMLQuantizationType.Q3_K: (256, 2 + QK_K // 4 + QK_K // 8 + 12),
- GGMLQuantizationType.Q4_K: (256, 2 + 2 + QK_K // 2 + 12),
- GGMLQuantizationType.Q5_K: (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
- GGMLQuantizationType.Q6_K: (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
- GGMLQuantizationType.Q8_K: (256, 4 + QK_K + QK_K // 8),
+ GGMLQuantizationType.F32: (1, 4),
+ GGMLQuantizationType.F16: (1, 2),
+ GGMLQuantizationType.Q4_0: (32, 2 + 16),
+ GGMLQuantizationType.Q4_1: (32, 2 + 2 + 16),
+ GGMLQuantizationType.Q5_0: (32, 2 + 4 + 16),
+ GGMLQuantizationType.Q5_1: (32, 2 + 2 + 4 + 16),
+ GGMLQuantizationType.Q8_0: (32, 2 + 32),
+ GGMLQuantizationType.Q8_1: (32, 4 + 4 + 32),
+ GGMLQuantizationType.Q2_K: (256, 2 + 2 + QK_K // 16 + QK_K // 4),
+ GGMLQuantizationType.Q3_K: (256, 2 + QK_K // 4 + QK_K // 8 + 12),
+ GGMLQuantizationType.Q4_K: (256, 2 + 2 + QK_K // 2 + 12),
+ GGMLQuantizationType.Q5_K: (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
+ GGMLQuantizationType.Q6_K: (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
+ GGMLQuantizationType.Q8_K: (256, 4 + QK_K + QK_K // 8),
GGMLQuantizationType.IQ2_XXS: (256, 2 + QK_K // 4),
- GGMLQuantizationType.IQ2_XS: (256, 2 + QK_K // 4 + QK_K // 32),
+ GGMLQuantizationType.IQ2_XS: (256, 2 + QK_K // 4 + QK_K // 32),
GGMLQuantizationType.IQ3_XXS: (256, 2 + QK_K // 4 + QK_K // 8),
- GGMLQuantizationType.IQ1_S: (256, 2 + QK_K // 8 + QK_K // 16),
- GGMLQuantizationType.IQ4_NL: (32, 2 + 16),
- GGMLQuantizationType.IQ3_S: (256, 2 + QK_K // 4 + QK_K // 8 + QK_K // 32 + 4),
- GGMLQuantizationType.IQ2_S: (256, 2 + QK_K // 4 + QK_K // 16),
- GGMLQuantizationType.IQ4_XS: (256, 2 + 2 + QK_K // 2 + QK_K // 64),
- GGMLQuantizationType.I8: (1, 1),
- GGMLQuantizationType.I16: (1, 2),
- GGMLQuantizationType.I32: (1, 4),
- GGMLQuantizationType.I64: (1, 8),
- GGMLQuantizationType.F64: (1, 8),
- GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32),
- GGMLQuantizationType.BF16: (1, 2),
+ GGMLQuantizationType.IQ1_S: (256, 2 + QK_K // 8 + QK_K // 16),
+ GGMLQuantizationType.IQ4_NL: (32, 2 + 16),
+ GGMLQuantizationType.IQ3_S: (256, 2 + QK_K // 4 + QK_K // 8 + QK_K // 32 + 4),
+ GGMLQuantizationType.IQ2_S: (256, 2 + QK_K // 4 + QK_K // 16),
+ GGMLQuantizationType.IQ4_XS: (256, 2 + 2 + QK_K // 2 + QK_K // 64),
+ GGMLQuantizationType.I8: (1, 1),
+ GGMLQuantizationType.I16: (1, 2),
+ GGMLQuantizationType.I32: (1, 4),
+ GGMLQuantizationType.I64: (1, 8),
+ GGMLQuantizationType.F64: (1, 8),
+ GGMLQuantizationType.IQ1_M: (256, QK_K // 8 + QK_K // 16 + QK_K // 32),
+ GGMLQuantizationType.BF16: (1, 2),
}
# Aliases for backward compatibility.
# general
-KEY_GENERAL_ARCHITECTURE = Keys.General.ARCHITECTURE
+KEY_GENERAL_ARCHITECTURE = Keys.General.ARCHITECTURE
KEY_GENERAL_QUANTIZATION_VERSION = Keys.General.QUANTIZATION_VERSION
-KEY_GENERAL_ALIGNMENT = Keys.General.ALIGNMENT
-KEY_GENERAL_NAME = Keys.General.NAME
-KEY_GENERAL_AUTHOR = Keys.General.AUTHOR
-KEY_GENERAL_URL = Keys.General.URL
-KEY_GENERAL_DESCRIPTION = Keys.General.DESCRIPTION
-KEY_GENERAL_LICENSE = Keys.General.LICENSE
-KEY_GENERAL_SOURCE_URL = Keys.General.SOURCE_URL
-KEY_GENERAL_FILE_TYPE = Keys.General.FILE_TYPE
+KEY_GENERAL_ALIGNMENT = Keys.General.ALIGNMENT
+KEY_GENERAL_NAME = Keys.General.NAME
+KEY_GENERAL_AUTHOR = Keys.General.AUTHOR
+KEY_GENERAL_URL = Keys.General.URL
+KEY_GENERAL_DESCRIPTION = Keys.General.DESCRIPTION
+KEY_GENERAL_LICENSE = Keys.General.LICENSE
+KEY_GENERAL_SOURCE_URL = Keys.General.SOURCE_URL
+KEY_GENERAL_FILE_TYPE = Keys.General.FILE_TYPE
# LLM
-KEY_VOCAB_SIZE = Keys.LLM.VOCAB_SIZE
-KEY_CONTEXT_LENGTH = Keys.LLM.CONTEXT_LENGTH
-KEY_EMBEDDING_LENGTH = Keys.LLM.EMBEDDING_LENGTH
-KEY_BLOCK_COUNT = Keys.LLM.BLOCK_COUNT
-KEY_FEED_FORWARD_LENGTH = Keys.LLM.FEED_FORWARD_LENGTH
+KEY_VOCAB_SIZE = Keys.LLM.VOCAB_SIZE
+KEY_CONTEXT_LENGTH = Keys.LLM.CONTEXT_LENGTH
+KEY_EMBEDDING_LENGTH = Keys.LLM.EMBEDDING_LENGTH
+KEY_BLOCK_COUNT = Keys.LLM.BLOCK_COUNT
+KEY_FEED_FORWARD_LENGTH = Keys.LLM.FEED_FORWARD_LENGTH
KEY_USE_PARALLEL_RESIDUAL = Keys.LLM.USE_PARALLEL_RESIDUAL
-KEY_TENSOR_DATA_LAYOUT = Keys.LLM.TENSOR_DATA_LAYOUT
+KEY_TENSOR_DATA_LAYOUT = Keys.LLM.TENSOR_DATA_LAYOUT
# attention
-KEY_ATTENTION_HEAD_COUNT = Keys.Attention.HEAD_COUNT
-KEY_ATTENTION_HEAD_COUNT_KV = Keys.Attention.HEAD_COUNT_KV
-KEY_ATTENTION_MAX_ALIBI_BIAS = Keys.Attention.MAX_ALIBI_BIAS
-KEY_ATTENTION_CLAMP_KQV = Keys.Attention.CLAMP_KQV
-KEY_ATTENTION_LAYERNORM_EPS = Keys.Attention.LAYERNORM_EPS
+KEY_ATTENTION_HEAD_COUNT = Keys.Attention.HEAD_COUNT
+KEY_ATTENTION_HEAD_COUNT_KV = Keys.Attention.HEAD_COUNT_KV
+KEY_ATTENTION_MAX_ALIBI_BIAS = Keys.Attention.MAX_ALIBI_BIAS
+KEY_ATTENTION_CLAMP_KQV = Keys.Attention.CLAMP_KQV
+KEY_ATTENTION_LAYERNORM_EPS = Keys.Attention.LAYERNORM_EPS
KEY_ATTENTION_LAYERNORM_RMS_EPS = Keys.Attention.LAYERNORM_RMS_EPS
# RoPE
-KEY_ROPE_DIMENSION_COUNT = Keys.Rope.DIMENSION_COUNT
-KEY_ROPE_FREQ_BASE = Keys.Rope.FREQ_BASE
-KEY_ROPE_SCALING_TYPE = Keys.Rope.SCALING_TYPE
-KEY_ROPE_SCALING_FACTOR = Keys.Rope.SCALING_FACTOR
+KEY_ROPE_DIMENSION_COUNT = Keys.Rope.DIMENSION_COUNT
+KEY_ROPE_FREQ_BASE = Keys.Rope.FREQ_BASE
+KEY_ROPE_SCALING_TYPE = Keys.Rope.SCALING_TYPE
+KEY_ROPE_SCALING_FACTOR = Keys.Rope.SCALING_FACTOR
KEY_ROPE_SCALING_ORIG_CTX_LEN = Keys.Rope.SCALING_ORIG_CTX_LEN
-KEY_ROPE_SCALING_FINETUNED = Keys.Rope.SCALING_FINETUNED
+KEY_ROPE_SCALING_FINETUNED = Keys.Rope.SCALING_FINETUNED
# SSM
-KEY_SSM_CONV_KERNEL = Keys.SSM.CONV_KERNEL
-KEY_SSM_INNER_SIZE = Keys.SSM.INNER_SIZE
-KEY_SSM_STATE_SIZE = Keys.SSM.STATE_SIZE
+KEY_SSM_CONV_KERNEL = Keys.SSM.CONV_KERNEL
+KEY_SSM_INNER_SIZE = Keys.SSM.INNER_SIZE
+KEY_SSM_STATE_SIZE = Keys.SSM.STATE_SIZE
KEY_SSM_TIME_STEP_RANK = Keys.SSM.TIME_STEP_RANK
# tokenization
-KEY_TOKENIZER_MODEL = Keys.Tokenizer.MODEL
-KEY_TOKENIZER_PRE = Keys.Tokenizer.PRE
-KEY_TOKENIZER_LIST = Keys.Tokenizer.LIST
+KEY_TOKENIZER_MODEL = Keys.Tokenizer.MODEL
+KEY_TOKENIZER_PRE = Keys.Tokenizer.PRE
+KEY_TOKENIZER_LIST = Keys.Tokenizer.LIST
KEY_TOKENIZER_TOKEN_TYPE = Keys.Tokenizer.TOKEN_TYPE
-KEY_TOKENIZER_SCORES = Keys.Tokenizer.SCORES
-KEY_TOKENIZER_MERGES = Keys.Tokenizer.MERGES
-KEY_TOKENIZER_BOS_ID = Keys.Tokenizer.BOS_ID
-KEY_TOKENIZER_EOS_ID = Keys.Tokenizer.EOS_ID
-KEY_TOKENIZER_UNK_ID = Keys.Tokenizer.UNK_ID
-KEY_TOKENIZER_SEP_ID = Keys.Tokenizer.SEP_ID
-KEY_TOKENIZER_PAD_ID = Keys.Tokenizer.PAD_ID
-KEY_TOKENIZER_CLS_ID = Keys.Tokenizer.CLS_ID
-KEY_TOKENIZER_MASK_ID = Keys.Tokenizer.MASK_ID
-KEY_TOKENIZER_HF_JSON = Keys.Tokenizer.HF_JSON
-KEY_TOKENIZER_RWKV = Keys.Tokenizer.RWKV
-KEY_TOKENIZER_PRIFIX_ID = Keys.Tokenizer.PREFIX_ID
-KEY_TOKENIZER_SUFFIX_ID = Keys.Tokenizer.SUFFIX_ID
-KEY_TOKENIZER_MIDDLE_ID = Keys.Tokenizer.MIDDLE_ID
-KEY_TOKENIZER_EOT_ID = Keys.Tokenizer.EOT_ID
+KEY_TOKENIZER_SCORES = Keys.Tokenizer.SCORES
+KEY_TOKENIZER_MERGES = Keys.Tokenizer.MERGES
+KEY_TOKENIZER_BOS_ID = Keys.Tokenizer.BOS_ID
+KEY_TOKENIZER_EOS_ID = Keys.Tokenizer.EOS_ID
+KEY_TOKENIZER_UNK_ID = Keys.Tokenizer.UNK_ID
+KEY_TOKENIZER_SEP_ID = Keys.Tokenizer.SEP_ID
+KEY_TOKENIZER_PAD_ID = Keys.Tokenizer.PAD_ID
+KEY_TOKENIZER_CLS_ID = Keys.Tokenizer.CLS_ID
+KEY_TOKENIZER_MASK_ID = Keys.Tokenizer.MASK_ID
+KEY_TOKENIZER_HF_JSON = Keys.Tokenizer.HF_JSON
+KEY_TOKENIZER_RWKV = Keys.Tokenizer.RWKV
+KEY_TOKENIZER_PRIFIX_ID = Keys.Tokenizer.PREFIX_ID
+KEY_TOKENIZER_SUFFIX_ID = Keys.Tokenizer.SUFFIX_ID
+KEY_TOKENIZER_MIDDLE_ID = Keys.Tokenizer.MIDDLE_ID
+KEY_TOKENIZER_EOT_ID = Keys.Tokenizer.EOT_ID
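
As an aside, the (block size, type size) pairs in GGML_QUANT_SIZES above fully determine a quantized tensor's byte size. A minimal sketch, assuming the package imports as gguf; the helper name quant_nbytes is made up for illustration:

    from gguf.constants import GGML_QUANT_SIZES, GGMLQuantizationType

    def quant_nbytes(n_elements: int, qtype: GGMLQuantizationType) -> int:
        # Each quant type stores fixed-size blocks of block_size elements,
        # each block occupying type_size bytes.
        block_size, type_size = GGML_QUANT_SIZES[qtype]
        assert n_elements % block_size == 0, "element count must fill whole blocks"
        return (n_elements // block_size) * type_size

    # Q4_K: blocks of 256 elements, 2 + 2 + 128 + 12 = 144 bytes each,
    # so a 4096x4096 tensor takes 16777216 // 256 * 144 = 9437184 bytes.
    print(quant_nbytes(4096 * 4096, GGMLQuantizationType.Q4_K))
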
diff --git a/src/gguf-py/gguf/gguf_reader.py b/src/gguf-py/gguf/gguf_reader.py
index e8e61ab..a5fc908 100644
--- a/src/gguf-py/gguf/gguf_reader.py
+++ b/src/gguf-py/gguf/gguf_reader.py
@@ -67,32 +67,34 @@ class ReaderTensor(NamedTuple):
class GGUFReader:
# I - same as host, S - swapped
- byte_order: Literal['I', 'S'] = 'I'
+ byte_order: Literal["I", "S"] = "I"
alignment: int = GGUF_DEFAULT_ALIGNMENT
data_offset: int
# Note: Internal helper, API may change.
gguf_scalar_to_np: dict[GGUFValueType, type[np.generic]] = {
- GGUFValueType.UINT8: np.uint8,
- GGUFValueType.INT8: np.int8,
- GGUFValueType.UINT16: np.uint16,
- GGUFValueType.INT16: np.int16,
- GGUFValueType.UINT32: np.uint32,
- GGUFValueType.INT32: np.int32,
+ GGUFValueType.UINT8: np.uint8,
+ GGUFValueType.INT8: np.int8,
+ GGUFValueType.UINT16: np.uint16,
+ GGUFValueType.INT16: np.int16,
+ GGUFValueType.UINT32: np.uint32,
+ GGUFValueType.INT32: np.int32,
GGUFValueType.FLOAT32: np.float32,
- GGUFValueType.UINT64: np.uint64,
- GGUFValueType.INT64: np.int64,
+ GGUFValueType.UINT64: np.uint64,
+ GGUFValueType.INT64: np.int64,
GGUFValueType.FLOAT64: np.float64,
- GGUFValueType.BOOL: np.bool_,
+ GGUFValueType.BOOL: np.bool_,
}
- def __init__(self, path: os.PathLike[str] | str, mode: Literal['r', 'r+', 'c'] = 'r'):
- self.data = np.memmap(path, mode = mode)
+ def __init__(
+ self, path: os.PathLike[str] | str, mode: Literal["r", "r+", "c"] = "r"
+ ):
+ self.data = np.memmap(path, mode=mode)
offs = 0
# Check for GGUF magic
- if self._get(offs, np.uint32, override_order = '<')[0] != GGUF_MAGIC:
- raise ValueError('GGUF magic invalid')
+ if self._get(offs, np.uint32, override_order="<")[0] != GGUF_MAGIC:
+ raise ValueError("GGUF magic invalid")
offs += 4
# Check GGUF version
@@ -100,28 +102,46 @@ def __init__(self, path: os.PathLike[str] | str, mode: Literal['r', 'r+', 'c'] =
if temp_version[0] & 65535 == 0:
# If we get 0 here that means it's (probably) a GGUF file created for
# the opposite byte order of the machine this script is running on.
- self.byte_order = 'S'
+ self.byte_order = "S"
temp_version = temp_version.newbyteorder(self.byte_order)
version = temp_version[0]
if version not in READER_SUPPORTED_VERSIONS:
- raise ValueError(f'Sorry, file appears to be version {version} which we cannot handle')
+ raise ValueError(
+ f"Sorry, file appears to be version {version} which we cannot handle"
+ )
self.fields: OrderedDict[str, ReaderField] = OrderedDict()
self.tensors: list[ReaderTensor] = []
- offs += self._push_field(ReaderField(offs, 'GGUF.version', [temp_version], [0], [GGUFValueType.UINT32]))
+ offs += self._push_field(
+ ReaderField(
+ offs, "GGUF.version", [temp_version], [0], [GGUFValueType.UINT32]
+ )
+ )
# Check tensor count and kv count
temp_counts = self._get(offs, np.uint64, 2)
- offs += self._push_field(ReaderField(offs, 'GGUF.tensor_count', [temp_counts[:1]], [0], [GGUFValueType.UINT64]))
- offs += self._push_field(ReaderField(offs, 'GGUF.kv_count', [temp_counts[1:]], [0], [GGUFValueType.UINT64]))
+ offs += self._push_field(
+ ReaderField(
+ offs,
+ "GGUF.tensor_count",
+ [temp_counts[:1]],
+ [0],
+ [GGUFValueType.UINT64],
+ )
+ )
+ offs += self._push_field(
+ ReaderField(
+ offs, "GGUF.kv_count", [temp_counts[1:]], [0], [GGUFValueType.UINT64]
+ )
+ )
tensor_count, kv_count = temp_counts
offs = self._build_fields(offs, kv_count)
# Build Tensor Info Fields
offs, tensors_fields = self._build_tensor_info(offs, tensor_count)
- new_align = self.fields.get('general.alignment')
+ new_align = self.fields.get("general.alignment")
if new_align is not None:
if new_align.types != [GGUFValueType.UINT32]:
- raise ValueError('Bad type for general.alignment field')
+ raise ValueError("Bad type for general.alignment field")
self.alignment = new_align.parts[-1][0]
padding = offs % self.alignment
if padding != 0:
@@ -129,7 +149,7 @@ def __init__(self, path: os.PathLike[str] | str, mode: Literal['r', 'r+', 'c'] =
self.data_offset = offs
self._build_tensors(offs, tensors_fields)
- _DT = TypeVar('_DT', bound = npt.DTypeLike)
+ _DT = TypeVar("_DT", bound=npt.DTypeLike)
# Fetch a key/value metadata field by key.
def get_field(self, key: str) -> Union[ReaderField, None]:
@@ -140,14 +160,18 @@ def get_tensor(self, idx: int) -> ReaderTensor:
return self.tensors[idx]
def _get(
- self, offset: int, dtype: npt.DTypeLike, count: int = 1, override_order: None | Literal['I', 'S', '<'] = None,
+ self,
+ offset: int,
+ dtype: npt.DTypeLike,
+ count: int = 1,
+ override_order: None | Literal["I", "S", "<"] = None,
) -> npt.NDArray[Any]:
count = int(count)
- itemsize = int(np.empty([], dtype = dtype).itemsize)
+ itemsize = int(np.empty([], dtype=dtype).itemsize)
end_offs = offset + itemsize * count
return (
self.data[offset:end_offs]
- .view(dtype = dtype)[:count]
+ .view(dtype=dtype)[:count]
.newbyteorder(override_order or self.byte_order)
)
@@ -156,18 +180,22 @@ def _push_field(self, field: ReaderField, skip_sum: bool = False) -> int:
# TODO: add option to generate error on duplicate keys
# raise KeyError(f'Duplicate {field.name} already in list at offset {field.offset}')
- logger.warning(f'Duplicate key {field.name} at offset {field.offset}')
- self.fields[field.name + '_{}'.format(field.offset)] = field
+ logger.warning(f"Duplicate key {field.name} at offset {field.offset}")
+ self.fields[field.name + "_{}".format(field.offset)] = field
else:
self.fields[field.name] = field
return 0 if skip_sum else sum(int(part.nbytes) for part in field.parts)
- def _get_str(self, offset: int) -> tuple[npt.NDArray[np.uint64], npt.NDArray[np.uint8]]:
+ def _get_str(
+ self, offset: int
+ ) -> tuple[npt.NDArray[np.uint64], npt.NDArray[np.uint8]]:
slen = self._get(offset, np.uint64)
return slen, self._get(offset + 8, np.uint8, slen[0])
def _get_field_parts(
- self, orig_offs: int, raw_type: int,
+ self,
+ orig_offs: int,
+ raw_type: int,
) -> tuple[int, list[npt.NDArray[Any]], list[int], list[GGUFValueType]]:
offs = orig_offs
types: list[GGUFValueType] = []
@@ -192,7 +220,9 @@ def _get_field_parts(
aparts: list[npt.NDArray[Any]] = [raw_itype, alen]
data_idxs: list[int] = []
for idx in range(alen[0]):
- curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(offs, raw_itype[0])
+ curr_size, curr_parts, curr_idxs, curr_types = self._get_field_parts(
+ offs, raw_itype[0]
+ )
if idx == 0:
types += curr_types
idxs_offs = len(aparts)
@@ -201,7 +231,7 @@ def _get_field_parts(
offs += curr_size
return offs - orig_offs, aparts, data_idxs, types
# We can't deal with this one.
- raise ValueError('Unknown/unhandled field type {gtype}')
+ raise ValueError("Unknown/unhandled field type {gtype}")
def _get_tensor_info_field(self, orig_offs: int) -> ReaderField:
offs = orig_offs
@@ -228,7 +258,7 @@ def _get_tensor_info_field(self, orig_offs: int) -> ReaderField:
return ReaderField(
orig_offs,
- str(bytes(name_data), encoding = 'utf-8'),
+ str(bytes(name_data), encoding="utf-8"),
[name_len, name_data, n_dims, dims, raw_dtype, offset_tensor],
[1, 3, 4, 5],
)
@@ -242,19 +272,26 @@ def _build_fields(self, offs: int, count: int) -> int:
offs += int(raw_kv_type.nbytes)
parts: list[npt.NDArray[Any]] = [kv_klen, kv_kdata, raw_kv_type]
idxs_offs = len(parts)
- field_size, field_parts, field_idxs, field_types = self._get_field_parts(offs, raw_kv_type[0])
+ field_size, field_parts, field_idxs, field_types = self._get_field_parts(
+ offs, raw_kv_type[0]
+ )
parts += field_parts
- self._push_field(ReaderField(
- orig_offs,
- str(bytes(kv_kdata), encoding = 'utf-8'),
- parts,
- [idx + idxs_offs for idx in field_idxs],
- field_types,
- ), skip_sum = True)
+ self._push_field(
+ ReaderField(
+ orig_offs,
+ str(bytes(kv_kdata), encoding="utf-8"),
+ parts,
+ [idx + idxs_offs for idx in field_idxs],
+ field_types,
+ ),
+ skip_sum=True,
+ )
offs += field_size
return offs
- def _build_tensor_info(self, offs: int, count: int) -> tuple[int, list[ReaderField]]:
+ def _build_tensor_info(
+ self, offs: int, count: int
+ ) -> tuple[int, list[ReaderField]]:
tensor_fields = []
for _ in range(count):
field = self._get_tensor_info_field(offs)
@@ -264,13 +301,13 @@ def _build_tensor_info(self, offs: int, count: int) -> tuple[int, list[ReaderFie
def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None:
tensors = []
- tensor_names = set() # keep track of name to prevent duplicated tensors
+ tensor_names = set() # keep track of name to prevent duplicated tensors
for field in fields:
_name_len, name_data, _n_dims, dims, raw_dtype, offset_tensor = field.parts
# check if there's any tensor having same name already in the list
- tensor_name = str(bytes(name_data), encoding = 'utf-8')
+ tensor_name = str(bytes(name_data), encoding="utf-8")
if tensor_name in tensor_names:
- raise ValueError(f'Found duplicated tensor with name {tensor_name}')
+ raise ValueError(f"Found duplicated tensor with name {tensor_name}")
tensor_names.add(tensor_name)
ggml_type = GGMLQuantizationType(raw_dtype[0])
n_elems = int(np.prod(dims))
@@ -304,14 +341,16 @@ def _build_tensors(self, start_offs: int, fields: list[ReaderField]) -> None:
item_count = n_bytes
item_type = np.uint8
np_dims = quant_shape_to_byte_shape(np_dims, ggml_type)
- tensors.append(ReaderTensor(
- name = tensor_name,
- tensor_type = ggml_type,
- shape = dims,
- n_elements = n_elems,
- n_bytes = n_bytes,
- data_offset = data_offs,
- data = self._get(data_offs, item_type, item_count).reshape(np_dims),
- field = field,
- ))
+ tensors.append(
+ ReaderTensor(
+ name=tensor_name,
+ tensor_type=ggml_type,
+ shape=dims,
+ n_elements=n_elems,
+ n_bytes=n_bytes,
+ data_offset=data_offs,
+ data=self._get(data_offs, item_type, item_count).reshape(np_dims),
+ field=field,
+ )
+ )
self.tensors = tensors
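
For context, the reader reformatted above memory-maps a GGUF file and materializes its key/value fields and tensor directory on construction. A minimal usage sketch (model.gguf is a hypothetical path):

    from gguf.gguf_reader import GGUFReader

    reader = GGUFReader("model.gguf")  # parses magic, version, KV data, tensor info
    for name, field in reader.fields.items():
        print(name, [t.name for t in field.types])
    for tensor in reader.tensors:
        # tensor.data is a view into the memory map, already reshaped
        print(tensor.name, tensor.tensor_type.name, tensor.n_bytes)
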
diff --git a/src/gguf-py/gguf/gguf_writer.py b/src/gguf-py/gguf/gguf_writer.py
index 2e0b335..df0c894 100644
--- a/src/gguf-py/gguf/gguf_writer.py
+++ b/src/gguf-py/gguf/gguf_writer.py
@@ -52,8 +52,8 @@ class GGUFValue:
class WriterState(Enum):
NO_FILE = auto()
- EMPTY = auto()
- HEADER = auto()
+ EMPTY = auto()
+ HEADER = auto()
KV_DATA = auto()
TI_DATA = auto()
WEIGHTS = auto()
@@ -67,22 +67,29 @@ class GGUFWriter:
kv_data: list[dict[str, GGUFValue]]
state: WriterState
_simple_value_packing = {
- GGUFValueType.UINT8: "B",
- GGUFValueType.INT8: "b",
- GGUFValueType.UINT16: "H",
- GGUFValueType.INT16: "h",
- GGUFValueType.UINT32: "I",
- GGUFValueType.INT32: "i",
+ GGUFValueType.UINT8: "B",
+ GGUFValueType.INT8: "b",
+ GGUFValueType.UINT16: "H",
+ GGUFValueType.INT16: "h",
+ GGUFValueType.UINT32: "I",
+ GGUFValueType.INT32: "i",
GGUFValueType.FLOAT32: "f",
- GGUFValueType.UINT64: "Q",
- GGUFValueType.INT64: "q",
+ GGUFValueType.UINT64: "Q",
+ GGUFValueType.INT64: "q",
GGUFValueType.FLOAT64: "d",
- GGUFValueType.BOOL: "?",
+ GGUFValueType.BOOL: "?",
}
def __init__(
- self, path: os.PathLike[str] | str | None, arch: str, use_temp_file: bool = False, endianess: GGUFEndian = GGUFEndian.LITTLE,
- split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False
+ self,
+ path: os.PathLike[str] | str | None,
+ arch: str,
+ use_temp_file: bool = False,
+ endianess: GGUFEndian = GGUFEndian.LITTLE,
+ split_max_tensors: int = 0,
+ split_max_size: int = 0,
+ dry_run: bool = False,
+ small_first_shard: bool = False,
):
self.fout = None
self.path = Path(path) if path else None
@@ -97,9 +104,11 @@ def __init__(
self.split_max_size = split_max_size
self.dry_run = dry_run
self.small_first_shard = small_first_shard
- logger.info("gguf: This GGUF file is for {0} Endian only".format(
- "Big" if self.endianess == GGUFEndian.BIG else "Little",
- ))
+ logger.info(
+ "gguf: This GGUF file is for {0} Endian only".format(
+ "Big" if self.endianess == GGUFEndian.BIG else "Little",
+ )
+ )
self.state = WriterState.NO_FILE
if self.small_first_shard:
@@ -128,7 +137,9 @@ def get_total_parameter_count(self) -> tuple[int, int, int, int]:
elif name.endswith(".lora_b"):
if last_lora_a is None or last_lora_a[0] != name[:-1] + "a":
# Bail when the LoRA pair can't be found trivially
- logger.warning("can't measure LoRA size correctly, tensor order is unusual")
+ logger.warning(
+ "can't measure LoRA size correctly, tensor order is unusual"
+ )
return 0, 0, 0, 0
else:
shape = (*shape[:-1], last_lora_a[1].shape[-1])
@@ -136,7 +147,7 @@ def get_total_parameter_count(self) -> tuple[int, int, int, int]:
size = prod(shape)
if "_exps." in name:
- expert_params += (size // shape[-3])
+ expert_params += size // shape[-3]
expert_sum += shape[-3]
n_expert_tensors += 1
else:
@@ -157,15 +168,26 @@ def get_total_parameter_count(self) -> tuple[int, int, int, int]:
def format_shard_names(self, path: Path) -> list[Path]:
if len(self.tensors) == 1:
return [path]
- return [path.with_name(SHARD_NAME_FORMAT.format(path.stem, i + 1, len(self.tensors))) for i in range(len(self.tensors))]
+ return [
+ path.with_name(
+ SHARD_NAME_FORMAT.format(path.stem, i + 1, len(self.tensors))
+ )
+ for i in range(len(self.tensors))
+ ]
def open_output_file(self, path: Path | None = None) -> None:
- if self.state is WriterState.EMPTY and self.fout is not None and (path is None or path == self.path):
+ if (
+ self.state is WriterState.EMPTY
+ and self.fout is not None
+ and (path is None or path == self.path)
+ ):
# allow calling this multiple times as long as the path is the same
return
if self.state is not WriterState.NO_FILE:
- raise ValueError(f'Expected output file to be not yet opened, got {self.state}')
+ raise ValueError(
+ f"Expected output file to be not yet opened, got {self.state}"
+ )
if path is not None:
self.path = path
@@ -181,7 +203,9 @@ def print_plan(self) -> list[Path]:
filenames = self.format_shard_names(self.path)
assert len(filenames) == len(self.tensors)
for name, tensors in zip(filenames, self.tensors):
- logger.info(f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(sum(ti.nbytes for ti in tensors.values()))}")
+ logger.info(
+ f"{name}: n_tensors = {len(tensors)}, total_size = {GGUFWriter.format_n_bytes_to_str(sum(ti.nbytes for ti in tensors.values()))}"
+ )
if self.dry_run:
logger.info("Dry run, not writing files")
@@ -201,17 +225,23 @@ def add_shard_kv_data(self) -> None:
self.kv_data.extend({} for _ in range(len(self.kv_data), total_splits))
for i, kv_data in enumerate(self.kv_data):
kv_data[Keys.Split.LLM_KV_SPLIT_NO] = GGUFValue(i, GGUFValueType.UINT16)
- kv_data[Keys.Split.LLM_KV_SPLIT_COUNT] = GGUFValue(total_splits, GGUFValueType.UINT16)
- kv_data[Keys.Split.LLM_KV_SPLIT_TENSORS_COUNT] = GGUFValue(total_tensors, GGUFValueType.INT32)
+ kv_data[Keys.Split.LLM_KV_SPLIT_COUNT] = GGUFValue(
+ total_splits, GGUFValueType.UINT16
+ )
+ kv_data[Keys.Split.LLM_KV_SPLIT_TENSORS_COUNT] = GGUFValue(
+ total_tensors, GGUFValueType.INT32
+ )
def write_header_to_file(self, path: Path | None = None) -> None:
- if len(self.tensors) == 1 and (self.split_max_tensors != 0 or self.split_max_size != 0):
+ if len(self.tensors) == 1 and (
+ self.split_max_tensors != 0 or self.split_max_size != 0
+ ):
logger.warning("Model fails split requirements, not splitting")
self.open_output_file(path)
if self.state is not WriterState.EMPTY:
- raise ValueError(f'Expected output file to be empty, got {self.state}')
+ raise ValueError(f"Expected output file to be empty, got {self.state}")
assert self.fout is not None
assert len(self.fout) == len(self.tensors)
@@ -220,7 +250,7 @@ def write_header_to_file(self, path: Path | None = None) -> None:
self.add_shard_kv_data()
for fout, tensors, kv_data in zip(self.fout, self.tensors, self.kv_data):
- fout.write(self._pack(" None:
def write_kv_data_to_file(self) -> None:
if self.state is not WriterState.HEADER:
- raise ValueError(f'Expected output file to contain the header, got {self.state}')
+ raise ValueError(
+ f"Expected output file to contain the header, got {self.state}"
+ )
assert self.fout is not None
for fout, kv_data in zip(self.fout, self.kv_data):
@@ -246,7 +278,9 @@ def write_kv_data_to_file(self) -> None:
def write_ti_data_to_file(self) -> None:
if self.state is not WriterState.KV_DATA:
- raise ValueError(f'Expected output file to contain KV data, got {self.state}')
+ raise ValueError(
+ f"Expected output file to contain KV data, got {self.state}"
+ )
assert self.fout is not None
for fout, tensors in zip(self.fout, self.tensors):
@@ -269,12 +303,12 @@ def write_ti_data_to_file(self) -> None:
def add_key_value(self, key: str, val: Any, vtype: GGUFValueType) -> None:
if any(key in kv_data for kv_data in self.kv_data):
- raise ValueError(f'Duplicated key name {key!r}')
+ raise ValueError(f"Duplicated key name {key!r}")
self.kv_data[0][key] = GGUFValue(value=val, type=vtype)
def add_uint8(self, key: str, val: int) -> None:
- self.add_key_value(key,val, GGUFValueType.UINT8)
+ self.add_key_value(key, val, GGUFValueType.UINT8)
def add_int8(self, key: str, val: int) -> None:
self.add_key_value(key, val, GGUFValueType.INT8)
@@ -321,14 +355,20 @@ def ggml_pad(x: int, n: int) -> int:
return ((x + n - 1) // n) * n
def add_tensor_info(
- self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype,
- tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None,
+ self,
+ name: str,
+ tensor_shape: Sequence[int],
+ tensor_dtype: np.dtype,
+ tensor_nbytes: int,
+ raw_dtype: GGMLQuantizationType | None = None,
) -> None:
if self.state is not WriterState.NO_FILE:
- raise ValueError(f'Expected output file to be not yet opened, got {self.state}')
+ raise ValueError(
+ f"Expected output file to be not yet opened, got {self.state}"
+ )
if any(name in tensors for tensors in self.tensors):
- raise ValueError(f'Duplicated tensor name {name!r}')
+ raise ValueError(f"Duplicated tensor name {name!r}")
if raw_dtype is None:
if tensor_dtype == np.float16:
@@ -346,7 +386,9 @@ def add_tensor_info(
elif tensor_dtype == np.int64:
dtype = GGMLQuantizationType.I64
else:
- raise ValueError("Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now")
+ raise ValueError(
+ "Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now"
+ )
else:
dtype = raw_dtype
if tensor_dtype == np.uint8:
@@ -357,16 +399,22 @@ def add_tensor_info(
if ( # split when over tensor limit
self.split_max_tensors != 0
and len(self.tensors[-1]) >= self.split_max_tensors
- ) or ( # split when over size limit
+ ) or ( # split when over size limit
self.split_max_size != 0
- and sum(ti.nbytes for ti in self.tensors[-1].values()) + tensor_nbytes > self.split_max_size
+ and sum(ti.nbytes for ti in self.tensors[-1].values()) + tensor_nbytes
+ > self.split_max_size
):
self.tensors.append({})
- self.tensors[-1][name] = TensorInfo(shape=tensor_shape, dtype=dtype, nbytes=tensor_nbytes)
+ self.tensors[-1][name] = TensorInfo(
+ shape=tensor_shape, dtype=dtype, nbytes=tensor_nbytes
+ )
def add_tensor(
- self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None,
+ self,
+ name: str,
+ tensor: np.ndarray[Any, Any],
+ raw_shape: Sequence[int] | None = None,
raw_dtype: GGMLQuantizationType | None = None,
) -> None:
if self.endianess == GGUFEndian.BIG:
@@ -377,7 +425,9 @@ def add_tensor(
self.temp_file = fp
shape: Sequence[int] = raw_shape if raw_shape is not None else tensor.shape
- self.add_tensor_info(name, shape, tensor.dtype, tensor.nbytes, raw_dtype=raw_dtype)
+ self.add_tensor_info(
+ name, shape, tensor.dtype, tensor.nbytes, raw_dtype=raw_dtype
+ )
if self.temp_file is None:
self.tensors[-1][name].tensor = tensor
@@ -387,13 +437,21 @@ def add_tensor(
self.write_padding(self.temp_file, tensor.nbytes)
def write_padding(self, fp: IO[bytes], n: int, align: int | None = None) -> None:
- pad = GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment) - n
+ pad = (
+ GGUFWriter.ggml_pad(n, align if align is not None else self.data_alignment)
+ - n
+ )
if pad != 0:
fp.write(bytes([0] * pad))
def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
- if self.state is not WriterState.TI_DATA and self.state is not WriterState.WEIGHTS:
- raise ValueError(f'Expected output file to contain tensor info or weights, got {self.state}')
+ if (
+ self.state is not WriterState.TI_DATA
+ and self.state is not WriterState.WEIGHTS
+ ):
+ raise ValueError(
+ f"Expected output file to contain tensor info or weights, got {self.state}"
+ )
assert self.fout is not None
if self.endianess == GGUFEndian.BIG:
@@ -409,7 +467,9 @@ def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
# pop the first tensor info
# TODO: cleaner way to get the first key
- first_tensor_name = [name for name, _ in zip(self.tensors[file_id].keys(), range(1))][0]
+ first_tensor_name = [
+ name for name, _ in zip(self.tensors[file_id].keys(), range(1))
+ ][0]
ti = self.tensors[file_id].pop(first_tensor_name)
assert ti.nbytes == tensor.nbytes
@@ -437,8 +497,15 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None:
total_bytes = sum(ti.nbytes for t in self.tensors for ti in t.values())
if len(self.fout) > 1:
- shard_bar = tqdm(desc=f"Shard (0/{len(self.fout)})", total=None, unit="byte", unit_scale=True)
- bar = tqdm(desc="Writing", total=total_bytes, unit="byte", unit_scale=True)
+ shard_bar = tqdm(
+ desc=f"Shard (0/{len(self.fout)})",
+ total=None,
+ unit="byte",
+ unit_scale=True,
+ )
+ bar = tqdm(
+ desc="Writing", total=total_bytes, unit="byte", unit_scale=True
+ )
for i, (fout, tensors) in enumerate(zip(self.fout, self.tensors)):
if shard_bar is not None:
@@ -448,7 +515,9 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None:
# relying on the fact that Python dicts preserve insertion order (since 3.7)
for ti in tensors.values():
- assert ti.tensor is not None # can only iterate once over the tensors
+ assert (
+ ti.tensor is not None
+ ) # can only iterate once over the tensors
assert ti.tensor.nbytes == ti.nbytes
ti.tensor.tofile(fout)
if shard_bar is not None:
@@ -460,7 +529,9 @@ def write_tensors_to_file(self, *, progress: bool = False) -> None:
else:
self.temp_file.seek(0)
- shutil.copyfileobj(self.temp_file, self.fout[0 if not self.small_first_shard else 1])
+ shutil.copyfileobj(
+ self.temp_file, self.fout[0 if not self.small_first_shard else 1]
+ )
self.flush()
self.temp_file.close()
@@ -566,7 +637,9 @@ def add_base_model_version(self, source_id: int, version: str) -> None:
self.add_string(Keys.General.BASE_MODEL_VERSION.format(id=source_id), version)
def add_base_model_organization(self, source_id: int, organization: str) -> None:
- self.add_string(Keys.General.BASE_MODEL_ORGANIZATION.format(id=source_id), organization)
+ self.add_string(
+ Keys.General.BASE_MODEL_ORGANIZATION.format(id=source_id), organization
+ )
def add_base_model_url(self, source_id: int, url: str) -> None:
self.add_string(Keys.General.BASE_MODEL_URL.format(id=source_id), url)
@@ -605,7 +678,9 @@ def add_block_count(self, length: int) -> None:
self.add_uint32(Keys.LLM.BLOCK_COUNT.format(arch=self.arch), length)
def add_leading_dense_block_count(self, length: int) -> None:
- self.add_uint32(Keys.LLM.LEADING_DENSE_BLOCK_COUNT.format(arch=self.arch), length)
+ self.add_uint32(
+ Keys.LLM.LEADING_DENSE_BLOCK_COUNT.format(arch=self.arch), length
+ )
def add_feed_forward_length(self, length: int | Sequence[int]) -> None:
if isinstance(length, int):
@@ -614,10 +689,14 @@ def add_feed_forward_length(self, length: int | Sequence[int]) -> None:
self.add_array(Keys.LLM.FEED_FORWARD_LENGTH.format(arch=self.arch), length)
def add_expert_feed_forward_length(self, length: int) -> None:
- self.add_uint32(Keys.LLM.EXPERT_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
+ self.add_uint32(
+ Keys.LLM.EXPERT_FEED_FORWARD_LENGTH.format(arch=self.arch), length
+ )
def add_expert_shared_feed_forward_length(self, length: int) -> None:
- self.add_uint32(Keys.LLM.EXPERT_SHARED_FEED_FORWARD_LENGTH.format(arch=self.arch), length)
+ self.add_uint32(
+ Keys.LLM.EXPERT_SHARED_FEED_FORWARD_LENGTH.format(arch=self.arch), length
+ )
def add_parallel_residual(self, use: bool) -> None:
self.add_bool(Keys.LLM.USE_PARALLEL_RESIDUAL.format(arch=self.arch), use)
@@ -736,10 +815,14 @@ def add_tokenizer_model(self, model: str) -> None:
def add_tokenizer_pre(self, pre: str) -> None:
self.add_string(Keys.Tokenizer.PRE, pre)
- def add_token_list(self, tokens: Sequence[str] | Sequence[bytes] | Sequence[bytearray]) -> None:
+ def add_token_list(
+ self, tokens: Sequence[str] | Sequence[bytes] | Sequence[bytearray]
+ ) -> None:
self.add_array(Keys.Tokenizer.LIST, tokens)
- def add_token_merges(self, merges: Sequence[str] | Sequence[bytes] | Sequence[bytearray]) -> None:
+ def add_token_merges(
+ self, merges: Sequence[str] | Sequence[bytes] | Sequence[bytearray]
+ ) -> None:
self.add_array(Keys.Tokenizer.MERGES, merges)
def add_token_types(self, types: Sequence[TokenType] | Sequence[int]) -> None:
@@ -793,18 +876,22 @@ def add_chat_template(self, value: str | Sequence[Mapping[str, str]]) -> None:
template_names = set()
for choice in value:
- name = choice.get('name', '')
- template = choice.get('template')
+ name = choice.get("name", "")
+ template = choice.get("template")
# Allowing non-alphanumerical characters in template name is probably not a good idea, so filter it
- name = ''.join((c if c in ascii_letters + digits else '_' for c in name))
+ name = "".join(
+ (c if c in ascii_letters + digits else "_" for c in name)
+ )
if name and template is not None:
- if name == 'default':
+ if name == "default":
template_default = template
else:
template_names.add(name)
- self.add_string(Keys.Tokenizer.CHAT_TEMPLATE_N.format(name=name), template)
+ self.add_string(
+ Keys.Tokenizer.CHAT_TEMPLATE_N.format(name=name), template
+ )
if template_names:
self.add_array(Keys.Tokenizer.CHAT_TEMPLATES, list(template_names))
@@ -829,10 +916,10 @@ def add_eot_token_id(self, id: int) -> None:
self.add_uint32(Keys.Tokenizer.EOT_ID, id)
def _pack(self, fmt: str, value: Any, skip_pack_prefix: bool = False) -> bytes:
- pack_prefix = ''
+ pack_prefix = ""
if not skip_pack_prefix:
- pack_prefix = '<' if self.endianess == GGUFEndian.LITTLE else '>'
- return struct.pack(f'{pack_prefix}{fmt}', value)
+ pack_prefix = "<" if self.endianess == GGUFEndian.LITTLE else ">"
+ return struct.pack(f"{pack_prefix}{fmt}", value)
def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes:
kv_data = bytearray()
@@ -842,7 +929,9 @@ def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes:
pack_fmt = self._simple_value_packing.get(vtype)
if pack_fmt is not None:
- kv_data += self._pack(pack_fmt, val, skip_pack_prefix = vtype == GGUFValueType.BOOL)
+ kv_data += self._pack(
+ pack_fmt, val, skip_pack_prefix=vtype == GGUFValueType.BOOL
+ )
elif vtype == GGUFValueType.STRING:
encoded_val = val.encode("utf-8") if isinstance(val, str) else val
kv_data += self._pack("Q", len(encoded_val))
@@ -860,7 +949,9 @@ def _pack_val(self, val: Any, vtype: GGUFValueType, add_vtype: bool) -> bytes:
else:
ltype = GGUFValueType.get_type(val[0])
if not all(GGUFValueType.get_type(i) is ltype for i in val[1:]):
- raise ValueError("All items in a GGUF array should be of the same type")
+ raise ValueError(
+ "All items in a GGUF array should be of the same type"
+ )
kv_data += self._pack("I", ltype)
kv_data += self._pack("Q", len(val))
for item in val:
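
The WriterState checks threaded through this file encode a fixed call order: header, then KV data, then tensor info, then weights. A minimal end-to-end sketch under that assumption (the output path and key name are hypothetical):

    import numpy as np
    from gguf.gguf_writer import GGUFWriter

    writer = GGUFWriter("out.gguf", arch="llama")
    writer.add_uint32("example.count", 3)  # stored via add_key_value()
    writer.add_tensor("tensor0", np.zeros((32, 32), dtype=np.float32))
    writer.write_header_to_file()    # EMPTY -> HEADER
    writer.write_kv_data_to_file()   # HEADER -> KV_DATA
    writer.write_tensors_to_file()   # TI data, then weights
    writer.close()
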
diff --git a/src/gguf-py/gguf/lazy.py b/src/gguf-py/gguf/lazy.py
index ac98d9a..8746ad6 100644
--- a/src/gguf-py/gguf/lazy.py
+++ b/src/gguf-py/gguf/lazy.py
@@ -13,7 +13,9 @@
class LazyMeta(ABCMeta):
- def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs):
+ def __new__(
+ cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs
+ ):
def __getattr__(self, name: str) -> Any:
meta_attr = getattr(self._meta, name)
if callable(meta_attr):
@@ -41,6 +43,7 @@ def wrapped_special_op(self, *args, **kwargs):
getattr(type(self)._tensor_type, op_name),
meta_noop=meta_noop,
)(self, *args, **kwargs)
+
return wrapped_special_op
# special methods bypass __getattr__, so they need to be added manually
@@ -48,11 +51,48 @@ def wrapped_special_op(self, *args, **kwargs):
# NOTE: doing this from a metaclass is very convenient
# TODO: make this even more comprehensive
for binary_op in (
- "lt", "le", "eq", "ne", "ge", "gt", "not"
- "abs", "add", "and", "floordiv", "invert", "lshift", "mod", "mul", "matmul",
- "neg", "or", "pos", "pow", "rshift", "sub", "truediv", "xor",
- "iadd", "iand", "ifloordiv", "ilshift", "imod", "imul", "ior", "irshift", "isub", "ixor",
- "radd", "rand", "rfloordiv", "rmul", "ror", "rpow", "rsub", "rtruediv", "rxor",
+ "lt",
+ "le",
+ "eq",
+ "ne",
+ "ge",
+ "gt",
+ "not" "abs",
+ "add",
+ "and",
+ "floordiv",
+ "invert",
+ "lshift",
+ "mod",
+ "mul",
+ "matmul",
+ "neg",
+ "or",
+ "pos",
+ "pow",
+ "rshift",
+ "sub",
+ "truediv",
+ "xor",
+ "iadd",
+ "iand",
+ "ifloordiv",
+ "ilshift",
+ "imod",
+ "imul",
+ "ior",
+ "irshift",
+ "isub",
+ "ixor",
+ "radd",
+ "rand",
+ "rfloordiv",
+ "rmul",
+ "ror",
+ "rpow",
+ "rsub",
+ "rtruediv",
+ "rxor",
):
attr_name = f"__{binary_op}__"
# the result of these operators usually has the same shape and dtype as the input,
@@ -60,7 +100,9 @@ def wrapped_special_op(self, *args, **kwargs):
namespace[attr_name] = mk_wrap(attr_name, meta_noop=True)
for special_op in (
- "getitem", "setitem", "len",
+ "getitem",
+ "setitem",
+ "len",
):
attr_name = f"__{special_op}__"
namespace[attr_name] = mk_wrap(attr_name, meta_noop=False)
@@ -77,7 +119,15 @@ class LazyBase(ABC, metaclass=LazyMeta):
_kwargs: dict[str, Any]
_func: Callable[[Any], Any] | None
- def __init__(self, *, meta: Any, data: Any | None = None, args: tuple = (), kwargs: dict[str, Any] | None = None, func: Callable[[Any], Any] | None = None):
+ def __init__(
+ self,
+ *,
+ meta: Any,
+ data: Any | None = None,
+ args: tuple = (),
+ kwargs: dict[str, Any] | None = None,
+ func: Callable[[Any], Any] | None = None,
+ ):
super().__init__()
self._meta = meta
self._data = data
@@ -107,7 +157,17 @@ def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
return o
@classmethod
- def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]] = False) -> Callable[[Any], Any]:
+ def _wrap_fn(
+ cls,
+ fn: Callable,
+ *,
+ use_self: LazyBase | None = None,
+ meta_noop: (
+ bool
+ | DTypeLike
+ | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]]
+ ) = False,
+ ) -> Callable[[Any], Any]:
def wrapped_fn(*args, **kwargs):
if kwargs is None:
kwargs = {}
@@ -138,13 +198,16 @@ def wrapped_fn(*args, **kwargs):
res = cls.meta_with_dtype_and_shape(meta_noop, res.shape)
if isinstance(res, cls._tensor_type):
- return cls(meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn)
+ return cls(
+ meta=cls.eager_to_meta(res), args=args, kwargs=kwargs, func=fn
+ )
else:
del res # not needed
# non-tensor return likely relies on the contents of the args
# (e.g. the result of torch.equal)
eager_args = cls.to_eager(args)
return fn(*eager_args, **kwargs)
+
return wrapped_fn
@classmethod
@@ -175,7 +238,8 @@ def eager_to_meta(cls, t: Any) -> Any:
# must be overridden, meta tensor init is backend-specific
@classmethod
@abstractmethod
- def meta_with_dtype_and_shape(cls, dtype: Any, shape: Any) -> Any: pass
+ def meta_with_dtype_and_shape(cls, dtype: Any, shape: Any) -> Any:
+ pass
@classmethod
def from_eager(cls, t: Any) -> Any:
@@ -192,7 +256,9 @@ class LazyNumpyTensor(LazyBase):
_tensor_type = np.ndarray
@classmethod
- def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) -> np.ndarray[Any, Any]:
+ def meta_with_dtype_and_shape(
+ cls, dtype: DTypeLike, shape: tuple[int, ...]
+ ) -> np.ndarray[Any, Any]:
# The initial idea was to use np.nan as the fill value,
# but non-float types like np.int16 can't use that.
# So zero it is.
@@ -201,8 +267,16 @@ def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: tuple[int, ...]) ->
def astype(self, dtype, *args, **kwargs):
meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape)
- full_args = (self, dtype,) + args
- return type(self)(meta=meta, args=full_args, kwargs=kwargs, func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs)))
+ full_args = (
+ self,
+ dtype,
+ ) + args
+ return type(self)(
+ meta=meta,
+ args=full_args,
+ kwargs=kwargs,
+ func=(lambda a, *args, **kwargs: a.astype(*args, **kwargs)),
+ )
def tofile(self, *args, **kwargs):
eager = LazyNumpyTensor.to_eager(self)
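
What the metaclass machinery above buys: operations on a lazy tensor are recorded against a dtype/shape-only "meta" array and evaluated only when the data is actually needed (to_eager or tofile). A minimal sketch:

    import numpy as np
    from gguf.lazy import LazyNumpyTensor

    lazy = LazyNumpyTensor.from_eager(np.ones((4, 4), dtype=np.float32))
    halved = lazy.astype(np.float16)          # recorded, not computed yet
    eager = LazyNumpyTensor.to_eager(halved)  # evaluation happens here
    print(eager.dtype)                        # float16
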
diff --git a/src/gguf-py/gguf/metadata.py b/src/gguf-py/gguf/metadata.py
index 15189f7..18f3704 100644
--- a/src/gguf-py/gguf/metadata.py
+++ b/src/gguf-py/gguf/metadata.py
@@ -44,7 +44,12 @@ class Metadata:
datasets: Optional[list[str]] = None
@staticmethod
- def load(metadata_override_path: Optional[Path] = None, model_path: Optional[Path] = None, model_name: Optional[str] = None, total_params: int = 0) -> Metadata:
+ def load(
+ metadata_override_path: Optional[Path] = None,
+ model_path: Optional[Path] = None,
+ model_name: Optional[str] = None,
+ total_params: int = 0,
+ ) -> Metadata:
# This grabs as many contextual authorship metadata as possible from the model repository
# making any conversion as required to match the gguf kv store metadata format
# as well as giving users the ability to override any authorship metadata that may be incorrect
@@ -57,43 +62,77 @@ def load(metadata_override_path: Optional[Path] = None, model_path: Optional[Pat
# TODO: load adapter_config.json when possible, it usually contains the base model of the LoRA adapter
# heuristics
- metadata = Metadata.apply_metadata_heuristic(metadata, model_card, hf_params, model_path, total_params)
+ metadata = Metadata.apply_metadata_heuristic(
+ metadata, model_card, hf_params, model_path, total_params
+ )
# Metadata Override File Provided
# This is based on LLM_KV_NAMES mapping in llama.cpp
metadata_override = Metadata.load_metadata_override(metadata_override_path)
- metadata.name = metadata_override.get(Keys.General.NAME, metadata.name)
- metadata.author = metadata_override.get(Keys.General.AUTHOR, metadata.author)
- metadata.version = metadata_override.get(Keys.General.VERSION, metadata.version)
- metadata.organization = metadata_override.get(Keys.General.ORGANIZATION, metadata.organization)
-
- metadata.finetune = metadata_override.get(Keys.General.FINETUNE, metadata.finetune)
- metadata.basename = metadata_override.get(Keys.General.BASENAME, metadata.basename)
-
- metadata.description = metadata_override.get(Keys.General.DESCRIPTION, metadata.description)
- metadata.quantized_by = metadata_override.get(Keys.General.QUANTIZED_BY, metadata.quantized_by)
-
- metadata.size_label = metadata_override.get(Keys.General.SIZE_LABEL, metadata.size_label)
- metadata.license_name = metadata_override.get(Keys.General.LICENSE_NAME, metadata.license_name)
- metadata.license_link = metadata_override.get(Keys.General.LICENSE_LINK, metadata.license_link)
-
- metadata.url = metadata_override.get(Keys.General.URL, metadata.url)
- metadata.doi = metadata_override.get(Keys.General.DOI, metadata.doi)
- metadata.uuid = metadata_override.get(Keys.General.UUID, metadata.uuid)
- metadata.repo_url = metadata_override.get(Keys.General.REPO_URL, metadata.repo_url)
-
- metadata.source_url = metadata_override.get(Keys.General.SOURCE_URL, metadata.source_url)
- metadata.source_doi = metadata_override.get(Keys.General.SOURCE_DOI, metadata.source_doi)
- metadata.source_uuid = metadata_override.get(Keys.General.SOURCE_UUID, metadata.source_uuid)
- metadata.source_repo_url = metadata_override.get(Keys.General.SOURCE_REPO_URL, metadata.source_repo_url)
+ metadata.name = metadata_override.get(Keys.General.NAME, metadata.name)
+ metadata.author = metadata_override.get(Keys.General.AUTHOR, metadata.author)
+ metadata.version = metadata_override.get(Keys.General.VERSION, metadata.version)
+ metadata.organization = metadata_override.get(
+ Keys.General.ORGANIZATION, metadata.organization
+ )
+
+ metadata.finetune = metadata_override.get(
+ Keys.General.FINETUNE, metadata.finetune
+ )
+ metadata.basename = metadata_override.get(
+ Keys.General.BASENAME, metadata.basename
+ )
+
+ metadata.description = metadata_override.get(
+ Keys.General.DESCRIPTION, metadata.description
+ )
+ metadata.quantized_by = metadata_override.get(
+ Keys.General.QUANTIZED_BY, metadata.quantized_by
+ )
+
+ metadata.size_label = metadata_override.get(
+ Keys.General.SIZE_LABEL, metadata.size_label
+ )
+ metadata.license_name = metadata_override.get(
+ Keys.General.LICENSE_NAME, metadata.license_name
+ )
+ metadata.license_link = metadata_override.get(
+ Keys.General.LICENSE_LINK, metadata.license_link
+ )
+
+ metadata.url = metadata_override.get(Keys.General.URL, metadata.url)
+ metadata.doi = metadata_override.get(Keys.General.DOI, metadata.doi)
+ metadata.uuid = metadata_override.get(Keys.General.UUID, metadata.uuid)
+ metadata.repo_url = metadata_override.get(
+ Keys.General.REPO_URL, metadata.repo_url
+ )
+
+ metadata.source_url = metadata_override.get(
+ Keys.General.SOURCE_URL, metadata.source_url
+ )
+ metadata.source_doi = metadata_override.get(
+ Keys.General.SOURCE_DOI, metadata.source_doi
+ )
+ metadata.source_uuid = metadata_override.get(
+ Keys.General.SOURCE_UUID, metadata.source_uuid
+ )
+ metadata.source_repo_url = metadata_override.get(
+ Keys.General.SOURCE_REPO_URL, metadata.source_repo_url
+ )
# Base Models is received here as an array of models
- metadata.base_models = metadata_override.get("general.base_models", metadata.base_models)
-
- metadata.tags = metadata_override.get(Keys.General.TAGS, metadata.tags)
- metadata.languages = metadata_override.get(Keys.General.LANGUAGES, metadata.languages)
- metadata.datasets = metadata_override.get(Keys.General.DATASETS, metadata.datasets)
+ metadata.base_models = metadata_override.get(
+ "general.base_models", metadata.base_models
+ )
+
+ metadata.tags = metadata_override.get(Keys.General.TAGS, metadata.tags)
+ metadata.languages = metadata_override.get(
+ Keys.General.LANGUAGES, metadata.languages
+ )
+ metadata.datasets = metadata_override.get(
+ Keys.General.DATASETS, metadata.datasets
+ )
# Direct Metadata Override (via direct cli argument)
if model_name is not None:
@@ -102,7 +141,9 @@ def load(metadata_override_path: Optional[Path] = None, model_path: Optional[Pat
return metadata
@staticmethod
- def load_metadata_override(metadata_override_path: Optional[Path] = None) -> dict[str, Any]:
+ def load_metadata_override(
+ metadata_override_path: Optional[Path] = None,
+ ) -> dict[str, Any]:
if metadata_override_path is None or not metadata_override_path.is_file():
return {}
@@ -128,7 +169,9 @@ def load_model_card(model_path: Optional[Path] = None) -> dict[str, Any]:
if isinstance(data, dict):
return data
else:
- logger.error(f"while reading YAML model card frontmatter, data is {type(data)} instead of dict")
+ logger.error(
+ f"while reading YAML model card frontmatter, data is {type(data)} instead of dict"
+ )
return {}
else:
return {}
@@ -149,10 +192,21 @@ def load_hf_parameters(model_path: Optional[Path] = None) -> dict[str, Any]:
@staticmethod
def id_to_title(string):
# Convert capitalization into title form unless acronym or version number
- return ' '.join([w.title() if w.islower() and not re.match(r'^(v\d+(?:\.\d+)*|\d.*)$', w) else w for w in string.strip().replace('-', ' ').split()])
+ return " ".join(
+ [
+ (
+ w.title()
+ if w.islower() and not re.match(r"^(v\d+(?:\.\d+)*|\d.*)$", w)
+ else w
+ )
+ for w in string.strip().replace("-", " ").split()
+ ]
+ )
@staticmethod
- def get_model_id_components(model_id: Optional[str] = None, total_params: int = 0) -> tuple[str | None, str | None, str | None, str | None, str | None, str | None]:
+ def get_model_id_components(
+ model_id: Optional[str] = None, total_params: int = 0
+ ) -> tuple[str | None, str | None, str | None, str | None, str | None, str | None]:
# Huggingface often stores model ids as '<org>/<model name>'
# so let's parse it and apply some heuristics if possible for model name components
@@ -160,24 +214,24 @@ def get_model_id_components(model_id: Optional[str] = None, total_params: int =
# model ID missing
return None, None, None, None, None, None
- if ' ' in model_id:
+ if " " in model_id:
# model ID is actually a normal human sentence
# which means it's most likely a normal model name only,
# not part of the Hugging Face naming standard, but whatever
return model_id, None, None, None, None, None
- if '/' in model_id:
+ if "/" in model_id:
# model ID (huggingface style)
- org_component, model_full_name_component = model_id.split('/', 1)
+ org_component, model_full_name_component = model_id.split("/", 1)
else:
# model ID but missing org components
org_component, model_full_name_component = None, model_id
# Check if we erroneously matched against './' or '../' etc...
- if org_component is not None and org_component[0] == '.':
+ if org_component is not None and org_component[0] == ".":
org_component = None
- name_parts: list[str] = model_full_name_component.split('-')
+ name_parts: list[str] = model_full_name_component.split("-")
# Remove empty parts
for i in reversed(range(len(name_parts))):
@@ -191,14 +245,18 @@ def get_model_id_components(model_id: Optional[str] = None, total_params: int =
# Annotate the name
for i, part in enumerate(name_parts):
# Version
- if re.fullmatch(r'(v|iter)?\d+([.]\d+)*', part, re.IGNORECASE):
+ if re.fullmatch(r"(v|iter)?\d+([.]\d+)*", part, re.IGNORECASE):
name_types[i].add("version")
# Quant type (should not be there for base models, but still annotated)
- elif re.fullmatch(r'i?q\d(_\w)*|b?fp?(16|32)', part, re.IGNORECASE):
+ elif re.fullmatch(r"i?q\d(_\w)*|b?fp?(16|32)", part, re.IGNORECASE):
name_types[i].add("type")
name_parts[i] = part.upper()
# Model size
- elif i > 0 and re.fullmatch(r'(([A]|\d+[x])?\d+([._]\d+)?[KMBT][\d]?|small|mini|medium|large|x?xl)', part, re.IGNORECASE):
+ elif i > 0 and re.fullmatch(
+ r"(([A]|\d+[x])?\d+([._]\d+)?[KMBT][\d]?|small|mini|medium|large|x?xl)",
+ part,
+ re.IGNORECASE,
+ ):
part = part.replace("_", ".")
# Handle weird bloom-7b1 notation
if part[-1].isdecimal():
@@ -209,14 +267,19 @@ def get_model_id_components(model_id: Optional[str] = None, total_params: int =
part = part[:-1] + part[-1].upper()
if total_params != 0:
try:
- label_params = float(part[:-1]) * pow(1000, " KMBT".find(part[-1]))
+ label_params = float(part[:-1]) * pow(
+ 1000, " KMBT".find(part[-1])
+ )
# Only use it as a size label if it's close or bigger than the model size
# Note that LoRA adapters don't necessarily include all layers,
# so this is why bigger label sizes are accepted.
# Do not use the size label when it's smaller than 1/8 of the model size
- if (total_params < 0 and label_params < abs(total_params) // 8) or (
+ if (
+ total_params < 0 and label_params < abs(total_params) // 8
+ ) or (
# Check both directions when the current model isn't a LoRA adapter
- total_params > 0 and abs(label_params - total_params) > 7 * total_params // 8
+ total_params > 0
+ and abs(label_params - total_params) > 7 * total_params // 8
):
# Likely a context length
name_types[i].add("finetune")
@@ -229,7 +292,9 @@ def get_model_id_components(model_id: Optional[str] = None, total_params: int =
name_types[i].add("size_label")
name_parts[i] = part
# Some easy to recognize finetune names
- elif i > 0 and re.fullmatch(r'chat|instruct|vision|lora', part, re.IGNORECASE):
+ elif i > 0 and re.fullmatch(
+ r"chat|instruct|vision|lora", part, re.IGNORECASE
+ ):
if total_params < 0 and part.lower() == "lora":
# ignore redundant "lora" in the finetune part when the output is a lora adapter
name_types[i].add("type")
@@ -238,7 +303,12 @@ def get_model_id_components(model_id: Optional[str] = None, total_params: int =
# Ignore word-based size labels when there is at least a number-based one present
# TODO: should word-based size labels always be removed instead?
- if any(c.isdecimal() for n, t in zip(name_parts, name_types) if "size_label" in t for c in n):
+ if any(
+ c.isdecimal()
+ for n, t in zip(name_parts, name_types)
+ if "size_label" in t
+ for c in n
+ ):
for n, t in zip(name_parts, name_types):
if "size_label" in t:
if all(c.isalpha() for c in n):
@@ -262,22 +332,55 @@ def get_model_id_components(model_id: Optional[str] = None, total_params: int =
else:
break
- basename = "-".join(n for n, t in zip(name_parts, name_types) if "basename" in t) or None
+ basename = (
+ "-".join(n for n, t in zip(name_parts, name_types) if "basename" in t)
+ or None
+ )
# Deduplicate size labels using order-preserving 'dict' ('set' seems to sort the keys)
- size_label = "-".join(dict.fromkeys(s for s, t in zip(name_parts, name_types) if "size_label" in t).keys()) or None
- finetune = "-".join(f for f, t in zip(name_parts, name_types) if "finetune" in t) or None
+ size_label = (
+ "-".join(
+ dict.fromkeys(
+ s for s, t in zip(name_parts, name_types) if "size_label" in t
+ ).keys()
+ )
+ or None
+ )
+ finetune = (
+ "-".join(f for f, t in zip(name_parts, name_types) if "finetune" in t)
+ or None
+ )
# TODO: should the basename version always be excluded?
# NOTE: multiple finetune versions are joined together
- version = "-".join(v for v, t, in zip(name_parts, name_types) if "version" in t and "basename" not in t) or None
+ version = (
+ "-".join(
+ v
+ for v, t in zip(name_parts, name_types)
+ if "version" in t and "basename" not in t
+ )
+ or None
+ )
if size_label is None and finetune is None and version is None:
# Too ambiguous, output nothing
basename = None
- return model_full_name_component, org_component, basename, finetune, version, size_label
+ return (
+ model_full_name_component,
+ org_component,
+ basename,
+ finetune,
+ version,
+ size_label,
+ )
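Editor's note, a rough usage expectation for the components returned above (the tuple matches the upstream gguf-py test expectations as I recall them; treat it as illustrative):

    import gguf  # assumption: gguf-py is on the path

    gguf.Metadata.get_model_id_components("Mistral/Mixtral-8x7B-Instruct-v0.1")
    # -> ("Mixtral-8x7B-Instruct-v0.1", "Mistral", "Mixtral",
    #     "Instruct", "v0.1", "8x7B")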
@staticmethod
- def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = None, hf_params: Optional[dict] = None, model_path: Optional[Path] = None, total_params: int = 0) -> Metadata:
+ def apply_metadata_heuristic(
+ metadata: Metadata,
+ model_card: Optional[dict] = None,
+ hf_params: Optional[dict] = None,
+ model_path: Optional[Path] = None,
+ total_params: int = 0,
+ ) -> Metadata:
# Reference Model Card Metadata: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1
# Model Card Heuristics
@@ -317,16 +420,30 @@ def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = No
for model_id in metadata_base_models:
# NOTE: model size of base model is assumed to be similar to the size of the current model
- model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params)
+ (
+ model_full_name_component,
+ org_component,
+ basename,
+ finetune,
+ version,
+ size_label,
+ ) = Metadata.get_model_id_components(model_id, total_params)
base_model = {}
if model_full_name_component is not None:
- base_model["name"] = Metadata.id_to_title(model_full_name_component)
+ base_model["name"] = Metadata.id_to_title(
+ model_full_name_component
+ )
if org_component is not None:
base_model["organization"] = Metadata.id_to_title(org_component)
if version is not None:
base_model["version"] = version
- if org_component is not None and model_full_name_component is not None:
- base_model["repo_url"] = f"https://huggingface.co/{org_component}/{model_full_name_component}"
+ if (
+ org_component is not None
+ and model_full_name_component is not None
+ ):
+ base_model["repo_url"] = (
+ f"https://huggingface.co/{org_component}/{model_full_name_component}"
+ )
metadata.base_models.append(base_model)
if "license" in model_card and metadata.license is None:
@@ -360,7 +477,9 @@ def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = No
elif isinstance(pipeline_tags_value, list):
metadata.tags.extend(pipeline_tags_value)
- language_value = model_card.get("languages", model_card.get("language", None))
+ language_value = model_card.get(
+ "languages", model_card.get("language", None)
+ )
if language_value is not None:
if metadata.languages is None:
@@ -388,11 +507,18 @@ def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = No
if hf_params is not None:
hf_name_or_path = hf_params.get("_name_or_path")
- if hf_name_or_path is not None and hf_name_or_path.count('/') <= 1:
+ if hf_name_or_path is not None and hf_name_or_path.count("/") <= 1:
# Use _name_or_path only if it's actually a model name and not some filesystem path
# e.g. 'meta-llama/Llama-2-7b-hf'
model_id = hf_name_or_path
- model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params)
+ (
+ model_full_name_component,
+ org_component,
+ basename,
+ finetune,
+ version,
+ size_label,
+ ) = Metadata.get_model_id_components(model_id, total_params)
if metadata.name is None and model_full_name_component is not None:
metadata.name = Metadata.id_to_title(model_full_name_component)
if metadata.organization is None and org_component is not None:
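Editor's note: the count("/") <= 1 guard above is what separates hub-style ids from filesystem paths; a quick sanity check with illustrative values:

    assert "meta-llama/Llama-2-7b-hf".count("/") <= 1  # hub id -> parsed
    assert "/opt/models/llama-2-7b".count("/") > 1     # local path -> skipped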
@@ -410,7 +536,14 @@ def apply_metadata_heuristic(metadata: Metadata, model_card: Optional[dict] = No
############################################
if model_path is not None:
model_id = model_path.name
- model_full_name_component, org_component, basename, finetune, version, size_label = Metadata.get_model_id_components(model_id, total_params)
+ (
+ model_full_name_component,
+ org_component,
+ basename,
+ finetune,
+ version,
+ size_label,
+ ) = Metadata.get_model_id_components(model_id, total_params)
if metadata.name is None and model_full_name_component is not None:
metadata.name = Metadata.id_to_title(model_full_name_component)
if metadata.organization is None and org_component is not None:
@@ -485,7 +618,9 @@ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
if "version" in base_model_entry:
gguf_writer.add_base_model_version(key, base_model_entry["version"])
if "organization" in base_model_entry:
- gguf_writer.add_base_model_organization(key, base_model_entry["organization"])
+ gguf_writer.add_base_model_organization(
+ key, base_model_entry["organization"]
+ )
if "url" in base_model_entry:
gguf_writer.add_base_model_url(key, base_model_entry["url"])
if "doi" in base_model_entry:
@@ -493,7 +628,9 @@ def set_gguf_meta_model(self, gguf_writer: gguf.GGUFWriter):
if "uuid" in base_model_entry:
gguf_writer.add_base_model_uuid(key, base_model_entry["uuid"])
if "repo_url" in base_model_entry:
- gguf_writer.add_base_model_repo_url(key, base_model_entry["repo_url"])
+ gguf_writer.add_base_model_repo_url(
+ key, base_model_entry["repo_url"]
+ )
if self.tags is not None:
gguf_writer.add_tags(self.tags)
diff --git a/src/gguf-py/gguf/quants.py b/src/gguf-py/gguf/quants.py
index f4361d7..a5d8b44 100644
--- a/src/gguf-py/gguf/quants.py
+++ b/src/gguf-py/gguf/quants.py
@@ -12,14 +12,18 @@
def quant_shape_to_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
block_size, type_size = GGML_QUANT_SIZES[quant_type]
if shape[-1] % block_size != 0:
- raise ValueError(f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})")
+ raise ValueError(
+ f"Quantized tensor row size ({shape[-1]}) is not a multiple of {quant_type.name} block size ({block_size})"
+ )
return (*shape[:-1], shape[-1] // block_size * type_size)
def quant_shape_from_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizationType):
block_size, type_size = GGML_QUANT_SIZES[quant_type]
if shape[-1] % type_size != 0:
- raise ValueError(f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})")
+ raise ValueError(
+ f"Quantized tensor bytes per row ({shape[-1]}) is not a multiple of {quant_type.name} type size ({type_size})"
+ )
return (*shape[:-1], shape[-1] // type_size * block_size)
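Editor's note: both helpers above are pure row arithmetic. For Q8_0, GGML_QUANT_SIZES gives block_size 32 and type_size 34 (a 2-byte scale plus 32 int8 values), so a 4096-wide float row maps to 4352 bytes and back:

    block_size, type_size = 32, 34                   # Q8_0 per GGML_QUANT_SIZES
    assert 4096 // block_size * type_size == 4352    # quant_shape_to_byte_shape
    assert 4352 // type_size * block_size == 4096    # quant_shape_from_byte_shape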
@@ -27,14 +31,23 @@ def quant_shape_from_byte_shape(shape: Sequence[int], quant_type: GGMLQuantizati
def __compute_fp32_to_bf16(n: np.ndarray) -> np.ndarray:
n = n.astype(np.float32, copy=False).view(np.uint32)
# force nan to quiet
- n = np.where((n & 0x7fffffff) > 0x7f800000, (n & np.uint32(0xffff0000)) | np.uint32(64 << 16), n)
+ n = np.where(
+ (n & 0x7FFFFFFF) > 0x7F800000,
+ (n & np.uint32(0xFFFF0000)) | np.uint32(64 << 16),
+ n,
+ )
# round to nearest even
- n = (np.uint64(n) + (0x7fff + ((n >> 16) & 1))) >> 16
+ n = (np.uint64(n) + (0x7FFF + ((n >> 16) & 1))) >> 16
return n.astype(np.uint16)
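Editor's note: the two steps above, quiet-NaN forcing and round-to-nearest-even on the 16 dropped mantissa bits, can be probed with the same bit arithmetic (a self-contained check, not part of the module):

    import numpy as np

    x = np.array([1.0, 3.0], dtype=np.float32).view(np.uint32)
    rounded = (np.uint64(x) + (0x7FFF + ((x >> 16) & 1))) >> 16
    # bf16(1.0) == 0x3F80, bf16(3.0) == 0x4040
    assert list(rounded.astype(np.uint16)) == [0x3F80, 0x4040]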
# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time
-def __apply_over_grouped_rows(func: Callable[[np.ndarray], np.ndarray], arr: np.ndarray, otype: DTypeLike, oshape: tuple[int, ...]) -> np.ndarray:
+def __apply_over_grouped_rows(
+ func: Callable[[np.ndarray], np.ndarray],
+ arr: np.ndarray,
+ otype: DTypeLike,
+ oshape: tuple[int, ...],
+) -> np.ndarray:
rows = arr.reshape((-1, arr.shape[-1]))
osize = 1
for dim in oshape:
@@ -42,15 +55,23 @@ def __apply_over_grouped_rows(func: Callable[[np.ndarray], np.ndarray], arr: np.
out = np.empty(shape=osize, dtype=otype)
# compute over groups of 16 rows (arbitrary, but seems good for performance)
n_groups = (rows.shape[0] // 16) or 1
- np.concatenate([func(group).ravel() for group in np.array_split(rows, n_groups)], axis=0, out=out)
+ np.concatenate(
+ [func(group).ravel() for group in np.array_split(rows, n_groups)],
+ axis=0,
+ out=out,
+ )
return out.reshape(oshape)
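Editor's note: the pattern above (reshape to rows, split into a handful of groups, then np.concatenate into a preallocated buffer) avoids per-row Python overhead. A toy run under the same contract, where func maps a group of rows to its output rows:

    import numpy as np

    arr = np.arange(64, dtype=np.float32).reshape(4, 16)
    rows = arr.reshape(-1, arr.shape[-1])
    out = np.empty(arr.size, dtype=np.float32)
    np.concatenate([(g * 2).ravel() for g in np.array_split(rows, 2)],
                   axis=0, out=out)
    assert (out.reshape(arr.shape) == arr * 2).all()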
def __quantize_bf16_array(n: np.ndarray) -> np.ndarray:
- return __apply_over_grouped_rows(__compute_fp32_to_bf16, arr=n, otype=np.uint16, oshape=n.shape)
+ return __apply_over_grouped_rows(
+ __compute_fp32_to_bf16, arr=n, otype=np.uint16, oshape=n.shape
+ )
-__quantize_bf16_lazy = LazyNumpyTensor._wrap_fn(__quantize_bf16_array, meta_noop=np.uint16)
+__quantize_bf16_lazy = LazyNumpyTensor._wrap_fn(
+ __quantize_bf16_array, meta_noop=np.uint16
+)
def quantize_bf16(n: np.ndarray):
@@ -105,7 +126,12 @@ def __quantize_q8_0_rows(n: np.ndarray) -> np.ndarray:
def __quantize_q8_0_array(n: np.ndarray) -> np.ndarray:
- return __apply_over_grouped_rows(__quantize_q8_0_rows, arr=n, otype=np.uint8, oshape=__quantize_q8_0_shape_change(n.shape))
+ return __apply_over_grouped_rows(
+ __quantize_q8_0_rows,
+ arr=n,
+ otype=np.uint8,
+ oshape=__quantize_q8_0_shape_change(n.shape),
+ )
__quantize_q8_0_lazy = LazyNumpyTensor._wrap_fn(
diff --git a/src/gguf-py/gguf/tensor_mapping.py b/src/gguf-py/gguf/tensor_mapping.py
index 9aa2209..5389d5f 100644
--- a/src/gguf-py/gguf/tensor_mapping.py
+++ b/src/gguf-py/gguf/tensor_mapping.py
@@ -9,74 +9,68 @@ class TensorNameMap:
mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
# Token embeddings
MODEL_TENSOR.TOKEN_EMBD: (
- "gpt_neox.embed_in", # gptneox
- "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais
- "transformer.word_embeddings", # falcon
- "word_embeddings", # bloom
- "model.embed_tokens", # llama-hf
- "tok_embeddings", # llama-pth
- "embeddings.word_embeddings", # bert nomic-bert
+ "gpt_neox.embed_in", # gptneox
+ "transformer.wte", # gpt2 gpt-j mpt refact qwen dbrx jais
+ "transformer.word_embeddings", # falcon
+ "word_embeddings", # bloom
+ "model.embed_tokens", # llama-hf
+ "tok_embeddings", # llama-pth
+ "embeddings.word_embeddings", # bert nomic-bert
"language_model.embedding.word_embeddings", # persimmon
- "wte", # gpt2
- "transformer.embd.wte", # phi2
- "model.tok_embeddings", # internlm2
- "model.embedding", # mamba-qbert
- "backbone.embedding", # mamba
- "backbone.embeddings", # mamba-hf
- "transformer.in_out_embed", # Grok
- "embedding.word_embeddings", # chatglm
- "transformer.token_embeddings", # openelm
- "shared", # t5
+ "wte", # gpt2
+ "transformer.embd.wte", # phi2
+ "model.tok_embeddings", # internlm2
+ "model.embedding", # mamba-qbert
+ "backbone.embedding", # mamba
+ "backbone.embeddings", # mamba-hf
+ "transformer.in_out_embed", # Grok
+ "embedding.word_embeddings", # chatglm
+ "transformer.token_embeddings", # openelm
+ "shared", # t5
),
-
# Token type embeddings
MODEL_TENSOR.TOKEN_TYPES: (
"embeddings.token_type_embeddings", # bert nomic-bert
),
-
# Normalization of token embeddings
MODEL_TENSOR.TOKEN_EMBD_NORM: (
"word_embeddings_layernorm", # bloom
- "embeddings.LayerNorm", # bert
- "emb_ln", # nomic-bert
- "transformer.norm", # openelm
+ "embeddings.LayerNorm", # bert
+ "emb_ln", # nomic-bert
+ "transformer.norm", # openelm
),
-
# Position embeddings
MODEL_TENSOR.POS_EMBD: (
- "transformer.wpe", # gpt2
+ "transformer.wpe", # gpt2
"embeddings.position_embeddings", # bert
- "wpe", # gpt2
+ "wpe", # gpt2
),
-
# Output
MODEL_TENSOR.OUTPUT: (
- "embed_out", # gptneox
- "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais
- "output", # llama-pth bloom internlm2
+ "embed_out", # gptneox
+ "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen mamba dbrx jais
+ "output", # llama-pth bloom internlm2
"word_embeddings_for_head", # persimmon
- "lm_head.linear", # phi2
- "output_layer", # chatglm
+ "lm_head.linear", # phi2
+ "output_layer", # chatglm
),
-
# Output norm
MODEL_TENSOR.OUTPUT_NORM: (
- "gpt_neox.final_layer_norm", # gptneox
- "transformer.ln_f", # gpt2 gpt-j falcon jais
- "model.norm", # llama-hf baichuan internlm2
- "norm", # llama-pth
- "transformer.norm_f", # mpt dbrx
- "ln_f", # refact bloom qwen gpt2
+ "gpt_neox.final_layer_norm", # gptneox
+ "transformer.ln_f", # gpt2 gpt-j falcon jais
+ "model.norm", # llama-hf baichuan internlm2
+ "norm", # llama-pth
+ "transformer.norm_f", # mpt dbrx
+ "ln_f", # refact bloom qwen gpt2
"language_model.encoder.final_layernorm", # persimmon
- "model.final_layernorm", # persimmon
- "lm_head.ln", # phi2
- "model.norm_f", # mamba-qbert
- "backbone.norm_f", # mamba
- "transformer.rms_norm", # Grok
- "encoder.final_layernorm", # chatglm
- "transformer.norm", # openelm
+ "model.final_layernorm", # persimmon
+ "lm_head.ln", # phi2
+ "model.norm_f", # mamba-qbert
+ "backbone.norm_f", # mamba
+ "transformer.rms_norm", # Grok
+ "encoder.final_layernorm", # chatglm
+ "transformer.norm", # openelm
),
-
# Rope frequencies
MODEL_TENSOR.ROPE_FREQS: (
"rope.freqs", # llama-pth
@@ -87,501 +81,394 @@ class TensorNameMap:
block_mappings_cfg: dict[MODEL_TENSOR, tuple[str, ...]] = {
# Attention norm
MODEL_TENSOR.ATTN_NORM: (
- "gpt_neox.layers.{bid}.input_layernorm", # gptneox
- "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais
- "transformer.blocks.{bid}.norm_1", # mpt
- "transformer.h.{bid}.input_layernorm", # falcon7b
- "h.{bid}.input_layernorm", # bloom
- "transformer.h.{bid}.ln_mlp", # falcon40b
- "model.layers.{bid}.input_layernorm", # llama-hf
- "layers.{bid}.attention_norm", # llama-pth
+ "gpt_neox.layers.{bid}.input_layernorm", # gptneox
+ "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen jais
+ "transformer.blocks.{bid}.norm_1", # mpt
+ "transformer.h.{bid}.input_layernorm", # falcon7b
+ "h.{bid}.input_layernorm", # bloom
+ "transformer.h.{bid}.ln_mlp", # falcon40b
+ "model.layers.{bid}.input_layernorm", # llama-hf
+ "layers.{bid}.attention_norm", # llama-pth
"language_model.encoder.layers.{bid}.input_layernorm", # persimmon
- "model.layers.{bid}.ln1", # yi
- "h.{bid}.ln_1", # gpt2
- "transformer.h.{bid}.ln", # phi2
- "model.layers.layers.{bid}.norm", # plamo
- "model.layers.{bid}.attention_norm", # internlm2
- "model.layers.{bid}.norm", # mamba-qbert
- "backbone.layers.{bid}.norm", # mamba
- "transformer.decoder_layer.{bid}.rms_norm", # Grok
- "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx
- "encoder.layers.{bid}.input_layernorm", # chatglm
- "transformer.layers.{bid}.attn_norm", # openelm
+ "model.layers.{bid}.ln1", # yi
+ "h.{bid}.ln_1", # gpt2
+ "transformer.h.{bid}.ln", # phi2
+ "model.layers.layers.{bid}.norm", # plamo
+ "model.layers.{bid}.attention_norm", # internlm2
+ "model.layers.{bid}.norm", # mamba-qbert
+ "backbone.layers.{bid}.norm", # mamba
+ "transformer.decoder_layer.{bid}.rms_norm", # Grok
+ "transformer.blocks.{bid}.norm_attn_norm.norm_1", # dbrx
+ "encoder.layers.{bid}.input_layernorm", # chatglm
+ "transformer.layers.{bid}.attn_norm", # openelm
),
-
# Attention norm 2
MODEL_TENSOR.ATTN_NORM_2: (
"transformer.h.{bid}.ln_attn", # falcon40b
- "encoder.layer.{bid}.layer_norm_1", # jina-v2-code
+ "encoder.layer.{bid}.layer_norm_1", # jina-v2-code
),
-
# Attention query-key-value
MODEL_TENSOR.ATTN_QKV: (
- "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox
- "transformer.h.{bid}.attn.c_attn", # gpt2 qwen jais
- "transformer.blocks.{bid}.attn.Wqkv", # mpt
- "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv", # dbrx
- "transformer.h.{bid}.self_attention.query_key_value", # falcon
- "h.{bid}.self_attention.query_key_value", # bloom
+ "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox
+ "transformer.h.{bid}.attn.c_attn", # gpt2 qwen jais
+ "transformer.blocks.{bid}.attn.Wqkv", # mpt
+ "transformer.blocks.{bid}.norm_attn_norm.attn.Wqkv", # dbrx
+ "transformer.h.{bid}.self_attention.query_key_value", # falcon
+ "h.{bid}.self_attention.query_key_value", # bloom
"language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon
- "model.layers.{bid}.self_attn.query_key_value", # persimmon
- "h.{bid}.attn.c_attn", # gpt2
- "transformer.h.{bid}.mixer.Wqkv", # phi2
- "encoder.layers.{bid}.attn.Wqkv", # nomic-bert
- "model.layers.{bid}.self_attn.qkv_proj", # phi3
- "encoder.layers.{bid}.self_attention.query_key_value", # chatglm
- "transformer.layers.{bid}.attn.qkv_proj", # openelm
+ "model.layers.{bid}.self_attn.query_key_value", # persimmon
+ "h.{bid}.attn.c_attn", # gpt2
+ "transformer.h.{bid}.mixer.Wqkv", # phi2
+ "encoder.layers.{bid}.attn.Wqkv", # nomic-bert
+ "model.layers.{bid}.self_attn.qkv_proj", # phi3
+ "encoder.layers.{bid}.self_attention.query_key_value", # chatglm
+ "transformer.layers.{bid}.attn.qkv_proj", # openelm
),
-
# Attention query
MODEL_TENSOR.ATTN_Q: (
- "model.layers.{bid}.self_attn.q_proj", # llama-hf
- "layers.{bid}.attention.wq", # llama-pth
- "encoder.layer.{bid}.attention.self.query", # bert
- "transformer.h.{bid}.attn.q_proj", # gpt-j
- "model.layers.layers.{bid}.self_attn.q_proj", # plamo
- "model.layers.{bid}.attention.wq", # internlm2
- "transformer.decoder_layer.{bid}.multi_head_attention.query",# Grok
+ "model.layers.{bid}.self_attn.q_proj", # llama-hf
+ "layers.{bid}.attention.wq", # llama-pth
+ "encoder.layer.{bid}.attention.self.query", # bert
+ "transformer.h.{bid}.attn.q_proj", # gpt-j
+ "model.layers.layers.{bid}.self_attn.q_proj", # plamo
+ "model.layers.{bid}.attention.wq", # internlm2
+ "transformer.decoder_layer.{bid}.multi_head_attention.query", # Grok
),
-
# Attention key
MODEL_TENSOR.ATTN_K: (
- "model.layers.{bid}.self_attn.k_proj", # llama-hf
- "layers.{bid}.attention.wk", # llama-pth
- "encoder.layer.{bid}.attention.self.key", # bert
- "transformer.h.{bid}.attn.k_proj", # gpt-j
- "transformer.h.{bid}.attn.k", # refact
- "model.layers.layers.{bid}.self_attn.k_proj", # plamo
- "model.layers.{bid}.attention.wk", # internlm2
- "transformer.decoder_layer.{bid}.multi_head_attention.key",# Grok
+ "model.layers.{bid}.self_attn.k_proj", # llama-hf
+ "layers.{bid}.attention.wk", # llama-pth
+ "encoder.layer.{bid}.attention.self.key", # bert
+ "transformer.h.{bid}.attn.k_proj", # gpt-j
+ "transformer.h.{bid}.attn.k", # refact
+ "model.layers.layers.{bid}.self_attn.k_proj", # plamo
+ "model.layers.{bid}.attention.wk", # internlm2
+ "transformer.decoder_layer.{bid}.multi_head_attention.key", # Grok
),
-
# Attention value
MODEL_TENSOR.ATTN_V: (
- "model.layers.{bid}.self_attn.v_proj", # llama-hf
- "layers.{bid}.attention.wv", # llama-pth
- "encoder.layer.{bid}.attention.self.value", # bert
- "transformer.h.{bid}.attn.v_proj", # gpt-j
- "transformer.h.{bid}.attn.v", # refact
- "model.layers.layers.{bid}.self_attn.v_proj", # plamo
- "model.layers.{bid}.attention.wv", # internlm2
- "transformer.decoder_layer.{bid}.multi_head_attention.value" # Grok
+ "model.layers.{bid}.self_attn.v_proj", # llama-hf
+ "layers.{bid}.attention.wv", # llama-pth
+ "encoder.layer.{bid}.attention.self.value", # bert
+ "transformer.h.{bid}.attn.v_proj", # gpt-j
+ "transformer.h.{bid}.attn.v", # refact
+ "model.layers.layers.{bid}.self_attn.v_proj", # plamo
+ "model.layers.{bid}.attention.wv", # internlm2
+ "transformer.decoder_layer.{bid}.multi_head_attention.value", # Grok
),
-
# Attention output
MODEL_TENSOR.ATTN_OUT: (
- "gpt_neox.layers.{bid}.attention.dense", # gptneox
- "transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen jais
- "transformer.blocks.{bid}.attn.out_proj", # mpt
- "transformer.h.{bid}.self_attention.dense", # falcon
- "h.{bid}.self_attention.dense", # bloom
- "model.layers.{bid}.self_attn.o_proj", # llama-hf
- "layers.{bid}.attention.wo", # llama-pth
- "encoder.layer.{bid}.attention.output.dense", # bert
- "transformer.h.{bid}.attn.out_proj", # gpt-j
- "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon
- "model.layers.{bid}.self_attn.dense", # persimmon
- "h.{bid}.attn.c_proj", # gpt2
- "transformer.h.{bid}.mixer.out_proj", # phi2
- "model.layers.layers.{bid}.self_attn.o_proj", # plamo
- "model.layers.{bid}.attention.wo", # internlm2
- "encoder.layers.{bid}.attn.out_proj", # nomic-bert
+ "gpt_neox.layers.{bid}.attention.dense", # gptneox
+ "transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen jais
+ "transformer.blocks.{bid}.attn.out_proj", # mpt
+ "transformer.h.{bid}.self_attention.dense", # falcon
+ "h.{bid}.self_attention.dense", # bloom
+ "model.layers.{bid}.self_attn.o_proj", # llama-hf
+ "layers.{bid}.attention.wo", # llama-pth
+ "encoder.layer.{bid}.attention.output.dense", # bert
+ "transformer.h.{bid}.attn.out_proj", # gpt-j
+ "language_model.encoder.layers.{bid}.self_attention.dense", # persimmon
+ "model.layers.{bid}.self_attn.dense", # persimmon
+ "h.{bid}.attn.c_proj", # gpt2
+ "transformer.h.{bid}.mixer.out_proj", # phi2
+ "model.layers.layers.{bid}.self_attn.o_proj", # plamo
+ "model.layers.{bid}.attention.wo", # internlm2
+ "encoder.layers.{bid}.attn.out_proj", # nomic-bert
"transformer.decoder_layer.{bid}.multi_head_attention.linear", # Grok
- "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx
- "encoder.layers.{bid}.self_attention.dense", # chatglm
- "transformer.layers.{bid}.attn.out_proj", # openelm
+ "transformer.blocks.{bid}.norm_attn_norm.attn.out_proj", # dbrx
+ "encoder.layers.{bid}.self_attention.dense", # chatglm
+ "transformer.layers.{bid}.attn.out_proj", # openelm
),
-
# Attention output norm
MODEL_TENSOR.ATTN_OUT_NORM: (
"encoder.layer.{bid}.attention.output.LayerNorm", # bert
- "encoder.layers.{bid}.norm1", # nomic-bert
- "transformer.decoder_layer.{bid}.rms_norm_1", # Grok
+ "encoder.layers.{bid}.norm1", # nomic-bert
+ "transformer.decoder_layer.{bid}.rms_norm_1", # Grok
"transformer.blocks.{bid}.norm_attn_norm.norm_2", # dbrx
),
-
MODEL_TENSOR.ATTN_POST_NORM: (
- "model.layers.{bid}.post_attention_layernorm", # gemma2
+ "model.layers.{bid}.post_attention_layernorm", # gemma2
),
-
# Rotary embeddings
MODEL_TENSOR.ATTN_ROT_EMBD: (
- "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
- "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth
- "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo
- "transformer.h.{bid}.attn.rotary_emb.inv_freq", # codeshell
+ "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
+ "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth
+ "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo
+ "transformer.h.{bid}.attn.rotary_emb.inv_freq", # codeshell
),
-
# Feed-forward norm
MODEL_TENSOR.FFN_NORM: (
- "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox
- "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais
- "h.{bid}.post_attention_layernorm", # bloom
- "transformer.blocks.{bid}.norm_2", # mpt
- "model.layers.{bid}.post_attention_layernorm", # llama-hf
- "layers.{bid}.ffn_norm", # llama-pth
+ "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox
+ "transformer.h.{bid}.ln_2", # gpt2 refact qwen jais
+ "h.{bid}.post_attention_layernorm", # bloom
+ "transformer.blocks.{bid}.norm_2", # mpt
+ "model.layers.{bid}.post_attention_layernorm", # llama-hf
+ "layers.{bid}.ffn_norm", # llama-pth
"language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
- "model.layers.{bid}.ln2", # yi
- "h.{bid}.ln_2", # gpt2
- "model.layers.{bid}.ffn_norm", # internlm2
- "transformer.decoder_layer.{bid}.rms_norm_2", # Grok
- "encoder.layers.{bid}.post_attention_layernorm", # chatglm
- "transformer.layers.{bid}.ffn_norm", # openelm
+ "model.layers.{bid}.ln2", # yi
+ "h.{bid}.ln_2", # gpt2
+ "model.layers.{bid}.ffn_norm", # internlm2
+ "transformer.decoder_layer.{bid}.rms_norm_2", # Grok
+ "encoder.layers.{bid}.post_attention_layernorm", # chatglm
+ "transformer.layers.{bid}.ffn_norm", # openelm
),
-
# Pre feed-forward norm
MODEL_TENSOR.FFN_PRE_NORM: (
- "model.layers.{bid}.pre_feedforward_layernorm", # gemma2
+ "model.layers.{bid}.pre_feedforward_layernorm", # gemma2
),
-
# Post feed-forward norm
MODEL_TENSOR.FFN_POST_NORM: (
- "model.layers.{bid}.post_feedforward_layernorm", # gemma2
+ "model.layers.{bid}.post_feedforward_layernorm", # gemma2
),
-
MODEL_TENSOR.FFN_GATE_INP: (
- "layers.{bid}.feed_forward.gate", # mixtral
- "model.layers.{bid}.block_sparse_moe.gate", # mixtral
- "model.layers.{bid}.mlp.gate", # qwen2moe
- "transformer.decoder_layer.{bid}.router", # Grok
+ "layers.{bid}.feed_forward.gate", # mixtral
+ "model.layers.{bid}.block_sparse_moe.gate", # mixtral
+ "model.layers.{bid}.mlp.gate", # qwen2moe
+ "transformer.decoder_layer.{bid}.router", # Grok
"transformer.blocks.{bid}.ffn.router.layer", # dbrx
),
-
MODEL_TENSOR.FFN_GATE_INP_SHEXP: (
- "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe
+ "model.layers.{bid}.mlp.shared_expert_gate", # qwen2moe
),
-
# Feed-forward up
MODEL_TENSOR.FFN_UP: (
- "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox
- "transformer.h.{bid}.mlp.c_fc", # gpt2 jais
- "transformer.blocks.{bid}.ffn.up_proj", # mpt
- "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon
- "h.{bid}.mlp.dense_h_to_4h", # bloom
- "model.layers.{bid}.mlp.up_proj", # llama-hf refact
- "layers.{bid}.feed_forward.w3", # llama-pth
- "encoder.layer.{bid}.intermediate.dense", # bert
- "transformer.h.{bid}.mlp.fc_in", # gpt-j
- "transformer.h.{bid}.mlp.linear_3", # refact
+ "gpt_neox.layers.{bid}.mlp.dense_h_to_4h", # gptneox
+ "transformer.h.{bid}.mlp.c_fc", # gpt2 jais
+ "transformer.blocks.{bid}.ffn.up_proj", # mpt
+ "transformer.h.{bid}.mlp.dense_h_to_4h", # falcon
+ "h.{bid}.mlp.dense_h_to_4h", # bloom
+ "model.layers.{bid}.mlp.up_proj", # llama-hf refact
+ "layers.{bid}.feed_forward.w3", # llama-pth
+ "encoder.layer.{bid}.intermediate.dense", # bert
+ "transformer.h.{bid}.mlp.fc_in", # gpt-j
+ "transformer.h.{bid}.mlp.linear_3", # refact
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
- "model.layers.{bid}.mlp.dense_h_to_4h", # persimmon
- "transformer.h.{bid}.mlp.w1", # qwen
- "h.{bid}.mlp.c_fc", # gpt2
- "transformer.h.{bid}.mlp.fc1", # phi2
- "model.layers.{bid}.mlp.fc1", # phi2
- "model.layers.{bid}.mlp.gate_up_proj", # phi3
- "model.layers.layers.{bid}.mlp.up_proj", # plamo
- "model.layers.{bid}.feed_forward.w3", # internlm2
- "encoder.layers.{bid}.mlp.fc11", # nomic-bert
- "model.layers.{bid}.mlp.c_fc", # starcoder2
- "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
- "model.layers.{bid}.residual_mlp.w3", # arctic
- "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm
+ "model.layers.{bid}.mlp.dense_h_to_4h", # persimmon
+ "transformer.h.{bid}.mlp.w1", # qwen
+ "h.{bid}.mlp.c_fc", # gpt2
+ "transformer.h.{bid}.mlp.fc1", # phi2
+ "model.layers.{bid}.mlp.fc1", # phi2
+ "model.layers.{bid}.mlp.gate_up_proj", # phi3
+ "model.layers.layers.{bid}.mlp.up_proj", # plamo
+ "model.layers.{bid}.feed_forward.w3", # internlm2
+ "encoder.layers.{bid}.mlp.fc11", # nomic-bert
+ "model.layers.{bid}.mlp.c_fc", # starcoder2
+ "encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
+ "model.layers.{bid}.residual_mlp.w3", # arctic
+ "encoder.layers.{bid}.mlp.dense_h_to_4h", # chatglm
),
-
MODEL_TENSOR.FFN_UP_EXP: (
- "layers.{bid}.feed_forward.experts.w3", # mixtral (merged)
+ "layers.{bid}.feed_forward.experts.w3", # mixtral (merged)
"transformer.decoder_layer.{bid}.moe.linear_v", # Grok (merged)
- "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx
- "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe (merged)
+ "transformer.blocks.{bid}.ffn.experts.mlp.v1", # dbrx
+ "model.layers.{bid}.mlp.experts.up_proj", # qwen2moe (merged)
),
-
MODEL_TENSOR.FFN_UP_SHEXP: (
"model.layers.{bid}.mlp.shared_expert.up_proj", # qwen2moe
- "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2
+ "model.layers.{bid}.mlp.shared_experts.up_proj", # deepseek2
),
-
# AWQ-activation gate
- MODEL_TENSOR.FFN_ACT: (
- "transformer.blocks.{bid}.ffn.act", # mpt
- ),
-
+ MODEL_TENSOR.FFN_ACT: ("transformer.blocks.{bid}.ffn.act",), # mpt
# Feed-forward gate
MODEL_TENSOR.FFN_GATE: (
- "model.layers.{bid}.mlp.gate_proj", # llama-hf refact
- "layers.{bid}.feed_forward.w1", # llama-pth
- "transformer.h.{bid}.mlp.w2", # qwen
- "transformer.h.{bid}.mlp.c_fc2", # jais
- "model.layers.layers.{bid}.mlp.gate_proj", # plamo
- "model.layers.{bid}.feed_forward.w1", # internlm2
- "encoder.layers.{bid}.mlp.fc12", # nomic-bert
- "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
- "transformer.h.{bid}.mlp.linear_1", # refact
- "model.layers.{bid}.residual_mlp.w1", # arctic
+ "model.layers.{bid}.mlp.gate_proj", # llama-hf refact
+ "layers.{bid}.feed_forward.w1", # llama-pth
+ "transformer.h.{bid}.mlp.w2", # qwen
+ "transformer.h.{bid}.mlp.c_fc2", # jais
+ "model.layers.layers.{bid}.mlp.gate_proj", # plamo
+ "model.layers.{bid}.feed_forward.w1", # internlm2
+ "encoder.layers.{bid}.mlp.fc12", # nomic-bert
+ "encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
+ "transformer.h.{bid}.mlp.linear_1", # refact
+ "model.layers.{bid}.residual_mlp.w1", # arctic
),
-
MODEL_TENSOR.FFN_GATE_EXP: (
- "layers.{bid}.feed_forward.experts.w1", # mixtral (merged)
- "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged)
+ "layers.{bid}.feed_forward.experts.w1", # mixtral (merged)
+ "transformer.decoder_layer.{bid}.moe.linear", # Grok (merged)
"transformer.blocks.{bid}.ffn.experts.mlp.w1", # dbrx
- "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe (merged)
+ "model.layers.{bid}.mlp.experts.gate_proj", # qwen2moe (merged)
),
-
MODEL_TENSOR.FFN_GATE_SHEXP: (
"model.layers.{bid}.mlp.shared_expert.gate_proj", # qwen2moe
- "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2
+ "model.layers.{bid}.mlp.shared_experts.gate_proj", # deepseek2
),
-
# Feed-forward down
MODEL_TENSOR.FFN_DOWN: (
- "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox
- "transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen jais
- "transformer.blocks.{bid}.ffn.down_proj", # mpt
- "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon
- "h.{bid}.mlp.dense_4h_to_h", # bloom
- "model.layers.{bid}.mlp.down_proj", # llama-hf
- "layers.{bid}.feed_forward.w2", # llama-pth
- "encoder.layer.{bid}.output.dense", # bert
- "transformer.h.{bid}.mlp.fc_out", # gpt-j
+ "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox
+ "transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen jais
+ "transformer.blocks.{bid}.ffn.down_proj", # mpt
+ "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon
+ "h.{bid}.mlp.dense_4h_to_h", # bloom
+ "model.layers.{bid}.mlp.down_proj", # llama-hf
+ "layers.{bid}.feed_forward.w2", # llama-pth
+ "encoder.layer.{bid}.output.dense", # bert
+ "transformer.h.{bid}.mlp.fc_out", # gpt-j
"language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon
- "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon
- "h.{bid}.mlp.c_proj", # gpt2
- "transformer.h.{bid}.mlp.fc2", # phi2
- "model.layers.{bid}.mlp.fc2", # phi2
- "model.layers.layers.{bid}.mlp.down_proj", # plamo
- "model.layers.{bid}.feed_forward.w2", # internlm2
- "encoder.layers.{bid}.mlp.fc2", # nomic-bert
- "model.layers.{bid}.mlp.c_proj", # starcoder2
- "encoder.layer.{bid}.mlp.wo", # jina-bert-v2
- "transformer.layers.{bid}.ffn.proj_2", # openelm
- "model.layers.{bid}.residual_mlp.w2", # arctic
- "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
- "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm
+ "model.layers.{bid}.mlp.dense_4h_to_h", # persimmon
+ "h.{bid}.mlp.c_proj", # gpt2
+ "transformer.h.{bid}.mlp.fc2", # phi2
+ "model.layers.{bid}.mlp.fc2", # phi2
+ "model.layers.layers.{bid}.mlp.down_proj", # plamo
+ "model.layers.{bid}.feed_forward.w2", # internlm2
+ "encoder.layers.{bid}.mlp.fc2", # nomic-bert
+ "model.layers.{bid}.mlp.c_proj", # starcoder2
+ "encoder.layer.{bid}.mlp.wo", # jina-bert-v2
+ "transformer.layers.{bid}.ffn.proj_2", # openelm
+ "model.layers.{bid}.residual_mlp.w2", # arctic
+ "encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
+ "encoder.layers.{bid}.mlp.dense_4h_to_h", # chatglm
),
-
MODEL_TENSOR.FFN_DOWN_EXP: (
- "layers.{bid}.feed_forward.experts.w2", # mixtral (merged)
+ "layers.{bid}.feed_forward.experts.w2", # mixtral (merged)
"transformer.decoder_layer.{bid}.moe.linear_1", # Grok (merged)
- "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx
- "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe (merged)
+ "transformer.blocks.{bid}.ffn.experts.mlp.w2", # dbrx
+ "model.layers.{bid}.mlp.experts.down_proj", # qwen2moe (merged)
),
-
MODEL_TENSOR.FFN_DOWN_SHEXP: (
"model.layers.{bid}.mlp.shared_expert.down_proj", # qwen2moe
- "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2
+ "model.layers.{bid}.mlp.shared_experts.down_proj", # deepseek2
),
-
MODEL_TENSOR.ATTN_Q_NORM: (
"language_model.encoder.layers.{bid}.self_attention.q_layernorm",
- "model.layers.{bid}.self_attn.q_layernorm", # persimmon
- "model.layers.{bid}.self_attn.q_norm", # cohere
- "transformer.blocks.{bid}.attn.q_ln", # sea-lion
- "encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2
- "transformer.layers.{bid}.attn.q_norm", # openelm
+ "model.layers.{bid}.self_attn.q_layernorm", # persimmon
+ "model.layers.{bid}.self_attn.q_norm", # cohere
+ "transformer.blocks.{bid}.attn.q_ln", # sea-lion
+ "encoder.layer.{bid}.attention.self.layer_norm_q", # jina-bert-v2
+ "transformer.layers.{bid}.attn.q_norm", # openelm
),
-
MODEL_TENSOR.ATTN_K_NORM: (
"language_model.encoder.layers.{bid}.self_attention.k_layernorm",
- "model.layers.{bid}.self_attn.k_layernorm", # persimmon
- "model.layers.{bid}.self_attn.k_norm", # cohere
- "transformer.blocks.{bid}.attn.k_ln", # sea-lion
- "encoder.layer.{bid}.attention.self.layer_norm_k", # jina-bert-v2
- "transformer.layers.{bid}.attn.k_norm", # openelm
+ "model.layers.{bid}.self_attn.k_layernorm", # persimmon
+ "model.layers.{bid}.self_attn.k_norm", # cohere
+ "transformer.blocks.{bid}.attn.k_ln", # sea-lion
+ "encoder.layer.{bid}.attention.self.layer_norm_k", # jina-bert-v2
+ "transformer.layers.{bid}.attn.k_norm", # openelm
),
-
MODEL_TENSOR.ROPE_FREQS: (
"language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon
),
-
MODEL_TENSOR.LAYER_OUT_NORM: (
- "encoder.layer.{bid}.output.LayerNorm", # bert
- "encoder.layers.{bid}.norm2", # nomic-bert
- "transformer.decoder_layer.{bid}.rms_norm_3", # Grok
- "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
- "encoder.layer.{bid}.layer_norm_2" # jina-v2-code
+ "encoder.layer.{bid}.output.LayerNorm", # bert
+ "encoder.layers.{bid}.norm2", # nomic-bert
+ "transformer.decoder_layer.{bid}.rms_norm_3", # Grok
+ "encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
+ "encoder.layer.{bid}.layer_norm_2", # jina-v2-code
),
-
MODEL_TENSOR.SSM_IN: (
"model.layers.{bid}.in_proj",
"backbone.layers.{bid}.mixer.in_proj",
),
-
MODEL_TENSOR.SSM_CONV1D: (
"model.layers.{bid}.conv1d",
"backbone.layers.{bid}.mixer.conv1d",
),
-
MODEL_TENSOR.SSM_X: (
"model.layers.{bid}.x_proj",
"backbone.layers.{bid}.mixer.x_proj",
),
-
MODEL_TENSOR.SSM_DT: (
"model.layers.{bid}.dt_proj",
"backbone.layers.{bid}.mixer.dt_proj",
),
-
MODEL_TENSOR.SSM_A: (
"model.layers.{bid}.A_log",
"backbone.layers.{bid}.mixer.A_log",
),
-
MODEL_TENSOR.SSM_D: (
"model.layers.{bid}.D",
"backbone.layers.{bid}.mixer.D",
),
-
MODEL_TENSOR.SSM_OUT: (
"model.layers.{bid}.out_proj",
"backbone.layers.{bid}.mixer.out_proj",
),
-
- MODEL_TENSOR.ATTN_Q_A: (
- "model.layers.{bid}.self_attn.q_a_proj", # deepseek2
- ),
-
- MODEL_TENSOR.ATTN_Q_B: (
- "model.layers.{bid}.self_attn.q_b_proj", # deepseek2
- ),
-
+ MODEL_TENSOR.ATTN_Q_A: ("model.layers.{bid}.self_attn.q_a_proj",), # deepseek2
+ MODEL_TENSOR.ATTN_Q_B: ("model.layers.{bid}.self_attn.q_b_proj",), # deepseek2
MODEL_TENSOR.ATTN_KV_A_MQA: (
- "model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2
+ "model.layers.{bid}.self_attn.kv_a_proj_with_mqa", # deepseek2
),
-
MODEL_TENSOR.ATTN_KV_B: (
- "model.layers.{bid}.self_attn.kv_b_proj", # deepseek2
+ "model.layers.{bid}.self_attn.kv_b_proj", # deepseek2
),
-
MODEL_TENSOR.ATTN_Q_A_NORM: (
- "model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2
+ "model.layers.{bid}.self_attn.q_a_layernorm", # deepseek2
),
-
MODEL_TENSOR.ATTN_KV_A_NORM: (
- "model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2
+ "model.layers.{bid}.self_attn.kv_a_layernorm", # deepseek2
),
-
MODEL_TENSOR.ATTN_SUB_NORM: (
"model.layers.{bid}.self_attn.inner_attn_ln", # bitnet
),
-
- MODEL_TENSOR.FFN_SUB_NORM: (
- "model.layers.{bid}.mlp.ffn_layernorm", # bitnet
- ),
-
- MODEL_TENSOR.DEC_ATTN_NORM: (
- "decoder.block.{bid}.layer.0.layer_norm", # t5
- ),
-
- MODEL_TENSOR.DEC_ATTN_Q: (
- "decoder.block.{bid}.layer.0.SelfAttention.q", # t5
- ),
-
- MODEL_TENSOR.DEC_ATTN_K: (
- "decoder.block.{bid}.layer.0.SelfAttention.k", # t5
- ),
-
- MODEL_TENSOR.DEC_ATTN_V: (
- "decoder.block.{bid}.layer.0.SelfAttention.v", # t5
- ),
-
+ MODEL_TENSOR.FFN_SUB_NORM: ("model.layers.{bid}.mlp.ffn_layernorm",), # bitnet
+ MODEL_TENSOR.DEC_ATTN_NORM: ("decoder.block.{bid}.layer.0.layer_norm",), # t5
+ MODEL_TENSOR.DEC_ATTN_Q: ("decoder.block.{bid}.layer.0.SelfAttention.q",), # t5
+ MODEL_TENSOR.DEC_ATTN_K: ("decoder.block.{bid}.layer.0.SelfAttention.k",), # t5
+ MODEL_TENSOR.DEC_ATTN_V: ("decoder.block.{bid}.layer.0.SelfAttention.v",), # t5
MODEL_TENSOR.DEC_ATTN_OUT: (
- "decoder.block.{bid}.layer.0.SelfAttention.o", # t5
+ "decoder.block.{bid}.layer.0.SelfAttention.o", # t5
),
-
MODEL_TENSOR.DEC_ATTN_REL_B: (
- "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
+ "decoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
),
-
MODEL_TENSOR.DEC_CROSS_ATTN_NORM: (
- "decoder.block.{bid}.layer.1.layer_norm", # t5
+ "decoder.block.{bid}.layer.1.layer_norm", # t5
),
-
MODEL_TENSOR.DEC_CROSS_ATTN_Q: (
- "decoder.block.{bid}.layer.1.EncDecAttention.q", # t5
+ "decoder.block.{bid}.layer.1.EncDecAttention.q", # t5
),
-
MODEL_TENSOR.DEC_CROSS_ATTN_K: (
- "decoder.block.{bid}.layer.1.EncDecAttention.k", # t5
+ "decoder.block.{bid}.layer.1.EncDecAttention.k", # t5
),
-
MODEL_TENSOR.DEC_CROSS_ATTN_V: (
- "decoder.block.{bid}.layer.1.EncDecAttention.v", # t5
+ "decoder.block.{bid}.layer.1.EncDecAttention.v", # t5
),
-
MODEL_TENSOR.DEC_CROSS_ATTN_OUT: (
- "decoder.block.{bid}.layer.1.EncDecAttention.o", # t5
+ "decoder.block.{bid}.layer.1.EncDecAttention.o", # t5
),
-
MODEL_TENSOR.DEC_CROSS_ATTN_REL_B: (
- "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5
- ),
-
- MODEL_TENSOR.DEC_FFN_NORM: (
- "decoder.block.{bid}.layer.2.layer_norm", # t5
+ "decoder.block.{bid}.layer.1.EncDecAttention.relative_attention_bias", # t5
),
-
+ MODEL_TENSOR.DEC_FFN_NORM: ("decoder.block.{bid}.layer.2.layer_norm",), # t5
MODEL_TENSOR.DEC_FFN_GATE: (
- "decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5
+ "decoder.block.{bid}.layer.2.DenseReluDense.wi_0", # flan-t5
),
-
MODEL_TENSOR.DEC_FFN_UP: (
- "decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5
- "decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5
+ "decoder.block.{bid}.layer.2.DenseReluDense.wi", # t5
+ "decoder.block.{bid}.layer.2.DenseReluDense.wi_1", # flan-t5
),
-
MODEL_TENSOR.DEC_FFN_DOWN: (
- "decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5
- ),
-
- MODEL_TENSOR.DEC_OUTPUT_NORM: (
- "decoder.final_layer_norm", # t5
- ),
-
- MODEL_TENSOR.ENC_ATTN_NORM: (
- "encoder.block.{bid}.layer.0.layer_norm", # t5
- ),
-
- MODEL_TENSOR.ENC_ATTN_Q: (
- "encoder.block.{bid}.layer.0.SelfAttention.q", # t5
- ),
-
- MODEL_TENSOR.ENC_ATTN_K: (
- "encoder.block.{bid}.layer.0.SelfAttention.k", # t5
+ "decoder.block.{bid}.layer.2.DenseReluDense.wo", # t5
),
-
- MODEL_TENSOR.ENC_ATTN_V: (
- "encoder.block.{bid}.layer.0.SelfAttention.v", # t5
- ),
-
+ MODEL_TENSOR.DEC_OUTPUT_NORM: ("decoder.final_layer_norm",), # t5
+ MODEL_TENSOR.ENC_ATTN_NORM: ("encoder.block.{bid}.layer.0.layer_norm",), # t5
+ MODEL_TENSOR.ENC_ATTN_Q: ("encoder.block.{bid}.layer.0.SelfAttention.q",), # t5
+ MODEL_TENSOR.ENC_ATTN_K: ("encoder.block.{bid}.layer.0.SelfAttention.k",), # t5
+ MODEL_TENSOR.ENC_ATTN_V: ("encoder.block.{bid}.layer.0.SelfAttention.v",), # t5
MODEL_TENSOR.ENC_ATTN_OUT: (
- "encoder.block.{bid}.layer.0.SelfAttention.o", # t5
+ "encoder.block.{bid}.layer.0.SelfAttention.o", # t5
),
-
MODEL_TENSOR.ENC_ATTN_REL_B: (
- "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
- ),
-
- MODEL_TENSOR.ENC_FFN_NORM: (
- "encoder.block.{bid}.layer.1.layer_norm", # t5
+ "encoder.block.{bid}.layer.0.SelfAttention.relative_attention_bias", # t5
),
-
+ MODEL_TENSOR.ENC_FFN_NORM: ("encoder.block.{bid}.layer.1.layer_norm",), # t5
MODEL_TENSOR.ENC_FFN_GATE: (
- "encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5
+ "encoder.block.{bid}.layer.1.DenseReluDense.wi_0", # flan-t5
),
-
MODEL_TENSOR.ENC_FFN_UP: (
- "encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5
- "encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5
+ "encoder.block.{bid}.layer.1.DenseReluDense.wi", # t5
+ "encoder.block.{bid}.layer.1.DenseReluDense.wi_1", # flan-t5
),
-
MODEL_TENSOR.ENC_FFN_DOWN: (
- "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5
- ),
-
- MODEL_TENSOR.ENC_OUTPUT_NORM: (
- "encoder.final_layer_norm", # t5
+ "encoder.block.{bid}.layer.1.DenseReluDense.wo", # t5
),
+ MODEL_TENSOR.ENC_OUTPUT_NORM: ("encoder.final_layer_norm",), # t5
}
# architecture-specific block mappings
arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
MODEL_ARCH.ARCTIC: {
- MODEL_TENSOR.FFN_NORM: (
- "model.layers.{bid}.residual_layernorm",
- ),
- MODEL_TENSOR.FFN_NORM_EXP: (
- "model.layers.{bid}.post_attention_layernorm",
- ),
+ MODEL_TENSOR.FFN_NORM: ("model.layers.{bid}.residual_layernorm",),
+ MODEL_TENSOR.FFN_NORM_EXP: ("model.layers.{bid}.post_attention_layernorm",),
},
}
@@ -603,31 +490,35 @@ def __init__(self, arch: MODEL_ARCH, n_blocks: int):
if tensor not in MODEL_TENSORS[arch]:
continue
- tensor_name = TENSOR_NAMES[tensor].format(bid = bid)
+ tensor_name = TENSOR_NAMES[tensor].format(bid=bid)
self.mapping[tensor_name] = (tensor, tensor_name)
for key in keys:
- key = key.format(bid = bid)
+ key = key.format(bid=bid)
self.mapping[key] = (tensor, tensor_name)
- def get_type_and_name(self, key: str, try_suffixes: Sequence[str] = ()) -> tuple[MODEL_TENSOR, str] | None:
+ def get_type_and_name(
+ self, key: str, try_suffixes: Sequence[str] = ()
+ ) -> tuple[MODEL_TENSOR, str] | None:
result = self.mapping.get(key)
if result is not None:
return result
for suffix in try_suffixes:
if key.endswith(suffix):
- result = self.mapping.get(key[:-len(suffix)])
+ result = self.mapping.get(key[: -len(suffix)])
if result is not None:
return result[0], result[1] + suffix
return None
def get_name(self, key: str, try_suffixes: Sequence[str] = ()) -> str | None:
- result = self.get_type_and_name(key, try_suffixes = try_suffixes)
+ result = self.get_type_and_name(key, try_suffixes=try_suffixes)
if result is None:
return None
return result[1]
- def get_type(self, key: str, try_suffixes: Sequence[str] = ()) -> MODEL_TENSOR | None:
- result = self.get_type_and_name(key, try_suffixes = try_suffixes)
+ def get_type(
+ self, key: str, try_suffixes: Sequence[str] = ()
+ ) -> MODEL_TENSOR | None:
+ result = self.get_type_and_name(key, try_suffixes=try_suffixes)
if result is None:
return None
return result[0]
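Editor's note: the suffix retry above is what lets checkpoint keys such as "...q_proj.weight" resolve through mappings registered without the suffix. Usage is roughly as follows (the arch and block count are illustrative, and the import assumes gguf-py's usual exports):

    import gguf  # assumption: gguf-py is on the path

    tmap = gguf.TensorNameMap(gguf.MODEL_ARCH.LLAMA, 32)
    tmap.get_name("model.layers.0.self_attn.q_proj.weight",
                  try_suffixes=(".weight", ".bias"))
    # -> "blk.0.attn_q.weight" under the standard GGUF tensor names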
diff --git a/src/gguf-py/gguf/utility.py b/src/gguf-py/gguf/utility.py
index 40d59b7..e72b904 100644
--- a/src/gguf-py/gguf/utility.py
+++ b/src/gguf-py/gguf/utility.py
@@ -7,21 +7,27 @@ def fill_templated_filename(filename: str, output_type: str | None) -> str:
# Given a file name fill in any type templates e.g. 'some-model-name.{ftype}.gguf'
ftype_lowercase: str = output_type.lower() if output_type is not None else ""
ftype_uppercase: str = output_type.upper() if output_type is not None else ""
- return filename.format(ftype_lowercase,
- outtype=ftype_lowercase, ftype=ftype_lowercase,
- OUTTYPE=ftype_uppercase, FTYPE=ftype_uppercase)
-
-
-def model_weight_count_rounded_notation(model_params_count: int, min_digits: int = 2) -> str:
- if model_params_count > 1e12 :
+ return filename.format(
+ ftype_lowercase,
+ outtype=ftype_lowercase,
+ ftype=ftype_lowercase,
+ OUTTYPE=ftype_uppercase,
+ FTYPE=ftype_uppercase,
+ )
+
+
+def model_weight_count_rounded_notation(
+ model_params_count: int, min_digits: int = 2
+) -> str:
+ if model_params_count > 1e12:
# Trillions Of Parameters
scaled_model_params = model_params_count * 1e-12
scale_suffix = "T"
- elif model_params_count > 1e9 :
+ elif model_params_count > 1e9:
# Billions Of Parameters
scaled_model_params = model_params_count * 1e-9
scale_suffix = "B"
- elif model_params_count > 1e6 :
+ elif model_params_count > 1e6:
# Millions Of Parameters
scaled_model_params = model_params_count * 1e-6
scale_suffix = "M"
@@ -30,39 +36,65 @@ def model_weight_count_rounded_notation(model_params_count: int, min_digits: int
scaled_model_params = model_params_count * 1e-3
scale_suffix = "K"
- fix = max(min_digits - len(str(round(scaled_model_params)).lstrip('0')), 0)
+ fix = max(min_digits - len(str(round(scaled_model_params)).lstrip("0")), 0)
return f"{scaled_model_params:.{fix}f}{scale_suffix}"
-def size_label(total_params: int, shared_params: int, expert_params: int, expert_count: int) -> str:
+def size_label(
+ total_params: int, shared_params: int, expert_params: int, expert_count: int
+) -> str:
if expert_count > 0:
- pretty_size = model_weight_count_rounded_notation(abs(shared_params) + abs(expert_params), min_digits=2)
+ pretty_size = model_weight_count_rounded_notation(
+ abs(shared_params) + abs(expert_params), min_digits=2
+ )
size_class = f"{expert_count}x{pretty_size}"
else:
- size_class = model_weight_count_rounded_notation(abs(total_params), min_digits=2)
+ size_class = model_weight_count_rounded_notation(
+ abs(total_params), min_digits=2
+ )
return size_class
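Editor's note: so a Mixtral-style MoE reports expert count times the per-expert (shared + expert) size rather than the ~46.7B total; with illustrative parameter counts:

    from gguf.utility import size_label

    # ~1.1B shared + ~6.14B per expert -> "7.2B", prefixed by the expert count
    assert size_label(46_700_000_000, 1_100_000_000, 6_140_000_000, 8) == "8x7.2B"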
-def naming_convention(model_name: str | None, base_name: str | None, finetune_string: str | None, version_string: str | None, size_label: str | None, output_type: str | None, model_type: Literal['vocab', 'LoRA'] | None = None) -> str:
+def naming_convention(
+ model_name: str | None,
+ base_name: str | None,
+ finetune_string: str | None,
+ version_string: str | None,
+ size_label: str | None,
+ output_type: str | None,
+ model_type: Literal["vocab", "LoRA"] | None = None,
+) -> str:
# Reference: https://github.com/ggerganov/ggml/blob/master/docs/gguf.md#gguf-naming-convention
if base_name is not None:
- name = base_name.strip().replace(' ', '-').replace('/', '-')
+ name = base_name.strip().replace(" ", "-").replace("/", "-")
elif model_name is not None:
- name = model_name.strip().replace(' ', '-').replace('/', '-')
+ name = model_name.strip().replace(" ", "-").replace("/", "-")
else:
name = "ggml-model"
parameters = f"-{size_label}" if size_label is not None else ""
- finetune = f"-{finetune_string.strip().replace(' ', '-')}" if finetune_string is not None else ""
-
- version = f"-{version_string.strip().replace(' ', '-')}" if version_string is not None else ""
-
- encoding = f"-{output_type.strip().replace(' ', '-').upper()}" if output_type is not None else ""
+ finetune = (
+ f"-{finetune_string.strip().replace(' ', '-')}"
+ if finetune_string is not None
+ else ""
+ )
+
+ version = (
+ f"-{version_string.strip().replace(' ', '-')}"
+ if version_string is not None
+ else ""
+ )
+
+ encoding = (
+ f"-{output_type.strip().replace(' ', '-').upper()}"
+ if output_type is not None
+ else ""
+ )
kind = f"-{model_type.strip().replace(' ', '-')}" if model_type is not None else ""
diff --git a/src/gguf-py/gguf/vocab.py b/src/gguf-py/gguf/vocab.py
index dc57499..dddb670 100644
--- a/src/gguf-py/gguf/vocab.py
+++ b/src/gguf-py/gguf/vocab.py
@@ -5,7 +5,16 @@
import json
import os
from pathlib import Path
-from typing import Any, Callable, Sequence, Mapping, Iterable, Protocol, ClassVar, runtime_checkable
+from typing import (
+ Any,
+ Callable,
+ Sequence,
+ Mapping,
+ Iterable,
+ Protocol,
+ ClassVar,
+ runtime_checkable,
+)
from sentencepiece import SentencePieceProcessor
@@ -23,7 +32,9 @@ class SpecialVocab:
chat_template: str | Sequence[Mapping[str, str]] | None
def __init__(
- self, path: str | os.PathLike[str], load_merges: bool = False,
+ self,
+ path: str | os.PathLike[str],
+ load_merges: bool = False,
special_token_types: Iterable[str] | None = None,
n_vocab: int | None = None,
):
@@ -36,40 +47,60 @@ def __init__(
if special_token_types is not None:
self.special_token_types = special_token_types
else:
- self.special_token_types = ('bos', 'eos', 'unk', 'sep', 'pad', 'cls', 'mask')
+ self.special_token_types = (
+ "bos",
+ "eos",
+ "unk",
+ "sep",
+ "pad",
+ "cls",
+ "mask",
+ )
self._load(Path(path))
def __repr__(self) -> str:
- return '<SpecialVocab with {} merges, special tokens {}, add special tokens {}>'.format(
- len(self.merges), self.special_token_ids or "unset", self.add_special_token or "unset",
+ return "<SpecialVocab with {} merges, special tokens {}, add special tokens {}>".format(
+ len(self.merges),
+ self.special_token_ids or "unset",
+ self.add_special_token or "unset",
)
def add_to_gguf(self, gw: GGUFWriter, quiet: bool = False) -> None:
if self.merges:
if not quiet:
- logger.info(f'Adding {len(self.merges)} merge(s).')
+ logger.info(f"Adding {len(self.merges)} merge(s).")
gw.add_token_merges(self.merges)
elif self.load_merges:
- logger.warning('Adding merges requested but no merges found, output may be non-functional.')
+ logger.warning(
+ "Adding merges requested but no merges found, output may be non-functional."
+ )
for typ, tokid in self.special_token_ids.items():
- id_handler: Callable[[int], None] | None = getattr(gw, f'add_{typ}_token_id', None)
+ id_handler: Callable[[int], None] | None = getattr(
+ gw, f"add_{typ}_token_id", None
+ )
if id_handler is None:
- logger.warning(f'No handler for special token type {typ} with id {tokid} - skipping')
+ logger.warning(
+ f"No handler for special token type {typ} with id {tokid} - skipping"
+ )
continue
if not quiet:
- logger.info(f'Setting special token type {typ} to {tokid}')
+ logger.info(f"Setting special token type {typ} to {tokid}")
id_handler(tokid)
for typ, value in self.add_special_token.items():
- add_handler: Callable[[bool], None] | None = getattr(gw, f'add_add_{typ}_token', None)
+ add_handler: Callable[[bool], None] | None = getattr(
+ gw, f"add_add_{typ}_token", None
+ )
if add_handler is None:
- logger.warning(f'No handler for add_{typ}_token with value {value} - skipping')
+ logger.warning(
+ f"No handler for add_{typ}_token with value {value} - skipping"
+ )
continue
if not quiet:
- logger.info(f'Setting add_{typ}_token to {value}')
+ logger.info(f"Setting add_{typ}_token to {value}")
add_handler(value)
if self.chat_template is not None:
if not quiet:
- logger.info(f'Setting chat_template to {self.chat_template}')
+ logger.info(f"Setting chat_template to {self.chat_template}")
gw.add_chat_template(self.chat_template)
def _load(self, path: Path) -> None:
@@ -79,12 +110,12 @@ def _load(self, path: Path) -> None:
self._try_load_merges_txt(path)
def _try_load_merges_txt(self, path: Path) -> bool:
- merges_file = path / 'merges.txt'
+ merges_file = path / "merges.txt"
if not merges_file.is_file():
return False
- with open(merges_file, 'r', encoding = 'utf-8') as fp:
- first_line = next(fp, '').strip()
- if not first_line.startswith('#'):
+ with open(merges_file, "r", encoding="utf-8") as fp:
+ first_line = next(fp, "").strip()
+ if not first_line.startswith("#"):
fp.seek(0)
line_num = 0
else:
@@ -97,9 +128,11 @@ def _try_load_merges_txt(self, path: Path) -> bool:
continue
parts = line.split(None, 3)
if len(parts) != 2:
- logger.warning(f'{merges_file.name}: Line {line_num}: Entry malformed, ignoring')
+ logger.warning(
+ f"{merges_file.name}: Line {line_num}: Entry malformed, ignoring"
+ )
continue
- merges.append(f'{parts[0]} {parts[1]}')
+ merges.append(f"{parts[0]} {parts[1]}")
self.merges = merges
return True
@@ -107,45 +140,49 @@ def _set_special_token(self, typ: str, tid: Any) -> None:
if not isinstance(tid, int):
return
if tid < 0:
- raise ValueError(f'invalid value for special token type {typ}: {tid}')
+ raise ValueError(f"invalid value for special token type {typ}: {tid}")
if self.n_vocab is None or tid < self.n_vocab:
if typ in self.special_token_ids:
return
self.special_token_ids[typ] = tid
return
- logger.warning(f'Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping')
+ logger.warning(
+ f"Special token type {typ}, id {tid} out of range, must be under {self.n_vocab} - skipping"
+ )
def _try_load_from_tokenizer_json(self, path: Path) -> bool:
- tokenizer_file = path / 'tokenizer.json'
+ tokenizer_file = path / "tokenizer.json"
if tokenizer_file.is_file():
- with open(tokenizer_file, encoding = 'utf-8') as f:
+ with open(tokenizer_file, encoding="utf-8") as f:
tokenizer = json.load(f)
if self.load_merges:
- merges = tokenizer.get('model', {}).get('merges')
+ merges = tokenizer.get("model", {}).get("merges")
if isinstance(merges, list) and merges and isinstance(merges[0], str):
self.merges = merges
- added_tokens = tokenizer.get('added_tokens', {})
+ added_tokens = tokenizer.get("added_tokens", {})
else:
added_tokens = {}
- tokenizer_config_file = path / 'tokenizer_config.json'
+ tokenizer_config_file = path / "tokenizer_config.json"
if not tokenizer_config_file.is_file():
return True
- with open(tokenizer_config_file, encoding = 'utf-8') as f:
+ with open(tokenizer_config_file, encoding="utf-8") as f:
tokenizer_config = json.load(f)
- chat_template = tokenizer_config.get('chat_template')
+ chat_template = tokenizer_config.get("chat_template")
if chat_template is None or isinstance(chat_template, (str, list)):
self.chat_template = chat_template
else:
- logger.warning(f'Bad type for chat_template field in {tokenizer_config_file!r} - ignoring')
+ logger.warning(
+ f"Bad type for chat_template field in {tokenizer_config_file!r} - ignoring"
+ )
for typ in self.special_token_types:
- add_entry = tokenizer_config.get(f'add_{typ}_token')
+ add_entry = tokenizer_config.get(f"add_{typ}_token")
if isinstance(add_entry, bool):
self.add_special_token[typ] = add_entry
- entry = tokenizer_config.get(f'{typ}_token')
+ entry = tokenizer_config.get(f"{typ}_token")
if isinstance(entry, str):
tc_content = entry
elif isinstance(entry, dict):
- entry_content = entry.get('content')
+ entry_content = entry.get("content")
if not isinstance(entry_content, str):
continue
tc_content = entry_content
@@ -153,20 +190,24 @@ def _try_load_from_tokenizer_json(self, path: Path) -> bool:
continue
# We only need the first match here.
maybe_token_id = next(
- (atok.get('id') for atok in added_tokens if atok.get('content') == tc_content),
+ (
+ atok.get("id")
+ for atok in added_tokens
+ if atok.get("content") == tc_content
+ ),
None,
)
self._set_special_token(typ, maybe_token_id)
return True
def _try_load_from_config_json(self, path: Path) -> bool:
- config_file = path / 'config.json'
+ config_file = path / "config.json"
if not config_file.is_file():
return False
- with open(config_file, encoding = 'utf-8') as f:
+ with open(config_file, encoding="utf-8") as f:
config = json.load(f)
for typ in self.special_token_types:
- self._set_special_token(typ, config.get(f'{typ}_token_id'))
+ self._set_special_token(typ, config.get(f"{typ}_token_id"))
return True
@@ -202,54 +243,59 @@ class BpeVocab(Vocab):
def __init__(self, base_path: Path):
added_tokens: dict[str, int] = {}
- if (fname_tokenizer := base_path / 'vocab.json').exists():
+ if (fname_tokenizer := base_path / "vocab.json").exists():
# "slow" tokenizer
with open(fname_tokenizer, encoding="utf-8") as f:
self.vocab = json.load(f)
try:
# FIXME: Verify that added tokens here _cannot_ overlap with the main vocab.
- with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
+ with open(base_path / "added_tokens.json", encoding="utf-8") as f:
added_tokens = json.load(f)
except FileNotFoundError:
pass
else:
# "fast" tokenizer
- fname_tokenizer = base_path / 'tokenizer.json'
+ fname_tokenizer = base_path / "tokenizer.json"
# if this fails, FileNotFoundError propagates to caller
with open(fname_tokenizer, encoding="utf-8") as f:
tokenizer_json = json.load(f)
- tokenizer_model: dict[str, Any] = tokenizer_json['model']
+ tokenizer_model: dict[str, Any] = tokenizer_json["model"]
if (
- tokenizer_model['type'] != 'BPE' or tokenizer_model.get('byte_fallback', False)
- or tokenizer_json['decoder']['type'] != 'ByteLevel'
+ tokenizer_model["type"] != "BPE"
+ or tokenizer_model.get("byte_fallback", False)
+ or tokenizer_json["decoder"]["type"] != "ByteLevel"
):
- raise FileNotFoundError('Cannot find GPT-2 BPE tokenizer')
+ raise FileNotFoundError("Cannot find GPT-2 BPE tokenizer")
self.vocab = tokenizer_model["vocab"]
- if (added := tokenizer_json.get('added_tokens')) is not None:
+ if (added := tokenizer_json.get("added_tokens")) is not None:
# Added tokens here can be duplicates of the main vocabulary.
- added_tokens = {item['content']: item['id']
- for item in added
- if item['content'] not in self.vocab}
+ added_tokens = {
+ item["content"]: item["id"]
+ for item in added
+ if item["content"] not in self.vocab
+ }
- vocab_size = len(self.vocab)
+ vocab_size = len(self.vocab)
expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
- actual_ids = sorted(added_tokens.values())
+ actual_ids = sorted(added_tokens.values())
if expected_ids != actual_ids:
expected_end_id = vocab_size + len(actual_ids) - 1
- raise ValueError(f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
- f"{vocab_size} - {expected_end_id}; got {actual_ids}")
+ raise ValueError(
+ f"Expected the {len(actual_ids)} added token ID(s) to be sequential in the range "
+ f"{vocab_size} - {expected_end_id}; got {actual_ids}"
+ )
items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
- self.added_tokens_dict = added_tokens
- self.added_tokens_list = [text for (text, idx) in items]
- self.vocab_size_base = vocab_size
- self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
- self.fname_tokenizer = fname_tokenizer
+ self.added_tokens_dict = added_tokens
+ self.added_tokens_list = [text for (text, idx) in items]
+ self.vocab_size_base = vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
def bpe_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
reverse_vocab = {id: encoded_tok for encoded_tok, id in self.vocab.items()}
@@ -276,40 +322,44 @@ class SentencePieceVocab(Vocab):
def __init__(self, base_path: Path):
added_tokens: dict[str, int] = {}
- if (fname_tokenizer := base_path / 'tokenizer.model').exists():
+ if (fname_tokenizer := base_path / "tokenizer.model").exists():
# normal location
try:
- with open(base_path / 'added_tokens.json', encoding="utf-8") as f:
+ with open(base_path / "added_tokens.json", encoding="utf-8") as f:
added_tokens = json.load(f)
except FileNotFoundError:
pass
- elif not (fname_tokenizer := base_path.parent / 'tokenizer.model').exists():
+ elif not (fname_tokenizer := base_path.parent / "tokenizer.model").exists():
# not found in alternate location either
- raise FileNotFoundError('Cannot find tokenizer.model')
+ raise FileNotFoundError("Cannot find tokenizer.model")
self.sentencepiece_tokenizer = SentencePieceProcessor()
self.sentencepiece_tokenizer.LoadFromFile(str(fname_tokenizer))
vocab_size = self.sentencepiece_tokenizer.vocab_size()
- new_tokens = {id: piece for piece, id in added_tokens.items() if id >= vocab_size}
+ new_tokens = {
+ id: piece for piece, id in added_tokens.items() if id >= vocab_size
+ }
expected_new_ids = list(range(vocab_size, vocab_size + len(new_tokens)))
- actual_new_ids = sorted(new_tokens.keys())
+ actual_new_ids = sorted(new_tokens.keys())
if expected_new_ids != actual_new_ids:
- raise ValueError(f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}")
+ raise ValueError(
+ f"Expected new token IDs {expected_new_ids} to be sequential; got {actual_new_ids}"
+ )
# Token pieces that were added to the base vocabulary.
- self.added_tokens_dict = added_tokens
- self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
- self.vocab_size_base = vocab_size
- self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
- self.fname_tokenizer = fname_tokenizer
+ self.added_tokens_dict = added_tokens
+ self.added_tokens_list = [new_tokens[id] for id in actual_new_ids]
+ self.vocab_size_base = vocab_size
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
def sentencepiece_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
tokenizer = self.sentencepiece_tokenizer
for i in range(tokenizer.vocab_size()):
piece = tokenizer.IdToPiece(i)
- text = piece.encode("utf-8")
+ text = piece.encode("utf-8")
score: float = tokenizer.GetScore(i)
toktype = gguf.TokenType.NORMAL
@@ -347,25 +397,27 @@ class LlamaHfVocab(Vocab):
name = "hfft"
def __init__(self, base_path: Path):
- fname_tokenizer = base_path / 'tokenizer.json'
+ fname_tokenizer = base_path / "tokenizer.json"
# if this fails, FileNotFoundError propagates to caller
- with open(fname_tokenizer, encoding='utf-8') as f:
+ with open(fname_tokenizer, encoding="utf-8") as f:
tokenizer_json = json.load(f)
# pre-check so we know if we need transformers
- tokenizer_model: dict[str, Any] = tokenizer_json['model']
+ tokenizer_model: dict[str, Any] = tokenizer_json["model"]
is_llama3 = (
- tokenizer_model['type'] == 'BPE' and tokenizer_model.get('ignore_merges', False)
- and not tokenizer_model.get('byte_fallback', True)
+ tokenizer_model["type"] == "BPE"
+ and tokenizer_model.get("ignore_merges", False)
+ and not tokenizer_model.get("byte_fallback", True)
)
if is_llama3:
- raise TypeError('Llama 3 must be converted with BpeVocab')
+ raise TypeError("Llama 3 must be converted with BpeVocab")
if not is_llama3 and (
- tokenizer_model['type'] != 'BPE' or not tokenizer_model.get('byte_fallback', False)
- or tokenizer_json['decoder']['type'] != 'Sequence'
+ tokenizer_model["type"] != "BPE"
+ or not tokenizer_model.get("byte_fallback", False)
+ or tokenizer_json["decoder"]["type"] != "Sequence"
):
- raise FileNotFoundError('Cannot find Llama BPE tokenizer')
+ raise FileNotFoundError("Cannot find Llama BPE tokenizer")
try:
from transformers import AutoTokenizer
@@ -387,7 +439,7 @@ def __init__(self, base_path: Path):
# Initialize lists and dictionaries for added tokens
self.added_tokens_list = []
self.added_tokens_dict = dict()
- self.added_tokens_ids = set()
+ self.added_tokens_ids = set()
# Process added tokens
for tok, tokidx in sorted(
@@ -408,7 +460,7 @@ def __init__(self, base_path: Path):
# Set vocabulary sizes
self.vocab_size_base = self.tokenizer.vocab_size
- self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
+ self.vocab_size = self.vocab_size_base + len(self.added_tokens_list)
self.fname_tokenizer = fname_tokenizer
@@ -427,16 +479,22 @@ def hf_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
# Yield token text, score, and type
yield token_text, self.get_token_score(token_id), self.get_token_type(
- token_id, token_text, self.special_ids # Reuse already stored special IDs
+ token_id,
+ token_text,
+ self.special_ids, # Reuse already stored special IDs
)
- def get_token_type(self, token_id: int, token_text: bytes, special_ids: set[int]) -> gguf.TokenType:
+ def get_token_type(
+ self, token_id: int, token_text: bytes, special_ids: set[int]
+ ) -> gguf.TokenType:
# Special case for byte tokens
- if re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text):
+ if re.fullmatch(rb"<0x[0-9A-Fa-f]{2}>", token_text):
return gguf.TokenType.BYTE
# Determine token type based on whether it's a special token
- return gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL
+ return (
+ gguf.TokenType.CONTROL if token_id in special_ids else gguf.TokenType.NORMAL
+ )
def get_token_score(self, token_id: int) -> float:
# Placeholder for actual logic to determine the token's score
@@ -446,7 +504,9 @@ def get_token_score(self, token_id: int) -> float:
def added_tokens(self) -> Iterable[tuple[bytes, float, gguf.TokenType]]:
for text in self.added_tokens_list:
if text in self.specials:
- toktype = self.get_token_type(self.specials[text], b'', self.special_ids)
+ toktype = self.get_token_type(
+ self.specials[text], b"", self.special_ids
+ )
score = self.get_token_score(self.specials[text])
else:
toktype = gguf.TokenType.USER_DEFINED
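# Editor's sketch (not part of the diff): the loops in add_to_gguf above resolve
# writer methods by name, so the writer only needs an add_<typ>_token_id method
# for each special token type it supports; unknown types are warned about and
# skipped. A minimal, runnable illustration of that getattr dispatch follows --
# DemoWriter and set_special_tokens are hypothetical stand-ins, not
# AutoGGUF/gguf-py APIs:
class DemoWriter:
    def add_bos_token_id(self, tokid: int) -> None:
        print(f"bos token id set to {tokid}")


def set_special_tokens(writer, special_token_ids: dict) -> None:
    for typ, tokid in special_token_ids.items():
        handler = getattr(writer, f"add_{typ}_token_id", None)
        if handler is None:
            continue  # mirror the diff: skip types the writer has no handler for
        handler(tokid)


set_special_tokens(DemoWriter(), {"bos": 1, "eos": 2})  # "eos" is skipped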
diff --git a/src/imports_and_globals.py b/src/imports_and_globals.py
index ee32b53..246d1bf 100644
--- a/src/imports_and_globals.py
+++ b/src/imports_and_globals.py
@@ -1,39 +1,66 @@
-import os
-import sys
-import psutil
-import subprocess
-import time
-import signal
-import json
-import platform
-import requests
-import zipfile
-from datetime import datetime
-from PyQt6.QtWidgets import (QApplication, QMainWindow, QVBoxLayout, QHBoxLayout, QWidget, QPushButton,
- QListWidget, QLineEdit, QLabel, QFileDialog, QProgressBar, QComboBox, QTextEdit,
- QCheckBox, QGroupBox, QFormLayout, QScrollArea, QSlider, QSpinBox, QListWidgetItem,
- QMessageBox, QDialog, QPlainTextEdit, QMenu)
-from PyQt6.QtCore import QTimer, QThread, pyqtSignal, Qt, QSize
-from PyQt6.QtGui import QCloseEvent, QAction
-
-def ensure_directory(path):
- if not os.path.exists(path):
- os.makedirs(path)
-
-def open_file_safe(file_path, mode='r'):
- encodings = ['utf-8', 'latin-1', 'ascii', 'utf-16']
- for encoding in encodings:
- try:
- return open(file_path, mode, encoding=encoding)
- except UnicodeDecodeError:
- continue
- raise ValueError(f"Unable to open file {file_path} with any of the encodings: {encodings}")
-
-def resource_path(relative_path):
- try:
- # PyInstaller creates a temp folder and stores path in _MEIPASS
- base_path = sys._MEIPASS
- except Exception:
- base_path = os.path.abspath(".")
-
- return os.path.join(base_path, relative_path)
\ No newline at end of file
+import os
+import sys
+import psutil
+import subprocess
+import time
+import signal
+import json
+import platform
+import requests
+import zipfile
+from datetime import datetime
+from PyQt6.QtWidgets import (
+ QApplication,
+ QMainWindow,
+ QVBoxLayout,
+ QHBoxLayout,
+ QWidget,
+ QPushButton,
+ QListWidget,
+ QLineEdit,
+ QLabel,
+ QFileDialog,
+ QProgressBar,
+ QComboBox,
+ QTextEdit,
+ QCheckBox,
+ QGroupBox,
+ QFormLayout,
+ QScrollArea,
+ QSlider,
+ QSpinBox,
+ QListWidgetItem,
+ QMessageBox,
+ QDialog,
+ QPlainTextEdit,
+ QMenu,
+)
+from PyQt6.QtCore import QTimer, QThread, pyqtSignal, Qt, QSize
+from PyQt6.QtGui import QCloseEvent, QAction
+
+
+def ensure_directory(path):
+ if not os.path.exists(path):
+ os.makedirs(path)
+
+
+def open_file_safe(file_path, mode="r"):
+ encodings = ["utf-8", "latin-1", "ascii", "utf-16"]
+ for encoding in encodings:
+ try:
+ return open(file_path, mode, encoding=encoding)
+ except UnicodeDecodeError:
+ continue
+ raise ValueError(
+ f"Unable to open file {file_path} with any of the encodings: {encodings}"
+ )
+
+
+def resource_path(relative_path):
+ try:
+ # PyInstaller creates a temp folder and stores path in _MEIPASS
+ base_path = sys._MEIPASS
+ except Exception:
+ base_path = os.path.abspath(".")
+
+ return os.path.join(base_path, relative_path)
diff --git a/src/localizations.py b/src/localizations.py
index 6b64b14..b33a66c 100644
--- a/src/localizations.py
+++ b/src/localizations.py
@@ -1,5258 +1,6126 @@
-import os
-
-class _Localization:
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = ""
- self.RAM_USAGE = ""
- self.CPU_USAGE = ""
- self.BACKEND = ""
- self.REFRESH_BACKENDS = ""
- self.MODELS_PATH = ""
- self.OUTPUT_PATH = ""
- self.LOGS_PATH = ""
- self.BROWSE = ""
- self.AVAILABLE_MODELS = ""
- self.QUANTIZATION_TYPE = ""
- self.ALLOW_REQUANTIZE = ""
- self.LEAVE_OUTPUT_TENSOR = ""
- self.PURE = ""
- self.IMATRIX = ""
- self.INCLUDE_WEIGHTS = ""
- self.EXCLUDE_WEIGHTS = ""
- self.USE_OUTPUT_TENSOR_TYPE = ""
- self.USE_TOKEN_EMBEDDING_TYPE = ""
- self.KEEP_SPLIT = ""
- self.KV_OVERRIDES = ""
- self.ADD_NEW_OVERRIDE = ""
- self.QUANTIZE_MODEL = ""
- self.SAVE_PRESET = ""
- self.LOAD_PRESET = ""
- self.TASKS = ""
- self.DOWNLOAD_LLAMACPP = ""
- self.SELECT_RELEASE = ""
- self.SELECT_ASSET = ""
- self.EXTRACT_CUDA_FILES = ""
- self.SELECT_CUDA_BACKEND = ""
- self.DOWNLOAD = ""
- self.IMATRIX_GENERATION = ""
- self.DATA_FILE = ""
- self.MODEL = ""
- self.OUTPUT = ""
- self.OUTPUT_FREQUENCY = ""
- self.GPU_OFFLOAD = ""
- self.AUTO = ""
- self.GENERATE_IMATRIX = ""
- self.ERROR = ""
- self.WARNING = ""
- self.PROPERTIES = ""
- self.CANCEL = ""
- self.RESTART = ""
- self.DELETE = ""
- self.CONFIRM_DELETION = ""
- self.TASK_RUNNING_WARNING = ""
- self.YES = ""
- self.NO = ""
- self.DOWNLOAD_COMPLETE = ""
- self.CUDA_EXTRACTION_FAILED = ""
- self.PRESET_SAVED = ""
- self.PRESET_LOADED = ""
- self.NO_ASSET_SELECTED = ""
- self.DOWNLOAD_FAILED = ""
- self.NO_BACKEND_SELECTED = ""
- self.NO_MODEL_SELECTED = ""
- self.REFRESH_RELEASES = ""
- self.NO_SUITABLE_CUDA_BACKENDS = ""
- self.LLAMACPP_DOWNLOADED_EXTRACTED = ""
- self.CUDA_FILES_EXTRACTED = ""
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = ""
- self.ERROR_FETCHING_RELEASES = ""
- self.CONFIRM_DELETION_TITLE = ""
- self.LOG_FOR = ""
- self.ALL_FILES = ""
- self.GGUF_FILES = ""
- self.DAT_FILES = ""
- self.JSON_FILES = ""
- self.FAILED_LOAD_PRESET = ""
- self.INITIALIZING_AUTOGGUF = ""
- self.AUTOGGUF_INITIALIZATION_COMPLETE = ""
- self.REFRESHING_BACKENDS = ""
- self.NO_BACKENDS_AVAILABLE = ""
- self.FOUND_VALID_BACKENDS = ""
- self.SAVING_PRESET = ""
- self.PRESET_SAVED_TO = ""
- self.LOADING_PRESET = ""
- self.PRESET_LOADED_FROM = ""
- self.ADDING_KV_OVERRIDE = ""
- self.SAVING_TASK_PRESET = ""
- self.TASK_PRESET_SAVED = ""
- self.TASK_PRESET_SAVED_TO = ""
- self.RESTARTING_TASK = ""
- self.IN_PROGRESS = ""
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = ""
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = ""
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = ""
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = ""
- self.REFRESHING_LLAMACPP_RELEASES = ""
- self.UPDATING_ASSET_LIST = ""
- self.UPDATING_CUDA_OPTIONS = ""
- self.STARTING_LLAMACPP_DOWNLOAD = ""
- self.UPDATING_CUDA_BACKENDS = ""
- self.NO_CUDA_BACKEND_SELECTED = ""
- self.EXTRACTING_CUDA_FILES = ""
- self.DOWNLOAD_ERROR = ""
- self.SHOWING_TASK_CONTEXT_MENU = ""
- self.SHOWING_PROPERTIES_FOR_TASK = ""
- self.CANCELLING_TASK = ""
- self.CANCELED = ""
- self.DELETING_TASK = ""
- self.LOADING_MODELS = ""
- self.LOADED_MODELS = ""
- self.BROWSING_FOR_MODELS_DIRECTORY = ""
- self.SELECT_MODELS_DIRECTORY = ""
- self.BROWSING_FOR_OUTPUT_DIRECTORY = ""
- self.SELECT_OUTPUT_DIRECTORY = ""
- self.BROWSING_FOR_LOGS_DIRECTORY = ""
- self.SELECT_LOGS_DIRECTORY = ""
- self.BROWSING_FOR_IMATRIX_FILE = ""
- self.SELECT_IMATRIX_FILE = ""
- self.RAM_USAGE_FORMAT = ""
- self.CPU_USAGE_FORMAT = ""
- self.VALIDATING_QUANTIZATION_INPUTS = ""
- self.MODELS_PATH_REQUIRED = ""
- self.OUTPUT_PATH_REQUIRED = ""
- self.LOGS_PATH_REQUIRED = ""
- self.STARTING_MODEL_QUANTIZATION = ""
- self.INPUT_FILE_NOT_EXIST = ""
- self.QUANTIZING_MODEL_TO = ""
- self.QUANTIZATION_TASK_STARTED = ""
- self.ERROR_STARTING_QUANTIZATION = ""
- self.UPDATING_MODEL_INFO = ""
- self.TASK_FINISHED = ""
- self.SHOWING_TASK_DETAILS_FOR = ""
- self.BROWSING_FOR_IMATRIX_DATA_FILE = ""
- self.SELECT_DATA_FILE = ""
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = ""
- self.SELECT_MODEL_FILE = ""
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = ""
- self.SELECT_OUTPUT_FILE = ""
- self.STARTING_IMATRIX_GENERATION = ""
- self.BACKEND_PATH_NOT_EXIST = ""
- self.GENERATING_IMATRIX = ""
- self.ERROR_STARTING_IMATRIX_GENERATION = ""
- self.IMATRIX_GENERATION_TASK_STARTED = ""
- self.ERROR_MESSAGE = ""
- self.TASK_ERROR = ""
- self.APPLICATION_CLOSING = ""
- self.APPLICATION_CLOSED = ""
- self.SELECT_QUANTIZATION_TYPE = ""
- self.ALLOWS_REQUANTIZING = ""
- self.LEAVE_OUTPUT_WEIGHT = ""
- self.DISABLE_K_QUANT_MIXTURES = ""
- self.USE_DATA_AS_IMPORTANCE_MATRIX = ""
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = ""
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = ""
- self.OUTPUT_TENSOR_TYPE = ""
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = ""
- self.TOKEN_EMBEDDING_TYPE = ""
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = ""
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = ""
- self.OVERRIDE_MODEL_METADATA = ""
- self.INPUT_DATA_FILE_FOR_IMATRIX = ""
- self.MODEL_TO_BE_QUANTIZED = ""
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = ""
- self.HOW_OFTEN_TO_SAVE_IMATRIX = ""
- self.SET_GPU_OFFLOAD_VALUE = ""
- self.COMPLETED = ""
- self.REFRESH_MODELS = ""
- self.EXTRA_ARGUMENTS = ""
- self.EXTRA_ARGUMENTS_LABEL = ""
- self.CONTEXT_SIZE = ""
- self.CONTEXT_SIZE_FOR_IMATRIX = ""
- self.THREADS = ""
- self.NUMBER_OF_THREADS_FOR_IMATRIX = ""
- self.LORA_CONVERSION = ""
- self.LORA_INPUT_PATH = ""
- self.LORA_OUTPUT_PATH = ""
- self.SELECT_LORA_INPUT_DIRECTORY = ""
- self.SELECT_LORA_OUTPUT_FILE = ""
- self.CONVERT_LORA = ""
- self.STARTING_LORA_CONVERSION = ""
- self.LORA_INPUT_PATH_REQUIRED = ""
- self.LORA_OUTPUT_PATH_REQUIRED = ""
- self.ERROR_STARTING_LORA_CONVERSION = ""
- self.LORA_CONVERSION_TASK_STARTED = ""
- self.BIN_FILES = ""
- self.BROWSING_FOR_LORA_INPUT_DIRECTORY = ""
- self.BROWSING_FOR_LORA_OUTPUT_FILE = ""
- self.CONVERTING_LORA = ""
- self.LORA_CONVERSION_FINISHED = ""
- self.LORA_FILE_MOVED = ""
- self.LORA_FILE_NOT_FOUND = ""
- self.ERROR_MOVING_LORA_FILE = ""
- self.EXPORT_LORA = ""
- self.MODEL_PATH_REQUIRED = ""
- self.OUTPUT_PATH_REQUIRED = ""
- self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = ""
- self.INVALID_LORA_SCALE_VALUE = ""
- self.ERROR_STARTING_LORA_EXPORT = ""
- self.LORA_EXPORT_TASK_STARTED = ""
- self.GGML_LORA_ADAPTERS = ""
- self.SELECT_LORA_ADAPTER_FILES = ""
- self.ADD_ADAPTER = ""
- self.DELETE_ADAPTER = ""
- self.LORA_SCALE = ""
- self.ENTER_LORA_SCALE_VALUE = ""
- self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = ""
- self.EXPORTING_LORA = ""
- self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = ""
- self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = ""
- self.ADDING_LORA_ADAPTER = ""
- self.DELETING_LORA_ADAPTER = ""
- self.LORA_FILES = ""
- self.SELECT_LORA_ADAPTER_FILE = ""
- self.STARTING_LORA_EXPORT = ""
- self.OUTPUT_TYPE = ""
- self.SELECT_OUTPUT_TYPE = ""
- self.GGUF_AND_BIN_FILES = ""
- self.BASE_MODEL = ""
- self.SELECT_BASE_MODEL_FILE = ""
- self.BASE_MODEL_PATH_REQUIRED = ""
- self.BROWSING_FOR_BASE_MODEL_FILE = ""
- self.SELECT_BASE_MODEL_FOLDER = ""
- self.BROWSING_FOR_BASE_MODEL_FOLDER = ""
- self.LORA_CONVERSION_FROM_TO = ""
- self.GENERATING_IMATRIX_FOR = ""
- self.MODEL_PATH_REQUIRED_FOR_IMATRIX = ""
- self.NO_ASSET_SELECTED_FOR_CUDA_CHECK = ""
- self.QUANTIZATION_COMMAND = ""
- self.IMATRIX_GENERATION_COMMAND = ""
- self.LORA_CONVERSION_COMMAND = ""
- self.LORA_EXPORT_COMMAND = ""
-
-class _English(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)"
- self.RAM_USAGE = "RAM Usage:"
- self.CPU_USAGE = "CPU Usage:"
- self.BACKEND = "Llama.cpp Backend:"
- self.REFRESH_BACKENDS = "Refresh Backends"
- self.MODELS_PATH = "Models Path:"
- self.OUTPUT_PATH = "Output Path:"
- self.LOGS_PATH = "Logs Path:"
- self.BROWSE = "Browse"
- self.AVAILABLE_MODELS = "Available Models:"
- self.QUANTIZATION_TYPE = "Quantization Type:"
- self.ALLOW_REQUANTIZE = "Allow Requantize"
- self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
- self.PURE = "Pure"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Include Weights:"
- self.EXCLUDE_WEIGHTS = "Exclude Weights:"
- self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
- self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
- self.KEEP_SPLIT = "Keep Split"
- self.KV_OVERRIDES = "KV Overrides:"
- self.ADD_NEW_OVERRIDE = "Add new override"
- self.QUANTIZE_MODEL = "Quantize Model"
- self.SAVE_PRESET = "Save Preset"
- self.LOAD_PRESET = "Load Preset"
- self.TASKS = "Tasks:"
- self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
- self.SELECT_RELEASE = "Select Release:"
- self.SELECT_ASSET = "Select Asset:"
- self.EXTRACT_CUDA_FILES = "Extract CUDA files"
- self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
- self.DOWNLOAD = "Download"
- self.IMATRIX_GENERATION = "IMatrix Generation"
- self.DATA_FILE = "Data File:"
- self.MODEL = "Model:"
- self.OUTPUT = "Output:"
- self.OUTPUT_FREQUENCY = "Output Frequency:"
- self.GPU_OFFLOAD = "GPU Offload:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Generate IMatrix"
- self.ERROR = "Error"
- self.WARNING = "Warning"
- self.PROPERTIES = "Properties"
- self.CANCEL = "Cancel"
- self.RESTART = "Restart"
- self.DELETE = "Delete"
- self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
- self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?"
- self.YES = "Yes"
- self.NO = "No"
- self.DOWNLOAD_COMPLETE = "Download Complete"
- self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
- self.PRESET_SAVED = "Preset Saved"
- self.PRESET_LOADED = "Preset Loaded"
- self.NO_ASSET_SELECTED = "No asset selected"
- self.DOWNLOAD_FAILED = "Download failed"
- self.NO_BACKEND_SELECTED = "No backend selected"
- self.NO_MODEL_SELECTED = "No model selected"
- self.REFRESH_RELEASES = "Refresh Releases"
- self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}"
- self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction"
- self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
- self.LOG_FOR = "Log for {0}"
- self.ALL_FILES = "All Files (*)"
- self.GGUF_FILES = "GGUF Files (*.gguf)"
- self.DAT_FILES = "DAT Files (*.dat)"
- self.JSON_FILES = "JSON Files (*.json)"
- self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
- self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete"
- self.REFRESHING_BACKENDS = "Refreshing backends"
- self.NO_BACKENDS_AVAILABLE = "No backends available"
- self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
- self.SAVING_PRESET = "Saving preset"
- self.PRESET_SAVED_TO = "Preset saved to {0}"
- self.LOADING_PRESET = "Loading preset"
- self.PRESET_LOADED_FROM = "Preset loaded from {0}"
- self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
- self.SAVING_TASK_PRESET = "Saving task preset for {0}"
- self.TASK_PRESET_SAVED = "Task Preset Saved"
- self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
- self.RESTARTING_TASK = "Restarting task: {0}"
- self.IN_PROGRESS = "In Progress"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
- self.UPDATING_ASSET_LIST = "Updating asset list"
- self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
- self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
- self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
- self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
- self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
- self.DOWNLOAD_ERROR = "Download error: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
- self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
- self.CANCELLING_TASK = "Cancelling task: {0}"
- self.CANCELED = "Canceled"
- self.DELETING_TASK = "Deleting task: {0}"
- self.LOADING_MODELS = "Loading models"
- self.LOADED_MODELS = "Loaded {0} models"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
- self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
- self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
- self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
- self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
- self.SELECT_IMATRIX_FILE = "Select IMatrix File"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs"
- self.MODELS_PATH_REQUIRED = "Models path is required"
- self.OUTPUT_PATH_REQUIRED = "Output path is required"
- self.LOGS_PATH_REQUIRED = "Logs path is required"
- self.STARTING_MODEL_QUANTIZATION = "Starting model quantization"
- self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
- self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
- self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}"
- self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}"
- self.UPDATING_MODEL_INFO = "Updating model info: {0}"
- self.TASK_FINISHED = "Task finished: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
- self.SELECT_DATA_FILE = "Select Data File"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
- self.SELECT_MODEL_FILE = "Select Model File"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
- self.SELECT_OUTPUT_FILE = "Select Output File"
- self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
- self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
- self.GENERATING_IMATRIX = "Generating IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
- self.ERROR_MESSAGE = "Error: {0}"
- self.TASK_ERROR = "Task error: {0}"
- self.APPLICATION_CLOSING = "Application closing"
- self.APPLICATION_CLOSED = "Application closed"
- self.SELECT_QUANTIZATION_TYPE = "Select the quantization type"
- self.ALLOWS_REQUANTIZING = "Allows requantizing tensors that have already been quantized"
- self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized"
- self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantize all tensors to the same type"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimizations"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors"
- self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor"
- self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantized model in the same shards as input"
- self.OVERRIDE_MODEL_METADATA = "Override model metadata"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
- self.MODEL_TO_BE_QUANTIZED = "Model to be quantized"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
- self.COMPLETED = "Completed"
- # TODO: Add the following keys to other languages
- self.REFRESH_MODELS = "Refresh Models"
- self.EXTRA_ARGUMENTS = "Extra Arguments:"
- self.EXTRA_ARGUMENTS_LABEL = "Additional command-line arguments"
- self.CONTEXT_SIZE = "Context Size:"
- self.CONTEXT_SIZE_FOR_IMATRIX = "Context size for IMatrix generation"
- self.THREADS = "Threads:"
- self.NUMBER_OF_THREADS_FOR_IMATRIX = "Number of threads for IMatrix generation"
- self.LORA_CONVERSION = "LoRA Conversion"
- self.LORA_INPUT_PATH = "LoRA Input Path"
- self.LORA_OUTPUT_PATH = "LoRA Output Path"
- self.SELECT_LORA_INPUT_DIRECTORY = "Select LoRA Input Directory"
- self.SELECT_LORA_OUTPUT_FILE = "Select LoRA Output File"
- self.CONVERT_LORA = "Convert LoRA"
- self.STARTING_LORA_CONVERSION = "Starting LoRA Conversion"
- self.LORA_INPUT_PATH_REQUIRED = "LoRA input path is required."
- self.LORA_OUTPUT_PATH_REQUIRED = "LoRA output path is required."
- self.ERROR_STARTING_LORA_CONVERSION = "Error starting LoRA conversion: {}"
- self.LORA_CONVERSION_TASK_STARTED = "LoRA conversion task started."
- self.BIN_FILES = "Binary Files (*.bin)"
- self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "Browsing for LoRA input directory..."
- self.BROWSING_FOR_LORA_OUTPUT_FILE = "Browsing for LoRA output file..."
- self.CONVERTING_LORA = "LoRA Conversion"
- self.LORA_CONVERSION_FINISHED = "LoRA conversion finished."
- self.LORA_FILE_MOVED = "LoRA file moved from {} to {}."
- self.LORA_FILE_NOT_FOUND = "LoRA file not found: {}."
- self.ERROR_MOVING_LORA_FILE = "Error moving LoRA file: {}"
- self.EXPORT_LORA = "Export LoRA"
- self.MODEL_PATH_REQUIRED = "Model path is required."
- self.OUTPUT_PATH_REQUIRED = "Output path is required."
- self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "At least one LoRA adapter is required."
- self.INVALID_LORA_SCALE_VALUE = "Invalid LoRA scale value."
- self.ERROR_STARTING_LORA_EXPORT = "Error starting LoRA export: {}"
- self.LORA_EXPORT_TASK_STARTED = "LoRA export task started."
- self.GGML_LORA_ADAPTERS = "GGML LoRA Adapters"
- self.SELECT_LORA_ADAPTER_FILES = "Select LoRA Adapter Files"
- self.ADD_ADAPTER = "Add Adapter"
- self.DELETE_ADAPTER = "Delete"
- self.LORA_SCALE = "LoRA Scale"
- self.ENTER_LORA_SCALE_VALUE = "Enter LoRA Scale Value (Optional)"
- self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Number of Threads for LoRA Export"
- self.EXPORTING_LORA = "Exporting LoRA..."
- self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "Browsing for Export LoRA Model File..."
- self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "Browsing for Export LoRA Output File..."
- self.ADDING_LORA_ADAPTER = "Adding LoRA Adapter..."
- self.DELETING_LORA_ADAPTER = "Deleting LoRA Adapter..."
- self.LORA_FILES = "LoRA Files (*.bin)"
- self.SELECT_LORA_ADAPTER_FILE = "Select LoRA Adapter File"
- self.STARTING_LORA_EXPORT = "Starting LoRA export..."
- self.OUTPUT_TYPE = "Output Type"
- self.SELECT_OUTPUT_TYPE = "Select Output Type (GGUF or GGML)"
- self.GGUF_AND_BIN_FILES = "GGUF and Binary Files (*.gguf *.bin)"
- self.BASE_MODEL = "Base Model"
- self.SELECT_BASE_MODEL_FILE = "Select Base Model File (GGUF)"
- self.BASE_MODEL_PATH_REQUIRED = "Base model path is required for GGUF output."
- self.BROWSING_FOR_BASE_MODEL_FILE = "Browsing for base model file..."
- self.SELECT_BASE_MODEL_FOLDER = "Select Base Model Folder (containing safetensors)"
- self.BROWSING_FOR_BASE_MODEL_FOLDER = "Browsing for base model folder..."
- self.LORA_CONVERSION_FROM_TO = "LoRA Conversion from {} to {}"
- self.GENERATING_IMATRIX_FOR = "Generating IMatrix for {}"
- self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "Model path is required for IMatrix generation."
- self.NO_ASSET_SELECTED_FOR_CUDA_CHECK = "No asset selected for CUDA check"
- self.QUANTIZATION_COMMAND = "Quantization command"
- self.IMATRIX_GENERATION_COMMAND = "IMatrix generation command"
- self.LORA_CONVERSION_COMMAND = "LoRA conversion command"
- self.LORA_EXPORT_COMMAND = "LoRA export command"
-
-class _French:
- # French localization
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (quantificateur automatisé de modèles GGUF)"
- self.RAM_USAGE = "Utilisation RAM :"
- self.CPU_USAGE = "Utilisation CPU :"
- self.BACKEND = "Backend Llama.cpp :"
- self.REFRESH_BACKENDS = "Actualiser les Backends"
- self.MODELS_PATH = "Chemin des Modèles :"
- self.OUTPUT_PATH = "Chemin de Sortie :"
- self.LOGS_PATH = "Chemin des Logs :"
- self.BROWSE = "Parcourir"
- self.AVAILABLE_MODELS = "Modèles Disponibles :"
- self.QUANTIZATION_TYPE = "Type de Quantification :"
- self.ALLOW_REQUANTIZE = "Autoriser la Requantification"
- self.LEAVE_OUTPUT_TENSOR = "Laisser le Tenseur de Sortie"
- self.PURE = "Pur"
- self.IMATRIX = "IMatrix :"
- self.INCLUDE_WEIGHTS = "Inclure les Poids :"
- self.EXCLUDE_WEIGHTS = "Exclure les Poids :"
- self.USE_OUTPUT_TENSOR_TYPE = "Utiliser le Type de Tenseur de Sortie"
- self.USE_TOKEN_EMBEDDING_TYPE = "Utiliser le Type d'Embedding de Token"
- self.KEEP_SPLIT = "Garder la Division"
- self.KV_OVERRIDES = "Remplacements KV :"
- self.ADD_NEW_OVERRIDE = "Ajouter un nouveau remplacement"
- self.QUANTIZE_MODEL = "Quantifier le Modèle"
- self.SAVE_PRESET = "Sauvegarder le Préréglage"
- self.LOAD_PRESET = "Charger le Préréglage"
- self.TASKS = "Tâches :"
- self.DOWNLOAD_LLAMACPP = "Télécharger llama.cpp"
- self.SELECT_RELEASE = "Sélectionner la Version :"
- self.SELECT_ASSET = "Sélectionner l'Asset :"
- self.EXTRACT_CUDA_FILES = "Extraire les fichiers CUDA"
- self.SELECT_CUDA_BACKEND = "Sélectionner le Backend CUDA :"
- self.DOWNLOAD = "Télécharger"
- self.IMATRIX_GENERATION = "Génération IMatrix"
- self.DATA_FILE = "Fichier de Données :"
- self.MODEL = "Modèle :"
- self.OUTPUT = "Sortie :"
- self.OUTPUT_FREQUENCY = "Fréquence de Sortie :"
- self.GPU_OFFLOAD = "Déchargement GPU :"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Générer IMatrix"
- self.ERROR = "Erreur"
- self.WARNING = "Avertissement"
- self.PROPERTIES = "Propriétés"
- self.CANCEL = "Annuler"
- self.RESTART = "Redémarrer"
- self.DELETE = "Supprimer"
- self.CONFIRM_DELETION = "Êtes-vous sûr de vouloir supprimer cette tâche ?"
- self.TASK_RUNNING_WARNING = "Certaines tâches sont encore en cours. Êtes-vous sûr de vouloir quitter ?"
- self.YES = "Oui"
- self.NO = "Non"
- self.DOWNLOAD_COMPLETE = "Téléchargement Terminé"
- self.CUDA_EXTRACTION_FAILED = "Échec de l'Extraction CUDA"
- self.PRESET_SAVED = "Préréglage Sauvegardé"
- self.PRESET_LOADED = "Préréglage Chargé"
- self.NO_ASSET_SELECTED = "Aucun asset sélectionné"
- self.DOWNLOAD_FAILED = "Échec du téléchargement"
- self.NO_BACKEND_SELECTED = "Aucun backend sélectionné"
- self.NO_MODEL_SELECTED = "Aucun modèle sélectionné"
- self.REFRESH_RELEASES = "Actualiser les Versions"
- self.NO_SUITABLE_CUDA_BACKENDS = "Aucun backend CUDA approprié trouvé"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}\nFichiers CUDA extraits vers {1}"
- self.CUDA_FILES_EXTRACTED = "Fichiers CUDA extraits vers"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Aucun backend CUDA approprié trouvé pour l'extraction"
- self.ERROR_FETCHING_RELEASES = "Erreur lors de la récupération des versions : {0}"
- self.CONFIRM_DELETION_TITLE = "Confirmer la Suppression"
- self.LOG_FOR = "Log pour {0}"
- self.ALL_FILES = "Tous les Fichiers (*)"
- self.GGUF_FILES = "Fichiers GGUF (*.gguf)"
- self.DAT_FILES = "Fichiers DAT (*.dat)"
- self.JSON_FILES = "Fichiers JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Échec du chargement du préréglage : {0}"
- self.INITIALIZING_AUTOGGUF = "Initialisation de l'application AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Initialisation d'AutoGGUF terminée"
- self.REFRESHING_BACKENDS = "Actualisation des backends"
- self.NO_BACKENDS_AVAILABLE = "Aucun backend disponible"
- self.FOUND_VALID_BACKENDS = "{0} backends valides trouvés"
- self.SAVING_PRESET = "Sauvegarde du préréglage"
- self.PRESET_SAVED_TO = "Préréglage sauvegardé dans {0}"
- self.LOADING_PRESET = "Chargement du préréglage"
- self.PRESET_LOADED_FROM = "Préréglage chargé depuis {0}"
- self.ADDING_KV_OVERRIDE = "Ajout du remplacement KV : {0}"
- self.SAVING_TASK_PRESET = "Sauvegarde du préréglage de tâche pour {0}"
- self.TASK_PRESET_SAVED = "Préréglage de Tâche Sauvegardé"
- self.TASK_PRESET_SAVED_TO = "Préréglage de tâche sauvegardé dans {0}"
- self.RESTARTING_TASK = "Redémarrage de la tâche : {0}"
- self.IN_PROGRESS = "En Cours"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Téléchargement terminé. Extrait vers : {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}\nFichiers CUDA extraits vers {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Aucun backend CUDA approprié trouvé pour l'extraction"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Actualisation des versions de llama.cpp"
- self.UPDATING_ASSET_LIST = "Mise à jour de la liste des assets"
- self.UPDATING_CUDA_OPTIONS = "Mise à jour des options CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Démarrage du téléchargement de llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Mise à jour des backends CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Aucun backend CUDA sélectionné pour l'extraction"
- self.EXTRACTING_CUDA_FILES = "Extraction des fichiers CUDA de {0} vers {1}"
- self.DOWNLOAD_ERROR = "Erreur de téléchargement : {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Affichage du menu contextuel de tâche"
- self.SHOWING_PROPERTIES_FOR_TASK = "Affichage des propriétés pour la tâche : {0}"
- self.CANCELLING_TASK = "Annulation de la tâche : {0}"
- self.CANCELED = "Annulé"
- self.DELETING_TASK = "Suppression de la tâche : {0}"
- self.LOADING_MODELS = "Chargement des modèles"
- self.LOADED_MODELS = "{0} modèles chargés"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Recherche du répertoire des modèles"
- self.SELECT_MODELS_DIRECTORY = "Sélectionner le Répertoire des Modèles"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Recherche du répertoire de sortie"
- self.SELECT_OUTPUT_DIRECTORY = "Sélectionner le Répertoire de Sortie"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Recherche du répertoire des logs"
- self.SELECT_LOGS_DIRECTORY = "Sélectionner le Répertoire des Logs"
- self.BROWSING_FOR_IMATRIX_FILE = "Recherche du fichier IMatrix"
- self.SELECT_IMATRIX_FILE = "Sélectionner le Fichier IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mo / {2} Mo)"
- self.CPU_USAGE_FORMAT = "Utilisation CPU : {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validation des entrées de quantification"
- self.MODELS_PATH_REQUIRED = "Le chemin des modèles est requis"
- self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis"
- self.LOGS_PATH_REQUIRED = "Le chemin des logs est requis"
- self.STARTING_MODEL_QUANTIZATION = "Démarrage de la quantification du modèle"
- self.INPUT_FILE_NOT_EXIST = "Le fichier d'entrée '{0}' n'existe pas."
- self.QUANTIZING_MODEL_TO = "Quantification de {0} vers {1}"
- self.QUANTIZATION_TASK_STARTED = "Tâche de quantification démarrée pour {0}"
- self.ERROR_STARTING_QUANTIZATION = "Erreur au démarrage de la quantification : {0}"
- self.UPDATING_MODEL_INFO = "Mise à jour des infos du modèle : {0}"
- self.TASK_FINISHED = "Tâche terminée : {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Affichage des détails de la tâche pour : {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Recherche du fichier de données IMatrix"
- self.SELECT_DATA_FILE = "Sélectionner le Fichier de Données"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Recherche du fichier modèle IMatrix"
- self.SELECT_MODEL_FILE = "Sélectionner le Fichier Modèle"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Recherche du fichier de sortie IMatrix"
- self.SELECT_OUTPUT_FILE = "Sélectionner le Fichier de Sortie"
- self.STARTING_IMATRIX_GENERATION = "Démarrage de la génération IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Le chemin du backend n'existe pas : {0}"
- self.GENERATING_IMATRIX = "Génération de l'IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Erreur au démarrage de la génération IMatrix : {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Tâche de génération IMatrix démarrée"
- self.ERROR_MESSAGE = "Erreur : {0}"
- self.TASK_ERROR = "Erreur de tâche : {0}"
- self.APPLICATION_CLOSING = "Fermeture de l'application"
- self.APPLICATION_CLOSED = "Application fermée"
- self.SELECT_QUANTIZATION_TYPE = "Sélectionnez le type de quantification"
- self.ALLOWS_REQUANTIZING = "Permet de requantifier les tenseurs déjà quantifiés"
- self.LEAVE_OUTPUT_WEIGHT = "Laissera output.weight non (re)quantifié"
- self.DISABLE_K_QUANT_MIXTURES = "Désactive les mélanges k-quant et quantifie tous les tenseurs au même type"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilise les données du fichier comme matrice d'importance pour les optimisations de quant"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Utiliser la matrice d'importance pour ces tenseurs"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Ne pas utiliser la matrice d'importance pour ces tenseurs"
- self.OUTPUT_TENSOR_TYPE = "Type de Tenseur de Sortie :"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Utiliser ce type pour le tenseur output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Type d'Embedding de Token :"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Utiliser ce type pour le tenseur des embeddings de token"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Générera le modèle quantifié dans les mêmes shards que l'entrée"
- self.OVERRIDE_MODEL_METADATA = "Remplacer les métadonnées du modèle"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Fichier de données d'entrée pour la génération IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modèle à quantifier"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Chemin de sortie pour l'IMatrix généré"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Fréquence de sauvegarde de l'IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)"
- self.COMPLETED = "Terminé"
- self.REFRESH_MODELS = "Actualiser les modèles"
- self.REFRESH_MODELS = "Actualiser les modèles"
- self.EXTRA_ARGUMENTS = "Arguments supplémentaires :"
- self.EXTRA_ARGUMENTS_LABEL = "Arguments supplémentaires en ligne de commande"
- self.CONTEXT_SIZE = "Taille du contexte :"
- self.CONTEXT_SIZE_FOR_IMATRIX = "Taille du contexte pour la génération d'IMatrix"
- self.THREADS = "Threads :"
- self.NUMBER_OF_THREADS_FOR_IMATRIX = "Nombre de threads pour la génération d'IMatrix"
- self.LORA_CONVERSION = "Conversion LoRA"
- self.LORA_INPUT_PATH = "Chemin d'entrée LoRA"
- self.LORA_OUTPUT_PATH = "Chemin de sortie LoRA"
- self.SELECT_LORA_INPUT_DIRECTORY = "Sélectionner le répertoire d'entrée LoRA"
- self.SELECT_LORA_OUTPUT_FILE = "Sélectionner le fichier de sortie LoRA"
- self.CONVERT_LORA = "Convertir LoRA"
- self.STARTING_LORA_CONVERSION = "Démarrage de la conversion LoRA"
- self.LORA_INPUT_PATH_REQUIRED = "Le chemin d'entrée LoRA est requis."
- self.LORA_OUTPUT_PATH_REQUIRED = "Le chemin de sortie LoRA est requis."
- self.ERROR_STARTING_LORA_CONVERSION = "Erreur lors du démarrage de la conversion LoRA : {}"
- self.LORA_CONVERSION_TASK_STARTED = "Tâche de conversion LoRA démarrée."
- self.BIN_FILES = "Fichiers binaires (*.bin)"
- self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "Recherche du répertoire d'entrée LoRA..."
- self.BROWSING_FOR_LORA_OUTPUT_FILE = "Recherche du fichier de sortie LoRA..."
- self.CONVERTING_LORA = "Conversion LoRA"
- self.LORA_CONVERSION_FINISHED = "Conversion LoRA terminée."
- self.LORA_FILE_MOVED = "Fichier LoRA déplacé de {} à {}."
- self.LORA_FILE_NOT_FOUND = "Fichier LoRA non trouvé : {}."
- self.ERROR_MOVING_LORA_FILE = "Erreur lors du déplacement du fichier LoRA : {}"
- self.EXPORT_LORA = "Exporter LoRA"
- self.MODEL_PATH_REQUIRED = "Le chemin du modèle est requis."
- self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis."
- self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "Au moins un adaptateur LoRA est requis."
- self.INVALID_LORA_SCALE_VALUE = "Valeur d'échelle LoRA invalide."
- self.ERROR_STARTING_LORA_EXPORT = "Erreur lors du démarrage de l'exportation LoRA : {}"
- self.LORA_EXPORT_TASK_STARTED = "Tâche d'exportation LoRA démarrée."
- self.GGML_LORA_ADAPTERS = "Adaptateurs LoRA GGML"
- self.SELECT_LORA_ADAPTER_FILES = "Sélectionner les fichiers d'adaptateur LoRA"
- self.ADD_ADAPTER = "Ajouter un adaptateur"
- self.DELETE_ADAPTER = "Supprimer"
- self.LORA_SCALE = "Échelle LoRA"
- self.ENTER_LORA_SCALE_VALUE = "Entrez la valeur d'échelle LoRA (Optionnel)"
- self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Nombre de threads pour l'exportation LoRA"
- self.EXPORTING_LORA = "Exportation de LoRA..."
- self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "Recherche du fichier de modèle LoRA à exporter..."
- self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "Recherche du fichier de sortie LoRA à exporter..."
- self.ADDING_LORA_ADAPTER = "Ajout d'un adaptateur LoRA..."
- self.DELETING_LORA_ADAPTER = "Suppression de l'adaptateur LoRA..."
- self.LORA_FILES = "Fichiers LoRA (*.bin)"
- self.SELECT_LORA_ADAPTER_FILE = "Sélectionner le fichier d'adaptateur LoRA"
- self.STARTING_LORA_EXPORT = "Démarrage de l'exportation LoRA..."
- self.OUTPUT_TYPE = "Type de sortie"
- self.SELECT_OUTPUT_TYPE = "Sélectionner le type de sortie (GGUF ou GGML)"
- self.GGUF_AND_BIN_FILES = "Fichiers GGUF et binaires (*.gguf *.bin)"
- self.BASE_MODEL = "Modèle de base"
- self.SELECT_BASE_MODEL_FILE = "Sélectionner le fichier du modèle de base (GGUF)"
- self.BASE_MODEL_PATH_REQUIRED = "Le chemin du modèle de base est requis pour la sortie GGUF."
- self.BROWSING_FOR_BASE_MODEL_FILE = "Recherche du fichier du modèle de base..."
- self.SELECT_BASE_MODEL_FOLDER = "Sélectionner le dossier du modèle de base (contenant safetensors)"
- self.BROWSING_FOR_BASE_MODEL_FOLDER = "Recherche du dossier du modèle de base..."
- self.LORA_CONVERSION_FROM_TO = "Conversion LoRA de {} à {}"
- self.GENERATING_IMATRIX_FOR = "Génération d'IMatrix pour {}"
- self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "Le chemin du modèle est requis pour la génération d'IMatrix."
-
-class _SimplifiedChinese(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF(自动GGUF模型量化器)"
- self.RAM_USAGE = "内存使用率:"
- self.CPU_USAGE = "CPU使用率:"
- self.BACKEND = "Llama.cpp后端:"
- self.REFRESH_BACKENDS = "刷新后端"
- self.MODELS_PATH = "模型路径:"
- self.OUTPUT_PATH = "输出路径:"
- self.LOGS_PATH = "日志路径:"
- self.BROWSE = "浏览"
- self.AVAILABLE_MODELS = "可用模型:"
- self.QUANTIZATION_TYPE = "量化类型:"
- self.ALLOW_REQUANTIZE = "允许重新量化"
- self.LEAVE_OUTPUT_TENSOR = "保留输出张量"
- self.PURE = "纯净"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "包含权重:"
- self.EXCLUDE_WEIGHTS = "排除权重:"
- self.USE_OUTPUT_TENSOR_TYPE = "使用输出张量类型"
- self.USE_TOKEN_EMBEDDING_TYPE = "使用令牌嵌入类型"
- self.KEEP_SPLIT = "保持分割"
- self.KV_OVERRIDES = "KV覆盖:"
- self.ADD_NEW_OVERRIDE = "添加新覆盖"
- self.QUANTIZE_MODEL = "量化模型"
- self.SAVE_PRESET = "保存预设"
- self.LOAD_PRESET = "加载预设"
- self.TASKS = "任务:"
- self.DOWNLOAD_LLAMACPP = "下载llama.cpp"
- self.SELECT_RELEASE = "选择发布版本:"
- self.SELECT_ASSET = "选择资源:"
- self.EXTRACT_CUDA_FILES = "提取CUDA文件"
- self.SELECT_CUDA_BACKEND = "选择CUDA后端:"
- self.DOWNLOAD = "下载"
- self.IMATRIX_GENERATION = "IMatrix生成"
- self.DATA_FILE = "数据文件:"
- self.MODEL = "模型:"
- self.OUTPUT = "输出:"
- self.OUTPUT_FREQUENCY = "输出频率:"
- self.GPU_OFFLOAD = "GPU卸载:"
- self.AUTO = "自动"
- self.GENERATE_IMATRIX = "生成IMatrix"
- self.ERROR = "错误"
- self.WARNING = "警告"
- self.PROPERTIES = "属性"
- self.CANCEL = "取消"
- self.RESTART = "重启"
- self.DELETE = "删除"
- self.CONFIRM_DELETION = "您确定要删除此任务吗?"
- self.TASK_RUNNING_WARNING = "某些任务仍在运行。您确定要退出吗?"
- self.YES = "是"
- self.NO = "否"
- self.DOWNLOAD_COMPLETE = "下载完成"
- self.CUDA_EXTRACTION_FAILED = "CUDA提取失败"
- self.PRESET_SAVED = "预设已保存"
- self.PRESET_LOADED = "预设已加载"
- self.NO_ASSET_SELECTED = "未选择资源"
- self.DOWNLOAD_FAILED = "下载失败"
- self.NO_BACKEND_SELECTED = "未选择后端"
- self.NO_MODEL_SELECTED = "未选择模型"
- self.REFRESH_RELEASES = "刷新发布版本"
- self.NO_SUITABLE_CUDA_BACKENDS = "未找到合适的CUDA后端"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp二进制文件已下载并提取到{0}\nCUDA文件已提取到{1}"
- self.CUDA_FILES_EXTRACTED = "CUDA文件已提取到"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "未找到适合提取的CUDA后端"
- self.ERROR_FETCHING_RELEASES = "获取发布版本时出错:{0}"
- self.CONFIRM_DELETION_TITLE = "确认删除"
- self.LOG_FOR = "{0}的日志"
- self.ALL_FILES = "所有文件 (*)"
- self.GGUF_FILES = "GGUF文件 (*.gguf)"
- self.DAT_FILES = "DAT文件 (*.dat)"
- self.JSON_FILES = "JSON文件 (*.json)"
- self.FAILED_LOAD_PRESET = "加载预设失败:{0}"
- self.INITIALIZING_AUTOGGUF = "初始化AutoGGUF应用程序"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF初始化完成"
- self.REFRESHING_BACKENDS = "刷新后端"
- self.NO_BACKENDS_AVAILABLE = "没有可用的后端"
- self.FOUND_VALID_BACKENDS = "找到{0}个有效后端"
- self.SAVING_PRESET = "保存预设"
- self.PRESET_SAVED_TO = "预设已保存到{0}"
- self.LOADING_PRESET = "加载预设"
- self.PRESET_LOADED_FROM = "从{0}加载了预设"
- self.ADDING_KV_OVERRIDE = "添加KV覆盖:{0}"
- self.SAVING_TASK_PRESET = "保存{0}的任务预设"
- self.TASK_PRESET_SAVED = "任务预设已保存"
- self.TASK_PRESET_SAVED_TO = "任务预设已保存到{0}"
- self.RESTARTING_TASK = "重启任务:{0}"
- self.IN_PROGRESS = "进行中"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "下载完成。已提取到:{0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp二进制文件已下载并提取到{0}\nCUDA文件已提取到{1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "未找到适合提取的CUDA后端"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp二进制文件已下载并提取到{0}"
- self.REFRESHING_LLAMACPP_RELEASES = "刷新llama.cpp发布版本"
- self.UPDATING_ASSET_LIST = "更新资源列表"
- self.UPDATING_CUDA_OPTIONS = "更新CUDA选项"
- self.STARTING_LLAMACPP_DOWNLOAD = "开始下载llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "更新CUDA后端"
- self.NO_CUDA_BACKEND_SELECTED = "未选择要提取的CUDA后端"
- self.EXTRACTING_CUDA_FILES = "从{0}提取CUDA文件到{1}"
- self.DOWNLOAD_ERROR = "下载错误:{0}"
- self.SHOWING_TASK_CONTEXT_MENU = "显示任务上下文菜单"
- self.SHOWING_PROPERTIES_FOR_TASK = "显示任务属性:{0}"
- self.CANCELLING_TASK = "取消任务:{0}"
- self.CANCELED = "已取消"
- self.DELETING_TASK = "删除任务:{0}"
- self.LOADING_MODELS = "加载模型"
- self.LOADED_MODELS = "已加载{0}个模型"
- self.BROWSING_FOR_MODELS_DIRECTORY = "浏览模型目录"
- self.SELECT_MODELS_DIRECTORY = "选择模型目录"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "浏览输出目录"
- self.SELECT_OUTPUT_DIRECTORY = "选择输出目录"
- self.BROWSING_FOR_LOGS_DIRECTORY = "浏览日志目录"
- self.SELECT_LOGS_DIRECTORY = "选择日志目录"
- self.BROWSING_FOR_IMATRIX_FILE = "浏览IMatrix文件"
- self.SELECT_IMATRIX_FILE = "选择IMatrix文件"
- self.RAM_USAGE_FORMAT = "{0:.1f}%({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU使用率:{0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "验证量化输入"
- self.MODELS_PATH_REQUIRED = "需要模型路径"
- self.OUTPUT_PATH_REQUIRED = "需要输出路径"
- self.LOGS_PATH_REQUIRED = "需要日志路径"
- self.STARTING_MODEL_QUANTIZATION = "开始模型量化"
- self.INPUT_FILE_NOT_EXIST = "输入文件'{0}'不存在。"
- self.QUANTIZING_MODEL_TO = "将{0}量化为{1}"
- self.QUANTIZATION_TASK_STARTED = "已启动{0}的量化任务"
- self.ERROR_STARTING_QUANTIZATION = "启动量化时出错:{0}"
- self.UPDATING_MODEL_INFO = "更新模型信息:{0}"
- self.TASK_FINISHED = "任务完成:{0}"
- self.SHOWING_TASK_DETAILS_FOR = "显示任务详情:{0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "浏览IMatrix数据文件"
- self.SELECT_DATA_FILE = "选择数据文件"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "浏览IMatrix模型文件"
- self.SELECT_MODEL_FILE = "选择模型文件"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "浏览IMatrix输出文件"
- self.SELECT_OUTPUT_FILE = "选择输出文件"
- self.STARTING_IMATRIX_GENERATION = "开始IMatrix生成"
- self.BACKEND_PATH_NOT_EXIST = "后端路径不存在:{0}"
- self.GENERATING_IMATRIX = "生成IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "启动IMatrix生成时出错:{0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix生成任务已启动"
- self.ERROR_MESSAGE = "错误:{0}"
- self.TASK_ERROR = "任务错误:{0}"
- self.APPLICATION_CLOSING = "应用程序正在关闭"
- self.APPLICATION_CLOSED = "应用程序已关闭"
- self.SELECT_QUANTIZATION_TYPE = "选择量化类型"
- self.ALLOWS_REQUANTIZING = "允许重新量化已经量化的张量"
- self.LEAVE_OUTPUT_WEIGHT = "将保留output.weight不被(重新)量化"
- self.DISABLE_K_QUANT_MIXTURES = "禁用k-quant混合并将所有张量量化为相同类型"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "使用文件中的数据作为量化优化的重要性矩阵"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "对这些张量使用重要性矩阵"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "不对这些张量使用重要性矩阵"
- self.OUTPUT_TENSOR_TYPE = "输出张量类型:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "对output.weight张量使用此类型"
- self.TOKEN_EMBEDDING_TYPE = "令牌嵌入类型:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "对令牌嵌入张量使用此类型"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "将在与输入相同的分片中生成量化模型"
- self.OVERRIDE_MODEL_METADATA = "覆盖模型元数据"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix生成的输入数据文件"
- self.MODEL_TO_BE_QUANTIZED = "要量化的模型"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "生成的IMatrix的输出路径"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "保存IMatrix的频率"
- self.SET_GPU_OFFLOAD_VALUE = "设置GPU卸载值(-ngl)"
- self.COMPLETED = "已完成"
- self.REFRESH_MODELS = "刷新模型"
- self.REFRESH_MODELS = "刷新模型"
- self.EXTRA_ARGUMENTS = "额外参数:"
- self.EXTRA_ARGUMENTS_LABEL = "附加命令行参数"
- self.CONTEXT_SIZE = "上下文大小:"
- self.CONTEXT_SIZE_FOR_IMATRIX = "IMatrix生成的上下文大小"
- self.THREADS = "线程数:"
- self.NUMBER_OF_THREADS_FOR_IMATRIX = "IMatrix生成的线程数"
- self.LORA_CONVERSION = "LoRA转换"
- self.LORA_INPUT_PATH = "LoRA输入路径"
- self.LORA_OUTPUT_PATH = "LoRA输出路径"
- self.SELECT_LORA_INPUT_DIRECTORY = "选择LoRA输入目录"
- self.SELECT_LORA_OUTPUT_FILE = "选择LoRA输出文件"
- self.CONVERT_LORA = "转换LoRA"
- self.STARTING_LORA_CONVERSION = "开始LoRA转换"
- self.LORA_INPUT_PATH_REQUIRED = "需要LoRA输入路径。"
- self.LORA_OUTPUT_PATH_REQUIRED = "需要LoRA输出路径。"
- self.ERROR_STARTING_LORA_CONVERSION = "启动LoRA转换时出错:{}"
- self.LORA_CONVERSION_TASK_STARTED = "LoRA转换任务已启动。"
- self.BIN_FILES = "二进制文件 (*.bin)"
- self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "正在浏览LoRA输入目录..."
- self.BROWSING_FOR_LORA_OUTPUT_FILE = "正在浏览LoRA输出文件..."
- self.CONVERTING_LORA = "正在转换LoRA"
- self.LORA_CONVERSION_FINISHED = "LoRA转换完成。"
- self.LORA_FILE_MOVED = "LoRA文件已从{}移动到{}。"
- self.LORA_FILE_NOT_FOUND = "未找到LoRA文件:{}。"
- self.ERROR_MOVING_LORA_FILE = "移动LoRA文件时出错:{}"
- self.EXPORT_LORA = "导出LoRA"
- self.MODEL_PATH_REQUIRED = "需要模型路径。"
- self.OUTPUT_PATH_REQUIRED = "需要输出路径。"
- self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "至少需要一个LoRA适配器。"
- self.INVALID_LORA_SCALE_VALUE = "无效的LoRA比例值。"
- self.ERROR_STARTING_LORA_EXPORT = "启动LoRA导出时出错:{}"
- self.LORA_EXPORT_TASK_STARTED = "LoRA导出任务已启动。"
- self.GGML_LORA_ADAPTERS = "GGML LoRA适配器"
- self.SELECT_LORA_ADAPTER_FILES = "选择LoRA适配器文件"
- self.ADD_ADAPTER = "添加适配器"
- self.DELETE_ADAPTER = "删除"
- self.LORA_SCALE = "LoRA比例"
- self.ENTER_LORA_SCALE_VALUE = "输入LoRA比例值(可选)"
- self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "LoRA导出的线程数"
- self.EXPORTING_LORA = "正在导出LoRA..."
- self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "正在浏览导出LoRA模型文件..."
- self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "正在浏览导出LoRA输出文件..."
- self.ADDING_LORA_ADAPTER = "正在添加LoRA适配器..."
- self.DELETING_LORA_ADAPTER = "正在删除LoRA适配器..."
- self.LORA_FILES = "LoRA文件 (*.bin)"
- self.SELECT_LORA_ADAPTER_FILE = "选择LoRA适配器文件"
- self.STARTING_LORA_EXPORT = "开始LoRA导出..."
- self.OUTPUT_TYPE = "输出类型"
- self.SELECT_OUTPUT_TYPE = "选择输出类型(GGUF或GGML)"
- self.GGUF_AND_BIN_FILES = "GGUF和二进制文件 (*.gguf *.bin)"
- self.BASE_MODEL = "基础模型"
- self.SELECT_BASE_MODEL_FILE = "选择基础模型文件(GGUF)"
- self.BASE_MODEL_PATH_REQUIRED = "GGUF输出需要基础模型路径。"
- self.BROWSING_FOR_BASE_MODEL_FILE = "正在浏览基础模型文件..."
- self.SELECT_BASE_MODEL_FOLDER = "选择基础模型文件夹(包含safetensors)"
- self.BROWSING_FOR_BASE_MODEL_FOLDER = "正在浏览基础模型文件夹..."
- self.LORA_CONVERSION_FROM_TO = "LoRA从{}转换到{}"
- self.GENERATING_IMATRIX_FOR = "正在为{}生成IMatrix"
- self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "IMatrix生成需要模型路径。"
-
-class _Spanish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (cuantizador automático de modelos GGUF)"
- self.RAM_USAGE = "Uso de RAM:"
- self.CPU_USAGE = "Uso de CPU:"
- self.BACKEND = "Backend de Llama.cpp:"
- self.REFRESH_BACKENDS = "Actualizar Backends"
- self.MODELS_PATH = "Ruta de Modelos:"
- self.OUTPUT_PATH = "Ruta de Salida:"
- self.LOGS_PATH = "Ruta de Registros:"
- self.BROWSE = "Explorar"
- self.AVAILABLE_MODELS = "Modelos Disponibles:"
- self.QUANTIZATION_TYPE = "Tipo de Cuantización:"
- self.ALLOW_REQUANTIZE = "Permitir Recuantización"
- self.LEAVE_OUTPUT_TENSOR = "Dejar Tensor de Salida"
- self.PURE = "Puro"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Incluir Pesos:"
- self.EXCLUDE_WEIGHTS = "Excluir Pesos:"
- self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Salida"
- self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incrustación de Token"
- self.KEEP_SPLIT = "Mantener División"
- self.KV_OVERRIDES = "Anulaciones KV:"
- self.ADD_NEW_OVERRIDE = "Agregar nueva anulación"
- self.QUANTIZE_MODEL = "Cuantizar Modelo"
- self.SAVE_PRESET = "Guardar Preajuste"
- self.LOAD_PRESET = "Cargar Preajuste"
- self.TASKS = "Tareas:"
- self.DOWNLOAD_LLAMACPP = "Descargar llama.cpp"
- self.SELECT_RELEASE = "Seleccionar Versión:"
- self.SELECT_ASSET = "Seleccionar Activo:"
- self.EXTRACT_CUDA_FILES = "Extraer archivos CUDA"
- self.SELECT_CUDA_BACKEND = "Seleccionar Backend CUDA:"
- self.DOWNLOAD = "Descargar"
- self.IMATRIX_GENERATION = "Generación de IMatrix"
- self.DATA_FILE = "Archivo de Datos:"
- self.MODEL = "Modelo:"
- self.OUTPUT = "Salida:"
- self.OUTPUT_FREQUENCY = "Frecuencia de Salida:"
- self.GPU_OFFLOAD = "Descarga GPU:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Generar IMatrix"
- self.ERROR = "Error"
- self.WARNING = "Advertencia"
- self.PROPERTIES = "Propiedades"
- self.CANCEL = "Cancelar"
- self.RESTART = "Reiniciar"
- self.DELETE = "Eliminar"
- self.CONFIRM_DELETION = "¿Estás seguro de que quieres eliminar esta tarea?"
- self.TASK_RUNNING_WARNING = "Algunas tareas aún se están ejecutando. ¿Estás seguro de que quieres salir?"
- self.YES = "Sí"
- self.NO = "No"
- self.DOWNLOAD_COMPLETE = "Descarga Completa"
- self.CUDA_EXTRACTION_FAILED = "Extracción de CUDA Fallida"
- self.PRESET_SAVED = "Preajuste Guardado"
- self.PRESET_LOADED = "Preajuste Cargado"
- self.NO_ASSET_SELECTED = "Ningún activo seleccionado"
- self.DOWNLOAD_FAILED = "Descarga fallida"
- self.NO_BACKEND_SELECTED = "Ningún backend seleccionado"
- self.NO_MODEL_SELECTED = "Ningún modelo seleccionado"
- self.REFRESH_RELEASES = "Actualizar Versiones"
- self.NO_SUITABLE_CUDA_BACKENDS = "No se encontraron backends CUDA adecuados"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}\nArchivos CUDA extraídos en {1}"
- self.CUDA_FILES_EXTRACTED = "Archivos CUDA extraídos en"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No se encontró un backend CUDA adecuado para la extracción"
- self.ERROR_FETCHING_RELEASES = "Error al obtener versiones: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirmar Eliminación"
- self.LOG_FOR = "Registro para {0}"
- self.ALL_FILES = "Todos los Archivos (*)"
- self.GGUF_FILES = "Archivos GGUF (*.gguf)"
- self.DAT_FILES = "Archivos DAT (*.dat)"
- self.JSON_FILES = "Archivos JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Error al cargar el preajuste: {0}"
- self.INITIALIZING_AUTOGGUF = "Inicializando aplicación AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialización de AutoGGUF completa"
- self.REFRESHING_BACKENDS = "Actualizando backends"
- self.NO_BACKENDS_AVAILABLE = "No hay backends disponibles"
- self.FOUND_VALID_BACKENDS = "Se encontraron {0} backends válidos"
- self.SAVING_PRESET = "Guardando preajuste"
- self.PRESET_SAVED_TO = "Preajuste guardado en {0}"
- self.LOADING_PRESET = "Cargando preajuste"
- self.PRESET_LOADED_FROM = "Preajuste cargado desde {0}"
- self.ADDING_KV_OVERRIDE = "Agregando anulación KV: {0}"
- self.SAVING_TASK_PRESET = "Guardando preajuste de tarea para {0}"
- self.TASK_PRESET_SAVED = "Preajuste de Tarea Guardado"
- self.TASK_PRESET_SAVED_TO = "Preajuste de tarea guardado en {0}"
- self.RESTARTING_TASK = "Reiniciando tarea: {0}"
- self.IN_PROGRESS = "En Progreso"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Descarga finalizada. Extraído en: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}\nArchivos CUDA extraídos en {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No se encontró un backend CUDA adecuado para la extracción"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Actualizando versiones de llama.cpp"
- self.UPDATING_ASSET_LIST = "Actualizando lista de activos"
- self.UPDATING_CUDA_OPTIONS = "Actualizando opciones de CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Iniciando descarga de llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Actualizando backends CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "No se seleccionó backend CUDA para extracción"
- self.EXTRACTING_CUDA_FILES = "Extrayendo archivos CUDA de {0} a {1}"
- self.DOWNLOAD_ERROR = "Error de descarga: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Mostrando menú contextual de tarea"
- self.SHOWING_PROPERTIES_FOR_TASK = "Mostrando propiedades para la tarea: {0}"
- self.CANCELLING_TASK = "Cancelando tarea: {0}"
- self.CANCELED = "Cancelado"
- self.DELETING_TASK = "Eliminando tarea: {0}"
- self.LOADING_MODELS = "Cargando modelos"
- self.LOADED_MODELS = "Cargados {0} modelos"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Explorando directorio de modelos"
- self.SELECT_MODELS_DIRECTORY = "Seleccionar Directorio de Modelos"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Explorando directorio de salida"
- self.SELECT_OUTPUT_DIRECTORY = "Seleccionar Directorio de Salida"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Explorando directorio de registros"
- self.SELECT_LOGS_DIRECTORY = "Seleccionar Directorio de Registros"
- self.BROWSING_FOR_IMATRIX_FILE = "Explorando archivo IMatrix"
- self.SELECT_IMATRIX_FILE = "Seleccionar Archivo IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Uso de CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validando entradas de cuantización"
- self.MODELS_PATH_REQUIRED = "Se requiere la ruta de modelos"
- self.OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida"
- self.LOGS_PATH_REQUIRED = "Se requiere la ruta de registros"
- self.STARTING_MODEL_QUANTIZATION = "Iniciando cuantización de modelo"
- self.INPUT_FILE_NOT_EXIST = "El archivo de entrada '{0}' no existe."
- self.QUANTIZING_MODEL_TO = "Cuantizando {0} a {1}"
- self.QUANTIZATION_TASK_STARTED = "Tarea de cuantización iniciada para {0}"
- self.ERROR_STARTING_QUANTIZATION = "Error al iniciar la cuantización: {0}"
- self.UPDATING_MODEL_INFO = "Actualizando información del modelo: {0}"
- self.TASK_FINISHED = "Tarea finalizada: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Mostrando detalles de la tarea para: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Explorando archivo de datos IMatrix"
- self.SELECT_DATA_FILE = "Seleccionar Archivo de Datos"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Explorando archivo de modelo IMatrix"
- self.SELECT_MODEL_FILE = "Seleccionar Archivo de Modelo"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Explorando archivo de salida IMatrix"
- self.SELECT_OUTPUT_FILE = "Seleccionar Archivo de Salida"
- self.STARTING_IMATRIX_GENERATION = "Iniciando generación de IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "La ruta del backend no existe: {0}"
- self.GENERATING_IMATRIX = "Generando IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Error al iniciar la generación de IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Tarea de generación de IMatrix iniciada"
- self.ERROR_MESSAGE = "Error: {0}"
- self.TASK_ERROR = "Error de tarea: {0}"
- self.APPLICATION_CLOSING = "Cerrando aplicación"
- self.APPLICATION_CLOSED = "Aplicación cerrada"
- self.SELECT_QUANTIZATION_TYPE = "Seleccione el tipo de cuantización"
- self.ALLOWS_REQUANTIZING = "Permite recuantizar tensores que ya han sido cuantizados"
- self.LEAVE_OUTPUT_WEIGHT = "Dejará output.weight sin (re)cuantizar"
- self.DISABLE_K_QUANT_MIXTURES = "Desactiva las mezclas k-quant y cuantiza todos los tensores al mismo tipo"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usa los datos en el archivo como matriz de importancia para optimizaciones de cuantización"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usar matriz de importancia para estos tensores"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "No usar matriz de importancia para estos tensores"
- self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Salida:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usar este tipo para el tensor output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Tipo de Incrustación de Token:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usar este tipo para el tensor de incrustaciones de token"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Generará el modelo cuantizado en los mismos fragmentos que la entrada"
- self.OVERRIDE_MODEL_METADATA = "Anular metadatos del modelo"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Archivo de datos de entrada para generación de IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modelo a cuantizar"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Ruta de salida para el IMatrix generado"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Con qué frecuencia guardar el IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Establecer valor de descarga GPU (-ngl)"
- self.COMPLETED = "Completado"
- self.REFRESH_MODELS = "Actualizar modelos"
- self.EXTRA_ARGUMENTS = "Argumentos adicionales:"
- self.EXTRA_ARGUMENTS_LABEL = "Argumentos adicionales de línea de comandos"
- self.CONTEXT_SIZE = "Tamaño del contexto:"
- self.CONTEXT_SIZE_FOR_IMATRIX = "Tamaño del contexto para generación de IMatrix"
- self.THREADS = "Hilos:"
- self.NUMBER_OF_THREADS_FOR_IMATRIX = "Número de hilos para generación de IMatrix"
- self.LORA_CONVERSION = "Conversión LoRA"
- self.LORA_INPUT_PATH = "Ruta de entrada LoRA"
- self.LORA_OUTPUT_PATH = "Ruta de salida LoRA"
- self.SELECT_LORA_INPUT_DIRECTORY = "Seleccionar directorio de entrada LoRA"
- self.SELECT_LORA_OUTPUT_FILE = "Seleccionar archivo de salida LoRA"
- self.CONVERT_LORA = "Convertir LoRA"
- self.STARTING_LORA_CONVERSION = "Iniciando conversión LoRA"
- self.LORA_INPUT_PATH_REQUIRED = "Se requiere la ruta de entrada LoRA."
- self.LORA_OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida LoRA."
- self.ERROR_STARTING_LORA_CONVERSION = "Error al iniciar la conversión LoRA: {}"
- self.LORA_CONVERSION_TASK_STARTED = "Tarea de conversión LoRA iniciada."
- self.BIN_FILES = "Archivos binarios (*.bin)"
- self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "Buscando directorio de entrada LoRA..."
- self.BROWSING_FOR_LORA_OUTPUT_FILE = "Buscando archivo de salida LoRA..."
- self.CONVERTING_LORA = "Convirtiendo LoRA"
- self.LORA_CONVERSION_FINISHED = "Conversión LoRA finalizada."
- self.LORA_FILE_MOVED = "Archivo LoRA movido de {} a {}."
- self.LORA_FILE_NOT_FOUND = "Archivo LoRA no encontrado: {}."
- self.ERROR_MOVING_LORA_FILE = "Error al mover el archivo LoRA: {}"
- self.EXPORT_LORA = "Exportar LoRA"
- self.MODEL_PATH_REQUIRED = "Se requiere la ruta del modelo."
- self.OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida."
- self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "Se requiere al menos un adaptador LoRA."
- self.INVALID_LORA_SCALE_VALUE = "Valor de escala LoRA inválido."
- self.ERROR_STARTING_LORA_EXPORT = "Error al iniciar la exportación LoRA: {}"
- self.LORA_EXPORT_TASK_STARTED = "Tarea de exportación LoRA iniciada."
- self.GGML_LORA_ADAPTERS = "Adaptadores LoRA GGML"
- self.SELECT_LORA_ADAPTER_FILES = "Seleccionar archivos de adaptador LoRA"
- self.ADD_ADAPTER = "Añadir adaptador"
- self.DELETE_ADAPTER = "Eliminar"
- self.LORA_SCALE = "Escala LoRA"
- self.ENTER_LORA_SCALE_VALUE = "Ingresar valor de escala LoRA (Opcional)"
- self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Número de hilos para exportación LoRA"
- self.EXPORTING_LORA = "Exportando LoRA..."
- self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "Buscando archivo de modelo LoRA para exportar..."
- self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "Buscando archivo de salida LoRA para exportar..."
- self.ADDING_LORA_ADAPTER = "Añadiendo adaptador LoRA..."
- self.DELETING_LORA_ADAPTER = "Eliminando adaptador LoRA..."
- self.LORA_FILES = "Archivos LoRA (*.bin)"
- self.SELECT_LORA_ADAPTER_FILE = "Seleccionar archivo de adaptador LoRA"
- self.STARTING_LORA_EXPORT = "Iniciando exportación LoRA..."
- self.OUTPUT_TYPE = "Tipo de salida"
- self.SELECT_OUTPUT_TYPE = "Seleccionar tipo de salida (GGUF o GGML)"
- self.GGUF_AND_BIN_FILES = "Archivos GGUF y binarios (*.gguf *.bin)"
- self.BASE_MODEL = "Modelo base"
- self.SELECT_BASE_MODEL_FILE = "Seleccionar archivo de modelo base (GGUF)"
- self.BASE_MODEL_PATH_REQUIRED = "Se requiere la ruta del modelo base para la salida GGUF."
- self.BROWSING_FOR_BASE_MODEL_FILE = "Buscando archivo de modelo base..."
- self.SELECT_BASE_MODEL_FOLDER = "Seleccionar carpeta de modelo base (que contiene safetensors)"
- self.BROWSING_FOR_BASE_MODEL_FOLDER = "Buscando carpeta de modelo base..."
- self.LORA_CONVERSION_FROM_TO = "Conversión LoRA de {} a {}"
- self.GENERATING_IMATRIX_FOR = "Generando IMatrix para {}"
- self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "Se requiere la ruta del modelo para la generación de IMatrix."
-
-class _Hindi(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (स्वचालित GGUF मॉडल क्वांटाइज़र)"
- self.RAM_USAGE = "RAM उपयोग:"
- self.CPU_USAGE = "CPU उपयोग:"
- self.BACKEND = "Llama.cpp बैकएंड:"
- self.REFRESH_BACKENDS = "बैकएंड रीफ्रेश करें"
- self.MODELS_PATH = "मॉडल पथ:"
- self.OUTPUT_PATH = "आउटपुट पथ:"
- self.LOGS_PATH = "लॉग पथ:"
- self.BROWSE = "ब्राउज़ करें"
- self.AVAILABLE_MODELS = "उपलब्ध मॉडल:"
- self.QUANTIZATION_TYPE = "क्वांटाइजेशन प्रकार:"
- self.ALLOW_REQUANTIZE = "पुनः क्वांटाइज़ करने की अनुमति दें"
- self.LEAVE_OUTPUT_TENSOR = "आउटपुट टेंसर छोड़ें"
- self.PURE = "शुद्ध"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "वेट शामिल करें:"
- self.EXCLUDE_WEIGHTS = "वेट बाहर रखें:"
- self.USE_OUTPUT_TENSOR_TYPE = "आउटपुट टेंसर प्रकार का उपयोग करें"
- self.USE_TOKEN_EMBEDDING_TYPE = "टोकन एम्बेडिंग प्रकार का उपयोग करें"
- self.KEEP_SPLIT = "विभाजन रखें"
- self.KV_OVERRIDES = "KV ओवरराइड:"
- self.ADD_NEW_OVERRIDE = "नया ओवरराइड जोड़ें"
- self.QUANTIZE_MODEL = "मॉडल क्वांटाइज़ करें"
- self.SAVE_PRESET = "प्रीसेट सहेजें"
- self.LOAD_PRESET = "प्रीसेट लोड करें"
- self.TASKS = "कार्य:"
- self.DOWNLOAD_LLAMACPP = "llama.cpp डाउनलोड करें"
- self.SELECT_RELEASE = "रिलीज़ चुनें:"
- self.SELECT_ASSET = "एसेट चुनें:"
- self.EXTRACT_CUDA_FILES = "CUDA फ़ाइलें निकालें"
- self.SELECT_CUDA_BACKEND = "CUDA बैकएंड चुनें:"
- self.DOWNLOAD = "डाउनलोड करें"
- self.IMATRIX_GENERATION = "IMatrix उत्पादन"
- self.DATA_FILE = "डेटा फ़ाइल:"
- self.MODEL = "मॉडल:"
- self.OUTPUT = "आउटपुट:"
- self.OUTPUT_FREQUENCY = "आउटपुट आवृत्ति:"
- self.GPU_OFFLOAD = "GPU ऑफलोड:"
- self.AUTO = "स्वचालित"
- self.GENERATE_IMATRIX = "IMatrix उत्पन्न करें"
- self.ERROR = "त्रुटि"
- self.WARNING = "चेतावनी"
- self.PROPERTIES = "गुण"
- self.CANCEL = "रद्द करें"
- self.RESTART = "पुनः आरंभ करें"
- self.DELETE = "हटाएं"
- self.CONFIRM_DELETION = "क्या आप वाकई इस कार्य को हटाना चाहते हैं?"
- self.TASK_RUNNING_WARNING = "कुछ कार्य अभी भी चल रहे हैं। क्या आप वाकई बाहर निकलना चाहते हैं?"
- self.YES = "हां"
- self.NO = "नहीं"
- self.DOWNLOAD_COMPLETE = "डाउनलोड पूरा हुआ"
- self.CUDA_EXTRACTION_FAILED = "CUDA निष्कर्षण विफल"
- self.PRESET_SAVED = "प्रीसेट सहेजा गया"
- self.PRESET_LOADED = "प्रीसेट लोड किया गया"
- self.NO_ASSET_SELECTED = "कोई एसेट चयनित नहीं"
- self.DOWNLOAD_FAILED = "डाउनलोड विफल"
- self.NO_BACKEND_SELECTED = "कोई बैकएंड चयनित नहीं"
- self.NO_MODEL_SELECTED = "कोई मॉडल चयनित नहीं"
- self.REFRESH_RELEASES = "रिलीज़ रीफ्रेश करें"
- self.NO_SUITABLE_CUDA_BACKENDS = "कोई उपयुक्त CUDA बैकएंड नहीं मिला"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई\nCUDA फ़ाइलें {1} में निकाली गईं"
- self.CUDA_FILES_EXTRACTED = "CUDA फ़ाइलें निकाली गईं"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "निष्कर्षण के लिए कोई उपयुक्त CUDA बैकएंड नहीं मिला"
- self.ERROR_FETCHING_RELEASES = "रिलीज़ प्राप्त करने में त्रुटि: {0}"
- self.CONFIRM_DELETION_TITLE = "हटाने की पुष्टि करें"
- self.LOG_FOR = "{0} के लिए लॉग"
- self.ALL_FILES = "सभी फ़ाइलें (*)"
- self.GGUF_FILES = "GGUF फ़ाइलें (*.gguf)"
- self.DAT_FILES = "DAT फ़ाइलें (*.dat)"
- self.JSON_FILES = "JSON फ़ाइलें (*.json)"
- self.FAILED_LOAD_PRESET = "प्रीसेट लोड करने में विफल: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUF एप्लिकेशन प्रारंभ हो रहा है"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF प्रारंभीकरण पूरा हुआ"
- self.REFRESHING_BACKENDS = "बैकएंड रीफ्रेश हो रहे हैं"
- self.NO_BACKENDS_AVAILABLE = "कोई बैकएंड उपलब्ध नहीं"
- self.FOUND_VALID_BACKENDS = "{0} मान्य बैकएंड मिले"
- self.SAVING_PRESET = "प्रीसेट सहेजा जा रहा है"
- self.PRESET_SAVED_TO = "प्रीसेट {0} में सहेजा गया"
- self.LOADING_PRESET = "प्रीसेट लोड हो रहा है"
- self.PRESET_LOADED_FROM = "{0} से प्रीसेट लोड किया गया"
- self.ADDING_KV_OVERRIDE = "KV ओवरराइड जोड़ा जा रहा है: {0}"
- self.SAVING_TASK_PRESET = "{0} के लिए कार्य प्रीसेट सहेजा जा रहा है"
- self.TASK_PRESET_SAVED = "कार्य प्रीसेट सहेजा गया"
- self.TASK_PRESET_SAVED_TO = "कार्य प्रीसेट {0} में सहेजा गया"
- self.RESTARTING_TASK = "कार्य पुनः आरंभ हो रहा है: {0}"
- self.IN_PROGRESS = "प्रगति में"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "डाउनलोड समाप्त। निकाला गया: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई\nCUDA फ़ाइलें {1} में निकाली गईं"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "निष्कर्षण के लिए कोई उपयुक्त CUDA बैकएंड नहीं मिला"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई"
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp रिलीज़ रीफ्रेश हो रही हैं"
- self.UPDATING_ASSET_LIST = "एसेट सूची अपडेट हो रही है"
- self.UPDATING_CUDA_OPTIONS = "CUDA विकल्प अपडेट हो रहे हैं"
- self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp डाउनलोड शुरू हो रहा है"
- self.UPDATING_CUDA_BACKENDS = "CUDA बैकएंड अपडेट हो रहे हैं"
- self.NO_CUDA_BACKEND_SELECTED = "निष्कर्षण के लिए कोई CUDA बैकएंड चयनित नहीं"
- self.EXTRACTING_CUDA_FILES = "{0} से {1} में CUDA फ़ाइलें निकाली जा रही हैं"
- self.DOWNLOAD_ERROR = "डाउनलोड त्रुटि: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "कार्य संदर्भ मेनू दिखाया जा रहा है"
- self.SHOWING_PROPERTIES_FOR_TASK = "कार्य के लिए गुण दिखाए जा रहे हैं: {0}"
- self.CANCELLING_TASK = "कार्य रद्द किया जा रहा है: {0}"
- self.CANCELED = "रद्द किया गया"
- self.DELETING_TASK = "कार्य हटाया जा रहा है: {0}"
- self.LOADING_MODELS = "मॉडल लोड हो रहे हैं"
- self.LOADED_MODELS = "{0} मॉडल लोड किए गए"
- self.BROWSING_FOR_MODELS_DIRECTORY = "मॉडल निर्देशिका के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_MODELS_DIRECTORY = "मॉडल निर्देशिका चुनें"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "आउटपुट निर्देशिका के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_OUTPUT_DIRECTORY = "आउटपुट निर्देशिका चुनें"
- self.BROWSING_FOR_LOGS_DIRECTORY = "लॉग निर्देशिका के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_LOGS_DIRECTORY = "लॉग निर्देशिका चुनें"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrix फ़ाइल के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_IMATRIX_FILE = "IMatrix फ़ाइल चुनें"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU उपयोग: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "क्वांटाइजेशन इनपुट सत्यापित किए जा रहे हैं"
- self.MODELS_PATH_REQUIRED = "मॉडल पथ आवश्यक है"
- self.OUTPUT_PATH_REQUIRED = "आउटपुट पथ आवश्यक है"
- self.LOGS_PATH_REQUIRED = "लॉग पथ आवश्यक है"
- self.STARTING_MODEL_QUANTIZATION = "मॉडल क्वांटाइजेशन शुरू हो रहा है"
- self.INPUT_FILE_NOT_EXIST = "इनपुट फ़ाइल '{0}' मौजूद नहीं है।"
- self.QUANTIZING_MODEL_TO = "{0} को {1} में क्वांटाइज़ किया जा रहा है"
- self.QUANTIZATION_TASK_STARTED = "{0} के लिए क्वांटाइजेशन कार्य शुरू हुआ"
- self.ERROR_STARTING_QUANTIZATION = "क्वांटाइजेशन शुरू करने में त्रुटि: {0}"
- self.UPDATING_MODEL_INFO = "मॉडल जानकारी अपडेट हो रही है: {0}"
- self.TASK_FINISHED = "कार्य समाप्त: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "कार्य विवरण दिखाए जा रहे हैं: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix डेटा फ़ाइल के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_DATA_FILE = "डेटा फ़ाइल चुनें"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix मॉडल फ़ाइल के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_MODEL_FILE = "मॉडल फ़ाइल चुनें"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix आउटपुट फ़ाइल के लिए ब्राउज़ किया जा रहा है"
- self.SELECT_OUTPUT_FILE = "आउटपुट फ़ाइल चुनें"
- self.STARTING_IMATRIX_GENERATION = "IMatrix उत्पादन शुरू हो रहा है"
- self.BACKEND_PATH_NOT_EXIST = "बैकएंड पथ मौजूद नहीं है: {0}"
- self.GENERATING_IMATRIX = "IMatrix उत्पन्न किया जा रहा है"
- self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix उत्पादन शुरू करने में त्रुटि: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix उत्पादन कार्य शुरू हुआ"
- self.ERROR_MESSAGE = "त्रुटि: {0}"
- self.TASK_ERROR = "कार्य त्रुटि: {0}"
- self.APPLICATION_CLOSING = "एप्लिकेशन बंद हो रहा है"
- self.APPLICATION_CLOSED = "एप्लिकेशन बंद हो गया"
- self.SELECT_QUANTIZATION_TYPE = "क्वांटाइजेशन प्रकार चुनें"
- self.ALLOWS_REQUANTIZING = "पहले से क्वांटाइज़ किए गए टेंसर को पुनः क्वांटाइज़ करने की अनुमति देता है"
- self.LEAVE_OUTPUT_WEIGHT = "output.weight को बिना (पुनः) क्वांटाइज़ किए छोड़ देगा"
- self.DISABLE_K_QUANT_MIXTURES = "k-quant मिश्रण को अक्षम करें और सभी टेंसर को एक ही प्रकार में क्वांटाइज़ करें"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "क्वांट अनुकूलन के लिए फ़ाइल में डेटा को महत्व मैट्रिक्स के रूप में उपयोग करें"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "इन टेंसर के लिए महत्व मैट्रिक्स का उपयोग करें"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "इन टेंसर के लिए महत्व मैट्रिक्स का उपयोग न करें"
- self.OUTPUT_TENSOR_TYPE = "आउटपुट टेंसर प्रकार:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight टेंसर के लिए इस प्रकार का उपयोग करें"
- self.TOKEN_EMBEDDING_TYPE = "टोकन एम्बेडिंग प्रकार:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "टोकन एम्बेडिंग टेंसर के लिए इस प्रकार का उपयोग करें"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "इनपुट के समान शार्ड्स में क्वांटाइज़ किए गए मॉडल को उत्पन्न करेगा"
- self.OVERRIDE_MODEL_METADATA = "मॉडल मेटाडेटा को ओवरराइड करें"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix उत्पादन के लिए इनपुट डेटा फ़ाइल"
- self.MODEL_TO_BE_QUANTIZED = "क्वांटाइज़ किया जाने वाला मॉडल"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "उत्पन्न IMatrix के लिए आउटपुट पथ"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix को कितनी बार सहेजना है"
- self.SET_GPU_OFFLOAD_VALUE = "GPU ऑफलोड मान सेट करें (-ngl)"
- self.COMPLETED = "पूरा हुआ"
- self.REFRESH_MODELS = "मॉडल रीफ्रेश करें"
-
-class _Russian(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (автоматический квантователь моделей GGUF)"
- self.RAM_USAGE = "Использование ОЗУ:"
- self.CPU_USAGE = "Использование ЦП:"
- self.BACKEND = "Бэкенд Llama.cpp:"
- self.REFRESH_BACKENDS = "Обновить бэкенды"
- self.MODELS_PATH = "Путь к моделям:"
- self.OUTPUT_PATH = "Путь вывода:"
- self.LOGS_PATH = "Путь к логам:"
- self.BROWSE = "Обзор"
- self.AVAILABLE_MODELS = "Доступные модели:"
- self.QUANTIZATION_TYPE = "Тип квантования:"
- self.ALLOW_REQUANTIZE = "Разрешить переквантование"
- self.LEAVE_OUTPUT_TENSOR = "Оставить выходной тензор"
- self.PURE = "Чистый"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Включить веса:"
- self.EXCLUDE_WEIGHTS = "Исключить веса:"
- self.USE_OUTPUT_TENSOR_TYPE = "Использовать тип выходного тензора"
- self.USE_TOKEN_EMBEDDING_TYPE = "Использовать тип встраивания токенов"
- self.KEEP_SPLIT = "Сохранить разделение"
- self.KV_OVERRIDES = "KV переопределения:"
- self.ADD_NEW_OVERRIDE = "Добавить новое переопределение"
- self.QUANTIZE_MODEL = "Квантовать модель"
- self.SAVE_PRESET = "Сохранить пресет"
- self.LOAD_PRESET = "Загрузить пресет"
- self.TASKS = "Задачи:"
- self.DOWNLOAD_LLAMACPP = "Скачать llama.cpp"
- self.SELECT_RELEASE = "Выбрать релиз:"
- self.SELECT_ASSET = "Выбрать актив:"
- self.EXTRACT_CUDA_FILES = "Извлечь файлы CUDA"
- self.SELECT_CUDA_BACKEND = "Выбрать бэкенд CUDA:"
- self.DOWNLOAD = "Скачать"
- self.IMATRIX_GENERATION = "Генерация IMatrix"
- self.DATA_FILE = "Файл данных:"
- self.MODEL = "Модель:"
- self.OUTPUT = "Вывод:"
- self.OUTPUT_FREQUENCY = "Частота вывода:"
- self.GPU_OFFLOAD = "Разгрузка GPU:"
- self.AUTO = "Авто"
- self.GENERATE_IMATRIX = "Сгенерировать IMatrix"
- self.ERROR = "Ошибка"
- self.WARNING = "Предупреждение"
- self.PROPERTIES = "Свойства"
- self.CANCEL = "Отмена"
- self.RESTART = "Перезапуск"
- self.DELETE = "Удалить"
- self.CONFIRM_DELETION = "Вы уверены, что хотите удалить эту задачу?"
- self.TASK_RUNNING_WARNING = "Некоторые задачи все еще выполняются. Вы уверены, что хотите выйти?"
- self.YES = "Да"
- self.NO = "Нет"
- self.DOWNLOAD_COMPLETE = "Загрузка завершена"
- self.CUDA_EXTRACTION_FAILED = "Извлечение CUDA не удалось"
- self.PRESET_SAVED = "Пресет сохранен"
- self.PRESET_LOADED = "Пресет загружен"
- self.NO_ASSET_SELECTED = "Актив не выбран"
- self.DOWNLOAD_FAILED = "Загрузка не удалась"
- self.NO_BACKEND_SELECTED = "Бэкенд не выбран"
- self.NO_MODEL_SELECTED = "Модель не выбрана"
- self.REFRESH_RELEASES = "Обновить релизы"
- self.NO_SUITABLE_CUDA_BACKENDS = "Подходящие бэкенды CUDA не найдены"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}\nФайлы CUDA извлечены в {1}"
- self.CUDA_FILES_EXTRACTED = "Файлы CUDA извлечены в"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Подходящий бэкенд CUDA для извлечения не найден"
- self.ERROR_FETCHING_RELEASES = "Ошибка получения релизов: {0}"
- self.CONFIRM_DELETION_TITLE = "Подтвердить удаление"
- self.LOG_FOR = "Лог для {0}"
- self.ALL_FILES = "Все файлы (*)"
- self.GGUF_FILES = "Файлы GGUF (*.gguf)"
- self.DAT_FILES = "Файлы DAT (*.dat)"
- self.JSON_FILES = "Файлы JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Не удалось загрузить пресет: {0}"
- self.INITIALIZING_AUTOGGUF = "Инициализация приложения AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Инициализация AutoGGUF завершена"
- self.REFRESHING_BACKENDS = "Обновление бэкендов"
- self.NO_BACKENDS_AVAILABLE = "Бэкенды недоступны"
- self.FOUND_VALID_BACKENDS = "Найдено {0} действительных бэкендов"
- self.SAVING_PRESET = "Сохранение пресета"
- self.PRESET_SAVED_TO = "Пресет сохранен в {0}"
- self.LOADING_PRESET = "Загрузка пресета"
- self.PRESET_LOADED_FROM = "Пресет загружен из {0}"
- self.ADDING_KV_OVERRIDE = "Добавление KV переопределения: {0}"
- self.SAVING_TASK_PRESET = "Сохранение пресета задачи для {0}"
- self.TASK_PRESET_SAVED = "Пресет задачи сохранен"
- self.TASK_PRESET_SAVED_TO = "Пресет задачи сохранен в {0}"
- self.RESTARTING_TASK = "Перезапуск задачи: {0}"
- self.IN_PROGRESS = "В процессе"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Загрузка завершена. Извлечено в: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}\nФайлы CUDA извлечены в {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Подходящий бэкенд CUDA для извлечения не найден"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Обновление релизов llama.cpp"
- self.UPDATING_ASSET_LIST = "Обновление списка активов"
- self.UPDATING_CUDA_OPTIONS = "Обновление параметров CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Начало загрузки llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Обновление бэкендов CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Бэкенд CUDA для извлечения не выбран"
- self.EXTRACTING_CUDA_FILES = "Извлечение файлов CUDA из {0} в {1}"
- self.DOWNLOAD_ERROR = "Ошибка загрузки: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Отображение контекстного меню задачи"
- self.SHOWING_PROPERTIES_FOR_TASK = "Отображение свойств задачи: {0}"
- self.CANCELLING_TASK = "Отмена задачи: {0}"
- self.CANCELED = "Отменено"
- self.DELETING_TASK = "Удаление задачи: {0}"
- self.LOADING_MODELS = "Загрузка моделей"
- self.LOADED_MODELS = "Загружено {0} моделей"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Поиск каталога моделей"
- self.SELECT_MODELS_DIRECTORY = "Выберите каталог моделей"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Поиск выходного каталога"
- self.SELECT_OUTPUT_DIRECTORY = "Выберите выходной каталог"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Поиск каталога логов"
- self.SELECT_LOGS_DIRECTORY = "Выберите каталог логов"
- self.BROWSING_FOR_IMATRIX_FILE = "Поиск файла IMatrix"
- self.SELECT_IMATRIX_FILE = "Выберите файл IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} МБ / {2} МБ)"
- self.CPU_USAGE_FORMAT = "Использование ЦП: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Проверка входных данных квантования"
- self.MODELS_PATH_REQUIRED = "Требуется путь к моделям"
- self.OUTPUT_PATH_REQUIRED = "Требуется путь вывода"
- self.LOGS_PATH_REQUIRED = "Требуется путь к логам"
- self.STARTING_MODEL_QUANTIZATION = "Начало квантования модели"
- self.INPUT_FILE_NOT_EXIST = "Входной файл '{0}' не существует."
- self.QUANTIZING_MODEL_TO = "Квантование {0} в {1}"
- self.QUANTIZATION_TASK_STARTED = "Задача квантования запущена для {0}"
- self.ERROR_STARTING_QUANTIZATION = "Ошибка запуска квантования: {0}"
- self.UPDATING_MODEL_INFO = "Обновление информации о модели: {0}"
- self.TASK_FINISHED = "Задача завершена: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Отображение сведений о задаче для: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Поиск файла данных IMatrix"
- self.SELECT_DATA_FILE = "Выберите файл данных"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Поиск файла модели IMatrix"
- self.SELECT_MODEL_FILE = "Выберите файл модели"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Поиск выходного файла IMatrix"
- self.SELECT_OUTPUT_FILE = "Выберите выходной файл"
- self.STARTING_IMATRIX_GENERATION = "Начало генерации IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Путь бэкенда не существует: {0}"
- self.GENERATING_IMATRIX = "Генерация IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Ошибка запуска генерации IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Задача генерации IMatrix запущена"
- self.ERROR_MESSAGE = "Ошибка: {0}"
- self.TASK_ERROR = "Ошибка задачи: {0}"
- self.APPLICATION_CLOSING = "Закрытие приложения"
- self.APPLICATION_CLOSED = "Приложение закрыто"
- self.SELECT_QUANTIZATION_TYPE = "Выберите тип квантования"
- self.ALLOWS_REQUANTIZING = "Позволяет переквантовать тензоры, которые уже были квантованы"
- self.LEAVE_OUTPUT_WEIGHT = "Оставит output.weight не (пере)квантованным"
- self.DISABLE_K_QUANT_MIXTURES = "Отключить k-квантовые смеси и квантовать все тензоры к одному типу"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Использовать данные в файле как матрицу важности для оптимизации квантования"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Использовать матрицу важности для этих тензоров"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Не использовать матрицу важности для этих тензоров"
- self.OUTPUT_TENSOR_TYPE = "Тип выходного тензора:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Использовать этот тип для тензора output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Тип встраивания токенов:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Использовать этот тип для тензора встраивания токенов"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Будет генерировать квантованную модель в тех же шардах, что и входные данные"
- self.OVERRIDE_MODEL_METADATA = "Переопределить метаданные модели"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Входной файл данных для генерации IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Модель для квантования"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Выходной путь для сгенерированного IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Как часто сохранять IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Установить значение разгрузки GPU (-ngl)"
- self.COMPLETED = "Завершено"
- self.REFRESH_MODELS = "Обновить модели"
-
-class _Ukrainian(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (автоматичний квантувальник моделей GGUF)"
- self.RAM_USAGE = "Використання ОЗП:"
- self.CPU_USAGE = "Використання ЦП:"
- self.BACKEND = "Бекенд Llama.cpp:"
- self.REFRESH_BACKENDS = "Оновити бекенди"
- self.MODELS_PATH = "Шлях до моделей:"
- self.OUTPUT_PATH = "Шлях виводу:"
- self.LOGS_PATH = "Шлях до логів:"
- self.BROWSE = "Огляд"
- self.AVAILABLE_MODELS = "Доступні моделі:"
- self.QUANTIZATION_TYPE = "Тип квантування:"
- self.ALLOW_REQUANTIZE = "Дозволити переквантування"
- self.LEAVE_OUTPUT_TENSOR = "Залишити вихідний тензор"
- self.PURE = "Чистий"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Включити ваги:"
- self.EXCLUDE_WEIGHTS = "Виключити ваги:"
- self.USE_OUTPUT_TENSOR_TYPE = "Використовувати тип вихідного тензора"
- self.USE_TOKEN_EMBEDDING_TYPE = "Використовувати тип вбудовування токенів"
- self.KEEP_SPLIT = "Зберегти розділення"
- self.KV_OVERRIDES = "KV перевизначення:"
- self.ADD_NEW_OVERRIDE = "Додати нове перевизначення"
- self.QUANTIZE_MODEL = "Квантувати модель"
- self.SAVE_PRESET = "Зберегти пресет"
- self.LOAD_PRESET = "Завантажити пресет"
- self.TASKS = "Завдання:"
- self.DOWNLOAD_LLAMACPP = "Завантажити llama.cpp"
- self.SELECT_RELEASE = "Вибрати реліз:"
- self.SELECT_ASSET = "Вибрати актив:"
- self.EXTRACT_CUDA_FILES = "Витягнути файли CUDA"
- self.SELECT_CUDA_BACKEND = "Вибрати бекенд CUDA:"
- self.DOWNLOAD = "Завантажити"
- self.IMATRIX_GENERATION = "Генерація IMatrix"
- self.DATA_FILE = "Файл даних:"
- self.MODEL = "Модель:"
- self.OUTPUT = "Вивід:"
- self.OUTPUT_FREQUENCY = "Частота виводу:"
- self.GPU_OFFLOAD = "Розвантаження GPU:"
- self.AUTO = "Авто"
- self.GENERATE_IMATRIX = "Згенерувати IMatrix"
- self.ERROR = "Помилка"
- self.WARNING = "Попередження"
- self.PROPERTIES = "Властивості"
- self.CANCEL = "Скасувати"
- self.RESTART = "Перезапустити"
- self.DELETE = "Видалити"
- self.CONFIRM_DELETION = "Ви впевнені, що хочете видалити це завдання?"
- self.TASK_RUNNING_WARNING = "Деякі завдання все ще виконуються. Ви впевнені, що хочете вийти?"
- self.YES = "Так"
- self.NO = "Ні"
- self.DOWNLOAD_COMPLETE = "Завантаження завершено"
- self.CUDA_EXTRACTION_FAILED = "Витягнення CUDA не вдалося"
- self.PRESET_SAVED = "Пресет збережено"
- self.PRESET_LOADED = "Пресет завантажено"
- self.NO_ASSET_SELECTED = "Актив не вибрано"
- self.DOWNLOAD_FAILED = "Завантаження не вдалося"
- self.NO_BACKEND_SELECTED = "Бекенд не вибрано"
- self.NO_MODEL_SELECTED = "Модель не вибрано"
- self.REFRESH_RELEASES = "Оновити релізи"
- self.NO_SUITABLE_CUDA_BACKENDS = "Підходящі бекенди CUDA не знайдено"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}\nФайли CUDA витягнуто в {1}"
- self.CUDA_FILES_EXTRACTED = "Файли CUDA витягнуто в"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Підходящий бекенд CUDA для витягнення не знайдено"
- self.ERROR_FETCHING_RELEASES = "Помилка отримання релізів: {0}"
- self.CONFIRM_DELETION_TITLE = "Підтвердити видалення"
- self.LOG_FOR = "Лог для {0}"
- self.ALL_FILES = "Всі файли (*)"
- self.GGUF_FILES = "Файли GGUF (*.gguf)"
- self.DAT_FILES = "Файли DAT (*.dat)"
- self.JSON_FILES = "Файли JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Не вдалося завантажити пресет: {0}"
- self.INITIALIZING_AUTOGGUF = "Ініціалізація програми AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Ініціалізація AutoGGUF завершена"
- self.REFRESHING_BACKENDS = "Оновлення бекендів"
- self.NO_BACKENDS_AVAILABLE = "Бекенди недоступні"
- self.FOUND_VALID_BACKENDS = "Знайдено {0} дійсних бекендів"
- self.SAVING_PRESET = "Збереження пресета"
- self.PRESET_SAVED_TO = "Пресет збережено в {0}"
- self.LOADING_PRESET = "Завантаження пресета"
- self.PRESET_LOADED_FROM = "Пресет завантажено з {0}"
- self.ADDING_KV_OVERRIDE = "Додавання KV перевизначення: {0}"
- self.SAVING_TASK_PRESET = "Збереження пресета завдання для {0}"
- self.TASK_PRESET_SAVED = "Пресет завдання збережено"
- self.TASK_PRESET_SAVED_TO = "Пресет завдання збережено в {0}"
- self.RESTARTING_TASK = "Перезапуск завдання: {0}"
- self.IN_PROGRESS = "В процесі"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Завантаження завершено. Витягнуто в: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}\nФайли CUDA витягнуто в {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Підходящий бекенд CUDA для витягнення не знайдено"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Оновлення релізів llama.cpp"
- self.UPDATING_ASSET_LIST = "Оновлення списку активів"
- self.UPDATING_CUDA_OPTIONS = "Оновлення параметрів CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Початок завантаження llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Оновлення бекендів CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Бекенд CUDA для витягнення не вибрано"
- self.EXTRACTING_CUDA_FILES = "Витягнення файлів CUDA з {0} в {1}"
- self.DOWNLOAD_ERROR = "Помилка завантаження: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Відображення контекстного меню завдання"
- self.SHOWING_PROPERTIES_FOR_TASK = "Відображення властивостей завдання: {0}"
- self.CANCELLING_TASK = "Скасування завдання: {0}"
- self.CANCELED = "Скасовано"
- self.DELETING_TASK = "Видалення завдання: {0}"
- self.LOADING_MODELS = "Завантаження моделей"
- self.LOADED_MODELS = "Завантажено {0} моделей"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Пошук каталогу моделей"
- self.SELECT_MODELS_DIRECTORY = "Виберіть каталог моделей"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Пошук вихідного каталогу"
- self.SELECT_OUTPUT_DIRECTORY = "Виберіть вихідний каталог"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Пошук каталогу логів"
- self.SELECT_LOGS_DIRECTORY = "Виберіть каталог логів"
- self.BROWSING_FOR_IMATRIX_FILE = "Пошук файлу IMatrix"
- self.SELECT_IMATRIX_FILE = "Виберіть файл IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} МБ / {2} МБ)"
- self.CPU_USAGE_FORMAT = "Використання ЦП: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Перевірка вхідних даних квантування"
- self.MODELS_PATH_REQUIRED = "Потрібен шлях до моделей"
- self.OUTPUT_PATH_REQUIRED = "Потрібен шлях виводу"
- self.LOGS_PATH_REQUIRED = "Потрібен шлях до логів"
- self.STARTING_MODEL_QUANTIZATION = "Початок квантування моделі"
- self.INPUT_FILE_NOT_EXIST = "Вхідний файл '{0}' не існує."
- self.QUANTIZING_MODEL_TO = "Квантування {0} в {1}"
- self.QUANTIZATION_TASK_STARTED = "Завдання квантування запущено для {0}"
- self.ERROR_STARTING_QUANTIZATION = "Помилка запуску квантування: {0}"
- self.UPDATING_MODEL_INFO = "Оновлення інформації про модель: {0}"
- self.TASK_FINISHED = "Завдання завершено: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Відображення відомостей про завдання для: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Пошук файлу даних IMatrix"
- self.SELECT_DATA_FILE = "Виберіть файл даних"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Пошук файлу моделі IMatrix"
- self.SELECT_MODEL_FILE = "Виберіть файл моделі"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Пошук вихідного файлу IMatrix"
- self.SELECT_OUTPUT_FILE = "Виберіть вихідний файл"
- self.STARTING_IMATRIX_GENERATION = "Початок генерації IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Шлях бекенда не існує: {0}"
- self.GENERATING_IMATRIX = "Генерація IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Помилка запуску генерації IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Завдання генерації IMatrix запущено"
- self.ERROR_MESSAGE = "Помилка: {0}"
- self.TASK_ERROR = "Помилка завдання: {0}"
- self.APPLICATION_CLOSING = "Закриття програми"
- self.APPLICATION_CLOSED = "Програма закрита"
- self.SELECT_QUANTIZATION_TYPE = "Виберіть тип квантування"
- self.ALLOWS_REQUANTIZING = "Дозволяє переквантувати тензори, які вже були квантовані"
- self.LEAVE_OUTPUT_WEIGHT = "Залишить output.weight не (пере)квантованим"
- self.DISABLE_K_QUANT_MIXTURES = "Вимкнути k-квантові суміші та квантувати всі тензори до одного типу"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Використовувати дані у файлі як матрицю важливості для оптимізації квантування"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Використовувати матрицю важливості для цих тензорів"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Не використовувати матрицю важливості для цих тензорів"
- self.OUTPUT_TENSOR_TYPE = "Тип вихідного тензора:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Використовувати цей тип для тензора output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Тип вбудовування токенів:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Використовувати цей тип для тензора вбудовування токенів"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Генеруватиме квантовану модель у тих самих шардах, що й вхідні дані"
- self.OVERRIDE_MODEL_METADATA = "Перевизначити метадані моделі"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Вхідний файл даних для генерації IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Модель для квантування"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Вихідний шлях для згенерованого IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Як часто зберігати IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Встановити значення розвантаження GPU (-ngl)"
- self.COMPLETED = "Завершено"
- self.REFRESH_MODELS = "Оновити моделі"
-
-class _Japanese(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (自動GGUFモデル量子化器)"
- self.RAM_USAGE = "RAM使用量:"
- self.CPU_USAGE = "CPU使用率:"
- self.BACKEND = "Llama.cppバックエンド:"
- self.REFRESH_BACKENDS = "バックエンドを更新"
- self.MODELS_PATH = "モデルパス:"
- self.OUTPUT_PATH = "出力パス:"
- self.LOGS_PATH = "ログパス:"
- self.BROWSE = "参照"
- self.AVAILABLE_MODELS = "利用可能なモデル:"
- self.QUANTIZATION_TYPE = "量子化タイプ:"
- self.ALLOW_REQUANTIZE = "再量子化を許可"
- self.LEAVE_OUTPUT_TENSOR = "出力テンソルを残す"
- self.PURE = "純粋"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "重みを含める:"
- self.EXCLUDE_WEIGHTS = "重みを含めない:"
- self.USE_OUTPUT_TENSOR_TYPE = "出力テンソルタイプを使用"
- self.USE_TOKEN_EMBEDDING_TYPE = "トークン埋め込みタイプを使用"
- self.KEEP_SPLIT = "分割を維持"
- self.KV_OVERRIDES = "KVオーバーライド:"
- self.ADD_NEW_OVERRIDE = "新しいオーバーライドを追加"
- self.QUANTIZE_MODEL = "モデルを量子化"
- self.SAVE_PRESET = "プリセットを保存"
- self.LOAD_PRESET = "プリセットを読み込む"
- self.TASKS = "タスク:"
- self.DOWNLOAD_LLAMACPP = "llama.cppをダウンロード"
- self.SELECT_RELEASE = "リリースを選択:"
- self.SELECT_ASSET = "アセットを選択:"
- self.EXTRACT_CUDA_FILES = "CUDAファイルを抽出"
- self.SELECT_CUDA_BACKEND = "CUDAバックエンドを選択:"
- self.DOWNLOAD = "ダウンロード"
- self.IMATRIX_GENERATION = "IMatrix生成"
- self.DATA_FILE = "データファイル:"
- self.MODEL = "モデル:"
- self.OUTPUT = "出力:"
- self.OUTPUT_FREQUENCY = "出力頻度:"
- self.GPU_OFFLOAD = "GPUオフロード:"
- self.AUTO = "自動"
- self.GENERATE_IMATRIX = "IMatrixを生成"
- self.ERROR = "エラー"
- self.WARNING = "警告"
- self.PROPERTIES = "プロパティ"
- self.CANCEL = "キャンセル"
- self.RESTART = "再起動"
- self.DELETE = "削除"
- self.CONFIRM_DELETION = "このタスクを削除してもよろしいですか?"
- self.TASK_RUNNING_WARNING = "一部のタスクはまだ実行中です。終了してもよろしいですか?"
- self.YES = "はい"
- self.NO = "いいえ"
- self.DOWNLOAD_COMPLETE = "ダウンロード完了"
- self.CUDA_EXTRACTION_FAILED = "CUDA抽出に失敗しました"
- self.PRESET_SAVED = "プリセットが保存されました"
- self.PRESET_LOADED = "プリセットが読み込まれました"
- self.NO_ASSET_SELECTED = "アセットが選択されていません"
- self.DOWNLOAD_FAILED = "ダウンロードに失敗しました"
- self.NO_BACKEND_SELECTED = "バックエンドが選択されていません"
- self.NO_MODEL_SELECTED = "モデルが選択されていません"
- self.REFRESH_RELEASES = "リリースを更新"
- self.NO_SUITABLE_CUDA_BACKENDS = "適切なCUDAバックエンドが見つかりませんでした"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました\nCUDAファイルは{1}に抽出されました"
- self.CUDA_FILES_EXTRACTED = "CUDAファイルの抽出先"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "抽出に適したCUDAバックエンドが見つかりませんでした"
- self.ERROR_FETCHING_RELEASES = "リリースの取得中にエラーが発生しました: {0}"
- self.CONFIRM_DELETION_TITLE = "削除の確認"
- self.LOG_FOR = "{0}のログ"
- self.ALL_FILES = "すべてのファイル (*)"
- self.GGUF_FILES = "GGUFファイル (*.gguf)"
- self.DAT_FILES = "DATファイル (*.dat)"
- self.JSON_FILES = "JSONファイル (*.json)"
- self.FAILED_LOAD_PRESET = "プリセットの読み込みに失敗しました: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUFアプリケーションを初期化しています"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUFの初期化が完了しました"
- self.REFRESHING_BACKENDS = "バックエンドを更新しています"
- self.NO_BACKENDS_AVAILABLE = "利用可能なバックエンドがありません"
- self.FOUND_VALID_BACKENDS = "{0}個の有効なバックエンドが見つかりました"
- self.SAVING_PRESET = "プリセットを保存しています"
- self.PRESET_SAVED_TO = "プリセットは{0}に保存されました"
- self.LOADING_PRESET = "プリセットを読み込んでいます"
- self.PRESET_LOADED_FROM = "{0}からプリセットが読み込まれました"
- self.ADDING_KV_OVERRIDE = "KVオーバーライドを追加しています: {0}"
- self.SAVING_TASK_PRESET = "{0}のタスクプリセットを保存しています"
- self.TASK_PRESET_SAVED = "タスクプリセットが保存されました"
- self.TASK_PRESET_SAVED_TO = "タスクプリセットは{0}に保存されました"
- self.RESTARTING_TASK = "タスクを再起動しています: {0}"
- self.IN_PROGRESS = "処理中"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "ダウンロードが完了しました。抽出先: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました\nCUDAファイルは{1}に抽出されました"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "抽出に適したCUDAバックエンドが見つかりませんでした"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました"
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cppリリースを更新しています"
- self.UPDATING_ASSET_LIST = "アセットリストを更新しています"
- self.UPDATING_CUDA_OPTIONS = "CUDAオプションを更新しています"
- self.STARTING_LLAMACPP_DOWNLOAD = "llama.cppのダウンロードを開始しています"
- self.UPDATING_CUDA_BACKENDS = "CUDAバックエンドを更新しています"
- self.NO_CUDA_BACKEND_SELECTED = "抽出にCUDAバックエンドが選択されていません"
- self.EXTRACTING_CUDA_FILES = "{0}から{1}にCUDAファイルを抽出しています"
- self.DOWNLOAD_ERROR = "ダウンロードエラー: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "タスクコンテキストメニューを表示しています"
- self.SHOWING_PROPERTIES_FOR_TASK = "タスクのプロパティを表示しています: {0}"
- self.CANCELLING_TASK = "タスクをキャンセルしています: {0}"
- self.CANCELED = "キャンセル済み"
- self.DELETING_TASK = "タスクを削除しています: {0}"
- self.LOADING_MODELS = "モデルを読み込んでいます"
- self.LOADED_MODELS = "{0}個のモデルが読み込まれました"
- self.BROWSING_FOR_MODELS_DIRECTORY = "モデルディレクトリを参照しています"
- self.SELECT_MODELS_DIRECTORY = "モデルディレクトリを選択"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "出力ディレクトリを参照しています"
- self.SELECT_OUTPUT_DIRECTORY = "出力ディレクトリを選択"
- self.BROWSING_FOR_LOGS_DIRECTORY = "ログディレクトリを参照しています"
- self.SELECT_LOGS_DIRECTORY = "ログディレクトリを選択"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrixファイルを参照しています"
- self.SELECT_IMATRIX_FILE = "IMatrixファイルを選択"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU使用率: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "量子化入力を検証しています"
- self.MODELS_PATH_REQUIRED = "モデルパスが必要です"
- self.OUTPUT_PATH_REQUIRED = "出力パスが必要です"
- self.LOGS_PATH_REQUIRED = "ログパスが必要です"
- self.STARTING_MODEL_QUANTIZATION = "モデルの量子化を開始しています"
- self.INPUT_FILE_NOT_EXIST = "入力ファイル '{0}' は存在しません。"
- self.QUANTIZING_MODEL_TO = "{0} を {1} に量子化しています"
- self.QUANTIZATION_TASK_STARTED = "{0} の量子化タスクが開始されました"
- self.ERROR_STARTING_QUANTIZATION = "量子化の開始中にエラーが発生しました: {0}"
- self.UPDATING_MODEL_INFO = "モデル情報を更新しています: {0}"
- self.TASK_FINISHED = "タスクが完了しました: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "タスクの詳細を表示しています: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrixデータファイルを参照しています"
- self.SELECT_DATA_FILE = "データファイルを選択"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrixモデルファイルを参照しています"
- self.SELECT_MODEL_FILE = "モデルファイルを選択"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix出力ファイルを参照しています"
- self.SELECT_OUTPUT_FILE = "出力ファイルを選択"
- self.STARTING_IMATRIX_GENERATION = "IMatrixの生成を開始しています"
- self.BACKEND_PATH_NOT_EXIST = "バックエンドパスが存在しません: {0}"
- self.GENERATING_IMATRIX = "IMatrixを生成しています"
- self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrixの生成を開始中にエラーが発生しました: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix生成タスクが開始されました"
- self.ERROR_MESSAGE = "エラー: {0}"
- self.TASK_ERROR = "タスクエラー: {0}"
- self.APPLICATION_CLOSING = "アプリケーションを終了しています"
- self.APPLICATION_CLOSED = "アプリケーションが終了しました"
- self.SELECT_QUANTIZATION_TYPE = "量子化タイプを選択してください"
- self.ALLOWS_REQUANTIZING = "すでに量子化されているテンソルの再量子化を許可します"
- self.LEAVE_OUTPUT_WEIGHT = "output.weightは(再)量子化されません"
- self.DISABLE_K_QUANT_MIXTURES = "k-quant混合を無効にし、すべてのテンソルを同じタイプに量子化します"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "量子化最適化の重要度マトリックスとしてファイル内のデータを使用します"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "これらのテンソルに重要度マトリックスを使用します"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "これらのテンソルに重要度マトリックスを使用しません"
- self.OUTPUT_TENSOR_TYPE = "出力テンソルタイプ:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weightテンソルにこのタイプを使用します"
- self.TOKEN_EMBEDDING_TYPE = "トークン埋め込みタイプ:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "トークン埋め込みテンソルにこのタイプを使用します"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "入力と同じシャードで量子化されたモデルを生成します"
- self.OVERRIDE_MODEL_METADATA = "モデルメタデータを上書きする"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix生成用の入力データファイル"
- self.MODEL_TO_BE_QUANTIZED = "量子化されるモデル"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "生成されたIMatrixの出力パス"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrixを保存する頻度"
- self.SET_GPU_OFFLOAD_VALUE = "GPUオフロード値を設定 (-ngl)"
- self.COMPLETED = "完了しました"
- self.REFRESH_MODELS = "モデルを更新"
-
-class _German(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automatisierter GGUF-Modellquantisierer)"
- self.RAM_USAGE = "RAM-Nutzung:"
- self.CPU_USAGE = "CPU-Auslastung:"
- self.BACKEND = "Llama.cpp-Backend:"
- self.REFRESH_BACKENDS = "Backends aktualisieren"
- self.MODELS_PATH = "Modelle-Pfad:"
- self.OUTPUT_PATH = "Ausgabepfad:"
- self.LOGS_PATH = "Log-Pfad:"
- self.BROWSE = "Durchsuchen"
- self.AVAILABLE_MODELS = "Verfügbare Modelle:"
- self.QUANTIZATION_TYPE = "Quantisierungstyp:"
- self.ALLOW_REQUANTIZE = "Requantisierung zulassen"
- self.LEAVE_OUTPUT_TENSOR = "Ausgabetensor belassen"
- self.PURE = "Rein"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Gewichte einschließen:"
- self.EXCLUDE_WEIGHTS = "Gewichte ausschließen:"
- self.USE_OUTPUT_TENSOR_TYPE = "Ausgabetensortyp verwenden"
- self.USE_TOKEN_EMBEDDING_TYPE = "Token-Einbettungstyp verwenden"
- self.KEEP_SPLIT = "Aufteilung beibehalten"
- self.KV_OVERRIDES = "KV-Überschreibungen:"
- self.ADD_NEW_OVERRIDE = "Neue Überschreibung hinzufügen"
- self.QUANTIZE_MODEL = "Modell quantisieren"
- self.SAVE_PRESET = "Preset speichern"
- self.LOAD_PRESET = "Preset laden"
- self.TASKS = "Aufgaben:"
- self.DOWNLOAD_LLAMACPP = "llama.cpp herunterladen"
- self.SELECT_RELEASE = "Release auswählen:"
- self.SELECT_ASSET = "Asset auswählen:"
- self.EXTRACT_CUDA_FILES = "CUDA-Dateien extrahieren"
- self.SELECT_CUDA_BACKEND = "CUDA-Backend auswählen:"
- self.DOWNLOAD = "Herunterladen"
- self.IMATRIX_GENERATION = "IMatrix-Generierung"
- self.DATA_FILE = "Datendatei:"
- self.MODEL = "Modell:"
- self.OUTPUT = "Ausgabe:"
- self.OUTPUT_FREQUENCY = "Ausgabefrequenz:"
- self.GPU_OFFLOAD = "GPU-Offload:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "IMatrix generieren"
- self.ERROR = "Fehler"
- self.WARNING = "Warnung"
- self.PROPERTIES = "Eigenschaften"
- self.CANCEL = "Abbrechen"
- self.RESTART = "Neustart"
- self.DELETE = "Löschen"
- self.CONFIRM_DELETION = "Sind Sie sicher, dass Sie diese Aufgabe löschen möchten?"
- self.TASK_RUNNING_WARNING = "Einige Aufgaben laufen noch. Möchten Sie wirklich beenden?"
- self.YES = "Ja"
- self.NO = "Nein"
- self.DOWNLOAD_COMPLETE = "Download abgeschlossen"
- self.CUDA_EXTRACTION_FAILED = "CUDA-Extraktion fehlgeschlagen"
- self.PRESET_SAVED = "Preset gespeichert"
- self.PRESET_LOADED = "Preset geladen"
- self.NO_ASSET_SELECTED = "Kein Asset ausgewählt"
- self.DOWNLOAD_FAILED = "Download fehlgeschlagen"
- self.NO_BACKEND_SELECTED = "Kein Backend ausgewählt"
- self.NO_MODEL_SELECTED = "Kein Modell ausgewählt"
- self.REFRESH_RELEASES = "Releases aktualisieren"
- self.NO_SUITABLE_CUDA_BACKENDS = "Keine geeigneten CUDA-Backends gefunden"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}\nCUDA-Dateien extrahiert nach {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA-Dateien extrahiert nach"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Kein geeignetes CUDA-Backend für die Extraktion gefunden"
- self.ERROR_FETCHING_RELEASES = "Fehler beim Abrufen der Releases: {0}"
- self.CONFIRM_DELETION_TITLE = "Löschen bestätigen"
- self.LOG_FOR = "Log für {0}"
- self.ALL_FILES = "Alle Dateien (*)"
- self.GGUF_FILES = "GGUF-Dateien (*.gguf)"
- self.DAT_FILES = "DAT-Dateien (*.dat)"
- self.JSON_FILES = "JSON-Dateien (*.json)"
- self.FAILED_LOAD_PRESET = "Preset konnte nicht geladen werden: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUF-Anwendung wird initialisiert"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-Initialisierung abgeschlossen"
- self.REFRESHING_BACKENDS = "Backends werden aktualisiert"
- self.NO_BACKENDS_AVAILABLE = "Keine Backends verfügbar"
- self.FOUND_VALID_BACKENDS = "{0} gültige Backends gefunden"
- self.SAVING_PRESET = "Preset wird gespeichert"
- self.PRESET_SAVED_TO = "Preset gespeichert unter {0}"
- self.LOADING_PRESET = "Preset wird geladen"
- self.PRESET_LOADED_FROM = "Preset von {0} geladen"
- self.ADDING_KV_OVERRIDE = "KV-Überschreibung wird hinzugefügt: {0}"
- self.SAVING_TASK_PRESET = "Task-Preset für {0} wird gespeichert"
- self.TASK_PRESET_SAVED = "Task-Preset gespeichert"
- self.TASK_PRESET_SAVED_TO = "Task-Preset gespeichert unter {0}"
- self.RESTARTING_TASK = "Aufgabe wird neu gestartet: {0}"
- self.IN_PROGRESS = "In Bearbeitung"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download abgeschlossen. Extrahiert nach: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}\nCUDA-Dateien extrahiert nach {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Kein geeignetes CUDA-Backend für die Extraktion gefunden"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp-Releases werden aktualisiert"
- self.UPDATING_ASSET_LIST = "Asset-Liste wird aktualisiert"
- self.UPDATING_CUDA_OPTIONS = "CUDA-Optionen werden aktualisiert"
- self.STARTING_LLAMACPP_DOWNLOAD = "Download von llama.cpp wird gestartet"
- self.UPDATING_CUDA_BACKENDS = "CUDA-Backends werden aktualisiert"
- self.NO_CUDA_BACKEND_SELECTED = "Kein CUDA-Backend für die Extraktion ausgewählt"
- self.EXTRACTING_CUDA_FILES = "CUDA-Dateien werden von {0} nach {1} extrahiert"
- self.DOWNLOAD_ERROR = "Download-Fehler: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Kontextmenü der Aufgabe wird angezeigt"
- self.SHOWING_PROPERTIES_FOR_TASK = "Eigenschaften für Aufgabe werden angezeigt: {0}"
- self.CANCELLING_TASK = "Aufgabe wird abgebrochen: {0}"
- self.CANCELED = "Abgebrochen"
- self.DELETING_TASK = "Aufgabe wird gelöscht: {0}"
- self.LOADING_MODELS = "Modelle werden geladen"
- self.LOADED_MODELS = "{0} Modelle geladen"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Modelle-Verzeichnis wird durchsucht"
- self.SELECT_MODELS_DIRECTORY = "Modelle-Verzeichnis auswählen"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Ausgabeverzeichnis wird durchsucht"
- self.SELECT_OUTPUT_DIRECTORY = "Ausgabeverzeichnis auswählen"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Log-Verzeichnis wird durchsucht"
- self.SELECT_LOGS_DIRECTORY = "Log-Verzeichnis auswählen"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrix-Datei wird durchsucht"
- self.SELECT_IMATRIX_FILE = "IMatrix-Datei auswählen"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU-Auslastung: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Quantisierungseingaben werden validiert"
- self.MODELS_PATH_REQUIRED = "Modelle-Pfad ist erforderlich"
- self.OUTPUT_PATH_REQUIRED = "Ausgabepfad ist erforderlich"
- self.LOGS_PATH_REQUIRED = "Log-Pfad ist erforderlich"
- self.STARTING_MODEL_QUANTIZATION = "Modellquantisierung wird gestartet"
- self.INPUT_FILE_NOT_EXIST = "Die Eingabedatei '{0}' existiert nicht."
- self.QUANTIZING_MODEL_TO = "Quantisierung von {0} zu {1}"
- self.QUANTIZATION_TASK_STARTED = "Quantisierungsaufgabe für {0} gestartet"
- self.ERROR_STARTING_QUANTIZATION = "Fehler beim Starten der Quantisierung: {0}"
- self.UPDATING_MODEL_INFO = "Modellinformationen werden aktualisiert: {0}"
- self.TASK_FINISHED = "Aufgabe abgeschlossen: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Aufgabendetails werden angezeigt für: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix-Datendatei wird durchsucht"
- self.SELECT_DATA_FILE = "Datendatei auswählen"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix-Modelldatei wird durchsucht"
- self.SELECT_MODEL_FILE = "Modelldatei auswählen"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix-Ausgabedatei wird durchsucht"
- self.SELECT_OUTPUT_FILE = "Ausgabedatei auswählen"
- self.STARTING_IMATRIX_GENERATION = "IMatrix-Generierung wird gestartet"
- self.BACKEND_PATH_NOT_EXIST = "Backend-Pfad existiert nicht: {0}"
- self.GENERATING_IMATRIX = "IMatrix wird generiert"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Fehler beim Starten der IMatrix-Generierung: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-Generierungsaufgabe gestartet"
- self.ERROR_MESSAGE = "Fehler: {0}"
- self.TASK_ERROR = "Aufgabenfehler: {0}"
- self.APPLICATION_CLOSING = "Anwendung wird geschlossen"
- self.APPLICATION_CLOSED = "Anwendung geschlossen"
- self.SELECT_QUANTIZATION_TYPE = "Wählen Sie den Quantisierungstyp aus"
- self.ALLOWS_REQUANTIZING = "Ermöglicht die Requantisierung von Tensoren, die bereits quantisiert wurden"
- self.LEAVE_OUTPUT_WEIGHT = "Belässt output.weight nicht (re)quantisiert"
- self.DISABLE_K_QUANT_MIXTURES = "Deaktivieren Sie k-Quant-Mischungen und quantisieren Sie alle Tensoren auf denselben Typ"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Verwenden Sie Daten in der Datei als Wichtigkeitsmatrix für Quant-Optimierungen"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Verwenden Sie die Wichtigkeitsmatrix für diese Tensoren"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Verwenden Sie die Wichtigkeitsmatrix nicht für diese Tensoren"
- self.OUTPUT_TENSOR_TYPE = "Ausgabetensortyp:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Verwenden Sie diesen Typ für den output.weight-Tensor"
- self.TOKEN_EMBEDDING_TYPE = "Token-Einbettungstyp:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Verwenden Sie diesen Typ für den Token-Einbettungstensor"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Generiert ein quantisiertes Modell in denselben Shards wie die Eingabe"
- self.OVERRIDE_MODEL_METADATA = "Modellmetadaten überschreiben"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Eingabedatendatei für die IMatrix-Generierung"
- self.MODEL_TO_BE_QUANTIZED = "Zu quantisierendes Modell"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Ausgabepfad für die generierte IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Wie oft die IMatrix gespeichert werden soll"
- self.SET_GPU_OFFLOAD_VALUE = "GPU-Offload-Wert festlegen (-ngl)"
- self.COMPLETED = "Abgeschlossen"
- self.REFRESH_MODELS = "Modelle aktualisieren"
-
-class _Portuguese(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Quantizador Automático de Modelos GGUF)"
- self.RAM_USAGE = "Uso de RAM:"
- self.CPU_USAGE = "Uso da CPU:"
- self.BACKEND = "Backend do Llama.cpp:"
- self.REFRESH_BACKENDS = "Atualizar Backends"
- self.MODELS_PATH = "Caminho dos Modelos:"
- self.OUTPUT_PATH = "Caminho de Saída:"
- self.LOGS_PATH = "Caminho dos Logs:"
- self.BROWSE = "Navegar"
- self.AVAILABLE_MODELS = "Modelos Disponíveis:"
- self.QUANTIZATION_TYPE = "Tipo de Quantização:"
- self.ALLOW_REQUANTIZE = "Permitir Requantização"
- self.LEAVE_OUTPUT_TENSOR = "Manter Tensor de Saída"
- self.PURE = "Puro"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Incluir Pesos:"
- self.EXCLUDE_WEIGHTS = "Excluir Pesos:"
- self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Saída"
- self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incorporação de Token"
- self.KEEP_SPLIT = "Manter Divisão"
- self.KV_OVERRIDES = "Substituições KV:"
- self.ADD_NEW_OVERRIDE = "Adicionar Nova Substituição"
- self.QUANTIZE_MODEL = "Quantizar Modelo"
- self.SAVE_PRESET = "Salvar Predefinição"
- self.LOAD_PRESET = "Carregar Predefinição"
- self.TASKS = "Tarefas:"
- self.DOWNLOAD_LLAMACPP = "Baixar llama.cpp"
- self.SELECT_RELEASE = "Selecionar Versão:"
- self.SELECT_ASSET = "Selecionar Ativo:"
- self.EXTRACT_CUDA_FILES = "Extrair Arquivos CUDA"
- self.SELECT_CUDA_BACKEND = "Selecionar Backend CUDA:"
- self.DOWNLOAD = "Baixar"
- self.IMATRIX_GENERATION = "Geração de IMatrix"
- self.DATA_FILE = "Arquivo de Dados:"
- self.MODEL = "Modelo:"
- self.OUTPUT = "Saída:"
- self.OUTPUT_FREQUENCY = "Frequência de Saída:"
- self.GPU_OFFLOAD = "Offload da GPU:"
- self.AUTO = "Automático"
- self.GENERATE_IMATRIX = "Gerar IMatrix"
- self.ERROR = "Erro"
- self.WARNING = "Aviso"
- self.PROPERTIES = "Propriedades"
- self.CANCEL = "Cancelar"
- self.RESTART = "Reiniciar"
- self.DELETE = "Excluir"
- self.CONFIRM_DELETION = "Tem certeza de que deseja excluir esta tarefa?"
- self.TASK_RUNNING_WARNING = "Algumas tarefas ainda estão em execução. Tem certeza de que deseja sair?"
- self.YES = "Sim"
- self.NO = "Não"
- self.DOWNLOAD_COMPLETE = "Download Concluído"
- self.CUDA_EXTRACTION_FAILED = "Falha na Extração do CUDA"
- self.PRESET_SAVED = "Predefinição Salva"
- self.PRESET_LOADED = "Predefinição Carregada"
- self.NO_ASSET_SELECTED = "Nenhum ativo selecionado"
- self.DOWNLOAD_FAILED = "Falha no download"
- self.NO_BACKEND_SELECTED = "Nenhum backend selecionado"
- self.NO_MODEL_SELECTED = "Nenhum modelo selecionado"
- self.REFRESH_RELEASES = "Atualizar Versões"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nenhum backend CUDA adequado encontrado"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}\nArquivos CUDA extraídos para {1}"
- self.CUDA_FILES_EXTRACTED = "Arquivos CUDA extraídos para"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nenhum backend CUDA adequado encontrado para extração"
- self.ERROR_FETCHING_RELEASES = "Erro ao buscar versões: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirmar Exclusão"
- self.LOG_FOR = "Log para {0}"
- self.ALL_FILES = "Todos os Arquivos (*)"
- self.GGUF_FILES = "Arquivos GGUF (*.gguf)"
- self.DAT_FILES = "Arquivos DAT (*.dat)"
- self.JSON_FILES = "Arquivos JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Falha ao carregar a predefinição: {0}"
- self.INITIALIZING_AUTOGGUF = "Inicializando o aplicativo AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialização do AutoGGUF concluída"
- self.REFRESHING_BACKENDS = "Atualizando backends"
- self.NO_BACKENDS_AVAILABLE = "Nenhum backend disponível"
- self.FOUND_VALID_BACKENDS = "{0} backends válidos encontrados"
- self.SAVING_PRESET = "Salvando predefinição"
- self.PRESET_SAVED_TO = "Predefinição salva em {0}"
- self.LOADING_PRESET = "Carregando predefinição"
- self.PRESET_LOADED_FROM = "Predefinição carregada de {0}"
- self.ADDING_KV_OVERRIDE = "Adicionando substituição KV: {0}"
- self.SAVING_TASK_PRESET = "Salvando predefinição de tarefa para {0}"
- self.TASK_PRESET_SAVED = "Predefinição de Tarefa Salva"
- self.TASK_PRESET_SAVED_TO = "Predefinição de tarefa salva em {0}"
- self.RESTARTING_TASK = "Reiniciando tarefa: {0}"
- self.IN_PROGRESS = "Em Andamento"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download concluído. Extraído para: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}\nArquivos CUDA extraídos para {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nenhum backend CUDA adequado encontrado para extração"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Atualizando versões do llama.cpp"
- self.UPDATING_ASSET_LIST = "Atualizando lista de ativos"
- self.UPDATING_CUDA_OPTIONS = "Atualizando opções CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Iniciando download do llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Atualizando backends CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Nenhum backend CUDA selecionado para extração"
- self.EXTRACTING_CUDA_FILES = "Extraindo arquivos CUDA de {0} para {1}"
- self.DOWNLOAD_ERROR = "Erro de download: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Exibindo menu de contexto da tarefa"
- self.SHOWING_PROPERTIES_FOR_TASK = "Exibindo propriedades para a tarefa: {0}"
- self.CANCELLING_TASK = "Cancelando tarefa: {0}"
- self.CANCELED = "Cancelado"
- self.DELETING_TASK = "Excluindo tarefa: {0}"
- self.LOADING_MODELS = "Carregando modelos"
- self.LOADED_MODELS = "{0} modelos carregados"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Navegando pelo diretório de modelos"
- self.SELECT_MODELS_DIRECTORY = "Selecionar Diretório de Modelos"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Navegando pelo diretório de saída"
- self.SELECT_OUTPUT_DIRECTORY = "Selecionar Diretório de Saída"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Navegando pelo diretório de logs"
- self.SELECT_LOGS_DIRECTORY = "Selecionar Diretório de Logs"
- self.BROWSING_FOR_IMATRIX_FILE = "Navegando pelo arquivo IMatrix"
- self.SELECT_IMATRIX_FILE = "Selecionar Arquivo IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Uso da CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validando entradas de quantização"
- self.MODELS_PATH_REQUIRED = "O caminho dos modelos é obrigatório"
- self.OUTPUT_PATH_REQUIRED = "O caminho de saída é obrigatório"
- self.LOGS_PATH_REQUIRED = "O caminho dos logs é obrigatório"
- self.STARTING_MODEL_QUANTIZATION = "Iniciando a quantização do modelo"
- self.INPUT_FILE_NOT_EXIST = "O arquivo de entrada '{0}' não existe."
- self.QUANTIZING_MODEL_TO = "Quantizando {0} para {1}"
- self.QUANTIZATION_TASK_STARTED = "Tarefa de quantização iniciada para {0}"
- self.ERROR_STARTING_QUANTIZATION = "Erro ao iniciar a quantização: {0}"
- self.UPDATING_MODEL_INFO = "Atualizando informações do modelo: {0}"
- self.TASK_FINISHED = "Tarefa concluída: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Mostrando detalhes da tarefa para: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Navegando pelo arquivo de dados IMatrix"
- self.SELECT_DATA_FILE = "Selecionar Arquivo de Dados"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Navegando pelo arquivo de modelo IMatrix"
- self.SELECT_MODEL_FILE = "Selecionar Arquivo de Modelo"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Navegando pelo arquivo de saída IMatrix"
- self.SELECT_OUTPUT_FILE = "Selecionar Arquivo de Saída"
- self.STARTING_IMATRIX_GENERATION = "Iniciando a geração de IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "O caminho do backend não existe: {0}"
- self.GENERATING_IMATRIX = "Gerando IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Erro ao iniciar a geração de IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Tarefa de geração de IMatrix iniciada"
- self.ERROR_MESSAGE = "Erro: {0}"
- self.TASK_ERROR = "Erro de tarefa: {0}"
- self.APPLICATION_CLOSING = "Fechando o aplicativo"
- self.APPLICATION_CLOSED = "Aplicativo fechado"
- self.SELECT_QUANTIZATION_TYPE = "Selecione o tipo de quantização"
- self.ALLOWS_REQUANTIZING = "Permite requantizar tensores que já foram quantizados"
- self.LEAVE_OUTPUT_WEIGHT = "Deixará output.weight não (re)quantizado"
- self.DISABLE_K_QUANT_MIXTURES = "Desabilitar misturas k-quant e quantizar todos os tensores para o mesmo tipo"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usar os dados no arquivo como matriz de importância para otimizações de quantização"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usar matriz de importância para estes tensores"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Não usar matriz de importância para estes tensores"
- self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Saída:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usar este tipo para o tensor output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Tipo de Incorporação de Token:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usar este tipo para o tensor de incorporações de token"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Irá gerar o modelo quantizado nos mesmos shards da entrada"
- self.OVERRIDE_MODEL_METADATA = "Substituir metadados do modelo"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Arquivo de dados de entrada para geração de IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modelo a ser quantizado"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Caminho de saída para o IMatrix gerado"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Com que frequência salvar o IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Definir valor de offload da GPU (-ngl)"
- self.COMPLETED = "Concluído"
- self.REFRESH_MODELS = "Atualizar modelos"
-
-class _Arabic(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (مُكَمِّم نماذج GGUF التلقائي)"
- self.RAM_USAGE = "استخدام ذاكرة الوصول العشوائي:"
- self.CPU_USAGE = "استخدام وحدة المعالجة المركزية:"
- self.BACKEND = "خلفية Llama.cpp:"
- self.REFRESH_BACKENDS = "تحديث الخلفيات"
- self.MODELS_PATH = "مسار النماذج:"
- self.OUTPUT_PATH = "مسار الإخراج:"
- self.LOGS_PATH = "مسار السجلات:"
- self.BROWSE = "استعراض"
- self.AVAILABLE_MODELS = "النماذج المتاحة:"
- self.QUANTIZATION_TYPE = "نوع التكميم:"
- self.ALLOW_REQUANTIZE = "السماح بإعادة التكميم"
- self.LEAVE_OUTPUT_TENSOR = "ترك موتر الإخراج"
- self.PURE = "نقي"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "تضمين الأوزان:"
- self.EXCLUDE_WEIGHTS = "استبعاد الأوزان:"
- self.USE_OUTPUT_TENSOR_TYPE = "استخدام نوع موتر الإخراج"
- self.USE_TOKEN_EMBEDDING_TYPE = "استخدام نوع تضمين الرمز المميز"
- self.KEEP_SPLIT = "الحفاظ على التقسيم"
- self.KV_OVERRIDES = "تجاوزات KV:"
- self.ADD_NEW_OVERRIDE = "إضافة تجاوز جديد"
- self.QUANTIZE_MODEL = "تكميم النموذج"
- self.SAVE_PRESET = "حفظ الإعداد المسبق"
- self.LOAD_PRESET = "تحميل الإعداد المسبق"
- self.TASKS = "المهام:"
- self.DOWNLOAD_LLAMACPP = "تنزيل llama.cpp"
- self.SELECT_RELEASE = "تحديد الإصدار:"
- self.SELECT_ASSET = "تحديد الأصل:"
- self.EXTRACT_CUDA_FILES = "استخراج ملفات CUDA"
- self.SELECT_CUDA_BACKEND = "تحديد خلفية CUDA:"
- self.DOWNLOAD = "تنزيل"
- self.IMATRIX_GENERATION = "توليد IMatrix"
- self.DATA_FILE = "ملف البيانات:"
- self.MODEL = "النموذج:"
- self.OUTPUT = "الإخراج:"
- self.OUTPUT_FREQUENCY = "تردد الإخراج:"
- self.GPU_OFFLOAD = "تفريغ GPU:"
- self.AUTO = "تلقائي"
- self.GENERATE_IMATRIX = "توليد IMatrix"
- self.ERROR = "خطأ"
- self.WARNING = "تحذير"
- self.PROPERTIES = "الخصائص"
- self.CANCEL = "إلغاء"
- self.RESTART = "إعادة تشغيل"
- self.DELETE = "حذف"
- self.CONFIRM_DELETION = "هل أنت متأكد أنك تريد حذف هذه المهمة؟"
- self.TASK_RUNNING_WARNING = "لا تزال بعض المهام قيد التشغيل. هل أنت متأكد أنك تريد الإنهاء؟"
- self.YES = "نعم"
- self.NO = "لا"
- self.DOWNLOAD_COMPLETE = "اكتمل التنزيل"
- self.CUDA_EXTRACTION_FAILED = "فشل استخراج CUDA"
- self.PRESET_SAVED = "تم حفظ الإعداد المسبق"
- self.PRESET_LOADED = "تم تحميل الإعداد المسبق"
- self.NO_ASSET_SELECTED = "لم يتم تحديد أصل"
- self.DOWNLOAD_FAILED = "فشل التنزيل"
- self.NO_BACKEND_SELECTED = "لم يتم تحديد خلفية"
- self.NO_MODEL_SELECTED = "لم يتم تحديد نموذج"
- self.REFRESH_RELEASES = "تحديث الإصدارات"
- self.NO_SUITABLE_CUDA_BACKENDS = "لم يتم العثور على خلفيات CUDA مناسبة"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}\nتم استخراج ملفات CUDA إلى {1}"
- self.CUDA_FILES_EXTRACTED = "تم استخراج ملفات CUDA إلى"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "لم يتم العثور على خلفية CUDA مناسبة للاستخراج"
- self.ERROR_FETCHING_RELEASES = "خطأ في جلب الإصدارات: {0}"
- self.CONFIRM_DELETION_TITLE = "تأكيد الحذف"
- self.LOG_FOR = "سجل لـ {0}"
- self.ALL_FILES = "جميع الملفات (*)"
- self.GGUF_FILES = "ملفات GGUF (*.gguf)"
- self.DAT_FILES = "ملفات DAT (*.dat)"
- self.JSON_FILES = "ملفات JSON (*.json)"
- self.FAILED_LOAD_PRESET = "فشل تحميل الإعداد المسبق: {0}"
- self.INITIALIZING_AUTOGGUF = "تهيئة تطبيق AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "اكتملت تهيئة AutoGGUF"
- self.REFRESHING_BACKENDS = "تحديث الخلفيات"
- self.NO_BACKENDS_AVAILABLE = "لا توجد خلفيات متاحة"
- self.FOUND_VALID_BACKENDS = "تم العثور على {0} خلفيات صالحة"
- self.SAVING_PRESET = "حفظ الإعداد المسبق"
- self.PRESET_SAVED_TO = "تم حفظ الإعداد المسبق إلى {0}"
- self.LOADING_PRESET = "تحميل الإعداد المسبق"
- self.PRESET_LOADED_FROM = "تم تحميل الإعداد المسبق من {0}"
- self.ADDING_KV_OVERRIDE = "إضافة تجاوز KV: {0}"
- self.SAVING_TASK_PRESET = "حفظ الإعداد المسبق للمهمة لـ {0}"
- self.TASK_PRESET_SAVED = "تم حفظ الإعداد المسبق للمهمة"
- self.TASK_PRESET_SAVED_TO = "تم حفظ الإعداد المسبق للمهمة إلى {0}"
- self.RESTARTING_TASK = "إعادة تشغيل المهمة: {0}"
- self.IN_PROGRESS = "قيد التقدم"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "اكتمل التنزيل. تم الاستخراج إلى: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}\nتم استخراج ملفات CUDA إلى {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "لم يتم العثور على خلفية CUDA مناسبة للاستخراج"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "تحديث إصدارات llama.cpp"
- self.UPDATING_ASSET_LIST = "تحديث قائمة الأصول"
- self.UPDATING_CUDA_OPTIONS = "تحديث خيارات CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "بدء تنزيل llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "تحديث خلفيات CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "لم يتم تحديد خلفية CUDA للاستخراج"
- self.EXTRACTING_CUDA_FILES = "استخراج ملفات CUDA من {0} إلى {1}"
- self.DOWNLOAD_ERROR = "خطأ في التنزيل: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "إظهار قائمة سياق المهمة"
- self.SHOWING_PROPERTIES_FOR_TASK = "إظهار خصائص المهمة: {0}"
- self.CANCELLING_TASK = "إلغاء المهمة: {0}"
- self.CANCELED = "تم الإلغاء"
- self.DELETING_TASK = "حذف المهمة: {0}"
- self.LOADING_MODELS = "تحميل النماذج"
- self.LOADED_MODELS = "تم تحميل {0} نماذج"
- self.BROWSING_FOR_MODELS_DIRECTORY = "استعراض دليل النماذج"
- self.SELECT_MODELS_DIRECTORY = "حدد دليل النماذج"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "استعراض دليل الإخراج"
- self.SELECT_OUTPUT_DIRECTORY = "حدد دليل الإخراج"
- self.BROWSING_FOR_LOGS_DIRECTORY = "استعراض دليل السجلات"
- self.SELECT_LOGS_DIRECTORY = "حدد دليل السجلات"
- self.BROWSING_FOR_IMATRIX_FILE = "استعراض ملف IMatrix"
- self.SELECT_IMATRIX_FILE = "حدد ملف IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} ميغابايت / {2} ميغابايت)"
- self.CPU_USAGE_FORMAT = "استخدام وحدة المعالجة المركزية: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "التحقق من صحة مدخلات التكميم"
- self.MODELS_PATH_REQUIRED = "مسار النماذج مطلوب"
- self.OUTPUT_PATH_REQUIRED = "مسار الإخراج مطلوب"
- self.LOGS_PATH_REQUIRED = "مسار السجلات مطلوب"
- self.STARTING_MODEL_QUANTIZATION = "بدء تكميم النموذج"
- self.INPUT_FILE_NOT_EXIST = "ملف الإدخال '{0}' غير موجود."
- self.QUANTIZING_MODEL_TO = "تكميم {0} إلى {1}"
- self.QUANTIZATION_TASK_STARTED = "بدأت مهمة التكميم لـ {0}"
- self.ERROR_STARTING_QUANTIZATION = "خطأ في بدء التكميم: {0}"
- self.UPDATING_MODEL_INFO = "تحديث معلومات النموذج: {0}"
- self.TASK_FINISHED = "انتهت المهمة: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "إظهار تفاصيل المهمة لـ: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "استعراض ملف بيانات IMatrix"
- self.SELECT_DATA_FILE = "حدد ملف البيانات"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "استعراض ملف نموذج IMatrix"
- self.SELECT_MODEL_FILE = "حدد ملف النموذج"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "استعراض ملف إخراج IMatrix"
- self.SELECT_OUTPUT_FILE = "حدد ملف الإخراج"
- self.STARTING_IMATRIX_GENERATION = "بدء توليد IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "مسار الخلفية غير موجود: {0}"
- self.GENERATING_IMATRIX = "توليد IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "خطأ في بدء توليد IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "بدأت مهمة توليد IMatrix"
- self.ERROR_MESSAGE = "خطأ: {0}"
- self.TASK_ERROR = "خطأ في المهمة: {0}"
- self.APPLICATION_CLOSING = "إغلاق التطبيق"
- self.APPLICATION_CLOSED = "تم إغلاق التطبيق"
- self.SELECT_QUANTIZATION_TYPE = "حدد نوع التكميم"
- self.ALLOWS_REQUANTIZING = "يسمح بإعادة تكميم الموترات التي تم تكميمها بالفعل"
- self.LEAVE_OUTPUT_WEIGHT = "سيترك output.weight غير مُكَمَّم (أو غير مُعاد تكميمه)"
- self.DISABLE_K_QUANT_MIXTURES = "تعطيل خلطات k-quant وتكميم جميع الموترات إلى نفس النوع"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "استخدام البيانات في الملف كمصفوفة أهمية لتحسينات التكميم"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "استخدام مصفوفة الأهمية لهذه الموترات"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "عدم استخدام مصفوفة الأهمية لهذه الموترات"
- self.OUTPUT_TENSOR_TYPE = "نوع موتر الإخراج:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "استخدم هذا النوع لموتر output.weight"
- self.TOKEN_EMBEDDING_TYPE = "نوع تضمين الرمز المميز:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "استخدم هذا النوع لموتر تضمينات الرمز المميز"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "سيولد نموذجًا مُكَمَّمًا في نفس شظايا الإدخال"
- self.OVERRIDE_MODEL_METADATA = "تجاوز بيانات تعريف النموذج"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "ملف بيانات الإدخال لتوليد IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "النموذج المراد تكميمه"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "مسار الإخراج لـ IMatrix الذي تم إنشاؤه"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "عدد مرات حفظ IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "تعيين قيمة تفريغ GPU (-ngl)"
- self.COMPLETED = "مكتمل"
- self.REFRESH_MODELS = "تحديث النماذج"
-
-class _Korean(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (자동 GGUF 모델 양자화기)"
- self.RAM_USAGE = "RAM 사용량:"
- self.CPU_USAGE = "CPU 사용량:"
- self.BACKEND = "Llama.cpp 백엔드:"
- self.REFRESH_BACKENDS = "백엔드 새로 고침"
- self.MODELS_PATH = "모델 경로:"
- self.OUTPUT_PATH = "출력 경로:"
- self.LOGS_PATH = "로그 경로:"
- self.BROWSE = "찾아보기"
- self.AVAILABLE_MODELS = "사용 가능한 모델:"
- self.QUANTIZATION_TYPE = "양자화 유형:"
- self.ALLOW_REQUANTIZE = "재양자화 허용"
- self.LEAVE_OUTPUT_TENSOR = "출력 텐서 유지"
- self.PURE = "순수"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "가중치 포함:"
- self.EXCLUDE_WEIGHTS = "가중치 제외:"
- self.USE_OUTPUT_TENSOR_TYPE = "출력 텐서 유형 사용"
- self.USE_TOKEN_EMBEDDING_TYPE = "토큰 임베딩 유형 사용"
- self.KEEP_SPLIT = "분할 유지"
- self.KV_OVERRIDES = "KV 재정의:"
- self.ADD_NEW_OVERRIDE = "새 재정의 추가"
- self.QUANTIZE_MODEL = "모델 양자화"
- self.SAVE_PRESET = "프리셋 저장"
- self.LOAD_PRESET = "프리셋 로드"
- self.TASKS = "작업:"
- self.DOWNLOAD_LLAMACPP = "llama.cpp 다운로드"
- self.SELECT_RELEASE = "릴리스 선택:"
- self.SELECT_ASSET = "자산 선택:"
- self.EXTRACT_CUDA_FILES = "CUDA 파일 추출"
- self.SELECT_CUDA_BACKEND = "CUDA 백엔드 선택:"
- self.DOWNLOAD = "다운로드"
- self.IMATRIX_GENERATION = "IMatrix 생성"
- self.DATA_FILE = "데이터 파일:"
- self.MODEL = "모델:"
- self.OUTPUT = "출력:"
- self.OUTPUT_FREQUENCY = "출력 빈도:"
- self.GPU_OFFLOAD = "GPU 오프로드:"
- self.AUTO = "자동"
- self.GENERATE_IMATRIX = "IMatrix 생성"
- self.ERROR = "오류"
- self.WARNING = "경고"
- self.PROPERTIES = "속성"
- self.CANCEL = "취소"
- self.RESTART = "다시 시작"
- self.DELETE = "삭제"
- self.CONFIRM_DELETION = "이 작업을 삭제하시겠습니까?"
- self.TASK_RUNNING_WARNING = "일부 작업이 아직 실행 중입니다. 종료하시겠습니까?"
- self.YES = "예"
- self.NO = "아니요"
- self.DOWNLOAD_COMPLETE = "다운로드 완료"
- self.CUDA_EXTRACTION_FAILED = "CUDA 추출 실패"
- self.PRESET_SAVED = "프리셋 저장됨"
- self.PRESET_LOADED = "프리셋 로드됨"
- self.NO_ASSET_SELECTED = "자산이 선택되지 않았습니다"
- self.DOWNLOAD_FAILED = "다운로드 실패"
- self.NO_BACKEND_SELECTED = "백엔드가 선택되지 않았습니다"
- self.NO_MODEL_SELECTED = "모델이 선택되지 않았습니다"
- self.REFRESH_RELEASES = "릴리스 새로 고침"
- self.NO_SUITABLE_CUDA_BACKENDS = "적합한 CUDA 백엔드를 찾을 수 없습니다"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다.\nCUDA 파일이 {1}에 추출되었습니다."
- self.CUDA_FILES_EXTRACTED = "CUDA 파일이 다음 위치에 추출되었습니다:"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "추출에 적합한 CUDA 백엔드를 찾을 수 없습니다."
- self.ERROR_FETCHING_RELEASES = "릴리스를 가져오는 중 오류가 발생했습니다: {0}"
- self.CONFIRM_DELETION_TITLE = "삭제 확인"
- self.LOG_FOR = "{0}에 대한 로그"
- self.ALL_FILES = "모든 파일 (*)"
- self.GGUF_FILES = "GGUF 파일 (*.gguf)"
- self.DAT_FILES = "DAT 파일 (*.dat)"
- self.JSON_FILES = "JSON 파일 (*.json)"
- self.FAILED_LOAD_PRESET = "프리셋을 로드하지 못했습니다: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUF 애플리케이션을 초기화하는 중입니다."
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF 초기화가 완료되었습니다."
- self.REFRESHING_BACKENDS = "백엔드를 새로 고치는 중입니다."
- self.NO_BACKENDS_AVAILABLE = "사용 가능한 백엔드가 없습니다."
- self.FOUND_VALID_BACKENDS = "{0}개의 유효한 백엔드를 찾았습니다."
- self.SAVING_PRESET = "프리셋을 저장하는 중입니다."
- self.PRESET_SAVED_TO = "프리셋이 {0}에 저장되었습니다."
- self.LOADING_PRESET = "프리셋을 로드하는 중입니다."
- self.PRESET_LOADED_FROM = "{0}에서 프리셋을 로드했습니다."
- self.ADDING_KV_OVERRIDE = "KV 재정의를 추가하는 중입니다: {0}"
- self.SAVING_TASK_PRESET = "{0}에 대한 작업 프리셋을 저장하는 중입니다."
- self.TASK_PRESET_SAVED = "작업 프리셋이 저장되었습니다."
- self.TASK_PRESET_SAVED_TO = "작업 프리셋이 {0}에 저장되었습니다."
- self.RESTARTING_TASK = "작업을 다시 시작하는 중입니다: {0}"
- self.IN_PROGRESS = "진행 중"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "다운로드가 완료되었습니다. 추출 위치: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다.\nCUDA 파일이 {1}에 추출되었습니다."
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "추출에 적합한 CUDA 백엔드를 찾을 수 없습니다."
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다."
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp 릴리스를 새로 고치는 중입니다."
- self.UPDATING_ASSET_LIST = "자산 목록을 업데이트하는 중입니다."
- self.UPDATING_CUDA_OPTIONS = "CUDA 옵션을 업데이트하는 중입니다."
- self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp 다운로드를 시작하는 중입니다."
- self.UPDATING_CUDA_BACKENDS = "CUDA 백엔드를 업데이트하는 중입니다."
- self.NO_CUDA_BACKEND_SELECTED = "추출에 CUDA 백엔드가 선택되지 않았습니다."
- self.EXTRACTING_CUDA_FILES = "{0}에서 {1}로 CUDA 파일을 추출하는 중입니다."
- self.DOWNLOAD_ERROR = "다운로드 오류: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "작업 컨텍스트 메뉴를 표시하는 중입니다."
- self.SHOWING_PROPERTIES_FOR_TASK = "작업에 대한 속성을 표시하는 중입니다: {0}"
- self.CANCELLING_TASK = "작업을 취소하는 중입니다: {0}"
- self.CANCELED = "취소됨"
- self.DELETING_TASK = "작업을 삭제하는 중입니다: {0}"
- self.LOADING_MODELS = "모델을 로드하는 중입니다."
- self.LOADED_MODELS = "{0}개의 모델이 로드되었습니다."
- self.BROWSING_FOR_MODELS_DIRECTORY = "모델 디렉토리를 찾아보는 중입니다."
- self.SELECT_MODELS_DIRECTORY = "모델 디렉토리 선택"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "출력 디렉토리를 찾아보는 중입니다."
- self.SELECT_OUTPUT_DIRECTORY = "출력 디렉토리 선택"
- self.BROWSING_FOR_LOGS_DIRECTORY = "로그 디렉토리를 찾아보는 중입니다."
- self.SELECT_LOGS_DIRECTORY = "로그 디렉토리 선택"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrix 파일을 찾아보는 중입니다."
- self.SELECT_IMATRIX_FILE = "IMatrix 파일 선택"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU 사용량: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "양자화 입력을 검증하는 중입니다."
- self.MODELS_PATH_REQUIRED = "모델 경로가 필요합니다."
- self.OUTPUT_PATH_REQUIRED = "출력 경로가 필요합니다."
- self.LOGS_PATH_REQUIRED = "로그 경로가 필요합니다."
- self.STARTING_MODEL_QUANTIZATION = "모델 양자화를 시작하는 중입니다."
- self.INPUT_FILE_NOT_EXIST = "입력 파일 '{0}'이 존재하지 않습니다."
- self.QUANTIZING_MODEL_TO = "{0}을(를) {1}(으)로 양자화하는 중입니다."
- self.QUANTIZATION_TASK_STARTED = "{0}에 대한 양자화 작업이 시작되었습니다."
- self.ERROR_STARTING_QUANTIZATION = "양자화를 시작하는 중 오류가 발생했습니다: {0}"
- self.UPDATING_MODEL_INFO = "모델 정보를 업데이트하는 중입니다: {0}"
- self.TASK_FINISHED = "작업이 완료되었습니다: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "다음에 대한 작업 세부 정보를 표시하는 중입니다: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix 데이터 파일을 찾아보는 중입니다."
- self.SELECT_DATA_FILE = "데이터 파일 선택"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix 모델 파일을 찾아보는 중입니다."
- self.SELECT_MODEL_FILE = "모델 파일 선택"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix 출력 파일을 찾아보는 중입니다."
- self.SELECT_OUTPUT_FILE = "출력 파일 선택"
- self.STARTING_IMATRIX_GENERATION = "IMatrix 생성을 시작하는 중입니다."
- self.BACKEND_PATH_NOT_EXIST = "백엔드 경로가 존재하지 않습니다: {0}"
- self.GENERATING_IMATRIX = "IMatrix를 생성하는 중입니다."
- self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix 생성을 시작하는 중 오류가 발생했습니다: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix 생성 작업이 시작되었습니다."
- self.ERROR_MESSAGE = "오류: {0}"
- self.TASK_ERROR = "작업 오류: {0}"
- self.APPLICATION_CLOSING = "애플리케이션을 닫는 중입니다."
- self.APPLICATION_CLOSED = "애플리케이션이 닫혔습니다."
- self.SELECT_QUANTIZATION_TYPE = "양자화 유형을 선택하세요."
- self.ALLOWS_REQUANTIZING = "이미 양자화된 텐서의 재양자화를 허용합니다."
- self.LEAVE_OUTPUT_WEIGHT = "output.weight를 (재)양자화하지 않은 상태로 둡니다."
- self.DISABLE_K_QUANT_MIXTURES = "k-양자 혼합을 비활성화하고 모든 텐서를 동일한 유형으로 양자화합니다."
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "양자 최적화를 위한 중요도 행렬로 파일의 데이터를 사용합니다."
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "이러한 텐서에 중요도 행렬을 사용합니다."
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "이러한 텐서에 중요도 행렬을 사용하지 않습니다."
- self.OUTPUT_TENSOR_TYPE = "출력 텐서 유형:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight 텐서에 이 유형을 사용합니다."
- self.TOKEN_EMBEDDING_TYPE = "토큰 임베딩 유형:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "토큰 임베딩 텐서에 이 유형을 사용합니다."
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "입력과 동일한 샤드에 양자화된 모델을 생성합니다."
- self.OVERRIDE_MODEL_METADATA = "모델 메타데이터를 재정의합니다."
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix 생성을 위한 입력 데이터 파일"
- self.MODEL_TO_BE_QUANTIZED = "양자화될 모델"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "생성된 IMatrix의 출력 경로"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix를 저장할 빈도"
- self.SET_GPU_OFFLOAD_VALUE = "GPU 오프로드 값 설정 (-ngl)"
- self.COMPLETED = "완료됨"
- self.REFRESH_MODELS = "모델 새로고침"
-
-class _Italian(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Quantizzatore Automatico di Modelli GGUF)"
- self.RAM_USAGE = "Utilizzo RAM:"
- self.CPU_USAGE = "Utilizzo CPU:"
- self.BACKEND = "Backend Llama.cpp:"
- self.REFRESH_BACKENDS = "Aggiorna Backend"
- self.MODELS_PATH = "Percorso Modelli:"
- self.OUTPUT_PATH = "Percorso Output:"
- self.LOGS_PATH = "Percorso Log:"
- self.BROWSE = "Sfoglia"
- self.AVAILABLE_MODELS = "Modelli Disponibili:"
- self.QUANTIZATION_TYPE = "Tipo di Quantizzazione:"
- self.ALLOW_REQUANTIZE = "Consenti Riquantizzazione"
- self.LEAVE_OUTPUT_TENSOR = "Lascia Tensore di Output"
- self.PURE = "Puro"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Includi Pesi:"
- self.EXCLUDE_WEIGHTS = "Escludi Pesi:"
- self.USE_OUTPUT_TENSOR_TYPE = "Usa Tipo di Tensore di Output"
- self.USE_TOKEN_EMBEDDING_TYPE = "Usa Tipo di Incorporamento Token"
- self.KEEP_SPLIT = "Mantieni Divisione"
- self.KV_OVERRIDES = "Override KV:"
- self.ADD_NEW_OVERRIDE = "Aggiungi Nuovo Override"
- self.QUANTIZE_MODEL = "Quantizza Modello"
- self.SAVE_PRESET = "Salva Preimpostazione"
- self.LOAD_PRESET = "Carica Preimpostazione"
- self.TASKS = "Attività:"
- self.DOWNLOAD_LLAMACPP = "Scarica llama.cpp"
- self.SELECT_RELEASE = "Seleziona Versione:"
- self.SELECT_ASSET = "Seleziona Asset:"
- self.EXTRACT_CUDA_FILES = "Estrai File CUDA"
- self.SELECT_CUDA_BACKEND = "Seleziona Backend CUDA:"
- self.DOWNLOAD = "Scarica"
- self.IMATRIX_GENERATION = "Generazione IMatrix"
- self.DATA_FILE = "File Dati:"
- self.MODEL = "Modello:"
- self.OUTPUT = "Output:"
- self.OUTPUT_FREQUENCY = "Frequenza di Output:"
- self.GPU_OFFLOAD = "Offload GPU:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Genera IMatrix"
- self.ERROR = "Errore"
- self.WARNING = "Avviso"
- self.PROPERTIES = "Proprietà"
- self.CANCEL = "Annulla"
- self.RESTART = "Riavvia"
- self.DELETE = "Elimina"
- self.CONFIRM_DELETION = "Sei sicuro di voler eliminare questa attività?"
- self.TASK_RUNNING_WARNING = "Alcune attività sono ancora in esecuzione. Sei sicuro di voler uscire?"
- self.YES = "Sì"
- self.NO = "No"
- self.DOWNLOAD_COMPLETE = "Download Completato"
- self.CUDA_EXTRACTION_FAILED = "Estrazione CUDA Fallita"
- self.PRESET_SAVED = "Preimpostazione Salvata"
- self.PRESET_LOADED = "Preimpostazione Caricata"
- self.NO_ASSET_SELECTED = "Nessun asset selezionato"
- self.DOWNLOAD_FAILED = "Download fallito"
- self.NO_BACKEND_SELECTED = "Nessun backend selezionato"
- self.NO_MODEL_SELECTED = "Nessun modello selezionato"
- self.REFRESH_RELEASES = "Aggiorna Versioni"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nessun backend CUDA adatto trovato"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binario llama.cpp scaricato ed estratto in {0}\nFile CUDA estratti in {1}"
- self.CUDA_FILES_EXTRACTED = "File CUDA estratti in"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nessun backend CUDA adatto trovato per l'estrazione"
- self.ERROR_FETCHING_RELEASES = "Errore durante il recupero delle versioni: {0}"
- self.CONFIRM_DELETION_TITLE = "Conferma Eliminazione"
- self.LOG_FOR = "Log per {0}"
- self.ALL_FILES = "Tutti i File (*)"
- self.GGUF_FILES = "File GGUF (*.gguf)"
- self.DAT_FILES = "File DAT (*.dat)"
- self.JSON_FILES = "File JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Impossibile caricare la preimpostazione: {0}"
- self.INITIALIZING_AUTOGGUF = "Inizializzazione dell'applicazione AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inizializzazione di AutoGGUF completata"
- self.REFRESHING_BACKENDS = "Aggiornamento backend"
- self.NO_BACKENDS_AVAILABLE = "Nessun backend disponibile"
- self.FOUND_VALID_BACKENDS = "Trovati {0} backend validi"
- self.SAVING_PRESET = "Salvataggio preimpostazione"
- self.PRESET_SAVED_TO = "Preimpostazione salvata in {0}"
- self.LOADING_PRESET = "Caricamento preimpostazione"
- self.PRESET_LOADED_FROM = "Preimpostazione caricata da {0}"
- self.ADDING_KV_OVERRIDE = "Aggiunta override KV: {0}"
- self.SAVING_TASK_PRESET = "Salvataggio preimpostazione attività per {0}"
- self.TASK_PRESET_SAVED = "Preimpostazione Attività Salvata"
- self.TASK_PRESET_SAVED_TO = "Preimpostazione attività salvata in {0}"
- self.RESTARTING_TASK = "Riavvio attività: {0}"
- self.IN_PROGRESS = "In Corso"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download completato. Estratto in: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binario llama.cpp scaricato ed estratto in {0}\nFile CUDA estratti in {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nessun backend CUDA adatto trovato per l'estrazione"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binario llama.cpp scaricato ed estratto in {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Aggiornamento versioni di llama.cpp"
- self.UPDATING_ASSET_LIST = "Aggiornamento elenco asset"
- self.UPDATING_CUDA_OPTIONS = "Aggiornamento opzioni CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Avvio download di llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Aggiornamento backend CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Nessun backend CUDA selezionato per l'estrazione"
- self.EXTRACTING_CUDA_FILES = "Estrazione file CUDA da {0} a {1}"
- self.DOWNLOAD_ERROR = "Errore di download: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Visualizzazione menu contestuale attività"
- self.SHOWING_PROPERTIES_FOR_TASK = "Visualizzazione proprietà per l'attività: {0}"
- self.CANCELLING_TASK = "Annullamento attività: {0}"
- self.CANCELED = "Annullato"
- self.DELETING_TASK = "Eliminazione attività: {0}"
- self.LOADING_MODELS = "Caricamento modelli"
- self.LOADED_MODELS = "{0} modelli caricati"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Esplorazione directory modelli"
- self.SELECT_MODELS_DIRECTORY = "Seleziona Directory Modelli"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Esplorazione directory output"
- self.SELECT_OUTPUT_DIRECTORY = "Seleziona Directory Output"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Esplorazione directory log"
- self.SELECT_LOGS_DIRECTORY = "Seleziona Directory Log"
- self.BROWSING_FOR_IMATRIX_FILE = "Esplorazione file IMatrix"
- self.SELECT_IMATRIX_FILE = "Seleziona File IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Utilizzo CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Convalida input di quantizzazione"
- self.MODELS_PATH_REQUIRED = "Il percorso dei modelli è obbligatorio"
- self.OUTPUT_PATH_REQUIRED = "Il percorso di output è obbligatorio"
- self.LOGS_PATH_REQUIRED = "Il percorso dei log è obbligatorio"
- self.STARTING_MODEL_QUANTIZATION = "Avvio quantizzazione del modello"
- self.INPUT_FILE_NOT_EXIST = "Il file di input '{0}' non esiste."
- self.QUANTIZING_MODEL_TO = "Quantizzazione di {0} a {1}"
- self.QUANTIZATION_TASK_STARTED = "Attività di quantizzazione avviata per {0}"
- self.ERROR_STARTING_QUANTIZATION = "Errore durante l'avvio della quantizzazione: {0}"
- self.UPDATING_MODEL_INFO = "Aggiornamento informazioni sul modello: {0}"
- self.TASK_FINISHED = "Attività completata: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Visualizzazione dettagli attività per: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Esplorazione file dati IMatrix"
- self.SELECT_DATA_FILE = "Seleziona File Dati"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Esplorazione file modello IMatrix"
- self.SELECT_MODEL_FILE = "Seleziona File Modello"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Esplorazione file output IMatrix"
- self.SELECT_OUTPUT_FILE = "Seleziona File Output"
- self.STARTING_IMATRIX_GENERATION = "Avvio generazione IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Il percorso del backend non esiste: {0}"
- self.GENERATING_IMATRIX = "Generazione IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Errore durante l'avvio della generazione di IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Attività di generazione IMatrix avviata"
- self.ERROR_MESSAGE = "Errore: {0}"
- self.TASK_ERROR = "Errore attività: {0}"
- self.APPLICATION_CLOSING = "Chiusura applicazione"
- self.APPLICATION_CLOSED = "Applicazione chiusa"
- self.SELECT_QUANTIZATION_TYPE = "Seleziona il tipo di quantizzazione"
- self.ALLOWS_REQUANTIZING = "Consente di riquantizzare tensori che sono già stati quantizzati"
- self.LEAVE_OUTPUT_WEIGHT = "Lascerà output.weight non (ri)quantizzato"
- self.DISABLE_K_QUANT_MIXTURES = "Disabilita le miscele k-quant e quantizza tutti i tensori allo stesso tipo"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilizza i dati nel file come matrice di importanza per le ottimizzazioni di quantizzazione"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usa la matrice di importanza per questi tensori"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Non usare la matrice di importanza per questi tensori"
- self.OUTPUT_TENSOR_TYPE = "Tipo di Tensore di Output:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usa questo tipo per il tensore output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Tipo di Incorporamento Token:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usa questo tipo per il tensore di incorporamenti token"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Genererà il modello quantizzato negli stessi frammenti dell'input"
- self.OVERRIDE_MODEL_METADATA = "Sovrascrivi i metadati del modello"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "File di dati di input per la generazione di IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modello da quantizzare"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Percorso di output per l'IMatrix generato"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Con quale frequenza salvare l'IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Imposta il valore di offload GPU (-ngl)"
- self.COMPLETED = "Completato"
- self.REFRESH_MODELS = "Aggiorna modelli"
-
-class _Turkish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Otomatik GGUF Modeli Niceleyici)"
- self.RAM_USAGE = "RAM Kullanımı:"
- self.CPU_USAGE = "CPU Kullanımı:"
- self.BACKEND = "Llama.cpp Arka Uç:"
- self.REFRESH_BACKENDS = "Arka Uçları Yenile"
- self.MODELS_PATH = "Modeller Yolu:"
- self.OUTPUT_PATH = "Çıkış Yolu:"
- self.LOGS_PATH = "Günlükler Yolu:"
- self.BROWSE = "Gözat"
- self.AVAILABLE_MODELS = "Kullanılabilir Modeller:"
- self.QUANTIZATION_TYPE = "Niceleme Türü:"
- self.ALLOW_REQUANTIZE = "Yeniden Nicelemeye İzin Ver"
- self.LEAVE_OUTPUT_TENSOR = "Çıkış Tensörünü Bırak"
- self.PURE = "Saf"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Ağırlıkları Dahil Et:"
- self.EXCLUDE_WEIGHTS = "Ağırlıkları Hariç Tut:"
- self.USE_OUTPUT_TENSOR_TYPE = "Çıkış Tensör Türünü Kullan"
- self.USE_TOKEN_EMBEDDING_TYPE = "Token Gömme Türünü Kullan"
- self.KEEP_SPLIT = "Bölmeyi Koru"
- self.KV_OVERRIDES = "KV Geçersiz Kılmaları:"
- self.ADD_NEW_OVERRIDE = "Yeni Geçersiz Kılma Ekle"
- self.QUANTIZE_MODEL = "Modeli Nicele"
- self.SAVE_PRESET = "Ön Ayarı Kaydet"
- self.LOAD_PRESET = "Ön Ayarı Yükle"
- self.TASKS = "Görevler:"
- self.DOWNLOAD_LLAMACPP = "llama.cpp'yi İndir"
- self.SELECT_RELEASE = "Sürümü Seç:"
- self.SELECT_ASSET = "Varlığı Seç:"
- self.EXTRACT_CUDA_FILES = "CUDA Dosyalarını Çıkar"
- self.SELECT_CUDA_BACKEND = "CUDA Arka Ucunu Seç:"
- self.DOWNLOAD = "İndir"
- self.IMATRIX_GENERATION = "IMatrix Üretimi"
- self.DATA_FILE = "Veri Dosyası:"
- self.MODEL = "Model:"
- self.OUTPUT = "Çıkış:"
- self.OUTPUT_FREQUENCY = "Çıkış Sıklığı:"
- self.GPU_OFFLOAD = "GPU Yük Boşaltma:"
- self.AUTO = "Otomatik"
- self.GENERATE_IMATRIX = "IMatrix Oluştur"
- self.ERROR = "Hata"
- self.WARNING = "Uyarı"
- self.PROPERTIES = "Özellikler"
- self.CANCEL = "İptal"
- self.RESTART = "Yeniden Başlat"
- self.DELETE = "Sil"
- self.CONFIRM_DELETION = "Bu görevi silmek istediğinizden emin misiniz?"
- self.TASK_RUNNING_WARNING = "Bazı görevler hala çalışıyor. Çıkmak istediğinizden emin misiniz?"
- self.YES = "Evet"
- self.NO = "Hayır"
- self.DOWNLOAD_COMPLETE = "İndirme Tamamlandı"
- self.CUDA_EXTRACTION_FAILED = "CUDA Çıkarma Başarısız"
- self.PRESET_SAVED = "Ön Ayar Kaydedildi"
- self.PRESET_LOADED = "Ön Ayar Yüklendi"
- self.NO_ASSET_SELECTED = "Varlık seçilmedi"
- self.DOWNLOAD_FAILED = "İndirme başarısız"
- self.NO_BACKEND_SELECTED = "Arka uç seçilmedi"
- self.NO_MODEL_SELECTED = "Model seçilmedi"
- self.REFRESH_RELEASES = "Sürümleri Yenile"
- self.NO_SUITABLE_CUDA_BACKENDS = "Uygun CUDA arka uçları bulunamadı"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı\nCUDA dosyaları {1} konumuna çıkarıldı"
- self.CUDA_FILES_EXTRACTED = "CUDA dosyaları şu konuma çıkarıldı:"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Çıkarma için uygun bir CUDA arka ucu bulunamadı"
- self.ERROR_FETCHING_RELEASES = "Sürümleri getirirken hata oluştu: {0}"
- self.CONFIRM_DELETION_TITLE = "Silmeyi Onayla"
- self.LOG_FOR = "{0} için Günlük"
- self.ALL_FILES = "Tüm Dosyalar (*)"
- self.GGUF_FILES = "GGUF Dosyaları (*.gguf)"
- self.DAT_FILES = "DAT Dosyaları (*.dat)"
- self.JSON_FILES = "JSON Dosyaları (*.json)"
- self.FAILED_LOAD_PRESET = "Ön ayarı yükleme başarısız: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUF uygulaması başlatılıyor"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF başlatması tamamlandı"
- self.REFRESHING_BACKENDS = "Arka uçlar yenileniyor"
- self.NO_BACKENDS_AVAILABLE = "Kullanılabilir arka uç yok"
- self.FOUND_VALID_BACKENDS = "{0} geçerli arka uç bulundu"
- self.SAVING_PRESET = "Ön ayar kaydediliyor"
- self.PRESET_SAVED_TO = "Ön ayar {0} konumuna kaydedildi"
- self.LOADING_PRESET = "Ön ayar yükleniyor"
- self.PRESET_LOADED_FROM = "Ön ayar {0} konumundan yüklendi"
- self.ADDING_KV_OVERRIDE = "KV geçersiz kılma ekleniyor: {0}"
- self.SAVING_TASK_PRESET = "{0} için görev ön ayarı kaydediliyor"
- self.TASK_PRESET_SAVED = "Görev Ön Ayarı Kaydedildi"
- self.TASK_PRESET_SAVED_TO = "Görev ön ayarı {0} konumuna kaydedildi"
- self.RESTARTING_TASK = "Görev yeniden başlatılıyor: {0}"
- self.IN_PROGRESS = "Devam Ediyor"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "İndirme tamamlandı. Şuraya çıkarıldı: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı\nCUDA dosyaları {1} konumuna çıkarıldı"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Çıkarma için uygun bir CUDA arka ucu bulunamadı"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı"
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp sürümleri yenileniyor"
- self.UPDATING_ASSET_LIST = "Varlık listesi güncelleniyor"
- self.UPDATING_CUDA_OPTIONS = "CUDA seçenekleri güncelleniyor"
- self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp indirme başlatılıyor"
- self.UPDATING_CUDA_BACKENDS = "CUDA arka uçları güncelleniyor"
- self.NO_CUDA_BACKEND_SELECTED = "Çıkarma için CUDA arka ucu seçilmedi"
- self.EXTRACTING_CUDA_FILES = "CUDA dosyaları {0} konumundan {1} konumuna çıkarılıyor"
- self.DOWNLOAD_ERROR = "İndirme hatası: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Görev bağlam menüsü gösteriliyor"
- self.SHOWING_PROPERTIES_FOR_TASK = "Görev için özellikler gösteriliyor: {0}"
- self.CANCELLING_TASK = "Görev iptal ediliyor: {0}"
- self.CANCELED = "İptal Edildi"
- self.DELETING_TASK = "Görev siliniyor: {0}"
- self.LOADING_MODELS = "Modeller yükleniyor"
- self.LOADED_MODELS = "{0} model yüklendi"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Modeller dizinine göz atılıyor"
- self.SELECT_MODELS_DIRECTORY = "Modeller Dizini Seç"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Çıkış dizinine göz atılıyor"
- self.SELECT_OUTPUT_DIRECTORY = "Çıkış Dizini Seç"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Günlükler dizinine göz atılıyor"
- self.SELECT_LOGS_DIRECTORY = "Günlükler Dizini Seç"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrix dosyasına göz atılıyor"
- self.SELECT_IMATRIX_FILE = "IMatrix Dosyası Seç"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU Kullanımı: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Niceleme girişleri doğrulanıyor"
- self.MODELS_PATH_REQUIRED = "Modeller yolu gerekli"
- self.OUTPUT_PATH_REQUIRED = "Çıkış yolu gerekli"
- self.LOGS_PATH_REQUIRED = "Günlükler yolu gerekli"
- self.STARTING_MODEL_QUANTIZATION = "Model niceleme başlatılıyor"
- self.INPUT_FILE_NOT_EXIST = "Giriş dosyası '{0}' mevcut değil."
- self.QUANTIZING_MODEL_TO = "{0} öğesi {1} öğesine niceleniyor"
- self.QUANTIZATION_TASK_STARTED = "{0} için niceleme görevi başlatıldı"
- self.ERROR_STARTING_QUANTIZATION = "Niceleme başlatılırken hata oluştu: {0}"
- self.UPDATING_MODEL_INFO = "Model bilgileri güncelleniyor: {0}"
- self.TASK_FINISHED = "Görev tamamlandı: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Şunun için görev ayrıntıları gösteriliyor: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix veri dosyasına göz atılıyor"
- self.SELECT_DATA_FILE = "Veri Dosyası Seç"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix model dosyasına göz atılıyor"
- self.SELECT_MODEL_FILE = "Model Dosyası Seç"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix çıkış dosyasına göz atılıyor"
- self.SELECT_OUTPUT_FILE = "Çıkış Dosyası Seç"
- self.STARTING_IMATRIX_GENERATION = "IMatrix üretimi başlatılıyor"
- self.BACKEND_PATH_NOT_EXIST = "Arka uç yolu mevcut değil: {0}"
- self.GENERATING_IMATRIX = "IMatrix oluşturuluyor"
- self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix üretimi başlatılırken hata oluştu: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix oluşturma görevi başlatıldı"
- self.ERROR_MESSAGE = "Hata: {0}"
- self.TASK_ERROR = "Görev hatası: {0}"
- self.APPLICATION_CLOSING = "Uygulama kapatılıyor"
- self.APPLICATION_CLOSED = "Uygulama kapatıldı"
- self.SELECT_QUANTIZATION_TYPE = "Niceleme türünü seçin"
- self.ALLOWS_REQUANTIZING = "Zaten niceleme yapılmış tensörlerin yeniden nicelemesine izin verir"
- self.LEAVE_OUTPUT_WEIGHT = "output.weight öğesini (yeniden) nicelememiş halde bırakır"
- self.DISABLE_K_QUANT_MIXTURES = "k-Quant karışımlarını devre dışı bırakın ve tüm tensörleri aynı türe niceleyin"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Quant optimizasyonları için dosyadaki verileri önem matrisi olarak kullanın"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Bu tensörler için önem matrisini kullanın"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Bu tensörler için önem matrisini kullanmayın"
- self.OUTPUT_TENSOR_TYPE = "Çıkış Tensör Türü:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight tensörü için bu türü kullanın"
- self.TOKEN_EMBEDDING_TYPE = "Token Gömme Türü:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Token gömme tensörü için bu türü kullanın"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Nicelenmiş modeli girişle aynı parçalarda oluşturacaktır"
- self.OVERRIDE_MODEL_METADATA = "Model meta verilerini geçersiz kıl"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix oluşturma için giriş veri dosyası"
- self.MODEL_TO_BE_QUANTIZED = "Nicelenecek model"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Oluşturulan IMatrix için çıkış yolu"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix'in ne sıklıkta kaydedileceği"
- self.SET_GPU_OFFLOAD_VALUE = "GPU yük boşaltma değerini ayarla (-ngl)"
- self.COMPLETED = "Tamamlandı"
- self.REFRESH_MODELS = "Modelleri yenile"
-
-class _Dutch(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automatische GGUF-modelkwantisering)"
- self.RAM_USAGE = "RAM-gebruik:"
- self.CPU_USAGE = "CPU-gebruik:"
- self.BACKEND = "Llama.cpp Backend:"
- self.REFRESH_BACKENDS = "Backends vernieuwen"
- self.MODELS_PATH = "Modelpad:"
- self.OUTPUT_PATH = "Uitvoerpad:"
- self.LOGS_PATH = "Logboekpad:"
- self.BROWSE = "Bladeren"
- self.AVAILABLE_MODELS = "Beschikbare modellen:"
- self.QUANTIZATION_TYPE = "Kwantiseringstype:"
- self.ALLOW_REQUANTIZE = "Herkwantisering toestaan"
- self.LEAVE_OUTPUT_TENSOR = "Uitvoertensor behouden"
- self.PURE = "Zuiver"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Gewichten opnemen:"
- self.EXCLUDE_WEIGHTS = "Gewichten uitsluiten:"
- self.USE_OUTPUT_TENSOR_TYPE = "Uitvoertensortype gebruiken"
- self.USE_TOKEN_EMBEDDING_TYPE = "Tokeninbeddingstype gebruiken"
- self.KEEP_SPLIT = "Splitsing behouden"
- self.KV_OVERRIDES = "KV-overschrijvingen:"
- self.ADD_NEW_OVERRIDE = "Nieuwe overschrijving toevoegen"
- self.QUANTIZE_MODEL = "Model kwantiseren"
- self.SAVE_PRESET = "Voorinstelling opslaan"
- self.LOAD_PRESET = "Voorinstelling laden"
- self.TASKS = "Taken:"
- self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
- self.SELECT_RELEASE = "Selecteer release:"
- self.SELECT_ASSET = "Selecteer item:"
- self.EXTRACT_CUDA_FILES = "CUDA-bestanden uitpakken"
- self.SELECT_CUDA_BACKEND = "Selecteer CUDA-backend:"
- self.DOWNLOAD = "Downloaden"
- self.IMATRIX_GENERATION = "IMatrix-generatie"
- self.DATA_FILE = "Gegevensbestand:"
- self.MODEL = "Model:"
- self.OUTPUT = "Uitvoer:"
- self.OUTPUT_FREQUENCY = "Uitvoerfrequentie:"
- self.GPU_OFFLOAD = "GPU-offload:"
- self.AUTO = "Automatisch"
- self.GENERATE_IMATRIX = "IMatrix genereren"
- self.ERROR = "Fout"
- self.WARNING = "Waarschuwing"
- self.PROPERTIES = "Eigenschappen"
- self.CANCEL = "Annuleren"
- self.RESTART = "Opnieuw starten"
- self.DELETE = "Verwijderen"
- self.CONFIRM_DELETION = "Weet u zeker dat u deze taak wilt verwijderen?"
- self.TASK_RUNNING_WARNING = "Sommige taken worden nog uitgevoerd. Weet u zeker dat u wilt afsluiten?"
- self.YES = "Ja"
- self.NO = "Nee"
- self.DOWNLOAD_COMPLETE = "Download voltooid"
- self.CUDA_EXTRACTION_FAILED = "CUDA-extractie mislukt"
- self.PRESET_SAVED = "Voorinstelling opgeslagen"
- self.PRESET_LOADED = "Voorinstelling geladen"
- self.NO_ASSET_SELECTED = "Geen item geselecteerd"
- self.DOWNLOAD_FAILED = "Download mislukt"
- self.NO_BACKEND_SELECTED = "Geen backend geselecteerd"
- self.NO_MODEL_SELECTED = "Geen model geselecteerd"
- self.REFRESH_RELEASES = "Releases vernieuwen"
- self.NO_SUITABLE_CUDA_BACKENDS = "Geen geschikte CUDA-backends gevonden"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}\nCUDA-bestanden uitgepakt naar {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA-bestanden uitgepakt naar"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Geen geschikte CUDA-backend gevonden voor extractie"
- self.ERROR_FETCHING_RELEASES = "Fout bij het ophalen van releases: {0}"
- self.CONFIRM_DELETION_TITLE = "Verwijdering bevestigen"
- self.LOG_FOR = "Logboek voor {0}"
- self.ALL_FILES = "Alle bestanden (*)"
- self.GGUF_FILES = "GGUF-bestanden (*.gguf)"
- self.DAT_FILES = "DAT-bestanden (*.dat)"
- self.JSON_FILES = "JSON-bestanden (*.json)"
- self.FAILED_LOAD_PRESET = "Voorinstelling laden mislukt: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUF-applicatie wordt geïnitialiseerd"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-initialisatie voltooid"
- self.REFRESHING_BACKENDS = "Backends worden vernieuwd"
- self.NO_BACKENDS_AVAILABLE = "Geen backends beschikbaar"
- self.FOUND_VALID_BACKENDS = "{0} geldige backends gevonden"
- self.SAVING_PRESET = "Voorinstelling wordt opgeslagen"
- self.PRESET_SAVED_TO = "Voorinstelling opgeslagen in {0}"
- self.LOADING_PRESET = "Voorinstelling wordt geladen"
- self.PRESET_LOADED_FROM = "Voorinstelling geladen van {0}"
- self.ADDING_KV_OVERRIDE = "KV-overschrijving toevoegen: {0}"
- self.SAVING_TASK_PRESET = "Taakvoorinstelling opslaan voor {0}"
- self.TASK_PRESET_SAVED = "Taakvoorinstelling opgeslagen"
- self.TASK_PRESET_SAVED_TO = "Taakvoorinstelling opgeslagen in {0}"
- self.RESTARTING_TASK = "Taak opnieuw starten: {0}"
- self.IN_PROGRESS = "Bezig"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download voltooid. Uitgepakt naar: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}\nCUDA-bestanden uitgepakt naar {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Geen geschikte CUDA-backend gevonden voor extractie"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp-releases worden vernieuwd"
- self.UPDATING_ASSET_LIST = "Itemlijst wordt bijgewerkt"
- self.UPDATING_CUDA_OPTIONS = "CUDA-opties worden bijgewerkt"
- self.STARTING_LLAMACPP_DOWNLOAD = "Downloaden van llama.cpp wordt gestart"
- self.UPDATING_CUDA_BACKENDS = "CUDA-backends worden bijgewerkt"
- self.NO_CUDA_BACKEND_SELECTED = "Geen CUDA-backend geselecteerd voor extractie"
- self.EXTRACTING_CUDA_FILES = "CUDA-bestanden uitpakken van {0} naar {1}"
- self.DOWNLOAD_ERROR = "Downloadfout: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Taakcontextmenu weergeven"
- self.SHOWING_PROPERTIES_FOR_TASK = "Eigenschappen voor taak weergeven: {0}"
- self.CANCELLING_TASK = "Taak annuleren: {0}"
- self.CANCELED = "Geannuleerd"
- self.DELETING_TASK = "Taak verwijderen: {0}"
- self.LOADING_MODELS = "Modellen laden"
- self.LOADED_MODELS = "{0} modellen geladen"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Bladeren naar modelmap"
- self.SELECT_MODELS_DIRECTORY = "Selecteer modelmap"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Bladeren naar uitvoermap"
- self.SELECT_OUTPUT_DIRECTORY = "Selecteer uitvoermap"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Bladeren naar logboekmap"
- self.SELECT_LOGS_DIRECTORY = "Selecteer logboekmap"
- self.BROWSING_FOR_IMATRIX_FILE = "Bladeren naar IMatrix-bestand"
- self.SELECT_IMATRIX_FILE = "Selecteer IMatrix-bestand"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU-gebruik: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Kwantiseringsinvoer valideren"
- self.MODELS_PATH_REQUIRED = "Modelpad is vereist"
- self.OUTPUT_PATH_REQUIRED = "Uitvoerpad is vereist"
- self.LOGS_PATH_REQUIRED = "Logboekpad is vereist"
- self.STARTING_MODEL_QUANTIZATION = "Modelkwantisering starten"
- self.INPUT_FILE_NOT_EXIST = "Invoerbestand '{0}' bestaat niet."
- self.QUANTIZING_MODEL_TO = "Kwantiseren van {0} naar {1}"
- self.QUANTIZATION_TASK_STARTED = "Kwantiseringstaak gestart voor {0}"
- self.ERROR_STARTING_QUANTIZATION = "Fout bij het starten van kwantisering: {0}"
- self.UPDATING_MODEL_INFO = "Modelinformatie bijwerken: {0}"
- self.TASK_FINISHED = "Taak voltooid: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Taakdetails weergeven voor: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Bladeren naar IMatrix-gegevensbestand"
- self.SELECT_DATA_FILE = "Selecteer gegevensbestand"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Bladeren naar IMatrix-modelbestand"
- self.SELECT_MODEL_FILE = "Selecteer modelbestand"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Bladeren naar IMatrix-uitvoerbestand"
- self.SELECT_OUTPUT_FILE = "Selecteer uitvoerbestand"
- self.STARTING_IMATRIX_GENERATION = "IMatrix-generatie starten"
- self.BACKEND_PATH_NOT_EXIST = "Backendpad bestaat niet: {0}"
- self.GENERATING_IMATRIX = "IMatrix genereren"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Fout bij het starten van IMatrix-generatie: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-generatietaak gestart"
- self.ERROR_MESSAGE = "Fout: {0}"
- self.TASK_ERROR = "Taakfout: {0}"
- self.APPLICATION_CLOSING = "Applicatie wordt afgesloten"
- self.APPLICATION_CLOSED = "Applicatie afgesloten"
- self.SELECT_QUANTIZATION_TYPE = "Selecteer het kwantiseringstype"
- self.ALLOWS_REQUANTIZING = "Staat herkwantisering toe van tensoren die al gekwantiseerd zijn"
- self.LEAVE_OUTPUT_WEIGHT = "Laat output.weight niet (opnieuw) gekwantiseerd"
- self.DISABLE_K_QUANT_MIXTURES = "Schakel k-kwant-mengsels uit en kwantiseer alle tensoren naar hetzelfde type"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Gebruik gegevens in bestand als belangrijkheidsmatrix voor kwant-optimalisaties"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Gebruik belangrijkheidsmatrix voor deze tensoren"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Gebruik geen belangrijkheidsmatrix voor deze tensoren"
- self.OUTPUT_TENSOR_TYPE = "Uitvoertensortype:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Gebruik dit type voor de output.weight-tensor"
- self.TOKEN_EMBEDDING_TYPE = "Tokeninbeddingstype:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Gebruik dit type voor de tokeninbeddingstensor"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Genereert een gekwantiseerd model in dezelfde shards als de invoer"
- self.OVERRIDE_MODEL_METADATA = "Modelmetadata overschrijven"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Invoergegevensbestand voor IMatrix-generatie"
- self.MODEL_TO_BE_QUANTIZED = "Te kwantiseren model"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Uitvoerpad voor de gegenereerde IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Hoe vaak de IMatrix moet worden opgeslagen"
- self.SET_GPU_OFFLOAD_VALUE = "Stel de GPU-offloadwaarde in (-ngl)"
- self.COMPLETED = "Voltooid"
- self.REFRESH_MODELS = "Modellen vernieuwen"
-
-class _Finnish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automaattinen GGUF-mallien kvantisoija)"
- self.RAM_USAGE = "RAM-muistin käyttö:"
- self.CPU_USAGE = "CPU:n käyttö:"
- self.BACKEND = "Llama.cpp-taustaosa:"
- self.REFRESH_BACKENDS = "Päivitä taustaosat"
- self.MODELS_PATH = "Mallien polku:"
- self.OUTPUT_PATH = "Tulostepolku:"
- self.LOGS_PATH = "Lokien polku:"
- self.BROWSE = "Selaa"
- self.AVAILABLE_MODELS = "Käytettävissä olevat mallit:"
- self.QUANTIZATION_TYPE = "Kvantisointityyppi:"
- self.ALLOW_REQUANTIZE = "Salli uudelleenkvantisointi"
- self.LEAVE_OUTPUT_TENSOR = "Jätä tulostensori"
- self.PURE = "Puhdas"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Sisällytä painot:"
- self.EXCLUDE_WEIGHTS = "Sulje pois painot:"
- self.USE_OUTPUT_TENSOR_TYPE = "Käytä tulostensorin tyyppiä"
- self.USE_TOKEN_EMBEDDING_TYPE = "Käytä token-upotustyyppiä"
- self.KEEP_SPLIT = "Säilytä jako"
- self.KV_OVERRIDES = "KV-ohitukset:"
- self.ADD_NEW_OVERRIDE = "Lisää uusi ohitus"
- self.QUANTIZE_MODEL = "Kvantisoi malli"
- self.SAVE_PRESET = "Tallenna esiasetus"
- self.LOAD_PRESET = "Lataa esiasetus"
- self.TASKS = "Tehtävät:"
- self.DOWNLOAD_LLAMACPP = "Lataa llama.cpp"
- self.SELECT_RELEASE = "Valitse julkaisu:"
- self.SELECT_ASSET = "Valitse resurssi:"
- self.EXTRACT_CUDA_FILES = "Pura CUDA-tiedostot"
- self.SELECT_CUDA_BACKEND = "Valitse CUDA-taustaosa:"
- self.DOWNLOAD = "Lataa"
- self.IMATRIX_GENERATION = "IMatrix-generointi"
- self.DATA_FILE = "Datatiedosto:"
- self.MODEL = "Malli:"
- self.OUTPUT = "Tuloste:"
- self.OUTPUT_FREQUENCY = "Tulostetaajuus:"
- self.GPU_OFFLOAD = "GPU-kuormansiirto:"
- self.AUTO = "Automaattinen"
- self.GENERATE_IMATRIX = "Generoi IMatrix"
- self.ERROR = "Virhe"
- self.WARNING = "Varoitus"
- self.PROPERTIES = "Ominaisuudet"
- self.CANCEL = "Peruuta"
- self.RESTART = "Käynnistä uudelleen"
- self.DELETE = "Poista"
- self.CONFIRM_DELETION = "Haluatko varmasti poistaa tämän tehtävän?"
- self.TASK_RUNNING_WARNING = "Jotkin tehtävät ovat vielä käynnissä. Haluatko varmasti lopettaa?"
- self.YES = "Kyllä"
- self.NO = "Ei"
- self.DOWNLOAD_COMPLETE = "Lataus valmis"
- self.CUDA_EXTRACTION_FAILED = "CUDA-purku epäonnistui"
- self.PRESET_SAVED = "Esiasetus tallennettu"
- self.PRESET_LOADED = "Esiasetus ladattu"
- self.NO_ASSET_SELECTED = "Ei resurssia valittuna"
- self.DOWNLOAD_FAILED = "Lataus epäonnistui"
- self.NO_BACKEND_SELECTED = "Ei taustaosaa valittuna"
- self.NO_MODEL_SELECTED = "Ei mallia valittuna"
- self.REFRESH_RELEASES = "Päivitä julkaisut"
- self.NO_SUITABLE_CUDA_BACKENDS = "Sopivia CUDA-taustaosia ei löytynyt"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}\nCUDA-tiedostot purettu kansioon {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA-tiedostot purettu kansioon"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Sopivaa CUDA-taustaosaa purkua varten ei löytynyt"
- self.ERROR_FETCHING_RELEASES = "Virhe haettaessa julkaisuja: {0}"
- self.CONFIRM_DELETION_TITLE = "Vahvista poisto"
- self.LOG_FOR = "Loki kohteelle {0}"
- self.ALL_FILES = "Kaikki tiedostot (*)"
- self.GGUF_FILES = "GGUF-tiedostot (*.gguf)"
- self.DAT_FILES = "DAT-tiedostot (*.dat)"
- self.JSON_FILES = "JSON-tiedostot (*.json)"
- self.FAILED_LOAD_PRESET = "Esiasetuksen lataus epäonnistui: {0}"
- self.INITIALIZING_AUTOGGUF = "Alustetaan AutoGGUF-sovellusta"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-alustus valmis"
- self.REFRESHING_BACKENDS = "Päivitetään taustaosia"
- self.NO_BACKENDS_AVAILABLE = "Ei käytettävissä olevia taustaosia"
- self.FOUND_VALID_BACKENDS = "Löydettiin {0} kelvollista taustaosaa"
- self.SAVING_PRESET = "Tallennetaan esiasetusta"
- self.PRESET_SAVED_TO = "Esiasetus tallennettu kansioon {0}"
- self.LOADING_PRESET = "Ladataan esiasetusta"
- self.PRESET_LOADED_FROM = "Esiasetus ladattu kansiosta {0}"
- self.ADDING_KV_OVERRIDE = "Lisätään KV-ohitus: {0}"
- self.SAVING_TASK_PRESET = "Tallennetaan tehtäväesiasetusta kohteelle {0}"
- self.TASK_PRESET_SAVED = "Tehtäväesiasetus tallennettu"
- self.TASK_PRESET_SAVED_TO = "Tehtäväesiasetus tallennettu kansioon {0}"
- self.RESTARTING_TASK = "Käynnistetään tehtävä uudelleen: {0}"
- self.IN_PROGRESS = "Käynnissä"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Lataus valmis. Purettu kansioon: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}\nCUDA-tiedostot purettu kansioon {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Sopivaa CUDA-taustaosaa purkua varten ei löytynyt"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Päivitetään llama.cpp-julkaisuja"
- self.UPDATING_ASSET_LIST = "Päivitetään resurssilistaa"
- self.UPDATING_CUDA_OPTIONS = "Päivitetään CUDA-asetuksia"
- self.STARTING_LLAMACPP_DOWNLOAD = "Aloitetaan llama.cpp:n lataus"
- self.UPDATING_CUDA_BACKENDS = "Päivitetään CUDA-taustaosia"
- self.NO_CUDA_BACKEND_SELECTED = "Ei CUDA-taustaosaa valittuna purkua varten"
- self.EXTRACTING_CUDA_FILES = "Puretaan CUDA-tiedostoja kansiosta {0} kansioon {1}"
- self.DOWNLOAD_ERROR = "Latausvirhe: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Näytetään tehtäväkontekstivalikko"
- self.SHOWING_PROPERTIES_FOR_TASK = "Näytetään tehtävän ominaisuudet: {0}"
- self.CANCELLING_TASK = "Peruutetaan tehtävää: {0}"
- self.CANCELED = "Peruutettu"
- self.DELETING_TASK = "Poistetaan tehtävää: {0}"
- self.LOADING_MODELS = "Ladataan malleja"
- self.LOADED_MODELS = "{0} mallia ladattu"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Selaillaan mallikansiota"
- self.SELECT_MODELS_DIRECTORY = "Valitse mallikansio"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Selaillaan tulostekansiota"
- self.SELECT_OUTPUT_DIRECTORY = "Valitse tulostekansio"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Selaillaan lokikansiota"
- self.SELECT_LOGS_DIRECTORY = "Valitse lokikansio"
- self.BROWSING_FOR_IMATRIX_FILE = "Selaillaan IMatrix-tiedostoa"
- self.SELECT_IMATRIX_FILE = "Valitse IMatrix-tiedosto"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mt / {2} Mt)"
- self.CPU_USAGE_FORMAT = "CPU:n käyttö: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Vahvistetaan kvantisointisyötteet"
- self.MODELS_PATH_REQUIRED = "Mallien polku on pakollinen"
- self.OUTPUT_PATH_REQUIRED = "Tulostepolku on pakollinen"
- self.LOGS_PATH_REQUIRED = "Lokien polku on pakollinen"
- self.STARTING_MODEL_QUANTIZATION = "Aloitetaan mallin kvantisointi"
- self.INPUT_FILE_NOT_EXIST = "Syötetiedostoa '{0}' ei ole."
- self.QUANTIZING_MODEL_TO = "Kvantisoidaan mallia {0} muotoon {1}"
- self.QUANTIZATION_TASK_STARTED = "Kvantisointitehtävä käynnistetty kohteelle {0}"
- self.ERROR_STARTING_QUANTIZATION = "Virhe kvantisoinnin käynnistyksessä: {0}"
- self.UPDATING_MODEL_INFO = "Päivitetään mallitietoja: {0}"
- self.TASK_FINISHED = "Tehtävä valmis: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Näytetään tehtävän tiedot kohteelle: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Selaillaan IMatrix-datatiedostoa"
- self.SELECT_DATA_FILE = "Valitse datatiedosto"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Selaillaan IMatrix-mallitiedostoa"
- self.SELECT_MODEL_FILE = "Valitse mallitiedosto"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Selaillaan IMatrix-tulostetiedostoa"
- self.SELECT_OUTPUT_FILE = "Valitse tulostetiedosto"
- self.STARTING_IMATRIX_GENERATION = "Aloitetaan IMatrix-generointi"
- self.BACKEND_PATH_NOT_EXIST = "Taustaosan polkua ei ole: {0}"
- self.GENERATING_IMATRIX = "Generoidaan IMatrixia"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Virhe IMatrix-generoinnin käynnistyksessä: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-generointi käynnistetty"
- self.ERROR_MESSAGE = "Virhe: {0}"
- self.TASK_ERROR = "Tehtävävirhe: {0}"
- self.APPLICATION_CLOSING = "Sovellus suljetaan"
- self.APPLICATION_CLOSED = "Sovellus suljettu"
- self.SELECT_QUANTIZATION_TYPE = "Valitse kvantisointityyppi"
- self.ALLOWS_REQUANTIZING = "Sallii jo kvantisoitujen tensoreiden uudelleenkvantisoinnin"
- self.LEAVE_OUTPUT_WEIGHT = "Jättää output.weight-tensorin (uudelleen)kvantisoimatta"
- self.DISABLE_K_QUANT_MIXTURES = "Poista käytöstä k-kvanttisekoitukset ja kvantisoi kaikki tensorit samaan tyyppiin"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Käytä tiedoston tietoja kvantisoinnin optimoinnin tärkeysmatriisina"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Käytä tärkeysmatriisia näille tensoreille"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Älä käytä tärkeysmatriisia näille tensoreille"
- self.OUTPUT_TENSOR_TYPE = "Tulostensorin tyyppi:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Käytä tätä tyyppiä output.weight-tensorille"
- self.TOKEN_EMBEDDING_TYPE = "Token-upotustyyppi:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Käytä tätä tyyppiä token-upotustensorille"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Generoi kvantisoidun mallin samoihin osiin kuin syöte"
- self.OVERRIDE_MODEL_METADATA = "Ohita mallitiedot"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix-generoinnin syötedatatiedosto"
- self.MODEL_TO_BE_QUANTIZED = "Kvantisoitava malli"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Generoidun IMatrixin tulostepolku"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Kuinka usein IMatrix tallennetaan"
- self.SET_GPU_OFFLOAD_VALUE = "Aseta GPU-kuormansiirron arvo (-ngl)"
- self.COMPLETED = "Valmis"
- self.REFRESH_MODELS = "Päivitä mallit"
-
-class _Bengali(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (স্বয়ংক্রিয় GGUF মডেল কোয়ান্টাইজার)"
- self.RAM_USAGE = "RAM ব্যবহার:"
- self.CPU_USAGE = "CPU ব্যবহার:"
- self.BACKEND = "Llama.cpp ব্যাকএন্ড:"
- self.REFRESH_BACKENDS = "ব্যাকএন্ড রিফ্রেশ করুন"
- self.MODELS_PATH = "মডেল পাথ:"
- self.OUTPUT_PATH = "আউটপুট পাথ:"
- self.LOGS_PATH = "লগ পাথ:"
- self.BROWSE = "ব্রাউজ করুন"
- self.AVAILABLE_MODELS = "উপলব্ধ মডেল:"
- self.QUANTIZATION_TYPE = "কোয়ান্টাইজেশন ধরণ:"
- self.ALLOW_REQUANTIZE = "পুনরায় কোয়ান্টাইজ করার অনুমতি দিন"
- self.LEAVE_OUTPUT_TENSOR = "আউটপুট টেন্সর রেখে দিন"
- self.PURE = "বিশুদ্ধ"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "ওজন অন্তর্ভুক্ত করুন:"
- self.EXCLUDE_WEIGHTS = "ওজন বাদ দিন:"
- self.USE_OUTPUT_TENSOR_TYPE = "আউটপুট টেন্সর ধরণ ব্যবহার করুন"
- self.USE_TOKEN_EMBEDDING_TYPE = "টোকেন এম্বেডিং ধরণ ব্যবহার করুন"
- self.KEEP_SPLIT = "বিভাজন রাখুন"
- self.KV_OVERRIDES = "KV ওভাররাইড:"
- self.ADD_NEW_OVERRIDE = "নতুন ওভাররাইড যুক্ত করুন"
- self.QUANTIZE_MODEL = "মডেল কোয়ান্টাইজ করুন"
- self.SAVE_PRESET = "প্রিসেট সংরক্ষণ করুন"
- self.LOAD_PRESET = "প্রিসেট লোড করুন"
- self.TASKS = "কার্য:"
- self.DOWNLOAD_LLAMACPP = "llama.cpp ডাউনলোড করুন"
- self.SELECT_RELEASE = "রিলিজ নির্বাচন করুন:"
- self.SELECT_ASSET = "অ্যাসেট নির্বাচন করুন:"
- self.EXTRACT_CUDA_FILES = "CUDA ফাইলগুলি বের করুন"
- self.SELECT_CUDA_BACKEND = "CUDA ব্যাকএন্ড নির্বাচন করুন:"
- self.DOWNLOAD = "ডাউনলোড করুন"
- self.IMATRIX_GENERATION = "IMatrix জেনারেশন"
- self.DATA_FILE = "ডেটা ফাইল:"
- self.MODEL = "মডেল:"
- self.OUTPUT = "আউটপুট:"
- self.OUTPUT_FREQUENCY = "আউটপুট ফ্রিকোয়েন্সি:"
- self.GPU_OFFLOAD = "GPU অফলোড:"
- self.AUTO = "স্বয়ংক্রিয়"
- self.GENERATE_IMATRIX = "IMatrix তৈরি করুন"
- self.ERROR = "ত্রুটি"
- self.WARNING = "সতর্কীকরণ"
- self.PROPERTIES = "বৈশিষ্ট্য"
- self.CANCEL = "বাতিল করুন"
- self.RESTART = "পুনরায় আরম্ভ করুন"
- self.DELETE = "মুছে ফেলুন"
- self.CONFIRM_DELETION = "আপনি কি নিশ্চিত যে আপনি এই কাজটি মুছে ফেলতে চান?"
- self.TASK_RUNNING_WARNING = "কিছু কাজ এখনও চলছে। আপনি কি নিশ্চিত যে আপনি প্রস্থান করতে চান?"
- self.YES = "হ্যাঁ"
- self.NO = "না"
- self.DOWNLOAD_COMPLETE = "ডাউনলোড সম্পন্ন"
- self.CUDA_EXTRACTION_FAILED = "CUDA এক্সট্র্যাকশন ব্যর্থ"
- self.PRESET_SAVED = "প্রিসেট সংরক্ষিত"
- self.PRESET_LOADED = "প্রিসেট লোড করা হয়েছে"
- self.NO_ASSET_SELECTED = "কোন অ্যাসেট নির্বাচন করা হয়নি"
- self.DOWNLOAD_FAILED = "ডাউনলোড ব্যর্থ"
- self.NO_BACKEND_SELECTED = "কোন ব্যাকএন্ড নির্বাচন করা হয়নি"
- self.NO_MODEL_SELECTED = "কোন মডেল নির্বাচন করা হয়নি"
- self.REFRESH_RELEASES = "রিলিজগুলি রিফ্রেশ করুন"
- self.NO_SUITABLE_CUDA_BACKENDS = "কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে\nCUDA ফাইলগুলি {1} এ বের করা হয়েছে"
- self.CUDA_FILES_EXTRACTED = "CUDA ফাইলগুলি তে বের করা হয়েছে"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "এক্সট্র্যাকশনের জন্য কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি"
- self.ERROR_FETCHING_RELEASES = "রিলিজগুলি আনতে ত্রুটি: {0}"
- self.CONFIRM_DELETION_TITLE = "মুছে ফেলা নিশ্চিত করুন"
- self.LOG_FOR = "{0} এর জন্য লগ"
- self.ALL_FILES = "সমস্ত ফাইল (*)"
- self.GGUF_FILES = "GGUF ফাইল (*.gguf)"
- self.DAT_FILES = "DAT ফাইল (*.dat)"
- self.JSON_FILES = "JSON ফাইল (*.json)"
- self.FAILED_LOAD_PRESET = "প্রিসেট লোড করতে ব্যর্থ: {0}"
- self.INITIALIZING_AUTOGGUF = "AutoGGUF অ্যাপ্লিকেশন শুরু হচ্ছে"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF ইনিশিয়ালাইজেশন সম্পন্ন"
- self.REFRESHING_BACKENDS = "ব্যাকএন্ডগুলি রিফ্রেশ করা হচ্ছে"
- self.NO_BACKENDS_AVAILABLE = "কোন ব্যাকএন্ড উপলব্ধ নেই"
- self.FOUND_VALID_BACKENDS = "{0} টি বৈধ ব্যাকএন্ড পাওয়া গেছে"
- self.SAVING_PRESET = "প্রিসেট সংরক্ষণ করা হচ্ছে"
- self.PRESET_SAVED_TO = "{0} এ প্রিসেট সংরক্ষিত"
- self.LOADING_PRESET = "প্রিসেট লোড করা হচ্ছে"
- self.PRESET_LOADED_FROM = "{0} থেকে প্রিসেট লোড করা হয়েছে"
- self.ADDING_KV_OVERRIDE = "KV ওভাররাইড যুক্ত করা হচ্ছে: {0}"
- self.SAVING_TASK_PRESET = "{0} এর জন্য টাস্ক প্রিসেট সংরক্ষণ করা হচ্ছে"
- self.TASK_PRESET_SAVED = "টাস্ক প্রিসেট সংরক্ষিত"
- self.TASK_PRESET_SAVED_TO = "{0} এ টাস্ক প্রিসেট সংরক্ষিত"
- self.RESTARTING_TASK = "টাস্ক পুনরায় শুরু করা হচ্ছে: {0}"
- self.IN_PROGRESS = "চলছে"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "ডাউনলোড সম্পন্ন। বের করা হয়েছে: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে\nCUDA ফাইলগুলি {1} এ বের করা হয়েছে"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "এক্সট্র্যাকশনের জন্য কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে"
- self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp রিলিজগুলি রিফ্রেশ করা হচ্ছে"
- self.UPDATING_ASSET_LIST = "অ্যাসেট তালিকা আপডেট করা হচ্ছে"
- self.UPDATING_CUDA_OPTIONS = "CUDA অপশনগুলি আপডেট করা হচ্ছে"
- self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp ডাউনলোড শুরু করা হচ্ছে"
- self.UPDATING_CUDA_BACKENDS = "CUDA ব্যাকএন্ডগুলি আপডেট করা হচ্ছে"
- self.NO_CUDA_BACKEND_SELECTED = "এক্সট্র্যাকশনের জন্য কোন CUDA ব্যাকএন্ড নির্বাচন করা হয়নি"
- self.EXTRACTING_CUDA_FILES = "{0} থেকে {1} এ CUDA ফাইলগুলি বের করা হচ্ছে"
- self.DOWNLOAD_ERROR = "ডাউনলোড ত্রুটি: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "টাস্ক কনটেক্সট মেনু দেখানো হচ্ছে"
- self.SHOWING_PROPERTIES_FOR_TASK = "টাস্কের জন্য বৈশিষ্ট্য দেখানো হচ্ছে: {0}"
- self.CANCELLING_TASK = "টাস্ক বাতিল করা হচ্ছে: {0}"
- self.CANCELED = "বাতিল করা হয়েছে"
- self.DELETING_TASK = "টাস্ক মুছে ফেলা হচ্ছে: {0}"
- self.LOADING_MODELS = "মডেলগুলি লোড করা হচ্ছে"
- self.LOADED_MODELS = "{0} টি মডেল লোড করা হয়েছে"
- self.BROWSING_FOR_MODELS_DIRECTORY = "মডেল ডিরেক্টরি ব্রাউজ করা হচ্ছে"
- self.SELECT_MODELS_DIRECTORY = "মডেল ডিরেক্টরি নির্বাচন করুন"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "আউটপুট ডিরেক্টরি ব্রাউজ করা হচ্ছে"
- self.SELECT_OUTPUT_DIRECTORY = "আউটপুট ডিরেক্টরি নির্বাচন করুন"
- self.BROWSING_FOR_LOGS_DIRECTORY = "লগ ডিরেক্টরি ব্রাউজ করা হচ্ছে"
- self.SELECT_LOGS_DIRECTORY = "লগ ডিরেক্টরি নির্বাচন করুন"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrix ফাইল ব্রাউজ করা হচ্ছে"
- self.SELECT_IMATRIX_FILE = "IMatrix ফাইল নির্বাচন করুন"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU ব্যবহার: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "কোয়ান্টাইজেশন ইনপুট যাচাই করা হচ্ছে"
- self.MODELS_PATH_REQUIRED = "মডেল পাথ প্রয়োজন"
- self.OUTPUT_PATH_REQUIRED = "আউটপুট পাথ প্রয়োজন"
- self.LOGS_PATH_REQUIRED = "লগ পাথ প্রয়োজন"
- self.STARTING_MODEL_QUANTIZATION = "মডেল কোয়ান্টাইজেশন শুরু হচ্ছে"
- self.INPUT_FILE_NOT_EXIST = "ইনপুট ফাইল '{0}' বিদ্যমান নেই।"
- self.QUANTIZING_MODEL_TO = "{0} কে {1} এ কোয়ান্টাইজ করা হচ্ছে"
- self.QUANTIZATION_TASK_STARTED = "{0} এর জন্য কোয়ান্টাইজেশন টাস্ক শুরু হয়েছে"
- self.ERROR_STARTING_QUANTIZATION = "কোয়ান্টাইজেশন শুরু করতে ত্রুটি: {0}"
- self.UPDATING_MODEL_INFO = "মডেল তথ্য আপডেট করা হচ্ছে: {0}"
- self.TASK_FINISHED = "টাস্ক সম্পন্ন: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "এর জন্য টাস্কের বিবরণ দেখানো হচ্ছে: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix ডেটা ফাইল ব্রাউজ করা হচ্ছে"
- self.SELECT_DATA_FILE = "ডেটা ফাইল নির্বাচন করুন"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix মডেল ফাইল ব্রাউজ করা হচ্ছে"
- self.SELECT_MODEL_FILE = "মডেল ফাইল নির্বাচন করুন"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix আউটপুট ফাইল ব্রাউজ করা হচ্ছে"
- self.SELECT_OUTPUT_FILE = "আউটপুট ফাইল নির্বাচন করুন"
- self.STARTING_IMATRIX_GENERATION = "IMatrix জেনারেশন শুরু হচ্ছে"
- self.BACKEND_PATH_NOT_EXIST = "ব্যাকএন্ড পাথ বিদ্যমান নেই: {0}"
- self.GENERATING_IMATRIX = "IMatrix তৈরি করা হচ্ছে"
- self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix জেনারেশন শুরু করতে ত্রুটি: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix জেনারেশন টাস্ক শুরু হয়েছে"
- self.ERROR_MESSAGE = "ত্রুটি: {0}"
- self.TASK_ERROR = "টাস্ক ত্রুটি: {0}"
- self.APPLICATION_CLOSING = "অ্যাপ্লিকেশন বন্ধ করা হচ্ছে"
- self.APPLICATION_CLOSED = "অ্যাপ্লিকেশন বন্ধ"
- self.SELECT_QUANTIZATION_TYPE = "কোয়ান্টাইজেশন ধরণ নির্বাচন করুন"
- self.ALLOWS_REQUANTIZING = "যে টেন্সরগুলি ইতিমধ্যে কোয়ান্টাইজ করা হয়েছে তাদের পুনরায় কোয়ান্টাইজ করার অনুমতি দেয়"
- self.LEAVE_OUTPUT_WEIGHT = "output.weight কে (পুনরায়) কোয়ান্টাইজ না করে রেখে দেবে"
- self.DISABLE_K_QUANT_MIXTURES = "k-কোয়ান্ট মিশ্রণগুলি অক্ষম করুন এবং সমস্ত টেন্সরকে একই ধরণের কোয়ান্টাইজ করুন"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "কোয়ান্ট অপ্টিমাইজেশনের জন্য ফাইলের ডেটা গুরুত্বের ম্যাট্রিক্স হিসাবে ব্যবহার করুন"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "এই টেন্সরগুলির জন্য গুরুত্বের ম্যাট্রিক্স ব্যবহার করুন"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "এই টেন্সরগুলির জন্য গুরুত্বের ম্যাট্রিক্স ব্যবহার করবেন না"
- self.OUTPUT_TENSOR_TYPE = "আউটপুট টেন্সর ধরণ:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "output.weight টেন্সরের জন্য এই ধরণটি ব্যবহার করুন"
- self.TOKEN_EMBEDDING_TYPE = "টোকেন এম্বেডিং ধরণ:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "টোকেন এম্বেডিং টেন্সরের জন্য এই ধরণটি ব্যবহার করুন"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "ইনপুটের মতো একই শার্ডে কোয়ান্টাইজ করা মডেল তৈরি করবে"
- self.OVERRIDE_MODEL_METADATA = "মডেল মেটাডেটা ওভাররাইড করুন"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix জেনারেশনের জন্য ইনপুট ডেটা ফাইল"
- self.MODEL_TO_BE_QUANTIZED = "কোয়ান্টাইজ করার জন্য মডেল"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "তৈরি করা IMatrix এর জন্য আউটপুট পাথ"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix কতবার সংরক্ষণ করবেন"
- self.SET_GPU_OFFLOAD_VALUE = "GPU অফলোড মান সেট করুন (-ngl)"
- self.COMPLETED = "সম্পন্ন"
- self.REFRESH_MODELS = "মডেল রিফ্রেশ করুন"
-
-class _Polish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Automatyczny kwantyzator modeli GGUF)"
- self.RAM_USAGE = "Użycie pamięci RAM:"
- self.CPU_USAGE = "Użycie procesora:"
- self.BACKEND = "Backend Llama.cpp:"
- self.REFRESH_BACKENDS = "Odśwież backendy"
- self.MODELS_PATH = "Ścieżka modeli:"
- self.OUTPUT_PATH = "Ścieżka wyjściowa:"
- self.LOGS_PATH = "Ścieżka logów:"
- self.BROWSE = "Przeglądaj"
- self.AVAILABLE_MODELS = "Dostępne modele:"
- self.QUANTIZATION_TYPE = "Typ kwantyzacji:"
- self.ALLOW_REQUANTIZE = "Zezwól na ponowną kwantyzację"
- self.LEAVE_OUTPUT_TENSOR = "Pozostaw tensor wyjściowy"
- self.PURE = "Czysty"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Uwzględnij wagi:"
- self.EXCLUDE_WEIGHTS = "Wyklucz wagi:"
- self.USE_OUTPUT_TENSOR_TYPE = "Użyj typu tensora wyjściowego"
- self.USE_TOKEN_EMBEDDING_TYPE = "Użyj typu osadzania tokenów"
- self.KEEP_SPLIT = "Zachowaj podział"
- self.KV_OVERRIDES = "Nadpisania KV:"
- self.ADD_NEW_OVERRIDE = "Dodaj nowe nadpisanie"
- self.QUANTIZE_MODEL = "Kwantyzuj model"
- self.SAVE_PRESET = "Zapisz ustawienia predefiniowane"
- self.LOAD_PRESET = "Wczytaj ustawienia predefiniowane"
- self.TASKS = "Zadania:"
- self.DOWNLOAD_LLAMACPP = "Pobierz llama.cpp"
- self.SELECT_RELEASE = "Wybierz wersję:"
- self.SELECT_ASSET = "Wybierz zasób:"
- self.EXTRACT_CUDA_FILES = "Wyodrębnij pliki CUDA"
- self.SELECT_CUDA_BACKEND = "Wybierz backend CUDA:"
- self.DOWNLOAD = "Pobierz"
- self.IMATRIX_GENERATION = "Generowanie IMatrix"
- self.DATA_FILE = "Plik danych:"
- self.MODEL = "Model:"
- self.OUTPUT = "Wyjście:"
- self.OUTPUT_FREQUENCY = "Częstotliwość wyjścia:"
- self.GPU_OFFLOAD = "Odciążenie GPU:"
- self.AUTO = "Automatyczny"
- self.GENERATE_IMATRIX = "Generuj IMatrix"
- self.ERROR = "Błąd"
- self.WARNING = "Ostrzeżenie"
- self.PROPERTIES = "Właściwości"
- self.CANCEL = "Anuluj"
- self.RESTART = "Uruchom ponownie"
- self.DELETE = "Usuń"
- self.CONFIRM_DELETION = "Czy na pewno chcesz usunąć to zadanie?"
- self.TASK_RUNNING_WARNING = "Niektóre zadania są nadal uruchomione. Czy na pewno chcesz wyjść?"
- self.YES = "Tak"
- self.NO = "Nie"
- self.DOWNLOAD_COMPLETE = "Pobieranie zakończone"
- self.CUDA_EXTRACTION_FAILED = "Wyodrębnianie CUDA nie powiodło się"
- self.PRESET_SAVED = "Ustawienia predefiniowane zapisane"
- self.PRESET_LOADED = "Ustawienia predefiniowane wczytane"
- self.NO_ASSET_SELECTED = "Nie wybrano zasobu"
- self.DOWNLOAD_FAILED = "Pobieranie nie powiodło się"
- self.NO_BACKEND_SELECTED = "Nie wybrano backendu"
- self.NO_MODEL_SELECTED = "Nie wybrano modelu"
- self.REFRESH_RELEASES = "Odśwież wersje"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nie znaleziono odpowiednich backendów CUDA"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}\nPliki CUDA wyodrębnione do {1}"
- self.CUDA_FILES_EXTRACTED = "Pliki CUDA wyodrębnione do"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nie znaleziono odpowiedniego backendu CUDA do wyodrębnienia"
- self.ERROR_FETCHING_RELEASES = "Błąd podczas pobierania wersji: {0}"
- self.CONFIRM_DELETION_TITLE = "Potwierdź usunięcie"
- self.LOG_FOR = "Dziennik dla {0}"
- self.ALL_FILES = "Wszystkie pliki (*)"
- self.GGUF_FILES = "Pliki GGUF (*.gguf)"
- self.DAT_FILES = "Pliki DAT (*.dat)"
- self.JSON_FILES = "Pliki JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Nie udało się wczytać ustawień predefiniowanych: {0}"
- self.INITIALIZING_AUTOGGUF = "Inicjalizacja aplikacji AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicjalizacja AutoGGUF zakończona"
- self.REFRESHING_BACKENDS = "Odświeżanie backendów"
- self.NO_BACKENDS_AVAILABLE = "Brak dostępnych backendów"
- self.FOUND_VALID_BACKENDS = "Znaleziono {0} prawidłowych backendów"
- self.SAVING_PRESET = "Zapisywanie ustawień predefiniowanych"
- self.PRESET_SAVED_TO = "Ustawienia predefiniowane zapisane do {0}"
- self.LOADING_PRESET = "Wczytywanie ustawień predefiniowanych"
- self.PRESET_LOADED_FROM = "Ustawienia predefiniowane wczytane z {0}"
- self.ADDING_KV_OVERRIDE = "Dodawanie nadpisania KV: {0}"
- self.SAVING_TASK_PRESET = "Zapisywanie ustawień predefiniowanych zadania dla {0}"
- self.TASK_PRESET_SAVED = "Ustawienia predefiniowane zadania zapisane"
- self.TASK_PRESET_SAVED_TO = "Ustawienia predefiniowane zadania zapisane do {0}"
- self.RESTARTING_TASK = "Ponowne uruchamianie zadania: {0}"
- self.IN_PROGRESS = "W trakcie"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Pobieranie zakończone. Wyodrębniono do: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}\nPliki CUDA wyodrębnione do {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nie znaleziono odpowiedniego backendu CUDA do wyodrębnienia"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Odświeżanie wersji llama.cpp"
- self.UPDATING_ASSET_LIST = "Aktualizacja listy zasobów"
- self.UPDATING_CUDA_OPTIONS = "Aktualizacja opcji CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Rozpoczynanie pobierania llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Aktualizacja backendów CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Nie wybrano backendu CUDA do wyodrębnienia"
- self.EXTRACTING_CUDA_FILES = "Wyodrębnianie plików CUDA z {0} do {1}"
- self.DOWNLOAD_ERROR = "Błąd pobierania: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Wyświetlanie menu kontekstowego zadania"
- self.SHOWING_PROPERTIES_FOR_TASK = "Wyświetlanie właściwości zadania: {0}"
- self.CANCELLING_TASK = "Anulowanie zadania: {0}"
- self.CANCELED = "Anulowano"
- self.DELETING_TASK = "Usuwanie zadania: {0}"
- self.LOADING_MODELS = "Ładowanie modeli"
- self.LOADED_MODELS = "Załadowano {0} modeli"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Przeglądanie katalogu modeli"
- self.SELECT_MODELS_DIRECTORY = "Wybierz katalog modeli"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Przeglądanie katalogu wyjściowego"
- self.SELECT_OUTPUT_DIRECTORY = "Wybierz katalog wyjściowy"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Przeglądanie katalogu logów"
- self.SELECT_LOGS_DIRECTORY = "Wybierz katalog logów"
- self.BROWSING_FOR_IMATRIX_FILE = "Przeglądanie pliku IMatrix"
- self.SELECT_IMATRIX_FILE = "Wybierz plik IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Użycie procesora: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Walidacja danych wejściowych kwantyzacji"
- self.MODELS_PATH_REQUIRED = "Ścieżka modeli jest wymagana"
- self.OUTPUT_PATH_REQUIRED = "Ścieżka wyjściowa jest wymagana"
- self.LOGS_PATH_REQUIRED = "Ścieżka logów jest wymagana"
- self.STARTING_MODEL_QUANTIZATION = "Rozpoczynanie kwantyzacji modelu"
- self.INPUT_FILE_NOT_EXIST = "Plik wejściowy '{0}' nie istnieje."
- self.QUANTIZING_MODEL_TO = "Kwantyzacja {0} do {1}"
- self.QUANTIZATION_TASK_STARTED = "Zadanie kwantyzacji uruchomione dla {0}"
- self.ERROR_STARTING_QUANTIZATION = "Błąd podczas uruchamiania kwantyzacji: {0}"
- self.UPDATING_MODEL_INFO = "Aktualizacja informacji o modelu: {0}"
- self.TASK_FINISHED = "Zadanie zakończone: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Wyświetlanie szczegółów zadania dla: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Przeglądanie pliku danych IMatrix"
- self.SELECT_DATA_FILE = "Wybierz plik danych"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Przeglądanie pliku modelu IMatrix"
- self.SELECT_MODEL_FILE = "Wybierz plik modelu"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Przeglądanie pliku wyjściowego IMatrix"
- self.SELECT_OUTPUT_FILE = "Wybierz plik wyjściowy"
- self.STARTING_IMATRIX_GENERATION = "Rozpoczynanie generowania IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Ścieżka backendu nie istnieje: {0}"
- self.GENERATING_IMATRIX = "Generowanie IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Błąd podczas uruchamiania generowania IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Zadanie generowania IMatrix uruchomione"
- self.ERROR_MESSAGE = "Błąd: {0}"
- self.TASK_ERROR = "Błąd zadania: {0}"
- self.APPLICATION_CLOSING = "Zamykanie aplikacji"
- self.APPLICATION_CLOSED = "Aplikacja zamknięta"
- self.SELECT_QUANTIZATION_TYPE = "Wybierz typ kwantyzacji"
- self.ALLOWS_REQUANTIZING = "Pozwala na ponowną kwantyzację tensorów, które zostały już skwantyzowane"
- self.LEAVE_OUTPUT_WEIGHT = "Pozostawi output.weight nieskwantyzowany (lub nieskwantyzowany ponownie)"
- self.DISABLE_K_QUANT_MIXTURES = "Wyłącz mieszanki k-kwant i kwantyzuj wszystkie tensory do tego samego typu"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Użyj danych w pliku jako macierzy ważności dla optymalizacji kwantyzacji"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Użyj macierzy ważności dla tych tensorów"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Nie używaj macierzy ważności dla tych tensorów"
- self.OUTPUT_TENSOR_TYPE = "Typ tensora wyjściowego:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Użyj tego typu dla tensora output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Typ osadzania tokenów:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Użyj tego typu dla tensora osadzania tokenów"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Wygeneruje skwantyzowany model w tych samych fragmentach co dane wejściowe"
- self.OVERRIDE_MODEL_METADATA = "Zastąp metadane modelu"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Plik danych wejściowych do generowania IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Model do kwantyzacji"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Ścieżka wyjściowa dla wygenerowanego IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Jak często zapisywać IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Ustaw wartość odciążenia GPU (-ngl)"
- self.COMPLETED = "Ukończono"
- self.REFRESH_MODELS = "Odśwież modele"
-
-class _Romanian(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Cuantizator automat de modele GGUF)"
- self.RAM_USAGE = "Utilizare RAM:"
- self.CPU_USAGE = "Utilizare CPU:"
- self.BACKEND = "Backend Llama.cpp:"
- self.REFRESH_BACKENDS = "Reîmprospătați backends"
- self.MODELS_PATH = "Cale modele:"
- self.OUTPUT_PATH = "Cale ieșire:"
- self.LOGS_PATH = "Cale jurnale:"
- self.BROWSE = "Răsfoiți"
- self.AVAILABLE_MODELS = "Modele disponibile:"
- self.QUANTIZATION_TYPE = "Tipul de cuantizare:"
- self.ALLOW_REQUANTIZE = "Permiteți recuantizarea"
- self.LEAVE_OUTPUT_TENSOR = "Lăsați tensorul de ieșire"
- self.PURE = "Pur"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Includeți ponderile:"
- self.EXCLUDE_WEIGHTS = "Excludeți ponderile:"
- self.USE_OUTPUT_TENSOR_TYPE = "Utilizați tipul tensorului de ieșire"
- self.USE_TOKEN_EMBEDDING_TYPE = "Utilizați tipul de încorporare a tokenului"
- self.KEEP_SPLIT = "Păstrați divizarea"
- self.KV_OVERRIDES = "Suprascrieri KV:"
- self.ADD_NEW_OVERRIDE = "Adăugați o nouă suprascriere"
- self.QUANTIZE_MODEL = "Cuantizați modelul"
- self.SAVE_PRESET = "Salvați presetarea"
- self.LOAD_PRESET = "Încărcați presetarea"
- self.TASKS = "Sarcini:"
- self.DOWNLOAD_LLAMACPP = "Descărcați llama.cpp"
- self.SELECT_RELEASE = "Selectați versiunea:"
- self.SELECT_ASSET = "Selectați activul:"
- self.EXTRACT_CUDA_FILES = "Extrageți fișierele CUDA"
- self.SELECT_CUDA_BACKEND = "Selectați backend CUDA:"
- self.DOWNLOAD = "Descărcați"
- self.IMATRIX_GENERATION = "Generare IMatrix"
- self.DATA_FILE = "Fișier de date:"
- self.MODEL = "Model:"
- self.OUTPUT = "Ieșire:"
- self.OUTPUT_FREQUENCY = "Frecvența ieșirii:"
- self.GPU_OFFLOAD = "Descărcare GPU:"
- self.AUTO = "Automat"
- self.GENERATE_IMATRIX = "Generați IMatrix"
- self.ERROR = "Eroare"
- self.WARNING = "Avertisment"
- self.PROPERTIES = "Proprietăți"
- self.CANCEL = "Anulați"
- self.RESTART = "Reporniți"
- self.DELETE = "Ștergeți"
- self.CONFIRM_DELETION = "Sunteți sigur că doriți să ștergeți această sarcină?"
- self.TASK_RUNNING_WARNING = "Unele sarcini sunt încă în curs de execuție. Sunteți sigur că doriți să ieșiți?"
- self.YES = "Da"
- self.NO = "Nu"
- self.DOWNLOAD_COMPLETE = "Descărcare finalizată"
- self.CUDA_EXTRACTION_FAILED = "Extragerea CUDA a eșuat"
- self.PRESET_SAVED = "Presetare salvată"
- self.PRESET_LOADED = "Presetare încărcată"
- self.NO_ASSET_SELECTED = "Niciun activ selectat"
- self.DOWNLOAD_FAILED = "Descărcarea a eșuat"
- self.NO_BACKEND_SELECTED = "Niciun backend selectat"
- self.NO_MODEL_SELECTED = "Niciun model selectat"
- self.REFRESH_RELEASES = "Reîmprospătați versiunile"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nu s-au găsit backends CUDA potrivite"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}\nFișierele CUDA au fost extrase în {1}"
- self.CUDA_FILES_EXTRACTED = "Fișierele CUDA au fost extrase în"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nu s-a găsit un backend CUDA potrivit pentru extragere"
- self.ERROR_FETCHING_RELEASES = "Eroare la preluarea versiunilor: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirmați ștergerea"
- self.LOG_FOR = "Jurnal pentru {0}"
- self.ALL_FILES = "Toate fișierele (*)"
- self.GGUF_FILES = "Fișiere GGUF (*.gguf)"
- self.DAT_FILES = "Fișiere DAT (*.dat)"
- self.JSON_FILES = "Fișiere JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Nu s-a putut încărca presetarea: {0}"
- self.INITIALIZING_AUTOGGUF = "Inițializarea aplicației AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inițializarea AutoGGUF finalizată"
- self.REFRESHING_BACKENDS = "Reîmprospătarea backends"
- self.NO_BACKENDS_AVAILABLE = "Nu există backends disponibile"
- self.FOUND_VALID_BACKENDS = "S-au găsit {0} backends valide"
- self.SAVING_PRESET = "Salvarea presetării"
- self.PRESET_SAVED_TO = "Presetare salvată în {0}"
- self.LOADING_PRESET = "Încărcarea presetării"
- self.PRESET_LOADED_FROM = "Presetare încărcată din {0}"
- self.ADDING_KV_OVERRIDE = "Adăugarea suprascrierii KV: {0}"
- self.SAVING_TASK_PRESET = "Salvarea presetării sarcinii pentru {0}"
- self.TASK_PRESET_SAVED = "Presetare sarcină salvată"
- self.TASK_PRESET_SAVED_TO = "Presetare sarcină salvată în {0}"
- self.RESTARTING_TASK = "Repornirea sarcinii: {0}"
- self.IN_PROGRESS = "În curs"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Descărcare finalizată. Extras în: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}\nFișierele CUDA au fost extrase în {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nu s-a găsit un backend CUDA potrivit pentru extragere"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Reîmprospătarea versiunilor llama.cpp"
- self.UPDATING_ASSET_LIST = "Actualizarea listei de active"
- self.UPDATING_CUDA_OPTIONS = "Actualizarea opțiunilor CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Începerea descărcării llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Actualizarea backends CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Niciun backend CUDA selectat pentru extragere"
- self.EXTRACTING_CUDA_FILES = "Extragerea fișierelor CUDA din {0} în {1}"
- self.DOWNLOAD_ERROR = "Eroare de descărcare: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Afișarea meniului contextual al sarcinii"
- self.SHOWING_PROPERTIES_FOR_TASK = "Afișarea proprietăților pentru sarcina: {0}"
- self.CANCELLING_TASK = "Anularea sarcinii: {0}"
- self.CANCELED = "Anulat"
- self.DELETING_TASK = "Ștergerea sarcinii: {0}"
- self.LOADING_MODELS = "Încărcarea modelelor"
- self.LOADED_MODELS = "{0} modele încărcate"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Răsfoirea directorului de modele"
- self.SELECT_MODELS_DIRECTORY = "Selectați directorul de modele"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Răsfoirea directorului de ieșire"
- self.SELECT_OUTPUT_DIRECTORY = "Selectați directorul de ieșire"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Răsfoirea directorului de jurnale"
- self.SELECT_LOGS_DIRECTORY = "Selectați directorul de jurnale"
- self.BROWSING_FOR_IMATRIX_FILE = "Răsfoirea fișierului IMatrix"
- self.SELECT_IMATRIX_FILE = "Selectați fișierul IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Utilizare CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validarea intrărilor de cuantizare"
- self.MODELS_PATH_REQUIRED = "Calea modelelor este obligatorie"
- self.OUTPUT_PATH_REQUIRED = "Calea ieșirii este obligatorie"
- self.LOGS_PATH_REQUIRED = "Calea jurnalelor este obligatorie"
- self.STARTING_MODEL_QUANTIZATION = "Pornirea cuantizării modelului"
- self.INPUT_FILE_NOT_EXIST = "Fișierul de intrare '{0}' nu există."
- self.QUANTIZING_MODEL_TO = "Cuantizarea {0} la {1}"
- self.QUANTIZATION_TASK_STARTED = "Sarcina de cuantizare a fost pornită pentru {0}"
- self.ERROR_STARTING_QUANTIZATION = "Eroare la pornirea cuantizării: {0}"
- self.UPDATING_MODEL_INFO = "Actualizarea informațiilor despre model: {0}"
- self.TASK_FINISHED = "Sarcină finalizată: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Afișarea detaliilor sarcinii pentru: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Răsfoirea fișierului de date IMatrix"
- self.SELECT_DATA_FILE = "Selectați fișierul de date"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Răsfoirea fișierului de model IMatrix"
- self.SELECT_MODEL_FILE = "Selectați fișierul model"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Răsfoirea fișierului de ieșire IMatrix"
- self.SELECT_OUTPUT_FILE = "Selectați fișierul de ieșire"
- self.STARTING_IMATRIX_GENERATION = "Pornirea generării IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Calea backendului nu există: {0}"
- self.GENERATING_IMATRIX = "Generarea IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Eroare la pornirea generării IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Sarcina de generare IMatrix a fost pornită"
- self.ERROR_MESSAGE = "Eroare: {0}"
- self.TASK_ERROR = "Eroare de sarcină: {0}"
- self.APPLICATION_CLOSING = "Închiderea aplicației"
- self.APPLICATION_CLOSED = "Aplicație închisă"
- self.SELECT_QUANTIZATION_TYPE = "Selectați tipul de cuantizare"
- self.ALLOWS_REQUANTIZING = "Permite recuantizarea tensorilor care au fost deja cuantizați"
- self.LEAVE_OUTPUT_WEIGHT = "Va lăsa output.weight necuantizat (sau nerecuantizat)"
- self.DISABLE_K_QUANT_MIXTURES = "Dezactivați mixurile k-quant și cuantizați toți tensorii la același tip"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilizați datele din fișier ca matrice de importanță pentru optimizările de cuantizare"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Utilizați matricea de importanță pentru acești tensori"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Nu utilizați matricea de importanță pentru acești tensori"
- self.OUTPUT_TENSOR_TYPE = "Tipul tensorului de ieșire:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Utilizați acest tip pentru tensorul output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Tipul de încorporare a tokenului:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Utilizați acest tip pentru tensorul de încorporări ale tokenului"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Va genera modelul cuantizat în aceleași fragmente ca și intrarea"
- self.OVERRIDE_MODEL_METADATA = "Suprascrieți metadatele modelului"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Fișier de date de intrare pentru generarea IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modelul de cuantizat"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Calea de ieșire pentru IMatrix generat"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Cât de des să salvați IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Setați valoarea de descărcare GPU (-ngl)"
- self.COMPLETED = "Finalizat"
- self.REFRESH_MODELS = "Reîmprospătați modelele"
-
-class _Czech(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Automatický kvantizátor modelů GGUF)"
- self.RAM_USAGE = "Využití RAM:"
- self.CPU_USAGE = "Využití CPU:"
- self.BACKEND = "Backend Llama.cpp:"
- self.REFRESH_BACKENDS = "Obnovit backendy"
- self.MODELS_PATH = "Cesta k modelům:"
- self.OUTPUT_PATH = "Výstupní cesta:"
- self.LOGS_PATH = "Cesta k logům:"
- self.BROWSE = "Procházet"
- self.AVAILABLE_MODELS = "Dostupné modely:"
- self.QUANTIZATION_TYPE = "Typ kvantizace:"
- self.ALLOW_REQUANTIZE = "Povolit rekvantizaci"
- self.LEAVE_OUTPUT_TENSOR = "Ponechat výstupní tenzor"
- self.PURE = "Čistý"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Zahrnout váhy:"
- self.EXCLUDE_WEIGHTS = "Vyloučit váhy:"
- self.USE_OUTPUT_TENSOR_TYPE = "Použít typ výstupního tenzoru"
- self.USE_TOKEN_EMBEDDING_TYPE = "Použít typ vkládání tokenů"
- self.KEEP_SPLIT = "Zachovat rozdělení"
- self.KV_OVERRIDES = "Přepsání KV:"
- self.ADD_NEW_OVERRIDE = "Přidat nové přepsání"
- self.QUANTIZE_MODEL = "Kvantizovat model"
- self.SAVE_PRESET = "Uložit předvolbu"
- self.LOAD_PRESET = "Načíst předvolbu"
- self.TASKS = "Úkoly:"
- self.DOWNLOAD_LLAMACPP = "Stáhnout llama.cpp"
- self.SELECT_RELEASE = "Vybrat verzi:"
- self.SELECT_ASSET = "Vybrat aktivum:"
- self.EXTRACT_CUDA_FILES = "Extrahovat soubory CUDA"
- self.SELECT_CUDA_BACKEND = "Vybrat backend CUDA:"
- self.DOWNLOAD = "Stáhnout"
- self.IMATRIX_GENERATION = "Generování IMatrix"
- self.DATA_FILE = "Datový soubor:"
- self.MODEL = "Model:"
- self.OUTPUT = "Výstup:"
- self.OUTPUT_FREQUENCY = "Frekvence výstupu:"
- self.GPU_OFFLOAD = "Odlehčení GPU:"
- self.AUTO = "Automaticky"
- self.GENERATE_IMATRIX = "Generovat IMatrix"
- self.ERROR = "Chyba"
- self.WARNING = "Varování"
- self.PROPERTIES = "Vlastnosti"
- self.CANCEL = "Zrušit"
- self.RESTART = "Restartovat"
- self.DELETE = "Smazat"
- self.CONFIRM_DELETION = "Jste si jisti, že chcete smazat tento úkol?"
- self.TASK_RUNNING_WARNING = "Některé úkoly stále běží. Jste si jisti, že chcete ukončit?"
- self.YES = "Ano"
- self.NO = "Ne"
- self.DOWNLOAD_COMPLETE = "Stahování dokončeno"
- self.CUDA_EXTRACTION_FAILED = "Extrahování CUDA se nezdařilo"
- self.PRESET_SAVED = "Předvolba uložena"
- self.PRESET_LOADED = "Předvolba načtena"
- self.NO_ASSET_SELECTED = "Nebylo vybráno žádné aktivum"
- self.DOWNLOAD_FAILED = "Stahování se nezdařilo"
- self.NO_BACKEND_SELECTED = "Nebyl vybrán žádný backend"
- self.NO_MODEL_SELECTED = "Nebyl vybrán žádný model"
- self.REFRESH_RELEASES = "Obnovit verze"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nebyly nalezeny žádné vhodné backendy CUDA"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}\nSoubory CUDA extrahovány do {1}"
- self.CUDA_FILES_EXTRACTED = "Soubory CUDA extrahovány do"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nebyl nalezen žádný vhodný backend CUDA pro extrakci"
- self.ERROR_FETCHING_RELEASES = "Chyba při načítání verzí: {0}"
- self.CONFIRM_DELETION_TITLE = "Potvrdit smazání"
- self.LOG_FOR = "Log pro {0}"
- self.ALL_FILES = "Všechny soubory (*)"
- self.GGUF_FILES = "Soubory GGUF (*.gguf)"
- self.DAT_FILES = "Soubory DAT (*.dat)"
- self.JSON_FILES = "Soubory JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Nepodařilo se načíst předvolbu: {0}"
- self.INITIALIZING_AUTOGGUF = "Inicializace aplikace AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicializace AutoGGUF dokončena"
- self.REFRESHING_BACKENDS = "Obnovování backendů"
- self.NO_BACKENDS_AVAILABLE = "Žádné dostupné backendy"
- self.FOUND_VALID_BACKENDS = "Nalezeno {0} platných backendů"
- self.SAVING_PRESET = "Ukládání předvolby"
- self.PRESET_SAVED_TO = "Předvolba uložena do {0}"
- self.LOADING_PRESET = "Načítání předvolby"
- self.PRESET_LOADED_FROM = "Předvolba načtena z {0}"
- self.ADDING_KV_OVERRIDE = "Přidávání přepsání KV: {0}"
- self.SAVING_TASK_PRESET = "Ukládání předvolby úkolu pro {0}"
- self.TASK_PRESET_SAVED = "Předvolba úkolu uložena"
- self.TASK_PRESET_SAVED_TO = "Předvolba úkolu uložena do {0}"
- self.RESTARTING_TASK = "Restartování úkolu: {0}"
- self.IN_PROGRESS = "Probíhá"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Stahování dokončeno. Extrahováno do: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}\nSoubory CUDA extrahovány do {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nebyl nalezen žádný vhodný backend CUDA pro extrakci"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Obnovování verzí llama.cpp"
- self.UPDATING_ASSET_LIST = "Aktualizace seznamu aktiv"
- self.UPDATING_CUDA_OPTIONS = "Aktualizace možností CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Zahájení stahování llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Aktualizace backendů CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Nebyl vybrán žádný backend CUDA pro extrakci"
- self.EXTRACTING_CUDA_FILES = "Extrahování souborů CUDA z {0} do {1}"
- self.DOWNLOAD_ERROR = "Chyba stahování: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Zobrazení kontextové nabídky úkolu"
- self.SHOWING_PROPERTIES_FOR_TASK = "Zobrazení vlastností úkolu: {0}"
- self.CANCELLING_TASK = "Zrušení úkolu: {0}"
- self.CANCELED = "Zrušeno"
- self.DELETING_TASK = "Mazání úkolu: {0}"
- self.LOADING_MODELS = "Načítání modelů"
- self.LOADED_MODELS = "Načteno {0} modelů"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Procházení adresáře modelů"
- self.SELECT_MODELS_DIRECTORY = "Vyberte adresář modelů"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Procházení výstupního adresáře"
- self.SELECT_OUTPUT_DIRECTORY = "Vyberte výstupní adresář"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Procházení adresáře logů"
- self.SELECT_LOGS_DIRECTORY = "Vyberte adresář logů"
- self.BROWSING_FOR_IMATRIX_FILE = "Procházení souboru IMatrix"
- self.SELECT_IMATRIX_FILE = "Vyberte soubor IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Využití CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Ověřování vstupů kvantizace"
- self.MODELS_PATH_REQUIRED = "Cesta k modelům je vyžadována"
- self.OUTPUT_PATH_REQUIRED = "Výstupní cesta je vyžadována"
- self.LOGS_PATH_REQUIRED = "Cesta k logům je vyžadována"
- self.STARTING_MODEL_QUANTIZATION = "Spuštění kvantizace modelu"
- self.INPUT_FILE_NOT_EXIST = "Vstupní soubor '{0}' neexistuje."
- self.QUANTIZING_MODEL_TO = "Kvantizace {0} na {1}"
- self.QUANTIZATION_TASK_STARTED = "Úkol kvantizace spuštěn pro {0}"
- self.ERROR_STARTING_QUANTIZATION = "Chyba při spuštění kvantizace: {0}"
- self.UPDATING_MODEL_INFO = "Aktualizace informací o modelu: {0}"
- self.TASK_FINISHED = "Úkol dokončen: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Zobrazení detailů úkolu pro: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Procházení datového souboru IMatrix"
- self.SELECT_DATA_FILE = "Vyberte datový soubor"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Procházení souboru modelu IMatrix"
- self.SELECT_MODEL_FILE = "Vyberte soubor modelu"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Procházení výstupního souboru IMatrix"
- self.SELECT_OUTPUT_FILE = "Vyberte výstupní soubor"
- self.STARTING_IMATRIX_GENERATION = "Spuštění generování IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Cesta backendu neexistuje: {0}"
- self.GENERATING_IMATRIX = "Generování IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Chyba při spuštění generování IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Úkol generování IMatrix spuštěn"
- self.ERROR_MESSAGE = "Chyba: {0}"
- self.TASK_ERROR = "Chyba úkolu: {0}"
- self.APPLICATION_CLOSING = "Zavírání aplikace"
- self.APPLICATION_CLOSED = "Aplikace zavřena"
- self.SELECT_QUANTIZATION_TYPE = "Vyberte typ kvantizace"
- self.ALLOWS_REQUANTIZING = "Umožňuje rekvantizovat tenzory, které již byly kvantizovány"
- self.LEAVE_OUTPUT_WEIGHT = "Ponechá output.weight nekvantizovaný (nebo nerekvantizovaný)"
- self.DISABLE_K_QUANT_MIXTURES = "Zakázat k-kvantové směsi a kvantizovat všechny tenzory na stejný typ"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Použít data v souboru jako matici důležitosti pro optimalizace kvantizace"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Použít matici důležitosti pro tyto tenzory"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Nepoužívat matici důležitosti pro tyto tenzory"
- self.OUTPUT_TENSOR_TYPE = "Typ výstupního tenzoru:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Použít tento typ pro tenzor output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Typ vkládání tokenů:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Použít tento typ pro tenzor vkládání tokenů"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Vygeneruje kvantizovaný model ve stejných fragmentech jako vstup"
- self.OVERRIDE_MODEL_METADATA = "Přepsat metadata modelu"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Vstupní datový soubor pro generování IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Model, který má být kvantizován"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Výstupní cesta pro generovaný IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Jak často ukládat IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Nastavit hodnotu odlehčení GPU (-ngl)"
- self.COMPLETED = "Dokončeno"
- self.REFRESH_MODELS = "Obnovit modely"
-
-class _CanadianFrench(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Quantificateur automatique de modèles GGUF)"
- self.RAM_USAGE = "Utilisation de la RAM :" # Spacing
- self.CPU_USAGE = "Utilisation du CPU :" # Spacing
- self.BACKEND = "Moteur d'arrière-plan Llama.cpp :" # Spacing and terminology
- self.REFRESH_BACKENDS = "Actualiser les moteurs d'arrière-plan"
- self.MODELS_PATH = "Chemin des modèles :" # Spacing
- self.OUTPUT_PATH = "Chemin de sortie :" # Spacing
- self.LOGS_PATH = "Chemin des journaux :" # Spacing
- self.BROWSE = "Parcourir"
- self.AVAILABLE_MODELS = "Modèles disponibles :" # Spacing
- self.QUANTIZATION_TYPE = "Type de quantification :" # Spacing
- self.ALLOW_REQUANTIZE = "Autoriser la requantification"
- self.LEAVE_OUTPUT_TENSOR = "Laisser le tenseur de sortie"
- self.PURE = "Pur"
- self.IMATRIX = "IMatrix :" # Spacing
- self.INCLUDE_WEIGHTS = "Inclure les poids :" # Spacing
- self.EXCLUDE_WEIGHTS = "Exclure les poids :" # Spacing
- self.USE_OUTPUT_TENSOR_TYPE = "Utiliser le type de tenseur de sortie"
- self.USE_TOKEN_EMBEDDING_TYPE = "Utiliser le type d'intégration de jeton"
- self.KEEP_SPLIT = "Conserver la division"
- self.KV_OVERRIDES = "Remplacements KV :" # Spacing
- self.ADD_NEW_OVERRIDE = "Ajouter un nouveau remplacement"
- self.QUANTIZE_MODEL = "Quantifier le modèle"
- self.SAVE_PRESET = "Enregistrer le préréglage"
- self.LOAD_PRESET = "Charger le préréglage"
- self.TASKS = "Tâches :" # Spacing
- self.DOWNLOAD_LLAMACPP = "Télécharger llama.cpp"
- self.SELECT_RELEASE = "Sélectionner la version :" # Spacing
- self.SELECT_ASSET = "Sélectionner l'actif :" # Spacing
- self.EXTRACT_CUDA_FILES = "Extraire les fichiers CUDA"
- self.SELECT_CUDA_BACKEND = "Sélectionner le moteur d'arrière-plan CUDA :" # Spacing and terminology
- self.DOWNLOAD = "Télécharger"
- self.IMATRIX_GENERATION = "Génération d'IMatrix"
- self.DATA_FILE = "Fichier de données :" # Spacing
- self.MODEL = "Modèle :" # Spacing
- self.OUTPUT = "Sortie :" # Spacing
- self.OUTPUT_FREQUENCY = "Fréquence de sortie :" # Spacing
- self.GPU_OFFLOAD = "Déchargement GPU :" # Spacing
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Générer IMatrix"
- self.ERROR = "Erreur"
- self.WARNING = "Avertissement"
- self.PROPERTIES = "Propriétés"
- self.CANCEL = "Annuler"
- self.RESTART = "Redémarrer"
- self.DELETE = "Supprimer"
- self.CONFIRM_DELETION = "Êtes-vous sûr de vouloir supprimer cette tâche ?" # Spacing
- self.TASK_RUNNING_WARNING = "Certaines tâches sont encore en cours d'exécution. Êtes-vous sûr de vouloir quitter ?" # Spacing
- self.YES = "Oui"
- self.NO = "Non"
- self.DOWNLOAD_COMPLETE = "Téléchargement terminé"
- self.CUDA_EXTRACTION_FAILED = "Échec de l'extraction CUDA"
- self.PRESET_SAVED = "Préréglage enregistré"
- self.PRESET_LOADED = "Préréglage chargé"
- self.NO_ASSET_SELECTED = "Aucun actif sélectionné"
- self.DOWNLOAD_FAILED = "Échec du téléchargement"
- self.NO_BACKEND_SELECTED = "Aucun moteur d'arrière-plan sélectionné"
- self.NO_MODEL_SELECTED = "Aucun modèle sélectionné"
- self.REFRESH_RELEASES = "Actualiser les versions"
- self.NO_SUITABLE_CUDA_BACKENDS = "Aucun moteur d'arrière-plan CUDA approprié trouvé"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}\nLes fichiers CUDA ont été extraits dans {1}"
- self.CUDA_FILES_EXTRACTED = "Les fichiers CUDA ont été extraits dans"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Aucun moteur d'arrière-plan CUDA approprié trouvé pour l'extraction"
- self.ERROR_FETCHING_RELEASES = "Erreur lors de la récupération des versions : {0}" # Spacing
- self.CONFIRM_DELETION_TITLE = "Confirmer la suppression"
- self.LOG_FOR = "Journal pour {0}"
- self.ALL_FILES = "Tous les fichiers (*)"
- self.GGUF_FILES = "Fichiers GGUF (*.gguf)"
- self.DAT_FILES = "Fichiers DAT (*.dat)"
- self.JSON_FILES = "Fichiers JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Échec du chargement du préréglage : {0}" # Spacing
- self.INITIALIZING_AUTOGGUF = "Initialisation de l'application AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Initialisation d'AutoGGUF terminée"
- self.REFRESHING_BACKENDS = "Actualisation des moteurs d'arrière-plan"
- self.NO_BACKENDS_AVAILABLE = "Aucun moteur d'arrière-plan disponible"
- self.FOUND_VALID_BACKENDS = "{0} moteurs d'arrière-plan valides trouvés"
- self.SAVING_PRESET = "Enregistrement du préréglage"
- self.PRESET_SAVED_TO = "Préréglage enregistré dans {0}"
- self.LOADING_PRESET = "Chargement du préréglage"
- self.PRESET_LOADED_FROM = "Préréglage chargé depuis {0}"
- self.ADDING_KV_OVERRIDE = "Ajout de remplacement KV : {0}" # Spacing
- self.SAVING_TASK_PRESET = "Enregistrement du préréglage de tâche pour {0}"
- self.TASK_PRESET_SAVED = "Préréglage de tâche enregistré"
- self.TASK_PRESET_SAVED_TO = "Préréglage de tâche enregistré dans {0}"
- self.RESTARTING_TASK = "Redémarrage de la tâche : {0}" # Spacing
- self.IN_PROGRESS = "En cours"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Téléchargement terminé. Extrait dans : {0}" # Spacing
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}\nLes fichiers CUDA ont été extraits dans {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Aucun moteur d'arrière-plan CUDA approprié trouvé pour l'extraction"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Actualisation des versions de llama.cpp"
- self.UPDATING_ASSET_LIST = "Mise à jour de la liste des actifs"
- self.UPDATING_CUDA_OPTIONS = "Mise à jour des options CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Démarrage du téléchargement de llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Mise à jour des moteurs d'arrière-plan CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Aucun moteur d'arrière-plan CUDA sélectionné pour l'extraction"
- self.EXTRACTING_CUDA_FILES = "Extraction des fichiers CUDA de {0} vers {1}"
- self.DOWNLOAD_ERROR = "Erreur de téléchargement : {0}" # Spacing
- self.SHOWING_TASK_CONTEXT_MENU = "Affichage du menu contextuel de la tâche"
- self.SHOWING_PROPERTIES_FOR_TASK = "Affichage des propriétés de la tâche : {0}" # Spacing
- self.CANCELLING_TASK = "Annulation de la tâche : {0}" # Spacing
- self.CANCELED = "Annulée"
- self.DELETING_TASK = "Suppression de la tâche : {0}" # Spacing
- self.LOADING_MODELS = "Chargement des modèles"
- self.LOADED_MODELS = "{0} modèles chargés"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Navigation dans le répertoire des modèles"
- self.SELECT_MODELS_DIRECTORY = "Sélectionner le répertoire des modèles"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Navigation dans le répertoire de sortie"
- self.SELECT_OUTPUT_DIRECTORY = "Sélectionner le répertoire de sortie"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Navigation dans le répertoire des journaux"
- self.SELECT_LOGS_DIRECTORY = "Sélectionner le répertoire des journaux"
- self.BROWSING_FOR_IMATRIX_FILE = "Navigation dans le fichier IMatrix"
- self.SELECT_IMATRIX_FILE = "Sélectionner le fichier IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mo / {2} Mo)"
- self.CPU_USAGE_FORMAT = "Utilisation du CPU : {0:.1f}%" # Spacing
- self.VALIDATING_QUANTIZATION_INPUTS = "Validation des entrées de quantification"
- self.MODELS_PATH_REQUIRED = "Le chemin des modèles est requis"
- self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis"
- self.LOGS_PATH_REQUIRED = "Le chemin des journaux est requis"
- self.STARTING_MODEL_QUANTIZATION = "Démarrage de la quantification du modèle"
- self.INPUT_FILE_NOT_EXIST = "Le fichier d'entrée '{0}' n'existe pas."
- self.QUANTIZING_MODEL_TO = "Quantification de {0} en {1}"
- self.QUANTIZATION_TASK_STARTED = "Tâche de quantification démarrée pour {0}"
- self.ERROR_STARTING_QUANTIZATION = "Erreur lors du démarrage de la quantification : {0}" # Spacing
- self.UPDATING_MODEL_INFO = "Mise à jour des informations sur le modèle : {0}" # Spacing
- self.TASK_FINISHED = "Tâche terminée : {0}" # Spacing
- self.SHOWING_TASK_DETAILS_FOR = "Affichage des détails de la tâche pour : {0}" # Spacing
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Navigation dans le fichier de données IMatrix"
- self.SELECT_DATA_FILE = "Sélectionner le fichier de données"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Navigation dans le fichier de modèle IMatrix"
- self.SELECT_MODEL_FILE = "Sélectionner le fichier de modèle"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Navigation dans le fichier de sortie IMatrix"
- self.SELECT_OUTPUT_FILE = "Sélectionner le fichier de sortie"
- self.STARTING_IMATRIX_GENERATION = "Démarrage de la génération d'IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Le chemin du backend n'existe pas : {0}" # Spacing
- self.GENERATING_IMATRIX = "Génération d'IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Erreur lors du démarrage de la génération d'IMatrix : {0}" # Spacing
- self.IMATRIX_GENERATION_TASK_STARTED = "Tâche de génération d'IMatrix démarrée"
- self.ERROR_MESSAGE = "Erreur : {0}" # Spacing
- self.TASK_ERROR = "Erreur de tâche : {0}" # Spacing
- self.APPLICATION_CLOSING = "Fermeture de l'application"
- self.APPLICATION_CLOSED = "Application fermée"
- self.SELECT_QUANTIZATION_TYPE = "Sélectionnez le type de quantification"
- self.ALLOWS_REQUANTIZING = "Permet de requantifier les tenseurs qui ont déjà été quantifiés"
- self.LEAVE_OUTPUT_WEIGHT = "Laissera output.weight non (re)quantifié"
- self.DISABLE_K_QUANT_MIXTURES = "Désactiver les mélanges k-quant et quantifier tous les tenseurs du même type"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utiliser les données du fichier comme matrice d'importance pour les optimisations de quant"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Utiliser la matrice d'importance pour ces tenseurs"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Ne pas utiliser la matrice d'importance pour ces tenseurs"
- self.OUTPUT_TENSOR_TYPE = "Type de tenseur de sortie :" # Spacing
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Utiliser ce type pour le tenseur output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Type d'intégration de jeton :" # Spacing
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Utiliser ce type pour le tenseur d'intégration de jetons"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Générera le modèle quantifié dans les mêmes fragments que l'entrée"
- self.OVERRIDE_MODEL_METADATA = "Remplacer les métadonnées du modèle"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Fichier de données d'entrée pour la génération d'IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modèle à quantifier"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Chemin de sortie pour l'IMatrix généré"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Fréquence d'enregistrement de l'IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)"
- self.COMPLETED = "Terminée"
- self.REFRESH_MODELS = "Rafraîchir les modèles"
-
-class _Portuguese_PT(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Quantificador Automático de Modelos GGUF)"
- self.RAM_USAGE = "Utilização de RAM:"
- self.CPU_USAGE = "Utilização da CPU:"
- self.BACKEND = "Backend Llama.cpp:"
- self.REFRESH_BACKENDS = "Atualizar Backends"
- self.MODELS_PATH = "Caminho dos Modelos:"
- self.OUTPUT_PATH = "Caminho de Saída:"
- self.LOGS_PATH = "Caminho dos Logs:"
- self.BROWSE = "Navegar"
- self.AVAILABLE_MODELS = "Modelos Disponíveis:"
- self.QUANTIZATION_TYPE = "Tipo de Quantização:"
- self.ALLOW_REQUANTIZE = "Permitir Requantização"
- self.LEAVE_OUTPUT_TENSOR = "Manter Tensor de Saída"
- self.PURE = "Puro"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Incluir Pesos:"
- self.EXCLUDE_WEIGHTS = "Excluir Pesos:"
- self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Saída"
- self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incorporação de Token"
- self.KEEP_SPLIT = "Manter Divisão"
- self.KV_OVERRIDES = "Substituições KV:"
- self.ADD_NEW_OVERRIDE = "Adicionar Nova Substituição"
- self.QUANTIZE_MODEL = "Quantizar Modelo"
- self.SAVE_PRESET = "Guardar Predefinição"
- self.LOAD_PRESET = "Carregar Predefinição"
- self.TASKS = "Tarefas:"
- self.DOWNLOAD_LLAMACPP = "Descarregar llama.cpp"
- self.SELECT_RELEASE = "Selecionar Versão:"
- self.SELECT_ASSET = "Selecionar Ativo:"
- self.EXTRACT_CUDA_FILES = "Extrair Ficheiros CUDA"
- self.SELECT_CUDA_BACKEND = "Selecionar Backend CUDA:"
- self.DOWNLOAD = "Descarregar"
- self.IMATRIX_GENERATION = "Geração de IMatrix"
- self.DATA_FILE = "Ficheiro de Dados:"
- self.MODEL = "Modelo:"
- self.OUTPUT = "Saída:"
- self.OUTPUT_FREQUENCY = "Frequência de Saída:"
- self.GPU_OFFLOAD = "Offload da GPU:"
- self.AUTO = "Automático"
- self.GENERATE_IMATRIX = "Gerar IMatrix"
- self.ERROR = "Erro"
- self.WARNING = "Aviso"
- self.PROPERTIES = "Propriedades"
- self.CANCEL = "Cancelar"
- self.RESTART = "Reiniciar"
- self.DELETE = "Eliminar"
- self.CONFIRM_DELETION = "Tem a certeza de que pretende eliminar esta tarefa?"
- self.TASK_RUNNING_WARNING = "Algumas tarefas ainda estão em execução. Tem a certeza de que pretende sair?"
- self.YES = "Sim"
- self.NO = "Não"
- self.DOWNLOAD_COMPLETE = "Transferência Concluída"
- self.CUDA_EXTRACTION_FAILED = "Falha na Extração do CUDA"
- self.PRESET_SAVED = "Predefinição Guardada"
- self.PRESET_LOADED = "Predefinição Carregada"
- self.NO_ASSET_SELECTED = "Nenhum ativo selecionado"
- self.DOWNLOAD_FAILED = "Falha na transferência"
- self.NO_BACKEND_SELECTED = "Nenhum backend selecionado"
- self.NO_MODEL_SELECTED = "Nenhum modelo selecionado"
- self.REFRESH_RELEASES = "Atualizar Versões"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nenhum backend CUDA adequado encontrado"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}\nFicheiros CUDA extraídos para {1}"
- self.CUDA_FILES_EXTRACTED = "Ficheiros CUDA extraídos para"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nenhum backend CUDA adequado encontrado para extração"
- self.ERROR_FETCHING_RELEASES = "Erro ao obter versões: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirmar Eliminação"
- self.LOG_FOR = "Log para {0}"
- self.ALL_FILES = "Todos os Ficheiros (*)"
- self.GGUF_FILES = "Ficheiros GGUF (*.gguf)"
- self.DAT_FILES = "Ficheiros DAT (*.dat)"
- self.JSON_FILES = "Ficheiros JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Falha ao carregar a predefinição: {0}"
- self.INITIALIZING_AUTOGGUF = "A inicializar a aplicação AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialização do AutoGGUF concluída"
- self.REFRESHING_BACKENDS = "A atualizar backends"
- self.NO_BACKENDS_AVAILABLE = "Nenhum backend disponível"
- self.FOUND_VALID_BACKENDS = "{0} backends válidos encontrados"
- self.SAVING_PRESET = "A guardar predefinição"
- self.PRESET_SAVED_TO = "Predefinição guardada em {0}"
- self.LOADING_PRESET = "A carregar predefinição"
- self.PRESET_LOADED_FROM = "Predefinição carregada de {0}"
- self.ADDING_KV_OVERRIDE = "A adicionar substituição KV: {0}"
- self.SAVING_TASK_PRESET = "A guardar predefinição de tarefa para {0}"
- self.TASK_PRESET_SAVED = "Predefinição de Tarefa Guardada"
- self.TASK_PRESET_SAVED_TO = "Predefinição de tarefa guardada em {0}"
- self.RESTARTING_TASK = "A reiniciar tarefa: {0}"
- self.IN_PROGRESS = "Em Andamento"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Transferência concluída. Extraído para: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}\nFicheiros CUDA extraídos para {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nenhum backend CUDA adequado encontrado para extração"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "A atualizar versões do llama.cpp"
- self.UPDATING_ASSET_LIST = "A atualizar lista de ativos"
- self.UPDATING_CUDA_OPTIONS = "A atualizar opções CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "A iniciar transferência do llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "A atualizar backends CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Nenhum backend CUDA selecionado para extração"
- self.EXTRACTING_CUDA_FILES = "A extrair ficheiros CUDA de {0} para {1}"
- self.DOWNLOAD_ERROR = "Erro de transferência: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "A exibir menu de contexto da tarefa"
- self.SHOWING_PROPERTIES_FOR_TASK = "A exibir propriedades para a tarefa: {0}"
- self.CANCELLING_TASK = "A cancelar tarefa: {0}"
- self.CANCELED = "Cancelado"
- self.DELETING_TASK = "A eliminar tarefa: {0}"
- self.LOADING_MODELS = "A carregar modelos"
- self.LOADED_MODELS = "{0} modelos carregados"
- self.BROWSING_FOR_MODELS_DIRECTORY = "A navegar pelo diretório de modelos"
- self.SELECT_MODELS_DIRECTORY = "Selecionar Diretório de Modelos"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "A navegar pelo diretório de saída"
- self.SELECT_OUTPUT_DIRECTORY = "Selecionar Diretório de Saída"
- self.BROWSING_FOR_LOGS_DIRECTORY = "A navegar pelo diretório de logs"
- self.SELECT_LOGS_DIRECTORY = "Selecionar Diretório de Logs"
- self.BROWSING_FOR_IMATRIX_FILE = "A navegar pelo ficheiro IMatrix"
- self.SELECT_IMATRIX_FILE = "Selecionar Ficheiro IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Utilização da CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "A validar entradas de quantização"
- self.MODELS_PATH_REQUIRED = "O caminho dos modelos é obrigatório"
- self.OUTPUT_PATH_REQUIRED = "O caminho de saída é obrigatório"
- self.LOGS_PATH_REQUIRED = "O caminho dos logs é obrigatório"
- self.STARTING_MODEL_QUANTIZATION = "A iniciar a quantização do modelo"
- self.INPUT_FILE_NOT_EXIST = "O ficheiro de entrada '{0}' não existe."
- self.QUANTIZING_MODEL_TO = "A quantizar {0} para {1}"
- self.QUANTIZATION_TASK_STARTED = "Tarefa de quantização iniciada para {0}"
- self.ERROR_STARTING_QUANTIZATION = "Erro ao iniciar a quantização: {0}"
- self.UPDATING_MODEL_INFO = "A atualizar informações do modelo: {0}"
- self.TASK_FINISHED = "Tarefa concluída: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "A mostrar detalhes da tarefa para: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "A navegar pelo ficheiro de dados IMatrix"
- self.SELECT_DATA_FILE = "Selecionar Ficheiro de Dados"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "A navegar pelo ficheiro de modelo IMatrix"
- self.SELECT_MODEL_FILE = "Selecionar Ficheiro de Modelo"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "A navegar pelo ficheiro de saída IMatrix"
- self.SELECT_OUTPUT_FILE = "Selecionar Ficheiro de Saída"
- self.STARTING_IMATRIX_GENERATION = "A iniciar a geração de IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "O caminho do backend não existe: {0}"
- self.GENERATING_IMATRIX = "A gerar IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Erro ao iniciar a geração de IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Tarefa de geração de IMatrix iniciada"
- self.ERROR_MESSAGE = "Erro: {0}"
- self.TASK_ERROR = "Erro de tarefa: {0}"
- self.APPLICATION_CLOSING = "A fechar a aplicação"
- self.APPLICATION_CLOSED = "Aplicação fechada"
- self.SELECT_QUANTIZATION_TYPE = "Selecione o tipo de quantização"
- self.ALLOWS_REQUANTIZING = "Permite requantizar tensores que já foram quantizados"
- self.LEAVE_OUTPUT_WEIGHT = "Deixará output.weight não (re)quantizado"
- self.DISABLE_K_QUANT_MIXTURES = "Desativar misturas k-quant e quantizar todos os tensores para o mesmo tipo"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usar os dados no ficheiro como matriz de importância para otimizações de quantização"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Usar matriz de importância para estes tensores"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Não usar matriz de importância para estes tensores"
- self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Saída:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Usar este tipo para o tensor output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Tipo de Incorporação de Token:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Usar este tipo para o tensor de incorporações de token"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Irá gerar o modelo quantizado nos mesmos shards da entrada"
- self.OVERRIDE_MODEL_METADATA = "Substituir metadados do modelo"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Ficheiro de dados de entrada para geração de IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Modelo a ser quantizado"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Caminho de saída para o IMatrix gerado"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Com que frequência guardar o IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Definir valor de offload da GPU (-ngl)"
- self.COMPLETED = "Concluído"
- self.REFRESH_MODELS = "Atualizar modelos"
-
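The `{0}`/`{1}` placeholders in these strings are ordinary `str.format` fields (note the `{0:.1f}` spec in `RAM_USAGE_FORMAT`), so callers render them positionally. A minimal usage sketch, assuming the classes above are importable as-is:

```python
loc = _Portuguese_PT()

# Positional placeholders are filled with str.format:
print(loc.LOADED_MODELS.format(12))            # "12 modelos carregados"
print(loc.TASK_FINISHED.format("model.gguf"))  # "Tarefa concluída: model.gguf"

# Format specs inside the braces work too ({0:.1f} rounds to one decimal):
print(loc.RAM_USAGE_FORMAT.format(42.37, 1024, 2048))  # "42.4% (1024 MB / 2048 MB)"
```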
-class _Greek(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Αυτόματος Κβαντιστής Μοντέλων GGUF)"
- self.RAM_USAGE = "Χρήση RAM:"
- self.CPU_USAGE = "Χρήση CPU:"
- self.BACKEND = "Backend Llama.cpp:"
- self.REFRESH_BACKENDS = "Ανανέωση Backends"
- self.MODELS_PATH = "Διαδρομή Μοντέλων:"
- self.OUTPUT_PATH = "Διαδρομή Εξόδου:"
- self.LOGS_PATH = "Διαδρομή Αρχείων Καταγραφής:"
- self.BROWSE = "Περιήγηση"
- self.AVAILABLE_MODELS = "Διαθέσιμα Μοντέλα:"
- self.QUANTIZATION_TYPE = "Τύπος Κβαντισμού:"
- self.ALLOW_REQUANTIZE = "Να Επιτρέπεται η Επανακβάντιση"
- self.LEAVE_OUTPUT_TENSOR = "Διατήρηση Tensor Εξόδου"
- self.PURE = "Καθαρό"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Συμπερίληψη Βαρών:"
- self.EXCLUDE_WEIGHTS = "Εξαίρεση Βαρών:"
- self.USE_OUTPUT_TENSOR_TYPE = "Χρήση Τύπου Tensor Εξόδου"
- self.USE_TOKEN_EMBEDDING_TYPE = "Χρήση Τύπου Ενσωμάτωσης Token"
- self.KEEP_SPLIT = "Διατήρηση Διαίρεσης"
- self.KV_OVERRIDES = "Υπερβάσεις KV:"
- self.ADD_NEW_OVERRIDE = "Προσθήκη Νέας Υπέρβασης"
- self.QUANTIZE_MODEL = "Κβάντιση Μοντέλου"
- self.SAVE_PRESET = "Αποθήκευση Προεπιλογής"
- self.LOAD_PRESET = "Φόρτωση Προεπιλογής"
- self.TASKS = "Εργασίες:"
- self.DOWNLOAD_LLAMACPP = "Λήψη llama.cpp"
- self.SELECT_RELEASE = "Επιλογή Έκδοσης:"
- self.SELECT_ASSET = "Επιλογή Στοιχείου:"
- self.EXTRACT_CUDA_FILES = "Εξαγωγή Αρχείων CUDA"
- self.SELECT_CUDA_BACKEND = "Επιλογή Backend CUDA:"
- self.DOWNLOAD = "Λήψη"
- self.IMATRIX_GENERATION = "Δημιουργία IMatrix"
- self.DATA_FILE = "Αρχείο Δεδομένων:"
- self.MODEL = "Μοντέλο:"
- self.OUTPUT = "Έξοδος:"
- self.OUTPUT_FREQUENCY = "Συχνότητα Εξόδου:"
- self.GPU_OFFLOAD = "Εκφόρτωση GPU:"
- self.AUTO = "Αυτόματο"
- self.GENERATE_IMATRIX = "Δημιουργία IMatrix"
- self.ERROR = "Σφάλμα"
- self.WARNING = "Προειδοποίηση"
- self.PROPERTIES = "Ιδιότητες"
- self.CANCEL = "Ακύρωση"
- self.RESTART = "Επανεκκίνηση"
- self.DELETE = "Διαγραφή"
- self.CONFIRM_DELETION = "Είστε βέβαιοι ότι θέλετε να διαγράψετε αυτήν την εργασία;"
- self.TASK_RUNNING_WARNING = "Ορισμένες εργασίες εκτελούνται ακόμη. Είστε βέβαιοι ότι θέλετε να τερματίσετε;"
- self.YES = "Ναι"
- self.NO = "Όχι"
- self.DOWNLOAD_COMPLETE = "Η Λήψη Ολοκληρώθηκε"
- self.CUDA_EXTRACTION_FAILED = "Αποτυχία Εξαγωγής CUDA"
- self.PRESET_SAVED = "Η Προεπιλογή Αποθηκεύτηκε"
- self.PRESET_LOADED = "Η Προεπιλογή Φορτώθηκε"
- self.NO_ASSET_SELECTED = "Δεν Έχει Επιλεγεί Στοιχείο"
- self.DOWNLOAD_FAILED = "Αποτυχία Λήψης"
- self.NO_BACKEND_SELECTED = "Δεν Έχει Επιλεγεί Backend"
- self.NO_MODEL_SELECTED = "Δεν Έχει Επιλεγεί Μοντέλο"
- self.REFRESH_RELEASES = "Ανανέωση Εκδόσεων"
- self.NO_SUITABLE_CUDA_BACKENDS = "Δεν Βρέθηκαν Κατάλληλα Backends CUDA"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}\nΤα Αρχεία CUDA Εξήχθησαν στο {1}"
- self.CUDA_FILES_EXTRACTED = "Τα Αρχεία CUDA Εξήχθησαν στο"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Δεν Βρέθηκε Κατάλληλο Backend CUDA για Εξαγωγή"
- self.ERROR_FETCHING_RELEASES = "Σφάλμα κατά την Ανάκτηση Εκδόσεων: {0}"
- self.CONFIRM_DELETION_TITLE = "Επιβεβαίωση Διαγραφής"
- self.LOG_FOR = "Αρχείο Καταγραφής για {0}"
- self.ALL_FILES = "Όλα τα Αρχεία (*)"
- self.GGUF_FILES = "Αρχεία GGUF (*.gguf)"
- self.DAT_FILES = "Αρχεία DAT (*.dat)"
- self.JSON_FILES = "Αρχεία JSON (*.json)"
- self.FAILED_LOAD_PRESET = "Αποτυχία Φόρτωσης Προεπιλογής: {0}"
- self.INITIALIZING_AUTOGGUF = "Εκκίνηση Εφαρμογής AutoGGUF"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Η Εκκίνηση του AutoGGUF Ολοκληρώθηκε"
- self.REFRESHING_BACKENDS = "Ανανέωση Backends"
- self.NO_BACKENDS_AVAILABLE = "Δεν Υπάρχουν Διαθέσιμα Backends"
- self.FOUND_VALID_BACKENDS = "Βρέθηκαν {0} Έγκυρα Backends"
- self.SAVING_PRESET = "Αποθήκευση Προεπιλογής"
- self.PRESET_SAVED_TO = "Η Προεπιλογή Αποθηκεύτηκε στο {0}"
- self.LOADING_PRESET = "Φόρτωση Προεπιλογής"
- self.PRESET_LOADED_FROM = "Η Προεπιλογή Φορτώθηκε από το {0}"
- self.ADDING_KV_OVERRIDE = "Προσθήκη Υπέρβασης KV: {0}"
- self.SAVING_TASK_PRESET = "Αποθήκευση Προεπιλογής Εργασίας για {0}"
- self.TASK_PRESET_SAVED = "Η Προεπιλογή Εργασίας Αποθηκεύτηκε"
- self.TASK_PRESET_SAVED_TO = "Η Προεπιλογή Εργασίας Αποθηκεύτηκε στο {0}"
- self.RESTARTING_TASK = "Επανεκκίνηση Εργασίας: {0}"
- self.IN_PROGRESS = "Σε Εξέλιξη"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Η Λήψη Ολοκληρώθηκε. Εξήχθη στο: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}\nΤα Αρχεία CUDA Εξήχθησαν στο {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Δεν Βρέθηκε Κατάλληλο Backend CUDA για Εξαγωγή"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Ανανέωση Εκδόσεων llama.cpp"
- self.UPDATING_ASSET_LIST = "Ενημέρωση Λίστας Στοιχείων"
- self.UPDATING_CUDA_OPTIONS = "Ενημέρωση Επιλογών CUDA"
- self.STARTING_LLAMACPP_DOWNLOAD = "Έναρξη Λήψης llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "Ενημέρωση Backends CUDA"
- self.NO_CUDA_BACKEND_SELECTED = "Δεν Έχει Επιλεγεί Backend CUDA για Εξαγωγή"
- self.EXTRACTING_CUDA_FILES = "Εξαγωγή Αρχείων CUDA από {0} στο {1}"
- self.DOWNLOAD_ERROR = "Σφάλμα Λήψης: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Εμφάνιση Μενού Περιβάλλοντος Εργασίας"
- self.SHOWING_PROPERTIES_FOR_TASK = "Εμφάνιση Ιδιοτήτων για την Εργασία: {0}"
- self.CANCELLING_TASK = "Ακύρωση Εργασίας: {0}"
- self.CANCELED = "Ακυρώθηκε"
- self.DELETING_TASK = "Διαγραφή Εργασίας: {0}"
- self.LOADING_MODELS = "Φόρτωση Μοντέλων"
- self.LOADED_MODELS = "{0} Μοντέλα Φορτώθηκαν"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Περιήγηση σε Φάκελο Μοντέλων"
- self.SELECT_MODELS_DIRECTORY = "Επιλέξτε Φάκελο Μοντέλων"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Περιήγηση σε Φάκελο Εξόδου"
- self.SELECT_OUTPUT_DIRECTORY = "Επιλέξτε Φάκελο Εξόδου"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Περιήγηση σε Φάκελο Αρχείων Καταγραφής"
- self.SELECT_LOGS_DIRECTORY = "Επιλέξτε Φάκελο Αρχείων Καταγραφής"
- self.BROWSING_FOR_IMATRIX_FILE = "Περιήγηση σε Αρχείο IMatrix"
- self.SELECT_IMATRIX_FILE = "Επιλέξτε Αρχείο IMatrix"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "Χρήση CPU: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Επικύρωση Εισόδων Κβαντισμού"
- self.MODELS_PATH_REQUIRED = "Απαιτείται η Διαδρομή Μοντέλων"
- self.OUTPUT_PATH_REQUIRED = "Απαιτείται η Διαδρομή Εξόδου"
- self.LOGS_PATH_REQUIRED = "Απαιτείται η Διαδρομή Αρχείων Καταγραφής"
- self.STARTING_MODEL_QUANTIZATION = "Έναρξη Κβαντισμού Μοντέλου"
- self.INPUT_FILE_NOT_EXIST = "Το Αρχείο Εισόδου '{0}' Δεν Υπάρχει."
- self.QUANTIZING_MODEL_TO = "Κβάντιση του {0} σε {1}"
- self.QUANTIZATION_TASK_STARTED = "Η Εργασία Κβαντισμού Ξεκίνησε για {0}"
- self.ERROR_STARTING_QUANTIZATION = "Σφάλμα κατά την Έναρξη Κβαντισμού: {0}"
- self.UPDATING_MODEL_INFO = "Ενημέρωση Πληροφοριών Μοντέλου: {0}"
- self.TASK_FINISHED = "Η Εργασία Ολοκληρώθηκε: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Εμφάνιση Λεπτομερειών Εργασίας για: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Περιήγηση σε Αρχείο Δεδομένων IMatrix"
- self.SELECT_DATA_FILE = "Επιλέξτε Αρχείο Δεδομένων"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Περιήγηση σε Αρχείο Μοντέλου IMatrix"
- self.SELECT_MODEL_FILE = "Επιλέξτε Αρχείο Μοντέλου"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Περιήγηση σε Αρχείο Εξόδου IMatrix"
- self.SELECT_OUTPUT_FILE = "Επιλέξτε Αρχείο Εξόδου"
- self.STARTING_IMATRIX_GENERATION = "Έναρξη Δημιουργίας IMatrix"
- self.BACKEND_PATH_NOT_EXIST = "Η Διαδρομή Backend Δεν Υπάρχει: {0}"
- self.GENERATING_IMATRIX = "Δημιουργία IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Σφάλμα κατά την Έναρξη Δημιουργίας IMatrix: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "Η Εργασία Δημιουργίας IMatrix Ξεκίνησε"
- self.ERROR_MESSAGE = "Σφάλμα: {0}"
- self.TASK_ERROR = "Σφάλμα Εργασίας: {0}"
- self.APPLICATION_CLOSING = "Κλείσιμο Εφαρμογής"
- self.APPLICATION_CLOSED = "Η Εφαρμογή Έκλεισε"
- self.SELECT_QUANTIZATION_TYPE = "Επιλέξτε τον τύπο κβαντισμού"
- self.ALLOWS_REQUANTIZING = "Επιτρέπει την επανακβάντιση τανυστών που έχουν ήδη κβαντιστεί"
- self.LEAVE_OUTPUT_WEIGHT = "Θα αφήσει το output.weight χωρίς (επανα)κβάντιση"
- self.DISABLE_K_QUANT_MIXTURES = "Απενεργοποιήστε τα μείγματα k-quant και κβαντίστε όλους τους τανυστές στον ίδιο τύπο"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Χρησιμοποιήστε τα δεδομένα στο αρχείο ως πίνακα σημασίας για βελτιστοποιήσεις κβαντισμού"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Χρησιμοποιήστε τον πίνακα σημασίας για αυτούς τους τανυστές"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Μην χρησιμοποιείτε τον πίνακα σημασίας για αυτούς τους τανυστές"
- self.OUTPUT_TENSOR_TYPE = "Τύπος Tensor Εξόδου:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Χρησιμοποιήστε αυτόν τον τύπο για τον τανυστή output.weight"
- self.TOKEN_EMBEDDING_TYPE = "Τύπος Ενσωμάτωσης Token:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Χρησιμοποιήστε αυτόν τον τύπο για τον τανυστή ενσωματώσεων token"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Θα δημιουργήσει το κβαντισμένο μοντέλο στα ίδια θραύσματα με την είσοδο"
- self.OVERRIDE_MODEL_METADATA = "Αντικατάσταση μεταδεδομένων μοντέλου"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Αρχείο δεδομένων εισόδου για τη δημιουργία IMatrix"
- self.MODEL_TO_BE_QUANTIZED = "Μοντέλο προς κβάντιση"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Διαδρομή εξόδου για το δημιουργημένο IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Πόσο συχνά να αποθηκεύεται το IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Ορίστε την τιμή εκφόρτωσης GPU (-ngl)"
- self.COMPLETED = "Ολοκληρώθηκε"
- self.REFRESH_MODELS = "Ανανέωση μοντέλων"
-
-class _Hungarian(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (Automatizált GGUF modell kvantáló)"
- self.RAM_USAGE = "RAM használat:"
- self.CPU_USAGE = "CPU használat:"
- self.BACKEND = "Llama.cpp háttérrendszer:"
- self.REFRESH_BACKENDS = "Háttérrendszerek frissítése"
- self.MODELS_PATH = "Modellek elérési útja:"
- self.OUTPUT_PATH = "Kimeneti útvonal:"
- self.LOGS_PATH = "Naplók elérési útja:"
- self.BROWSE = "Tallózás"
- self.AVAILABLE_MODELS = "Elérhető modellek:"
- self.QUANTIZATION_TYPE = "Kvantálási típus:"
- self.ALLOW_REQUANTIZE = "Újrakvantálás engedélyezése"
- self.LEAVE_OUTPUT_TENSOR = "Kimeneti tenzor meghagyása"
- self.PURE = "Tiszta"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Súlyok belefoglalása:"
- self.EXCLUDE_WEIGHTS = "Súlyok kizárása:"
- self.USE_OUTPUT_TENSOR_TYPE = "Kimeneti tenzor típusának használata"
- self.USE_TOKEN_EMBEDDING_TYPE = "Token beágyazási típusának használata"
- self.KEEP_SPLIT = "Felosztás megtartása"
- self.KV_OVERRIDES = "KV felülbírálások:"
- self.ADD_NEW_OVERRIDE = "Új felülbírálás hozzáadása"
- self.QUANTIZE_MODEL = "Modell kvantálása"
- self.SAVE_PRESET = "Esetbeállítás mentése"
- self.LOAD_PRESET = "Esetbeállítás betöltése"
- self.TASKS = "Feladatok:"
- self.DOWNLOAD_LLAMACPP = "llama.cpp letöltése"
- self.SELECT_RELEASE = "Kiadás kiválasztása:"
- self.SELECT_ASSET = "Eszköz kiválasztása:"
- self.EXTRACT_CUDA_FILES = "CUDA fájlok kibontása"
- self.SELECT_CUDA_BACKEND = "CUDA háttérrendszer kiválasztása:"
- self.DOWNLOAD = "Letöltés"
- self.IMATRIX_GENERATION = "IMatrix generálás"
- self.DATA_FILE = "Adatfájl:"
- self.MODEL = "Modell:"
- self.OUTPUT = "Kimenet:"
- self.OUTPUT_FREQUENCY = "Kimeneti frekvencia:"
- self.GPU_OFFLOAD = "GPU tehermentesítés:"
- self.AUTO = "Automatikus"
- self.GENERATE_IMATRIX = "IMatrix generálása"
- self.ERROR = "Hiba"
- self.WARNING = "Figyelmeztetés"
- self.PROPERTIES = "Tulajdonságok"
- self.CANCEL = "Mégse"
- self.RESTART = "Újraindítás"
- self.DELETE = "Törlés"
- self.CONFIRM_DELETION = "Biztosan törölni szeretné ezt a feladatot?"
- self.TASK_RUNNING_WARNING = "Néhány feladat még fut. Biztosan kilép?"
- self.YES = "Igen"
- self.NO = "Nem"
- self.DOWNLOAD_COMPLETE = "Letöltés befejeződött"
- self.CUDA_EXTRACTION_FAILED = "CUDA kibontás sikertelen"
- self.PRESET_SAVED = "Esetbeállítás mentve"
- self.PRESET_LOADED = "Esetbeállítás betöltve"
- self.NO_ASSET_SELECTED = "Nincs kiválasztott eszköz"
- self.DOWNLOAD_FAILED = "Letöltés sikertelen"
- self.NO_BACKEND_SELECTED = "Nincs kiválasztott háttérrendszer"
- self.NO_MODEL_SELECTED = "Nincs kiválasztott modell"
- self.REFRESH_RELEASES = "Kiadások frissítése"
- self.NO_SUITABLE_CUDA_BACKENDS = "Nem található megfelelő CUDA háttérrendszer"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}\nA CUDA fájlok kibontva ide: {1}"
- self.CUDA_FILES_EXTRACTED = "A CUDA fájlok kibontva ide:"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "Nem található megfelelő CUDA háttérrendszer a kibontáshoz"
- self.ERROR_FETCHING_RELEASES = "Hiba a kiadások lekérdezésekor: {0}"
- self.CONFIRM_DELETION_TITLE = "Törlés megerősítése"
- self.LOG_FOR = "Napló a következőhöz: {0}"
- self.ALL_FILES = "Minden fájl (*)"
- self.GGUF_FILES = "GGUF fájlok (*.gguf)"
- self.DAT_FILES = "DAT fájlok (*.dat)"
- self.JSON_FILES = "JSON fájlok (*.json)"
- self.FAILED_LOAD_PRESET = "Az esetbeállítás betöltése sikertelen: {0}"
- self.INITIALIZING_AUTOGGUF = "Az AutoGGUF alkalmazás inicializálása"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "Az AutoGGUF inicializálása befejeződött"
- self.REFRESHING_BACKENDS = "Háttérrendszerek frissítése"
- self.NO_BACKENDS_AVAILABLE = "Nincsenek elérhető háttérrendszerek"
- self.FOUND_VALID_BACKENDS = "{0} érvényes háttérrendszer található"
- self.SAVING_PRESET = "Esetbeállítás mentése"
- self.PRESET_SAVED_TO = "Esetbeállítás mentve ide: {0}"
- self.LOADING_PRESET = "Esetbeállítás betöltése"
- self.PRESET_LOADED_FROM = "Esetbeállítás betöltve innen: {0}"
- self.ADDING_KV_OVERRIDE = "KV felülbírálás hozzáadása: {0}"
- self.SAVING_TASK_PRESET = "Feladat esetbeállítás mentése ehhez: {0}"
- self.TASK_PRESET_SAVED = "Feladat esetbeállítás mentve"
- self.TASK_PRESET_SAVED_TO = "Feladat esetbeállítás mentve ide: {0}"
- self.RESTARTING_TASK = "Feladat újraindítása: {0}"
- self.IN_PROGRESS = "Folyamatban"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Letöltés befejeződött. Kibontva ide: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}\nA CUDA fájlok kibontva ide: {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "Nem található megfelelő CUDA háttérrendszer a kibontáshoz"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "A llama.cpp kiadások frissítése"
- self.UPDATING_ASSET_LIST = "Eszközlista frissítése"
- self.UPDATING_CUDA_OPTIONS = "CUDA beállítások frissítése"
- self.STARTING_LLAMACPP_DOWNLOAD = "A llama.cpp letöltésének megkezdése"
- self.UPDATING_CUDA_BACKENDS = "CUDA háttérrendszerek frissítése"
- self.NO_CUDA_BACKEND_SELECTED = "Nincs kiválasztott CUDA háttérrendszer a kibontáshoz"
- self.EXTRACTING_CUDA_FILES = "CUDA fájlok kibontása innen: {0} ide: {1}"
- self.DOWNLOAD_ERROR = "Letöltési hiba: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Feladat helyi menüjének megjelenítése"
- self.SHOWING_PROPERTIES_FOR_TASK = "Feladat tulajdonságainak megjelenítése: {0}"
- self.CANCELLING_TASK = "Feladat megszakítása: {0}"
- self.CANCELED = "Megszakítva"
- self.DELETING_TASK = "Feladat törlése: {0}"
- self.LOADING_MODELS = "Modellek betöltése"
- self.LOADED_MODELS = "{0} modell betöltve"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Modellek könyvtárának tallózása"
- self.SELECT_MODELS_DIRECTORY = "Modellek könyvtárának kiválasztása"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Kimeneti könyvtár tallózása"
- self.SELECT_OUTPUT_DIRECTORY = "Kimeneti könyvtár kiválasztása"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Naplók könyvtárának tallózása"
- self.SELECT_LOGS_DIRECTORY = "Naplók könyvtárának kiválasztása"
- self.BROWSING_FOR_IMATRIX_FILE = "IMatrix fájl tallózása"
- self.SELECT_IMATRIX_FILE = "IMatrix fájl kiválasztása"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU használat: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Kvantálási bemenetek ellenőrzése"
- self.MODELS_PATH_REQUIRED = "A modellek elérési útja kötelező"
- self.OUTPUT_PATH_REQUIRED = "A kimeneti útvonal kötelező"
- self.LOGS_PATH_REQUIRED = "A naplók elérési útja kötelező"
- self.STARTING_MODEL_QUANTIZATION = "Modell kvantálásának indítása"
- self.INPUT_FILE_NOT_EXIST = "A bemeneti fájl '{0}' nem létezik."
- self.QUANTIZING_MODEL_TO = "{0} kvantálása erre: {1}"
- self.QUANTIZATION_TASK_STARTED = "Kvantálási feladat elindítva ehhez: {0}"
- self.ERROR_STARTING_QUANTIZATION = "Hiba a kvantálás indításakor: {0}"
- self.UPDATING_MODEL_INFO = "Modellinformációk frissítése: {0}"
- self.TASK_FINISHED = "Feladat befejezve: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Feladat részleteinek megjelenítése ehhez: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix adatfájl tallózása"
- self.SELECT_DATA_FILE = "Adatfájl kiválasztása"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix modellfájl tallózása"
- self.SELECT_MODEL_FILE = "Modellfájl kiválasztása"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix kimeneti fájl tallózása"
- self.SELECT_OUTPUT_FILE = "Kimeneti fájl kiválasztása"
- self.STARTING_IMATRIX_GENERATION = "IMatrix generálásának indítása"
- self.BACKEND_PATH_NOT_EXIST = "A háttérrendszer elérési útja nem létezik: {0}"
- self.GENERATING_IMATRIX = "IMatrix generálása"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Hiba az IMatrix generálásának indításakor: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generálási feladat elindítva"
- self.ERROR_MESSAGE = "Hiba: {0}"
- self.TASK_ERROR = "Feladat hiba: {0}"
- self.APPLICATION_CLOSING = "Alkalmazás bezárása"
- self.APPLICATION_CLOSED = "Alkalmazás bezárva"
- self.SELECT_QUANTIZATION_TYPE = "Válassza ki a kvantálási típust"
- self.ALLOWS_REQUANTIZING = "Lehetővé teszi a már kvantált tenzorok újrakvantálását"
- self.LEAVE_OUTPUT_WEIGHT = "Az output.weight-et (újra)kvantálatlanul hagyja"
- self.DISABLE_K_QUANT_MIXTURES = "Tiltsa le a k-kvant keverékeket, és kvantálja az összes tenzort ugyanarra a típusra"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Használja a fájlban lévő adatokat fontossági mátrixként a kvantálási optimalizálásokhoz"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Használja a fontossági mátrixot ezekre a tenzorokra"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Ne használja a fontossági mátrixot ezekre a tenzorokra"
- self.OUTPUT_TENSOR_TYPE = "Kimeneti tenzor típusa:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Használja ezt a típust az output.weight tenzorhoz"
- self.TOKEN_EMBEDDING_TYPE = "Token beágyazási típusa:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Használja ezt a típust a token beágyazási tenzorhoz"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "A kvantált modellt ugyanazokban a szegmensekben fogja generálni, mint a bemenet"
- self.OVERRIDE_MODEL_METADATA = "Modell metaadatok felülbírálása"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix generáláshoz bemeneti adatfájl"
- self.MODEL_TO_BE_QUANTIZED = "Kvantálandó modell"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "A generált IMatrix kimeneti útvonala"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "Milyen gyakran mentse az IMatrixot"
- self.SET_GPU_OFFLOAD_VALUE = "GPU tehermentesítési érték beállítása (-ngl)"
- self.COMPLETED = "Befejezve"
- self.REFRESH_MODELS = "Modellek frissítése"
-
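Because every subclass calls `super().__init__()` before assigning its own strings, any key a locale does not override silently falls back to the base `_Localization` value. A minimal sketch of that mechanism; the `NEW_KEY` attribute is hypothetical and stands in for any string added to the base class but not yet translated:

```python
class _Localization:
    def __init__(self):
        self.CANCEL = "Cancel"      # base (English) value
        self.NEW_KEY = "New key"    # hypothetical: exists only in the base class

class _Hungarian(_Localization):
    def __init__(self):
        super().__init__()          # inherit every base string first
        self.CANCEL = "Mégse"       # then override what this locale translates

loc = _Hungarian()
print(loc.CANCEL)   # "Mégse" -- overridden
print(loc.NEW_KEY)  # "New key" -- untranslated keys still render, in English
```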
-class _BritishEnglish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantiser)"
- self.RAM_USAGE = "RAM Usage:"
- self.CPU_USAGE = "CPU Usage:"
- self.BACKEND = "Llama.cpp Backend:"
- self.REFRESH_BACKENDS = "Refresh Backends"
- self.MODELS_PATH = "Models Path:"
- self.OUTPUT_PATH = "Output Path:"
- self.LOGS_PATH = "Logs Path:"
- self.BROWSE = "Browse"
- self.AVAILABLE_MODELS = "Available Models:"
- self.QUANTIZATION_TYPE = "Quantisation Type:" # Note the British spelling
- self.ALLOW_REQUANTIZE = "Allow Requantise"
- self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
- self.PURE = "Pure"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Include Weights:"
- self.EXCLUDE_WEIGHTS = "Exclude Weights:"
- self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
- self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
- self.KEEP_SPLIT = "Keep Split"
- self.KV_OVERRIDES = "KV Overrides:"
- self.ADD_NEW_OVERRIDE = "Add new override"
- self.QUANTIZE_MODEL = "Quantise Model" # Note the British spelling
- self.SAVE_PRESET = "Save Preset"
- self.LOAD_PRESET = "Load Preset"
- self.TASKS = "Tasks:"
- self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
- self.SELECT_RELEASE = "Select Release:"
- self.SELECT_ASSET = "Select Asset:"
- self.EXTRACT_CUDA_FILES = "Extract CUDA files"
- self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
- self.DOWNLOAD = "Download"
- self.IMATRIX_GENERATION = "IMatrix Generation"
- self.DATA_FILE = "Data File:"
- self.MODEL = "Model:"
- self.OUTPUT = "Output:"
- self.OUTPUT_FREQUENCY = "Output Frequency:"
- self.GPU_OFFLOAD = "GPU Offload:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Generate IMatrix"
- self.ERROR = "Error"
- self.WARNING = "Warning"
- self.PROPERTIES = "Properties"
- self.CANCEL = "Cancel"
- self.RESTART = "Restart"
- self.DELETE = "Delete"
- self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
- self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?"
- self.YES = "Yes"
- self.NO = "No"
- self.DOWNLOAD_COMPLETE = "Download Complete"
- self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
- self.PRESET_SAVED = "Preset Saved"
- self.PRESET_LOADED = "Preset Loaded"
- self.NO_ASSET_SELECTED = "No asset selected"
- self.DOWNLOAD_FAILED = "Download failed"
- self.NO_BACKEND_SELECTED = "No backend selected"
- self.NO_MODEL_SELECTED = "No model selected"
- self.REFRESH_RELEASES = "Refresh Releases"
- self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction"
- self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
- self.LOG_FOR = "Log for {0}"
- self.ALL_FILES = "All Files (*)"
- self.GGUF_FILES = "GGUF Files (*.gguf)"
- self.DAT_FILES = "DAT Files (*.dat)"
- self.JSON_FILES = "JSON Files (*.json)"
- self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
- self.INITIALIZING_AUTOGGUF = "Initialising AutoGGUF application" # Note the British spelling
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialisation complete" # Note the British spelling
- self.REFRESHING_BACKENDS = "Refreshing backends"
- self.NO_BACKENDS_AVAILABLE = "No backends available"
- self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
- self.SAVING_PRESET = "Saving preset"
- self.PRESET_SAVED_TO = "Preset saved to {0}"
- self.LOADING_PRESET = "Loading preset"
- self.PRESET_LOADED_FROM = "Preset loaded from {0}"
- self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
- self.SAVING_TASK_PRESET = "Saving task preset for {0}"
- self.TASK_PRESET_SAVED = "Task Preset Saved"
- self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
- self.RESTARTING_TASK = "Restarting task: {0}"
- self.IN_PROGRESS = "In Progress"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
- self.UPDATING_ASSET_LIST = "Updating asset list"
- self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
- self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
- self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
- self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
- self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
- self.DOWNLOAD_ERROR = "Download error: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
- self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
- self.CANCELLING_TASK = "Cancelling task: {0}"
- self.CANCELED = "Cancelled"
- self.DELETING_TASK = "Deleting task: {0}"
- self.LOADING_MODELS = "Loading models"
- self.LOADED_MODELS = "Loaded {0} models"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
- self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
- self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
- self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
- self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
- self.SELECT_IMATRIX_FILE = "Select IMatrix File"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantisation inputs" # Note the British spelling
- self.MODELS_PATH_REQUIRED = "Models path is required"
- self.OUTPUT_PATH_REQUIRED = "Output path is required"
- self.LOGS_PATH_REQUIRED = "Logs path is required"
- self.STARTING_MODEL_QUANTIZATION = "Starting model quantisation" # Note the British spelling
- self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
- self.QUANTIZING_MODEL_TO = "Quantising {0} to {1}" # Note the British spelling
- self.QUANTIZATION_TASK_STARTED = "Quantisation task started for {0}" # Note the British spelling
- self.ERROR_STARTING_QUANTIZATION = "Error starting quantisation: {0}" # Note the British spelling
- self.UPDATING_MODEL_INFO = "Updating model info: {0}"
- self.TASK_FINISHED = "Task finished: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
- self.SELECT_DATA_FILE = "Select Data File"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
- self.SELECT_MODEL_FILE = "Select Model File"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
- self.SELECT_OUTPUT_FILE = "Select Output File"
- self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
- self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
- self.GENERATING_IMATRIX = "Generating IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
- self.ERROR_MESSAGE = "Error: {0}"
- self.TASK_ERROR = "Task error: {0}"
- self.APPLICATION_CLOSING = "Application closing"
- self.APPLICATION_CLOSED = "Application closed"
- self.SELECT_QUANTIZATION_TYPE = "Select the quantisation type" # Note the British spelling
- self.ALLOWS_REQUANTIZING = "Allows requantising tensors that have already been quantised" # Note the British spelling
- self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantised"
- self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantise all tensors to the same type" # Note the British spelling
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimisations" # Note the British spelling
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors"
- self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor"
- self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantised model in the same shards as input" # Note the British spelling
- self.OVERRIDE_MODEL_METADATA = "Override model metadata"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
- self.MODEL_TO_BE_QUANTIZED = "Model to be quantised" # Note the British spelling
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
- self.COMPLETED = "Completed"
- self.REFRESH_MODELS = "Refresh Models"
-
-class _IndianEnglish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)"
- self.RAM_USAGE = "RAM Usage:"
- self.CPU_USAGE = "CPU Usage:"
- self.BACKEND = "Llama.cpp Backend:"
- self.REFRESH_BACKENDS = "Refresh Backends"
- self.MODELS_PATH = "Models Path:"
- self.OUTPUT_PATH = "Output Path:"
- self.LOGS_PATH = "Logs Path:"
- self.BROWSE = "Browse"
- self.AVAILABLE_MODELS = "Available Models:"
- self.QUANTIZATION_TYPE = "Quantization Type:"
- self.ALLOW_REQUANTIZE = "Allow Requantize"
- self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
- self.PURE = "Pure"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Include Weights:"
- self.EXCLUDE_WEIGHTS = "Exclude Weights:"
- self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
- self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
- self.KEEP_SPLIT = "Keep Split"
- self.KV_OVERRIDES = "KV Overrides:"
- self.ADD_NEW_OVERRIDE = "Add new override"
- self.QUANTIZE_MODEL = "Quantize Model"
- self.SAVE_PRESET = "Save Preset"
- self.LOAD_PRESET = "Load Preset"
- self.TASKS = "Tasks:"
- self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
- self.SELECT_RELEASE = "Select Release:"
- self.SELECT_ASSET = "Select Asset:"
- self.EXTRACT_CUDA_FILES = "Extract CUDA files"
- self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
- self.DOWNLOAD = "Download"
- self.IMATRIX_GENERATION = "IMatrix Generation"
- self.DATA_FILE = "Data File:"
- self.MODEL = "Model:"
- self.OUTPUT = "Output:"
- self.OUTPUT_FREQUENCY = "Output Frequency:"
- self.GPU_OFFLOAD = "GPU Offload:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Generate IMatrix"
- self.ERROR = "Error"
- self.WARNING = "Warning"
- self.PROPERTIES = "Properties"
- self.CANCEL = "Cancel"
- self.RESTART = "Restart"
- self.DELETE = "Delete"
- self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
- self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?"
- self.YES = "Yes"
- self.NO = "No"
- self.DOWNLOAD_COMPLETE = "Download Complete"
- self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
- self.PRESET_SAVED = "Preset Saved"
- self.PRESET_LOADED = "Preset Loaded"
- self.NO_ASSET_SELECTED = "No asset selected"
- self.DOWNLOAD_FAILED = "Download failed"
- self.NO_BACKEND_SELECTED = "No backend selected"
- self.NO_MODEL_SELECTED = "No model selected"
- self.REFRESH_RELEASES = "Refresh Releases"
- self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction"
- self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
- self.LOG_FOR = "Log for {0}"
- self.ALL_FILES = "All Files (*)"
- self.GGUF_FILES = "GGUF Files (*.gguf)"
- self.DAT_FILES = "DAT Files (*.dat)"
- self.JSON_FILES = "JSON Files (*.json)"
- self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
- self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete"
- self.REFRESHING_BACKENDS = "Refreshing backends"
- self.NO_BACKENDS_AVAILABLE = "No backends available"
- self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
- self.SAVING_PRESET = "Saving preset"
- self.PRESET_SAVED_TO = "Preset saved to {0}"
- self.LOADING_PRESET = "Loading preset"
- self.PRESET_LOADED_FROM = "Preset loaded from {0}"
- self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
- self.SAVING_TASK_PRESET = "Saving task preset for {0}"
- self.TASK_PRESET_SAVED = "Task Preset Saved"
- self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
- self.RESTARTING_TASK = "Restarting task: {0}"
- self.IN_PROGRESS = "In Progress"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
- self.UPDATING_ASSET_LIST = "Updating asset list"
- self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
- self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
- self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
- self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
- self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
- self.DOWNLOAD_ERROR = "Download error: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
- self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
- self.CANCELLING_TASK = "Cancelling task: {0}"
- self.CANCELED = "Cancelled"
- self.DELETING_TASK = "Deleting task: {0}"
- self.LOADING_MODELS = "Loading models"
- self.LOADED_MODELS = "Loaded {0} models"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
- self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
- self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
- self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
- self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
- self.SELECT_IMATRIX_FILE = "Select IMatrix File"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs"
- self.MODELS_PATH_REQUIRED = "Models path is required"
- self.OUTPUT_PATH_REQUIRED = "Output path is required"
- self.LOGS_PATH_REQUIRED = "Logs path is required"
- self.STARTING_MODEL_QUANTIZATION = "Starting model quantization"
- self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
- self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
- self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}"
- self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}"
- self.UPDATING_MODEL_INFO = "Updating model info: {0}"
- self.TASK_FINISHED = "Task finished: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
- self.SELECT_DATA_FILE = "Select Data File"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
- self.SELECT_MODEL_FILE = "Select Model File"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
- self.SELECT_OUTPUT_FILE = "Select Output File"
- self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
- self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
- self.GENERATING_IMATRIX = "Generating IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
- self.ERROR_MESSAGE = "Error: {0}"
- self.TASK_ERROR = "Task error: {0}"
- self.APPLICATION_CLOSING = "Application closing"
- self.APPLICATION_CLOSED = "Application closed"
- self.SELECT_QUANTIZATION_TYPE = "Select the quantization type"
- self.ALLOWS_REQUANTIZING = "Allows requantizing tensors that have already been quantized"
- self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized"
- self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantize all tensors to the same type"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimizations"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors"
- self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor"
- self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantized model in the same shards as input"
- self.OVERRIDE_MODEL_METADATA = "Override model metadata"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
- self.MODEL_TO_BE_QUANTIZED = "Model to be quantized"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
- self.COMPLETED = "Completed"
- self.REFRESH_MODELS = "Refresh Models"
-
-class _CanadianEnglish(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)"
- self.RAM_USAGE = "RAM Usage:"
- self.CPU_USAGE = "CPU Usage:"
- self.BACKEND = "Llama.cpp Backend:"
- self.REFRESH_BACKENDS = "Refresh Backends"
- self.MODELS_PATH = "Models Path:"
- self.OUTPUT_PATH = "Output Path:"
- self.LOGS_PATH = "Logs Path:"
- self.BROWSE = "Browse"
- self.AVAILABLE_MODELS = "Available Models:"
- self.QUANTIZATION_TYPE = "Quantization Type:"
- self.ALLOW_REQUANTIZE = "Allow Requantize"
- self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
- self.PURE = "Pure"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "Include Weights:"
- self.EXCLUDE_WEIGHTS = "Exclude Weights:"
- self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
- self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
- self.KEEP_SPLIT = "Keep Split"
- self.KV_OVERRIDES = "KV Overrides:"
- self.ADD_NEW_OVERRIDE = "Add new override"
- self.QUANTIZE_MODEL = "Quantize Model"
- self.SAVE_PRESET = "Save Preset"
- self.LOAD_PRESET = "Load Preset"
- self.TASKS = "Tasks:"
- self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
- self.SELECT_RELEASE = "Select Release:"
- self.SELECT_ASSET = "Select Asset:"
- self.EXTRACT_CUDA_FILES = "Extract CUDA files"
- self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
- self.DOWNLOAD = "Download"
- self.IMATRIX_GENERATION = "IMatrix Generation"
- self.DATA_FILE = "Data File:"
- self.MODEL = "Model:"
- self.OUTPUT = "Output:"
- self.OUTPUT_FREQUENCY = "Output Frequency:"
- self.GPU_OFFLOAD = "GPU Offload:"
- self.AUTO = "Auto"
- self.GENERATE_IMATRIX = "Generate IMatrix"
- self.ERROR = "Error"
- self.WARNING = "Warning"
- self.PROPERTIES = "Properties"
- self.CANCEL = "Cancel"
- self.RESTART = "Restart"
- self.DELETE = "Delete"
- self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
- self.TASK_RUNNING_WARNING = "Some tasks are still running. Are you sure you want to quit?"
- self.YES = "Yes"
- self.NO = "No"
- self.DOWNLOAD_COMPLETE = "Download Complete"
- self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
- self.PRESET_SAVED = "Preset Saved"
- self.PRESET_LOADED = "Preset Loaded"
- self.NO_ASSET_SELECTED = "No asset selected"
- self.DOWNLOAD_FAILED = "Download failed"
- self.NO_BACKEND_SELECTED = "No backend selected"
- self.NO_MODEL_SELECTED = "No model selected"
- self.REFRESH_RELEASES = "Refresh Releases"
- self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "No suitable CUDA backend found for extraction"
- self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
- self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
- self.LOG_FOR = "Log for {0}"
- self.ALL_FILES = "All Files (*)"
- self.GGUF_FILES = "GGUF Files (*.gguf)"
- self.DAT_FILES = "DAT Files (*.dat)"
- self.JSON_FILES = "JSON Files (*.json)"
- self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
- self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete"
- self.REFRESHING_BACKENDS = "Refreshing backends"
- self.NO_BACKENDS_AVAILABLE = "No backends available"
- self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
- self.SAVING_PRESET = "Saving preset"
- self.PRESET_SAVED_TO = "Preset saved to {0}"
- self.LOADING_PRESET = "Loading preset"
- self.PRESET_LOADED_FROM = "Preset loaded from {0}"
- self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
- self.SAVING_TASK_PRESET = "Saving task preset for {0}"
- self.TASK_PRESET_SAVED = "Task Preset Saved"
- self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
- self.RESTARTING_TASK = "Restarting task: {0}"
- self.IN_PROGRESS = "In Progress"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "No suitable CUDA backend found for extraction"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
- self.UPDATING_ASSET_LIST = "Updating asset list"
- self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
- self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
- self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
- self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
- self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
- self.DOWNLOAD_ERROR = "Download error: {0}"
- self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
- self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
- self.CANCELLING_TASK = "Cancelling task: {0}"
- self.CANCELED = "Cancelled"
- self.DELETING_TASK = "Deleting task: {0}"
- self.LOADING_MODELS = "Loading models"
- self.LOADED_MODELS = "Loaded {0} models"
- self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
- self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
- self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
- self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
- self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
- self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
- self.SELECT_IMATRIX_FILE = "Select IMatrix File"
- self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs"
- self.MODELS_PATH_REQUIRED = "Models path is required"
- self.OUTPUT_PATH_REQUIRED = "Output path is required"
- self.LOGS_PATH_REQUIRED = "Logs path is required"
- self.STARTING_MODEL_QUANTIZATION = "Starting model quantization"
- self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
- self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
- self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}"
- self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}"
- self.UPDATING_MODEL_INFO = "Updating model info: {0}"
- self.TASK_FINISHED = "Task finished: {0}"
- self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
- self.SELECT_DATA_FILE = "Select Data File"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
- self.SELECT_MODEL_FILE = "Select Model File"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
- self.SELECT_OUTPUT_FILE = "Select Output File"
- self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
- self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
- self.GENERATING_IMATRIX = "Generating IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "Error starting IMatrix generation: {0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
- self.ERROR_MESSAGE = "Error: {0}"
- self.TASK_ERROR = "Task error: {0}"
- self.APPLICATION_CLOSING = "Application closing"
- self.APPLICATION_CLOSED = "Application closed"
- self.SELECT_QUANTIZATION_TYPE = "Select the quantization type"
- self.ALLOWS_REQUANTIZING = "Allows requantizing tensors that have already been quantized"
- self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized"
- self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantize all tensors to the same type"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimisations"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Use importance matrix for these tensors"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "Don't use importance matrix for these tensors"
- self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "Use this type for the output.weight tensor"
- self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "Use this type for the token embeddings tensor"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantized model in the same shards as input"
- self.OVERRIDE_MODEL_METADATA = "Override model metadata"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
- self.MODEL_TO_BE_QUANTIZED = "Model to be quantized"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
- self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
- self.COMPLETED = "Completed"
- self.REFRESH_MODELS = "Refresh Models"
-
-class _TraditionalChinese(_Localization):
- def __init__(self):
- super().__init__()
- self.WINDOW_TITLE = "AutoGGUF(自動 GGUF 模型量化器)"
- self.RAM_USAGE = "RAM 使用量:"
- self.CPU_USAGE = "CPU 使用率:"
- self.BACKEND = "Llama.cpp 後端:"
- self.REFRESH_BACKENDS = "重新整理後端"
- self.MODELS_PATH = "模型路徑:"
- self.OUTPUT_PATH = "輸出路徑:"
- self.LOGS_PATH = "日誌路徑:"
- self.BROWSE = "瀏覽"
- self.AVAILABLE_MODELS = "可用模型:"
- self.QUANTIZATION_TYPE = "量化類型:"
- self.ALLOW_REQUANTIZE = "允許重新量化"
- self.LEAVE_OUTPUT_TENSOR = "保留輸出張量"
- self.PURE = "純粹"
- self.IMATRIX = "IMatrix:"
- self.INCLUDE_WEIGHTS = "包含權重:"
- self.EXCLUDE_WEIGHTS = "排除權重:"
- self.USE_OUTPUT_TENSOR_TYPE = "使用輸出張量類型"
- self.USE_TOKEN_EMBEDDING_TYPE = "使用權杖嵌入類型"
- self.KEEP_SPLIT = "保持分割"
- self.KV_OVERRIDES = "KV 覆蓋:"
- self.ADD_NEW_OVERRIDE = "新增覆蓋"
- self.QUANTIZE_MODEL = "量化模型"
- self.SAVE_PRESET = "儲存預設"
- self.LOAD_PRESET = "載入預設"
- self.TASKS = "任務:"
- self.DOWNLOAD_LLAMACPP = "下載 llama.cpp"
- self.SELECT_RELEASE = "選擇版本:"
- self.SELECT_ASSET = "選擇資源:"
- self.EXTRACT_CUDA_FILES = "解壓縮 CUDA 檔案"
- self.SELECT_CUDA_BACKEND = "選擇 CUDA 後端:"
- self.DOWNLOAD = "下載"
- self.IMATRIX_GENERATION = "IMatrix 產生"
- self.DATA_FILE = "資料檔案:"
- self.MODEL = "模型:"
- self.OUTPUT = "輸出:"
- self.OUTPUT_FREQUENCY = "輸出頻率:"
- self.GPU_OFFLOAD = "GPU 卸載:"
- self.AUTO = "自動"
- self.GENERATE_IMATRIX = "產生 IMatrix"
- self.ERROR = "錯誤"
- self.WARNING = "警告"
- self.PROPERTIES = "屬性"
- self.CANCEL = "取消"
- self.RESTART = "重新啟動"
- self.DELETE = "刪除"
- self.CONFIRM_DELETION = "您確定要刪除此任務嗎?"
- self.TASK_RUNNING_WARNING = "某些任務仍在執行中。您確定要結束嗎?"
- self.YES = "是"
- self.NO = "否"
- self.DOWNLOAD_COMPLETE = "下載完成"
- self.CUDA_EXTRACTION_FAILED = "CUDA 解壓縮失敗"
- self.PRESET_SAVED = "預設已儲存"
- self.PRESET_LOADED = "預設已載入"
- self.NO_ASSET_SELECTED = "未選擇資源"
- self.DOWNLOAD_FAILED = "下載失敗"
- self.NO_BACKEND_SELECTED = "未選擇後端"
- self.NO_MODEL_SELECTED = "未選擇模型"
- self.REFRESH_RELEASES = "重新整理版本"
- self.NO_SUITABLE_CUDA_BACKENDS = "找不到合適的 CUDA 後端"
- self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp 二進位檔案已下載並解壓縮至 {0}\nCUDA 檔案已解壓縮至 {1}"
- self.CUDA_FILES_EXTRACTED = "CUDA 檔案已解壓縮至"
- self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "找不到合適的 CUDA 後端進行解壓縮"
- self.ERROR_FETCHING_RELEASES = "擷取版本時發生錯誤:{0}"
- self.CONFIRM_DELETION_TITLE = "確認刪除"
- self.LOG_FOR = "{0} 的日誌"
- self.ALL_FILES = "所有檔案 (*)"
- self.GGUF_FILES = "GGUF 檔案 (*.gguf)"
- self.DAT_FILES = "DAT 檔案 (*.dat)"
- self.JSON_FILES = "JSON 檔案 (*.json)"
- self.FAILED_LOAD_PRESET = "載入預設失敗:{0}"
- self.INITIALIZING_AUTOGGUF = "正在初始化 AutoGGUF 應用程式"
- self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF 初始化完成"
- self.REFRESHING_BACKENDS = "正在重新整理後端"
- self.NO_BACKENDS_AVAILABLE = "沒有可用的後端"
- self.FOUND_VALID_BACKENDS = "找到 {0} 個有效的後端"
- self.SAVING_PRESET = "正在儲存預設"
- self.PRESET_SAVED_TO = "預設已儲存至 {0}"
- self.LOADING_PRESET = "正在載入預設"
- self.PRESET_LOADED_FROM = "從 {0} 載入了預設"
- self.ADDING_KV_OVERRIDE = "正在新增 KV 覆蓋:{0}"
- self.SAVING_TASK_PRESET = "正在儲存 {0} 的任務預設"
- self.TASK_PRESET_SAVED = "任務預設已儲存"
- self.TASK_PRESET_SAVED_TO = "任務預設已儲存至 {0}"
- self.RESTARTING_TASK = "正在重新啟動任務:{0}"
- self.IN_PROGRESS = "處理中"
- self.DOWNLOAD_FINISHED_EXTRACTED_TO = "下載完成。已解壓縮至:{0}"
- self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp 二進位檔案已下載並解壓縮至 {0}\nCUDA 檔案已解壓縮至 {1}"
- self.NO_SUITABLE_CUDA_BACKEND_FOUND = "找不到合適的 CUDA 後端進行解壓縮"
- self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = "llama.cpp 二進位檔案已下載並解壓縮至 {0}"
- self.REFRESHING_LLAMACPP_RELEASES = "正在重新整理 llama.cpp 版本"
- self.UPDATING_ASSET_LIST = "正在更新資源清單"
- self.UPDATING_CUDA_OPTIONS = "正在更新 CUDA 選項"
- self.STARTING_LLAMACPP_DOWNLOAD = "正在開始下載 llama.cpp"
- self.UPDATING_CUDA_BACKENDS = "正在更新 CUDA 後端"
- self.NO_CUDA_BACKEND_SELECTED = "未選擇要解壓縮的 CUDA 後端"
- self.EXTRACTING_CUDA_FILES = "正在從 {0} 解壓縮 CUDA 檔案至 {1}"
- self.DOWNLOAD_ERROR = "下載錯誤:{0}"
- self.SHOWING_TASK_CONTEXT_MENU = "正在顯示任務操作選單"
- self.SHOWING_PROPERTIES_FOR_TASK = "正在顯示任務的屬性:{0}"
- self.CANCELLING_TASK = "正在取消任務:{0}"
- self.CANCELED = "已取消"
- self.DELETING_TASK = "正在刪除任務:{0}"
- self.LOADING_MODELS = "正在載入模型"
- self.LOADED_MODELS = "已載入 {0} 個模型"
- self.BROWSING_FOR_MODELS_DIRECTORY = "正在瀏覽模型目錄"
- self.SELECT_MODELS_DIRECTORY = "選擇模型目錄"
- self.BROWSING_FOR_OUTPUT_DIRECTORY = "正在瀏覽輸出目錄"
- self.SELECT_OUTPUT_DIRECTORY = "選擇輸出目錄"
- self.BROWSING_FOR_LOGS_DIRECTORY = "正在瀏覽日誌目錄"
- self.SELECT_LOGS_DIRECTORY = "選擇日誌目錄"
- self.BROWSING_FOR_IMATRIX_FILE = "正在瀏覽 IMatrix 檔案"
- self.SELECT_IMATRIX_FILE = "選擇 IMatrix 檔案"
- self.RAM_USAGE_FORMAT = "{0:.1f}%({1} MB / {2} MB)"
- self.CPU_USAGE_FORMAT = "CPU 使用率:{0:.1f}%"
- self.VALIDATING_QUANTIZATION_INPUTS = "正在驗證量化輸入"
- self.MODELS_PATH_REQUIRED = "需要模型路徑"
- self.OUTPUT_PATH_REQUIRED = "需要輸出路徑"
- self.LOGS_PATH_REQUIRED = "需要日誌路徑"
- self.STARTING_MODEL_QUANTIZATION = "正在開始模型量化"
- self.INPUT_FILE_NOT_EXIST = "輸入檔案 '{0}' 不存在。"
- self.QUANTIZING_MODEL_TO = "正在將 {0} 量化為 {1}"
- self.QUANTIZATION_TASK_STARTED = "已啟動 {0} 的量化任務"
- self.ERROR_STARTING_QUANTIZATION = "啟動量化時發生錯誤:{0}"
- self.UPDATING_MODEL_INFO = "正在更新模型資訊:{0}"
- self.TASK_FINISHED = "任務完成:{0}"
- self.SHOWING_TASK_DETAILS_FOR = "正在顯示任務詳細資訊:{0}"
- self.BROWSING_FOR_IMATRIX_DATA_FILE = "正在瀏覽 IMatrix 資料檔案"
- self.SELECT_DATA_FILE = "選擇資料檔案"
- self.BROWSING_FOR_IMATRIX_MODEL_FILE = "正在瀏覽 IMatrix 模型檔案"
- self.SELECT_MODEL_FILE = "選擇模型檔案"
- self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "正在瀏覽 IMatrix 輸出檔案"
- self.SELECT_OUTPUT_FILE = "選擇輸出檔案"
- self.STARTING_IMATRIX_GENERATION = "正在開始 IMatrix 產生"
- self.BACKEND_PATH_NOT_EXIST = "後端路徑不存在:{0}"
- self.GENERATING_IMATRIX = "正在產生 IMatrix"
- self.ERROR_STARTING_IMATRIX_GENERATION = "啟動 IMatrix 產生時發生錯誤:{0}"
- self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix 產生任務已啟動"
- self.ERROR_MESSAGE = "錯誤:{0}"
- self.TASK_ERROR = "任務錯誤:{0}"
- self.APPLICATION_CLOSING = "應用程式正在關閉"
- self.APPLICATION_CLOSED = "應用程式已關閉"
- self.SELECT_QUANTIZATION_TYPE = "請選擇量化類型"
- self.ALLOWS_REQUANTIZING = "允許重新量化已量化的張量"
- self.LEAVE_OUTPUT_WEIGHT = "將保留 output.weight 不被(重新)量化"
- self.DISABLE_K_QUANT_MIXTURES = "停用 k-quant 混合並將所有張量量化為相同類型"
- self.USE_DATA_AS_IMPORTANCE_MATRIX = "使用檔案中的資料作為量化最佳化的重要性矩陣"
- self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "對這些張量使用重要性矩陣"
- self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "不要對這些張量使用重要性矩陣"
- self.OUTPUT_TENSOR_TYPE = "輸出張量類型:"
- self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "對 output.weight 張量使用此類型"
- self.TOKEN_EMBEDDING_TYPE = "權杖嵌入類型:"
- self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "對權杖嵌入張量使用此類型"
- self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "將在與輸入相同的分片中產生量化模型"
- self.OVERRIDE_MODEL_METADATA = "覆蓋模型中繼資料"
- self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix 產生的輸入資料檔案"
- self.MODEL_TO_BE_QUANTIZED = "要量化的模型"
- self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "產生的 IMatrix 的輸出路徑"
- self.HOW_OFTEN_TO_SAVE_IMATRIX = "儲存 IMatrix 的頻率"
- self.SET_GPU_OFFLOAD_VALUE = "設定 GPU 卸載值(-ngl)"
- self.COMPLETED = "已完成"
- self.REFRESH_MODELS = "重新整理模型"
-
-# Dictionary to map language codes to classes
-_languages = {
- 'en-US': _English, # American English
- 'fr-FR': _French, # Metropolitan French
- 'zh-CN': _SimplifiedChinese, # Simplified Chinese
- 'es-ES': _Spanish, # Spanish (Spain)
- 'hi-IN': _Hindi, # Hindi (India)
- 'ru-RU': _Russian, # Russian (Russia)
- 'uk-UA': _Ukrainian, # Ukrainian (Ukraine)
- 'ja-JP': _Japanese, # Japanese (Japan)
- 'de-DE': _German, # German (Germany)
- 'pt-BR': _Portuguese, # Portuguese (Brazil)
- 'ar-SA': _Arabic, # Arabic (Saudi Arabia)
- 'ko-KR': _Korean, # Korean (Korea)
- 'it-IT': _Italian, # Italian (Italy)
- 'tr-TR': _Turkish, # Turkish (Turkey)
- 'nl-NL': _Dutch, # Dutch (Netherlands)
- 'fi-FI': _Finnish, # Finnish (Finland)
- 'bn-BD': _Bengali, # Bengali (Bangladesh)
- 'cs-CZ': _Czech, # Czech (Czech Republic)
- 'pl-PL': _Polish, # Polish (Poland)
- 'ro-RO': _Romanian, # Romanian (Romania)
- 'el-GR': _Greek, # Greek (Greece)
- 'pt-PT': _Portuguese_PT, # Portuguese (Portugal)
- 'hu-HU': _Hungarian, # Hungarian (Hungary)
- 'en-GB': _BritishEnglish, # British English
- 'fr-CA': _CanadianFrench, # Canadian French
- 'en-IN': _IndianEnglish, # Indian English
- 'en-CA': _CanadianEnglish, # Canadian English
- 'zh-TW': _TraditionalChinese, # Traditional Chinese (Taiwan)
-}
-
-def set_language(lang_code):
- # Globals
- global WINDOW_TITLE, RAM_USAGE, CPU_USAGE, BACKEND, REFRESH_BACKENDS, MODELS_PATH, OUTPUT_PATH, LOGS_PATH
- global BROWSE, AVAILABLE_MODELS, QUANTIZATION_TYPE, ALLOW_REQUANTIZE, LEAVE_OUTPUT_TENSOR, PURE, IMATRIX
- global INCLUDE_WEIGHTS, EXCLUDE_WEIGHTS, USE_OUTPUT_TENSOR_TYPE, USE_TOKEN_EMBEDDING_TYPE, KEEP_SPLIT
- global KV_OVERRIDES, ADD_NEW_OVERRIDE, QUANTIZE_MODEL, SAVE_PRESET, LOAD_PRESET, TASKS, DOWNLOAD_LLAMACPP
- global SELECT_RELEASE, SELECT_ASSET, EXTRACT_CUDA_FILES, SELECT_CUDA_BACKEND, DOWNLOAD, IMATRIX_GENERATION
- global DATA_FILE, MODEL, OUTPUT, OUTPUT_FREQUENCY, GPU_OFFLOAD, AUTO, GENERATE_IMATRIX, ERROR, WARNING
- global PROPERTIES, CANCEL, RESTART, DELETE, CONFIRM_DELETION, TASK_RUNNING_WARNING, YES, NO, DOWNLOAD_COMPLETE
- global CUDA_EXTRACTION_FAILED, PRESET_SAVED, PRESET_LOADED, NO_ASSET_SELECTED, DOWNLOAD_FAILED, NO_BACKEND_SELECTED
- global NO_MODEL_SELECTED, REFRESH_RELEASES, NO_SUITABLE_CUDA_BACKENDS, LLAMACPP_DOWNLOADED_EXTRACTED, CUDA_FILES_EXTRACTED
- global NO_SUITABLE_CUDA_BACKEND_EXTRACTION, ERROR_FETCHING_RELEASES, CONFIRM_DELETION_TITLE, LOG_FOR, ALL_FILES
- global GGUF_FILES, DAT_FILES, JSON_FILES, FAILED_LOAD_PRESET, INITIALIZING_AUTOGGUF, AUTOGGUF_INITIALIZATION_COMPLETE
- global REFRESHING_BACKENDS, NO_BACKENDS_AVAILABLE, FOUND_VALID_BACKENDS, SAVING_PRESET, PRESET_SAVED_TO, LOADING_PRESET
- global PRESET_LOADED_FROM, ADDING_KV_OVERRIDE, SAVING_TASK_PRESET, TASK_PRESET_SAVED, TASK_PRESET_SAVED_TO, RESTARTING_TASK
- global IN_PROGRESS, DOWNLOAD_FINISHED_EXTRACTED_TO, LLAMACPP_DOWNLOADED_AND_EXTRACTED, NO_SUITABLE_CUDA_BACKEND_FOUND
- global LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED, REFRESHING_LLAMACPP_RELEASES, UPDATING_ASSET_LIST, UPDATING_CUDA_OPTIONS
- global STARTING_LLAMACPP_DOWNLOAD, UPDATING_CUDA_BACKENDS, NO_CUDA_BACKEND_SELECTED, EXTRACTING_CUDA_FILES, DOWNLOAD_ERROR
- global SHOWING_TASK_CONTEXT_MENU, SHOWING_PROPERTIES_FOR_TASK, CANCELLING_TASK, CANCELED, DELETING_TASK, LOADING_MODELS, LOADED_MODELS
- global BROWSING_FOR_MODELS_DIRECTORY, SELECT_MODELS_DIRECTORY, BROWSING_FOR_OUTPUT_DIRECTORY, SELECT_OUTPUT_DIRECTORY
- global BROWSING_FOR_LOGS_DIRECTORY, SELECT_LOGS_DIRECTORY, BROWSING_FOR_IMATRIX_FILE, SELECT_IMATRIX_FILE, RAM_USAGE_FORMAT
- global CPU_USAGE_FORMAT, VALIDATING_QUANTIZATION_INPUTS, MODELS_PATH_REQUIRED, OUTPUT_PATH_REQUIRED, LOGS_PATH_REQUIRED
- global STARTING_MODEL_QUANTIZATION, INPUT_FILE_NOT_EXIST, QUANTIZING_MODEL_TO, QUANTIZATION_TASK_STARTED, ERROR_STARTING_QUANTIZATION
- global UPDATING_MODEL_INFO, TASK_FINISHED, SHOWING_TASK_DETAILS_FOR, BROWSING_FOR_IMATRIX_DATA_FILE, SELECT_DATA_FILE
- global BROWSING_FOR_IMATRIX_MODEL_FILE, SELECT_MODEL_FILE, BROWSING_FOR_IMATRIX_OUTPUT_FILE, SELECT_OUTPUT_FILE
- global STARTING_IMATRIX_GENERATION, BACKEND_PATH_NOT_EXIST, GENERATING_IMATRIX, ERROR_STARTING_IMATRIX_GENERATION
- global IMATRIX_GENERATION_TASK_STARTED, ERROR_MESSAGE, TASK_ERROR, APPLICATION_CLOSING, APPLICATION_CLOSED, SELECT_QUANTIZATION_TYPE
- global ALLOWS_REQUANTIZING, LEAVE_OUTPUT_WEIGHT, DISABLE_K_QUANT_MIXTURES, USE_DATA_AS_IMPORTANCE_MATRIX, USE_IMPORTANCE_MATRIX_FOR_TENSORS
- global DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS, OUTPUT_TENSOR_TYPE, USE_THIS_TYPE_FOR_OUTPUT_WEIGHT, TOKEN_EMBEDDING_TYPE, USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS
- global WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS, OVERRIDE_MODEL_METADATA, INPUT_DATA_FILE_FOR_IMATRIX, MODEL_TO_BE_QUANTIZED
- global OUTPUT_PATH_FOR_GENERATED_IMATRIX, HOW_OFTEN_TO_SAVE_IMATRIX, SET_GPU_OFFLOAD_VALUE, COMPLETED, REFRESH_MODELS
- global CONTEXT_SIZE, CONTEXT_SIZE_FOR_IMATRIX, THREADS, NUMBER_OF_THREADS_FOR_IMATRIX, EXTRA_ARGUMENTS, EXTRA_ARGUMENTS_LABEL
- global LORA_CONVERSION, LORA_INPUT_PATH, LORA_OUTPUT_PATH, SELECT_LORA_INPUT_DIRECTORY, SELECT_LORA_OUTPUT_FILE
- global CONVERT_LORA, STARTING_LORA_CONVERSION, LORA_INPUT_PATH_REQUIRED, LORA_OUTPUT_PATH_REQUIRED, ERROR_STARTING_LORA_CONVERSION
- global LORA_CONVERSION_TASK_STARTED, BIN_FILES, BROWSING_FOR_LORA_INPUT_DIRECTORY, BROWSING_FOR_LORA_OUTPUT_FILE, CONVERTING_LORA
- global LORA_CONVERSION_FINISHED, LORA_FILE_MOVED, LORA_FILE_NOT_FOUND, ERROR_MOVING_LORA_FILE, EXPORT_LORA
- global MODEL_PATH_REQUIRED, AT_LEAST_ONE_LORA_ADAPTER_REQUIRED, INVALID_LORA_SCALE_VALUE, ERROR_STARTING_LORA_EXPORT, LORA_EXPORT_TASK_STARTED
- global GGML_LORA_ADAPTERS, SELECT_LORA_ADAPTER_FILES, ADD_ADAPTER, DELETE_ADAPTER, LORA_SCALE
- global ENTER_LORA_SCALE_VALUE, NUMBER_OF_THREADS_FOR_LORA_EXPORT, EXPORTING_LORA, BROWSING_FOR_EXPORT_LORA_MODEL_FILE, BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE
- global ADDING_LORA_ADAPTER, DELETING_LORA_ADAPTER, LORA_FILES, SELECT_LORA_ADAPTER_FILE, STARTING_LORA_EXPORT
- global OUTPUT_TYPE, SELECT_OUTPUT_TYPE, GGUF_AND_BIN_FILES, BASE_MODEL, SELECT_BASE_MODEL_FILE
- global BASE_MODEL_PATH_REQUIRED, BROWSING_FOR_BASE_MODEL_FILE, SELECT_BASE_MODEL_FOLDER, BROWSING_FOR_BASE_MODEL_FOLDER
- global LORA_CONVERSION_FROM_TO, GENERATING_IMATRIX_FOR, MODEL_PATH_REQUIRED_FOR_IMATRIX, NO_ASSET_SELECTED_FOR_CUDA_CHECK, QUANTIZATION_COMMAND
- global IMATRIX_GENERATION_COMMAND, LORA_CONVERSION_COMMAND, LORA_EXPORT_COMMAND
-
- loc = _languages.get(lang_code, _English)()
- english_loc = _English() # Create an instance of English localization for fallback
-
- for key in dir(english_loc):
- if not key.startswith('_'):
- globals()[key] = getattr(loc, key, getattr(english_loc, key))
-
-# Get the language from the AUTOGGUF_LANGUAGE environment variable, default to 'en'
-language_code = os.getenv('AUTOGGUF_LANGUAGE', 'en-US')
-
-# Set default language
-set_language(language_code)
\ No newline at end of file
+import os
+
+
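+# Base container for every user-facing string. Each key defaults to an empty
+# string, so locale subclasses only need to override the keys they translate;
+# many values carry str.format() placeholders such as {0} or {}. set_language()
+# (at the end of this module) copies each public attribute defined on _English
+# into module-level globals, taking the value from the selected locale and
+# falling back to the _English string only when the locale instance lacks the
+# attribute entirely.
+#
+# Usage sketch (the resulting values are the fr-FR strings defined below):
+#   os.environ["AUTOGGUF_LANGUAGE"] = "fr-FR"
+#   set_language(os.getenv("AUTOGGUF_LANGUAGE", "en-US"))
+#   WINDOW_TITLE  # -> "AutoGGUF (quantificateur automatisé de modèles GGUF)"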
+class _Localization:
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = ""
+ self.RAM_USAGE = ""
+ self.CPU_USAGE = ""
+ self.BACKEND = ""
+ self.REFRESH_BACKENDS = ""
+ self.MODELS_PATH = ""
+ self.OUTPUT_PATH = ""
+ self.LOGS_PATH = ""
+ self.BROWSE = ""
+ self.AVAILABLE_MODELS = ""
+ self.QUANTIZATION_TYPE = ""
+ self.ALLOW_REQUANTIZE = ""
+ self.LEAVE_OUTPUT_TENSOR = ""
+ self.PURE = ""
+ self.IMATRIX = ""
+ self.INCLUDE_WEIGHTS = ""
+ self.EXCLUDE_WEIGHTS = ""
+ self.USE_OUTPUT_TENSOR_TYPE = ""
+ self.USE_TOKEN_EMBEDDING_TYPE = ""
+ self.KEEP_SPLIT = ""
+ self.KV_OVERRIDES = ""
+ self.ADD_NEW_OVERRIDE = ""
+ self.QUANTIZE_MODEL = ""
+ self.SAVE_PRESET = ""
+ self.LOAD_PRESET = ""
+ self.TASKS = ""
+ self.DOWNLOAD_LLAMACPP = ""
+ self.SELECT_RELEASE = ""
+ self.SELECT_ASSET = ""
+ self.EXTRACT_CUDA_FILES = ""
+ self.SELECT_CUDA_BACKEND = ""
+ self.DOWNLOAD = ""
+ self.IMATRIX_GENERATION = ""
+ self.DATA_FILE = ""
+ self.MODEL = ""
+ self.OUTPUT = ""
+ self.OUTPUT_FREQUENCY = ""
+ self.GPU_OFFLOAD = ""
+ self.AUTO = ""
+ self.GENERATE_IMATRIX = ""
+ self.ERROR = ""
+ self.WARNING = ""
+ self.PROPERTIES = ""
+ self.CANCEL = ""
+ self.RESTART = ""
+ self.DELETE = ""
+ self.CONFIRM_DELETION = ""
+ self.TASK_RUNNING_WARNING = ""
+ self.YES = ""
+ self.NO = ""
+ self.DOWNLOAD_COMPLETE = ""
+ self.CUDA_EXTRACTION_FAILED = ""
+ self.PRESET_SAVED = ""
+ self.PRESET_LOADED = ""
+ self.NO_ASSET_SELECTED = ""
+ self.DOWNLOAD_FAILED = ""
+ self.NO_BACKEND_SELECTED = ""
+ self.NO_MODEL_SELECTED = ""
+ self.REFRESH_RELEASES = ""
+ self.NO_SUITABLE_CUDA_BACKENDS = ""
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = ""
+ self.CUDA_FILES_EXTRACTED = ""
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = ""
+ self.ERROR_FETCHING_RELEASES = ""
+ self.CONFIRM_DELETION_TITLE = ""
+ self.LOG_FOR = ""
+ self.ALL_FILES = ""
+ self.GGUF_FILES = ""
+ self.DAT_FILES = ""
+ self.JSON_FILES = ""
+ self.FAILED_LOAD_PRESET = ""
+ self.INITIALIZING_AUTOGGUF = ""
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = ""
+ self.REFRESHING_BACKENDS = ""
+ self.NO_BACKENDS_AVAILABLE = ""
+ self.FOUND_VALID_BACKENDS = ""
+ self.SAVING_PRESET = ""
+ self.PRESET_SAVED_TO = ""
+ self.LOADING_PRESET = ""
+ self.PRESET_LOADED_FROM = ""
+ self.ADDING_KV_OVERRIDE = ""
+ self.SAVING_TASK_PRESET = ""
+ self.TASK_PRESET_SAVED = ""
+ self.TASK_PRESET_SAVED_TO = ""
+ self.RESTARTING_TASK = ""
+ self.IN_PROGRESS = ""
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = ""
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = ""
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = ""
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = ""
+ self.REFRESHING_LLAMACPP_RELEASES = ""
+ self.UPDATING_ASSET_LIST = ""
+ self.UPDATING_CUDA_OPTIONS = ""
+ self.STARTING_LLAMACPP_DOWNLOAD = ""
+ self.UPDATING_CUDA_BACKENDS = ""
+ self.NO_CUDA_BACKEND_SELECTED = ""
+ self.EXTRACTING_CUDA_FILES = ""
+ self.DOWNLOAD_ERROR = ""
+ self.SHOWING_TASK_CONTEXT_MENU = ""
+ self.SHOWING_PROPERTIES_FOR_TASK = ""
+ self.CANCELLING_TASK = ""
+ self.CANCELED = ""
+ self.DELETING_TASK = ""
+ self.LOADING_MODELS = ""
+ self.LOADED_MODELS = ""
+ self.BROWSING_FOR_MODELS_DIRECTORY = ""
+ self.SELECT_MODELS_DIRECTORY = ""
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = ""
+ self.SELECT_OUTPUT_DIRECTORY = ""
+ self.BROWSING_FOR_LOGS_DIRECTORY = ""
+ self.SELECT_LOGS_DIRECTORY = ""
+ self.BROWSING_FOR_IMATRIX_FILE = ""
+ self.SELECT_IMATRIX_FILE = ""
+ self.RAM_USAGE_FORMAT = ""
+ self.CPU_USAGE_FORMAT = ""
+ self.VALIDATING_QUANTIZATION_INPUTS = ""
+ self.MODELS_PATH_REQUIRED = ""
+ self.OUTPUT_PATH_REQUIRED = ""
+ self.LOGS_PATH_REQUIRED = ""
+ self.STARTING_MODEL_QUANTIZATION = ""
+ self.INPUT_FILE_NOT_EXIST = ""
+ self.QUANTIZING_MODEL_TO = ""
+ self.QUANTIZATION_TASK_STARTED = ""
+ self.ERROR_STARTING_QUANTIZATION = ""
+ self.UPDATING_MODEL_INFO = ""
+ self.TASK_FINISHED = ""
+ self.SHOWING_TASK_DETAILS_FOR = ""
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = ""
+ self.SELECT_DATA_FILE = ""
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = ""
+ self.SELECT_MODEL_FILE = ""
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = ""
+ self.SELECT_OUTPUT_FILE = ""
+ self.STARTING_IMATRIX_GENERATION = ""
+ self.BACKEND_PATH_NOT_EXIST = ""
+ self.GENERATING_IMATRIX = ""
+ self.ERROR_STARTING_IMATRIX_GENERATION = ""
+ self.IMATRIX_GENERATION_TASK_STARTED = ""
+ self.ERROR_MESSAGE = ""
+ self.TASK_ERROR = ""
+ self.APPLICATION_CLOSING = ""
+ self.APPLICATION_CLOSED = ""
+ self.SELECT_QUANTIZATION_TYPE = ""
+ self.ALLOWS_REQUANTIZING = ""
+ self.LEAVE_OUTPUT_WEIGHT = ""
+ self.DISABLE_K_QUANT_MIXTURES = ""
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = ""
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = ""
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = ""
+ self.OUTPUT_TENSOR_TYPE = ""
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = ""
+ self.TOKEN_EMBEDDING_TYPE = ""
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = ""
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = ""
+ self.OVERRIDE_MODEL_METADATA = ""
+ self.INPUT_DATA_FILE_FOR_IMATRIX = ""
+ self.MODEL_TO_BE_QUANTIZED = ""
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = ""
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = ""
+ self.SET_GPU_OFFLOAD_VALUE = ""
+ self.COMPLETED = ""
+ self.REFRESH_MODELS = ""
+ self.EXTRA_ARGUMENTS = ""
+ self.EXTRA_ARGUMENTS_LABEL = ""
+ self.CONTEXT_SIZE = ""
+ self.CONTEXT_SIZE_FOR_IMATRIX = ""
+ self.THREADS = ""
+ self.NUMBER_OF_THREADS_FOR_IMATRIX = ""
+ self.LORA_CONVERSION = ""
+ self.LORA_INPUT_PATH = ""
+ self.LORA_OUTPUT_PATH = ""
+ self.SELECT_LORA_INPUT_DIRECTORY = ""
+ self.SELECT_LORA_OUTPUT_FILE = ""
+ self.CONVERT_LORA = ""
+ self.STARTING_LORA_CONVERSION = ""
+ self.LORA_INPUT_PATH_REQUIRED = ""
+ self.LORA_OUTPUT_PATH_REQUIRED = ""
+ self.ERROR_STARTING_LORA_CONVERSION = ""
+ self.LORA_CONVERSION_TASK_STARTED = ""
+ self.BIN_FILES = ""
+ self.BROWSING_FOR_LORA_INPUT_DIRECTORY = ""
+ self.BROWSING_FOR_LORA_OUTPUT_FILE = ""
+ self.CONVERTING_LORA = ""
+ self.LORA_CONVERSION_FINISHED = ""
+ self.LORA_FILE_MOVED = ""
+ self.LORA_FILE_NOT_FOUND = ""
+ self.ERROR_MOVING_LORA_FILE = ""
+ self.EXPORT_LORA = ""
+ self.MODEL_PATH_REQUIRED = ""
+ self.OUTPUT_PATH_REQUIRED = ""
+ self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = ""
+ self.INVALID_LORA_SCALE_VALUE = ""
+ self.ERROR_STARTING_LORA_EXPORT = ""
+ self.LORA_EXPORT_TASK_STARTED = ""
+ self.GGML_LORA_ADAPTERS = ""
+ self.SELECT_LORA_ADAPTER_FILES = ""
+ self.ADD_ADAPTER = ""
+ self.DELETE_ADAPTER = ""
+ self.LORA_SCALE = ""
+ self.ENTER_LORA_SCALE_VALUE = ""
+ self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = ""
+ self.EXPORTING_LORA = ""
+ self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = ""
+ self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = ""
+ self.ADDING_LORA_ADAPTER = ""
+ self.DELETING_LORA_ADAPTER = ""
+ self.LORA_FILES = ""
+ self.SELECT_LORA_ADAPTER_FILE = ""
+ self.STARTING_LORA_EXPORT = ""
+ self.OUTPUT_TYPE = ""
+ self.SELECT_OUTPUT_TYPE = ""
+ self.GGUF_AND_BIN_FILES = ""
+ self.BASE_MODEL = ""
+ self.SELECT_BASE_MODEL_FILE = ""
+ self.BASE_MODEL_PATH_REQUIRED = ""
+ self.BROWSING_FOR_BASE_MODEL_FILE = ""
+ self.SELECT_BASE_MODEL_FOLDER = ""
+ self.BROWSING_FOR_BASE_MODEL_FOLDER = ""
+ self.LORA_CONVERSION_FROM_TO = ""
+ self.GENERATING_IMATRIX_FOR = ""
+ self.MODEL_PATH_REQUIRED_FOR_IMATRIX = ""
+ self.NO_ASSET_SELECTED_FOR_CUDA_CHECK = ""
+ self.QUANTIZATION_COMMAND = ""
+ self.IMATRIX_GENERATION_COMMAND = ""
+ self.LORA_CONVERSION_COMMAND = ""
+ self.LORA_EXPORT_COMMAND = ""
+
+
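+# Reference locale: set_language() iterates the public attributes of an
+# _English instance, so the keys defined here (including those under the TODO
+# below) constitute the full key set for every locale.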
+class _English(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)"
+ self.RAM_USAGE = "RAM Usage:"
+ self.CPU_USAGE = "CPU Usage:"
+ self.BACKEND = "Llama.cpp Backend:"
+ self.REFRESH_BACKENDS = "Refresh Backends"
+ self.MODELS_PATH = "Models Path:"
+ self.OUTPUT_PATH = "Output Path:"
+ self.LOGS_PATH = "Logs Path:"
+ self.BROWSE = "Browse"
+ self.AVAILABLE_MODELS = "Available Models:"
+ self.QUANTIZATION_TYPE = "Quantization Type:"
+ self.ALLOW_REQUANTIZE = "Allow Requantize"
+ self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
+ self.PURE = "Pure"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Include Weights:"
+ self.EXCLUDE_WEIGHTS = "Exclude Weights:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
+ self.KEEP_SPLIT = "Keep Split"
+ self.KV_OVERRIDES = "KV Overrides:"
+ self.ADD_NEW_OVERRIDE = "Add new override"
+ self.QUANTIZE_MODEL = "Quantize Model"
+ self.SAVE_PRESET = "Save Preset"
+ self.LOAD_PRESET = "Load Preset"
+ self.TASKS = "Tasks:"
+ self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
+ self.SELECT_RELEASE = "Select Release:"
+ self.SELECT_ASSET = "Select Asset:"
+ self.EXTRACT_CUDA_FILES = "Extract CUDA files"
+ self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
+ self.DOWNLOAD = "Download"
+ self.IMATRIX_GENERATION = "IMatrix Generation"
+ self.DATA_FILE = "Data File:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Output:"
+ self.OUTPUT_FREQUENCY = "Output Frequency:"
+ self.GPU_OFFLOAD = "GPU Offload:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Generate IMatrix"
+ self.ERROR = "Error"
+ self.WARNING = "Warning"
+ self.PROPERTIES = "Properties"
+ self.CANCEL = "Cancel"
+ self.RESTART = "Restart"
+ self.DELETE = "Delete"
+ self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
+ self.TASK_RUNNING_WARNING = (
+ "Some tasks are still running. Are you sure you want to quit?"
+ )
+ self.YES = "Yes"
+ self.NO = "No"
+ self.DOWNLOAD_COMPLETE = "Download Complete"
+ self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
+ self.PRESET_SAVED = "Preset Saved"
+ self.PRESET_LOADED = "Preset Loaded"
+ self.NO_ASSET_SELECTED = "No asset selected"
+ self.DOWNLOAD_FAILED = "Download failed"
+ self.NO_BACKEND_SELECTED = "No backend selected"
+ self.NO_MODEL_SELECTED = "No model selected"
+ self.REFRESH_RELEASES = "Refresh Releases"
+ self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = (
+ "llama.cpp binary downloaded and extracted to {0}"
+ )
+ self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
+ self.LOG_FOR = "Log for {0}"
+ self.ALL_FILES = "All Files (*)"
+ self.GGUF_FILES = "GGUF Files (*.gguf)"
+ self.DAT_FILES = "DAT Files (*.dat)"
+ self.JSON_FILES = "JSON Files (*.json)"
+ self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
+ self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete"
+ self.REFRESHING_BACKENDS = "Refreshing backends"
+ self.NO_BACKENDS_AVAILABLE = "No backends available"
+ self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
+ self.SAVING_PRESET = "Saving preset"
+ self.PRESET_SAVED_TO = "Preset saved to {0}"
+ self.LOADING_PRESET = "Loading preset"
+ self.PRESET_LOADED_FROM = "Preset loaded from {0}"
+ self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
+ self.SAVING_TASK_PRESET = "Saving task preset for {0}"
+ self.TASK_PRESET_SAVED = "Task Preset Saved"
+ self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
+ self.RESTARTING_TASK = "Restarting task: {0}"
+ self.IN_PROGRESS = "In Progress"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp binary downloaded and extracted to {0}"
+ )
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp binary downloaded and extracted to {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
+ self.UPDATING_ASSET_LIST = "Updating asset list"
+ self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
+ self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
+ self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
+ self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
+ self.DOWNLOAD_ERROR = "Download error: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
+ self.CANCELLING_TASK = "Cancelling task: {0}"
+ self.CANCELED = "Canceled"
+ self.DELETING_TASK = "Deleting task: {0}"
+ self.LOADING_MODELS = "Loading models"
+ self.LOADED_MODELS = "Loaded {0} models"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
+ self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
+ self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
+ self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
+ self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
+ self.SELECT_IMATRIX_FILE = "Select IMatrix File"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs"
+ self.MODELS_PATH_REQUIRED = "Models path is required"
+ self.OUTPUT_PATH_REQUIRED = "Output path is required"
+ self.LOGS_PATH_REQUIRED = "Logs path is required"
+ self.STARTING_MODEL_QUANTIZATION = "Starting model quantization"
+ self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
+ self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}"
+ self.UPDATING_MODEL_INFO = "Updating model info: {0}"
+ self.TASK_FINISHED = "Task finished: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
+ self.SELECT_DATA_FILE = "Select Data File"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
+ self.SELECT_MODEL_FILE = "Select Model File"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
+ self.SELECT_OUTPUT_FILE = "Select Output File"
+ self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
+ self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
+ self.GENERATING_IMATRIX = "Generating IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Error starting IMatrix generation: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
+ self.ERROR_MESSAGE = "Error: {0}"
+ self.TASK_ERROR = "Task error: {0}"
+ self.APPLICATION_CLOSING = "Application closing"
+ self.APPLICATION_CLOSED = "Application closed"
+ self.SELECT_QUANTIZATION_TYPE = "Select the quantization type"
+ self.ALLOWS_REQUANTIZING = (
+ "Allows requantizing tensors that have already been quantized"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Disable k-quant mixtures and quantize all tensors to the same type"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "Use data in file as importance matrix for quant optimizations"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Use importance matrix for these tensors"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Don't use importance matrix for these tensors"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Use this type for the output.weight tensor"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Use this type for the token embeddings tensor"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Will generate quantized model in the same shards as input"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Override model metadata"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
+ self.MODEL_TO_BE_QUANTIZED = "Model to be quantized"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
+ self.COMPLETED = "Completed"
+ # TODO: Add the following keys to other languages
+ self.REFRESH_MODELS = "Refresh Models"
+ self.EXTRA_ARGUMENTS = "Extra Arguments:"
+ self.EXTRA_ARGUMENTS_LABEL = "Additional command-line arguments"
+ self.CONTEXT_SIZE = "Context Size:"
+ self.CONTEXT_SIZE_FOR_IMATRIX = "Context size for IMatrix generation"
+ self.THREADS = "Threads:"
+ self.NUMBER_OF_THREADS_FOR_IMATRIX = "Number of threads for IMatrix generation"
+ self.LORA_CONVERSION = "LoRA Conversion"
+ self.LORA_INPUT_PATH = "LoRA Input Path"
+ self.LORA_OUTPUT_PATH = "LoRA Output Path"
+ self.SELECT_LORA_INPUT_DIRECTORY = "Select LoRA Input Directory"
+ self.SELECT_LORA_OUTPUT_FILE = "Select LoRA Output File"
+ self.CONVERT_LORA = "Convert LoRA"
+ self.STARTING_LORA_CONVERSION = "Starting LoRA Conversion"
+ self.LORA_INPUT_PATH_REQUIRED = "LoRA input path is required."
+ self.LORA_OUTPUT_PATH_REQUIRED = "LoRA output path is required."
+ self.ERROR_STARTING_LORA_CONVERSION = "Error starting LoRA conversion: {}"
+ self.LORA_CONVERSION_TASK_STARTED = "LoRA conversion task started."
+ self.BIN_FILES = "Binary Files (*.bin)"
+ self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "Browsing for LoRA input directory..."
+ self.BROWSING_FOR_LORA_OUTPUT_FILE = "Browsing for LoRA output file..."
+ self.CONVERTING_LORA = "LoRA Conversion"
+ self.LORA_CONVERSION_FINISHED = "LoRA conversion finished."
+ self.LORA_FILE_MOVED = "LoRA file moved from {} to {}."
+ self.LORA_FILE_NOT_FOUND = "LoRA file not found: {}."
+ self.ERROR_MOVING_LORA_FILE = "Error moving LoRA file: {}"
+ self.EXPORT_LORA = "Export LoRA"
+ self.MODEL_PATH_REQUIRED = "Model path is required."
+ self.OUTPUT_PATH_REQUIRED = "Output path is required."
+ self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = (
+ "At least one LoRA adapter is required."
+ )
+ self.INVALID_LORA_SCALE_VALUE = "Invalid LoRA scale value."
+ self.ERROR_STARTING_LORA_EXPORT = "Error starting LoRA export: {}"
+ self.LORA_EXPORT_TASK_STARTED = "LoRA export task started."
+ self.GGML_LORA_ADAPTERS = "GGML LoRA Adapters"
+ self.SELECT_LORA_ADAPTER_FILES = "Select LoRA Adapter Files"
+ self.ADD_ADAPTER = "Add Adapter"
+ self.DELETE_ADAPTER = "Delete"
+ self.LORA_SCALE = "LoRA Scale"
+ self.ENTER_LORA_SCALE_VALUE = "Enter LoRA Scale Value (Optional)"
+ self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Number of Threads for LoRA Export"
+ self.EXPORTING_LORA = "Exporting LoRA..."
+ self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = (
+ "Browsing for Export LoRA Model File..."
+ )
+ self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = (
+ "Browsing for Export LoRA Output File..."
+ )
+ self.ADDING_LORA_ADAPTER = "Adding LoRA Adapter..."
+ self.DELETING_LORA_ADAPTER = "Deleting LoRA Adapter..."
+ self.LORA_FILES = "LoRA Files (*.bin)"
+ self.SELECT_LORA_ADAPTER_FILE = "Select LoRA Adapter File"
+ self.STARTING_LORA_EXPORT = "Starting LoRA export..."
+ self.OUTPUT_TYPE = "Output Type"
+ self.SELECT_OUTPUT_TYPE = "Select Output Type (GGUF or GGML)"
+ self.GGUF_AND_BIN_FILES = "GGUF and Binary Files (*.gguf *.bin)"
+ self.BASE_MODEL = "Base Model"
+ self.SELECT_BASE_MODEL_FILE = "Select Base Model File (GGUF)"
+ self.BASE_MODEL_PATH_REQUIRED = "Base model path is required for GGUF output."
+ self.BROWSING_FOR_BASE_MODEL_FILE = "Browsing for base model file..."
+ self.SELECT_BASE_MODEL_FOLDER = (
+ "Select Base Model Folder (containing safetensors)"
+ )
+ self.BROWSING_FOR_BASE_MODEL_FOLDER = "Browsing for base model folder..."
+ self.LORA_CONVERSION_FROM_TO = "LoRA Conversion from {} to {}"
+ self.GENERATING_IMATRIX_FOR = "Generating IMatrix for {}"
+ self.MODEL_PATH_REQUIRED_FOR_IMATRIX = (
+ "Model path is required for IMatrix generation."
+ )
+ self.NO_ASSET_SELECTED_FOR_CUDA_CHECK = "No asset selected for CUDA check"
+ self.QUANTIZATION_COMMAND = "Quantization command"
+ self.IMATRIX_GENERATION_COMMAND = "IMatrix generation command"
+ self.LORA_CONVERSION_COMMAND = "LoRA conversion command"
+ self.LORA_EXPORT_COMMAND = "LoRA export command"
+
+
+class _French(_Localization):
+ # French localization
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (quantificateur automatisé de modèles GGUF)"
+ self.RAM_USAGE = "Utilisation RAM :"
+ self.CPU_USAGE = "Utilisation CPU :"
+ self.BACKEND = "Backend Llama.cpp :"
+ self.REFRESH_BACKENDS = "Actualiser les Backends"
+ self.MODELS_PATH = "Chemin des Modèles :"
+ self.OUTPUT_PATH = "Chemin de Sortie :"
+ self.LOGS_PATH = "Chemin des Logs :"
+ self.BROWSE = "Parcourir"
+ self.AVAILABLE_MODELS = "Modèles Disponibles :"
+ self.QUANTIZATION_TYPE = "Type de Quantification :"
+ self.ALLOW_REQUANTIZE = "Autoriser la Requantification"
+ self.LEAVE_OUTPUT_TENSOR = "Laisser le Tenseur de Sortie"
+ self.PURE = "Pur"
+ self.IMATRIX = "IMatrix :"
+ self.INCLUDE_WEIGHTS = "Inclure les Poids :"
+ self.EXCLUDE_WEIGHTS = "Exclure les Poids :"
+ self.USE_OUTPUT_TENSOR_TYPE = "Utiliser le Type de Tenseur de Sortie"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Utiliser le Type d'Embedding de Token"
+ self.KEEP_SPLIT = "Garder la Division"
+ self.KV_OVERRIDES = "Remplacements KV :"
+ self.ADD_NEW_OVERRIDE = "Ajouter un nouveau remplacement"
+ self.QUANTIZE_MODEL = "Quantifier le Modèle"
+ self.SAVE_PRESET = "Sauvegarder le Préréglage"
+ self.LOAD_PRESET = "Charger le Préréglage"
+ self.TASKS = "Tâches :"
+ self.DOWNLOAD_LLAMACPP = "Télécharger llama.cpp"
+ self.SELECT_RELEASE = "Sélectionner la Version :"
+ self.SELECT_ASSET = "Sélectionner l'Asset :"
+ self.EXTRACT_CUDA_FILES = "Extraire les fichiers CUDA"
+ self.SELECT_CUDA_BACKEND = "Sélectionner le Backend CUDA :"
+ self.DOWNLOAD = "Télécharger"
+ self.IMATRIX_GENERATION = "Génération IMatrix"
+ self.DATA_FILE = "Fichier de Données :"
+ self.MODEL = "Modèle :"
+ self.OUTPUT = "Sortie :"
+ self.OUTPUT_FREQUENCY = "Fréquence de Sortie :"
+ self.GPU_OFFLOAD = "Déchargement GPU :"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Générer IMatrix"
+ self.ERROR = "Erreur"
+ self.WARNING = "Avertissement"
+ self.PROPERTIES = "Propriétés"
+ self.CANCEL = "Annuler"
+ self.RESTART = "Redémarrer"
+ self.DELETE = "Supprimer"
+ self.CONFIRM_DELETION = "Êtes-vous sûr de vouloir supprimer cette tâche ?"
+ self.TASK_RUNNING_WARNING = (
+ "Certaines tâches sont encore en cours. Êtes-vous sûr de vouloir quitter ?"
+ )
+ self.YES = "Oui"
+ self.NO = "Non"
+ self.DOWNLOAD_COMPLETE = "Téléchargement Terminé"
+ self.CUDA_EXTRACTION_FAILED = "Échec de l'Extraction CUDA"
+ self.PRESET_SAVED = "Préréglage Sauvegardé"
+ self.PRESET_LOADED = "Préréglage Chargé"
+ self.NO_ASSET_SELECTED = "Aucun asset sélectionné"
+ self.DOWNLOAD_FAILED = "Échec du téléchargement"
+ self.NO_BACKEND_SELECTED = "Aucun backend sélectionné"
+ self.NO_MODEL_SELECTED = "Aucun modèle sélectionné"
+ self.REFRESH_RELEASES = "Actualiser les Versions"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Aucun backend CUDA approprié trouvé"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}\nFichiers CUDA extraits vers {1}"
+ self.CUDA_FILES_EXTRACTED = "Fichiers CUDA extraits vers"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Aucun backend CUDA approprié trouvé pour l'extraction"
+ )
+ self.ERROR_FETCHING_RELEASES = (
+ "Erreur lors de la récupération des versions : {0}"
+ )
+ self.CONFIRM_DELETION_TITLE = "Confirmer la Suppression"
+ self.LOG_FOR = "Log pour {0}"
+ self.ALL_FILES = "Tous les Fichiers (*)"
+ self.GGUF_FILES = "Fichiers GGUF (*.gguf)"
+ self.DAT_FILES = "Fichiers DAT (*.dat)"
+ self.JSON_FILES = "Fichiers JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Échec du chargement du préréglage : {0}"
+ self.INITIALIZING_AUTOGGUF = "Initialisation de l'application AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Initialisation d'AutoGGUF terminée"
+ self.REFRESHING_BACKENDS = "Actualisation des backends"
+ self.NO_BACKENDS_AVAILABLE = "Aucun backend disponible"
+ self.FOUND_VALID_BACKENDS = "{0} backends valides trouvés"
+ self.SAVING_PRESET = "Sauvegarde du préréglage"
+ self.PRESET_SAVED_TO = "Préréglage sauvegardé dans {0}"
+ self.LOADING_PRESET = "Chargement du préréglage"
+ self.PRESET_LOADED_FROM = "Préréglage chargé depuis {0}"
+ self.ADDING_KV_OVERRIDE = "Ajout du remplacement KV : {0}"
+ self.SAVING_TASK_PRESET = "Sauvegarde du préréglage de tâche pour {0}"
+ self.TASK_PRESET_SAVED = "Préréglage de Tâche Sauvegardé"
+ self.TASK_PRESET_SAVED_TO = "Préréglage de tâche sauvegardé dans {0}"
+ self.RESTARTING_TASK = "Redémarrage de la tâche : {0}"
+ self.IN_PROGRESS = "En Cours"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "Téléchargement terminé. Extrait vers : {0}"
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binaire llama.cpp téléchargé et extrait vers {0}\nFichiers CUDA extraits vers {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Aucun backend CUDA approprié trouvé pour l'extraction"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Binaire llama.cpp téléchargé et extrait vers {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Actualisation des versions de llama.cpp"
+ self.UPDATING_ASSET_LIST = "Mise à jour de la liste des assets"
+ self.UPDATING_CUDA_OPTIONS = "Mise à jour des options CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Démarrage du téléchargement de llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Mise à jour des backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = (
+ "Aucun backend CUDA sélectionné pour l'extraction"
+ )
+ self.EXTRACTING_CUDA_FILES = "Extraction des fichiers CUDA de {0} vers {1}"
+ self.DOWNLOAD_ERROR = "Erreur de téléchargement : {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Affichage du menu contextuel de tâche"
+ self.SHOWING_PROPERTIES_FOR_TASK = (
+ "Affichage des propriétés pour la tâche : {0}"
+ )
+ self.CANCELLING_TASK = "Annulation de la tâche : {0}"
+ self.CANCELED = "Annulé"
+ self.DELETING_TASK = "Suppression de la tâche : {0}"
+ self.LOADING_MODELS = "Chargement des modèles"
+ self.LOADED_MODELS = "{0} modèles chargés"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Recherche du répertoire des modèles"
+ self.SELECT_MODELS_DIRECTORY = "Sélectionner le Répertoire des Modèles"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Recherche du répertoire de sortie"
+ self.SELECT_OUTPUT_DIRECTORY = "Sélectionner le Répertoire de Sortie"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Recherche du répertoire des logs"
+ self.SELECT_LOGS_DIRECTORY = "Sélectionner le Répertoire des Logs"
+ self.BROWSING_FOR_IMATRIX_FILE = "Recherche du fichier IMatrix"
+ self.SELECT_IMATRIX_FILE = "Sélectionner le Fichier IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mo / {2} Mo)"
+ self.CPU_USAGE_FORMAT = "Utilisation CPU : {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validation des entrées de quantification"
+ self.MODELS_PATH_REQUIRED = "Le chemin des modèles est requis"
+ self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis"
+ self.LOGS_PATH_REQUIRED = "Le chemin des logs est requis"
+ self.STARTING_MODEL_QUANTIZATION = "Démarrage de la quantification du modèle"
+ self.INPUT_FILE_NOT_EXIST = "Le fichier d'entrée '{0}' n'existe pas."
+ self.QUANTIZING_MODEL_TO = "Quantification de {0} vers {1}"
+ self.QUANTIZATION_TASK_STARTED = "Tâche de quantification démarrée pour {0}"
+ self.ERROR_STARTING_QUANTIZATION = (
+ "Erreur au démarrage de la quantification : {0}"
+ )
+ self.UPDATING_MODEL_INFO = "Mise à jour des infos du modèle : {0}"
+ self.TASK_FINISHED = "Tâche terminée : {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Affichage des détails de la tâche pour : {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Recherche du fichier de données IMatrix"
+ self.SELECT_DATA_FILE = "Sélectionner le Fichier de Données"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Recherche du fichier modèle IMatrix"
+ self.SELECT_MODEL_FILE = "Sélectionner le Fichier Modèle"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Recherche du fichier de sortie IMatrix"
+ self.SELECT_OUTPUT_FILE = "Sélectionner le Fichier de Sortie"
+ self.STARTING_IMATRIX_GENERATION = "Démarrage de la génération IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Le chemin du backend n'existe pas : {0}"
+ self.GENERATING_IMATRIX = "Génération de l'IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Erreur au démarrage de la génération IMatrix : {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Tâche de génération IMatrix démarrée"
+ self.ERROR_MESSAGE = "Erreur : {0}"
+ self.TASK_ERROR = "Erreur de tâche : {0}"
+ self.APPLICATION_CLOSING = "Fermeture de l'application"
+ self.APPLICATION_CLOSED = "Application fermée"
+ self.SELECT_QUANTIZATION_TYPE = "Sélectionnez le type de quantification"
+ self.ALLOWS_REQUANTIZING = "Permet de requantifier les tenseurs déjà quantifiés"
+ self.LEAVE_OUTPUT_WEIGHT = "Laissera output.weight non (re)quantifié"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Désactive les mélanges k-quant et quantifie tous les tenseurs au même type"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilise les données du fichier comme matrice d'importance pour les optimisations de quant"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Utiliser la matrice d'importance pour ces tenseurs"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Ne pas utiliser la matrice d'importance pour ces tenseurs"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Type de Tenseur de Sortie :"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Utiliser ce type pour le tenseur output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Type d'Embedding de Token :"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Utiliser ce type pour le tenseur des embeddings de token"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Générera le modèle quantifié dans les mêmes shards que l'entrée"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Remplacer les métadonnées du modèle"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Fichier de données d'entrée pour la génération IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modèle à quantifier"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Chemin de sortie pour l'IMatrix généré"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Fréquence de sauvegarde de l'IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)"
+ self.COMPLETED = "Terminé"
+ self.REFRESH_MODELS = "Actualiser les modèles"
+ self.REFRESH_MODELS = "Actualiser les modèles"
+ self.EXTRA_ARGUMENTS = "Arguments supplémentaires :"
+ self.EXTRA_ARGUMENTS_LABEL = "Arguments supplémentaires en ligne de commande"
+ self.CONTEXT_SIZE = "Taille du contexte :"
+ self.CONTEXT_SIZE_FOR_IMATRIX = (
+ "Taille du contexte pour la génération d'IMatrix"
+ )
+ self.THREADS = "Threads :"
+ self.NUMBER_OF_THREADS_FOR_IMATRIX = (
+ "Nombre de threads pour la génération d'IMatrix"
+ )
+ self.LORA_CONVERSION = "Conversion LoRA"
+ self.LORA_INPUT_PATH = "Chemin d'entrée LoRA"
+ self.LORA_OUTPUT_PATH = "Chemin de sortie LoRA"
+ self.SELECT_LORA_INPUT_DIRECTORY = "Sélectionner le répertoire d'entrée LoRA"
+ self.SELECT_LORA_OUTPUT_FILE = "Sélectionner le fichier de sortie LoRA"
+ self.CONVERT_LORA = "Convertir LoRA"
+ self.STARTING_LORA_CONVERSION = "Démarrage de la conversion LoRA"
+ self.LORA_INPUT_PATH_REQUIRED = "Le chemin d'entrée LoRA est requis."
+ self.LORA_OUTPUT_PATH_REQUIRED = "Le chemin de sortie LoRA est requis."
+ self.ERROR_STARTING_LORA_CONVERSION = (
+ "Erreur lors du démarrage de la conversion LoRA : {}"
+ )
+ self.LORA_CONVERSION_TASK_STARTED = "Tâche de conversion LoRA démarrée."
+ self.BIN_FILES = "Fichiers binaires (*.bin)"
+ self.BROWSING_FOR_LORA_INPUT_DIRECTORY = (
+ "Recherche du répertoire d'entrée LoRA..."
+ )
+ self.BROWSING_FOR_LORA_OUTPUT_FILE = "Recherche du fichier de sortie LoRA..."
+ self.CONVERTING_LORA = "Conversion LoRA"
+ self.LORA_CONVERSION_FINISHED = "Conversion LoRA terminée."
+ self.LORA_FILE_MOVED = "Fichier LoRA déplacé de {} à {}."
+ self.LORA_FILE_NOT_FOUND = "Fichier LoRA non trouvé : {}."
+ self.ERROR_MOVING_LORA_FILE = "Erreur lors du déplacement du fichier LoRA : {}"
+ self.EXPORT_LORA = "Exporter LoRA"
+ self.MODEL_PATH_REQUIRED = "Le chemin du modèle est requis."
+ self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis."
+ self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = (
+ "Au moins un adaptateur LoRA est requis."
+ )
+ self.INVALID_LORA_SCALE_VALUE = "Valeur d'échelle LoRA invalide."
+ self.ERROR_STARTING_LORA_EXPORT = (
+ "Erreur lors du démarrage de l'exportation LoRA : {}"
+ )
+ self.LORA_EXPORT_TASK_STARTED = "Tâche d'exportation LoRA démarrée."
+ self.GGML_LORA_ADAPTERS = "Adaptateurs LoRA GGML"
+ self.SELECT_LORA_ADAPTER_FILES = "Sélectionner les fichiers d'adaptateur LoRA"
+ self.ADD_ADAPTER = "Ajouter un adaptateur"
+ self.DELETE_ADAPTER = "Supprimer"
+ self.LORA_SCALE = "Échelle LoRA"
+ self.ENTER_LORA_SCALE_VALUE = "Entrez la valeur d'échelle LoRA (Optionnel)"
+ self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = (
+ "Nombre de threads pour l'exportation LoRA"
+ )
+ self.EXPORTING_LORA = "Exportation de LoRA..."
+ self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = (
+ "Recherche du fichier de modèle LoRA à exporter..."
+ )
+ self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = (
+ "Recherche du fichier de sortie LoRA à exporter..."
+ )
+ self.ADDING_LORA_ADAPTER = "Ajout d'un adaptateur LoRA..."
+ self.DELETING_LORA_ADAPTER = "Suppression de l'adaptateur LoRA..."
+ self.LORA_FILES = "Fichiers LoRA (*.bin)"
+ self.SELECT_LORA_ADAPTER_FILE = "Sélectionner le fichier d'adaptateur LoRA"
+ self.STARTING_LORA_EXPORT = "Démarrage de l'exportation LoRA..."
+ self.OUTPUT_TYPE = "Type de sortie"
+ self.SELECT_OUTPUT_TYPE = "Sélectionner le type de sortie (GGUF ou GGML)"
+ self.GGUF_AND_BIN_FILES = "Fichiers GGUF et binaires (*.gguf *.bin)"
+ self.BASE_MODEL = "Modèle de base"
+ self.SELECT_BASE_MODEL_FILE = "Sélectionner le fichier du modèle de base (GGUF)"
+ self.BASE_MODEL_PATH_REQUIRED = (
+ "Le chemin du modèle de base est requis pour la sortie GGUF."
+ )
+ self.BROWSING_FOR_BASE_MODEL_FILE = "Recherche du fichier du modèle de base..."
+ self.SELECT_BASE_MODEL_FOLDER = (
+ "Sélectionner le dossier du modèle de base (contenant safetensors)"
+ )
+ self.BROWSING_FOR_BASE_MODEL_FOLDER = (
+ "Recherche du dossier du modèle de base..."
+ )
+ self.LORA_CONVERSION_FROM_TO = "Conversion LoRA de {} à {}"
+ self.GENERATING_IMATRIX_FOR = "Génération d'IMatrix pour {}"
+ self.MODEL_PATH_REQUIRED_FOR_IMATRIX = (
+ "Le chemin du modèle est requis pour la génération d'IMatrix."
+ )
+
+
+class _SimplifiedChinese(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF(自动GGUF模型量化器)"
+ self.RAM_USAGE = "内存使用率:"
+ self.CPU_USAGE = "CPU使用率:"
+ self.BACKEND = "Llama.cpp后端:"
+ self.REFRESH_BACKENDS = "刷新后端"
+ self.MODELS_PATH = "模型路径:"
+ self.OUTPUT_PATH = "输出路径:"
+ self.LOGS_PATH = "日志路径:"
+ self.BROWSE = "浏览"
+ self.AVAILABLE_MODELS = "可用模型:"
+ self.QUANTIZATION_TYPE = "量化类型:"
+ self.ALLOW_REQUANTIZE = "允许重新量化"
+ self.LEAVE_OUTPUT_TENSOR = "保留输出张量"
+ self.PURE = "纯净"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "包含权重:"
+ self.EXCLUDE_WEIGHTS = "排除权重:"
+ self.USE_OUTPUT_TENSOR_TYPE = "使用输出张量类型"
+ self.USE_TOKEN_EMBEDDING_TYPE = "使用令牌嵌入类型"
+ self.KEEP_SPLIT = "保持分割"
+ self.KV_OVERRIDES = "KV覆盖:"
+ self.ADD_NEW_OVERRIDE = "添加新覆盖"
+ self.QUANTIZE_MODEL = "量化模型"
+ self.SAVE_PRESET = "保存预设"
+ self.LOAD_PRESET = "加载预设"
+ self.TASKS = "任务:"
+ self.DOWNLOAD_LLAMACPP = "下载llama.cpp"
+ self.SELECT_RELEASE = "选择发布版本:"
+ self.SELECT_ASSET = "选择资源:"
+ self.EXTRACT_CUDA_FILES = "提取CUDA文件"
+ self.SELECT_CUDA_BACKEND = "选择CUDA后端:"
+ self.DOWNLOAD = "下载"
+ self.IMATRIX_GENERATION = "IMatrix生成"
+ self.DATA_FILE = "数据文件:"
+ self.MODEL = "模型:"
+ self.OUTPUT = "输出:"
+ self.OUTPUT_FREQUENCY = "输出频率:"
+ self.GPU_OFFLOAD = "GPU卸载:"
+ self.AUTO = "自动"
+ self.GENERATE_IMATRIX = "生成IMatrix"
+ self.ERROR = "错误"
+ self.WARNING = "警告"
+ self.PROPERTIES = "属性"
+ self.CANCEL = "取消"
+ self.RESTART = "重启"
+ self.DELETE = "删除"
+ self.CONFIRM_DELETION = "您确定要删除此任务吗?"
+ self.TASK_RUNNING_WARNING = "某些任务仍在运行。您确定要退出吗?"
+ self.YES = "是"
+ self.NO = "否"
+ self.DOWNLOAD_COMPLETE = "下载完成"
+ self.CUDA_EXTRACTION_FAILED = "CUDA提取失败"
+ self.PRESET_SAVED = "预设已保存"
+ self.PRESET_LOADED = "预设已加载"
+ self.NO_ASSET_SELECTED = "未选择资源"
+ self.DOWNLOAD_FAILED = "下载失败"
+ self.NO_BACKEND_SELECTED = "未选择后端"
+ self.NO_MODEL_SELECTED = "未选择模型"
+ self.REFRESH_RELEASES = "刷新发布版本"
+ self.NO_SUITABLE_CUDA_BACKENDS = "未找到合适的CUDA后端"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = (
+ "llama.cpp二进制文件已下载并提取到{0}\nCUDA文件已提取到{1}"
+ )
+ self.CUDA_FILES_EXTRACTED = "CUDA文件已提取到"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "未找到适合提取的CUDA后端"
+ self.ERROR_FETCHING_RELEASES = "获取发布版本时出错:{0}"
+ self.CONFIRM_DELETION_TITLE = "确认删除"
+ self.LOG_FOR = "{0}的日志"
+ self.ALL_FILES = "所有文件 (*)"
+ self.GGUF_FILES = "GGUF文件 (*.gguf)"
+ self.DAT_FILES = "DAT文件 (*.dat)"
+ self.JSON_FILES = "JSON文件 (*.json)"
+ self.FAILED_LOAD_PRESET = "加载预设失败:{0}"
+ self.INITIALIZING_AUTOGGUF = "初始化AutoGGUF应用程序"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF初始化完成"
+ self.REFRESHING_BACKENDS = "刷新后端"
+ self.NO_BACKENDS_AVAILABLE = "没有可用的后端"
+ self.FOUND_VALID_BACKENDS = "找到{0}个有效后端"
+ self.SAVING_PRESET = "保存预设"
+ self.PRESET_SAVED_TO = "预设已保存到{0}"
+ self.LOADING_PRESET = "加载预设"
+ self.PRESET_LOADED_FROM = "从{0}加载了预设"
+ self.ADDING_KV_OVERRIDE = "添加KV覆盖:{0}"
+ self.SAVING_TASK_PRESET = "保存{0}的任务预设"
+ self.TASK_PRESET_SAVED = "任务预设已保存"
+ self.TASK_PRESET_SAVED_TO = "任务预设已保存到{0}"
+ self.RESTARTING_TASK = "重启任务:{0}"
+ self.IN_PROGRESS = "进行中"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "下载完成。已提取到:{0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp二进制文件已下载并提取到{0}\nCUDA文件已提取到{1}"
+ )
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = "未找到适合提取的CUDA后端"
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp二进制文件已下载并提取到{0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "刷新llama.cpp发布版本"
+ self.UPDATING_ASSET_LIST = "更新资源列表"
+ self.UPDATING_CUDA_OPTIONS = "更新CUDA选项"
+ self.STARTING_LLAMACPP_DOWNLOAD = "开始下载llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "更新CUDA后端"
+ self.NO_CUDA_BACKEND_SELECTED = "未选择要提取的CUDA后端"
+ self.EXTRACTING_CUDA_FILES = "从{0}提取CUDA文件到{1}"
+ self.DOWNLOAD_ERROR = "下载错误:{0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "显示任务上下文菜单"
+ self.SHOWING_PROPERTIES_FOR_TASK = "显示任务属性:{0}"
+ self.CANCELLING_TASK = "取消任务:{0}"
+ self.CANCELED = "已取消"
+ self.DELETING_TASK = "删除任务:{0}"
+ self.LOADING_MODELS = "加载模型"
+ self.LOADED_MODELS = "已加载{0}个模型"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "浏览模型目录"
+ self.SELECT_MODELS_DIRECTORY = "选择模型目录"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "浏览输出目录"
+ self.SELECT_OUTPUT_DIRECTORY = "选择输出目录"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "浏览日志目录"
+ self.SELECT_LOGS_DIRECTORY = "选择日志目录"
+ self.BROWSING_FOR_IMATRIX_FILE = "浏览IMatrix文件"
+ self.SELECT_IMATRIX_FILE = "选择IMatrix文件"
+ self.RAM_USAGE_FORMAT = "{0:.1f}%({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU使用率:{0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "验证量化输入"
+ self.MODELS_PATH_REQUIRED = "需要模型路径"
+ self.OUTPUT_PATH_REQUIRED = "需要输出路径"
+ self.LOGS_PATH_REQUIRED = "需要日志路径"
+ self.STARTING_MODEL_QUANTIZATION = "开始模型量化"
+ self.INPUT_FILE_NOT_EXIST = "输入文件'{0}'不存在。"
+ self.QUANTIZING_MODEL_TO = "将{0}量化为{1}"
+ self.QUANTIZATION_TASK_STARTED = "已启动{0}的量化任务"
+ self.ERROR_STARTING_QUANTIZATION = "启动量化时出错:{0}"
+ self.UPDATING_MODEL_INFO = "更新模型信息:{0}"
+ self.TASK_FINISHED = "任务完成:{0}"
+ self.SHOWING_TASK_DETAILS_FOR = "显示任务详情:{0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "浏览IMatrix数据文件"
+ self.SELECT_DATA_FILE = "选择数据文件"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "浏览IMatrix模型文件"
+ self.SELECT_MODEL_FILE = "选择模型文件"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "浏览IMatrix输出文件"
+ self.SELECT_OUTPUT_FILE = "选择输出文件"
+ self.STARTING_IMATRIX_GENERATION = "开始IMatrix生成"
+ self.BACKEND_PATH_NOT_EXIST = "后端路径不存在:{0}"
+ self.GENERATING_IMATRIX = "生成IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = "启动IMatrix生成时出错:{0}"
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix生成任务已启动"
+ self.ERROR_MESSAGE = "错误:{0}"
+ self.TASK_ERROR = "任务错误:{0}"
+ self.APPLICATION_CLOSING = "应用程序正在关闭"
+ self.APPLICATION_CLOSED = "应用程序已关闭"
+ self.SELECT_QUANTIZATION_TYPE = "选择量化类型"
+ self.ALLOWS_REQUANTIZING = "允许重新量化已经量化的张量"
+ self.LEAVE_OUTPUT_WEIGHT = "将保留output.weight不被(重新)量化"
+ self.DISABLE_K_QUANT_MIXTURES = "禁用k-quant混合并将所有张量量化为相同类型"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "使用文件中的数据作为量化优化的重要性矩阵"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "对这些张量使用重要性矩阵"
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "不对这些张量使用重要性矩阵"
+ self.OUTPUT_TENSOR_TYPE = "输出张量类型:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "对output.weight张量使用此类型"
+ self.TOKEN_EMBEDDING_TYPE = "令牌嵌入类型:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "对令牌嵌入张量使用此类型"
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "将在与输入相同的分片中生成量化模型"
+ )
+ self.OVERRIDE_MODEL_METADATA = "覆盖模型元数据"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix生成的输入数据文件"
+ self.MODEL_TO_BE_QUANTIZED = "要量化的模型"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "生成的IMatrix的输出路径"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "保存IMatrix的频率"
+ self.SET_GPU_OFFLOAD_VALUE = "设置GPU卸载值(-ngl)"
+ self.COMPLETED = "已完成"
+ self.REFRESH_MODELS = "刷新模型"
+ self.EXTRA_ARGUMENTS = "额外参数:"
+ self.EXTRA_ARGUMENTS_LABEL = "附加命令行参数"
+ self.CONTEXT_SIZE = "上下文大小:"
+ self.CONTEXT_SIZE_FOR_IMATRIX = "IMatrix生成的上下文大小"
+ self.THREADS = "线程数:"
+ self.NUMBER_OF_THREADS_FOR_IMATRIX = "IMatrix生成的线程数"
+ self.LORA_CONVERSION = "LoRA转换"
+ self.LORA_INPUT_PATH = "LoRA输入路径"
+ self.LORA_OUTPUT_PATH = "LoRA输出路径"
+ self.SELECT_LORA_INPUT_DIRECTORY = "选择LoRA输入目录"
+ self.SELECT_LORA_OUTPUT_FILE = "选择LoRA输出文件"
+ self.CONVERT_LORA = "转换LoRA"
+ self.STARTING_LORA_CONVERSION = "开始LoRA转换"
+ self.LORA_INPUT_PATH_REQUIRED = "需要LoRA输入路径。"
+ self.LORA_OUTPUT_PATH_REQUIRED = "需要LoRA输出路径。"
+ self.ERROR_STARTING_LORA_CONVERSION = "启动LoRA转换时出错:{}"
+ self.LORA_CONVERSION_TASK_STARTED = "LoRA转换任务已启动。"
+ self.BIN_FILES = "二进制文件 (*.bin)"
+ self.BROWSING_FOR_LORA_INPUT_DIRECTORY = "正在浏览LoRA输入目录..."
+ self.BROWSING_FOR_LORA_OUTPUT_FILE = "正在浏览LoRA输出文件..."
+ self.CONVERTING_LORA = "正在转换LoRA"
+ self.LORA_CONVERSION_FINISHED = "LoRA转换完成。"
+ self.LORA_FILE_MOVED = "LoRA文件已从{}移动到{}。"
+ self.LORA_FILE_NOT_FOUND = "未找到LoRA文件:{}。"
+ self.ERROR_MOVING_LORA_FILE = "移动LoRA文件时出错:{}"
+ self.EXPORT_LORA = "导出LoRA"
+ self.MODEL_PATH_REQUIRED = "需要模型路径。"
+ self.OUTPUT_PATH_REQUIRED = "需要输出路径。"
+ self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = "至少需要一个LoRA适配器。"
+ self.INVALID_LORA_SCALE_VALUE = "无效的LoRA比例值。"
+ self.ERROR_STARTING_LORA_EXPORT = "启动LoRA导出时出错:{}"
+ self.LORA_EXPORT_TASK_STARTED = "LoRA导出任务已启动。"
+ self.GGML_LORA_ADAPTERS = "GGML LoRA适配器"
+ self.SELECT_LORA_ADAPTER_FILES = "选择LoRA适配器文件"
+ self.ADD_ADAPTER = "添加适配器"
+ self.DELETE_ADAPTER = "删除"
+ self.LORA_SCALE = "LoRA比例"
+ self.ENTER_LORA_SCALE_VALUE = "输入LoRA比例值(可选)"
+ self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "LoRA导出的线程数"
+ self.EXPORTING_LORA = "正在导出LoRA..."
+ self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = "正在浏览导出LoRA模型文件..."
+ self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = "正在浏览导出LoRA输出文件..."
+ self.ADDING_LORA_ADAPTER = "正在添加LoRA适配器..."
+ self.DELETING_LORA_ADAPTER = "正在删除LoRA适配器..."
+ self.LORA_FILES = "LoRA文件 (*.bin)"
+ self.SELECT_LORA_ADAPTER_FILE = "选择LoRA适配器文件"
+ self.STARTING_LORA_EXPORT = "开始LoRA导出..."
+ self.OUTPUT_TYPE = "输出类型"
+ self.SELECT_OUTPUT_TYPE = "选择输出类型(GGUF或GGML)"
+ self.GGUF_AND_BIN_FILES = "GGUF和二进制文件 (*.gguf *.bin)"
+ self.BASE_MODEL = "基础模型"
+ self.SELECT_BASE_MODEL_FILE = "选择基础模型文件(GGUF)"
+ self.BASE_MODEL_PATH_REQUIRED = "GGUF输出需要基础模型路径。"
+ self.BROWSING_FOR_BASE_MODEL_FILE = "正在浏览基础模型文件..."
+ self.SELECT_BASE_MODEL_FOLDER = "选择基础模型文件夹(包含safetensors)"
+ self.BROWSING_FOR_BASE_MODEL_FOLDER = "正在浏览基础模型文件夹..."
+ self.LORA_CONVERSION_FROM_TO = "LoRA从{}转换到{}"
+ self.GENERATING_IMATRIX_FOR = "正在为{}生成IMatrix"
+ self.MODEL_PATH_REQUIRED_FOR_IMATRIX = "IMatrix生成需要模型路径。"
+
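+ # Illustrative sketch (assumes callers fill these templates with str.format):
+ # most templates in these catalogs use indexed placeholders ({0}, {1}), while
+ # the LoRA strings use auto-numbered {}; str.format accepts both forms, e.g.
+ #   "启动LoRA转换时出错:{}".format("timeout")  # -> "启动LoRA转换时出错:timeout"
+ #   "找到{0}个有效后端".format(3)               # -> "找到3个有效后端"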
+
+class _Spanish(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (cuantizador automático de modelos GGUF)"
+ self.RAM_USAGE = "Uso de RAM:"
+ self.CPU_USAGE = "Uso de CPU:"
+ self.BACKEND = "Backend de Llama.cpp:"
+ self.REFRESH_BACKENDS = "Actualizar Backends"
+ self.MODELS_PATH = "Ruta de Modelos:"
+ self.OUTPUT_PATH = "Ruta de Salida:"
+ self.LOGS_PATH = "Ruta de Registros:"
+ self.BROWSE = "Explorar"
+ self.AVAILABLE_MODELS = "Modelos Disponibles:"
+ self.QUANTIZATION_TYPE = "Tipo de Cuantización:"
+ self.ALLOW_REQUANTIZE = "Permitir Recuantización"
+ self.LEAVE_OUTPUT_TENSOR = "Dejar Tensor de Salida"
+ self.PURE = "Puro"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Incluir Pesos:"
+ self.EXCLUDE_WEIGHTS = "Excluir Pesos:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Salida"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incrustación de Token"
+ self.KEEP_SPLIT = "Mantener División"
+ self.KV_OVERRIDES = "Anulaciones KV:"
+ self.ADD_NEW_OVERRIDE = "Agregar nueva anulación"
+ self.QUANTIZE_MODEL = "Cuantizar Modelo"
+ self.SAVE_PRESET = "Guardar Preajuste"
+ self.LOAD_PRESET = "Cargar Preajuste"
+ self.TASKS = "Tareas:"
+ self.DOWNLOAD_LLAMACPP = "Descargar llama.cpp"
+ self.SELECT_RELEASE = "Seleccionar Versión:"
+ self.SELECT_ASSET = "Seleccionar Activo:"
+ self.EXTRACT_CUDA_FILES = "Extraer archivos CUDA"
+ self.SELECT_CUDA_BACKEND = "Seleccionar Backend CUDA:"
+ self.DOWNLOAD = "Descargar"
+ self.IMATRIX_GENERATION = "Generación de IMatrix"
+ self.DATA_FILE = "Archivo de Datos:"
+ self.MODEL = "Modelo:"
+ self.OUTPUT = "Salida:"
+ self.OUTPUT_FREQUENCY = "Frecuencia de Salida:"
+ self.GPU_OFFLOAD = "Descarga GPU:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Generar IMatrix"
+ self.ERROR = "Error"
+ self.WARNING = "Advertencia"
+ self.PROPERTIES = "Propiedades"
+ self.CANCEL = "Cancelar"
+ self.RESTART = "Reiniciar"
+ self.DELETE = "Eliminar"
+ self.CONFIRM_DELETION = "¿Estás seguro de que quieres eliminar esta tarea?"
+ self.TASK_RUNNING_WARNING = "Algunas tareas aún se están ejecutando. ¿Estás seguro de que quieres salir?"
+ self.YES = "Sí"
+ self.NO = "No"
+ self.DOWNLOAD_COMPLETE = "Descarga Completa"
+ self.CUDA_EXTRACTION_FAILED = "Extracción de CUDA Fallida"
+ self.PRESET_SAVED = "Preajuste Guardado"
+ self.PRESET_LOADED = "Preajuste Cargado"
+ self.NO_ASSET_SELECTED = "Ningún activo seleccionado"
+ self.DOWNLOAD_FAILED = "Descarga fallida"
+ self.NO_BACKEND_SELECTED = "Ningún backend seleccionado"
+ self.NO_MODEL_SELECTED = "Ningún modelo seleccionado"
+ self.REFRESH_RELEASES = "Actualizar Versiones"
+ self.NO_SUITABLE_CUDA_BACKENDS = "No se encontraron backends CUDA adecuados"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}\nArchivos CUDA extraídos en {1}"
+ self.CUDA_FILES_EXTRACTED = "Archivos CUDA extraídos en"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "No se encontró un backend CUDA adecuado para la extracción"
+ )
+ self.ERROR_FETCHING_RELEASES = "Error al obtener versiones: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirmar Eliminación"
+ self.LOG_FOR = "Registro para {0}"
+ self.ALL_FILES = "Todos los Archivos (*)"
+ self.GGUF_FILES = "Archivos GGUF (*.gguf)"
+ self.DAT_FILES = "Archivos DAT (*.dat)"
+ self.JSON_FILES = "Archivos JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Error al cargar el preajuste: {0}"
+ self.INITIALIZING_AUTOGGUF = "Inicializando aplicación AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialización de AutoGGUF completa"
+ self.REFRESHING_BACKENDS = "Actualizando backends"
+ self.NO_BACKENDS_AVAILABLE = "No hay backends disponibles"
+ self.FOUND_VALID_BACKENDS = "Se encontraron {0} backends válidos"
+ self.SAVING_PRESET = "Guardando preajuste"
+ self.PRESET_SAVED_TO = "Preajuste guardado en {0}"
+ self.LOADING_PRESET = "Cargando preajuste"
+ self.PRESET_LOADED_FROM = "Preajuste cargado desde {0}"
+ self.ADDING_KV_OVERRIDE = "Agregando anulación KV: {0}"
+ self.SAVING_TASK_PRESET = "Guardando preajuste de tarea para {0}"
+ self.TASK_PRESET_SAVED = "Preajuste de Tarea Guardado"
+ self.TASK_PRESET_SAVED_TO = "Preajuste de tarea guardado en {0}"
+ self.RESTARTING_TASK = "Reiniciando tarea: {0}"
+ self.IN_PROGRESS = "En Progreso"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Descarga finalizada. Extraído en: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binario de llama.cpp descargado y extraído en {0}\nArchivos CUDA extraídos en {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "No se encontró un backend CUDA adecuado para la extracción"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Binario de llama.cpp descargado y extraído en {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Actualizando versiones de llama.cpp"
+ self.UPDATING_ASSET_LIST = "Actualizando lista de activos"
+ self.UPDATING_CUDA_OPTIONS = "Actualizando opciones de CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Iniciando descarga de llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Actualizando backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "No se seleccionó backend CUDA para extracción"
+ self.EXTRACTING_CUDA_FILES = "Extrayendo archivos CUDA de {0} a {1}"
+ self.DOWNLOAD_ERROR = "Error de descarga: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Mostrando menú contextual de tarea"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Mostrando propiedades para la tarea: {0}"
+ self.CANCELLING_TASK = "Cancelando tarea: {0}"
+ self.CANCELED = "Cancelado"
+ self.DELETING_TASK = "Eliminando tarea: {0}"
+ self.LOADING_MODELS = "Cargando modelos"
+ self.LOADED_MODELS = "Cargados {0} modelos"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Explorando directorio de modelos"
+ self.SELECT_MODELS_DIRECTORY = "Seleccionar Directorio de Modelos"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Explorando directorio de salida"
+ self.SELECT_OUTPUT_DIRECTORY = "Seleccionar Directorio de Salida"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Explorando directorio de registros"
+ self.SELECT_LOGS_DIRECTORY = "Seleccionar Directorio de Registros"
+ self.BROWSING_FOR_IMATRIX_FILE = "Explorando archivo IMatrix"
+ self.SELECT_IMATRIX_FILE = "Seleccionar Archivo IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Uso de CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validando entradas de cuantización"
+ self.MODELS_PATH_REQUIRED = "Se requiere la ruta de modelos"
+ self.OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida"
+ self.LOGS_PATH_REQUIRED = "Se requiere la ruta de registros"
+ self.STARTING_MODEL_QUANTIZATION = "Iniciando cuantización de modelo"
+ self.INPUT_FILE_NOT_EXIST = "El archivo de entrada '{0}' no existe."
+ self.QUANTIZING_MODEL_TO = "Cuantizando {0} a {1}"
+ self.QUANTIZATION_TASK_STARTED = "Tarea de cuantización iniciada para {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Error al iniciar la cuantización: {0}"
+ self.UPDATING_MODEL_INFO = "Actualizando información del modelo: {0}"
+ self.TASK_FINISHED = "Tarea finalizada: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Mostrando detalles de la tarea para: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Explorando archivo de datos IMatrix"
+ self.SELECT_DATA_FILE = "Seleccionar Archivo de Datos"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Explorando archivo de modelo IMatrix"
+ self.SELECT_MODEL_FILE = "Seleccionar Archivo de Modelo"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Explorando archivo de salida IMatrix"
+ self.SELECT_OUTPUT_FILE = "Seleccionar Archivo de Salida"
+ self.STARTING_IMATRIX_GENERATION = "Iniciando generación de IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "La ruta del backend no existe: {0}"
+ self.GENERATING_IMATRIX = "Generando IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Error al iniciar la generación de IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Tarea de generación de IMatrix iniciada"
+ self.ERROR_MESSAGE = "Error: {0}"
+ self.TASK_ERROR = "Error de tarea: {0}"
+ self.APPLICATION_CLOSING = "Cerrando aplicación"
+ self.APPLICATION_CLOSED = "Aplicación cerrada"
+ self.SELECT_QUANTIZATION_TYPE = "Seleccione el tipo de cuantización"
+ self.ALLOWS_REQUANTIZING = (
+ "Permite recuantizar tensores que ya han sido cuantizados"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Dejará output.weight sin (re)cuantizar"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Desactiva las mezclas k-quant y cuantiza todos los tensores al mismo tipo"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usa los datos en el archivo como matriz de importancia para optimizaciones de cuantización"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Usar matriz de importancia para estos tensores"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "No usar matriz de importancia para estos tensores"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Salida:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Usar este tipo para el tensor output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Tipo de Incrustación de Token:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Usar este tipo para el tensor de incrustaciones de token"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Generará el modelo cuantizado en los mismos fragmentos que la entrada"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Anular metadatos del modelo"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Archivo de datos de entrada para generación de IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modelo a cuantizar"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Ruta de salida para el IMatrix generado"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Con qué frecuencia guardar el IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Establecer valor de descarga GPU (-ngl)"
+ self.COMPLETED = "Completado"
+ self.REFRESH_MODELS = "Actualizar modelos"
+ self.EXTRA_ARGUMENTS = "Argumentos adicionales:"
+ self.EXTRA_ARGUMENTS_LABEL = "Argumentos adicionales de línea de comandos"
+ self.CONTEXT_SIZE = "Tamaño del contexto:"
+ self.CONTEXT_SIZE_FOR_IMATRIX = "Tamaño del contexto para generación de IMatrix"
+ self.THREADS = "Hilos:"
+ self.NUMBER_OF_THREADS_FOR_IMATRIX = (
+ "Número de hilos para generación de IMatrix"
+ )
+ self.LORA_CONVERSION = "Conversión LoRA"
+ self.LORA_INPUT_PATH = "Ruta de entrada LoRA"
+ self.LORA_OUTPUT_PATH = "Ruta de salida LoRA"
+ self.SELECT_LORA_INPUT_DIRECTORY = "Seleccionar directorio de entrada LoRA"
+ self.SELECT_LORA_OUTPUT_FILE = "Seleccionar archivo de salida LoRA"
+ self.CONVERT_LORA = "Convertir LoRA"
+ self.STARTING_LORA_CONVERSION = "Iniciando conversión LoRA"
+ self.LORA_INPUT_PATH_REQUIRED = "Se requiere la ruta de entrada LoRA."
+ self.LORA_OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida LoRA."
+ self.ERROR_STARTING_LORA_CONVERSION = "Error al iniciar la conversión LoRA: {}"
+ self.LORA_CONVERSION_TASK_STARTED = "Tarea de conversión LoRA iniciada."
+ self.BIN_FILES = "Archivos binarios (*.bin)"
+ self.BROWSING_FOR_LORA_INPUT_DIRECTORY = (
+ "Buscando directorio de entrada LoRA..."
+ )
+ self.BROWSING_FOR_LORA_OUTPUT_FILE = "Buscando archivo de salida LoRA..."
+ self.CONVERTING_LORA = "Convirtiendo LoRA"
+ self.LORA_CONVERSION_FINISHED = "Conversión LoRA finalizada."
+ self.LORA_FILE_MOVED = "Archivo LoRA movido de {} a {}."
+ self.LORA_FILE_NOT_FOUND = "Archivo LoRA no encontrado: {}."
+ self.ERROR_MOVING_LORA_FILE = "Error al mover el archivo LoRA: {}"
+ self.EXPORT_LORA = "Exportar LoRA"
+ self.MODEL_PATH_REQUIRED = "Se requiere la ruta del modelo."
+ self.OUTPUT_PATH_REQUIRED = "Se requiere la ruta de salida."
+ self.AT_LEAST_ONE_LORA_ADAPTER_REQUIRED = (
+ "Se requiere al menos un adaptador LoRA."
+ )
+ self.INVALID_LORA_SCALE_VALUE = "Valor de escala LoRA inválido."
+ self.ERROR_STARTING_LORA_EXPORT = "Error al iniciar la exportación LoRA: {}"
+ self.LORA_EXPORT_TASK_STARTED = "Tarea de exportación LoRA iniciada."
+ self.GGML_LORA_ADAPTERS = "Adaptadores LoRA GGML"
+ self.SELECT_LORA_ADAPTER_FILES = "Seleccionar archivos de adaptador LoRA"
+ self.ADD_ADAPTER = "Añadir adaptador"
+ self.DELETE_ADAPTER = "Eliminar"
+ self.LORA_SCALE = "Escala LoRA"
+ self.ENTER_LORA_SCALE_VALUE = "Ingresar valor de escala LoRA (Opcional)"
+ self.NUMBER_OF_THREADS_FOR_LORA_EXPORT = "Número de hilos para exportación LoRA"
+ self.EXPORTING_LORA = "Exportando LoRA..."
+ self.BROWSING_FOR_EXPORT_LORA_MODEL_FILE = (
+ "Buscando archivo de modelo LoRA para exportar..."
+ )
+ self.BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE = (
+ "Buscando archivo de salida LoRA para exportar..."
+ )
+ self.ADDING_LORA_ADAPTER = "Añadiendo adaptador LoRA..."
+ self.DELETING_LORA_ADAPTER = "Eliminando adaptador LoRA..."
+ self.LORA_FILES = "Archivos LoRA (*.bin)"
+ self.SELECT_LORA_ADAPTER_FILE = "Seleccionar archivo de adaptador LoRA"
+ self.STARTING_LORA_EXPORT = "Iniciando exportación LoRA..."
+ self.OUTPUT_TYPE = "Tipo de salida"
+ self.SELECT_OUTPUT_TYPE = "Seleccionar tipo de salida (GGUF o GGML)"
+ self.GGUF_AND_BIN_FILES = "Archivos GGUF y binarios (*.gguf *.bin)"
+ self.BASE_MODEL = "Modelo base"
+ self.SELECT_BASE_MODEL_FILE = "Seleccionar archivo de modelo base (GGUF)"
+ self.BASE_MODEL_PATH_REQUIRED = (
+ "Se requiere la ruta del modelo base para la salida GGUF."
+ )
+ self.BROWSING_FOR_BASE_MODEL_FILE = "Buscando archivo de modelo base..."
+ self.SELECT_BASE_MODEL_FOLDER = (
+ "Seleccionar carpeta de modelo base (que contiene safetensors)"
+ )
+ self.BROWSING_FOR_BASE_MODEL_FOLDER = "Buscando carpeta de modelo base..."
+ self.LORA_CONVERSION_FROM_TO = "Conversión LoRA de {} a {}"
+ self.GENERATING_IMATRIX_FOR = "Generando IMatrix para {}"
+ self.MODEL_PATH_REQUIRED_FOR_IMATRIX = (
+ "Se requiere la ruta del modelo para la generación de IMatrix."
+ )
+
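+ # Usage sketch (illustrative; assumes the UI layer consumes these catalogs as
+ # plain attribute bags and fills the placeholders with str.format):
+ #   lang = _Spanish()
+ #   lang.FOUND_VALID_BACKENDS.format(3)            # "Se encontraron 3 backends válidos"
+ #   lang.RAM_USAGE_FORMAT.format(42.0, 512, 1024)  # "42.0% (512 MB / 1024 MB)"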
+
+class _Hindi(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (स्वचालित GGUF मॉडल क्वांटाइज़र)"
+ self.RAM_USAGE = "RAM उपयोग:"
+ self.CPU_USAGE = "CPU उपयोग:"
+ self.BACKEND = "Llama.cpp बैकएंड:"
+ self.REFRESH_BACKENDS = "बैकएंड रीफ्रेश करें"
+ self.MODELS_PATH = "मॉडल पथ:"
+ self.OUTPUT_PATH = "आउटपुट पथ:"
+ self.LOGS_PATH = "लॉग पथ:"
+ self.BROWSE = "ब्राउज़ करें"
+ self.AVAILABLE_MODELS = "उपलब्ध मॉडल:"
+ self.QUANTIZATION_TYPE = "क्वांटाइजेशन प्रकार:"
+ self.ALLOW_REQUANTIZE = "पुनः क्वांटाइज़ करने की अनुमति दें"
+ self.LEAVE_OUTPUT_TENSOR = "आउटपुट टेंसर छोड़ें"
+ self.PURE = "शुद्ध"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "वेट शामिल करें:"
+ self.EXCLUDE_WEIGHTS = "वेट बाहर रखें:"
+ self.USE_OUTPUT_TENSOR_TYPE = "आउटपुट टेंसर प्रकार का उपयोग करें"
+ self.USE_TOKEN_EMBEDDING_TYPE = "टोकन एम्बेडिंग प्रकार का उपयोग करें"
+ self.KEEP_SPLIT = "विभाजन रखें"
+ self.KV_OVERRIDES = "KV ओवरराइड:"
+ self.ADD_NEW_OVERRIDE = "नया ओवरराइड जोड़ें"
+ self.QUANTIZE_MODEL = "मॉडल क्वांटाइज़ करें"
+ self.SAVE_PRESET = "प्रीसेट सहेजें"
+ self.LOAD_PRESET = "प्रीसेट लोड करें"
+ self.TASKS = "कार्य:"
+ self.DOWNLOAD_LLAMACPP = "llama.cpp डाउनलोड करें"
+ self.SELECT_RELEASE = "रिलीज़ चुनें:"
+ self.SELECT_ASSET = "एसेट चुनें:"
+ self.EXTRACT_CUDA_FILES = "CUDA फ़ाइलें निकालें"
+ self.SELECT_CUDA_BACKEND = "CUDA बैकएंड चुनें:"
+ self.DOWNLOAD = "डाउनलोड करें"
+ self.IMATRIX_GENERATION = "IMatrix उत्पादन"
+ self.DATA_FILE = "डेटा फ़ाइल:"
+ self.MODEL = "मॉडल:"
+ self.OUTPUT = "आउटपुट:"
+ self.OUTPUT_FREQUENCY = "आउटपुट आवृत्ति:"
+ self.GPU_OFFLOAD = "GPU ऑफलोड:"
+ self.AUTO = "स्वचालित"
+ self.GENERATE_IMATRIX = "IMatrix उत्पन्न करें"
+ self.ERROR = "त्रुटि"
+ self.WARNING = "चेतावनी"
+ self.PROPERTIES = "गुण"
+ self.CANCEL = "रद्द करें"
+ self.RESTART = "पुनः आरंभ करें"
+ self.DELETE = "हटाएं"
+ self.CONFIRM_DELETION = "क्या आप वाकई इस कार्य को हटाना चाहते हैं?"
+ self.TASK_RUNNING_WARNING = (
+ "कुछ कार्य अभी भी चल रहे हैं। क्या आप वाकई बाहर निकलना चाहते हैं?"
+ )
+ self.YES = "हां"
+ self.NO = "नहीं"
+ self.DOWNLOAD_COMPLETE = "डाउनलोड पूरा हुआ"
+ self.CUDA_EXTRACTION_FAILED = "CUDA निष्कर्षण विफल"
+ self.PRESET_SAVED = "प्रीसेट सहेजा गया"
+ self.PRESET_LOADED = "प्रीसेट लोड किया गया"
+ self.NO_ASSET_SELECTED = "कोई एसेट चयनित नहीं"
+ self.DOWNLOAD_FAILED = "डाउनलोड विफल"
+ self.NO_BACKEND_SELECTED = "कोई बैकएंड चयनित नहीं"
+ self.NO_MODEL_SELECTED = "कोई मॉडल चयनित नहीं"
+ self.REFRESH_RELEASES = "रिलीज़ रीफ्रेश करें"
+ self.NO_SUITABLE_CUDA_BACKENDS = "कोई उपयुक्त CUDA बैकएंड नहीं मिला"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = (
+ "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई\nCUDA फ़ाइलें {1} में निकाली गईं"
+ )
+ self.CUDA_FILES_EXTRACTED = "CUDA फ़ाइलें निकाली गईं"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "निष्कर्षण के लिए कोई उपयुक्त CUDA बैकएंड नहीं मिला"
+ )
+ self.ERROR_FETCHING_RELEASES = "रिलीज़ प्राप्त करने में त्रुटि: {0}"
+ self.CONFIRM_DELETION_TITLE = "हटाने की पुष्टि करें"
+ self.LOG_FOR = "{0} के लिए लॉग"
+ self.ALL_FILES = "सभी फ़ाइलें (*)"
+ self.GGUF_FILES = "GGUF फ़ाइलें (*.gguf)"
+ self.DAT_FILES = "DAT फ़ाइलें (*.dat)"
+ self.JSON_FILES = "JSON फ़ाइलें (*.json)"
+ self.FAILED_LOAD_PRESET = "प्रीसेट लोड करने में विफल: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUF एप्लिकेशन प्रारंभ हो रहा है"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF प्रारंभीकरण पूरा हुआ"
+ self.REFRESHING_BACKENDS = "बैकएंड रीफ्रेश हो रहे हैं"
+ self.NO_BACKENDS_AVAILABLE = "कोई बैकएंड उपलब्ध नहीं"
+ self.FOUND_VALID_BACKENDS = "{0} मान्य बैकएंड मिले"
+ self.SAVING_PRESET = "प्रीसेट सहेजा जा रहा है"
+ self.PRESET_SAVED_TO = "प्रीसेट {0} में सहेजा गया"
+ self.LOADING_PRESET = "प्रीसेट लोड हो रहा है"
+ self.PRESET_LOADED_FROM = "{0} से प्रीसेट लोड किया गया"
+ self.ADDING_KV_OVERRIDE = "KV ओवरराइड जोड़ा जा रहा है: {0}"
+ self.SAVING_TASK_PRESET = "{0} के लिए कार्य प्रीसेट सहेजा जा रहा है"
+ self.TASK_PRESET_SAVED = "कार्य प्रीसेट सहेजा गया"
+ self.TASK_PRESET_SAVED_TO = "कार्य प्रीसेट {0} में सहेजा गया"
+ self.RESTARTING_TASK = "कार्य पुनः आरंभ हो रहा है: {0}"
+ self.IN_PROGRESS = "प्रगति में"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "डाउनलोड समाप्त। निकाला गया: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई\nCUDA फ़ाइलें {1} में निकाली गईं"
+ )
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "निष्कर्षण के लिए कोई उपयुक्त CUDA बैकएंड नहीं मिला"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp बाइनरी डाउनलोड और {0} में निकाली गई"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp रिलीज़ रीफ्रेश हो रही हैं"
+ self.UPDATING_ASSET_LIST = "एसेट सूची अपडेट हो रही है"
+ self.UPDATING_CUDA_OPTIONS = "CUDA विकल्प अपडेट हो रहे हैं"
+ self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp डाउनलोड शुरू हो रहा है"
+ self.UPDATING_CUDA_BACKENDS = "CUDA बैकएंड अपडेट हो रहे हैं"
+ self.NO_CUDA_BACKEND_SELECTED = "निष्कर्षण के लिए कोई CUDA बैकएंड चयनित नहीं"
+ self.EXTRACTING_CUDA_FILES = "{0} से {1} में CUDA फ़ाइलें निकाली जा रही हैं"
+ self.DOWNLOAD_ERROR = "डाउनलोड त्रुटि: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "कार्य संदर्भ मेनू दिखाया जा रहा है"
+ self.SHOWING_PROPERTIES_FOR_TASK = "कार्य के लिए गुण दिखाए जा रहे हैं: {0}"
+ self.CANCELLING_TASK = "कार्य रद्द किया जा रहा है: {0}"
+ self.CANCELED = "रद्द किया गया"
+ self.DELETING_TASK = "कार्य हटाया जा रहा है: {0}"
+ self.LOADING_MODELS = "मॉडल लोड हो रहे हैं"
+ self.LOADED_MODELS = "{0} मॉडल लोड किए गए"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "मॉडल निर्देशिका के लिए ब्राउज़ किया जा रहा है"
+ self.SELECT_MODELS_DIRECTORY = "मॉडल निर्देशिका चुनें"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "आउटपुट निर्देशिका के लिए ब्राउज़ किया जा रहा है"
+ self.SELECT_OUTPUT_DIRECTORY = "आउटपुट निर्देशिका चुनें"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "लॉग निर्देशिका के लिए ब्राउज़ किया जा रहा है"
+ self.SELECT_LOGS_DIRECTORY = "लॉग निर्देशिका चुनें"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrix फ़ाइल के लिए ब्राउज़ किया जा रहा है"
+ self.SELECT_IMATRIX_FILE = "IMatrix फ़ाइल चुनें"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU उपयोग: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "क्वांटाइजेशन इनपुट सत्यापित किए जा रहे हैं"
+ self.MODELS_PATH_REQUIRED = "मॉडल पथ आवश्यक है"
+ self.OUTPUT_PATH_REQUIRED = "आउटपुट पथ आवश्यक है"
+ self.LOGS_PATH_REQUIRED = "लॉग पथ आवश्यक है"
+ self.STARTING_MODEL_QUANTIZATION = "मॉडल क्वांटाइजेशन शुरू हो रहा है"
+ self.INPUT_FILE_NOT_EXIST = "इनपुट फ़ाइल '{0}' मौजूद नहीं है।"
+ self.QUANTIZING_MODEL_TO = "{0} को {1} में क्वांटाइज़ किया जा रहा है"
+ self.QUANTIZATION_TASK_STARTED = "{0} के लिए क्वांटाइजेशन कार्य शुरू हुआ"
+ self.ERROR_STARTING_QUANTIZATION = "क्वांटाइजेशन शुरू करने में त्रुटि: {0}"
+ self.UPDATING_MODEL_INFO = "मॉडल जानकारी अपडेट हो रही है: {0}"
+ self.TASK_FINISHED = "कार्य समाप्त: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "कार्य विवरण दिखाए जा रहे हैं: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = (
+ "IMatrix डेटा फ़ाइल के लिए ब्राउज़ किया जा रहा है"
+ )
+ self.SELECT_DATA_FILE = "डेटा फ़ाइल चुनें"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = (
+ "IMatrix मॉडल फ़ाइल के लिए ब्राउज़ किया जा रहा है"
+ )
+ self.SELECT_MODEL_FILE = "मॉडल फ़ाइल चुनें"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = (
+ "IMatrix आउटपुट फ़ाइल के लिए ब्राउज़ किया जा रहा है"
+ )
+ self.SELECT_OUTPUT_FILE = "आउटपुट फ़ाइल चुनें"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix उत्पादन शुरू हो रहा है"
+ self.BACKEND_PATH_NOT_EXIST = "बैकएंड पथ मौजूद नहीं है: {0}"
+ self.GENERATING_IMATRIX = "IMatrix उत्पन्न किया जा रहा है"
+ self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix उत्पादन शुरू करने में त्रुटि: {0}"
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix उत्पादन कार्य शुरू हुआ"
+ self.ERROR_MESSAGE = "त्रुटि: {0}"
+ self.TASK_ERROR = "कार्य त्रुटि: {0}"
+ self.APPLICATION_CLOSING = "एप्लिकेशन बंद हो रहा है"
+ self.APPLICATION_CLOSED = "एप्लिकेशन बंद हो गया"
+ self.SELECT_QUANTIZATION_TYPE = "क्वांटाइजेशन प्रकार चुनें"
+ self.ALLOWS_REQUANTIZING = (
+ "पहले से क्वांटाइज़ किए गए टेंसर को पुनः क्वांटाइज़ करने की अनुमति देता है"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "output.weight को बिना (पुनः) क्वांटाइज़ किए छोड़ देगा"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "k-quant मिश्रण को अक्षम करें और सभी टेंसर को एक ही प्रकार में क्वांटाइज़ करें"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "क्वांट अनुकूलन के लिए फ़ाइल में डेटा को महत्व मैट्रिक्स के रूप में उपयोग करें"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "इन टेंसर के लिए महत्व मैट्रिक्स का उपयोग करें"
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "इन टेंसर के लिए महत्व मैट्रिक्स का उपयोग न करें"
+ )
+ self.OUTPUT_TENSOR_TYPE = "आउटपुट टेंसर प्रकार:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "output.weight टेंसर के लिए इस प्रकार का उपयोग करें"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "टोकन एम्बेडिंग प्रकार:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "टोकन एम्बेडिंग टेंसर के लिए इस प्रकार का उपयोग करें"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "इनपुट के समान शार्ड्स में क्वांटाइज़ किए गए मॉडल को उत्पन्न करेगा"
+ )
+ self.OVERRIDE_MODEL_METADATA = "मॉडल मेटाडेटा को ओवरराइड करें"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix उत्पादन के लिए इनपुट डेटा फ़ाइल"
+ self.MODEL_TO_BE_QUANTIZED = "क्वांटाइज़ किए जाने वाला मॉडल"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "उत्पन्न IMatrix के लिए आउटपुट पथ"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix को कितनी बार सहेजना है"
+ self.SET_GPU_OFFLOAD_VALUE = "GPU ऑफलोड मान सेट करें (-ngl)"
+ self.COMPLETED = "पूरा हुआ"
+ self.REFRESH_MODELS = "मॉडल रीफ्रेश करें"
+
+
+class _Russian(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (автоматический квантователь моделей GGUF)"
+ self.RAM_USAGE = "Использование ОЗУ:"
+ self.CPU_USAGE = "Использование ЦП:"
+ self.BACKEND = "Бэкенд Llama.cpp:"
+ self.REFRESH_BACKENDS = "Обновить бэкенды"
+ self.MODELS_PATH = "Путь к моделям:"
+ self.OUTPUT_PATH = "Путь вывода:"
+ self.LOGS_PATH = "Путь к логам:"
+ self.BROWSE = "Обзор"
+ self.AVAILABLE_MODELS = "Доступные модели:"
+ self.QUANTIZATION_TYPE = "Тип квантования:"
+ self.ALLOW_REQUANTIZE = "Разрешить переквантование"
+ self.LEAVE_OUTPUT_TENSOR = "Оставить выходной тензор"
+ self.PURE = "Чистый"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Включить веса:"
+ self.EXCLUDE_WEIGHTS = "Исключить веса:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Использовать тип выходного тензора"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Использовать тип встраивания токенов"
+ self.KEEP_SPLIT = "Сохранить разделение"
+ self.KV_OVERRIDES = "KV переопределения:"
+ self.ADD_NEW_OVERRIDE = "Добавить новое переопределение"
+ self.QUANTIZE_MODEL = "Квантовать модель"
+ self.SAVE_PRESET = "Сохранить пресет"
+ self.LOAD_PRESET = "Загрузить пресет"
+ self.TASKS = "Задачи:"
+ self.DOWNLOAD_LLAMACPP = "Скачать llama.cpp"
+ self.SELECT_RELEASE = "Выбрать релиз:"
+ self.SELECT_ASSET = "Выбрать актив:"
+ self.EXTRACT_CUDA_FILES = "Извлечь файлы CUDA"
+ self.SELECT_CUDA_BACKEND = "Выбрать бэкенд CUDA:"
+ self.DOWNLOAD = "Скачать"
+ self.IMATRIX_GENERATION = "Генерация IMatrix"
+ self.DATA_FILE = "Файл данных:"
+ self.MODEL = "Модель:"
+ self.OUTPUT = "Вывод:"
+ self.OUTPUT_FREQUENCY = "Частота вывода:"
+ self.GPU_OFFLOAD = "Разгрузка GPU:"
+ self.AUTO = "Авто"
+ self.GENERATE_IMATRIX = "Сгенерировать IMatrix"
+ self.ERROR = "Ошибка"
+ self.WARNING = "Предупреждение"
+ self.PROPERTIES = "Свойства"
+ self.CANCEL = "Отмена"
+ self.RESTART = "Перезапуск"
+ self.DELETE = "Удалить"
+ self.CONFIRM_DELETION = "Вы уверены, что хотите удалить эту задачу?"
+ self.TASK_RUNNING_WARNING = (
+ "Некоторые задачи все еще выполняются. Вы уверены, что хотите выйти?"
+ )
+ self.YES = "Да"
+ self.NO = "Нет"
+ self.DOWNLOAD_COMPLETE = "Загрузка завершена"
+ self.CUDA_EXTRACTION_FAILED = "Извлечение CUDA не удалось"
+ self.PRESET_SAVED = "Пресет сохранен"
+ self.PRESET_LOADED = "Пресет загружен"
+ self.NO_ASSET_SELECTED = "Актив не выбран"
+ self.DOWNLOAD_FAILED = "Загрузка не удалась"
+ self.NO_BACKEND_SELECTED = "Бэкенд не выбран"
+ self.NO_MODEL_SELECTED = "Модель не выбрана"
+ self.REFRESH_RELEASES = "Обновить релизы"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Подходящие бэкенды CUDA не найдены"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}\nФайлы CUDA извлечены в {1}"
+ self.CUDA_FILES_EXTRACTED = "Файлы CUDA извлечены в"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Подходящий бэкенд CUDA для извлечения не найден"
+ )
+ self.ERROR_FETCHING_RELEASES = "Ошибка получения релизов: {0}"
+ self.CONFIRM_DELETION_TITLE = "Подтвердить удаление"
+ self.LOG_FOR = "Лог для {0}"
+ self.ALL_FILES = "Все файлы (*)"
+ self.GGUF_FILES = "Файлы GGUF (*.gguf)"
+ self.DAT_FILES = "Файлы DAT (*.dat)"
+ self.JSON_FILES = "Файлы JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Не удалось загрузить пресет: {0}"
+ self.INITIALIZING_AUTOGGUF = "Инициализация приложения AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Инициализация AutoGGUF завершена"
+ self.REFRESHING_BACKENDS = "Обновление бэкендов"
+ self.NO_BACKENDS_AVAILABLE = "Бэкенды недоступны"
+ self.FOUND_VALID_BACKENDS = "Найдено {0} действительных бэкендов"
+ self.SAVING_PRESET = "Сохранение пресета"
+ self.PRESET_SAVED_TO = "Пресет сохранен в {0}"
+ self.LOADING_PRESET = "Загрузка пресета"
+ self.PRESET_LOADED_FROM = "Пресет загружен из {0}"
+ self.ADDING_KV_OVERRIDE = "Добавление KV переопределения: {0}"
+ self.SAVING_TASK_PRESET = "Сохранение пресета задачи для {0}"
+ self.TASK_PRESET_SAVED = "Пресет задачи сохранен"
+ self.TASK_PRESET_SAVED_TO = "Пресет задачи сохранен в {0}"
+ self.RESTARTING_TASK = "Перезапуск задачи: {0}"
+ self.IN_PROGRESS = "В процессе"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Загрузка завершена. Извлечено в: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Бинарный файл llama.cpp загружен и извлечен в {0}\nФайлы CUDA извлечены в {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Подходящий бэкенд CUDA для извлечения не найден"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Бинарный файл llama.cpp загружен и извлечен в {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Обновление релизов llama.cpp"
+ self.UPDATING_ASSET_LIST = "Обновление списка активов"
+ self.UPDATING_CUDA_OPTIONS = "Обновление параметров CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Начало загрузки llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Обновление бэкендов CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Бэкенд CUDA для извлечения не выбран"
+ self.EXTRACTING_CUDA_FILES = "Извлечение файлов CUDA из {0} в {1}"
+ self.DOWNLOAD_ERROR = "Ошибка загрузки: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Отображение контекстного меню задачи"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Отображение свойств задачи: {0}"
+ self.CANCELLING_TASK = "Отмена задачи: {0}"
+ self.CANCELED = "Отменено"
+ self.DELETING_TASK = "Удаление задачи: {0}"
+ self.LOADING_MODELS = "Загрузка моделей"
+ self.LOADED_MODELS = "Загружено {0} моделей"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Поиск каталога моделей"
+ self.SELECT_MODELS_DIRECTORY = "Выберите каталог моделей"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Поиск выходного каталога"
+ self.SELECT_OUTPUT_DIRECTORY = "Выберите выходной каталог"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Поиск каталога логов"
+ self.SELECT_LOGS_DIRECTORY = "Выберите каталог логов"
+ self.BROWSING_FOR_IMATRIX_FILE = "Поиск файла IMatrix"
+ self.SELECT_IMATRIX_FILE = "Выберите файл IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} МБ / {2} МБ)"
+ self.CPU_USAGE_FORMAT = "Использование ЦП: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Проверка входных данных квантования"
+ self.MODELS_PATH_REQUIRED = "Требуется путь к моделям"
+ self.OUTPUT_PATH_REQUIRED = "Требуется путь вывода"
+ self.LOGS_PATH_REQUIRED = "Требуется путь к логам"
+ self.STARTING_MODEL_QUANTIZATION = "Начало квантования модели"
+ self.INPUT_FILE_NOT_EXIST = "Входной файл '{0}' не существует."
+ self.QUANTIZING_MODEL_TO = "Квантование {0} в {1}"
+ self.QUANTIZATION_TASK_STARTED = "Задача квантования запущена для {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Ошибка запуска квантования: {0}"
+ self.UPDATING_MODEL_INFO = "Обновление информации о модели: {0}"
+ self.TASK_FINISHED = "Задача завершена: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Отображение сведений о задаче для: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Поиск файла данных IMatrix"
+ self.SELECT_DATA_FILE = "Выберите файл данных"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Поиск файла модели IMatrix"
+ self.SELECT_MODEL_FILE = "Выберите файл модели"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Поиск выходного файла IMatrix"
+ self.SELECT_OUTPUT_FILE = "Выберите выходной файл"
+ self.STARTING_IMATRIX_GENERATION = "Начало генерации IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Путь бэкенда не существует: {0}"
+ self.GENERATING_IMATRIX = "Генерация IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = "Ошибка запуска генерации IMatrix: {0}"
+ self.IMATRIX_GENERATION_TASK_STARTED = "Задача генерации IMatrix запущена"
+ self.ERROR_MESSAGE = "Ошибка: {0}"
+ self.TASK_ERROR = "Ошибка задачи: {0}"
+ self.APPLICATION_CLOSING = "Закрытие приложения"
+ self.APPLICATION_CLOSED = "Приложение закрыто"
+ self.SELECT_QUANTIZATION_TYPE = "Выберите тип квантования"
+ self.ALLOWS_REQUANTIZING = (
+ "Позволяет переквантовать тензоры, которые уже были квантованы"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Оставит output.weight не (пере)квантованным"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Отключить k-квантовые смеси и квантовать все тензоры к одному типу"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Использовать данные в файле как матрицу важности для оптимизации квантования"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Использовать матрицу важности для этих тензоров"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Не использовать матрицу важности для этих тензоров"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Тип выходного тензора:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Использовать этот тип для тензора output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Тип встраивания токенов:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Использовать этот тип для тензора встраивания токенов"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Будет генерировать квантованную модель в тех же шардах, что и входные данные"
+ self.OVERRIDE_MODEL_METADATA = "Переопределить метаданные модели"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "Входной файл данных для генерации IMatrix"
+ self.MODEL_TO_BE_QUANTIZED = "Модель для квантования"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Выходной путь для сгенерированного IMatrix"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Как часто сохранять IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Установить значение разгрузки GPU (-ngl)"
+ self.COMPLETED = "Завершено"
+ self.REFRESH_MODELS = "Обновить модели"
+
+
+class _Ukrainian(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (автоматичний квантувальник моделей GGUF)"
+ self.RAM_USAGE = "Використання ОЗП:"
+ self.CPU_USAGE = "Використання ЦП:"
+ self.BACKEND = "Бекенд Llama.cpp:"
+ self.REFRESH_BACKENDS = "Оновити бекенди"
+ self.MODELS_PATH = "Шлях до моделей:"
+ self.OUTPUT_PATH = "Шлях виводу:"
+ self.LOGS_PATH = "Шлях до логів:"
+ self.BROWSE = "Огляд"
+ self.AVAILABLE_MODELS = "Доступні моделі:"
+ self.QUANTIZATION_TYPE = "Тип квантування:"
+ self.ALLOW_REQUANTIZE = "Дозволити переквантування"
+ self.LEAVE_OUTPUT_TENSOR = "Залишити вихідний тензор"
+ self.PURE = "Чистий"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Включити ваги:"
+ self.EXCLUDE_WEIGHTS = "Виключити ваги:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Використовувати тип вихідного тензора"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Використовувати тип вбудовування токенів"
+ self.KEEP_SPLIT = "Зберегти розділення"
+ self.KV_OVERRIDES = "KV перевизначення:"
+ self.ADD_NEW_OVERRIDE = "Додати нове перевизначення"
+ self.QUANTIZE_MODEL = "Квантувати модель"
+ self.SAVE_PRESET = "Зберегти пресет"
+ self.LOAD_PRESET = "Завантажити пресет"
+ self.TASKS = "Завдання:"
+ self.DOWNLOAD_LLAMACPP = "Завантажити llama.cpp"
+ self.SELECT_RELEASE = "Вибрати реліз:"
+ self.SELECT_ASSET = "Вибрати актив:"
+ self.EXTRACT_CUDA_FILES = "Витягнути файли CUDA"
+ self.SELECT_CUDA_BACKEND = "Вибрати бекенд CUDA:"
+ self.DOWNLOAD = "Завантажити"
+ self.IMATRIX_GENERATION = "Генерація IMatrix"
+ self.DATA_FILE = "Файл даних:"
+ self.MODEL = "Модель:"
+ self.OUTPUT = "Вивід:"
+ self.OUTPUT_FREQUENCY = "Частота виводу:"
+ self.GPU_OFFLOAD = "Розвантаження GPU:"
+ self.AUTO = "Авто"
+ self.GENERATE_IMATRIX = "Згенерувати IMatrix"
+ self.ERROR = "Помилка"
+ self.WARNING = "Попередження"
+ self.PROPERTIES = "Властивості"
+ self.CANCEL = "Скасувати"
+ self.RESTART = "Перезапустити"
+ self.DELETE = "Видалити"
+ self.CONFIRM_DELETION = "Ви впевнені, що хочете видалити це завдання?"
+ self.TASK_RUNNING_WARNING = (
+ "Деякі завдання все ще виконуються. Ви впевнені, що хочете вийти?"
+ )
+ self.YES = "Так"
+ self.NO = "Ні"
+ self.DOWNLOAD_COMPLETE = "Завантаження завершено"
+ self.CUDA_EXTRACTION_FAILED = "Витягнення CUDA не вдалося"
+ self.PRESET_SAVED = "Пресет збережено"
+ self.PRESET_LOADED = "Пресет завантажено"
+ self.NO_ASSET_SELECTED = "Актив не вибрано"
+ self.DOWNLOAD_FAILED = "Завантаження не вдалося"
+ self.NO_BACKEND_SELECTED = "Бекенд не вибрано"
+ self.NO_MODEL_SELECTED = "Модель не вибрано"
+ self.REFRESH_RELEASES = "Оновити релізи"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Підходящі бекенди CUDA не знайдено"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}\nФайли CUDA витягнуто в {1}"
+ self.CUDA_FILES_EXTRACTED = "Файли CUDA витягнуто в"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Підходящий бекенд CUDA для витягнення не знайдено"
+ )
+ self.ERROR_FETCHING_RELEASES = "Помилка отримання релізів: {0}"
+ self.CONFIRM_DELETION_TITLE = "Підтвердити видалення"
+ self.LOG_FOR = "Лог для {0}"
+ self.ALL_FILES = "Всі файли (*)"
+ self.GGUF_FILES = "Файли GGUF (*.gguf)"
+ self.DAT_FILES = "Файли DAT (*.dat)"
+ self.JSON_FILES = "Файли JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Не вдалося завантажити пресет: {0}"
+ self.INITIALIZING_AUTOGGUF = "Ініціалізація програми AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Ініціалізація AutoGGUF завершена"
+ self.REFRESHING_BACKENDS = "Оновлення бекендів"
+ self.NO_BACKENDS_AVAILABLE = "Бекенди недоступні"
+ self.FOUND_VALID_BACKENDS = "Знайдено {0} дійсних бекендів"
+ self.SAVING_PRESET = "Збереження пресета"
+ self.PRESET_SAVED_TO = "Пресет збережено в {0}"
+ self.LOADING_PRESET = "Завантаження пресета"
+ self.PRESET_LOADED_FROM = "Пресет завантажено з {0}"
+ self.ADDING_KV_OVERRIDE = "Додавання KV перевизначення: {0}"
+ self.SAVING_TASK_PRESET = "Збереження пресета завдання для {0}"
+ self.TASK_PRESET_SAVED = "Пресет завдання збережено"
+ self.TASK_PRESET_SAVED_TO = "Пресет завдання збережено в {0}"
+ self.RESTARTING_TASK = "Перезапуск завдання: {0}"
+ self.IN_PROGRESS = "В процесі"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Завантаження завершено. Витягнуто в: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Бінарний файл llama.cpp завантажено та витягнуто в {0}\nФайли CUDA витягнуто в {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Підходящий бекенд CUDA для витягнення не знайдено"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Бінарний файл llama.cpp завантажено та витягнуто в {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Оновлення релізів llama.cpp"
+ self.UPDATING_ASSET_LIST = "Оновлення списку активів"
+ self.UPDATING_CUDA_OPTIONS = "Оновлення параметрів CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Початок завантаження llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Оновлення бекендів CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Бекенд CUDA для витягнення не вибрано"
+ self.EXTRACTING_CUDA_FILES = "Витягнення файлів CUDA з {0} в {1}"
+ self.DOWNLOAD_ERROR = "Помилка завантаження: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Відображення контекстного меню завдання"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Відображення властивостей завдання: {0}"
+ self.CANCELLING_TASK = "Скасування завдання: {0}"
+ self.CANCELED = "Скасовано"
+ self.DELETING_TASK = "Видалення завдання: {0}"
+ self.LOADING_MODELS = "Завантаження моделей"
+ self.LOADED_MODELS = "Завантажено {0} моделей"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Пошук каталогу моделей"
+ self.SELECT_MODELS_DIRECTORY = "Виберіть каталог моделей"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Пошук вихідного каталогу"
+ self.SELECT_OUTPUT_DIRECTORY = "Виберіть вихідний каталог"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Пошук каталогу логів"
+ self.SELECT_LOGS_DIRECTORY = "Виберіть каталог логів"
+ self.BROWSING_FOR_IMATRIX_FILE = "Пошук файлу IMatrix"
+ self.SELECT_IMATRIX_FILE = "Виберіть файл IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} МБ / {2} МБ)"
+ self.CPU_USAGE_FORMAT = "Використання ЦП: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Перевірка вхідних даних квантування"
+ self.MODELS_PATH_REQUIRED = "Потрібен шлях до моделей"
+ self.OUTPUT_PATH_REQUIRED = "Потрібен шлях виводу"
+ self.LOGS_PATH_REQUIRED = "Потрібен шлях до логів"
+ self.STARTING_MODEL_QUANTIZATION = "Початок квантування моделі"
+ self.INPUT_FILE_NOT_EXIST = "Вхідний файл '{0}' не існує."
+ self.QUANTIZING_MODEL_TO = "Квантування {0} в {1}"
+ self.QUANTIZATION_TASK_STARTED = "Завдання квантування запущено для {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Помилка запуску квантування: {0}"
+ self.UPDATING_MODEL_INFO = "Оновлення інформації про модель: {0}"
+ self.TASK_FINISHED = "Завдання завершено: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Відображення відомостей про завдання для: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Пошук файлу даних IMatrix"
+ self.SELECT_DATA_FILE = "Виберіть файл даних"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Пошук файлу моделі IMatrix"
+ self.SELECT_MODEL_FILE = "Виберіть файл моделі"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Пошук вихідного файлу IMatrix"
+ self.SELECT_OUTPUT_FILE = "Виберіть вихідний файл"
+ self.STARTING_IMATRIX_GENERATION = "Початок генерації IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Шлях бекенда не існує: {0}"
+ self.GENERATING_IMATRIX = "Генерація IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Помилка запуску генерації IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Завдання генерації IMatrix запущено"
+ self.ERROR_MESSAGE = "Помилка: {0}"
+ self.TASK_ERROR = "Помилка завдання: {0}"
+ self.APPLICATION_CLOSING = "Закриття програми"
+ self.APPLICATION_CLOSED = "Програма закрита"
+ self.SELECT_QUANTIZATION_TYPE = "Виберіть тип квантування"
+ self.ALLOWS_REQUANTIZING = (
+ "Дозволяє переквантувати тензори, які вже були квантовані"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Залишить output.weight не (пере)квантованим"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Вимкнути k-квантові суміші та квантувати всі тензори до одного типу"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Використовувати дані у файлі як матрицю важливості для оптимізації квантування"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Використовувати матрицю важливості для цих тензорів"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Не використовувати матрицю важливості для цих тензорів"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Тип вихідного тензора:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Використовувати цей тип для тензора output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Тип вбудовування токенів:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Використовувати цей тип для тензора вбудовування токенів"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Генеруватиме квантовану модель у тих самих шардах, що й вхідні дані"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Перевизначити метадані моделі"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "Вхідний файл даних для генерації IMatrix"
+ self.MODEL_TO_BE_QUANTIZED = "Модель для квантування"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Вихідний шлях для згенерованого IMatrix"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Як часто зберігати IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Встановити значення розвантаження GPU (-ngl)"
+ self.COMPLETED = "Завершено"
+ self.REFRESH_MODELS = "Оновити моделі"
+
+
+class _Japanese(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (自動GGUFモデル量子化器)"
+ self.RAM_USAGE = "RAM使用量:"
+ self.CPU_USAGE = "CPU使用率:"
+ self.BACKEND = "Llama.cppバックエンド:"
+ self.REFRESH_BACKENDS = "バックエンドを更新"
+ self.MODELS_PATH = "モデルパス:"
+ self.OUTPUT_PATH = "出力パス:"
+ self.LOGS_PATH = "ログパス:"
+ self.BROWSE = "参照"
+ self.AVAILABLE_MODELS = "利用可能なモデル:"
+ self.QUANTIZATION_TYPE = "量子化タイプ:"
+ self.ALLOW_REQUANTIZE = "再量子化を許可"
+ self.LEAVE_OUTPUT_TENSOR = "出力テンソルを残す"
+ self.PURE = "純粋"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "重みを含める:"
+ self.EXCLUDE_WEIGHTS = "重みを含めない:"
+ self.USE_OUTPUT_TENSOR_TYPE = "出力テンソルタイプを使用"
+ self.USE_TOKEN_EMBEDDING_TYPE = "トークン埋め込みタイプを使用"
+ self.KEEP_SPLIT = "分割を維持"
+ self.KV_OVERRIDES = "KVオーバーライド:"
+ self.ADD_NEW_OVERRIDE = "新しいオーバーライドを追加"
+ self.QUANTIZE_MODEL = "モデルを量子化"
+ self.SAVE_PRESET = "プリセットを保存"
+ self.LOAD_PRESET = "プリセットを読み込む"
+ self.TASKS = "タスク:"
+ self.DOWNLOAD_LLAMACPP = "llama.cppをダウンロード"
+ self.SELECT_RELEASE = "リリースを選択:"
+ self.SELECT_ASSET = "アセットを選択:"
+ self.EXTRACT_CUDA_FILES = "CUDAファイルを抽出"
+ self.SELECT_CUDA_BACKEND = "CUDAバックエンドを選択:"
+ self.DOWNLOAD = "ダウンロード"
+ self.IMATRIX_GENERATION = "IMatrix生成"
+ self.DATA_FILE = "データファイル:"
+ self.MODEL = "モデル:"
+ self.OUTPUT = "出力:"
+ self.OUTPUT_FREQUENCY = "出力頻度:"
+ self.GPU_OFFLOAD = "GPUオフロード:"
+ self.AUTO = "自動"
+ self.GENERATE_IMATRIX = "IMatrixを生成"
+ self.ERROR = "エラー"
+ self.WARNING = "警告"
+ self.PROPERTIES = "プロパティ"
+ self.CANCEL = "キャンセル"
+ self.RESTART = "再起動"
+ self.DELETE = "削除"
+ self.CONFIRM_DELETION = "このタスクを削除してもよろしいですか?"
+ self.TASK_RUNNING_WARNING = (
+ "一部のタスクはまだ実行中です。終了してもよろしいですか?"
+ )
+ self.YES = "はい"
+ self.NO = "いいえ"
+ self.DOWNLOAD_COMPLETE = "ダウンロード完了"
+ self.CUDA_EXTRACTION_FAILED = "CUDA抽出に失敗しました"
+ self.PRESET_SAVED = "プリセットが保存されました"
+ self.PRESET_LOADED = "プリセットが読み込まれました"
+ self.NO_ASSET_SELECTED = "アセットが選択されていません"
+ self.DOWNLOAD_FAILED = "ダウンロードに失敗しました"
+ self.NO_BACKEND_SELECTED = "バックエンドが選択されていません"
+ self.NO_MODEL_SELECTED = "モデルが選択されていません"
+ self.REFRESH_RELEASES = "リリースを更新"
+ self.NO_SUITABLE_CUDA_BACKENDS = "適切なCUDAバックエンドが見つかりませんでした"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました\nCUDAファイルは{1}に抽出されました"
+ self.CUDA_FILES_EXTRACTED = "CUDAファイルの抽出先"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "抽出に適したCUDAバックエンドが見つかりませんでした"
+ )
+ self.ERROR_FETCHING_RELEASES = "リリースの取得中にエラーが発生しました: {0}"
+ self.CONFIRM_DELETION_TITLE = "削除の確認"
+ self.LOG_FOR = "{0}のログ"
+ self.ALL_FILES = "すべてのファイル (*)"
+ self.GGUF_FILES = "GGUFファイル (*.gguf)"
+ self.DAT_FILES = "DATファイル (*.dat)"
+ self.JSON_FILES = "JSONファイル (*.json)"
+ self.FAILED_LOAD_PRESET = "プリセットの読み込みに失敗しました: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUFアプリケーションを初期化しています"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUFの初期化が完了しました"
+ self.REFRESHING_BACKENDS = "バックエンドを更新しています"
+ self.NO_BACKENDS_AVAILABLE = "利用可能なバックエンドがありません"
+ self.FOUND_VALID_BACKENDS = "{0}個の有効なバックエンドが見つかりました"
+ self.SAVING_PRESET = "プリセットを保存しています"
+ self.PRESET_SAVED_TO = "プリセットは{0}に保存されました"
+ self.LOADING_PRESET = "プリセットを読み込んでいます"
+ self.PRESET_LOADED_FROM = "{0}からプリセットが読み込まれました"
+ self.ADDING_KV_OVERRIDE = "KVオーバーライドを追加しています: {0}"
+ self.SAVING_TASK_PRESET = "{0}のタスクプリセットを保存しています"
+ self.TASK_PRESET_SAVED = "タスクプリセットが保存されました"
+ self.TASK_PRESET_SAVED_TO = "タスクプリセットは{0}に保存されました"
+ self.RESTARTING_TASK = "タスクを再起動しています: {0}"
+ self.IN_PROGRESS = "処理中"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "ダウンロードが完了しました。抽出先: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cppバイナリがダウンロードされ、{0}に抽出されました\nCUDAファイルは{1}に抽出されました"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "抽出に適したCUDAバックエンドが見つかりませんでした"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cppバイナリがダウンロードされ、{0}に抽出されました"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cppリリースを更新しています"
+ self.UPDATING_ASSET_LIST = "アセットリストを更新しています"
+ self.UPDATING_CUDA_OPTIONS = "CUDAオプションを更新しています"
+ self.STARTING_LLAMACPP_DOWNLOAD = "llama.cppのダウンロードを開始しています"
+ self.UPDATING_CUDA_BACKENDS = "CUDAバックエンドを更新しています"
+ self.NO_CUDA_BACKEND_SELECTED = "抽出にCUDAバックエンドが選択されていません"
+ self.EXTRACTING_CUDA_FILES = "{0}から{1}にCUDAファイルを抽出しています"
+ self.DOWNLOAD_ERROR = "ダウンロードエラー: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "タスクコンテキストメニューを表示しています"
+ self.SHOWING_PROPERTIES_FOR_TASK = "タスクのプロパティを表示しています: {0}"
+ self.CANCELLING_TASK = "タスクをキャンセルしています: {0}"
+ self.CANCELED = "キャンセル済み"
+ self.DELETING_TASK = "タスクを削除しています: {0}"
+ self.LOADING_MODELS = "モデルを読み込んでいます"
+ self.LOADED_MODELS = "{0}個のモデルが読み込まれました"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "モデルディレクトリを参照しています"
+ self.SELECT_MODELS_DIRECTORY = "モデルディレクトリを選択"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "出力ディレクトリを参照しています"
+ self.SELECT_OUTPUT_DIRECTORY = "出力ディレクトリを選択"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "ログディレクトリを参照しています"
+ self.SELECT_LOGS_DIRECTORY = "ログディレクトリを選択"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrixファイルを参照しています"
+ self.SELECT_IMATRIX_FILE = "IMatrixファイルを選択"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU使用率: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "量子化入力を検証しています"
+ self.MODELS_PATH_REQUIRED = "モデルパスが必要です"
+ self.OUTPUT_PATH_REQUIRED = "出力パスが必要です"
+ self.LOGS_PATH_REQUIRED = "ログパスが必要です"
+ self.STARTING_MODEL_QUANTIZATION = "モデルの量子化を開始しています"
+ self.INPUT_FILE_NOT_EXIST = "入力ファイル '{0}' は存在しません。"
+ self.QUANTIZING_MODEL_TO = "{0} を {1} に量子化しています"
+ self.QUANTIZATION_TASK_STARTED = "{0} の量子化タスクが開始されました"
+ self.ERROR_STARTING_QUANTIZATION = "量子化の開始中にエラーが発生しました: {0}"
+ self.UPDATING_MODEL_INFO = "モデル情報を更新しています: {0}"
+ self.TASK_FINISHED = "タスクが完了しました: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "タスクの詳細を表示しています: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrixデータファイルを参照しています"
+ self.SELECT_DATA_FILE = "データファイルを選択"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrixモデルファイルを参照しています"
+ self.SELECT_MODEL_FILE = "モデルファイルを選択"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix出力ファイルを参照しています"
+ self.SELECT_OUTPUT_FILE = "出力ファイルを選択"
+ self.STARTING_IMATRIX_GENERATION = "IMatrixの生成を開始しています"
+ self.BACKEND_PATH_NOT_EXIST = "バックエンドパスが存在しません: {0}"
+ self.GENERATING_IMATRIX = "IMatrixを生成しています"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "IMatrixの生成を開始中にエラーが発生しました: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix生成タスクが開始されました"
+ self.ERROR_MESSAGE = "エラー: {0}"
+ self.TASK_ERROR = "タスクエラー: {0}"
+ self.APPLICATION_CLOSING = "アプリケーションを終了しています"
+ self.APPLICATION_CLOSED = "アプリケーションが終了しました"
+ self.SELECT_QUANTIZATION_TYPE = "量子化タイプを選択してください"
+ self.ALLOWS_REQUANTIZING = (
+ "すでに量子化されているテンソルの再量子化を許可します"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "output.weightは(再)量子化されません"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "k-quant混合を無効にし、すべてのテンソルを同じタイプに量子化します"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "量子化最適化の重要度マトリックスとしてファイル内のデータを使用します"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "これらのテンソルに重要度マトリックスを使用します"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "これらのテンソルに重要度マトリックスを使用しません"
+ )
+ self.OUTPUT_TENSOR_TYPE = "出力テンソルタイプ:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "output.weightテンソルにこのタイプを使用します"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "トークン埋め込みタイプ:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "トークン埋め込みテンソルにこのタイプを使用します"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "入力と同じシャードで量子化されたモデルを生成します"
+ )
+ self.OVERRIDE_MODEL_METADATA = "モデルメタデータを上書きする"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix生成用の入力データファイル"
+ self.MODEL_TO_BE_QUANTIZED = "量子化されるモデル"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "生成されたIMatrixの出力パス"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrixを保存する頻度"
+ self.SET_GPU_OFFLOAD_VALUE = "GPUオフロード値を設定 (-ngl)"
+ self.COMPLETED = "完了しました"
+ self.REFRESH_MODELS = "モデルを更新"
+
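+ # Format-spec sketch (illustrative): the usage templates embed a float spec
+ # ({0:.1f}), so callers are assumed to pass a numeric percentage, e.g.
+ #   _Japanese().CPU_USAGE_FORMAT.format(42.0)  # -> "CPU使用率: 42.0%"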
+
+class _German(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (automatisierter GGUF-Modellquantisierer)"
+ self.RAM_USAGE = "RAM-Nutzung:"
+ self.CPU_USAGE = "CPU-Auslastung:"
+ self.BACKEND = "Llama.cpp-Backend:"
+ self.REFRESH_BACKENDS = "Backends aktualisieren"
+ self.MODELS_PATH = "Modellpfad:"
+ self.OUTPUT_PATH = "Ausgabepfad:"
+ self.LOGS_PATH = "Log-Pfad:"
+ self.BROWSE = "Durchsuchen"
+ self.AVAILABLE_MODELS = "Verfügbare Modelle:"
+ self.QUANTIZATION_TYPE = "Quantisierungstyp:"
+ self.ALLOW_REQUANTIZE = "Requantisierung zulassen"
+ self.LEAVE_OUTPUT_TENSOR = "Ausgabetensor belassen"
+ self.PURE = "Rein"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Gewichte einschließen:"
+ self.EXCLUDE_WEIGHTS = "Gewichte ausschließen:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Ausgabetensortyp verwenden"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Token-Einbettungstyp verwenden"
+ self.KEEP_SPLIT = "Aufteilung beibehalten"
+ self.KV_OVERRIDES = "KV-Überschreibungen:"
+ self.ADD_NEW_OVERRIDE = "Neue Überschreibung hinzufügen"
+ self.QUANTIZE_MODEL = "Modell quantisieren"
+ self.SAVE_PRESET = "Preset speichern"
+ self.LOAD_PRESET = "Preset laden"
+ self.TASKS = "Aufgaben:"
+ self.DOWNLOAD_LLAMACPP = "llama.cpp herunterladen"
+ self.SELECT_RELEASE = "Release auswählen:"
+ self.SELECT_ASSET = "Asset auswählen:"
+ self.EXTRACT_CUDA_FILES = "CUDA-Dateien extrahieren"
+ self.SELECT_CUDA_BACKEND = "CUDA-Backend auswählen:"
+ self.DOWNLOAD = "Herunterladen"
+ self.IMATRIX_GENERATION = "IMatrix-Generierung"
+ self.DATA_FILE = "Datendatei:"
+ self.MODEL = "Modell:"
+ self.OUTPUT = "Ausgabe:"
+ self.OUTPUT_FREQUENCY = "Ausgabefrequenz:"
+ self.GPU_OFFLOAD = "GPU-Offload:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "IMatrix generieren"
+ self.ERROR = "Fehler"
+ self.WARNING = "Warnung"
+ self.PROPERTIES = "Eigenschaften"
+ self.CANCEL = "Abbrechen"
+ self.RESTART = "Neustart"
+ self.DELETE = "Löschen"
+ self.CONFIRM_DELETION = (
+ "Sind Sie sicher, dass Sie diese Aufgabe löschen möchten?"
+ )
+ self.TASK_RUNNING_WARNING = (
+ "Einige Aufgaben laufen noch. Möchten Sie wirklich beenden?"
+ )
+ self.YES = "Ja"
+ self.NO = "Nein"
+ self.DOWNLOAD_COMPLETE = "Download abgeschlossen"
+ self.CUDA_EXTRACTION_FAILED = "CUDA-Extraktion fehlgeschlagen"
+ self.PRESET_SAVED = "Preset gespeichert"
+ self.PRESET_LOADED = "Preset geladen"
+ self.NO_ASSET_SELECTED = "Kein Asset ausgewählt"
+ self.DOWNLOAD_FAILED = "Download fehlgeschlagen"
+ self.NO_BACKEND_SELECTED = "Kein Backend ausgewählt"
+ self.NO_MODEL_SELECTED = "Kein Modell ausgewählt"
+ self.REFRESH_RELEASES = "Releases aktualisieren"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Keine geeigneten CUDA-Backends gefunden"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}\nCUDA-Dateien extrahiert nach {1}"
+ self.CUDA_FILES_EXTRACTED = "CUDA-Dateien extrahiert nach"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Kein geeignetes CUDA-Backend für die Extraktion gefunden"
+ )
+ self.ERROR_FETCHING_RELEASES = "Fehler beim Abrufen der Releases: {0}"
+ self.CONFIRM_DELETION_TITLE = "Löschen bestätigen"
+ self.LOG_FOR = "Log für {0}"
+ self.ALL_FILES = "Alle Dateien (*)"
+ self.GGUF_FILES = "GGUF-Dateien (*.gguf)"
+ self.DAT_FILES = "DAT-Dateien (*.dat)"
+ self.JSON_FILES = "JSON-Dateien (*.json)"
+ self.FAILED_LOAD_PRESET = "Preset konnte nicht geladen werden: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUF-Anwendung wird initialisiert"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-Initialisierung abgeschlossen"
+ self.REFRESHING_BACKENDS = "Backends werden aktualisiert"
+ self.NO_BACKENDS_AVAILABLE = "Keine Backends verfügbar"
+ self.FOUND_VALID_BACKENDS = "{0} gültige Backends gefunden"
+ self.SAVING_PRESET = "Preset wird gespeichert"
+ self.PRESET_SAVED_TO = "Preset gespeichert unter {0}"
+ self.LOADING_PRESET = "Preset wird geladen"
+ self.PRESET_LOADED_FROM = "Preset von {0} geladen"
+ self.ADDING_KV_OVERRIDE = "KV-Überschreibung wird hinzugefügt: {0}"
+ self.SAVING_TASK_PRESET = "Task-Preset für {0} wird gespeichert"
+ self.TASK_PRESET_SAVED = "Task-Preset gespeichert"
+ self.TASK_PRESET_SAVED_TO = "Task-Preset gespeichert unter {0}"
+ self.RESTARTING_TASK = "Aufgabe wird neu gestartet: {0}"
+ self.IN_PROGRESS = "In Bearbeitung"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "Download abgeschlossen. Extrahiert nach: {0}"
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}\nCUDA-Dateien extrahiert nach {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Kein geeignetes CUDA-Backend für die Extraktion gefunden"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp-Binärdatei heruntergeladen und extrahiert nach {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp-Releases werden aktualisiert"
+ self.UPDATING_ASSET_LIST = "Asset-Liste wird aktualisiert"
+ self.UPDATING_CUDA_OPTIONS = "CUDA-Optionen werden aktualisiert"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Download von llama.cpp wird gestartet"
+ self.UPDATING_CUDA_BACKENDS = "CUDA-Backends werden aktualisiert"
+ self.NO_CUDA_BACKEND_SELECTED = (
+ "Kein CUDA-Backend für die Extraktion ausgewählt"
+ )
+ self.EXTRACTING_CUDA_FILES = "CUDA-Dateien werden von {0} nach {1} extrahiert"
+ self.DOWNLOAD_ERROR = "Download-Fehler: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Kontextmenü der Aufgabe wird angezeigt"
+ self.SHOWING_PROPERTIES_FOR_TASK = (
+ "Eigenschaften für Aufgabe werden angezeigt: {0}"
+ )
+ self.CANCELLING_TASK = "Aufgabe wird abgebrochen: {0}"
+ self.CANCELED = "Abgebrochen"
+ self.DELETING_TASK = "Aufgabe wird gelöscht: {0}"
+ self.LOADING_MODELS = "Modelle werden geladen"
+ self.LOADED_MODELS = "{0} Modelle geladen"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Modelle-Verzeichnis wird durchsucht"
+ self.SELECT_MODELS_DIRECTORY = "Modelle-Verzeichnis auswählen"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Ausgabeverzeichnis wird durchsucht"
+ self.SELECT_OUTPUT_DIRECTORY = "Ausgabeverzeichnis auswählen"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Log-Verzeichnis wird durchsucht"
+ self.SELECT_LOGS_DIRECTORY = "Log-Verzeichnis auswählen"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrix-Datei wird durchsucht"
+ self.SELECT_IMATRIX_FILE = "IMatrix-Datei auswählen"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU-Auslastung: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Quantisierungseingaben werden validiert"
+ self.MODELS_PATH_REQUIRED = "Modelle-Pfad ist erforderlich"
+ self.OUTPUT_PATH_REQUIRED = "Ausgabepfad ist erforderlich"
+ self.LOGS_PATH_REQUIRED = "Log-Pfad ist erforderlich"
+ self.STARTING_MODEL_QUANTIZATION = "Modellquantisierung wird gestartet"
+ self.INPUT_FILE_NOT_EXIST = "Die Eingabedatei '{0}' existiert nicht."
+ self.QUANTIZING_MODEL_TO = "Quantisierung von {0} zu {1}"
+ self.QUANTIZATION_TASK_STARTED = "Quantisierungsaufgabe für {0} gestartet"
+ self.ERROR_STARTING_QUANTIZATION = "Fehler beim Starten der Quantisierung: {0}"
+ self.UPDATING_MODEL_INFO = "Modellinformationen werden aktualisiert: {0}"
+ self.TASK_FINISHED = "Aufgabe abgeschlossen: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Aufgabendetails werden angezeigt für: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix-Datendatei wird durchsucht"
+ self.SELECT_DATA_FILE = "Datendatei auswählen"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix-Modelldatei wird durchsucht"
+ self.SELECT_MODEL_FILE = "Modelldatei auswählen"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix-Ausgabedatei wird durchsucht"
+ self.SELECT_OUTPUT_FILE = "Ausgabedatei auswählen"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix-Generierung wird gestartet"
+ self.BACKEND_PATH_NOT_EXIST = "Backend-Pfad existiert nicht: {0}"
+ self.GENERATING_IMATRIX = "IMatrix wird generiert"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Fehler beim Starten der IMatrix-Generierung: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-Generierungsaufgabe gestartet"
+ self.ERROR_MESSAGE = "Fehler: {0}"
+ self.TASK_ERROR = "Aufgabenfehler: {0}"
+ self.APPLICATION_CLOSING = "Anwendung wird geschlossen"
+ self.APPLICATION_CLOSED = "Anwendung geschlossen"
+ self.SELECT_QUANTIZATION_TYPE = "Wählen Sie den Quantisierungstyp aus"
+ self.ALLOWS_REQUANTIZING = "Ermöglicht die Requantisierung von Tensoren, die bereits quantisiert wurden"
+ self.LEAVE_OUTPUT_WEIGHT = "output.weight wird nicht (re)quantisiert"
+ self.DISABLE_K_QUANT_MIXTURES = "Deaktivieren Sie k-Quant-Mischungen und quantisieren Sie alle Tensoren auf denselben Typ"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Verwenden Sie Daten in der Datei als Wichtigkeitsmatrix für Quant-Optimierungen"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Verwenden Sie die Wichtigkeitsmatrix für diese Tensoren"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Verwenden Sie die Wichtigkeitsmatrix nicht für diese Tensoren"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Ausgabetensortyp:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Verwenden Sie diesen Typ für den output.weight-Tensor"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token-Einbettungstyp:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Verwenden Sie diesen Typ für den Token-Einbettungstensor"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Generiert ein quantisiertes Modell in denselben Shards wie die Eingabe"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Modellmetadaten überschreiben"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Eingabedatendatei für die IMatrix-Generierung"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Zu quantisierendes Modell"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Ausgabepfad für die generierte IMatrix"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Wie oft die IMatrix gespeichert werden soll"
+ self.SET_GPU_OFFLOAD_VALUE = "GPU-Offload-Wert festlegen (-ngl)"
+ self.COMPLETED = "Abgeschlossen"
+ self.REFRESH_MODELS = "Modelle aktualisieren"
+
+
+class _Portuguese(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Quantizador Automático de Modelos GGUF)"
+ self.RAM_USAGE = "Uso de RAM:"
+ self.CPU_USAGE = "Uso da CPU:"
+ self.BACKEND = "Backend do Llama.cpp:"
+ self.REFRESH_BACKENDS = "Atualizar Backends"
+ self.MODELS_PATH = "Caminho dos Modelos:"
+ self.OUTPUT_PATH = "Caminho de Saída:"
+ self.LOGS_PATH = "Caminho dos Logs:"
+ self.BROWSE = "Navegar"
+ self.AVAILABLE_MODELS = "Modelos Disponíveis:"
+ self.QUANTIZATION_TYPE = "Tipo de Quantização:"
+ self.ALLOW_REQUANTIZE = "Permitir Requantização"
+ self.LEAVE_OUTPUT_TENSOR = "Manter Tensor de Saída"
+ self.PURE = "Puro"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Incluir Pesos:"
+ self.EXCLUDE_WEIGHTS = "Excluir Pesos:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Saída"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incorporação de Token"
+ self.KEEP_SPLIT = "Manter Divisão"
+ self.KV_OVERRIDES = "Substituições KV:"
+ self.ADD_NEW_OVERRIDE = "Adicionar Nova Substituição"
+ self.QUANTIZE_MODEL = "Quantizar Modelo"
+ self.SAVE_PRESET = "Salvar Predefinição"
+ self.LOAD_PRESET = "Carregar Predefinição"
+ self.TASKS = "Tarefas:"
+ self.DOWNLOAD_LLAMACPP = "Baixar llama.cpp"
+ self.SELECT_RELEASE = "Selecionar Versão:"
+ self.SELECT_ASSET = "Selecionar Ativo:"
+ self.EXTRACT_CUDA_FILES = "Extrair Arquivos CUDA"
+ self.SELECT_CUDA_BACKEND = "Selecionar Backend CUDA:"
+ self.DOWNLOAD = "Baixar"
+ self.IMATRIX_GENERATION = "Geração de IMatrix"
+ self.DATA_FILE = "Arquivo de Dados:"
+ self.MODEL = "Modelo:"
+ self.OUTPUT = "Saída:"
+ self.OUTPUT_FREQUENCY = "Frequência de Saída:"
+ self.GPU_OFFLOAD = "Offload da GPU:"
+ self.AUTO = "Automático"
+ self.GENERATE_IMATRIX = "Gerar IMatrix"
+ self.ERROR = "Erro"
+ self.WARNING = "Aviso"
+ self.PROPERTIES = "Propriedades"
+ self.CANCEL = "Cancelar"
+ self.RESTART = "Reiniciar"
+ self.DELETE = "Excluir"
+ self.CONFIRM_DELETION = "Tem certeza de que deseja excluir esta tarefa?"
+ self.TASK_RUNNING_WARNING = (
+ "Algumas tarefas ainda estão em execução. Tem certeza de que deseja sair?"
+ )
+ self.YES = "Sim"
+ self.NO = "Não"
+ self.DOWNLOAD_COMPLETE = "Download Concluído"
+ self.CUDA_EXTRACTION_FAILED = "Falha na Extração do CUDA"
+ self.PRESET_SAVED = "Predefinição Salva"
+ self.PRESET_LOADED = "Predefinição Carregada"
+ self.NO_ASSET_SELECTED = "Nenhum ativo selecionado"
+ self.DOWNLOAD_FAILED = "Falha no download"
+ self.NO_BACKEND_SELECTED = "Nenhum backend selecionado"
+ self.NO_MODEL_SELECTED = "Nenhum modelo selecionado"
+ self.REFRESH_RELEASES = "Atualizar Versões"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nenhum backend CUDA adequado encontrado"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}\nArquivos CUDA extraídos para {1}"
+ self.CUDA_FILES_EXTRACTED = "Arquivos CUDA extraídos para"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nenhum backend CUDA adequado encontrado para extração"
+ )
+ self.ERROR_FETCHING_RELEASES = "Erro ao buscar versões: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirmar Exclusão"
+ self.LOG_FOR = "Log para {0}"
+ self.ALL_FILES = "Todos os Arquivos (*)"
+ self.GGUF_FILES = "Arquivos GGUF (*.gguf)"
+ self.DAT_FILES = "Arquivos DAT (*.dat)"
+ self.JSON_FILES = "Arquivos JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Falha ao carregar a predefinição: {0}"
+ self.INITIALIZING_AUTOGGUF = "Inicializando o aplicativo AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialização do AutoGGUF concluída"
+ self.REFRESHING_BACKENDS = "Atualizando backends"
+ self.NO_BACKENDS_AVAILABLE = "Nenhum backend disponível"
+ self.FOUND_VALID_BACKENDS = "{0} backends válidos encontrados"
+ self.SAVING_PRESET = "Salvando predefinição"
+ self.PRESET_SAVED_TO = "Predefinição salva em {0}"
+ self.LOADING_PRESET = "Carregando predefinição"
+ self.PRESET_LOADED_FROM = "Predefinição carregada de {0}"
+ self.ADDING_KV_OVERRIDE = "Adicionando substituição KV: {0}"
+ self.SAVING_TASK_PRESET = "Salvando predefinição de tarefa para {0}"
+ self.TASK_PRESET_SAVED = "Predefinição de Tarefa Salva"
+ self.TASK_PRESET_SAVED_TO = "Predefinição de tarefa salva em {0}"
+ self.RESTARTING_TASK = "Reiniciando tarefa: {0}"
+ self.IN_PROGRESS = "Em Andamento"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download concluído. Extraído para: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp baixado e extraído para {0}\nArquivos CUDA extraídos para {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nenhum backend CUDA adequado encontrado para extração"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Binário llama.cpp baixado e extraído para {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Atualizando versões do llama.cpp"
+ self.UPDATING_ASSET_LIST = "Atualizando lista de ativos"
+ self.UPDATING_CUDA_OPTIONS = "Atualizando opções CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Iniciando download do llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Atualizando backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Nenhum backend CUDA selecionado para extração"
+ self.EXTRACTING_CUDA_FILES = "Extraindo arquivos CUDA de {0} para {1}"
+ self.DOWNLOAD_ERROR = "Erro de download: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Exibindo menu de contexto da tarefa"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Exibindo propriedades para a tarefa: {0}"
+ self.CANCELLING_TASK = "Cancelando tarefa: {0}"
+ self.CANCELED = "Cancelado"
+ self.DELETING_TASK = "Excluindo tarefa: {0}"
+ self.LOADING_MODELS = "Carregando modelos"
+ self.LOADED_MODELS = "{0} modelos carregados"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Navegando pelo diretório de modelos"
+ self.SELECT_MODELS_DIRECTORY = "Selecionar Diretório de Modelos"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Navegando pelo diretório de saída"
+ self.SELECT_OUTPUT_DIRECTORY = "Selecionar Diretório de Saída"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Navegando pelo diretório de logs"
+ self.SELECT_LOGS_DIRECTORY = "Selecionar Diretório de Logs"
+ self.BROWSING_FOR_IMATRIX_FILE = "Navegando pelo arquivo IMatrix"
+ self.SELECT_IMATRIX_FILE = "Selecionar Arquivo IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Uso da CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validando entradas de quantização"
+ self.MODELS_PATH_REQUIRED = "O caminho dos modelos é obrigatório"
+ self.OUTPUT_PATH_REQUIRED = "O caminho de saída é obrigatório"
+ self.LOGS_PATH_REQUIRED = "O caminho dos logs é obrigatório"
+ self.STARTING_MODEL_QUANTIZATION = "Iniciando a quantização do modelo"
+ self.INPUT_FILE_NOT_EXIST = "O arquivo de entrada '{0}' não existe."
+ self.QUANTIZING_MODEL_TO = "Quantizando {0} para {1}"
+ self.QUANTIZATION_TASK_STARTED = "Tarefa de quantização iniciada para {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Erro ao iniciar a quantização: {0}"
+ self.UPDATING_MODEL_INFO = "Atualizando informações do modelo: {0}"
+ self.TASK_FINISHED = "Tarefa concluída: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Mostrando detalhes da tarefa para: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Navegando pelo arquivo de dados IMatrix"
+ self.SELECT_DATA_FILE = "Selecionar Arquivo de Dados"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = (
+ "Navegando pelo arquivo de modelo IMatrix"
+ )
+ self.SELECT_MODEL_FILE = "Selecionar Arquivo de Modelo"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = (
+ "Navegando pelo arquivo de saída IMatrix"
+ )
+ self.SELECT_OUTPUT_FILE = "Selecionar Arquivo de Saída"
+ self.STARTING_IMATRIX_GENERATION = "Iniciando a geração de IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "O caminho do backend não existe: {0}"
+ self.GENERATING_IMATRIX = "Gerando IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Erro ao iniciar a geração de IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Tarefa de geração de IMatrix iniciada"
+ self.ERROR_MESSAGE = "Erro: {0}"
+ self.TASK_ERROR = "Erro de tarefa: {0}"
+ self.APPLICATION_CLOSING = "Fechando o aplicativo"
+ self.APPLICATION_CLOSED = "Aplicativo fechado"
+ self.SELECT_QUANTIZATION_TYPE = "Selecione o tipo de quantização"
+ self.ALLOWS_REQUANTIZING = (
+ "Permite requantizar tensores que já foram quantizados"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Deixará output.weight não (re)quantizado"
+ self.DISABLE_K_QUANT_MIXTURES = "Desabilitar misturas k-quant e quantizar todos os tensores para o mesmo tipo"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usar os dados no arquivo como matriz de importância para otimizações de quantização"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Usar matriz de importância para estes tensores"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Não usar matriz de importância para estes tensores"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Saída:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Usar este tipo para o tensor output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Tipo de Incorporação de Token:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Usar este tipo para o tensor de incorporações de token"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Irá gerar o modelo quantizado nos mesmos shards da entrada"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Substituir metadados do modelo"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Arquivo de dados de entrada para geração de IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modelo a ser quantizado"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Caminho de saída para o IMatrix gerado"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Com que frequência salvar o IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Definir valor de offload da GPU (-ngl)"
+ self.COMPLETED = "Concluído"
+ self.REFRESH_MODELS = "Atualizar modelos"
+
+
+class _Arabic(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (مُكَمِّم نماذج GGUF التلقائي)"
+ self.RAM_USAGE = "استخدام ذاكرة الوصول العشوائي:"
+ self.CPU_USAGE = "استخدام وحدة المعالجة المركزية:"
+ self.BACKEND = "خلفية Llama.cpp:"
+ self.REFRESH_BACKENDS = "تحديث الخلفيات"
+ self.MODELS_PATH = "مسار النماذج:"
+ self.OUTPUT_PATH = "مسار الإخراج:"
+ self.LOGS_PATH = "مسار السجلات:"
+ self.BROWSE = "استعراض"
+ self.AVAILABLE_MODELS = "النماذج المتاحة:"
+ self.QUANTIZATION_TYPE = "نوع التكميم:"
+ self.ALLOW_REQUANTIZE = "السماح بإعادة التكميم"
+ self.LEAVE_OUTPUT_TENSOR = "ترك موتر الإخراج"
+ self.PURE = "نقي"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "تضمين الأوزان:"
+ self.EXCLUDE_WEIGHTS = "استبعاد الأوزان:"
+ self.USE_OUTPUT_TENSOR_TYPE = "استخدام نوع موتر الإخراج"
+ self.USE_TOKEN_EMBEDDING_TYPE = "استخدام نوع تضمين الرمز المميز"
+ self.KEEP_SPLIT = "الحفاظ على التقسيم"
+ self.KV_OVERRIDES = "تجاوزات KV:"
+ self.ADD_NEW_OVERRIDE = "إضافة تجاوز جديد"
+ self.QUANTIZE_MODEL = "تكميم النموذج"
+ self.SAVE_PRESET = "حفظ الإعداد المسبق"
+ self.LOAD_PRESET = "تحميل الإعداد المسبق"
+ self.TASKS = "المهام:"
+ self.DOWNLOAD_LLAMACPP = "تنزيل llama.cpp"
+ self.SELECT_RELEASE = "تحديد الإصدار:"
+ self.SELECT_ASSET = "تحديد الأصل:"
+ self.EXTRACT_CUDA_FILES = "استخراج ملفات CUDA"
+ self.SELECT_CUDA_BACKEND = "تحديد خلفية CUDA:"
+ self.DOWNLOAD = "تنزيل"
+ self.IMATRIX_GENERATION = "توليد IMatrix"
+ self.DATA_FILE = "ملف البيانات:"
+ self.MODEL = "النموذج:"
+ self.OUTPUT = "الإخراج:"
+ self.OUTPUT_FREQUENCY = "تردد الإخراج:"
+ self.GPU_OFFLOAD = "تفريغ GPU:"
+ self.AUTO = "تلقائي"
+ self.GENERATE_IMATRIX = "توليد IMatrix"
+ self.ERROR = "خطأ"
+ self.WARNING = "تحذير"
+ self.PROPERTIES = "الخصائص"
+ self.CANCEL = "إلغاء"
+ self.RESTART = "إعادة تشغيل"
+ self.DELETE = "حذف"
+ self.CONFIRM_DELETION = "هل أنت متأكد أنك تريد حذف هذه المهمة؟"
+ self.TASK_RUNNING_WARNING = (
+ "لا تزال بعض المهام قيد التشغيل. هل أنت متأكد أنك تريد الإنهاء؟"
+ )
+ self.YES = "نعم"
+ self.NO = "لا"
+ self.DOWNLOAD_COMPLETE = "اكتمل التنزيل"
+ self.CUDA_EXTRACTION_FAILED = "فشل استخراج CUDA"
+ self.PRESET_SAVED = "تم حفظ الإعداد المسبق"
+ self.PRESET_LOADED = "تم تحميل الإعداد المسبق"
+ self.NO_ASSET_SELECTED = "لم يتم تحديد أصل"
+ self.DOWNLOAD_FAILED = "فشل التنزيل"
+ self.NO_BACKEND_SELECTED = "لم يتم تحديد خلفية"
+ self.NO_MODEL_SELECTED = "لم يتم تحديد نموذج"
+ self.REFRESH_RELEASES = "تحديث الإصدارات"
+ self.NO_SUITABLE_CUDA_BACKENDS = "لم يتم العثور على خلفيات CUDA مناسبة"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}\nتم استخراج ملفات CUDA إلى {1}"
+ self.CUDA_FILES_EXTRACTED = "تم استخراج ملفات CUDA إلى"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "لم يتم العثور على خلفية CUDA مناسبة للاستخراج"
+ )
+ self.ERROR_FETCHING_RELEASES = "خطأ في جلب الإصدارات: {0}"
+ self.CONFIRM_DELETION_TITLE = "تأكيد الحذف"
+ self.LOG_FOR = "سجل لـ {0}"
+ self.ALL_FILES = "جميع الملفات (*)"
+ self.GGUF_FILES = "ملفات GGUF (*.gguf)"
+ self.DAT_FILES = "ملفات DAT (*.dat)"
+ self.JSON_FILES = "ملفات JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "فشل تحميل الإعداد المسبق: {0}"
+ self.INITIALIZING_AUTOGGUF = "تهيئة تطبيق AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "اكتملت تهيئة AutoGGUF"
+ self.REFRESHING_BACKENDS = "تحديث الخلفيات"
+ self.NO_BACKENDS_AVAILABLE = "لا توجد خلفيات متاحة"
+ self.FOUND_VALID_BACKENDS = "تم العثور على {0} خلفيات صالحة"
+ self.SAVING_PRESET = "حفظ الإعداد المسبق"
+ self.PRESET_SAVED_TO = "تم حفظ الإعداد المسبق إلى {0}"
+ self.LOADING_PRESET = "تحميل الإعداد المسبق"
+ self.PRESET_LOADED_FROM = "تم تحميل الإعداد المسبق من {0}"
+ self.ADDING_KV_OVERRIDE = "إضافة تجاوز KV: {0}"
+ self.SAVING_TASK_PRESET = "حفظ الإعداد المسبق للمهمة لـ {0}"
+ self.TASK_PRESET_SAVED = "تم حفظ الإعداد المسبق للمهمة"
+ self.TASK_PRESET_SAVED_TO = "تم حفظ الإعداد المسبق للمهمة إلى {0}"
+ self.RESTARTING_TASK = "إعادة تشغيل المهمة: {0}"
+ self.IN_PROGRESS = "قيد التقدم"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "اكتمل التنزيل. تم الاستخراج إلى: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}\nتم استخراج ملفات CUDA إلى {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "لم يتم العثور على خلفية CUDA مناسبة للاستخراج"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "تم تنزيل ملف llama.cpp الثنائي واستخراجه إلى {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "تحديث إصدارات llama.cpp"
+ self.UPDATING_ASSET_LIST = "تحديث قائمة الأصول"
+ self.UPDATING_CUDA_OPTIONS = "تحديث خيارات CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "بدء تنزيل llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "تحديث خلفيات CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "لم يتم تحديد خلفية CUDA للاستخراج"
+ self.EXTRACTING_CUDA_FILES = "استخراج ملفات CUDA من {0} إلى {1}"
+ self.DOWNLOAD_ERROR = "خطأ في التنزيل: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "إظهار قائمة سياق المهمة"
+ self.SHOWING_PROPERTIES_FOR_TASK = "إظهار خصائص المهمة: {0}"
+ self.CANCELLING_TASK = "إلغاء المهمة: {0}"
+ self.CANCELED = "تم الإلغاء"
+ self.DELETING_TASK = "حذف المهمة: {0}"
+ self.LOADING_MODELS = "تحميل النماذج"
+ self.LOADED_MODELS = "تم تحميل {0} نماذج"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "استعراض دليل النماذج"
+ self.SELECT_MODELS_DIRECTORY = "حدد دليل النماذج"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "استعراض دليل الإخراج"
+ self.SELECT_OUTPUT_DIRECTORY = "حدد دليل الإخراج"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "استعراض دليل السجلات"
+ self.SELECT_LOGS_DIRECTORY = "حدد دليل السجلات"
+ self.BROWSING_FOR_IMATRIX_FILE = "استعراض ملف IMatrix"
+ self.SELECT_IMATRIX_FILE = "حدد ملف IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} ميغابايت / {2} ميغابايت)"
+ self.CPU_USAGE_FORMAT = "استخدام وحدة المعالجة المركزية: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "التحقق من صحة مدخلات التكميم"
+ self.MODELS_PATH_REQUIRED = "مسار النماذج مطلوب"
+ self.OUTPUT_PATH_REQUIRED = "مسار الإخراج مطلوب"
+ self.LOGS_PATH_REQUIRED = "مسار السجلات مطلوب"
+ self.STARTING_MODEL_QUANTIZATION = "بدء تكميم النموذج"
+ self.INPUT_FILE_NOT_EXIST = "ملف الإدخال '{0}' غير موجود."
+ self.QUANTIZING_MODEL_TO = "تكميم {0} إلى {1}"
+ self.QUANTIZATION_TASK_STARTED = "بدأت مهمة التكميم لـ {0}"
+ self.ERROR_STARTING_QUANTIZATION = "خطأ في بدء التكميم: {0}"
+ self.UPDATING_MODEL_INFO = "تحديث معلومات النموذج: {0}"
+ self.TASK_FINISHED = "انتهت المهمة: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "إظهار تفاصيل المهمة لـ: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "استعراض ملف بيانات IMatrix"
+ self.SELECT_DATA_FILE = "حدد ملف البيانات"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "استعراض ملف نموذج IMatrix"
+ self.SELECT_MODEL_FILE = "حدد ملف النموذج"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "استعراض ملف إخراج IMatrix"
+ self.SELECT_OUTPUT_FILE = "حدد ملف الإخراج"
+ self.STARTING_IMATRIX_GENERATION = "بدء توليد IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "مسار الخلفية غير موجود: {0}"
+ self.GENERATING_IMATRIX = "توليد IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = "خطأ في بدء توليد IMatrix: {0}"
+ self.IMATRIX_GENERATION_TASK_STARTED = "بدأت مهمة توليد IMatrix"
+ self.ERROR_MESSAGE = "خطأ: {0}"
+ self.TASK_ERROR = "خطأ في المهمة: {0}"
+ self.APPLICATION_CLOSING = "إغلاق التطبيق"
+ self.APPLICATION_CLOSED = "تم إغلاق التطبيق"
+ self.SELECT_QUANTIZATION_TYPE = "حدد نوع التكميم"
+ self.ALLOWS_REQUANTIZING = "يسمح بإعادة تكميم الموترات التي تم تكميمها بالفعل"
+ self.LEAVE_OUTPUT_WEIGHT = "سيترك output.weight غير مُكَمَّم (أو مُعاد تكميمه)"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "تعطيل خلطات k-quant وتكميم جميع الموترات إلى نفس النوع"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "استخدام البيانات في الملف كمصفوفة أهمية لتحسينات التكميم"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "استخدام مصفوفة الأهمية لهذه الموترات"
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "عدم استخدام مصفوفة الأهمية لهذه الموترات"
+ )
+ self.OUTPUT_TENSOR_TYPE = "نوع موتر الإخراج:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "استخدم هذا النوع لموتر output.weight"
+ self.TOKEN_EMBEDDING_TYPE = "نوع تضمين الرمز المميز:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "استخدم هذا النوع لموتر تضمينات الرمز المميز"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "سيولد نموذجًا مُكَمَّمًا في نفس شظايا الإدخال"
+ )
+ self.OVERRIDE_MODEL_METADATA = "تجاوز بيانات تعريف النموذج"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "ملف بيانات الإدخال لتوليد IMatrix"
+ self.MODEL_TO_BE_QUANTIZED = "النموذج المراد تكميمه"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "مسار الإخراج لـ IMatrix الذي تم إنشاؤه"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "عدد مرات حفظ IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "تعيين قيمة تفريغ GPU (-ngl)"
+ self.COMPLETED = "مكتمل"
+ self.REFRESH_MODELS = "تحديث النماذج"
+
+
+class _Korean(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (자동 GGUF 모델 양자화기)"
+ self.RAM_USAGE = "RAM 사용량:"
+ self.CPU_USAGE = "CPU 사용량:"
+ self.BACKEND = "Llama.cpp 백엔드:"
+ self.REFRESH_BACKENDS = "백엔드 새로 고침"
+ self.MODELS_PATH = "모델 경로:"
+ self.OUTPUT_PATH = "출력 경로:"
+ self.LOGS_PATH = "로그 경로:"
+ self.BROWSE = "찾아보기"
+ self.AVAILABLE_MODELS = "사용 가능한 모델:"
+ self.QUANTIZATION_TYPE = "양자화 유형:"
+ self.ALLOW_REQUANTIZE = "재양자화 허용"
+ self.LEAVE_OUTPUT_TENSOR = "출력 텐서 유지"
+ self.PURE = "순수"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "가중치 포함:"
+ self.EXCLUDE_WEIGHTS = "가중치 제외:"
+ self.USE_OUTPUT_TENSOR_TYPE = "출력 텐서 유형 사용"
+ self.USE_TOKEN_EMBEDDING_TYPE = "토큰 임베딩 유형 사용"
+ self.KEEP_SPLIT = "분할 유지"
+ self.KV_OVERRIDES = "KV 재정의:"
+ self.ADD_NEW_OVERRIDE = "새 재정의 추가"
+ self.QUANTIZE_MODEL = "모델 양자화"
+ self.SAVE_PRESET = "프리셋 저장"
+ self.LOAD_PRESET = "프리셋 로드"
+ self.TASKS = "작업:"
+ self.DOWNLOAD_LLAMACPP = "llama.cpp 다운로드"
+ self.SELECT_RELEASE = "릴리스 선택:"
+ self.SELECT_ASSET = "자산 선택:"
+ self.EXTRACT_CUDA_FILES = "CUDA 파일 추출"
+ self.SELECT_CUDA_BACKEND = "CUDA 백엔드 선택:"
+ self.DOWNLOAD = "다운로드"
+ self.IMATRIX_GENERATION = "IMatrix 생성"
+ self.DATA_FILE = "데이터 파일:"
+ self.MODEL = "모델:"
+ self.OUTPUT = "출력:"
+ self.OUTPUT_FREQUENCY = "출력 빈도:"
+ self.GPU_OFFLOAD = "GPU 오프로드:"
+ self.AUTO = "자동"
+ self.GENERATE_IMATRIX = "IMatrix 생성"
+ self.ERROR = "오류"
+ self.WARNING = "경고"
+ self.PROPERTIES = "속성"
+ self.CANCEL = "취소"
+ self.RESTART = "다시 시작"
+ self.DELETE = "삭제"
+ self.CONFIRM_DELETION = "이 작업을 삭제하시겠습니까?"
+ self.TASK_RUNNING_WARNING = "일부 작업이 아직 실행 중입니다. 종료하시겠습니까?"
+ self.YES = "예"
+ self.NO = "아니요"
+ self.DOWNLOAD_COMPLETE = "다운로드 완료"
+ self.CUDA_EXTRACTION_FAILED = "CUDA 추출 실패"
+ self.PRESET_SAVED = "프리셋 저장됨"
+ self.PRESET_LOADED = "프리셋 로드됨"
+ self.NO_ASSET_SELECTED = "자산이 선택되지 않았습니다"
+ self.DOWNLOAD_FAILED = "다운로드 실패"
+ self.NO_BACKEND_SELECTED = "백엔드가 선택되지 않았습니다"
+ self.NO_MODEL_SELECTED = "모델이 선택되지 않았습니다"
+ self.REFRESH_RELEASES = "릴리스 새로 고침"
+ self.NO_SUITABLE_CUDA_BACKENDS = "적합한 CUDA 백엔드를 찾을 수 없습니다"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다.\nCUDA 파일이 {1}에 추출되었습니다."
+ self.CUDA_FILES_EXTRACTED = "CUDA 파일이 다음 위치에 추출되었습니다:"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "추출에 적합한 CUDA 백엔드를 찾을 수 없습니다."
+ )
+ self.ERROR_FETCHING_RELEASES = "릴리스를 가져오는 중 오류가 발생했습니다: {0}"
+ self.CONFIRM_DELETION_TITLE = "삭제 확인"
+ self.LOG_FOR = "{0}에 대한 로그"
+ self.ALL_FILES = "모든 파일 (*)"
+ self.GGUF_FILES = "GGUF 파일 (*.gguf)"
+ self.DAT_FILES = "DAT 파일 (*.dat)"
+ self.JSON_FILES = "JSON 파일 (*.json)"
+ self.FAILED_LOAD_PRESET = "프리셋을 로드하지 못했습니다: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUF 애플리케이션을 초기화하는 중입니다."
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF 초기화가 완료되었습니다."
+ self.REFRESHING_BACKENDS = "백엔드를 새로 고치는 중입니다."
+ self.NO_BACKENDS_AVAILABLE = "사용 가능한 백엔드가 없습니다."
+ self.FOUND_VALID_BACKENDS = "{0}개의 유효한 백엔드를 찾았습니다."
+ self.SAVING_PRESET = "프리셋을 저장하는 중입니다."
+ self.PRESET_SAVED_TO = "프리셋이 {0}에 저장되었습니다."
+ self.LOADING_PRESET = "프리셋을 로드하는 중입니다."
+ self.PRESET_LOADED_FROM = "{0}에서 프리셋을 로드했습니다."
+ self.ADDING_KV_OVERRIDE = "KV 재정의를 추가하는 중입니다: {0}"
+ self.SAVING_TASK_PRESET = "{0}에 대한 작업 프리셋을 저장하는 중입니다."
+ self.TASK_PRESET_SAVED = "작업 프리셋이 저장되었습니다."
+ self.TASK_PRESET_SAVED_TO = "작업 프리셋이 {0}에 저장되었습니다."
+ self.RESTARTING_TASK = "작업을 다시 시작하는 중입니다: {0}"
+ self.IN_PROGRESS = "진행 중"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "다운로드가 완료되었습니다. 추출 위치: {0}"
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다.\nCUDA 파일이 {1}에 추출되었습니다."
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "추출에 적합한 CUDA 백엔드를 찾을 수 없습니다."
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp 바이너리가 다운로드되어 {0}에 추출되었습니다."
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp 릴리스를 새로 고치는 중입니다."
+ self.UPDATING_ASSET_LIST = "자산 목록을 업데이트하는 중입니다."
+ self.UPDATING_CUDA_OPTIONS = "CUDA 옵션을 업데이트하는 중입니다."
+ self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp 다운로드를 시작하는 중입니다."
+ self.UPDATING_CUDA_BACKENDS = "CUDA 백엔드를 업데이트하는 중입니다."
+ self.NO_CUDA_BACKEND_SELECTED = "추출에 CUDA 백엔드가 선택되지 않았습니다."
+ self.EXTRACTING_CUDA_FILES = "{0}에서 {1}로 CUDA 파일을 추출하는 중입니다."
+ self.DOWNLOAD_ERROR = "다운로드 오류: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "작업 컨텍스트 메뉴를 표시하는 중입니다."
+ self.SHOWING_PROPERTIES_FOR_TASK = "작업에 대한 속성을 표시하는 중입니다: {0}"
+ self.CANCELLING_TASK = "작업을 취소하는 중입니다: {0}"
+ self.CANCELED = "취소됨"
+ self.DELETING_TASK = "작업을 삭제하는 중입니다: {0}"
+ self.LOADING_MODELS = "모델을 로드하는 중입니다."
+ self.LOADED_MODELS = "{0}개의 모델이 로드되었습니다."
+ self.BROWSING_FOR_MODELS_DIRECTORY = "모델 디렉토리를 찾아보는 중입니다."
+ self.SELECT_MODELS_DIRECTORY = "모델 디렉토리 선택"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "출력 디렉토리를 찾아보는 중입니다."
+ self.SELECT_OUTPUT_DIRECTORY = "출력 디렉토리 선택"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "로그 디렉토리를 찾아보는 중입니다."
+ self.SELECT_LOGS_DIRECTORY = "로그 디렉토리 선택"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrix 파일을 찾아보는 중입니다."
+ self.SELECT_IMATRIX_FILE = "IMatrix 파일 선택"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU 사용량: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "양자화 입력을 검증하는 중입니다."
+ self.MODELS_PATH_REQUIRED = "모델 경로가 필요합니다."
+ self.OUTPUT_PATH_REQUIRED = "출력 경로가 필요합니다."
+ self.LOGS_PATH_REQUIRED = "로그 경로가 필요합니다."
+ self.STARTING_MODEL_QUANTIZATION = "모델 양자화를 시작하는 중입니다."
+ self.INPUT_FILE_NOT_EXIST = "입력 파일 '{0}'이(가) 존재하지 않습니다."
+ self.QUANTIZING_MODEL_TO = "{0}을(를) {1}(으)로 양자화하는 중입니다."
+ self.QUANTIZATION_TASK_STARTED = "{0}에 대한 양자화 작업이 시작되었습니다."
+ self.ERROR_STARTING_QUANTIZATION = (
+ "양자화를 시작하는 중 오류가 발생했습니다: {0}"
+ )
+ self.UPDATING_MODEL_INFO = "모델 정보를 업데이트하는 중입니다: {0}"
+ self.TASK_FINISHED = "작업이 완료되었습니다: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = (
+ "다음에 대한 작업 세부 정보를 표시하는 중입니다: {0}"
+ )
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix 데이터 파일을 찾아보는 중입니다."
+ self.SELECT_DATA_FILE = "데이터 파일 선택"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix 모델 파일을 찾아보는 중입니다."
+ self.SELECT_MODEL_FILE = "모델 파일 선택"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix 출력 파일을 찾아보는 중입니다."
+ self.SELECT_OUTPUT_FILE = "출력 파일 선택"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix 생성을 시작하는 중입니다."
+ self.BACKEND_PATH_NOT_EXIST = "백엔드 경로가 존재하지 않습니다: {0}"
+ self.GENERATING_IMATRIX = "IMatrix를 생성하는 중입니다."
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "IMatrix 생성을 시작하는 중 오류가 발생했습니다: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix 생성 작업이 시작되었습니다."
+ self.ERROR_MESSAGE = "오류: {0}"
+ self.TASK_ERROR = "작업 오류: {0}"
+ self.APPLICATION_CLOSING = "애플리케이션을 닫는 중입니다."
+ self.APPLICATION_CLOSED = "애플리케이션이 닫혔습니다."
+ self.SELECT_QUANTIZATION_TYPE = "양자화 유형을 선택하세요."
+ self.ALLOWS_REQUANTIZING = "이미 양자화된 텐서의 재양자화를 허용합니다."
+ self.LEAVE_OUTPUT_WEIGHT = "output.weight를 (재)양자화하지 않은 상태로 둡니다."
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "k-양자 혼합을 비활성화하고 모든 텐서를 동일한 유형으로 양자화합니다."
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "양자 최적화를 위한 중요도 행렬로 파일의 데이터를 사용합니다."
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "이러한 텐서에 중요도 행렬을 사용합니다."
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "이러한 텐서에 중요도 행렬을 사용하지 않습니다."
+ )
+ self.OUTPUT_TENSOR_TYPE = "출력 텐서 유형:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "output.weight 텐서에 이 유형을 사용합니다."
+ )
+ self.TOKEN_EMBEDDING_TYPE = "토큰 임베딩 유형:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "토큰 임베딩 텐서에 이 유형을 사용합니다."
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "입력과 동일한 샤드에 양자화된 모델을 생성합니다."
+ )
+ self.OVERRIDE_MODEL_METADATA = "모델 메타데이터를 재정의합니다."
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix 생성을 위한 입력 데이터 파일"
+ self.MODEL_TO_BE_QUANTIZED = "양자화될 모델"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "생성된 IMatrix의 출력 경로"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix를 저장할 빈도"
+ self.SET_GPU_OFFLOAD_VALUE = "GPU 오프로드 값 설정 (-ngl)"
+ self.COMPLETED = "완료됨"
+ self.REFRESH_MODELS = "모델 새로고침"
+
+
+class _Italian(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Quantizzatore Automatico di Modelli GGUF)"
+ self.RAM_USAGE = "Utilizzo RAM:"
+ self.CPU_USAGE = "Utilizzo CPU:"
+ self.BACKEND = "Backend Llama.cpp:"
+ self.REFRESH_BACKENDS = "Aggiorna Backend"
+ self.MODELS_PATH = "Percorso Modelli:"
+ self.OUTPUT_PATH = "Percorso Output:"
+ self.LOGS_PATH = "Percorso Log:"
+ self.BROWSE = "Sfoglia"
+ self.AVAILABLE_MODELS = "Modelli Disponibili:"
+ self.QUANTIZATION_TYPE = "Tipo di Quantizzazione:"
+ self.ALLOW_REQUANTIZE = "Consenti Riquantizzazione"
+ self.LEAVE_OUTPUT_TENSOR = "Lascia Tensore di Output"
+ self.PURE = "Puro"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Includi Pesi:"
+ self.EXCLUDE_WEIGHTS = "Escludi Pesi:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Usa Tipo di Tensore di Output"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Usa Tipo di Incorporamento Token"
+ self.KEEP_SPLIT = "Mantieni Divisione"
+ self.KV_OVERRIDES = "Override KV:"
+ self.ADD_NEW_OVERRIDE = "Aggiungi Nuovo Override"
+ self.QUANTIZE_MODEL = "Quantizza Modello"
+ self.SAVE_PRESET = "Salva Preimpostazione"
+ self.LOAD_PRESET = "Carica Preimpostazione"
+ self.TASKS = "Attività:"
+ self.DOWNLOAD_LLAMACPP = "Scarica llama.cpp"
+ self.SELECT_RELEASE = "Seleziona Versione:"
+ self.SELECT_ASSET = "Seleziona Asset:"
+ self.EXTRACT_CUDA_FILES = "Estrai File CUDA"
+ self.SELECT_CUDA_BACKEND = "Seleziona Backend CUDA:"
+ self.DOWNLOAD = "Scarica"
+ self.IMATRIX_GENERATION = "Generazione IMatrix"
+ self.DATA_FILE = "File Dati:"
+ self.MODEL = "Modello:"
+ self.OUTPUT = "Output:"
+ self.OUTPUT_FREQUENCY = "Frequenza di Output:"
+ self.GPU_OFFLOAD = "Offload GPU:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Genera IMatrix"
+ self.ERROR = "Errore"
+ self.WARNING = "Avviso"
+ self.PROPERTIES = "Proprietà"
+ self.CANCEL = "Annulla"
+ self.RESTART = "Riavvia"
+ self.DELETE = "Elimina"
+ self.CONFIRM_DELETION = "Sei sicuro di voler eliminare questa attività?"
+ self.TASK_RUNNING_WARNING = (
+ "Alcune attività sono ancora in esecuzione. Sei sicuro di voler uscire?"
+ )
+ self.YES = "Sì"
+ self.NO = "No"
+ self.DOWNLOAD_COMPLETE = "Download Completato"
+ self.CUDA_EXTRACTION_FAILED = "Estrazione CUDA Fallita"
+ self.PRESET_SAVED = "Preimpostazione Salvata"
+ self.PRESET_LOADED = "Preimpostazione Caricata"
+ self.NO_ASSET_SELECTED = "Nessun asset selezionato"
+ self.DOWNLOAD_FAILED = "Download fallito"
+ self.NO_BACKEND_SELECTED = "Nessun backend selezionato"
+ self.NO_MODEL_SELECTED = "Nessun modello selezionato"
+ self.REFRESH_RELEASES = "Aggiorna Versioni"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nessun backend CUDA adatto trovato"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = (
+ "Binario llama.cpp scaricato ed estratto in {0}\nFile CUDA estratti in {1}"
+ )
+ self.CUDA_FILES_EXTRACTED = "File CUDA estratti in"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nessun backend CUDA adatto trovato per l'estrazione"
+ )
+ self.ERROR_FETCHING_RELEASES = "Errore durante il recupero delle versioni: {0}"
+ self.CONFIRM_DELETION_TITLE = "Conferma Eliminazione"
+ self.LOG_FOR = "Log per {0}"
+ self.ALL_FILES = "Tutti i File (*)"
+ self.GGUF_FILES = "File GGUF (*.gguf)"
+ self.DAT_FILES = "File DAT (*.dat)"
+ self.JSON_FILES = "File JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Impossibile caricare la preimpostazione: {0}"
+ self.INITIALIZING_AUTOGGUF = "Inizializzazione dell'applicazione AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = (
+ "Inizializzazione di AutoGGUF completata"
+ )
+ self.REFRESHING_BACKENDS = "Aggiornamento backend"
+ self.NO_BACKENDS_AVAILABLE = "Nessun backend disponibile"
+ self.FOUND_VALID_BACKENDS = "Trovati {0} backend validi"
+ self.SAVING_PRESET = "Salvataggio preimpostazione"
+ self.PRESET_SAVED_TO = "Preimpostazione salvata in {0}"
+ self.LOADING_PRESET = "Caricamento preimpostazione"
+ self.PRESET_LOADED_FROM = "Preimpostazione caricata da {0}"
+ self.ADDING_KV_OVERRIDE = "Aggiunta override KV: {0}"
+ self.SAVING_TASK_PRESET = "Salvataggio preimpostazione attività per {0}"
+ self.TASK_PRESET_SAVED = "Preimpostazione Attività Salvata"
+ self.TASK_PRESET_SAVED_TO = "Preimpostazione attività salvata in {0}"
+ self.RESTARTING_TASK = "Riavvio attività: {0}"
+ self.IN_PROGRESS = "In Corso"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download completato. Estratto in: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = (
+ "Binario llama.cpp scaricato ed estratto in {0}\nFile CUDA estratti in {1}"
+ )
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nessun backend CUDA adatto trovato per l'estrazione"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Binario llama.cpp scaricato ed estratto in {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Aggiornamento versioni di llama.cpp"
+ self.UPDATING_ASSET_LIST = "Aggiornamento elenco asset"
+ self.UPDATING_CUDA_OPTIONS = "Aggiornamento opzioni CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Avvio download di llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Aggiornamento backend CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = (
+ "Nessun backend CUDA selezionato per l'estrazione"
+ )
+ self.EXTRACTING_CUDA_FILES = "Estrazione file CUDA da {0} a {1}"
+ self.DOWNLOAD_ERROR = "Errore di download: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Visualizzazione menu contestuale attività"
+ self.SHOWING_PROPERTIES_FOR_TASK = (
+ "Visualizzazione proprietà per l'attività: {0}"
+ )
+ self.CANCELLING_TASK = "Annullamento attività: {0}"
+ self.CANCELED = "Annullato"
+ self.DELETING_TASK = "Eliminazione attività: {0}"
+ self.LOADING_MODELS = "Caricamento modelli"
+ self.LOADED_MODELS = "{0} modelli caricati"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Esplorazione directory modelli"
+ self.SELECT_MODELS_DIRECTORY = "Seleziona Directory Modelli"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Esplorazione directory output"
+ self.SELECT_OUTPUT_DIRECTORY = "Seleziona Directory Output"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Esplorazione directory log"
+ self.SELECT_LOGS_DIRECTORY = "Seleziona Directory Log"
+ self.BROWSING_FOR_IMATRIX_FILE = "Esplorazione file IMatrix"
+ self.SELECT_IMATRIX_FILE = "Seleziona File IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Utilizzo CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Convalida input di quantizzazione"
+ self.MODELS_PATH_REQUIRED = "Il percorso dei modelli è obbligatorio"
+ self.OUTPUT_PATH_REQUIRED = "Il percorso di output è obbligatorio"
+ self.LOGS_PATH_REQUIRED = "Il percorso dei log è obbligatorio"
+ self.STARTING_MODEL_QUANTIZATION = "Avvio quantizzazione del modello"
+ self.INPUT_FILE_NOT_EXIST = "Il file di input '{0}' non esiste."
+ self.QUANTIZING_MODEL_TO = "Quantizzazione di {0} a {1}"
+ self.QUANTIZATION_TASK_STARTED = "Attività di quantizzazione avviata per {0}"
+ self.ERROR_STARTING_QUANTIZATION = (
+ "Errore durante l'avvio della quantizzazione: {0}"
+ )
+ self.UPDATING_MODEL_INFO = "Aggiornamento informazioni sul modello: {0}"
+ self.TASK_FINISHED = "Attività completata: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Visualizzazione dettagli attività per: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Esplorazione file dati IMatrix"
+ self.SELECT_DATA_FILE = "Seleziona File Dati"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Esplorazione file modello IMatrix"
+ self.SELECT_MODEL_FILE = "Seleziona File Modello"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Esplorazione file output IMatrix"
+ self.SELECT_OUTPUT_FILE = "Seleziona File Output"
+ self.STARTING_IMATRIX_GENERATION = "Avvio generazione IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Il percorso del backend non esiste: {0}"
+ self.GENERATING_IMATRIX = "Generazione IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Errore durante l'avvio della generazione di IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Attività di generazione IMatrix avviata"
+ self.ERROR_MESSAGE = "Errore: {0}"
+ self.TASK_ERROR = "Errore attività: {0}"
+ self.APPLICATION_CLOSING = "Chiusura applicazione"
+ self.APPLICATION_CLOSED = "Applicazione chiusa"
+ self.SELECT_QUANTIZATION_TYPE = "Seleziona il tipo di quantizzazione"
+ self.ALLOWS_REQUANTIZING = (
+ "Consente di riquantizzare tensori che sono già stati quantizzati"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Lascerà output.weight non (ri)quantizzato"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Disabilita le miscele k-quant e quantizza tutti i tensori allo stesso tipo"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilizza i dati nel file come matrice di importanza per le ottimizzazioni di quantizzazione"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Usa la matrice di importanza per questi tensori"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Non usare la matrice di importanza per questi tensori"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Tipo di Tensore di Output:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Usa questo tipo per il tensore output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Tipo di Incorporamento Token:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Usa questo tipo per il tensore di incorporamenti token"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Genererà il modello quantizzato negli stessi frammenti dell'input"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Sovrascrivi i metadati del modello"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "File di dati di input per la generazione di IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modello da quantizzare"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Percorso di output per l'IMatrix generato"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Con quale frequenza salvare l'IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Imposta il valore di offload GPU (-ngl)"
+ self.COMPLETED = "Completato"
+ self.REFRESH_MODELS = "Aggiorna modelli"
+
+
+class _Turkish(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Otomatik GGUF Modeli Niceleyici)"
+ self.RAM_USAGE = "RAM Kullanımı:"
+ self.CPU_USAGE = "CPU Kullanımı:"
+ self.BACKEND = "Llama.cpp Arka Uç:"
+ self.REFRESH_BACKENDS = "Arka Uçları Yenile"
+ self.MODELS_PATH = "Modeller Yolu:"
+ self.OUTPUT_PATH = "Çıkış Yolu:"
+ self.LOGS_PATH = "Günlükler Yolu:"
+ self.BROWSE = "Gözat"
+ self.AVAILABLE_MODELS = "Kullanılabilir Modeller:"
+ self.QUANTIZATION_TYPE = "Niceleme Türü:"
+ self.ALLOW_REQUANTIZE = "Yeniden Nicelemeye İzin Ver"
+ self.LEAVE_OUTPUT_TENSOR = "Çıkış Tensörünü Bırak"
+ self.PURE = "Saf"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Ağırlıkları Dahil Et:"
+ self.EXCLUDE_WEIGHTS = "Ağırlıkları Hariç Tut:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Çıkış Tensör Türünü Kullan"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Token Gömme Türünü Kullan"
+ self.KEEP_SPLIT = "Bölmeyi Koru"
+ self.KV_OVERRIDES = "KV Geçersiz Kılmaları:"
+ self.ADD_NEW_OVERRIDE = "Yeni Geçersiz Kılma Ekle"
+ self.QUANTIZE_MODEL = "Modeli Nicele"
+ self.SAVE_PRESET = "Ön Ayarı Kaydet"
+ self.LOAD_PRESET = "Ön Ayarı Yükle"
+ self.TASKS = "Görevler:"
+ self.DOWNLOAD_LLAMACPP = "llama.cpp'yi İndir"
+ self.SELECT_RELEASE = "Sürümü Seç:"
+ self.SELECT_ASSET = "Varlığı Seç:"
+ self.EXTRACT_CUDA_FILES = "CUDA Dosyalarını Çıkar"
+ self.SELECT_CUDA_BACKEND = "CUDA Arka Ucunu Seç:"
+ self.DOWNLOAD = "İndir"
+ self.IMATRIX_GENERATION = "IMatrix Üretimi"
+ self.DATA_FILE = "Veri Dosyası:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Çıkış:"
+ self.OUTPUT_FREQUENCY = "Çıkış Sıklığı:"
+ self.GPU_OFFLOAD = "GPU Yük Boşaltma:"
+ self.AUTO = "Otomatik"
+ self.GENERATE_IMATRIX = "IMatrix Oluştur"
+ self.ERROR = "Hata"
+ self.WARNING = "Uyarı"
+ self.PROPERTIES = "Özellikler"
+ self.CANCEL = "İptal"
+ self.RESTART = "Yeniden Başlat"
+ self.DELETE = "Sil"
+ self.CONFIRM_DELETION = "Bu görevi silmek istediğinizden emin misiniz?"
+ self.TASK_RUNNING_WARNING = (
+ "Bazı görevler hala çalışıyor. Çıkmak istediğinizden emin misiniz?"
+ )
+ self.YES = "Evet"
+ self.NO = "Hayır"
+ self.DOWNLOAD_COMPLETE = "İndirme Tamamlandı"
+ self.CUDA_EXTRACTION_FAILED = "CUDA Çıkarma Başarısız"
+ self.PRESET_SAVED = "Ön Ayar Kaydedildi"
+ self.PRESET_LOADED = "Ön Ayar Yüklendi"
+ self.NO_ASSET_SELECTED = "Varlık seçilmedi"
+ self.DOWNLOAD_FAILED = "İndirme başarısız"
+ self.NO_BACKEND_SELECTED = "Arka uç seçilmedi"
+ self.NO_MODEL_SELECTED = "Model seçilmedi"
+ self.REFRESH_RELEASES = "Sürümleri Yenile"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Uygun CUDA arka uçları bulunamadı"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı\nCUDA dosyaları {1} konumuna çıkarıldı"
+ self.CUDA_FILES_EXTRACTED = "CUDA dosyaları konumuna çıkarıldı"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Çıkarma için uygun bir CUDA arka ucu bulunamadı"
+ )
+ self.ERROR_FETCHING_RELEASES = "Sürümleri getirirken hata oluştu: {0}"
+ self.CONFIRM_DELETION_TITLE = "Silmeyi Onayla"
+ self.LOG_FOR = "{0} için Günlük"
+ self.ALL_FILES = "Tüm Dosyalar (*)"
+ self.GGUF_FILES = "GGUF Dosyaları (*.gguf)"
+ self.DAT_FILES = "DAT Dosyaları (*.dat)"
+ self.JSON_FILES = "JSON Dosyaları (*.json)"
+ self.FAILED_LOAD_PRESET = "Ön ayarı yükleme başarısız: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUF uygulaması başlatılıyor"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF başlatması tamamlandı"
+ self.REFRESHING_BACKENDS = "Arka uçlar yenileniyor"
+ self.NO_BACKENDS_AVAILABLE = "Kullanılabilir arka uç yok"
+ self.FOUND_VALID_BACKENDS = "{0} geçerli arka uç bulundu"
+ self.SAVING_PRESET = "Ön ayar kaydediliyor"
+ self.PRESET_SAVED_TO = "Ön ayar {0} konumuna kaydedildi"
+ self.LOADING_PRESET = "Ön ayar yükleniyor"
+ self.PRESET_LOADED_FROM = "Ön ayar {0} konumundan yüklendi"
+ self.ADDING_KV_OVERRIDE = "KV geçersiz kılma ekleniyor: {0}"
+ self.SAVING_TASK_PRESET = "{0} için görev ön ayarı kaydediliyor"
+ self.TASK_PRESET_SAVED = "Görev Ön Ayarı Kaydedildi"
+ self.TASK_PRESET_SAVED_TO = "Görev ön ayarı {0} konumuna kaydedildi"
+ self.RESTARTING_TASK = "Görev yeniden başlatılıyor: {0}"
+ self.IN_PROGRESS = "Devam Ediyor"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "İndirme tamamlandı. Şuraya çıkarıldı: {0}"
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı\nCUDA dosyaları {1} konumuna çıkarıldı"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Çıkarma için uygun bir CUDA arka ucu bulunamadı"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp ikili dosyası indirildi ve {0} konumuna çıkarıldı"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp sürümleri yenileniyor"
+ self.UPDATING_ASSET_LIST = "Varlık listesi güncelleniyor"
+ self.UPDATING_CUDA_OPTIONS = "CUDA seçenekleri güncelleniyor"
+ self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp indirme başlatılıyor"
+ self.UPDATING_CUDA_BACKENDS = "CUDA arka uçları güncelleniyor"
+ self.NO_CUDA_BACKEND_SELECTED = "Çıkarma için CUDA arka ucu seçilmedi"
+ self.EXTRACTING_CUDA_FILES = (
+ "CUDA dosyaları {0} konumundan {1} konumuna çıkarılıyor"
+ )
+ self.DOWNLOAD_ERROR = "İndirme hatası: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Görev bağlam menüsü gösteriliyor"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Görev için özellikler gösteriliyor: {0}"
+ self.CANCELLING_TASK = "Görev iptal ediliyor: {0}"
+ self.CANCELED = "İptal Edildi"
+ self.DELETING_TASK = "Görev siliniyor: {0}"
+ self.LOADING_MODELS = "Modeller yükleniyor"
+ self.LOADED_MODELS = "{0} model yüklendi"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Modeller dizinine göz atılıyor"
+ self.SELECT_MODELS_DIRECTORY = "Modeller Dizini Seç"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Çıkış dizinine göz atılıyor"
+ self.SELECT_OUTPUT_DIRECTORY = "Çıkış Dizini Seç"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Günlükler dizinine göz atılıyor"
+ self.SELECT_LOGS_DIRECTORY = "Günlükler Dizini Seç"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrix dosyasına göz atılıyor"
+ self.SELECT_IMATRIX_FILE = "IMatrix Dosyası Seç"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU Kullanımı: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Niceleme girişleri doğrulanıyor"
+ self.MODELS_PATH_REQUIRED = "Modeller yolu gerekli"
+ self.OUTPUT_PATH_REQUIRED = "Çıkış yolu gerekli"
+ self.LOGS_PATH_REQUIRED = "Günlükler yolu gerekli"
+ self.STARTING_MODEL_QUANTIZATION = "Model niceleme başlatılıyor"
+ self.INPUT_FILE_NOT_EXIST = "Giriş dosyası '{0}' mevcut değil."
+ self.QUANTIZING_MODEL_TO = "{0} öğesi {1} öğesine niceleniyor"
+ self.QUANTIZATION_TASK_STARTED = "{0} için niceleme görevi başlatıldı"
+ self.ERROR_STARTING_QUANTIZATION = "Niceleme başlatılırken hata oluştu: {0}"
+ self.UPDATING_MODEL_INFO = "Model bilgileri güncelleniyor: {0}"
+ self.TASK_FINISHED = "Görev tamamlandı: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Şunun için görev ayrıntıları gösteriliyor: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix veri dosyasına göz atılıyor"
+ self.SELECT_DATA_FILE = "Veri Dosyası Seç"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix model dosyasına göz atılıyor"
+ self.SELECT_MODEL_FILE = "Model Dosyası Seç"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix çıkış dosyasına göz atılıyor"
+ self.SELECT_OUTPUT_FILE = "Çıkış Dosyası Seç"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix üretimi başlatılıyor"
+ self.BACKEND_PATH_NOT_EXIST = "Arka uç yolu mevcut değil: {0}"
+ self.GENERATING_IMATRIX = "IMatrix oluşturuluyor"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "IMatrix üretimi başlatılırken hata oluştu: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix oluşturma görevi başlatıldı"
+ self.ERROR_MESSAGE = "Hata: {0}"
+ self.TASK_ERROR = "Görev hatası: {0}"
+ self.APPLICATION_CLOSING = "Uygulama kapatılıyor"
+ self.APPLICATION_CLOSED = "Uygulama kapatıldı"
+ self.SELECT_QUANTIZATION_TYPE = "Niceleme türünü seçin"
+ self.ALLOWS_REQUANTIZING = (
+ "Zaten niceleme yapılmış tensörlerin yeniden nicelemesine izin verir"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = (
+ "output.weight öğesini (yeniden) nicelememiş halde bırakır"
+ )
+ self.DISABLE_K_QUANT_MIXTURES = "k-Quant karışımlarını devre dışı bırakın ve tüm tensörleri aynı türe niceleyin"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Quant optimizasyonları için dosyadaki verileri önem matrisi olarak kullanın"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Bu tensörler için önem matrisini kullanın"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Bu tensörler için önem matrisini kullanmayın"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Çıkış Tensör Türü:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "output.weight tensörü için bu türü kullanın"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token Gömme Türü:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Token gömme tensörü için bu türü kullanın"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Nicelemeli modeli girişle aynı parçalarda oluşturacaktır"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Model meta verilerini geçersiz kıl"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix oluşturma için giriş veri dosyası"
+ self.MODEL_TO_BE_QUANTIZED = "Nicelenecek model"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Oluşturulan IMatrix için çıkış yolu"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix'in ne sıklıkta kaydedileceği"
+ self.SET_GPU_OFFLOAD_VALUE = "GPU yük boşaltma değerini ayarla (-ngl)"
+ self.COMPLETED = "Tamamlandı"
+ self.REFRESH_MODELS = "Modelleri yenile"
+
+
+class _Dutch(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (automatische GGUF-modelkwantisering)"
+ self.RAM_USAGE = "RAM-gebruik:"
+ self.CPU_USAGE = "CPU-gebruik:"
+ self.BACKEND = "Llama.cpp Backend:"
+ self.REFRESH_BACKENDS = "Backends vernieuwen"
+ self.MODELS_PATH = "Modelpad:"
+ self.OUTPUT_PATH = "Uitvoerpad:"
+ self.LOGS_PATH = "Logboekpad:"
+ self.BROWSE = "Bladeren"
+ self.AVAILABLE_MODELS = "Beschikbare modellen:"
+ self.QUANTIZATION_TYPE = "Kwantiseringstype:"
+ self.ALLOW_REQUANTIZE = "Herkwantisering toestaan"
+ self.LEAVE_OUTPUT_TENSOR = "Uitvoertensor behouden"
+ self.PURE = "Zuiver"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Gewichten opnemen:"
+ self.EXCLUDE_WEIGHTS = "Gewichten uitsluiten:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Uitvoertensortype gebruiken"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Tokeninbeddingstype gebruiken"
+ self.KEEP_SPLIT = "Splitsing behouden"
+ self.KV_OVERRIDES = "KV-overschrijvingen:"
+ self.ADD_NEW_OVERRIDE = "Nieuwe overschrijving toevoegen"
+ self.QUANTIZE_MODEL = "Model kwantiseren"
+ self.SAVE_PRESET = "Voorinstelling opslaan"
+ self.LOAD_PRESET = "Voorinstelling laden"
+ self.TASKS = "Taken:"
+ self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
+ self.SELECT_RELEASE = "Selecteer release:"
+ self.SELECT_ASSET = "Selecteer item:"
+ self.EXTRACT_CUDA_FILES = "CUDA-bestanden uitpakken"
+ self.SELECT_CUDA_BACKEND = "Selecteer CUDA-backend:"
+ self.DOWNLOAD = "Downloaden"
+ self.IMATRIX_GENERATION = "IMatrix-generatie"
+ self.DATA_FILE = "Gegevensbestand:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Uitvoer:"
+ self.OUTPUT_FREQUENCY = "Uitvoerfrequentie:"
+ self.GPU_OFFLOAD = "GPU-offload:"
+ self.AUTO = "Automatisch"
+ self.GENERATE_IMATRIX = "IMatrix genereren"
+ self.ERROR = "Fout"
+ self.WARNING = "Waarschuwing"
+ self.PROPERTIES = "Eigenschappen"
+ self.CANCEL = "Annuleren"
+ self.RESTART = "Opnieuw starten"
+ self.DELETE = "Verwijderen"
+ self.CONFIRM_DELETION = "Weet u zeker dat u deze taak wilt verwijderen?"
+ self.TASK_RUNNING_WARNING = (
+ "Sommige taken worden nog uitgevoerd. Weet u zeker dat u wilt afsluiten?"
+ )
+ self.YES = "Ja"
+ self.NO = "Nee"
+ self.DOWNLOAD_COMPLETE = "Download voltooid"
+ self.CUDA_EXTRACTION_FAILED = "CUDA-extractie mislukt"
+ self.PRESET_SAVED = "Voorinstelling opgeslagen"
+ self.PRESET_LOADED = "Voorinstelling geladen"
+ self.NO_ASSET_SELECTED = "Geen item geselecteerd"
+ self.DOWNLOAD_FAILED = "Download mislukt"
+ self.NO_BACKEND_SELECTED = "Geen backend geselecteerd"
+ self.NO_MODEL_SELECTED = "Geen model geselecteerd"
+ self.REFRESH_RELEASES = "Releases vernieuwen"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Geen geschikte CUDA-backends gevonden"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binair llama.cpp-bestand gedownload en uitgepakt naar {0}\nCUDA-bestanden uitgepakt naar {1}"
+ self.CUDA_FILES_EXTRACTED = "CUDA-bestanden uitgepakt naar"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Geen geschikte CUDA-backend gevonden voor extractie"
+ )
+ self.ERROR_FETCHING_RELEASES = "Fout bij het ophalen van releases: {0}"
+ self.CONFIRM_DELETION_TITLE = "Verwijdering bevestigen"
+ self.LOG_FOR = "Logboek voor {0}"
+ self.ALL_FILES = "Alle bestanden (*)"
+ self.GGUF_FILES = "GGUF-bestanden (*.gguf)"
+ self.DAT_FILES = "DAT-bestanden (*.dat)"
+ self.JSON_FILES = "JSON-bestanden (*.json)"
+ self.FAILED_LOAD_PRESET = "Voorinstelling laden mislukt: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUF-applicatie wordt geïnitialiseerd"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-initialisatie voltooid"
+ self.REFRESHING_BACKENDS = "Backends worden vernieuwd"
+ self.NO_BACKENDS_AVAILABLE = "Geen backends beschikbaar"
+ self.FOUND_VALID_BACKENDS = "{0} geldige backends gevonden"
+ self.SAVING_PRESET = "Voorinstelling wordt opgeslagen"
+ self.PRESET_SAVED_TO = "Voorinstelling opgeslagen in {0}"
+ self.LOADING_PRESET = "Voorinstelling wordt geladen"
+ self.PRESET_LOADED_FROM = "Voorinstelling geladen van {0}"
+ self.ADDING_KV_OVERRIDE = "KV-overschrijving toevoegen: {0}"
+ self.SAVING_TASK_PRESET = "Taakvoorinstelling opslaan voor {0}"
+ self.TASK_PRESET_SAVED = "Taakvoorinstelling opgeslagen"
+ self.TASK_PRESET_SAVED_TO = "Taakvoorinstelling opgeslagen in {0}"
+ self.RESTARTING_TASK = "Taak opnieuw starten: {0}"
+ self.IN_PROGRESS = "Bezig"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download voltooid. Uitgepakt naar: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binairbestand gedownload en uitgepakt naar {0}\nCUDA-bestanden uitgepakt naar {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Geen geschikte CUDA-backend gevonden voor extractie"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp-binairbestand gedownload en uitgepakt naar {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp-releases worden vernieuwd"
+ self.UPDATING_ASSET_LIST = "Itemlijst wordt bijgewerkt"
+ self.UPDATING_CUDA_OPTIONS = "CUDA-opties worden bijgewerkt"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Downloaden van llama.cpp wordt gestart"
+ self.UPDATING_CUDA_BACKENDS = "CUDA-backends worden bijgewerkt"
+ self.NO_CUDA_BACKEND_SELECTED = "Geen CUDA-backend geselecteerd voor extractie"
+ self.EXTRACTING_CUDA_FILES = "CUDA-bestanden uitpakken van {0} naar {1}"
+ self.DOWNLOAD_ERROR = "Downloadfout: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Taakcontextmenu weergeven"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Eigenschappen voor taak weergeven: {0}"
+ self.CANCELLING_TASK = "Taak annuleren: {0}"
+ self.CANCELED = "Geannuleerd"
+ self.DELETING_TASK = "Taak verwijderen: {0}"
+ self.LOADING_MODELS = "Modellen laden"
+ self.LOADED_MODELS = "{0} modellen geladen"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Bladeren naar modelmap"
+ self.SELECT_MODELS_DIRECTORY = "Selecteer modelmap"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Bladeren naar uitvoermap"
+ self.SELECT_OUTPUT_DIRECTORY = "Selecteer uitvoermap"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Bladeren naar logboekmap"
+ self.SELECT_LOGS_DIRECTORY = "Selecteer logboekmap"
+ self.BROWSING_FOR_IMATRIX_FILE = "Bladeren naar IMatrix-bestand"
+ self.SELECT_IMATRIX_FILE = "Selecteer IMatrix-bestand"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU-gebruik: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Kwantiseringsinvoer valideren"
+ self.MODELS_PATH_REQUIRED = "Modelpad is vereist"
+ self.OUTPUT_PATH_REQUIRED = "Uitvoerpad is vereist"
+ self.LOGS_PATH_REQUIRED = "Logboekpad is vereist"
+ self.STARTING_MODEL_QUANTIZATION = "Modelkwantisering starten"
+ self.INPUT_FILE_NOT_EXIST = "Invoerbestand '{0}' bestaat niet."
+ self.QUANTIZING_MODEL_TO = "Kwantiseren van {0} naar {1}"
+ self.QUANTIZATION_TASK_STARTED = "Kwantiseringstaak gestart voor {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Fout bij het starten van kwantisering: {0}"
+ self.UPDATING_MODEL_INFO = "Modelinformatie bijwerken: {0}"
+ self.TASK_FINISHED = "Taak voltooid: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Taakdetails weergeven voor: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Bladeren naar IMatrix-gegevensbestand"
+ self.SELECT_DATA_FILE = "Selecteer gegevensbestand"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Bladeren naar IMatrix-modelbestand"
+ self.SELECT_MODEL_FILE = "Selecteer modelbestand"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Bladeren naar IMatrix-uitvoerbestand"
+ self.SELECT_OUTPUT_FILE = "Selecteer uitvoerbestand"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix-generatie starten"
+ self.BACKEND_PATH_NOT_EXIST = "Backendpad bestaat niet: {0}"
+ self.GENERATING_IMATRIX = "IMatrix genereren"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Fout bij het starten van IMatrix-generatie: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-generatietaak gestart"
+ self.ERROR_MESSAGE = "Fout: {0}"
+ self.TASK_ERROR = "Taakfout: {0}"
+ self.APPLICATION_CLOSING = "Applicatie wordt afgesloten"
+ self.APPLICATION_CLOSED = "Applicatie afgesloten"
+ self.SELECT_QUANTIZATION_TYPE = "Selecteer het kwantiseringstype"
+ self.ALLOWS_REQUANTIZING = (
+ "Staat herkwantisering toe van tensoren die al gekwantiseerd zijn"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Laat output.weight niet (opnieuw) gekwantiseerd"
+ self.DISABLE_K_QUANT_MIXTURES = "Schakel k-kwant-mengsels uit en kwantiseer alle tensoren naar hetzelfde type"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Gebruik gegevens in bestand als belangrijkheidsmatrix voor kwant-optimalisaties"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Gebruik belangrijkheidsmatrix voor deze tensoren"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Gebruik geen belangrijkheidsmatrix voor deze tensoren"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Uitvoertensortype:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Gebruik dit type voor de output.weight-tensor"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Tokeninbeddingstype:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Gebruik dit type voor de tokeninbeddingstensor"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Genereert een gekwantiseerd model in dezelfde shards als de invoer"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Modelmetadata overschrijven"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Invoergegevensbestand voor IMatrix-generatie"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Te kwantiseren model"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Uitvoerpad voor de gegenereerde IMatrix"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Hoe vaak de IMatrix moet worden opgeslagen"
+ self.SET_GPU_OFFLOAD_VALUE = "Stel de GPU-offloadwaarde in (-ngl)"
+ self.COMPLETED = "Voltooid"
+ self.REFRESH_MODELS = "Modellen vernieuwen"
+
+
+class _Finnish(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (automaattinen GGUF-mallien kvantisoija)"
+ self.RAM_USAGE = "RAM-muistin käyttö:"
+ self.CPU_USAGE = "CPU:n käyttö:"
+ self.BACKEND = "Llama.cpp-taustaosa:"
+ self.REFRESH_BACKENDS = "Päivitä taustaosat"
+ self.MODELS_PATH = "Mallien polku:"
+ self.OUTPUT_PATH = "Tulostepolku:"
+ self.LOGS_PATH = "Lokien polku:"
+ self.BROWSE = "Selaa"
+ self.AVAILABLE_MODELS = "Käytettävissä olevat mallit:"
+ self.QUANTIZATION_TYPE = "Kvantisointityyppi:"
+ self.ALLOW_REQUANTIZE = "Salli uudelleenkvantisointi"
+ self.LEAVE_OUTPUT_TENSOR = "Jätä tulostensori"
+ self.PURE = "Puhdas"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Sisällytä painot:"
+ self.EXCLUDE_WEIGHTS = "Sulje pois painot:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Käytä tulostensorin tyyppiä"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Käytä token-upotustyyppiä"
+ self.KEEP_SPLIT = "Säilytä jako"
+ self.KV_OVERRIDES = "KV-ohitukset:"
+ self.ADD_NEW_OVERRIDE = "Lisää uusi ohitus"
+ self.QUANTIZE_MODEL = "Kvantisoi malli"
+ self.SAVE_PRESET = "Tallenna esiasetus"
+ self.LOAD_PRESET = "Lataa esiasetus"
+ self.TASKS = "Tehtävät:"
+ self.DOWNLOAD_LLAMACPP = "Lataa llama.cpp"
+ self.SELECT_RELEASE = "Valitse julkaisu:"
+ self.SELECT_ASSET = "Valitse resurssi:"
+ self.EXTRACT_CUDA_FILES = "Pura CUDA-tiedostot"
+ self.SELECT_CUDA_BACKEND = "Valitse CUDA-taustaosa:"
+ self.DOWNLOAD = "Lataa"
+ self.IMATRIX_GENERATION = "IMatrix-generointi"
+ self.DATA_FILE = "Datatiedosto:"
+ self.MODEL = "Malli:"
+ self.OUTPUT = "Tuloste:"
+ self.OUTPUT_FREQUENCY = "Tulostetaajuus:"
+ self.GPU_OFFLOAD = "GPU-kuormansiirto:"
+ self.AUTO = "Automaattinen"
+ self.GENERATE_IMATRIX = "Generoi IMatrix"
+ self.ERROR = "Virhe"
+ self.WARNING = "Varoitus"
+ self.PROPERTIES = "Ominaisuudet"
+ self.CANCEL = "Peruuta"
+ self.RESTART = "Käynnistä uudelleen"
+ self.DELETE = "Poista"
+ self.CONFIRM_DELETION = "Haluatko varmasti poistaa tämän tehtävän?"
+ self.TASK_RUNNING_WARNING = (
+ "Jotkin tehtävät ovat vielä käynnissä. Haluatko varmasti lopettaa?"
+ )
+ self.YES = "Kyllä"
+ self.NO = "Ei"
+ self.DOWNLOAD_COMPLETE = "Lataus valmis"
+ self.CUDA_EXTRACTION_FAILED = "CUDA-purku epäonnistui"
+ self.PRESET_SAVED = "Esiasetus tallennettu"
+ self.PRESET_LOADED = "Esiasetus ladattu"
+ self.NO_ASSET_SELECTED = "Ei resurssia valittuna"
+ self.DOWNLOAD_FAILED = "Lataus epäonnistui"
+ self.NO_BACKEND_SELECTED = "Ei taustaosaa valittuna"
+ self.NO_MODEL_SELECTED = "Ei mallia valittuna"
+ self.REFRESH_RELEASES = "Päivitä julkaisut"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Sopivia CUDA-taustaosoja ei löytynyt"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}\nCUDA-tiedostot purettu kansioon {1}"
+ self.CUDA_FILES_EXTRACTED = "CUDA-tiedostot purettu kansioon"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Sopivaa CUDA-taustaosaa purkua varten ei löytynyt"
+ )
+ self.ERROR_FETCHING_RELEASES = "Virhe haettaessa julkaisuja: {0}"
+ self.CONFIRM_DELETION_TITLE = "Vahvista poisto"
+ self.LOG_FOR = "Loki kohteelle {0}"
+ self.ALL_FILES = "Kaikki tiedostot (*)"
+ self.GGUF_FILES = "GGUF-tiedostot (*.gguf)"
+ self.DAT_FILES = "DAT-tiedostot (*.dat)"
+ self.JSON_FILES = "JSON-tiedostot (*.json)"
+ self.FAILED_LOAD_PRESET = "Esiasetuksen lataus epäonnistui: {0}"
+ self.INITIALIZING_AUTOGGUF = "Alustetaan AutoGGUF-sovellusta"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF-alustus valmis"
+ self.REFRESHING_BACKENDS = "Päivitetään taustaosoja"
+ self.NO_BACKENDS_AVAILABLE = "Ei käytettävissä olevia taustaosoja"
+ self.FOUND_VALID_BACKENDS = "Löydettiin {0} kelvollista taustaosaa"
+ self.SAVING_PRESET = "Tallennetaan esiasetusta"
+ self.PRESET_SAVED_TO = "Esiasetus tallennettu kansioon {0}"
+ self.LOADING_PRESET = "Ladataan esiasetusta"
+ self.PRESET_LOADED_FROM = "Esiasetus ladattu kansiosta {0}"
+ self.ADDING_KV_OVERRIDE = "Lisätään KV-ohitus: {0}"
+ self.SAVING_TASK_PRESET = "Tallennetaan tehtäväesiasetusta kohteelle {0}"
+ self.TASK_PRESET_SAVED = "Tehtäväesiasetus tallennettu"
+ self.TASK_PRESET_SAVED_TO = "Tehtäväesiasetus tallennettu kansioon {0}"
+ self.RESTARTING_TASK = "Käynnistetään tehtävä uudelleen: {0}"
+ self.IN_PROGRESS = "Käynnissä"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Lataus valmis. Purettu kansioon: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}\nCUDA-tiedostot purettu kansioon {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Sopivaa CUDA-taustaosaa purkua varten ei löytynyt"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp-binaaritiedosto ladattu ja purettu kansioon {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Päivitetään llama.cpp-julkaisuja"
+ self.UPDATING_ASSET_LIST = "Päivitetään resurssilistaa"
+ self.UPDATING_CUDA_OPTIONS = "Päivitetään CUDA-asetuksia"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Aloitetaan llama.cpp:n lataus"
+ self.UPDATING_CUDA_BACKENDS = "Päivitetään CUDA-taustaosoja"
+ self.NO_CUDA_BACKEND_SELECTED = "Ei CUDA-taustaosaa valittuna purkua varten"
+ self.EXTRACTING_CUDA_FILES = (
+ "Puretaan CUDA-tiedostoja kansiosta {0} kansioon {1}"
+ )
+ self.DOWNLOAD_ERROR = "Latausvirhe: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Näytetään tehtäväkontekstivalikko"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Näytetään tehtävän ominaisuudet: {0}"
+ self.CANCELLING_TASK = "Peruutetaan tehtävää: {0}"
+ self.CANCELED = "Peruutettu"
+ self.DELETING_TASK = "Poistetaan tehtävää: {0}"
+ self.LOADING_MODELS = "Ladataan malleja"
+ self.LOADED_MODELS = "{0} mallia ladattu"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Selaillaan mallikansiota"
+ self.SELECT_MODELS_DIRECTORY = "Valitse mallikansio"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Selaillaan tulostekansiota"
+ self.SELECT_OUTPUT_DIRECTORY = "Valitse tulostekansio"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Selaillaan lokikansiota"
+ self.SELECT_LOGS_DIRECTORY = "Valitse lokikansio"
+ self.BROWSING_FOR_IMATRIX_FILE = "Selaillaan IMatrix-tiedostoa"
+ self.SELECT_IMATRIX_FILE = "Valitse IMatrix-tiedosto"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mt / {2} Mt)"
+ self.CPU_USAGE_FORMAT = "CPU:n käyttö: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Vahvistetaan kvantisointisyötteet"
+ self.MODELS_PATH_REQUIRED = "Mallien polku on pakollinen"
+ self.OUTPUT_PATH_REQUIRED = "Tulostepolku on pakollinen"
+ self.LOGS_PATH_REQUIRED = "Lokien polku on pakollinen"
+ self.STARTING_MODEL_QUANTIZATION = "Aloitetaan mallin kvantisointi"
+ self.INPUT_FILE_NOT_EXIST = "Syötetiedostoa '{0}' ei ole."
+ self.QUANTIZING_MODEL_TO = "Kvantisoidaan mallia {0} muotoon {1}"
+ self.QUANTIZATION_TASK_STARTED = (
+ "Kvantisointitehtävä käynnistetty kohteelle {0}"
+ )
+ self.ERROR_STARTING_QUANTIZATION = "Virhe kvantisoinnin käynnistyksessä: {0}"
+ self.UPDATING_MODEL_INFO = "Päivitetään mallitietoja: {0}"
+ self.TASK_FINISHED = "Tehtävä valmis: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Näytetään tehtävän tiedot kohteelle: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Selaillaan IMatrix-datatiedostoa"
+ self.SELECT_DATA_FILE = "Valitse datatiedosto"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Selaillaan IMatrix-mallitiedostoa"
+ self.SELECT_MODEL_FILE = "Valitse mallitiedosto"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Selaillaan IMatrix-tulostetiedostoa"
+ self.SELECT_OUTPUT_FILE = "Valitse tulostetiedosto"
+ self.STARTING_IMATRIX_GENERATION = "Aloitetaan IMatrix-generointi"
+ self.BACKEND_PATH_NOT_EXIST = "Taustaosan polkua ei ole: {0}"
+ self.GENERATING_IMATRIX = "Generoidaan IMatrixia"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Virhe IMatrix-generoinnin käynnistyksessä: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix-generointi käynnistetty"
+ self.ERROR_MESSAGE = "Virhe: {0}"
+ self.TASK_ERROR = "Tehtävävirhe: {0}"
+ self.APPLICATION_CLOSING = "Sovellus suljetaan"
+ self.APPLICATION_CLOSED = "Sovellus suljettu"
+ self.SELECT_QUANTIZATION_TYPE = "Valitse kvantisointityyppi"
+ self.ALLOWS_REQUANTIZING = (
+ "Sallii jo kvantisoitujen tensoreiden uudelleenkvantisoinnin"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = (
+ "Jättää output.weight-tensorin (uudelleen)kvantisoimatta"
+ )
+ self.DISABLE_K_QUANT_MIXTURES = "Poista käytöstä k-kvanttisekoitukset ja kvantisoi kaikki tensorit samaan tyyppiin"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "Käytä tiedoston tietoja kvantisoinnin optimoinnin tärkeysmatriisina"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Käytä tärkeysmatriisia näille tensoreille"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Älä käytä tärkeysmatriisia näille tensoreille"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Tulostensorin tyyppi:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Käytä tätä tyyppiä output.weight-tensorille"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token-upotustyyppi:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Käytä tätä tyyppiä token-upotustensorille"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Generoi kvantisoidun mallin samoihin osiin kuin syöte"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Ohita mallitiedot"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix-generoinnin syötedatatiedosto"
+ self.MODEL_TO_BE_QUANTIZED = "Kvantisoitava malli"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Generoidun IMatrixin tulostepolku"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Kuinka usein IMatrix tallennetaan"
+ self.SET_GPU_OFFLOAD_VALUE = "Aseta GPU-kuormansiirron arvo (-ngl)"
+ self.COMPLETED = "Valmis"
+ self.REFRESH_MODELS = "Päivitä mallit"
+
+
+class _Bengali(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (স্বয়ংক্রিয় GGUF মডেল কোয়ান্টাইজার)"
+ self.RAM_USAGE = "RAM ব্যবহার:"
+ self.CPU_USAGE = "CPU ব্যবহার:"
+ self.BACKEND = "Llama.cpp ব্যাকএন্ড:"
+ self.REFRESH_BACKENDS = "ব্যাকএন্ড রিফ্রেশ করুন"
+ self.MODELS_PATH = "মডেল পাথ:"
+ self.OUTPUT_PATH = "আউটপুট পাথ:"
+ self.LOGS_PATH = "লগ পাথ:"
+ self.BROWSE = "ব্রাউজ করুন"
+ self.AVAILABLE_MODELS = "উপলব্ধ মডেল:"
+ self.QUANTIZATION_TYPE = "কোয়ান্টাইজেশন ধরণ:"
+ self.ALLOW_REQUANTIZE = "পুনরায় কোয়ান্টাইজ করার অনুমতি দিন"
+ self.LEAVE_OUTPUT_TENSOR = "আউটপুট টেন্সর রেখে দিন"
+ self.PURE = "বিশুদ্ধ"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "ওজন অন্তর্ভুক্ত করুন:"
+ self.EXCLUDE_WEIGHTS = "ওজন বাদ দিন:"
+ self.USE_OUTPUT_TENSOR_TYPE = "আউটপুট টেন্সর ধরণ ব্যবহার করুন"
+ self.USE_TOKEN_EMBEDDING_TYPE = "টোকেন এম্বেডিং ধরণ ব্যবহার করুন"
+ self.KEEP_SPLIT = "বিভাজন রাখুন"
+ self.KV_OVERRIDES = "KV ওভাররাইড:"
+ self.ADD_NEW_OVERRIDE = "নতুন ওভাররাইড যুক্ত করুন"
+ self.QUANTIZE_MODEL = "মডেল কোয়ান্টাইজ করুন"
+ self.SAVE_PRESET = "প্রিসেট সংরক্ষণ করুন"
+ self.LOAD_PRESET = "প্রিসেট লোড করুন"
+ self.TASKS = "কার্য:"
+ self.DOWNLOAD_LLAMACPP = "llama.cpp ডাউনলোড করুন"
+ self.SELECT_RELEASE = "রিলিজ নির্বাচন করুন:"
+ self.SELECT_ASSET = "অ্যাসেট নির্বাচন করুন:"
+ self.EXTRACT_CUDA_FILES = "CUDA ফাইলগুলি বের করুন"
+ self.SELECT_CUDA_BACKEND = "CUDA ব্যাকএন্ড নির্বাচন করুন:"
+ self.DOWNLOAD = "ডাউনলোড করুন"
+ self.IMATRIX_GENERATION = "IMatrix জেনারেশন"
+ self.DATA_FILE = "ডেটা ফাইল:"
+ self.MODEL = "মডেল:"
+ self.OUTPUT = "আউটপুট:"
+ self.OUTPUT_FREQUENCY = "আউটপুট ফ্রিকোয়েন্সি:"
+ self.GPU_OFFLOAD = "GPU অফলোড:"
+ self.AUTO = "স্বয়ংক্রিয়"
+ self.GENERATE_IMATRIX = "IMatrix তৈরি করুন"
+ self.ERROR = "ত্রুটি"
+ self.WARNING = "সতর্কীকরণ"
+ self.PROPERTIES = "বৈশিষ্ট্য"
+ self.CANCEL = "বাতিল করুন"
+ self.RESTART = "পুনরায় আরম্ভ করুন"
+ self.DELETE = "মুছে ফেলুন"
+ self.CONFIRM_DELETION = "আপনি কি নিশ্চিত যে আপনি এই কাজটি মুছে ফেলতে চান?"
+ self.TASK_RUNNING_WARNING = (
+ "কিছু কাজ এখনও চলছে। আপনি কি নিশ্চিত যে আপনি প্রস্থান করতে চান?"
+ )
+ self.YES = "হ্যাঁ"
+ self.NO = "না"
+ self.DOWNLOAD_COMPLETE = "ডাউনলোড সম্পন্ন"
+ self.CUDA_EXTRACTION_FAILED = "CUDA এক্সট্র্যাকশন ব্যর্থ"
+ self.PRESET_SAVED = "প্রিসেট সংরক্ষিত"
+ self.PRESET_LOADED = "প্রিসেট লোড করা হয়েছে"
+ self.NO_ASSET_SELECTED = "কোন অ্যাসেট নির্বাচন করা হয়নি"
+ self.DOWNLOAD_FAILED = "ডাউনলোড ব্যর্থ"
+ self.NO_BACKEND_SELECTED = "কোন ব্যাকএন্ড নির্বাচন করা হয়নি"
+ self.NO_MODEL_SELECTED = "কোন মডেল নির্বাচন করা হয়নি"
+ self.REFRESH_RELEASES = "রিলিজগুলি রিফ্রেশ করুন"
+ self.NO_SUITABLE_CUDA_BACKENDS = "কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে\nCUDA ফাইলগুলি {1} এ বের করা হয়েছে"
+ self.CUDA_FILES_EXTRACTED = "CUDA ফাইলগুলি তে বের করা হয়েছে"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "এক্সট্র্যাকশনের জন্য কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি"
+ )
+ self.ERROR_FETCHING_RELEASES = "রিলিজগুলি আনতে ত্রুটি: {0}"
+ self.CONFIRM_DELETION_TITLE = "মুছে ফেলা নিশ্চিত করুন"
+ self.LOG_FOR = "{0} এর জন্য লগ"
+ self.ALL_FILES = "সমস্ত ফাইল (*)"
+ self.GGUF_FILES = "GGUF ফাইল (*.gguf)"
+ self.DAT_FILES = "DAT ফাইল (*.dat)"
+ self.JSON_FILES = "JSON ফাইল (*.json)"
+ self.FAILED_LOAD_PRESET = "প্রিসেট লোড করতে ব্যর্থ: {0}"
+ self.INITIALIZING_AUTOGGUF = "AutoGGUF অ্যাপ্লিকেশন শুরু হচ্ছে"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF ইনিশিয়ালাইজেশন সম্পন্ন"
+ self.REFRESHING_BACKENDS = "ব্যাকএন্ডগুলি রিফ্রেশ করা হচ্ছে"
+ self.NO_BACKENDS_AVAILABLE = "কোন ব্যাকএন্ড উপলব্ধ নেই"
+ self.FOUND_VALID_BACKENDS = "{0} টি বৈধ ব্যাকএন্ড পাওয়া গেছে"
+ self.SAVING_PRESET = "প্রিসেট সংরক্ষণ করা হচ্ছে"
+ self.PRESET_SAVED_TO = "{0} এ প্রিসেট সংরক্ষিত"
+ self.LOADING_PRESET = "প্রিসেট লোড করা হচ্ছে"
+ self.PRESET_LOADED_FROM = "{0} থেকে প্রিসেট লোড করা হয়েছে"
+ self.ADDING_KV_OVERRIDE = "KV ওভাররাইড যুক্ত করা হচ্ছে: {0}"
+ self.SAVING_TASK_PRESET = "{0} এর জন্য টাস্ক প্রিসেট সংরক্ষণ করা হচ্ছে"
+ self.TASK_PRESET_SAVED = "টাস্ক প্রিসেট সংরক্ষিত"
+ self.TASK_PRESET_SAVED_TO = "{0} এ টাস্ক প্রিসেট সংরক্ষিত"
+ self.RESTARTING_TASK = "টাস্ক পুনরায় শুরু করা হচ্ছে: {0}"
+ self.IN_PROGRESS = "চলছে"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "ডাউনলোড সম্পন্ন। বের করা হয়েছে: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে\nCUDA ফাইলগুলি {1} এ বের করা হয়েছে"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "এক্সট্র্যাকশনের জন্য কোন উপযুক্ত CUDA ব্যাকএন্ড পাওয়া যায়নি"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp বাইনারি ফাইল ডাউনলোড এবং {0} এ বের করা হয়েছে"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "llama.cpp রিলিজগুলি রিফ্রেশ করা হচ্ছে"
+ self.UPDATING_ASSET_LIST = "অ্যাসেট তালিকা আপডেট করা হচ্ছে"
+ self.UPDATING_CUDA_OPTIONS = "CUDA অপশনগুলি আপডেট করা হচ্ছে"
+ self.STARTING_LLAMACPP_DOWNLOAD = "llama.cpp ডাউনলোড শুরু করা হচ্ছে"
+ self.UPDATING_CUDA_BACKENDS = "CUDA ব্যাকএন্ডগুলি আপডেট করা হচ্ছে"
+ self.NO_CUDA_BACKEND_SELECTED = (
+ "এক্সট্র্যাকশনের জন্য কোন CUDA ব্যাকএন্ড নির্বাচন করা হয়নি"
+ )
+ self.EXTRACTING_CUDA_FILES = "{0} থেকে {1} এ CUDA ফাইলগুলি বের করা হচ্ছে"
+ self.DOWNLOAD_ERROR = "ডাউনলোড ত্রুটি: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "টাস্ক কনটেক্সট মেনু দেখানো হচ্ছে"
+ self.SHOWING_PROPERTIES_FOR_TASK = "টাস্কের জন্য বৈশিষ্ট্য দেখানো হচ্ছে: {0}"
+ self.CANCELLING_TASK = "টাস্ক বাতিল করা হচ্ছে: {0}"
+ self.CANCELED = "বাতিল করা হয়েছে"
+ self.DELETING_TASK = "টাস্ক মুছে ফেলা হচ্ছে: {0}"
+ self.LOADING_MODELS = "মডেলগুলি লোড করা হচ্ছে"
+ self.LOADED_MODELS = "{0} টি মডেল লোড করা হয়েছে"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "মডেল ডিরেক্টরি ব্রাউজ করা হচ্ছে"
+ self.SELECT_MODELS_DIRECTORY = "মডেল ডিরেক্টরি নির্বাচন করুন"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "আউটপুট ডিরেক্টরি ব্রাউজ করা হচ্ছে"
+ self.SELECT_OUTPUT_DIRECTORY = "আউটপুট ডিরেক্টরি নির্বাচন করুন"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "লগ ডিরেক্টরি ব্রাউজ করা হচ্ছে"
+ self.SELECT_LOGS_DIRECTORY = "লগ ডিরেক্টরি নির্বাচন করুন"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrix ফাইল ব্রাউজ করা হচ্ছে"
+ self.SELECT_IMATRIX_FILE = "IMatrix ফাইল নির্বাচন করুন"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU ব্যবহার: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "কোয়ান্টাইজেশন ইনপুট যাচাই করা হচ্ছে"
+ self.MODELS_PATH_REQUIRED = "মডেল পাথ প্রয়োজন"
+ self.OUTPUT_PATH_REQUIRED = "আউটপুট পাথ প্রয়োজন"
+ self.LOGS_PATH_REQUIRED = "লগ পাথ প্রয়োজন"
+ self.STARTING_MODEL_QUANTIZATION = "মডেল কোয়ান্টাইজেশন শুরু হচ্ছে"
+ self.INPUT_FILE_NOT_EXIST = "ইনপুট ফাইল '{0}' বিদ্যমান নেই।"
+ self.QUANTIZING_MODEL_TO = "{0} কে {1} এ কোয়ান্টাইজ করা হচ্ছে"
+ self.QUANTIZATION_TASK_STARTED = "{0} এর জন্য কোয়ান্টাইজেশন টাস্ক শুরু হয়েছে"
+ self.ERROR_STARTING_QUANTIZATION = "কোয়ান্টাইজেশন শুরু করতে ত্রুটি: {0}"
+ self.UPDATING_MODEL_INFO = "মডেল তথ্য আপডেট করা হচ্ছে: {0}"
+ self.TASK_FINISHED = "টাস্ক সম্পন্ন: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "এর জন্য টাস্কের বিবরণ দেখানো হচ্ছে: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix ডেটা ফাইল ব্রাউজ করা হচ্ছে"
+ self.SELECT_DATA_FILE = "ডেটা ফাইল নির্বাচন করুন"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix মডেল ফাইল ব্রাউজ করা হচ্ছে"
+ self.SELECT_MODEL_FILE = "মডেল ফাইল নির্বাচন করুন"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix আউটপুট ফাইল ব্রাউজ করা হচ্ছে"
+ self.SELECT_OUTPUT_FILE = "আউটপুট ফাইল নির্বাচন করুন"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix জেনারেশন শুরু হচ্ছে"
+ self.BACKEND_PATH_NOT_EXIST = "ব্যাকএন্ড পাথ বিদ্যমান নেই: {0}"
+ self.GENERATING_IMATRIX = "IMatrix তৈরি করা হচ্ছে"
+ self.ERROR_STARTING_IMATRIX_GENERATION = "IMatrix জেনারেশন শুরু করতে ত্রুটি: {0}"
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix জেনারেশন টাস্ক শুরু হয়েছে"
+ self.ERROR_MESSAGE = "ত্রুটি: {0}"
+ self.TASK_ERROR = "টাস্ক ত্রুটি: {0}"
+ self.APPLICATION_CLOSING = "অ্যাপ্লিকেশন বন্ধ করা হচ্ছে"
+ self.APPLICATION_CLOSED = "অ্যাপ্লিকেশন বন্ধ"
+ self.SELECT_QUANTIZATION_TYPE = "কোয়ান্টাইজেশন ধরণ নির্বাচন করুন"
+ self.ALLOWS_REQUANTIZING = "যে টেন্সরগুলি ইতিমধ্যে কোয়ান্টাইজ করা হয়েছে তাদের পুনরায় কোয়ান্টাইজ করার অনুমতি দেয়"
+ self.LEAVE_OUTPUT_WEIGHT = "output.weight কে (পুনরায়) কোয়ান্টাইজ না করে রেখে দেবে"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "k-কোয়ান্ট মিশ্রণগুলি অক্ষম করুন এবং সমস্ত টেন্সরকে একই ধরণের কোয়ান্টাইজ করুন"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "কোয়ান্ট অপ্টিমাইজেশনের জন্য ফাইলের ডেটা গুরুত্বের ম্যাট্রিক্স হিসাবে ব্যবহার করুন"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "এই টেন্সরগুলির জন্য গুরুত্বের ম্যাট্রিক্স ব্যবহার করুন"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "এই টেন্সরগুলির জন্য গুরুত্বের ম্যাট্রিক্স ব্যবহার করবেন না"
+ )
+ self.OUTPUT_TENSOR_TYPE = "আউটপুট টেন্সর ধরণ:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "output.weight টেন্সরের জন্য এই ধরণটি ব্যবহার করুন"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "টোকেন এম্বেডিং ধরণ:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "টোকেন এম্বেডিং টেন্সরের জন্য এই ধরণটি ব্যবহার করুন"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "ইনপুটের মতো একই শার্ডে কোয়ান্টাইজ করা মডেল তৈরি করবে"
+ )
+ self.OVERRIDE_MODEL_METADATA = "মডেল মেটাডেটা ওভাররাইড করুন"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix জেনারেশনের জন্য ইনপুট ডেটা ফাইল"
+ self.MODEL_TO_BE_QUANTIZED = "কোয়ান্টাইজ করার জন্য মডেল"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "তৈরি করা IMatrix এর জন্য আউটপুট পাথ"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "IMatrix কতবার সংরক্ষণ করবেন"
+ self.SET_GPU_OFFLOAD_VALUE = "GPU অফলোড মান সেট করুন (-ngl)"
+ self.COMPLETED = "সম্পন্ন"
+ self.REFRESH_MODELS = "মডেল রিফ্রেশ করুন"
+
+
+class _Polish(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Automatyczny kwantyzator modeli GGUF)"
+ self.RAM_USAGE = "Użycie pamięci RAM:"
+ self.CPU_USAGE = "Użycie procesora:"
+ self.BACKEND = "Backend Llama.cpp:"
+ self.REFRESH_BACKENDS = "Odśwież backendy"
+ self.MODELS_PATH = "Ścieżka modeli:"
+ self.OUTPUT_PATH = "Ścieżka wyjściowa:"
+ self.LOGS_PATH = "Ścieżka logów:"
+ self.BROWSE = "Przeglądaj"
+ self.AVAILABLE_MODELS = "Dostępne modele:"
+ self.QUANTIZATION_TYPE = "Typ kwantyzacji:"
+ self.ALLOW_REQUANTIZE = "Zezwól na ponowną kwantyzację"
+ self.LEAVE_OUTPUT_TENSOR = "Pozostaw tensor wyjściowy"
+ self.PURE = "Czysty"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Uwzględnij wagi:"
+ self.EXCLUDE_WEIGHTS = "Wyklucz wagi:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Użyj typu tensora wyjściowego"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Użyj typu osadzania tokenów"
+ self.KEEP_SPLIT = "Zachowaj podział"
+ self.KV_OVERRIDES = "Nadpisania KV:"
+ self.ADD_NEW_OVERRIDE = "Dodaj nowe nadpisanie"
+ self.QUANTIZE_MODEL = "Kwantyzuj model"
+ self.SAVE_PRESET = "Zapisz ustawienia predefiniowane"
+ self.LOAD_PRESET = "Wczytaj ustawienia predefiniowane"
+ self.TASKS = "Zadania:"
+ self.DOWNLOAD_LLAMACPP = "Pobierz llama.cpp"
+ self.SELECT_RELEASE = "Wybierz wersję:"
+ self.SELECT_ASSET = "Wybierz zasób:"
+ self.EXTRACT_CUDA_FILES = "Wyodrębnij pliki CUDA"
+ self.SELECT_CUDA_BACKEND = "Wybierz backend CUDA:"
+ self.DOWNLOAD = "Pobierz"
+ self.IMATRIX_GENERATION = "Generowanie IMatrix"
+ self.DATA_FILE = "Plik danych:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Wyjście:"
+ self.OUTPUT_FREQUENCY = "Częstotliwość wyjścia:"
+ self.GPU_OFFLOAD = "Odciążenie GPU:"
+ self.AUTO = "Automatyczny"
+ self.GENERATE_IMATRIX = "Generuj IMatrix"
+ self.ERROR = "Błąd"
+ self.WARNING = "Ostrzeżenie"
+ self.PROPERTIES = "Właściwości"
+ self.CANCEL = "Anuluj"
+ self.RESTART = "Uruchom ponownie"
+ self.DELETE = "Usuń"
+ self.CONFIRM_DELETION = "Czy na pewno chcesz usunąć to zadanie?"
+ self.TASK_RUNNING_WARNING = (
+ "Niektóre zadania są nadal uruchomione. Czy na pewno chcesz wyjść?"
+ )
+ self.YES = "Tak"
+ self.NO = "Nie"
+ self.DOWNLOAD_COMPLETE = "Pobieranie zakończone"
+ self.CUDA_EXTRACTION_FAILED = "Wyodrębnianie CUDA nie powiodło się"
+ self.PRESET_SAVED = "Ustawienia predefiniowane zapisane"
+ self.PRESET_LOADED = "Ustawienia predefiniowane wczytane"
+ self.NO_ASSET_SELECTED = "Nie wybrano zasobu"
+ self.DOWNLOAD_FAILED = "Pobieranie nie powiodło się"
+ self.NO_BACKEND_SELECTED = "Nie wybrano backendu"
+ self.NO_MODEL_SELECTED = "Nie wybrano modelu"
+ self.REFRESH_RELEASES = "Odśwież wersje"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nie znaleziono odpowiednich backendów CUDA"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}\nPliki CUDA wyodrębnione do {1}"
+ self.CUDA_FILES_EXTRACTED = "Pliki CUDA wyodrębnione do"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nie znaleziono odpowiedniego backendu CUDA do wyodrębnienia"
+ )
+ self.ERROR_FETCHING_RELEASES = "Błąd podczas pobierania wersji: {0}"
+ self.CONFIRM_DELETION_TITLE = "Potwierdź usunięcie"
+ self.LOG_FOR = "Dziennik dla {0}"
+ self.ALL_FILES = "Wszystkie pliki (*)"
+ self.GGUF_FILES = "Pliki GGUF (*.gguf)"
+ self.DAT_FILES = "Pliki DAT (*.dat)"
+ self.JSON_FILES = "Pliki JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Nie udało się wczytać ustawień predefiniowanych: {0}"
+ self.INITIALIZING_AUTOGGUF = "Inicjalizacja aplikacji AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicjalizacja AutoGGUF zakończona"
+ self.REFRESHING_BACKENDS = "Odświeżanie backendów"
+ self.NO_BACKENDS_AVAILABLE = "Brak dostępnych backendów"
+ self.FOUND_VALID_BACKENDS = "Znaleziono {0} prawidłowych backendów"
+ self.SAVING_PRESET = "Zapisywanie ustawień predefiniowanych"
+ self.PRESET_SAVED_TO = "Ustawienia predefiniowane zapisane do {0}"
+ self.LOADING_PRESET = "Wczytywanie ustawień predefiniowanych"
+ self.PRESET_LOADED_FROM = "Ustawienia predefiniowane wczytane z {0}"
+ self.ADDING_KV_OVERRIDE = "Dodawanie nadpisania KV: {0}"
+ self.SAVING_TASK_PRESET = (
+ "Zapisywanie ustawień predefiniowanych zadania dla {0}"
+ )
+ self.TASK_PRESET_SAVED = "Ustawienia predefiniowane zadania zapisane"
+ self.TASK_PRESET_SAVED_TO = "Ustawienia predefiniowane zadania zapisane do {0}"
+ self.RESTARTING_TASK = "Ponowne uruchamianie zadania: {0}"
+ self.IN_PROGRESS = "W trakcie"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "Pobieranie zakończone. Wyodrębniono do: {0}"
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}\nPliki CUDA wyodrębnione do {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nie znaleziono odpowiedniego backendu CUDA do wyodrębnienia"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Plik binarny llama.cpp został pobrany i wyodrębniony do {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Odświeżanie wersji llama.cpp"
+ self.UPDATING_ASSET_LIST = "Aktualizacja listy zasobów"
+ self.UPDATING_CUDA_OPTIONS = "Aktualizacja opcji CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Rozpoczynanie pobierania llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Aktualizacja backendów CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Nie wybrano backendu CUDA do wyodrębnienia"
+ self.EXTRACTING_CUDA_FILES = "Wyodrębnianie plików CUDA z {0} do {1}"
+ self.DOWNLOAD_ERROR = "Błąd pobierania: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Wyświetlanie menu kontekstowego zadania"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Wyświetlanie właściwości zadania: {0}"
+ self.CANCELLING_TASK = "Anulowanie zadania: {0}"
+ self.CANCELED = "Anulowano"
+ self.DELETING_TASK = "Usuwanie zadania: {0}"
+ self.LOADING_MODELS = "Ładowanie modeli"
+ self.LOADED_MODELS = "Załadowano {0} modeli"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Przeglądanie katalogu modeli"
+ self.SELECT_MODELS_DIRECTORY = "Wybierz katalog modeli"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Przeglądanie katalogu wyjściowego"
+ self.SELECT_OUTPUT_DIRECTORY = "Wybierz katalog wyjściowy"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Przeglądanie katalogu logów"
+ self.SELECT_LOGS_DIRECTORY = "Wybierz katalog logów"
+ self.BROWSING_FOR_IMATRIX_FILE = "Przeglądanie pliku IMatrix"
+ self.SELECT_IMATRIX_FILE = "Wybierz plik IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Użycie procesora: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Walidacja danych wejściowych kwantyzacji"
+ self.MODELS_PATH_REQUIRED = "Ścieżka modeli jest wymagana"
+ self.OUTPUT_PATH_REQUIRED = "Ścieżka wyjściowa jest wymagana"
+ self.LOGS_PATH_REQUIRED = "Ścieżka logów jest wymagana"
+ self.STARTING_MODEL_QUANTIZATION = "Rozpoczynanie kwantyzacji modelu"
+ self.INPUT_FILE_NOT_EXIST = "Plik wejściowy '{0}' nie istnieje."
+ self.QUANTIZING_MODEL_TO = "Kwantyzacja {0} do {1}"
+ self.QUANTIZATION_TASK_STARTED = "Zadanie kwantyzacji uruchomione dla {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Błąd podczas uruchamiania kwantyzacji: {0}"
+ self.UPDATING_MODEL_INFO = "Aktualizacja informacji o modelu: {0}"
+ self.TASK_FINISHED = "Zadanie zakończone: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Wyświetlanie szczegółów zadania dla: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Przeglądanie pliku danych IMatrix"
+ self.SELECT_DATA_FILE = "Wybierz plik danych"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Przeglądanie pliku modelu IMatrix"
+ self.SELECT_MODEL_FILE = "Wybierz plik modelu"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Przeglądanie pliku wyjściowego IMatrix"
+ self.SELECT_OUTPUT_FILE = "Wybierz plik wyjściowy"
+ self.STARTING_IMATRIX_GENERATION = "Rozpoczynanie generowania IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Ścieżka backendu nie istnieje: {0}"
+ self.GENERATING_IMATRIX = "Generowanie IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Błąd podczas uruchamiania generowania IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Zadanie generowania IMatrix uruchomione"
+ self.ERROR_MESSAGE = "Błąd: {0}"
+ self.TASK_ERROR = "Błąd zadania: {0}"
+ self.APPLICATION_CLOSING = "Zamykanie aplikacji"
+ self.APPLICATION_CLOSED = "Aplikacja zamknięta"
+ self.SELECT_QUANTIZATION_TYPE = "Wybierz typ kwantyzacji"
+ self.ALLOWS_REQUANTIZING = (
+ "Pozwala na ponowną kwantyzację tensorów, które zostały już skwantyzowane"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = (
+ "Pozostawi output.weight nieskwantyzowany (lub nieskwantyzowany ponownie)"
+ )
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Wyłącz mieszanki k-kwant i kwantyzuj wszystkie tensory do tego samego typu"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "Użyj danych w pliku jako macierzy ważności dla optymalizacji kwantyzacji"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Użyj macierzy ważności dla tych tensorów"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Nie używaj macierzy ważności dla tych tensorów"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Typ tensora wyjściowego:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Użyj tego typu dla tensora output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Typ osadzania tokenów:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Użyj tego typu dla tensora osadzania tokenów"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Wygeneruje skwantyzowany model w tych samych fragmentach co dane wejściowe"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Zastąp metadane modelu"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Plik danych wejściowych do generowania IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Model do kwantyzacji"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Ścieżka wyjściowa dla wygenerowanego IMatrix"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Jak często zapisywać IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Ustaw wartość odciążenia GPU (-ngl)"
+ self.COMPLETED = "Ukończono"
+ self.REFRESH_MODELS = "Obnovit modely"
+
+
+class _Romanian(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Cuantizator automat de modele GGUF)"
+ self.RAM_USAGE = "Utilizare RAM:"
+ self.CPU_USAGE = "Utilizare CPU:"
+ self.BACKEND = "Backend Llama.cpp:"
+ self.REFRESH_BACKENDS = "Reîmprospătați backends"
+ self.MODELS_PATH = "Cale modele:"
+ self.OUTPUT_PATH = "Cale ieșire:"
+ self.LOGS_PATH = "Cale jurnale:"
+ self.BROWSE = "Răsfoiți"
+ self.AVAILABLE_MODELS = "Modele disponibile:"
+ self.QUANTIZATION_TYPE = "Tipul de cuantizare:"
+ self.ALLOW_REQUANTIZE = "Permiteți recuantizarea"
+ self.LEAVE_OUTPUT_TENSOR = "Lăsați tensorul de ieșire"
+ self.PURE = "Pur"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Includeți ponderile:"
+ self.EXCLUDE_WEIGHTS = "Excludeți ponderile:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Utilizați tipul tensorului de ieșire"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Utilizați tipul de încorporare a tokenului"
+ self.KEEP_SPLIT = "Păstrați divizarea"
+ self.KV_OVERRIDES = "Suprascrieri KV:"
+ self.ADD_NEW_OVERRIDE = "Adăugați o nouă suprascriere"
+ self.QUANTIZE_MODEL = "Cuantizați modelul"
+ self.SAVE_PRESET = "Salvați presetarea"
+ self.LOAD_PRESET = "Încărcați presetarea"
+ self.TASKS = "Sarcini:"
+ self.DOWNLOAD_LLAMACPP = "Descărcați llama.cpp"
+ self.SELECT_RELEASE = "Selectați versiunea:"
+ self.SELECT_ASSET = "Selectați activul:"
+ self.EXTRACT_CUDA_FILES = "Extrageți fișierele CUDA"
+ self.SELECT_CUDA_BACKEND = "Selectați backend CUDA:"
+ self.DOWNLOAD = "Descărcați"
+ self.IMATRIX_GENERATION = "Generare IMatrix"
+ self.DATA_FILE = "Fișier de date:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Ieșire:"
+ self.OUTPUT_FREQUENCY = "Frecvența ieșirii:"
+ self.GPU_OFFLOAD = "Descărcare GPU:"
+ self.AUTO = "Automat"
+ self.GENERATE_IMATRIX = "Generați IMatrix"
+ self.ERROR = "Eroare"
+ self.WARNING = "Avertisment"
+ self.PROPERTIES = "Proprietăți"
+ self.CANCEL = "Anulați"
+ self.RESTART = "Reporniți"
+ self.DELETE = "Ștergeți"
+ self.CONFIRM_DELETION = "Sunteți sigur că doriți să ștergeți această sarcină?"
+ self.TASK_RUNNING_WARNING = "Unele sarcini sunt încă în curs de execuție. Sunteți sigur că doriți să ieșiți?"
+ self.YES = "Da"
+ self.NO = "Nu"
+ self.DOWNLOAD_COMPLETE = "Descărcare finalizată"
+ self.CUDA_EXTRACTION_FAILED = "Extragerea CUDA a eșuat"
+ self.PRESET_SAVED = "Presetare salvată"
+ self.PRESET_LOADED = "Presetare încărcată"
+ self.NO_ASSET_SELECTED = "Niciun activ selectat"
+ self.DOWNLOAD_FAILED = "Descărcarea a eșuat"
+ self.NO_BACKEND_SELECTED = "Niciun backend selectat"
+ self.NO_MODEL_SELECTED = "Niciun model selectat"
+ self.REFRESH_RELEASES = "Reîmprospătați versiunile"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nu s-au găsit backends CUDA potrivite"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}\nFișierele CUDA au fost extrase în {1}"
+ self.CUDA_FILES_EXTRACTED = "Fișierele CUDA au fost extrase în"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nu s-a găsit un backend CUDA potrivit pentru extragere"
+ )
+ self.ERROR_FETCHING_RELEASES = "Eroare la preluarea versiunilor: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirmați ștergerea"
+ self.LOG_FOR = "Jurnal pentru {0}"
+ self.ALL_FILES = "Toate fișierele (*)"
+ self.GGUF_FILES = "Fișiere GGUF (*.gguf)"
+ self.DAT_FILES = "Fișiere DAT (*.dat)"
+ self.JSON_FILES = "Fișiere JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Nu s-a putut încărca presetarea: {0}"
+ self.INITIALIZING_AUTOGGUF = "Inițializarea aplicației AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inițializarea AutoGGUF finalizată"
+ self.REFRESHING_BACKENDS = "Reîmprospătarea backends"
+ self.NO_BACKENDS_AVAILABLE = "Nu există backends disponibile"
+ self.FOUND_VALID_BACKENDS = "S-au găsit {0} backends valide"
+ self.SAVING_PRESET = "Salvarea presetării"
+ self.PRESET_SAVED_TO = "Presetare salvată în {0}"
+ self.LOADING_PRESET = "Încărcarea presetării"
+ self.PRESET_LOADED_FROM = "Presetare încărcată din {0}"
+ self.ADDING_KV_OVERRIDE = "Adăugarea suprascrierii KV: {0}"
+ self.SAVING_TASK_PRESET = "Salvarea presetării sarcinii pentru {0}"
+ self.TASK_PRESET_SAVED = "Presetare sarcină salvată"
+ self.TASK_PRESET_SAVED_TO = "Presetare sarcină salvată în {0}"
+ self.RESTARTING_TASK = "Repornirea sarcinii: {0}"
+ self.IN_PROGRESS = "În curs"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Descărcare finalizată. Extras în: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Fișierul binar llama.cpp a fost descărcat și extras în {0}\nFișierele CUDA au fost extrase în {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nu s-a găsit un backend CUDA potrivit pentru extragere"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Fișierul binar llama.cpp a fost descărcat și extras în {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Reîmprospătarea versiunilor llama.cpp"
+ self.UPDATING_ASSET_LIST = "Actualizarea listei de active"
+ self.UPDATING_CUDA_OPTIONS = "Actualizarea opțiunilor CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Începerea descărcării llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Actualizarea backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Niciun backend CUDA selectat pentru extragere"
+ self.EXTRACTING_CUDA_FILES = "Extragerea fișierelor CUDA din {0} în {1}"
+ self.DOWNLOAD_ERROR = "Eroare de descărcare: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Afișarea meniului contextual al sarcinii"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Afișarea proprietăților pentru sarcina: {0}"
+ self.CANCELLING_TASK = "Anularea sarcinii: {0}"
+ self.CANCELED = "Anulat"
+ self.DELETING_TASK = "Ștergerea sarcinii: {0}"
+ self.LOADING_MODELS = "Încărcarea modelelor"
+ self.LOADED_MODELS = "{0} modele încărcate"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Răsfoirea directorului de modele"
+ self.SELECT_MODELS_DIRECTORY = "Selectați directorul de modele"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Răsfoirea directorului de ieșire"
+ self.SELECT_OUTPUT_DIRECTORY = "Selectați directorul de ieșire"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Răsfoirea directorului de jurnale"
+ self.SELECT_LOGS_DIRECTORY = "Selectați directorul de jurnale"
+ self.BROWSING_FOR_IMATRIX_FILE = "Răsfoirea fișierului IMatrix"
+ self.SELECT_IMATRIX_FILE = "Selectați fișierul IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Utilizare CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validarea intrărilor de cuantizare"
+ self.MODELS_PATH_REQUIRED = "Calea modelelor este obligatorie"
+ self.OUTPUT_PATH_REQUIRED = "Calea ieșirii este obligatorie"
+ self.LOGS_PATH_REQUIRED = "Calea jurnalelor este obligatorie"
+ self.STARTING_MODEL_QUANTIZATION = "Pornirea cuantizării modelului"
+ self.INPUT_FILE_NOT_EXIST = "Fișierul de intrare '{0}' nu există."
+ self.QUANTIZING_MODEL_TO = "Cuantizarea {0} la {1}"
+ self.QUANTIZATION_TASK_STARTED = (
+ "Sarcina de cuantizare a fost pornită pentru {0}"
+ )
+ self.ERROR_STARTING_QUANTIZATION = "Eroare la pornirea cuantizării: {0}"
+ self.UPDATING_MODEL_INFO = "Actualizarea informațiilor despre model: {0}"
+ self.TASK_FINISHED = "Sarcină finalizată: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Afișarea detaliilor sarcinii pentru: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Răsfoirea fișierului de date IMatrix"
+ self.SELECT_DATA_FILE = "Selectați fișierul de date"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Răsfoirea fișierului de model IMatrix"
+ self.SELECT_MODEL_FILE = "Selectați fișierul model"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Răsfoirea fișierului de ieșire IMatrix"
+ self.SELECT_OUTPUT_FILE = "Selectați fișierul de ieșire"
+ self.STARTING_IMATRIX_GENERATION = "Pornirea generării IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Calea backendului nu există: {0}"
+ self.GENERATING_IMATRIX = "Generarea IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Eroare la pornirea generării IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = (
+ "Sarcina de generare IMatrix a fost pornită"
+ )
+ self.ERROR_MESSAGE = "Eroare: {0}"
+ self.TASK_ERROR = "Eroare de sarcină: {0}"
+ self.APPLICATION_CLOSING = "Închiderea aplicației"
+ self.APPLICATION_CLOSED = "Aplicație închisă"
+ self.SELECT_QUANTIZATION_TYPE = "Selectați tipul de cuantizare"
+ self.ALLOWS_REQUANTIZING = (
+ "Permite recuantizarea tensorilor care au fost deja cuantizați"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Va lăsa output.weight necuantizat (sau recuantizat)"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Dezactivați mixurile k-quant și cuantizați toți tensorii la același tip"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utilizați datele din fișier ca matrice de importanță pentru optimizările de cuantizare"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Utilizați matricea de importanță pentru acești tensori"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Nu utilizați matricea de importanță pentru acești tensori"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Tipul tensorului de ieșire:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Utilizați acest tip pentru tensorul output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Tipul de încorporare a tokenului:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Utilizați acest tip pentru tensorul de încorporări ale tokenului"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Va genera modelul cuantizat în aceleași fragmente ca și intrarea"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Suprascrieți metadatele modelului"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Fișier de date de intrare pentru generarea IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modelul de cuantizat"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Calea de ieșire pentru IMatrix generat"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Cât de des să salvați IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Setați valoarea de descărcare GPU (-ngl)"
+ self.COMPLETED = "Finalizat"
+ self.REFRESH_MODELS = "Odśwież modele"
+
+
+class _Czech(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Automatický kvantizátor modelů GGUF)"
+ self.RAM_USAGE = "Využití RAM:"
+ self.CPU_USAGE = "Využití CPU:"
+ self.BACKEND = "Backend Llama.cpp:"
+ self.REFRESH_BACKENDS = "Obnovit backendy"
+ self.MODELS_PATH = "Cesta k modelům:"
+ self.OUTPUT_PATH = "Výstupní cesta:"
+ self.LOGS_PATH = "Cesta k logům:"
+ self.BROWSE = "Procházet"
+ self.AVAILABLE_MODELS = "Dostupné modely:"
+ self.QUANTIZATION_TYPE = "Typ kvantizace:"
+ self.ALLOW_REQUANTIZE = "Povolit rekvantizaci"
+ self.LEAVE_OUTPUT_TENSOR = "Ponechat výstupní tenzor"
+ self.PURE = "Čistý"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Zahrnout váhy:"
+ self.EXCLUDE_WEIGHTS = "Vyloučit váhy:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Použít typ výstupního tenzoru"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Použít typ vkládání tokenů"
+ self.KEEP_SPLIT = "Zachovat rozdělení"
+ self.KV_OVERRIDES = "Přepsání KV:"
+ self.ADD_NEW_OVERRIDE = "Přidat nové přepsání"
+ self.QUANTIZE_MODEL = "Kvantizovat model"
+ self.SAVE_PRESET = "Uložit předvolbu"
+ self.LOAD_PRESET = "Načíst předvolbu"
+ self.TASKS = "Úkoly:"
+ self.DOWNLOAD_LLAMACPP = "Stáhnout llama.cpp"
+ self.SELECT_RELEASE = "Vybrat verzi:"
+ self.SELECT_ASSET = "Vybrat aktivum:"
+ self.EXTRACT_CUDA_FILES = "Extrahovat soubory CUDA"
+ self.SELECT_CUDA_BACKEND = "Vybrat backend CUDA:"
+ self.DOWNLOAD = "Stáhnout"
+ self.IMATRIX_GENERATION = "Generování IMatrix"
+ self.DATA_FILE = "Datový soubor:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Výstup:"
+ self.OUTPUT_FREQUENCY = "Frekvence výstupu:"
+ self.GPU_OFFLOAD = "Odlehčení GPU:"
+ self.AUTO = "Automaticky"
+ self.GENERATE_IMATRIX = "Generovat IMatrix"
+ self.ERROR = "Chyba"
+ self.WARNING = "Varování"
+ self.PROPERTIES = "Vlastnosti"
+ self.CANCEL = "Zrušit"
+ self.RESTART = "Restartovat"
+ self.DELETE = "Smazat"
+ self.CONFIRM_DELETION = "Jste si jisti, že chcete smazat tento úkol?"
+ self.TASK_RUNNING_WARNING = (
+ "Některé úkoly stále běží. Jste si jisti, že chcete ukončit?"
+ )
+ self.YES = "Ano"
+ self.NO = "Ne"
+ self.DOWNLOAD_COMPLETE = "Stahování dokončeno"
+ self.CUDA_EXTRACTION_FAILED = "Extrahování CUDA se nezdařilo"
+ self.PRESET_SAVED = "Předvolba uložena"
+ self.PRESET_LOADED = "Předvolba načtena"
+ self.NO_ASSET_SELECTED = "Nebylo vybráno žádné aktivum"
+ self.DOWNLOAD_FAILED = "Stahování se nezdařilo"
+ self.NO_BACKEND_SELECTED = "Nebyl vybrán žádný backend"
+ self.NO_MODEL_SELECTED = "Nebyl vybrán žádný model"
+ self.REFRESH_RELEASES = "Obnovit verze"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nebyly nalezeny žádné vhodné backendy CUDA"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}\nSoubory CUDA extrahovány do {1}"
+ self.CUDA_FILES_EXTRACTED = "Soubory CUDA extrahovány do"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nebyl nalezen žádný vhodný backend CUDA pro extrakci"
+ )
+ self.ERROR_FETCHING_RELEASES = "Chyba při načítání verzí: {0}"
+ self.CONFIRM_DELETION_TITLE = "Potvrdit smazání"
+ self.LOG_FOR = "Log pro {0}"
+ self.ALL_FILES = "Všechny soubory (*)"
+ self.GGUF_FILES = "Soubory GGUF (*.gguf)"
+ self.DAT_FILES = "Soubory DAT (*.dat)"
+ self.JSON_FILES = "Soubory JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Nepodařilo se načíst předvolbu: {0}"
+ self.INITIALIZING_AUTOGGUF = "Inicializace aplikace AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicializace AutoGGUF dokončena"
+ self.REFRESHING_BACKENDS = "Obnovování backendů"
+ self.NO_BACKENDS_AVAILABLE = "Žádné dostupné backendy"
+ self.FOUND_VALID_BACKENDS = "Nalezeno {0} platných backendů"
+ self.SAVING_PRESET = "Ukládání předvolby"
+ self.PRESET_SAVED_TO = "Předvolba uložena do {0}"
+ self.LOADING_PRESET = "Načítání předvolby"
+ self.PRESET_LOADED_FROM = "Předvolba načtena z {0}"
+ self.ADDING_KV_OVERRIDE = "Přidávání přepsání KV: {0}"
+ self.SAVING_TASK_PRESET = "Ukládání předvolby úkolu pro {0}"
+ self.TASK_PRESET_SAVED = "Předvolba úkolu uložena"
+ self.TASK_PRESET_SAVED_TO = "Předvolba úkolu uložena do {0}"
+ self.RESTARTING_TASK = "Restartování úkolu: {0}"
+ self.IN_PROGRESS = "Probíhá"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Stahování dokončeno. Extrahováno do: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binární soubor llama.cpp byl stažen a extrahován do {0}\nSoubory CUDA extrahovány do {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nebyl nalezen žádný vhodný backend CUDA pro extrakci"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Binární soubor llama.cpp byl stažen a extrahován do {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Obnovování verzí llama.cpp"
+ self.UPDATING_ASSET_LIST = "Aktualizace seznamu aktiv"
+ self.UPDATING_CUDA_OPTIONS = "Aktualizace možností CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Zahájení stahování llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Aktualizace backendů CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Nebyl vybrán žádný backend CUDA pro extrakci"
+ self.EXTRACTING_CUDA_FILES = "Extrahování souborů CUDA z {0} do {1}"
+ self.DOWNLOAD_ERROR = "Chyba stahování: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Zobrazení kontextové nabídky úkolu"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Zobrazení vlastností úkolu: {0}"
+ self.CANCELLING_TASK = "Zrušení úkolu: {0}"
+ self.CANCELED = "Zrušeno"
+ self.DELETING_TASK = "Mazání úkolu: {0}"
+ self.LOADING_MODELS = "Načítání modelů"
+ self.LOADED_MODELS = "Načteno {0} modelů"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Procházení adresáře modelů"
+ self.SELECT_MODELS_DIRECTORY = "Vyberte adresář modelů"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Procházení výstupního adresáře"
+ self.SELECT_OUTPUT_DIRECTORY = "Vyberte výstupní adresář"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Procházení adresáře logů"
+ self.SELECT_LOGS_DIRECTORY = "Vyberte adresář logů"
+ self.BROWSING_FOR_IMATRIX_FILE = "Procházení souboru IMatrix"
+ self.SELECT_IMATRIX_FILE = "Vyberte soubor IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Využití CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Ověřování vstupů kvantizace"
+ self.MODELS_PATH_REQUIRED = "Cesta k modelům je vyžadována"
+ self.OUTPUT_PATH_REQUIRED = "Výstupní cesta je vyžadována"
+ self.LOGS_PATH_REQUIRED = "Cesta k logům je vyžadována"
+ self.STARTING_MODEL_QUANTIZATION = "Spuštění kvantizace modelu"
+ self.INPUT_FILE_NOT_EXIST = "Vstupní soubor '{0}' neexistuje."
+ self.QUANTIZING_MODEL_TO = "Kvantizace {0} na {1}"
+ self.QUANTIZATION_TASK_STARTED = "Úkol kvantizace spuštěn pro {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Chyba při spuštění kvantizace: {0}"
+ self.UPDATING_MODEL_INFO = "Aktualizace informací o modelu: {0}"
+ self.TASK_FINISHED = "Úkol dokončen: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Zobrazení detailů úkolu pro: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Procházení datového souboru IMatrix"
+ self.SELECT_DATA_FILE = "Vyberte datový soubor"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Procházení souboru modelu IMatrix"
+ self.SELECT_MODEL_FILE = "Vyberte soubor modelu"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Procházení výstupního souboru IMatrix"
+ self.SELECT_OUTPUT_FILE = "Vyberte výstupní soubor"
+ self.STARTING_IMATRIX_GENERATION = "Spuštění generování IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Cesta backendu neexistuje: {0}"
+ self.GENERATING_IMATRIX = "Generování IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Chyba při spuštění generování IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Úkol generování IMatrix spuštěn"
+ self.ERROR_MESSAGE = "Chyba: {0}"
+ self.TASK_ERROR = "Chyba úkolu: {0}"
+ self.APPLICATION_CLOSING = "Zavírání aplikace"
+ self.APPLICATION_CLOSED = "Aplikace zavřena"
+ self.SELECT_QUANTIZATION_TYPE = "Vyberte typ kvantizace"
+ self.ALLOWS_REQUANTIZING = (
+ "Umožňuje rekvantizovat tenzory, které již byly kvantizovány"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = (
+ "Ponechá output.weight nekvantizovaný (nebo rekvantizovaný)"
+ )
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Zakázat k-kvantové směsi a kvantizovat všechny tenzory na stejný typ"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "Použít data v souboru jako matici důležitosti pro optimalizace kvantizace"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Použít matici důležitosti pro tyto tenzory"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Nepoužívat matici důležitosti pro tyto tenzory"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Typ výstupního tenzoru:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Použít tento typ pro tenzor output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Typ vkládání tokenů:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Použít tento typ pro tenzor vkládání tokenů"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Vygeneruje kvantizovaný model ve stejných fragmentech jako vstup"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Přepsat metadata modelu"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Vstupní datový soubor pro generování IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Model, který má být kvantizován"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Výstupní cesta pro generovaný IMatrix"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Jak často ukládat IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Nastavit hodnotu odlehčení GPU (-ngl)"
+ self.COMPLETED = "Dokončeno"
+ self.REFRESH_MODELS = "Reîmprospătează modelele"
+
+
+class _CanadianFrench(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (Quantificateur automatique de modèles GGUF)"
+ self.RAM_USAGE = "Utilisation de la RAM :" # Spacing
+ self.CPU_USAGE = "Utilisation du CPU :" # Spacing
+ self.BACKEND = "Moteur d'arrière-plan Llama.cpp :" # Spacing and terminology
+ self.REFRESH_BACKENDS = "Actualiser les moteurs d'arrière-plan"
+ self.MODELS_PATH = "Chemin des modèles :" # Spacing
+ self.OUTPUT_PATH = "Chemin de sortie :" # Spacing
+ self.LOGS_PATH = "Chemin des journaux :" # Spacing
+ self.BROWSE = "Parcourir"
+ self.AVAILABLE_MODELS = "Modèles disponibles :" # Spacing
+ self.QUANTIZATION_TYPE = "Type de quantification :" # Spacing
+ self.ALLOW_REQUANTIZE = "Autoriser la requantification"
+ self.LEAVE_OUTPUT_TENSOR = "Laisser le tenseur de sortie"
+ self.PURE = "Pur"
+ self.IMATRIX = "IMatrix :" # Spacing
+ self.INCLUDE_WEIGHTS = "Inclure les poids :" # Spacing
+ self.EXCLUDE_WEIGHTS = "Exclure les poids :" # Spacing
+ self.USE_OUTPUT_TENSOR_TYPE = "Utiliser le type de tenseur de sortie"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Utiliser le type d'intégration de jeton"
+ self.KEEP_SPLIT = "Conserver la division"
+ self.KV_OVERRIDES = "Remplacements KV :" # Spacing
+ self.ADD_NEW_OVERRIDE = "Ajouter un nouveau remplacement"
+ self.QUANTIZE_MODEL = "Quantifier le modèle"
+ self.SAVE_PRESET = "Enregistrer le préréglage"
+ self.LOAD_PRESET = "Charger le préréglage"
+ self.TASKS = "Tâches :" # Spacing
+ self.DOWNLOAD_LLAMACPP = "Télécharger llama.cpp"
+ self.SELECT_RELEASE = "Sélectionner la version :" # Spacing
+ self.SELECT_ASSET = "Sélectionner l'actif :" # Spacing
+ self.EXTRACT_CUDA_FILES = "Extraire les fichiers CUDA"
+ self.SELECT_CUDA_BACKEND = "Sélectionner le backend CUDA :" # Spacing
+ self.DOWNLOAD = "Télécharger"
+ self.IMATRIX_GENERATION = "Génération d'IMatrix"
+ self.DATA_FILE = "Fichier de données :" # Spacing
+ self.MODEL = "Modèle :" # Spacing
+ self.OUTPUT = "Sortie :" # Spacing
+ self.OUTPUT_FREQUENCY = "Fréquence de sortie :" # Spacing
+ self.GPU_OFFLOAD = "Déchargement GPU :" # Spacing
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Générer IMatrix"
+ self.ERROR = "Erreur"
+ self.WARNING = "Avertissement"
+ self.PROPERTIES = "Propriétés"
+ self.CANCEL = "Annuler"
+ self.RESTART = "Redémarrer"
+ self.DELETE = "Supprimer"
+ self.CONFIRM_DELETION = (
+ "Êtes-vous sûr de vouloir supprimer cette tâche ?" # Spacing
+ )
+ self.TASK_RUNNING_WARNING = "Certaines tâches sont encore en cours d'exécution. Êtes-vous sûr de vouloir quitter ?" # Spacing
+ self.YES = "Oui"
+ self.NO = "Non"
+ self.DOWNLOAD_COMPLETE = "Téléchargement terminé"
+ self.CUDA_EXTRACTION_FAILED = "Échec de l'extraction CUDA"
+ self.PRESET_SAVED = "Préréglage enregistré"
+ self.PRESET_LOADED = "Préréglage chargé"
+ self.NO_ASSET_SELECTED = "Aucun actif sélectionné"
+ self.DOWNLOAD_FAILED = "Échec du téléchargement"
+ self.NO_BACKEND_SELECTED = "Aucun backend sélectionné"
+ self.NO_MODEL_SELECTED = "Aucun modèle sélectionné"
+ self.REFRESH_RELEASES = "Actualiser les versions"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Aucun backend CUDA approprié trouvé"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}\nLes fichiers CUDA ont été extraits dans {1}"
+ self.CUDA_FILES_EXTRACTED = "Les fichiers CUDA ont été extraits dans"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Aucun backend CUDA approprié trouvé pour l'extraction"
+ )
+ self.ERROR_FETCHING_RELEASES = (
+ "Erreur lors de la récupération des versions : {0}" # Spacing
+ )
+ self.CONFIRM_DELETION_TITLE = "Confirmer la suppression"
+ self.LOG_FOR = "Journal pour {0}"
+ self.ALL_FILES = "Tous les fichiers (*)"
+ self.GGUF_FILES = "Fichiers GGUF (*.gguf)"
+ self.DAT_FILES = "Fichiers DAT (*.dat)"
+ self.JSON_FILES = "Fichiers JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Échec du chargement du préréglage : {0}" # Spacing
+ self.INITIALIZING_AUTOGGUF = "Initialisation de l'application AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Initialisation d'AutoGGUF terminée"
+ self.REFRESHING_BACKENDS = "Actualisation des moteurs d'arrière-plan"
+ self.NO_BACKENDS_AVAILABLE = "Aucun moteur d'arrière-plan disponible"
+ self.FOUND_VALID_BACKENDS = "{0} moteurs d'arrière-plan valides trouvés"
+ self.SAVING_PRESET = "Enregistrement du préréglage"
+ self.PRESET_SAVED_TO = "Préréglage enregistré dans {0}"
+ self.LOADING_PRESET = "Chargement du préréglage"
+ self.PRESET_LOADED_FROM = "Préréglage chargé depuis {0}"
+ self.ADDING_KV_OVERRIDE = "Ajout de remplacement KV : {0}" # Spacing
+ self.SAVING_TASK_PRESET = "Enregistrement du préréglage de tâche pour {0}"
+ self.TASK_PRESET_SAVED = "Préréglage de tâche enregistré"
+ self.TASK_PRESET_SAVED_TO = "Préréglage de tâche enregistré dans {0}"
+ self.RESTARTING_TASK = "Redémarrage de la tâche : {0}" # Spacing
+ self.IN_PROGRESS = "En cours"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "Téléchargement terminé. Extrait dans : {0}" # Spacing
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}\nLes fichiers CUDA ont été extraits dans {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Aucun backend CUDA approprié trouvé pour l'extraction"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Le fichier binaire llama.cpp a été téléchargé et extrait dans {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Actualisation des versions de llama.cpp"
+ self.UPDATING_ASSET_LIST = "Mise à jour de la liste des actifs"
+ self.UPDATING_CUDA_OPTIONS = "Mise à jour des options CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Démarrage du téléchargement de llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Mise à jour des backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = (
+ "Aucun backend CUDA sélectionné pour l'extraction"
+ )
+ self.EXTRACTING_CUDA_FILES = "Extraction des fichiers CUDA de {0} à {1}"
+ self.DOWNLOAD_ERROR = "Erreur de téléchargement : {0}" # Spacing
+ self.SHOWING_TASK_CONTEXT_MENU = "Affichage du menu contextuel de la tâche"
+ self.SHOWING_PROPERTIES_FOR_TASK = (
+ "Affichage des propriétés de la tâche : {0}" # Spacing
+ )
+ self.CANCELLING_TASK = "Annulation de la tâche : {0}" # Spacing
+ self.CANCELED = "Annulée"
+ self.DELETING_TASK = "Suppression de la tâche : {0}" # Spacing
+ self.LOADING_MODELS = "Chargement des modèles"
+ self.LOADED_MODELS = "{0} modèles chargés"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Navigation dans le répertoire des modèles"
+ self.SELECT_MODELS_DIRECTORY = "Sélectionner le répertoire des modèles"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Navigation dans le répertoire de sortie"
+ self.SELECT_OUTPUT_DIRECTORY = "Sélectionner le répertoire de sortie"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Navigation dans le répertoire des journaux"
+ self.SELECT_LOGS_DIRECTORY = "Sélectionner le répertoire des journaux"
+ self.BROWSING_FOR_IMATRIX_FILE = "Navigation dans le fichier IMatrix"
+ self.SELECT_IMATRIX_FILE = "Sélectionner le fichier IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} Mo / {2} Mo)"
+ self.CPU_USAGE_FORMAT = "Utilisation du CPU : {0:.1f}%" # Spacing
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validation des entrées de quantification"
+ self.MODELS_PATH_REQUIRED = "Le chemin des modèles est requis"
+ self.OUTPUT_PATH_REQUIRED = "Le chemin de sortie est requis"
+ self.LOGS_PATH_REQUIRED = "Le chemin des journaux est requis"
+ self.STARTING_MODEL_QUANTIZATION = "Démarrage de la quantification du modèle"
+ self.INPUT_FILE_NOT_EXIST = "Le fichier d'entrée '{0}' n'existe pas."
+ self.QUANTIZING_MODEL_TO = "Quantification de {0} en {1}"
+ self.QUANTIZATION_TASK_STARTED = "Tâche de quantification démarrée pour {0}"
+ self.ERROR_STARTING_QUANTIZATION = (
+ "Erreur lors du démarrage de la quantification : {0}" # Spacing
+ )
+ self.UPDATING_MODEL_INFO = (
+ "Mise à jour des informations sur le modèle : {0}" # Spacing
+ )
+ self.TASK_FINISHED = "Tâche terminée : {0}" # Spacing
+ self.SHOWING_TASK_DETAILS_FOR = (
+ "Affichage des détails de la tâche pour : {0}" # Spacing
+ )
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = (
+ "Navigation dans le fichier de données IMatrix"
+ )
+ self.SELECT_DATA_FILE = "Sélectionner le fichier de données"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = (
+ "Navigation dans le fichier de modèle IMatrix"
+ )
+ self.SELECT_MODEL_FILE = "Sélectionner le fichier de modèle"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = (
+ "Navigation dans le fichier de sortie IMatrix"
+ )
+ self.SELECT_OUTPUT_FILE = "Sélectionner le fichier de sortie"
+ self.STARTING_IMATRIX_GENERATION = "Démarrage de la génération d'IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = (
+ "Le chemin du backend n'existe pas : {0}" # Spacing
+ )
+ self.GENERATING_IMATRIX = "Génération d'IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Erreur lors du démarrage de la génération d'IMatrix : {0}" # Spacing
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Tâche de génération d'IMatrix démarrée"
+ self.ERROR_MESSAGE = "Erreur : {0}" # Spacing
+ self.TASK_ERROR = "Erreur de tâche : {0}" # Spacing
+ self.APPLICATION_CLOSING = "Fermeture de l'application"
+ self.APPLICATION_CLOSED = "Application fermée"
+ self.SELECT_QUANTIZATION_TYPE = "Sélectionnez le type de quantification"
+ self.ALLOWS_REQUANTIZING = (
+ "Permet de requantifier les tenseurs qui ont déjà été quantifiés"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Laissera output.weight non (re)quantifié"
+ self.DISABLE_K_QUANT_MIXTURES = "Désactiver les mélanges k-quant et quantifier tous les tenseurs du même type"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Utiliser les données du fichier comme matrice d'importance pour les optimisations de quant"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Utiliser la matrice d'importance pour ces tenseurs"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Ne pas utiliser la matrice d'importance pour ces tenseurs"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Type de tenseur de sortie :" # Spacing
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Utiliser ce type pour le tenseur output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Type d'intégration de jeton :" # Spacing
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Utiliser ce type pour le tenseur d'intégration de jetons"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Générera le modèle quantifié dans les mêmes fragments que l'entrée"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Remplacer les métadonnées du modèle"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Fichier de données d'entrée pour la génération d'IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modèle à quantifier"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Chemin de sortie pour l'IMatrix généré"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Fréquence d'enregistrement de l'IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Définir la valeur de déchargement GPU (-ngl)"
+ self.COMPLETED = "Terminé"
+ self.REFRESH_MODELS = "Rafraîchir les modèles"
+
+
+class _Portuguese_PT(_Localization):
+ def __init__(self):
+ super().__init__()
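+        # European Portuguese: "ficheiro"/"guardar"/"transferência" rather than Brazilian variants; progress logs use the "a + infinitive" form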
+ self.WINDOW_TITLE = "AutoGGUF (Quantificador Automático de Modelos GGUF)"
+ self.RAM_USAGE = "Utilização de RAM:"
+ self.CPU_USAGE = "Utilização da CPU:"
+ self.BACKEND = "Backend Llama.cpp:"
+ self.REFRESH_BACKENDS = "Atualizar Backends"
+ self.MODELS_PATH = "Caminho dos Modelos:"
+ self.OUTPUT_PATH = "Caminho de Saída:"
+ self.LOGS_PATH = "Caminho dos Logs:"
+ self.BROWSE = "Navegar"
+ self.AVAILABLE_MODELS = "Modelos Disponíveis:"
+ self.QUANTIZATION_TYPE = "Tipo de Quantização:"
+ self.ALLOW_REQUANTIZE = "Permitir Requantização"
+ self.LEAVE_OUTPUT_TENSOR = "Manter Tensor de Saída"
+ self.PURE = "Puro"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Incluir Pesos:"
+ self.EXCLUDE_WEIGHTS = "Excluir Pesos:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Usar Tipo de Tensor de Saída"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Usar Tipo de Incorporação de Token"
+ self.KEEP_SPLIT = "Manter Divisão"
+ self.KV_OVERRIDES = "Substituições KV:"
+ self.ADD_NEW_OVERRIDE = "Adicionar Nova Substituição"
+ self.QUANTIZE_MODEL = "Quantizar Modelo"
+ self.SAVE_PRESET = "Guardar Predefinição"
+ self.LOAD_PRESET = "Carregar Predefinição"
+ self.TASKS = "Tarefas:"
+ self.DOWNLOAD_LLAMACPP = "Descarregar llama.cpp"
+ self.SELECT_RELEASE = "Selecionar Versão:"
+ self.SELECT_ASSET = "Selecionar Ativo:"
+ self.EXTRACT_CUDA_FILES = "Extrair Ficheiros CUDA"
+ self.SELECT_CUDA_BACKEND = "Selecionar Backend CUDA:"
+ self.DOWNLOAD = "Descarregar"
+ self.IMATRIX_GENERATION = "Geração de IMatrix"
+ self.DATA_FILE = "Ficheiro de Dados:"
+ self.MODEL = "Modelo:"
+ self.OUTPUT = "Saída:"
+ self.OUTPUT_FREQUENCY = "Frequência de Saída:"
+ self.GPU_OFFLOAD = "Offload da GPU:"
+ self.AUTO = "Automático"
+ self.GENERATE_IMATRIX = "Gerar IMatrix"
+ self.ERROR = "Erro"
+ self.WARNING = "Aviso"
+ self.PROPERTIES = "Propriedades"
+ self.CANCEL = "Cancelar"
+ self.RESTART = "Reiniciar"
+ self.DELETE = "Eliminar"
+ self.CONFIRM_DELETION = "Tem a certeza de que pretende eliminar esta tarefa?"
+ self.TASK_RUNNING_WARNING = "Algumas tarefas ainda estão em execução. Tem a certeza de que pretende sair?"
+ self.YES = "Sim"
+ self.NO = "Não"
+ self.DOWNLOAD_COMPLETE = "Transferência Concluída"
+ self.CUDA_EXTRACTION_FAILED = "Falha na Extração do CUDA"
+ self.PRESET_SAVED = "Predefinição Guardada"
+ self.PRESET_LOADED = "Predefinição Carregada"
+ self.NO_ASSET_SELECTED = "Nenhum ativo selecionado"
+ self.DOWNLOAD_FAILED = "Falha na transferência"
+ self.NO_BACKEND_SELECTED = "Nenhum backend selecionado"
+ self.NO_MODEL_SELECTED = "Nenhum modelo selecionado"
+ self.REFRESH_RELEASES = "Atualizar Versões"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nenhum backend CUDA adequado encontrado"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}\nFicheiros CUDA extraídos para {1}"
+ self.CUDA_FILES_EXTRACTED = "Ficheiros CUDA extraídos para"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nenhum backend CUDA adequado encontrado para extração"
+ )
+ self.ERROR_FETCHING_RELEASES = "Erro ao obter versões: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirmar Eliminação"
+ self.LOG_FOR = "Log para {0}"
+ self.ALL_FILES = "Todos os Ficheiros (*)"
+ self.GGUF_FILES = "Ficheiros GGUF (*.gguf)"
+ self.DAT_FILES = "Ficheiros DAT (*.dat)"
+ self.JSON_FILES = "Ficheiros JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Falha ao carregar a predefinição: {0}"
+ self.INITIALIZING_AUTOGGUF = "A inicializar a aplicação AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Inicialização do AutoGGUF concluída"
+ self.REFRESHING_BACKENDS = "A atualizar backends"
+ self.NO_BACKENDS_AVAILABLE = "Nenhum backend disponível"
+ self.FOUND_VALID_BACKENDS = "{0} backends válidos encontrados"
+ self.SAVING_PRESET = "A guardar predefinição"
+ self.PRESET_SAVED_TO = "Predefinição guardada em {0}"
+ self.LOADING_PRESET = "A carregar predefinição"
+ self.PRESET_LOADED_FROM = "Predefinição carregada de {0}"
+ self.ADDING_KV_OVERRIDE = "A adicionar substituição KV: {0}"
+ self.SAVING_TASK_PRESET = "A guardar predefinição de tarefa para {0}"
+ self.TASK_PRESET_SAVED = "Predefinição de Tarefa Guardada"
+ self.TASK_PRESET_SAVED_TO = "Predefinição de tarefa guardada em {0}"
+ self.RESTARTING_TASK = "A reiniciar tarefa: {0}"
+ self.IN_PROGRESS = "Em Andamento"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = (
+ "Transferência concluída. Extraído para: {0}"
+ )
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Binário llama.cpp transferido e extraído para {0}\nFicheiros CUDA extraídos para {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nenhum backend CUDA adequado encontrado para extração"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Binário llama.cpp transferido e extraído para {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "A atualizar versões do llama.cpp"
+ self.UPDATING_ASSET_LIST = "A atualizar lista de ativos"
+ self.UPDATING_CUDA_OPTIONS = "A atualizar opções CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "A iniciar transferência do llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "A atualizar backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Nenhum backend CUDA selecionado para extração"
+ self.EXTRACTING_CUDA_FILES = "A extrair ficheiros CUDA de {0} para {1}"
+ self.DOWNLOAD_ERROR = "Erro de transferência: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "A exibir menu de contexto da tarefa"
+ self.SHOWING_PROPERTIES_FOR_TASK = "A exibir propriedades para a tarefa: {0}"
+ self.CANCELLING_TASK = "A cancelar tarefa: {0}"
+ self.CANCELED = "Cancelado"
+ self.DELETING_TASK = "A eliminar tarefa: {0}"
+ self.LOADING_MODELS = "A carregar modelos"
+ self.LOADED_MODELS = "{0} modelos carregados"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "A navegar pelo diretório de modelos"
+ self.SELECT_MODELS_DIRECTORY = "Selecionar Diretório de Modelos"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "A navegar pelo diretório de saída"
+ self.SELECT_OUTPUT_DIRECTORY = "Selecionar Diretório de Saída"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "A navegar pelo diretório de logs"
+ self.SELECT_LOGS_DIRECTORY = "Selecionar Diretório de Logs"
+ self.BROWSING_FOR_IMATRIX_FILE = "A navegar pelo ficheiro IMatrix"
+ self.SELECT_IMATRIX_FILE = "Selecionar Ficheiro IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Utilização da CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "A validar entradas de quantização"
+ self.MODELS_PATH_REQUIRED = "O caminho dos modelos é obrigatório"
+ self.OUTPUT_PATH_REQUIRED = "O caminho de saída é obrigatório"
+ self.LOGS_PATH_REQUIRED = "O caminho dos logs é obrigatório"
+ self.STARTING_MODEL_QUANTIZATION = "A iniciar a quantização do modelo"
+ self.INPUT_FILE_NOT_EXIST = "O ficheiro de entrada '{0}' não existe."
+ self.QUANTIZING_MODEL_TO = "A quantizar {0} para {1}"
+ self.QUANTIZATION_TASK_STARTED = "Tarefa de quantização iniciada para {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Erro ao iniciar a quantização: {0}"
+ self.UPDATING_MODEL_INFO = "A atualizar informações do modelo: {0}"
+ self.TASK_FINISHED = "Tarefa concluída: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "A mostrar detalhes da tarefa para: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "A navegar pelo ficheiro de dados IMatrix"
+ self.SELECT_DATA_FILE = "Selecionar Ficheiro de Dados"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = (
+ "A navegar pelo ficheiro de modelo IMatrix"
+ )
+ self.SELECT_MODEL_FILE = "Selecionar Ficheiro de Modelo"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = (
+ "A navegar pelo ficheiro de saída IMatrix"
+ )
+ self.SELECT_OUTPUT_FILE = "Selecionar Ficheiro de Saída"
+ self.STARTING_IMATRIX_GENERATION = "A iniciar a geração de IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "O caminho do backend não existe: {0}"
+ self.GENERATING_IMATRIX = "A gerar IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Erro ao iniciar a geração de IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Tarefa de geração de IMatrix iniciada"
+ self.ERROR_MESSAGE = "Erro: {0}"
+ self.TASK_ERROR = "Erro de tarefa: {0}"
+ self.APPLICATION_CLOSING = "A fechar a aplicação"
+ self.APPLICATION_CLOSED = "Aplicação fechada"
+ self.SELECT_QUANTIZATION_TYPE = "Selecione o tipo de quantização"
+ self.ALLOWS_REQUANTIZING = (
+ "Permite requantizar tensores que já foram quantizados"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Deixará output.weight não (re)quantizado"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Desativar misturas k-quant e quantizar todos os tensores para o mesmo tipo"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Usar os dados no ficheiro como matriz de importância para otimizações de quantização"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Usar matriz de importância para estes tensores"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Não usar matriz de importância para estes tensores"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Tipo de Tensor de Saída:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Usar este tipo para o tensor output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Tipo de Incorporação de Token:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Usar este tipo para o tensor de incorporações de token"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Irá gerar o modelo quantizado nos mesmos shards da entrada"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Substituir metadados do modelo"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Ficheiro de dados de entrada para geração de IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Modelo a ser quantizado"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Caminho de saída para o IMatrix gerado"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Com que frequência guardar o IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Definir valor de offload da GPU (-ngl)"
+ self.COMPLETED = "Concluído"
+ self.REFRESH_MODELS = "Atualizar modelos"
+
+
+class _Greek(_Localization):
+ def __init__(self):
+ super().__init__()
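+        # Technical loanwords ("backend", "CUDA", "IMatrix", "token") are left untranslated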
+ self.WINDOW_TITLE = "AutoGGUF (Αυτόματος Κβαντιστής Μοντέλων GGUF)"
+ self.RAM_USAGE = "Χρήση RAM:"
+ self.CPU_USAGE = "Χρήση CPU:"
+ self.BACKEND = "Backend Llama.cpp:"
+ self.REFRESH_BACKENDS = "Ανανέωση Backends"
+ self.MODELS_PATH = "Διαδρομή Μοντέλων:"
+ self.OUTPUT_PATH = "Διαδρομή Εξόδου:"
+ self.LOGS_PATH = "Διαδρομή Αρχείων Καταγραφής:"
+ self.BROWSE = "Περιήγηση"
+ self.AVAILABLE_MODELS = "Διαθέσιμα Μοντέλα:"
+ self.QUANTIZATION_TYPE = "Τύπος Κβαντισμού:"
+ self.ALLOW_REQUANTIZE = "Να Επιτρέπεται η Επανακβάντιση"
+ self.LEAVE_OUTPUT_TENSOR = "Διατήρηση Tensor Εξόδου"
+ self.PURE = "Καθαρό"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Συμπερίληψη Βαρών:"
+ self.EXCLUDE_WEIGHTS = "Εξαίρεση Βαρών:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Χρήση Τύπου Tensor Εξόδου"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Χρήση Τύπου Ενσωμάτωσης Token"
+ self.KEEP_SPLIT = "Διατήρηση Διαίρεσης"
+ self.KV_OVERRIDES = "Υπερβάσεις KV:"
+ self.ADD_NEW_OVERRIDE = "Προσθήκη Νέας Υπέρβασης"
+ self.QUANTIZE_MODEL = "Κβάντιση Μοντέλου"
+ self.SAVE_PRESET = "Αποθήκευση Προεπιλογής"
+ self.LOAD_PRESET = "Φόρτωση Προεπιλογής"
+ self.TASKS = "Εργασίες:"
+ self.DOWNLOAD_LLAMACPP = "Λήψη llama.cpp"
+ self.SELECT_RELEASE = "Επιλογή Έκδοσης:"
+ self.SELECT_ASSET = "Επιλογή Στοιχείου:"
+ self.EXTRACT_CUDA_FILES = "Εξαγωγή Αρχείων CUDA"
+ self.SELECT_CUDA_BACKEND = "Επιλογή Backend CUDA:"
+ self.DOWNLOAD = "Λήψη"
+ self.IMATRIX_GENERATION = "Δημιουργία IMatrix"
+ self.DATA_FILE = "Αρχείο Δεδομένων:"
+ self.MODEL = "Μοντέλο:"
+ self.OUTPUT = "Έξοδος:"
+ self.OUTPUT_FREQUENCY = "Συχνότητα Εξόδου:"
+ self.GPU_OFFLOAD = "Εκφόρτωση GPU:"
+ self.AUTO = "Αυτόματο"
+ self.GENERATE_IMATRIX = "Δημιουργία IMatrix"
+ self.ERROR = "Σφάλμα"
+ self.WARNING = "Προειδοποίηση"
+ self.PROPERTIES = "Ιδιότητες"
+ self.CANCEL = "Ακύρωση"
+ self.RESTART = "Επανεκκίνηση"
+ self.DELETE = "Διαγραφή"
+ self.CONFIRM_DELETION = (
+ "Είστε βέβαιοι ότι θέλετε να διαγράψετε αυτήν την εργασία;"
+ )
+ self.TASK_RUNNING_WARNING = "Ορισμένες εργασίες εκτελούνται ακόμη. Είστε βέβαιοι ότι θέλετε να τερματίσετε;"
+ self.YES = "Ναι"
+ self.NO = "Όχι"
+ self.DOWNLOAD_COMPLETE = "Η Λήψη Ολοκληρώθηκε"
+ self.CUDA_EXTRACTION_FAILED = "Αποτυχία Εξαγωγής CUDA"
+ self.PRESET_SAVED = "Η Προεπιλογή Αποθηκεύτηκε"
+ self.PRESET_LOADED = "Η Προεπιλογή Φορτώθηκε"
+ self.NO_ASSET_SELECTED = "Δεν Έχει Επιλεγεί Στοιχείο"
+ self.DOWNLOAD_FAILED = "Αποτυχία Λήψης"
+ self.NO_BACKEND_SELECTED = "Δεν Έχει Επιλεγεί Backend"
+ self.NO_MODEL_SELECTED = "Δεν Έχει Επιλεγεί Μοντέλο"
+ self.REFRESH_RELEASES = "Ανανέωση Εκδόσεων"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Δεν Βρέθηκαν Κατάλληλα Backends CUDA"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}\nΤα Αρχεία CUDA Εξήχθησαν στο {1}"
+ self.CUDA_FILES_EXTRACTED = "Τα Αρχεία CUDA Εξήχθησαν στο"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Δεν Βρέθηκε Κατάλληλο Backend CUDA για Εξαγωγή"
+ )
+ self.ERROR_FETCHING_RELEASES = "Σφάλμα κατά την Ανάκτηση Εκδόσεων: {0}"
+ self.CONFIRM_DELETION_TITLE = "Επιβεβαίωση Διαγραφής"
+ self.LOG_FOR = "Αρχείο Καταγραφής για {0}"
+ self.ALL_FILES = "Όλα τα Αρχεία (*)"
+ self.GGUF_FILES = "Αρχεία GGUF (*.gguf)"
+ self.DAT_FILES = "Αρχεία DAT (*.dat)"
+ self.JSON_FILES = "Αρχεία JSON (*.json)"
+ self.FAILED_LOAD_PRESET = "Αποτυχία Φόρτωσης Προεπιλογής: {0}"
+ self.INITIALIZING_AUTOGGUF = "Εκκίνηση Εφαρμογής AutoGGUF"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "Η Εκκίνηση του AutoGGUF Ολοκληρώθηκε"
+ self.REFRESHING_BACKENDS = "Ανανέωση Backends"
+ self.NO_BACKENDS_AVAILABLE = "Δεν Υπάρχουν Διαθέσιμα Backends"
+ self.FOUND_VALID_BACKENDS = "Βρέθηκαν {0} Έγκυρα Backends"
+ self.SAVING_PRESET = "Αποθήκευση Προεπιλογής"
+ self.PRESET_SAVED_TO = "Η Προεπιλογή Αποθηκεύτηκε στο {0}"
+ self.LOADING_PRESET = "Φόρτωση Προεπιλογής"
+ self.PRESET_LOADED_FROM = "Η Προεπιλογή Φορτώθηκε από το {0}"
+ self.ADDING_KV_OVERRIDE = "Προσθήκη Υπέρβασης KV: {0}"
+ self.SAVING_TASK_PRESET = "Αποθήκευση Προεπιλογής Εργασίας για {0}"
+ self.TASK_PRESET_SAVED = "Η Προεπιλογή Εργασίας Αποθηκεύτηκε"
+ self.TASK_PRESET_SAVED_TO = "Η Προεπιλογή Εργασίας Αποθηκεύτηκε στο {0}"
+ self.RESTARTING_TASK = "Επανεκκίνηση Εργασίας: {0}"
+ self.IN_PROGRESS = "Σε Εξέλιξη"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Η Λήψη Ολοκληρώθηκε. Εξήχθη στο: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}\nΤα Αρχεία CUDA Εξήχθησαν στο {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Δεν Βρέθηκε Κατάλληλο Backend CUDA για Εξαγωγή"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "Το Δυαδικό Αρχείο llama.cpp Λήφθηκε και Εξήχθη στο {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Ανανέωση Εκδόσεων llama.cpp"
+ self.UPDATING_ASSET_LIST = "Ενημέρωση Λίστας Στοιχείων"
+ self.UPDATING_CUDA_OPTIONS = "Ενημέρωση Επιλογών CUDA"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Έναρξη Λήψης llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "Ενημέρωση Backends CUDA"
+ self.NO_CUDA_BACKEND_SELECTED = "Δεν Έχει Επιλεγεί Backend CUDA για Εξαγωγή"
+ self.EXTRACTING_CUDA_FILES = "Εξαγωγή Αρχείων CUDA από {0} στο {1}"
+ self.DOWNLOAD_ERROR = "Σφάλμα Λήψης: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Εμφάνιση Μενού Περιεχομένου Εργασίας"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Εμφάνιση Ιδιοτήτων για την Εργασία: {0}"
+ self.CANCELLING_TASK = "Ακύρωση Εργασίας: {0}"
+ self.CANCELED = "Ακυρώθηκε"
+ self.DELETING_TASK = "Διαγραφή Εργασίας: {0}"
+ self.LOADING_MODELS = "Φόρτωση Μοντέλων"
+ self.LOADED_MODELS = "{0} Μοντέλα Φορτώθηκαν"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Περιήγηση σε Φάκελο Μοντέλων"
+ self.SELECT_MODELS_DIRECTORY = "Επιλέξτε Φάκελο Μοντέλων"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Περιήγηση σε Φάκελο Εξόδου"
+ self.SELECT_OUTPUT_DIRECTORY = "Επιλέξτε Φάκελο Εξόδου"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Περιήγηση σε Φάκελο Αρχείων Καταγραφής"
+ self.SELECT_LOGS_DIRECTORY = "Επιλέξτε Φάκελο Αρχείων Καταγραφής"
+ self.BROWSING_FOR_IMATRIX_FILE = "Περιήγηση σε Αρχείο IMatrix"
+ self.SELECT_IMATRIX_FILE = "Επιλέξτε Αρχείο IMatrix"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "Χρήση CPU: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Επικύρωση Εισόδων Κβαντισμού"
+ self.MODELS_PATH_REQUIRED = "Απαιτείται η Διαδρομή Μοντέλων"
+ self.OUTPUT_PATH_REQUIRED = "Απαιτείται η Διαδρομή Εξόδου"
+ self.LOGS_PATH_REQUIRED = "Απαιτείται η Διαδρομή Αρχείων Καταγραφής"
+ self.STARTING_MODEL_QUANTIZATION = "Έναρξη Κβαντισμού Μοντέλου"
+ self.INPUT_FILE_NOT_EXIST = "Το Αρχείο Εισόδου '{0}' Δεν Υπάρχει."
+ self.QUANTIZING_MODEL_TO = "Κβάντιση του {0} σε {1}"
+ self.QUANTIZATION_TASK_STARTED = "Η Εργασία Κβαντισμού Ξεκίνησε για {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Σφάλμα κατά την Έναρξη Κβαντισμού: {0}"
+ self.UPDATING_MODEL_INFO = "Ενημέρωση Πληροφοριών Μοντέλου: {0}"
+ self.TASK_FINISHED = "Η Εργασία Ολοκληρώθηκε: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Εμφάνιση Λεπτομερειών Εργασίας για: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Περιήγηση σε Αρχείο Δεδομένων IMatrix"
+ self.SELECT_DATA_FILE = "Επιλέξτε Αρχείο Δεδομένων"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Περιήγηση σε Αρχείο Μοντέλου IMatrix"
+ self.SELECT_MODEL_FILE = "Επιλέξτε Αρχείο Μοντέλου"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Περιήγηση σε Αρχείο Εξόδου IMatrix"
+ self.SELECT_OUTPUT_FILE = "Επιλέξτε Αρχείο Εξόδου"
+ self.STARTING_IMATRIX_GENERATION = "Έναρξη Δημιουργίας IMatrix"
+ self.BACKEND_PATH_NOT_EXIST = "Η Διαδρομή Backend Δεν Υπάρχει: {0}"
+ self.GENERATING_IMATRIX = "Δημιουργία IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Σφάλμα κατά την Έναρξη Δημιουργίας IMatrix: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "Η Εργασία Δημιουργίας IMatrix Ξεκίνησε"
+ self.ERROR_MESSAGE = "Σφάλμα: {0}"
+ self.TASK_ERROR = "Σφάλμα Εργασίας: {0}"
+ self.APPLICATION_CLOSING = "Κλείσιμο Εφαρμογής"
+ self.APPLICATION_CLOSED = "Η Εφαρμογή Έκλεισε"
+ self.SELECT_QUANTIZATION_TYPE = "Επιλέξτε τον τύπο κβαντισμού"
+ self.ALLOWS_REQUANTIZING = (
+ "Επιτρέπει την επανακβάντιση τανυστών που έχουν ήδη κβαντιστεί"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Θα αφήσει το output.weight χωρίς (επανα)κβάντιση"
+ self.DISABLE_K_QUANT_MIXTURES = "Απενεργοποιήστε τα μείγματα k-quant και κβαντίστε όλους τους τανυστές στον ίδιο τύπο"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Χρησιμοποιήστε τα δεδομένα στο αρχείο ως πίνακα σημασίας για βελτιστοποιήσεις κβαντισμού"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Χρησιμοποιήστε τον πίνακα σημασίας για αυτούς τους τανυστές"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Μην χρησιμοποιείτε τον πίνακα σημασίας για αυτούς τους τανυστές"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Τύπος Tensor Εξόδου:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Χρησιμοποιήστε αυτόν τον τύπο για τον τανυστή output.weight"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Τύπος Ενσωμάτωσης Token:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Χρησιμοποιήστε αυτόν τον τύπο για τον τανυστή ενσωματώσεων token"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Θα δημιουργήσει το κβαντισμένο μοντέλο στα ίδια θραύσματα με την είσοδο"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Αντικατάσταση μεταδεδομένων μοντέλου"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = (
+ "Αρχείο δεδομένων εισόδου για τη δημιουργία IMatrix"
+ )
+ self.MODEL_TO_BE_QUANTIZED = "Μοντέλο προς κβάντιση"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = (
+ "Διαδρομή εξόδου για το δημιουργημένο IMatrix"
+ )
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Πόσο συχνά να αποθηκεύεται το IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Ορίστε την τιμή εκφόρτωσης GPU (-ngl)"
+ self.COMPLETED = "Ολοκληρώθηκε"
+ self.REFRESH_MODELS = "Ανανέωση μοντέλων"
+
+
+class _Hungarian(_Localization):
+ def __init__(self):
+ super().__init__()
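+        # Hungarian suffixes cannot attach to "{0}" placeholders, so directional words ("ide:", "innen:", "ehhez:") stand in front of them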
+ self.WINDOW_TITLE = "AutoGGUF (Automatizált GGUF modell kvantáló)"
+ self.RAM_USAGE = "RAM használat:"
+ self.CPU_USAGE = "CPU használat:"
+ self.BACKEND = "Llama.cpp háttérrendszer:"
+ self.REFRESH_BACKENDS = "Háttérrendszerek frissítése"
+ self.MODELS_PATH = "Modellek elérési útja:"
+ self.OUTPUT_PATH = "Kimeneti útvonal:"
+ self.LOGS_PATH = "Naplók elérési útja:"
+ self.BROWSE = "Tallózás"
+ self.AVAILABLE_MODELS = "Elérhető modellek:"
+ self.QUANTIZATION_TYPE = "Kvantálási típus:"
+ self.ALLOW_REQUANTIZE = "Újrakvantálás engedélyezése"
+ self.LEAVE_OUTPUT_TENSOR = "Kimeneti tenzor meghagyása"
+ self.PURE = "Tiszta"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Súlyok belefoglalása:"
+ self.EXCLUDE_WEIGHTS = "Súlyok kizárása:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Kimeneti tenzor típusának használata"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Token beágyazási típusának használata"
+ self.KEEP_SPLIT = "Felosztás megtartása"
+ self.KV_OVERRIDES = "KV felülbírálások:"
+ self.ADD_NEW_OVERRIDE = "Új felülbírálás hozzáadása"
+ self.QUANTIZE_MODEL = "Modell kvantálása"
+ self.SAVE_PRESET = "Esetbeállítás mentése"
+ self.LOAD_PRESET = "Esetbeállítás betöltése"
+ self.TASKS = "Feladatok:"
+ self.DOWNLOAD_LLAMACPP = "llama.cpp letöltése"
+ self.SELECT_RELEASE = "Kiadás kiválasztása:"
+ self.SELECT_ASSET = "Eszköz kiválasztása:"
+ self.EXTRACT_CUDA_FILES = "CUDA fájlok kibontása"
+ self.SELECT_CUDA_BACKEND = "CUDA háttérrendszer kiválasztása:"
+ self.DOWNLOAD = "Letöltés"
+ self.IMATRIX_GENERATION = "IMatrix generálás"
+ self.DATA_FILE = "Adatfájl:"
+ self.MODEL = "Modell:"
+ self.OUTPUT = "Kimenet:"
+ self.OUTPUT_FREQUENCY = "Kimeneti frekvencia:"
+ self.GPU_OFFLOAD = "GPU tehermentesítés:"
+ self.AUTO = "Automatikus"
+ self.GENERATE_IMATRIX = "IMatrix generálása"
+ self.ERROR = "Hiba"
+ self.WARNING = "Figyelmeztetés"
+ self.PROPERTIES = "Tulajdonságok"
+ self.CANCEL = "Mégse"
+ self.RESTART = "Újraindítás"
+ self.DELETE = "Törlés"
+ self.CONFIRM_DELETION = "Biztosan törölni szeretné ezt a feladatot?"
+ self.TASK_RUNNING_WARNING = "Néhány feladat még fut. Biztosan kilép?"
+ self.YES = "Igen"
+ self.NO = "Nem"
+ self.DOWNLOAD_COMPLETE = "Letöltés befejeződött"
+ self.CUDA_EXTRACTION_FAILED = "CUDA kibontás sikertelen"
+ self.PRESET_SAVED = "Esetbeállítás mentve"
+ self.PRESET_LOADED = "Esetbeállítás betöltve"
+ self.NO_ASSET_SELECTED = "Nincs kiválasztott eszköz"
+ self.DOWNLOAD_FAILED = "Letöltés sikertelen"
+ self.NO_BACKEND_SELECTED = "Nincs kiválasztott háttérrendszer"
+ self.NO_MODEL_SELECTED = "Nincs kiválasztott modell"
+ self.REFRESH_RELEASES = "Kiadások frissítése"
+ self.NO_SUITABLE_CUDA_BACKENDS = "Nem található megfelelő CUDA háttérrendszer"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}\nA CUDA fájlok kibontva ide: {1}"
+ self.CUDA_FILES_EXTRACTED = "A CUDA fájlok kibontva ide:"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "Nem található megfelelő CUDA háttérrendszer a kibontáshoz"
+ )
+ self.ERROR_FETCHING_RELEASES = "Hiba a kiadások lekérdezésekor: {0}"
+ self.CONFIRM_DELETION_TITLE = "Törlés megerősítése"
+ self.LOG_FOR = "Napló a következőhöz: {0}"
+ self.ALL_FILES = "Minden fájl (*)"
+ self.GGUF_FILES = "GGUF fájlok (*.gguf)"
+ self.DAT_FILES = "DAT fájlok (*.dat)"
+ self.JSON_FILES = "JSON fájlok (*.json)"
+ self.FAILED_LOAD_PRESET = "Az esetbeállítás betöltése sikertelen: {0}"
+ self.INITIALIZING_AUTOGGUF = "Az AutoGGUF alkalmazás inicializálása"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = (
+ "Az AutoGGUF inicializálása befejeződött"
+ )
+ self.REFRESHING_BACKENDS = "Háttérrendszerek frissítése"
+ self.NO_BACKENDS_AVAILABLE = "Nincsenek elérhető háttérrendszerek"
+ self.FOUND_VALID_BACKENDS = "{0} érvényes háttérrendszer található"
+ self.SAVING_PRESET = "Esetbeállítás mentése"
+ self.PRESET_SAVED_TO = "Esetbeállítás mentve ide: {0}"
+ self.LOADING_PRESET = "Esetbeállítás betöltése"
+ self.PRESET_LOADED_FROM = "Esetbeállítás betöltve innen: {0}"
+ self.ADDING_KV_OVERRIDE = "KV felülbírálás hozzáadása: {0}"
+ self.SAVING_TASK_PRESET = "Feladat esetbeállítás mentése ehhez: {0}"
+ self.TASK_PRESET_SAVED = "Feladat esetbeállítás mentve"
+ self.TASK_PRESET_SAVED_TO = "Feladat esetbeállítás mentve ide: {0}"
+ self.RESTARTING_TASK = "Feladat újraindítása: {0}"
+ self.IN_PROGRESS = "Folyamatban"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Letöltés befejeződött. Kibontva ide: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "A llama.cpp bináris fájl letöltve és kibontva ide: {0}\nA CUDA fájlok kibontva ide: {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "Nem található megfelelő CUDA háttérrendszer a kibontáshoz"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "A llama.cpp bináris fájl letöltve és kibontva ide: {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "A llama.cpp kiadások frissítése"
+ self.UPDATING_ASSET_LIST = "Eszközlista frissítése"
+ self.UPDATING_CUDA_OPTIONS = "CUDA beállítások frissítése"
+ self.STARTING_LLAMACPP_DOWNLOAD = "A llama.cpp letöltésének megkezdése"
+ self.UPDATING_CUDA_BACKENDS = "CUDA háttérrendszerek frissítése"
+ self.NO_CUDA_BACKEND_SELECTED = (
+ "Nincs kiválasztott CUDA háttérrendszer a kibontáshoz"
+ )
+ self.EXTRACTING_CUDA_FILES = "CUDA fájlok kibontása innen: {0} ide: {1}"
+ self.DOWNLOAD_ERROR = "Letöltési hiba: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Feladat helyi menüjének megjelenítése"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Feladat tulajdonságainak megjelenítése: {0}"
+ self.CANCELLING_TASK = "Feladat megszakítása: {0}"
+ self.CANCELED = "Megszakítva"
+ self.DELETING_TASK = "Feladat törlése: {0}"
+ self.LOADING_MODELS = "Modellek betöltése"
+ self.LOADED_MODELS = "{0} modell betöltve"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Modellek könyvtárának tallózása"
+ self.SELECT_MODELS_DIRECTORY = "Modellek könyvtárának kiválasztása"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Kimeneti könyvtár tallózása"
+ self.SELECT_OUTPUT_DIRECTORY = "Kimeneti könyvtár kiválasztása"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Naplók könyvtárának tallózása"
+ self.SELECT_LOGS_DIRECTORY = "Naplók könyvtárának kiválasztása"
+ self.BROWSING_FOR_IMATRIX_FILE = "IMatrix fájl tallózása"
+ self.SELECT_IMATRIX_FILE = "IMatrix fájl kiválasztása"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU használat: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Kvantálási bemenetek ellenőrzése"
+ self.MODELS_PATH_REQUIRED = "A modellek elérési útja kötelező"
+ self.OUTPUT_PATH_REQUIRED = "A kimeneti útvonal kötelező"
+ self.LOGS_PATH_REQUIRED = "A naplók elérési útja kötelező"
+ self.STARTING_MODEL_QUANTIZATION = "Modell kvantálásának indítása"
+ self.INPUT_FILE_NOT_EXIST = "A bemeneti fájl '{0}' nem létezik."
+ self.QUANTIZING_MODEL_TO = "{0} kvantálása erre: {1}"
+ self.QUANTIZATION_TASK_STARTED = "Kvantálási feladat elindítva ehhez: {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Hiba a kvantálás indításakor: {0}"
+ self.UPDATING_MODEL_INFO = "Modellinformációk frissítése: {0}"
+ self.TASK_FINISHED = "Feladat befejezve: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Feladat részleteinek megjelenítése ehhez: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "IMatrix adatfájl tallózása"
+ self.SELECT_DATA_FILE = "Adatfájl kiválasztása"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "IMatrix modellfájl tallózása"
+ self.SELECT_MODEL_FILE = "Modellfájl kiválasztása"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "IMatrix kimeneti fájl tallózása"
+ self.SELECT_OUTPUT_FILE = "Kimeneti fájl kiválasztása"
+ self.STARTING_IMATRIX_GENERATION = "IMatrix generálásának indítása"
+ self.BACKEND_PATH_NOT_EXIST = "A háttérrendszer elérési útja nem létezik: {0}"
+ self.GENERATING_IMATRIX = "IMatrix generálása"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Hiba az IMatrix generálásának indításakor: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generálási feladat elindítva"
+ self.ERROR_MESSAGE = "Hiba: {0}"
+ self.TASK_ERROR = "Feladat hiba: {0}"
+ self.APPLICATION_CLOSING = "Alkalmazás bezárása"
+ self.APPLICATION_CLOSED = "Alkalmazás bezárva"
+ self.SELECT_QUANTIZATION_TYPE = "Válassza ki a kvantálási típust"
+ self.ALLOWS_REQUANTIZING = (
+ "Lehetővé teszi a már kvantált tenzorok újrakvantálását"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Az output.weight-et (újra)kvantálatlanul hagyja"
+ self.DISABLE_K_QUANT_MIXTURES = "Tiltsa le a k-kvant keverékeket, és kvantálja az összes tenzort ugyanarra a típusra"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Használja a fájlban lévő adatokat fontossági mátrixként a kvantálási optimalizálásokhoz"
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Használja a fontossági mátrixot ezekre a tenzorokra"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Ne használja a fontossági mátrixot ezekre a tenzorokra"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Kimeneti tenzor típusa:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Használja ezt a típust az output.weight tenzorhoz"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token beágyazási típusa:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Használja ezt a típust a token beágyazási tenzorhoz"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "A kvantált modellt ugyanazokban a szegmensekben fogja generálni, mint a bemenet"
+ self.OVERRIDE_MODEL_METADATA = "Modell metaadatok felülbírálása"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix generáláshoz bemeneti adatfájl"
+ self.MODEL_TO_BE_QUANTIZED = "Kvantálandó modell"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "A generált IMatrix kimeneti útvonala"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "Milyen gyakran mentse az IMatrixot"
+ self.SET_GPU_OFFLOAD_VALUE = "GPU tehermentesítési érték beállítása (-ngl)"
+ self.COMPLETED = "Befejezve"
+ self.REFRESH_MODELS = "Modellek frissítése"
+
+
+class _BritishEnglish(_Localization):
+ def __init__(self):
+ super().__init__()
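+        # Uses -ise/-isation spellings throughout; deliberate cases are flagged "# Note the British spelling"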
+ self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantiser)"
+ self.RAM_USAGE = "RAM Usage:"
+ self.CPU_USAGE = "CPU Usage:"
+ self.BACKEND = "Llama.cpp Backend:"
+ self.REFRESH_BACKENDS = "Refresh Backends"
+ self.MODELS_PATH = "Models Path:"
+ self.OUTPUT_PATH = "Output Path:"
+ self.LOGS_PATH = "Logs Path:"
+ self.BROWSE = "Browse"
+ self.AVAILABLE_MODELS = "Available Models:"
+ self.QUANTIZATION_TYPE = "Quantisation Type:" # Note the British spelling
+ self.ALLOW_REQUANTIZE = "Allow Requantise"
+ self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
+ self.PURE = "Pure"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Include Weights:"
+ self.EXCLUDE_WEIGHTS = "Exclude Weights:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
+ self.KEEP_SPLIT = "Keep Split"
+ self.KV_OVERRIDES = "KV Overrides:"
+ self.ADD_NEW_OVERRIDE = "Add new override"
+ self.QUANTIZE_MODEL = "Quantise Model" # Note the British spelling
+ self.SAVE_PRESET = "Save Preset"
+ self.LOAD_PRESET = "Load Preset"
+ self.TASKS = "Tasks:"
+ self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
+ self.SELECT_RELEASE = "Select Release:"
+ self.SELECT_ASSET = "Select Asset:"
+ self.EXTRACT_CUDA_FILES = "Extract CUDA files"
+ self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
+ self.DOWNLOAD = "Download"
+ self.IMATRIX_GENERATION = "IMatrix Generation"
+ self.DATA_FILE = "Data File:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Output:"
+ self.OUTPUT_FREQUENCY = "Output Frequency:"
+ self.GPU_OFFLOAD = "GPU Offload:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Generate IMatrix"
+ self.ERROR = "Error"
+ self.WARNING = "Warning"
+ self.PROPERTIES = "Properties"
+ self.CANCEL = "Cancel"
+ self.RESTART = "Restart"
+ self.DELETE = "Delete"
+ self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
+ self.TASK_RUNNING_WARNING = (
+ "Some tasks are still running. Are you sure you want to quit?"
+ )
+ self.YES = "Yes"
+ self.NO = "No"
+ self.DOWNLOAD_COMPLETE = "Download Complete"
+ self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
+ self.PRESET_SAVED = "Preset Saved"
+ self.PRESET_LOADED = "Preset Loaded"
+ self.NO_ASSET_SELECTED = "No asset selected"
+ self.DOWNLOAD_FAILED = "Download failed"
+ self.NO_BACKEND_SELECTED = "No backend selected"
+ self.NO_MODEL_SELECTED = "No model selected"
+ self.REFRESH_RELEASES = "Refresh Releases"
+ self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
+ self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
+ self.LOG_FOR = "Log for {0}"
+ self.ALL_FILES = "All Files (*)"
+ self.GGUF_FILES = "GGUF Files (*.gguf)"
+ self.DAT_FILES = "DAT Files (*.dat)"
+ self.JSON_FILES = "JSON Files (*.json)"
+ self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
+ self.INITIALIZING_AUTOGGUF = (
+ "Initialising AutoGGUF application" # Note the British spelling
+ )
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = (
+ "AutoGGUF initialisation complete" # Note the British spelling
+ )
+ self.REFRESHING_BACKENDS = "Refreshing backends"
+ self.NO_BACKENDS_AVAILABLE = "No backends available"
+ self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
+ self.SAVING_PRESET = "Saving preset"
+ self.PRESET_SAVED_TO = "Preset saved to {0}"
+ self.LOADING_PRESET = "Loading preset"
+ self.PRESET_LOADED_FROM = "Preset loaded from {0}"
+ self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
+ self.SAVING_TASK_PRESET = "Saving task preset for {0}"
+ self.TASK_PRESET_SAVED = "Task Preset Saved"
+ self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
+ self.RESTARTING_TASK = "Restarting task: {0}"
+ self.IN_PROGRESS = "In Progress"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp binary downloaded and extracted to {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
+ self.UPDATING_ASSET_LIST = "Updating asset list"
+ self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
+ self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
+ self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
+ self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
+ self.DOWNLOAD_ERROR = "Download error: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
+ self.CANCELLING_TASK = "Cancelling task: {0}"
+ self.CANCELED = "Cancelled"
+ self.DELETING_TASK = "Deleting task: {0}"
+ self.LOADING_MODELS = "Loading models"
+ self.LOADED_MODELS = "Loaded {0} models"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
+ self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
+ self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
+ self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
+ self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
+ self.SELECT_IMATRIX_FILE = "Select IMatrix File"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = (
+ "Validating quantisation inputs" # Note the British spelling
+ )
+ self.MODELS_PATH_REQUIRED = "Models path is required"
+ self.OUTPUT_PATH_REQUIRED = "Output path is required"
+ self.LOGS_PATH_REQUIRED = "Logs path is required"
+ self.STARTING_MODEL_QUANTIZATION = (
+ "Starting model quantisation" # Note the British spelling
+ )
+ self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
+ self.QUANTIZATION_TASK_STARTED = (
+ "Quantisation task started for {0}" # Note the British spelling
+ )
+ self.ERROR_STARTING_QUANTIZATION = (
+ "Error starting quantisation: {0}" # Note the British spelling
+ )
+ self.UPDATING_MODEL_INFO = "Updating model info: {0}"
+ self.TASK_FINISHED = "Task finished: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
+ self.SELECT_DATA_FILE = "Select Data File"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
+ self.SELECT_MODEL_FILE = "Select Model File"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
+ self.SELECT_OUTPUT_FILE = "Select Output File"
+ self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
+ self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
+ self.GENERATING_IMATRIX = "Generating IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Error starting IMatrix generation: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
+ self.ERROR_MESSAGE = "Error: {0}"
+ self.TASK_ERROR = "Task error: {0}"
+ self.APPLICATION_CLOSING = "Application closing"
+ self.APPLICATION_CLOSED = "Application closed"
+ self.SELECT_QUANTIZATION_TYPE = (
+ "Select the quantisation type" # Note the British spelling
+ )
+ self.ALLOWS_REQUANTIZING = "Allows requantising tensors that have already been quantised" # Note the British spelling
+ self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantised"
+ self.DISABLE_K_QUANT_MIXTURES = "Disable k-quant mixtures and quantise all tensors to the same type" # Note the British spelling
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = "Use data in file as importance matrix for quant optimisations" # Note the British spelling
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Use importance matrix for these tensors"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Don't use importance matrix for these tensors"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Use this type for the output.weight tensor"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Use this type for the token embeddings tensor"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = "Will generate quantised model in the same shards as input" # Note the British spelling
+ self.OVERRIDE_MODEL_METADATA = "Override model metadata"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
+ self.MODEL_TO_BE_QUANTIZED = (
+ "Model to be quantised" # Note the British spelling
+ )
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
+ self.COMPLETED = "Completed"
+ self.REFRESH_MODELS = "Refresh Models"
+
+
+class _IndianEnglish(_Localization):
+ def __init__(self):
+ super().__init__()
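+        # Indian English here keeps -ize endings but retains British "cancelled" and "optimisations"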
+ self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)"
+ self.RAM_USAGE = "RAM Usage:"
+ self.CPU_USAGE = "CPU Usage:"
+ self.BACKEND = "Llama.cpp Backend:"
+ self.REFRESH_BACKENDS = "Refresh Backends"
+ self.MODELS_PATH = "Models Path:"
+ self.OUTPUT_PATH = "Output Path:"
+ self.LOGS_PATH = "Logs Path:"
+ self.BROWSE = "Browse"
+ self.AVAILABLE_MODELS = "Available Models:"
+ self.QUANTIZATION_TYPE = "Quantization Type:"
+ self.ALLOW_REQUANTIZE = "Allow Requantize"
+ self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
+ self.PURE = "Pure"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Include Weights:"
+ self.EXCLUDE_WEIGHTS = "Exclude Weights:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
+ self.KEEP_SPLIT = "Keep Split"
+ self.KV_OVERRIDES = "KV Overrides:"
+ self.ADD_NEW_OVERRIDE = "Add new override"
+ self.QUANTIZE_MODEL = "Quantize Model"
+ self.SAVE_PRESET = "Save Preset"
+ self.LOAD_PRESET = "Load Preset"
+ self.TASKS = "Tasks:"
+ self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
+ self.SELECT_RELEASE = "Select Release:"
+ self.SELECT_ASSET = "Select Asset:"
+ self.EXTRACT_CUDA_FILES = "Extract CUDA files"
+ self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
+ self.DOWNLOAD = "Download"
+ self.IMATRIX_GENERATION = "IMatrix Generation"
+ self.DATA_FILE = "Data File:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Output:"
+ self.OUTPUT_FREQUENCY = "Output Frequency:"
+ self.GPU_OFFLOAD = "GPU Offload:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Generate IMatrix"
+ self.ERROR = "Error"
+ self.WARNING = "Warning"
+ self.PROPERTIES = "Properties"
+ self.CANCEL = "Cancel"
+ self.RESTART = "Restart"
+ self.DELETE = "Delete"
+ self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
+ self.TASK_RUNNING_WARNING = (
+ "Some tasks are still running. Are you sure you want to quit?"
+ )
+ self.YES = "Yes"
+ self.NO = "No"
+ self.DOWNLOAD_COMPLETE = "Download Complete"
+ self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
+ self.PRESET_SAVED = "Preset Saved"
+ self.PRESET_LOADED = "Preset Loaded"
+ self.NO_ASSET_SELECTED = "No asset selected"
+ self.DOWNLOAD_FAILED = "Download failed"
+ self.NO_BACKEND_SELECTED = "No backend selected"
+ self.NO_MODEL_SELECTED = "No model selected"
+ self.REFRESH_RELEASES = "Refresh Releases"
+ self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
+ self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
+ self.LOG_FOR = "Log for {0}"
+ self.ALL_FILES = "All Files (*)"
+ self.GGUF_FILES = "GGUF Files (*.gguf)"
+ self.DAT_FILES = "DAT Files (*.dat)"
+ self.JSON_FILES = "JSON Files (*.json)"
+ self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
+ self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete"
+ self.REFRESHING_BACKENDS = "Refreshing backends"
+ self.NO_BACKENDS_AVAILABLE = "No backends available"
+ self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
+ self.SAVING_PRESET = "Saving preset"
+ self.PRESET_SAVED_TO = "Preset saved to {0}"
+ self.LOADING_PRESET = "Loading preset"
+ self.PRESET_LOADED_FROM = "Preset loaded from {0}"
+ self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
+ self.SAVING_TASK_PRESET = "Saving task preset for {0}"
+ self.TASK_PRESET_SAVED = "Task Preset Saved"
+ self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
+ self.RESTARTING_TASK = "Restarting task: {0}"
+ self.IN_PROGRESS = "In Progress"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp binary downloaded and extracted to {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
+ self.UPDATING_ASSET_LIST = "Updating asset list"
+ self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
+ self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
+ self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
+ self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
+ self.DOWNLOAD_ERROR = "Download error: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
+ self.CANCELLING_TASK = "Cancelling task: {0}"
+ self.CANCELED = "Cancelled"
+ self.DELETING_TASK = "Deleting task: {0}"
+ self.LOADING_MODELS = "Loading models"
+ self.LOADED_MODELS = "Loaded {0} models"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
+ self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
+ self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
+ self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
+ self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
+ self.SELECT_IMATRIX_FILE = "Select IMatrix File"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs"
+ self.MODELS_PATH_REQUIRED = "Models path is required"
+ self.OUTPUT_PATH_REQUIRED = "Output path is required"
+ self.LOGS_PATH_REQUIRED = "Logs path is required"
+ self.STARTING_MODEL_QUANTIZATION = "Starting model quantization"
+ self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
+ self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}"
+ self.UPDATING_MODEL_INFO = "Updating model info: {0}"
+ self.TASK_FINISHED = "Task finished: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
+ self.SELECT_DATA_FILE = "Select Data File"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
+ self.SELECT_MODEL_FILE = "Select Model File"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
+ self.SELECT_OUTPUT_FILE = "Select Output File"
+ self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
+ self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
+ self.GENERATING_IMATRIX = "Generating IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Error starting IMatrix generation: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
+ self.ERROR_MESSAGE = "Error: {0}"
+ self.TASK_ERROR = "Task error: {0}"
+ self.APPLICATION_CLOSING = "Application closing"
+ self.APPLICATION_CLOSED = "Application closed"
+ self.SELECT_QUANTIZATION_TYPE = "Select the quantization type"
+ self.ALLOWS_REQUANTIZING = (
+ "Allows requantizing tensors that have already been quantized"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Disable k-quant mixtures and quantize all tensors to the same type"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "Use data in file as importance matrix for quant optimisations"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Use importance matrix for these tensors"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Don't use importance matrix for these tensors"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Use this type for the output.weight tensor"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Use this type for the token embeddings tensor"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Will generate quantized model in the same shards as input"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Override model metadata"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
+ self.MODEL_TO_BE_QUANTIZED = "Model to be quantized"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
+ self.COMPLETED = "Completed"
+ self.REFRESH_MODELS = "Refresh Models"
+
+
+class _CanadianEnglish(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF (automated GGUF model quantizer)"
+ self.RAM_USAGE = "RAM Usage:"
+ self.CPU_USAGE = "CPU Usage:"
+ self.BACKEND = "Llama.cpp Backend:"
+ self.REFRESH_BACKENDS = "Refresh Backends"
+ self.MODELS_PATH = "Models Path:"
+ self.OUTPUT_PATH = "Output Path:"
+ self.LOGS_PATH = "Logs Path:"
+ self.BROWSE = "Browse"
+ self.AVAILABLE_MODELS = "Available Models:"
+ self.QUANTIZATION_TYPE = "Quantization Type:"
+ self.ALLOW_REQUANTIZE = "Allow Requantize"
+ self.LEAVE_OUTPUT_TENSOR = "Leave Output Tensor"
+ self.PURE = "Pure"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "Include Weights:"
+ self.EXCLUDE_WEIGHTS = "Exclude Weights:"
+ self.USE_OUTPUT_TENSOR_TYPE = "Use Output Tensor Type"
+ self.USE_TOKEN_EMBEDDING_TYPE = "Use Token Embedding Type"
+ self.KEEP_SPLIT = "Keep Split"
+ self.KV_OVERRIDES = "KV Overrides:"
+ self.ADD_NEW_OVERRIDE = "Add new override"
+ self.QUANTIZE_MODEL = "Quantize Model"
+ self.SAVE_PRESET = "Save Preset"
+ self.LOAD_PRESET = "Load Preset"
+ self.TASKS = "Tasks:"
+ self.DOWNLOAD_LLAMACPP = "Download llama.cpp"
+ self.SELECT_RELEASE = "Select Release:"
+ self.SELECT_ASSET = "Select Asset:"
+ self.EXTRACT_CUDA_FILES = "Extract CUDA files"
+ self.SELECT_CUDA_BACKEND = "Select CUDA Backend:"
+ self.DOWNLOAD = "Download"
+ self.IMATRIX_GENERATION = "IMatrix Generation"
+ self.DATA_FILE = "Data File:"
+ self.MODEL = "Model:"
+ self.OUTPUT = "Output:"
+ self.OUTPUT_FREQUENCY = "Output Frequency:"
+ self.GPU_OFFLOAD = "GPU Offload:"
+ self.AUTO = "Auto"
+ self.GENERATE_IMATRIX = "Generate IMatrix"
+ self.ERROR = "Error"
+ self.WARNING = "Warning"
+ self.PROPERTIES = "Properties"
+ self.CANCEL = "Cancel"
+ self.RESTART = "Restart"
+ self.DELETE = "Delete"
+ self.CONFIRM_DELETION = "Are you sure you want to delete this task?"
+ self.TASK_RUNNING_WARNING = (
+ "Some tasks are still running. Are you sure you want to quit?"
+ )
+ self.YES = "Yes"
+ self.NO = "No"
+ self.DOWNLOAD_COMPLETE = "Download Complete"
+ self.CUDA_EXTRACTION_FAILED = "CUDA Extraction Failed"
+ self.PRESET_SAVED = "Preset Saved"
+ self.PRESET_LOADED = "Preset Loaded"
+ self.NO_ASSET_SELECTED = "No asset selected"
+ self.DOWNLOAD_FAILED = "Download failed"
+ self.NO_BACKEND_SELECTED = "No backend selected"
+ self.NO_MODEL_SELECTED = "No model selected"
+ self.REFRESH_RELEASES = "Refresh Releases"
+ self.NO_SUITABLE_CUDA_BACKENDS = "No suitable CUDA backends found"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
+ self.CUDA_FILES_EXTRACTED = "CUDA files extracted to"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.ERROR_FETCHING_RELEASES = "Error fetching releases: {0}"
+ self.CONFIRM_DELETION_TITLE = "Confirm Deletion"
+ self.LOG_FOR = "Log for {0}"
+ self.ALL_FILES = "All Files (*)"
+ self.GGUF_FILES = "GGUF Files (*.gguf)"
+ self.DAT_FILES = "DAT Files (*.dat)"
+ self.JSON_FILES = "JSON Files (*.json)"
+ self.FAILED_LOAD_PRESET = "Failed to load preset: {0}"
+ self.INITIALIZING_AUTOGGUF = "Initializing AutoGGUF application"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF initialization complete"
+ self.REFRESHING_BACKENDS = "Refreshing backends"
+ self.NO_BACKENDS_AVAILABLE = "No backends available"
+ self.FOUND_VALID_BACKENDS = "Found {0} valid backends"
+ self.SAVING_PRESET = "Saving preset"
+ self.PRESET_SAVED_TO = "Preset saved to {0}"
+ self.LOADING_PRESET = "Loading preset"
+ self.PRESET_LOADED_FROM = "Preset loaded from {0}"
+ self.ADDING_KV_OVERRIDE = "Adding KV override: {0}"
+ self.SAVING_TASK_PRESET = "Saving task preset for {0}"
+ self.TASK_PRESET_SAVED = "Task Preset Saved"
+ self.TASK_PRESET_SAVED_TO = "Task preset saved to {0}"
+ self.RESTARTING_TASK = "Restarting task: {0}"
+ self.IN_PROGRESS = "In Progress"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "Download finished. Extracted to: {0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = "llama.cpp binary downloaded and extracted to {0}\nCUDA files extracted to {1}"
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = (
+ "No suitable CUDA backend found for extraction"
+ )
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp binary downloaded and extracted to {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "Refreshing llama.cpp releases"
+ self.UPDATING_ASSET_LIST = "Updating asset list"
+ self.UPDATING_CUDA_OPTIONS = "Updating CUDA options"
+ self.STARTING_LLAMACPP_DOWNLOAD = "Starting llama.cpp download"
+ self.UPDATING_CUDA_BACKENDS = "Updating CUDA backends"
+ self.NO_CUDA_BACKEND_SELECTED = "No CUDA backend selected for extraction"
+ self.EXTRACTING_CUDA_FILES = "Extracting CUDA files from {0} to {1}"
+ self.DOWNLOAD_ERROR = "Download error: {0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "Showing task context menu"
+ self.SHOWING_PROPERTIES_FOR_TASK = "Showing properties for task: {0}"
+ self.CANCELLING_TASK = "Cancelling task: {0}"
+ self.CANCELED = "Cancelled"
+ self.DELETING_TASK = "Deleting task: {0}"
+ self.LOADING_MODELS = "Loading models"
+ self.LOADED_MODELS = "Loaded {0} models"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "Browsing for models directory"
+ self.SELECT_MODELS_DIRECTORY = "Select Models Directory"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "Browsing for output directory"
+ self.SELECT_OUTPUT_DIRECTORY = "Select Output Directory"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "Browsing for logs directory"
+ self.SELECT_LOGS_DIRECTORY = "Select Logs Directory"
+ self.BROWSING_FOR_IMATRIX_FILE = "Browsing for IMatrix file"
+ self.SELECT_IMATRIX_FILE = "Select IMatrix File"
+ self.RAM_USAGE_FORMAT = "{0:.1f}% ({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU Usage: {0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "Validating quantization inputs"
+ self.MODELS_PATH_REQUIRED = "Models path is required"
+ self.OUTPUT_PATH_REQUIRED = "Output path is required"
+ self.LOGS_PATH_REQUIRED = "Logs path is required"
+ self.STARTING_MODEL_QUANTIZATION = "Starting model quantization"
+ self.INPUT_FILE_NOT_EXIST = "Input file '{0}' does not exist."
+ self.QUANTIZING_MODEL_TO = "Quantizing {0} to {1}"
+ self.QUANTIZATION_TASK_STARTED = "Quantization task started for {0}"
+ self.ERROR_STARTING_QUANTIZATION = "Error starting quantization: {0}"
+ self.UPDATING_MODEL_INFO = "Updating model info: {0}"
+ self.TASK_FINISHED = "Task finished: {0}"
+ self.SHOWING_TASK_DETAILS_FOR = "Showing task details for: {0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "Browsing for IMatrix data file"
+ self.SELECT_DATA_FILE = "Select Data File"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "Browsing for IMatrix model file"
+ self.SELECT_MODEL_FILE = "Select Model File"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "Browsing for IMatrix output file"
+ self.SELECT_OUTPUT_FILE = "Select Output File"
+ self.STARTING_IMATRIX_GENERATION = "Starting IMatrix generation"
+ self.BACKEND_PATH_NOT_EXIST = "Backend path does not exist: {0}"
+ self.GENERATING_IMATRIX = "Generating IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = (
+ "Error starting IMatrix generation: {0}"
+ )
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix generation task started"
+ self.ERROR_MESSAGE = "Error: {0}"
+ self.TASK_ERROR = "Task error: {0}"
+ self.APPLICATION_CLOSING = "Application closing"
+ self.APPLICATION_CLOSED = "Application closed"
+ self.SELECT_QUANTIZATION_TYPE = "Select the quantization type"
+ self.ALLOWS_REQUANTIZING = (
+ "Allows requantizing tensors that have already been quantized"
+ )
+ self.LEAVE_OUTPUT_WEIGHT = "Will leave output.weight un(re)quantized"
+ self.DISABLE_K_QUANT_MIXTURES = (
+ "Disable k-quant mixtures and quantize all tensors to the same type"
+ )
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "Use data in file as importance matrix for quant optimizations"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Use importance matrix for these tensors"
+ )
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = (
+ "Don't use importance matrix for these tensors"
+ )
+ self.OUTPUT_TENSOR_TYPE = "Output Tensor Type:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = (
+ "Use this type for the output.weight tensor"
+ )
+ self.TOKEN_EMBEDDING_TYPE = "Token Embedding Type:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = (
+ "Use this type for the token embeddings tensor"
+ )
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "Will generate quantized model in the same shards as input"
+ )
+ self.OVERRIDE_MODEL_METADATA = "Override model metadata"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "Input data file for IMatrix generation"
+ self.MODEL_TO_BE_QUANTIZED = "Model to be quantized"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "Output path for the generated IMatrix"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "How often to save the IMatrix"
+ self.SET_GPU_OFFLOAD_VALUE = "Set GPU offload value (-ngl)"
+ self.COMPLETED = "Completed"
+ self.REFRESH_MODELS = "Refresh Models"
+
+
+class _TraditionalChinese(_Localization):
+ def __init__(self):
+ super().__init__()
+ self.WINDOW_TITLE = "AutoGGUF(自動 GGUF 模型量化器)"
+ self.RAM_USAGE = "RAM 使用量:"
+ self.CPU_USAGE = "CPU 使用率:"
+ self.BACKEND = "Llama.cpp 後端:"
+ self.REFRESH_BACKENDS = "重新整理後端"
+ self.MODELS_PATH = "模型路徑:"
+ self.OUTPUT_PATH = "輸出路徑:"
+ self.LOGS_PATH = "日誌路徑:"
+ self.BROWSE = "瀏覽"
+ self.AVAILABLE_MODELS = "可用模型:"
+ self.QUANTIZATION_TYPE = "量化類型:"
+ self.ALLOW_REQUANTIZE = "允許重新量化"
+ self.LEAVE_OUTPUT_TENSOR = "保留輸出張量"
+ self.PURE = "純粹"
+ self.IMATRIX = "IMatrix:"
+ self.INCLUDE_WEIGHTS = "包含權重:"
+ self.EXCLUDE_WEIGHTS = "排除權重:"
+ self.USE_OUTPUT_TENSOR_TYPE = "使用輸出張量類型"
+ self.USE_TOKEN_EMBEDDING_TYPE = "使用權杖嵌入類型"
+ self.KEEP_SPLIT = "保持分割"
+ self.KV_OVERRIDES = "KV 覆蓋:"
+ self.ADD_NEW_OVERRIDE = "新增覆蓋"
+ self.QUANTIZE_MODEL = "量化模型"
+ self.SAVE_PRESET = "儲存預設"
+ self.LOAD_PRESET = "載入預設"
+ self.TASKS = "任務:"
+ self.DOWNLOAD_LLAMACPP = "下載 llama.cpp"
+ self.SELECT_RELEASE = "選擇版本:"
+ self.SELECT_ASSET = "選擇資源:"
+ self.EXTRACT_CUDA_FILES = "解壓縮 CUDA 檔案"
+ self.SELECT_CUDA_BACKEND = "選擇 CUDA 後端:"
+ self.DOWNLOAD = "下載"
+ self.IMATRIX_GENERATION = "IMatrix 產生"
+ self.DATA_FILE = "資料檔案:"
+ self.MODEL = "模型:"
+ self.OUTPUT = "輸出:"
+ self.OUTPUT_FREQUENCY = "輸出頻率:"
+ self.GPU_OFFLOAD = "GPU 卸載:"
+ self.AUTO = "自動"
+ self.GENERATE_IMATRIX = "產生 IMatrix"
+ self.ERROR = "錯誤"
+ self.WARNING = "警告"
+ self.PROPERTIES = "屬性"
+ self.CANCEL = "取消"
+ self.RESTART = "重新啟動"
+ self.DELETE = "刪除"
+ self.CONFIRM_DELETION = "您確定要刪除此任務嗎?"
+ self.TASK_RUNNING_WARNING = "某些任務仍在執行中。您確定要結束嗎?"
+ self.YES = "是"
+ self.NO = "否"
+ self.DOWNLOAD_COMPLETE = "下載完成"
+ self.CUDA_EXTRACTION_FAILED = "CUDA 解壓縮失敗"
+ self.PRESET_SAVED = "預設已儲存"
+ self.PRESET_LOADED = "預設已載入"
+ self.NO_ASSET_SELECTED = "未選擇資源"
+ self.DOWNLOAD_FAILED = "下載失敗"
+ self.NO_BACKEND_SELECTED = "未選擇後端"
+ self.NO_MODEL_SELECTED = "未選擇模型"
+ self.REFRESH_RELEASES = "重新整理版本"
+ self.NO_SUITABLE_CUDA_BACKENDS = "找不到合適的 CUDA 後端"
+ self.LLAMACPP_DOWNLOADED_EXTRACTED = (
+ "llama.cpp 二進位檔案已下載並解壓縮至 {0}\nCUDA 檔案已解壓縮至 {1}"
+ )
+ self.CUDA_FILES_EXTRACTED = "CUDA 檔案已解壓縮至"
+ self.NO_SUITABLE_CUDA_BACKEND_EXTRACTION = "找不到合適的 CUDA 後端進行解壓縮"
+ self.ERROR_FETCHING_RELEASES = "擷取版本時發生錯誤:{0}"
+ self.CONFIRM_DELETION_TITLE = "確認刪除"
+ self.LOG_FOR = "{0} 的日誌"
+ self.ALL_FILES = "所有檔案 (*)"
+ self.GGUF_FILES = "GGUF 檔案 (*.gguf)"
+ self.DAT_FILES = "DAT 檔案 (*.dat)"
+ self.JSON_FILES = "JSON 檔案 (*.json)"
+ self.FAILED_LOAD_PRESET = "載入預設失敗:{0}"
+ self.INITIALIZING_AUTOGGUF = "正在初始化 AutoGGUF 應用程式"
+ self.AUTOGGUF_INITIALIZATION_COMPLETE = "AutoGGUF 初始化完成"
+ self.REFRESHING_BACKENDS = "正在重新整理後端"
+ self.NO_BACKENDS_AVAILABLE = "沒有可用的後端"
+ self.FOUND_VALID_BACKENDS = "找到 {0} 個有效的後端"
+ self.SAVING_PRESET = "正在儲存預設"
+ self.PRESET_SAVED_TO = "預設已儲存至 {0}"
+ self.LOADING_PRESET = "正在載入預設"
+ self.PRESET_LOADED_FROM = "從 {0} 載入了預設"
+ self.ADDING_KV_OVERRIDE = "正在新增 KV 覆蓋:{0}"
+ self.SAVING_TASK_PRESET = "正在儲存 {0} 的任務預設"
+ self.TASK_PRESET_SAVED = "任務預設已儲存"
+ self.TASK_PRESET_SAVED_TO = "任務預設已儲存至 {0}"
+ self.RESTARTING_TASK = "正在重新啟動任務:{0}"
+ self.IN_PROGRESS = "處理中"
+ self.DOWNLOAD_FINISHED_EXTRACTED_TO = "下載完成。已解壓縮至:{0}"
+ self.LLAMACPP_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp 二進位檔案已下載並解壓縮至 {0}\nCUDA 檔案已解壓縮至 {1}"
+ )
+ self.NO_SUITABLE_CUDA_BACKEND_FOUND = "找不到合適的 CUDA 後端進行解壓縮"
+ self.LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED = (
+ "llama.cpp 二進位檔案已下載並解壓縮至 {0}"
+ )
+ self.REFRESHING_LLAMACPP_RELEASES = "正在重新整理 llama.cpp 版本"
+ self.UPDATING_ASSET_LIST = "正在更新資源清單"
+ self.UPDATING_CUDA_OPTIONS = "正在更新 CUDA 選項"
+ self.STARTING_LLAMACPP_DOWNLOAD = "正在開始下載 llama.cpp"
+ self.UPDATING_CUDA_BACKENDS = "正在更新 CUDA 後端"
+ self.NO_CUDA_BACKEND_SELECTED = "未選擇要解壓縮的 CUDA 後端"
+ self.EXTRACTING_CUDA_FILES = "正在從 {0} 解壓縮 CUDA 檔案至 {1}"
+ self.DOWNLOAD_ERROR = "下載錯誤:{0}"
+ self.SHOWING_TASK_CONTEXT_MENU = "正在顯示任務操作選單"
+ self.SHOWING_PROPERTIES_FOR_TASK = "正在顯示任務的屬性:{0}"
+ self.CANCELLING_TASK = "正在取消任務:{0}"
+ self.CANCELED = "已取消"
+ self.DELETING_TASK = "正在刪除任務:{0}"
+ self.LOADING_MODELS = "正在載入模型"
+ self.LOADED_MODELS = "已載入 {0} 個模型"
+ self.BROWSING_FOR_MODELS_DIRECTORY = "正在瀏覽模型目錄"
+ self.SELECT_MODELS_DIRECTORY = "選擇模型目錄"
+ self.BROWSING_FOR_OUTPUT_DIRECTORY = "正在瀏覽輸出目錄"
+ self.SELECT_OUTPUT_DIRECTORY = "選擇輸出目錄"
+ self.BROWSING_FOR_LOGS_DIRECTORY = "正在瀏覽日誌目錄"
+ self.SELECT_LOGS_DIRECTORY = "選擇日誌目錄"
+ self.BROWSING_FOR_IMATRIX_FILE = "正在瀏覽 IMatrix 檔案"
+ self.SELECT_IMATRIX_FILE = "選擇 IMatrix 檔案"
+ self.RAM_USAGE_FORMAT = "{0:.1f}%({1} MB / {2} MB)"
+ self.CPU_USAGE_FORMAT = "CPU 使用率:{0:.1f}%"
+ self.VALIDATING_QUANTIZATION_INPUTS = "正在驗證量化輸入"
+ self.MODELS_PATH_REQUIRED = "需要模型路徑"
+ self.OUTPUT_PATH_REQUIRED = "需要輸出路徑"
+ self.LOGS_PATH_REQUIRED = "需要日誌路徑"
+ self.STARTING_MODEL_QUANTIZATION = "正在開始模型量化"
+ self.INPUT_FILE_NOT_EXIST = "輸入檔案 '{0}' 不存在。"
+ self.QUANTIZING_MODEL_TO = "正在將 {0} 量化為 {1}"
+ self.QUANTIZATION_TASK_STARTED = "已啟動 {0} 的量化任務"
+ self.ERROR_STARTING_QUANTIZATION = "啟動量化時發生錯誤:{0}"
+ self.UPDATING_MODEL_INFO = "正在更新模型資訊:{0}"
+ self.TASK_FINISHED = "任務完成:{0}"
+ self.SHOWING_TASK_DETAILS_FOR = "正在顯示任務詳細資訊:{0}"
+ self.BROWSING_FOR_IMATRIX_DATA_FILE = "正在瀏覽 IMatrix 資料檔案"
+ self.SELECT_DATA_FILE = "選擇資料檔案"
+ self.BROWSING_FOR_IMATRIX_MODEL_FILE = "正在瀏覽 IMatrix 模型檔案"
+ self.SELECT_MODEL_FILE = "選擇模型檔案"
+ self.BROWSING_FOR_IMATRIX_OUTPUT_FILE = "正在瀏覽 IMatrix 輸出檔案"
+ self.SELECT_OUTPUT_FILE = "選擇輸出檔案"
+ self.STARTING_IMATRIX_GENERATION = "正在開始 IMatrix 產生"
+ self.BACKEND_PATH_NOT_EXIST = "後端路徑不存在:{0}"
+ self.GENERATING_IMATRIX = "正在產生 IMatrix"
+ self.ERROR_STARTING_IMATRIX_GENERATION = "啟動 IMatrix 產生時發生錯誤:{0}"
+ self.IMATRIX_GENERATION_TASK_STARTED = "IMatrix 產生任務已啟動"
+ self.ERROR_MESSAGE = "錯誤:{0}"
+ self.TASK_ERROR = "任務錯誤:{0}"
+ self.APPLICATION_CLOSING = "應用程式正在關閉"
+ self.APPLICATION_CLOSED = "應用程式已關閉"
+ self.SELECT_QUANTIZATION_TYPE = "請選擇量化類型"
+ self.ALLOWS_REQUANTIZING = "允許重新量化已量化的張量"
+ self.LEAVE_OUTPUT_WEIGHT = "將保留 output.weight 不被(重新)量化"
+ self.DISABLE_K_QUANT_MIXTURES = "停用 k-quant 混合並將所有張量量化為相同類型"
+ self.USE_DATA_AS_IMPORTANCE_MATRIX = (
+ "使用檔案中的資料作為量化最佳化的重要性矩陣"
+ )
+ self.USE_IMPORTANCE_MATRIX_FOR_TENSORS = "對這些張量使用重要性矩陣"
+ self.DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS = "不要對這些張量使用重要性矩陣"
+ self.OUTPUT_TENSOR_TYPE = "輸出張量類型:"
+ self.USE_THIS_TYPE_FOR_OUTPUT_WEIGHT = "對 output.weight 張量使用此類型"
+ self.TOKEN_EMBEDDING_TYPE = "權杖嵌入類型:"
+ self.USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS = "對權杖嵌入張量使用此類型"
+ self.WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS = (
+ "將在與輸入相同的分片中產生量化模型"
+ )
+ self.OVERRIDE_MODEL_METADATA = "覆蓋模型中繼資料"
+ self.INPUT_DATA_FILE_FOR_IMATRIX = "IMatrix 產生的輸入資料檔案"
+ self.MODEL_TO_BE_QUANTIZED = "要量化的模型"
+ self.OUTPUT_PATH_FOR_GENERATED_IMATRIX = "產生的 IMatrix 的輸出路徑"
+ self.HOW_OFTEN_TO_SAVE_IMATRIX = "儲存 IMatrix 的頻率"
+ self.SET_GPU_OFFLOAD_VALUE = "設定 GPU 卸載值(-ngl)"
+ self.COMPLETED = "已完成"
+ self.REFRESH_MODELS = "重新整理模型"
+
+
+# Dictionary mapping language codes to localization classes
+_languages = {
+ "en-US": _English, # American English
+ "fr-FR": _French, # Metropolitan French
+ "zh-CN": _SimplifiedChinese, # Simplified Chinese
+ "es-ES": _Spanish, # Spanish (Spain)
+ "hi-IN": _Hindi, # Hindi (India)
+ "ru-RU": _Russian, # Russian (Russia)
+ "uk-UA": _Ukrainian, # Ukrainian (Ukraine)
+ "ja-JP": _Japanese, # Japanese (Japan)
+ "de-DE": _German, # German (Germany)
+ "pt-BR": _Portuguese, # Portuguese (Brazil)
+ "ar-SA": _Arabic, # Arabic (Saudi Arabia)
+ "ko-KR": _Korean, # Korean (Korea)
+ "it-IT": _Italian, # Italian (Italy)
+ "tr-TR": _Turkish, # Turkish (Turkey)
+ "nl-NL": _Dutch, # Dutch (Netherlands)
+ "fi-FI": _Finnish, # Finnish (Finland)
+ "bn-BD": _Bengali, # Bengali (Bangladesh)
+ "cs-CZ": _Czech, # Czech (Czech Republic)
+ "pl-PL": _Polish, # Polish (Poland)
+ "ro-RO": _Romanian, # Romanian (Romania)
+ "el-GR": _Greek, # Greek (Greece)
+ "pt-PT": _Portuguese_PT, # Portuguese (Portugal)
+ "hu-HU": _Hungarian, # Hungarian (Hungary)
+ "en-GB": _BritishEnglish, # British English
+ "fr-CA": _CanadianFrench, # Canadian French
+ "en-IN": _IndianEnglish, # Indian English
+ "en-CA": _CanadianEnglish, # Canadian English
+ "zh-TW": _TraditionalChinese, # Traditional Chinese (Taiwan)
+}
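+# Any code missing from this map (e.g. a bare "en") falls back to _English
+# inside set_language() below.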
+
+
+def set_language(lang_code):
+ # Globals
+ global WINDOW_TITLE, RAM_USAGE, CPU_USAGE, BACKEND, REFRESH_BACKENDS, MODELS_PATH, OUTPUT_PATH, LOGS_PATH
+ global BROWSE, AVAILABLE_MODELS, QUANTIZATION_TYPE, ALLOW_REQUANTIZE, LEAVE_OUTPUT_TENSOR, PURE, IMATRIX
+ global INCLUDE_WEIGHTS, EXCLUDE_WEIGHTS, USE_OUTPUT_TENSOR_TYPE, USE_TOKEN_EMBEDDING_TYPE, KEEP_SPLIT
+ global KV_OVERRIDES, ADD_NEW_OVERRIDE, QUANTIZE_MODEL, SAVE_PRESET, LOAD_PRESET, TASKS, DOWNLOAD_LLAMACPP
+ global SELECT_RELEASE, SELECT_ASSET, EXTRACT_CUDA_FILES, SELECT_CUDA_BACKEND, DOWNLOAD, IMATRIX_GENERATION
+ global DATA_FILE, MODEL, OUTPUT, OUTPUT_FREQUENCY, GPU_OFFLOAD, AUTO, GENERATE_IMATRIX, ERROR, WARNING
+ global PROPERTIES, CANCEL, RESTART, DELETE, CONFIRM_DELETION, TASK_RUNNING_WARNING, YES, NO, DOWNLOAD_COMPLETE
+ global CUDA_EXTRACTION_FAILED, PRESET_SAVED, PRESET_LOADED, NO_ASSET_SELECTED, DOWNLOAD_FAILED, NO_BACKEND_SELECTED
+ global NO_MODEL_SELECTED, REFRESH_RELEASES, NO_SUITABLE_CUDA_BACKENDS, LLAMACPP_DOWNLOADED_EXTRACTED, CUDA_FILES_EXTRACTED
+ global NO_SUITABLE_CUDA_BACKEND_EXTRACTION, ERROR_FETCHING_RELEASES, CONFIRM_DELETION_TITLE, LOG_FOR, ALL_FILES
+ global GGUF_FILES, DAT_FILES, JSON_FILES, FAILED_LOAD_PRESET, INITIALIZING_AUTOGGUF, AUTOGGUF_INITIALIZATION_COMPLETE
+ global REFRESHING_BACKENDS, NO_BACKENDS_AVAILABLE, FOUND_VALID_BACKENDS, SAVING_PRESET, PRESET_SAVED_TO, LOADING_PRESET
+ global PRESET_LOADED_FROM, ADDING_KV_OVERRIDE, SAVING_TASK_PRESET, TASK_PRESET_SAVED, TASK_PRESET_SAVED_TO, RESTARTING_TASK
+ global IN_PROGRESS, DOWNLOAD_FINISHED_EXTRACTED_TO, LLAMACPP_DOWNLOADED_AND_EXTRACTED, NO_SUITABLE_CUDA_BACKEND_FOUND
+ global LLAMACPP_BINARY_DOWNLOADED_AND_EXTRACTED, REFRESHING_LLAMACPP_RELEASES, UPDATING_ASSET_LIST, UPDATING_CUDA_OPTIONS
+ global STARTING_LLAMACPP_DOWNLOAD, UPDATING_CUDA_BACKENDS, NO_CUDA_BACKEND_SELECTED, EXTRACTING_CUDA_FILES, DOWNLOAD_ERROR
+ global SHOWING_TASK_CONTEXT_MENU, SHOWING_PROPERTIES_FOR_TASK, CANCELLING_TASK, CANCELED, DELETING_TASK, LOADING_MODELS, LOADED_MODELS
+ global BROWSING_FOR_MODELS_DIRECTORY, SELECT_MODELS_DIRECTORY, BROWSING_FOR_OUTPUT_DIRECTORY, SELECT_OUTPUT_DIRECTORY
+ global BROWSING_FOR_LOGS_DIRECTORY, SELECT_LOGS_DIRECTORY, BROWSING_FOR_IMATRIX_FILE, SELECT_IMATRIX_FILE, RAM_USAGE_FORMAT
+ global CPU_USAGE_FORMAT, VALIDATING_QUANTIZATION_INPUTS, MODELS_PATH_REQUIRED, OUTPUT_PATH_REQUIRED, LOGS_PATH_REQUIRED
+ global STARTING_MODEL_QUANTIZATION, INPUT_FILE_NOT_EXIST, QUANTIZING_MODEL_TO, QUANTIZATION_TASK_STARTED, ERROR_STARTING_QUANTIZATION
+ global UPDATING_MODEL_INFO, TASK_FINISHED, SHOWING_TASK_DETAILS_FOR, BROWSING_FOR_IMATRIX_DATA_FILE, SELECT_DATA_FILE
+ global BROWSING_FOR_IMATRIX_MODEL_FILE, SELECT_MODEL_FILE, BROWSING_FOR_IMATRIX_OUTPUT_FILE, SELECT_OUTPUT_FILE
+ global STARTING_IMATRIX_GENERATION, BACKEND_PATH_NOT_EXIST, GENERATING_IMATRIX, ERROR_STARTING_IMATRIX_GENERATION
+ global IMATRIX_GENERATION_TASK_STARTED, ERROR_MESSAGE, TASK_ERROR, APPLICATION_CLOSING, APPLICATION_CLOSED, SELECT_QUANTIZATION_TYPE
+ global ALLOWS_REQUANTIZING, LEAVE_OUTPUT_WEIGHT, DISABLE_K_QUANT_MIXTURES, USE_DATA_AS_IMPORTANCE_MATRIX, USE_IMPORTANCE_MATRIX_FOR_TENSORS
+ global DONT_USE_IMPORTANCE_MATRIX_FOR_TENSORS, OUTPUT_TENSOR_TYPE, USE_THIS_TYPE_FOR_OUTPUT_WEIGHT, TOKEN_EMBEDDING_TYPE, USE_THIS_TYPE_FOR_TOKEN_EMBEDDINGS
+ global WILL_GENERATE_QUANTIZED_MODEL_IN_SAME_SHARDS, OVERRIDE_MODEL_METADATA, INPUT_DATA_FILE_FOR_IMATRIX, MODEL_TO_BE_QUANTIZED
+ global OUTPUT_PATH_FOR_GENERATED_IMATRIX, HOW_OFTEN_TO_SAVE_IMATRIX, SET_GPU_OFFLOAD_VALUE, COMPLETED, REFRESH_MODELS
+ global CONTEXT_SIZE, CONTEXT_SIZE_FOR_IMATRIX, THREADS, NUMBER_OF_THREADS_FOR_IMATRIX, EXTRA_ARGUMENTS, EXTRA_ARGUMENTS_LABEL
+ global LORA_CONVERSION, LORA_INPUT_PATH, LORA_OUTPUT_PATH, SELECT_LORA_INPUT_DIRECTORY, SELECT_LORA_OUTPUT_FILE
+ global CONVERT_LORA, STARTING_LORA_CONVERSION, LORA_INPUT_PATH_REQUIRED, LORA_OUTPUT_PATH_REQUIRED, ERROR_STARTING_LORA_CONVERSION
+ global LORA_CONVERSION_TASK_STARTED, BIN_FILES, BROWSING_FOR_LORA_INPUT_DIRECTORY, BROWSING_FOR_LORA_OUTPUT_FILE, CONVERTING_LORA
+ global LORA_CONVERSION_FINISHED, LORA_FILE_MOVED, LORA_FILE_NOT_FOUND, ERROR_MOVING_LORA_FILE, EXPORT_LORA
+ global MODEL_PATH_REQUIRED, AT_LEAST_ONE_LORA_ADAPTER_REQUIRED, INVALID_LORA_SCALE_VALUE, ERROR_STARTING_LORA_EXPORT, LORA_EXPORT_TASK_STARTED
+ global GGML_LORA_ADAPTERS, SELECT_LORA_ADAPTER_FILES, ADD_ADAPTER, DELETE_ADAPTER, LORA_SCALE
+ global ENTER_LORA_SCALE_VALUE, NUMBER_OF_THREADS_FOR_LORA_EXPORT, EXPORTING_LORA, BROWSING_FOR_EXPORT_LORA_MODEL_FILE, BROWSING_FOR_EXPORT_LORA_OUTPUT_FILE
+ global ADDING_LORA_ADAPTER, DELETING_LORA_ADAPTER, LORA_FILES, SELECT_LORA_ADAPTER_FILE, STARTING_LORA_EXPORT
+ global OUTPUT_TYPE, SELECT_OUTPUT_TYPE, GGUF_AND_BIN_FILES, BASE_MODEL, SELECT_BASE_MODEL_FILE
+ global BASE_MODEL_PATH_REQUIRED, BROWSING_FOR_BASE_MODEL_FILE, SELECT_BASE_MODEL_FOLDER, BROWSING_FOR_BASE_MODEL_FOLDER
+ global LORA_CONVERSION_FROM_TO, GENERATING_IMATRIX_FOR, MODEL_PATH_REQUIRED_FOR_IMATRIX, NO_ASSET_SELECTED_FOR_CUDA_CHECK, QUANTIZATION_COMMAND
+ global IMATRIX_GENERATION_COMMAND, LORA_CONVERSION_COMMAND, LORA_EXPORT_COMMAND
+
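+ # Resolve the localization class for lang_code, defaulting to English.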
+ loc = _languages.get(lang_code, _English)()
+ english_loc = _English() # Create an instance of English localization for fallback
+
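+ # Copy every public attribute of the chosen localization into module
+ # globals (the names declared global above); getattr falls back to the
+ # English string for any key a translation leaves undefined.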
+ for key in dir(english_loc):
+ if not key.startswith("_"):
+ globals()[key] = getattr(loc, key, getattr(english_loc, key))
+
+
+# Get the language from the AUTOGGUF_LANGUAGE environment variable, default to 'en-US'
+language_code = os.getenv("AUTOGGUF_LANGUAGE", "en-US")
+
+# Apply the selected language at import time
+set_language(language_code)
diff --git a/src/main.py b/src/main.py
index 6e24354..d466fc6 100644
--- a/src/main.py
+++ b/src/main.py
@@ -1,9 +1,9 @@
-import sys
-from PyQt6.QtWidgets import QApplication
-from AutoGGUF import AutoGGUF
-
-if __name__ == "__main__":
- app = QApplication(sys.argv)
- window = AutoGGUF()
- window.show()
- sys.exit(app.exec())
\ No newline at end of file
+import sys
+from PyQt6.QtWidgets import QApplication
+from AutoGGUF import AutoGGUF
+
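+# Standard Qt entry point: create the application, show the main window,
+# and hand control to the event loop until the window closes.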
+if __name__ == "__main__":
+ app = QApplication(sys.argv)
+ window = AutoGGUF()
+ window.show()
+ sys.exit(app.exec())