Update local-ai.conf

Vitalii Kuzhdin 2025-08-03 22:30:24 +02:00
parent f38d9504c1
commit c07cbd33b1
3 changed files with 69 additions and 134 deletions

.SRCINFO

@@ -1,7 +1,7 @@
pkgbase = local-ai-bin
pkgdesc = Free, Open Source OpenAI alternative. Self-hosted, community-driven and local-first
pkgver = 3.3.1
-pkgrel = 1
+pkgrel = 2
url = https://localai.io
arch = aarch64
arch = x86_64

PKGBUILD

@@ -3,7 +3,7 @@
_pkgname="local-ai"
pkgname="${_pkgname}-bin"
pkgver=3.3.1
-pkgrel=1
+pkgrel=2
pkgdesc="Free, Open Source OpenAI alternative. Self-hosted, community-driven and local-first"
arch=('aarch64' 'x86_64')
url="https://localai.io"

local-ai.conf

@@ -1,142 +1,77 @@
-# Configuration file for https://github.com/mudler/LocalAI
+# LocalAI Configuration File
-# Path containing models used for inferencing
-LOCALAI_MODELS_PATH="/var/lib/local-ai/models"
+# Logging
+# Valid values: [error,warn,info,debug,trace]
+LOCALAI_LOG_LEVEL="info"
-# Path used to extract libraries required by some backends at runtime
-LOCALAI_BACKEND_ASSETS_PATH="/var/lib/local-ai/backend_data"
+# Storage Configuration
+AUDIO_PATH="/var/lib/local-ai/generated/audio"
+BACKEND_ASSETS_PATH="/var/lib/local-ai/backend_data"
+# CONFIG_DIR_POLL_INTERVAL="1m"
+# CONFIG_PATH="/tmp/localai/config"
+GENERATED_CONTENT_PATH="/var/lib/local-ai/generated"
+IMAGE_PATH="/var/lib/local-ai/generated/images"
+LOCALAI_CONFIG_DIR="/var/lib/local-ai/configuration"
+# MODELS_CONFIG_FILE="config.yaml"
+MODELS_PATH="/var/lib/local-ai/models"
+UPLOAD_PATH="/tmp/localai/upload"
-# Location for images generated by backends (e.g., stablediffusion)
-LOCALAI_IMAGE_PATH="/var/lib/local-ai/generated/images"
+# Backend Configuration
+# AUTOLOAD_BACKEND_GALLERIES=false
+BACKENDS_PATH="/var/lib/local-ai/backends"
+# BACKEND_GALLERIES='[{"name":"localai", "url":"github:mudler/LocalAI/backend/index.yaml@master"}]'
+# EXTERNAL_BACKENDS="[]"
+# EXTERNAL_GRPC_BACKENDS="[]"
+# LIBRARY_PATH="/usr/share/local-ai/libs"
+# PARALLEL_REQUESTS=true
+# PRELOAD_BACKEND_ONLY=false
+# SINGLE_ACTIVE_BACKEND=false
-# Location for audio generated by backends (e.g., piper)
-LOCALAI_AUDIO_PATH="/var/lib/local-ai/generated/audio"
+# Watchdog Configuration
+# WATCHDOG_BUSY=false
+# WATCHDOG_BUSY_TIMEOUT="5m"
+# WATCHDOG_IDLE=false
+# WATCHDOG_IDLE_TIMEOUT="15m"
-# Path to store uploads from files API
-# LOCALAI_UPLOAD_PATH="/tmp/localai/upload"
+# Model Configuration
+# AUTOLOAD_GALLERIES=false
+# GALLERIES='[{"name":"localai", "url":"github:mudler/LocalAI/gallery/index.yaml@master"}]'
+# LOAD_TO_MEMORY="[]"
+# MODELS="[]"
+# PRELOAD_MODELS="[]"
+# PRELOAD_MODELS_CONFIG=""
+# REMOTE_LIBRARY=""
-# Path for configuration files
-# LOCALAI_CONFIG_PATH="/tmp/localai/config"
+# Performance Configuration
+# CONTEXT_SIZE=512
+# F16=false
+# THREADS=4
-# Directory for dynamic loading of certain configuration files
-LOCALAI_CONFIG_DIR="/etc/local-ai"
+# API Server Configuration
+# ADDRESS=":8080"
+# API_KEY="[]"
+# CORS=true
+# CORS_ALLOW_ORIGINS=""
+# CSRF=false
+# DISABLE_GALLERY_ENDPOINT=false
+# DISABLE_METRICS_ENDPOINT=false
+# DISABLE_WEBUI=false
+# MACHINE_TAG=""
+# UPLOAD_LIMIT=15
-# Interval to poll the LocalAI Config Directory for changes
-# LOCALAI_CONFIG_DIR_POLL_INTERVAL="1m"
+# Security/Hardening Configuration
+# DISABLE_API_KEY_REQUIREMENT_FOR_HTTP_GET=false
+# DISABLE_PREDOWNLOAD_SCAN=false
+# HTTP_GET_EXEMPTED_ENDPOINTS="^/$,^/browse/?$,^/talk/?$,^/p2p/?$,^/chat/?$,^/text2image/?$,^/tts/?$,^/static/.*$,^/swagger.*$"
+# OPAQUE_ERRORS=false
+# SUBTLE_KEY_COMPARISON=false
-# YAML file containing a list of model backend configs
-# LOCALAI_MODELS_CONFIG_FILE="config.yaml"
+# P2P Configuration
+# P2P=false
+# P2P_DHT_INTERVAL=360
+# P2P_NETWORK_ID=""
+# P2P_OTP_INTERVAL=9000
+# P2P_TOKEN=""
-# JSON list of
-# LOCALAI_GALLERIES='[{"name":"model-gallery", "url":"github:go-skynet/model-gallery/index.yaml"}, {"url": "github:go-skynet/model-gallery/huggingface.yaml","name":"huggingface"}]'
-# Enable automatic loading of galleries
-# LOCALAI_AUTOLOAD_GALLERIES=true
-# A LocalAI remote library URL
-# LOCALAI_REMOTE_LIBRARY="${remoteLibraryURL}"
-# List of models to apply at startup in JSON format
-# LOCALAI_PRELOAD_MODELS="[]"
-# List of model configuration URLs to load
-# LOCALAI_MODELS="[]"
-# Path to a YAML config file for preloading models at startup
-# LOCALAI_PRELOAD_MODELS_CONFIG="preload_config.yaml"
-# Enable GPU acceleration
-# LOCALAI_F16=true
-# Number of threads used for parallel computation
-# LOCALAI_THREADS=4
-# Default context size for models
-# LOCALAI_CONTEXT_SIZE=512
-# Bind address for the API server
-# LOCALAI_ADDRESS=":8080"
-# Enable CORS for the API
-LOCALAI_CORS=true
-# Path to the library directory for external libraries used by backends
-# LOCALAI_LIBRARY_PATH="/usr/share/local-ai/libs"
-# Enable fiber CSRF middleware
-# LOCALAI_CSRF=true
-# Default upload limit in MB
-# LOCALAI_UPLOAD_LIMIT=15
-# List of API Keys for authentication
-# LOCALAI_API_KEY="[]"
-# Disable web UI
-# LOCALAI_DISABLE_WEBUI=false
-# Disable the best-effort security scanner before downloading files
-# LOCALAI_DISABLE_PREDOWNLOAD_SCAN=false
-# Replace all error responses with blank 500 errors
-# LOCALAI_OPAQUE_ERRORS=false
-# Use constant-time comparisons for API Key validation
-# LOCALAI_SUBTLE_KEY_COMPARISON=false
-# Disable API key requirement for HTTP GET requests
-# LOCALAI_DISABLE_API_KEY_REQUIREMENT_FOR_HTTP_GET=false
-# Disable the /metrics endpoint
-# LOCALAI_DISABLE_METRICS_ENDPOINT=false
-# List of endpoints exempt from API key requirement for GET requests
-# LOCALAI_HTTP_GET_EXEMPTED_ENDPOINTS="^/$,^/browse/?$,^/talk/?$,^/p2p/?$,^/chat/?$,^/text2image/?$,^/tts/?$,^/static/.*$,^/swagger.*$"
-# Enable P2P mode
-# LOCALAI_P2P=false
-# Interval for DHT refresh during token generation
-# LOCALAI_P2P_DHT_INTERVAL=360
-# Interval for OTP refresh during token generation
-# LOCALAI_P2P_OTP_INTERVAL=9000
-# Token for P2P mode (optional)
-# LOCALAI_P2P_TOKEN=""
-# Network ID for P2P mode
-# LOCALAI_P2P_NETWORK_ID=""
-# Enable backends to handle multiple requests in parallel
-# LOCALAI_PARALLEL_REQUESTS=true
-# Allow only one backend to be run at a time
-# LOCALAI_SINGLE_ACTIVE_BACKEND=false
-# Do not launch API services, only preloaded models/backends are started
-# LOCALAI_PRELOAD_BACKEND_ONLY=false
-# List of external gRPC backends
-# LOCALAI_EXTERNAL_GRPC_BACKENDS="[]"
-# Enable watchdog for stopping idle backends
-# LOCALAI_WATCHDOG_IDLE=false
-# Threshold for stopping idle backends
-# LOCALAI_WATCHDOG_IDLE_TIMEOUT="15m"
-# Enable watchdog for stopping busy backends
-# LOCALAI_WATCHDOG_BUSY=false
-# Threshold for stopping busy backends
-# LOCALAI_WATCHDOG_BUSY_TIMEOUT="5m"
-# Enable federated instance
-# LOCALAI_FEDERATED=false
-# Disable the gallery endpoints
-# LOCALAI_DISABLE_GALLERY_ENDPOINT=false
-# List of models to load into memory at startup
-# LOCALAI_LOAD_TO_MEMORY="[]"
+# Federated Configuration
+# FEDERATED=false
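
Usage sketch (assumptions, not shown in this diff): the installed path of the file (/etc/local-ai/local-ai.conf below) and the systemd unit name (local-ai.service) are illustrative only; check the PKGBUILD and the shipped unit for the real ones. To enable one of the commented settings, edit the installed file and restart the service:

    # Assumed install path and unit name; verify against the package contents.
    sudoedit /etc/local-ai/local-ai.conf        # e.g. set WATCHDOG_IDLE=true
    sudo systemctl restart local-ai.service     # pick up the new environment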