- Replace Docker/LXC-based approach with direct binary download
- Download LocalAI v2.25.0 binary from GitHub releases
- Add localaictl CLI for install, model management, and service control
- Change default port to 8081 (avoid CrowdSec conflict on 8080)
- Remove secubox-app-localai-wb (merged into secubox-app-localai)
- Add model presets: tinyllama, phi2, mistral

Usage:
  localaictl install
  localaictl model-install tinyllama
  /etc/init.d/localai enable && /etc/init.d/localai start

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
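Once a model is installed and the service is running, the instance can be smoke-tested over LocalAI's OpenAI-compatible HTTP API. A minimal check against the new default port 8081 (the model name below assumes the install registers the tinyllama preset under its GGUF filename; adjust to whatever /v1/models reports):

  # Confirm the API answers and list available models (default port is now 8081)
  curl http://127.0.0.1:8081/v1/models

  # Minimal chat completion request
  curl http://127.0.0.1:8081/v1/chat/completions \
    -H "Content-Type: application/json" \
    -d '{"model":"tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf","messages":[{"role":"user","content":"Hello"}]}'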
Default config (31 lines, 1008 B, plaintext):
config main 'main'
	option enabled '0'
	option installed '0'
	option api_port '8081'
	option api_host '0.0.0.0'
	option data_path '/srv/localai'
	option models_path '/srv/localai/models'
	option threads '4'
	option context_size '2048'
	option debug '0'
	option cors '1'
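The main section is plain UCI state, so it can be adjusted with the stock uci tool; a minimal sketch, assuming the file is installed as /etc/config/localai (the section and option names are the ones shown above):

  # Raise the thread count and enable debug logging, then apply
  uci set localai.main.threads='8'
  uci set localai.main.debug='1'
  uci commit localai
  /etc/init.d/localai restart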
# Model presets
config preset 'tinyllama'
	option name 'tinyllama'
	option url 'https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf'
	option size '669M'
	option description 'TinyLlama 1.1B - Ultra-lightweight'

config preset 'phi2'
	option name 'phi-2'
	option url 'https://huggingface.co/TheBloke/phi-2-GGUF/resolve/main/phi-2.Q4_K_M.gguf'
	option size '1.6G'
	option description 'Microsoft Phi-2 - Compact and efficient'

config preset 'mistral'
	option name 'mistral-7b'
	option url 'https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.2-GGUF/resolve/main/mistral-7b-instruct-v0.2.Q4_K_M.gguf'
	option size '4.1G'
	option description 'Mistral 7B Instruct - High quality'
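Each preset reduces model installation to fetching its url into models_path. A hypothetical sketch of what `localaictl model-install <preset>` boils down to (not the actual localaictl source; uci and BusyBox wget are assumed available, as on stock OpenWrt):

  #!/bin/sh
  # Look up the preset's download URL in UCI, then fetch it into models_path.
  preset="$1"
  url="$(uci -q get "localai.${preset}.url")"
  models_path="$(uci -q get localai.main.models_path)"
  [ -n "$url" ] || { echo "unknown preset: ${preset}" >&2; exit 1; }
  mkdir -p "$models_path"
  # ${url##*/} strips everything up to the last slash, i.e. the GGUF filename
  wget -O "${models_path}/${url##*/}" "$url"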