diff --git a/env.example b/env.example index aab943cc..30faaeac 100644 --- a/env.example +++ b/env.example @@ -77,14 +77,17 @@ MAX_ASYNC=4 ### MAX_TOKENS: max tokens send to LLM for entity relation summaries (less than context size of the model) ### MAX_TOKENS: set as num_ctx option for Ollama by API Server MAX_TOKENS=32768 -### LLM Binding type: openai, ollama, lollms +### LLM Binding type: openai, ollama, lollms, azure_openai LLM_BINDING=openai LLM_MODEL=gpt-4o LLM_BINDING_HOST=https://api.openai.com/v1 LLM_BINDING_API_KEY=your_api_key +### Optional LLM settings, used when LLM_BINDING=azure_openai +# AZURE_OPENAI_API_VERSION=2024-08-01-preview +# AZURE_OPENAI_DEPLOYMENT=gpt-4o ### Embedding Configuration -### Embedding Binding type: openai, ollama, lollms +### Embedding Binding type: openai, ollama, lollms, azure_openai EMBEDDING_BINDING=ollama EMBEDDING_MODEL=bge-m3:latest EMBEDDING_DIM=1024 @@ -97,6 +100,9 @@ EMBEDDING_BINDING_HOST=http://localhost:11434 # EMBEDDING_FUNC_MAX_ASYNC=16 ### Maximum tokens sent to Embedding for each chunk (no longer in use?) # MAX_EMBED_TOKENS=8192 +### Optional embedding settings, used when EMBEDDING_BINDING=azure_openai +# AZURE_EMBEDDING_DEPLOYMENT=text-embedding-3-large +# AZURE_EMBEDDING_API_VERSION=2023-05-15 ### Data storage selection # LIGHTRAG_KV_STORAGE=PGKVStorage