Fix INIT_YAML embeddings default settings (#1039)

Co-authored-by: Thanh Long Phan <long.phan@dida.do>
Co-authored-by: Alonso Guevara <alonsog@microsoft.com>
TLP 2024-08-28 22:18:59 +02:00 committed by GitHub
parent 22df2f80d0
commit 1b51827c66
2 changed files with 6 additions and 2 deletions


@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "Fix default settings for embedding"
+}


@@ -38,6 +38,8 @@ embeddings:
   ## parallelization: override the global parallelization settings for embeddings
   async_mode: {defs.ASYNC_MODE.value} # or asyncio
   # target: {defs.EMBEDDING_TARGET.value} # or all
+  # batch_size: {defs.EMBEDDING_BATCH_SIZE} # the number of documents to send in a single request
+  # batch_max_tokens: {defs.EMBEDDING_BATCH_MAX_TOKENS} # the maximum number of tokens to send in a single request
   llm:
     api_key: ${{GRAPHRAG_API_KEY}}
     type: {defs.EMBEDDING_TYPE.value} # or azure_openai_embedding
@@ -52,8 +54,6 @@ embeddings:
     # max_retry_wait: {defs.LLM_MAX_RETRY_WAIT}
     # sleep_on_rate_limit_recommendation: true # whether to sleep when azure suggests wait-times
     # concurrent_requests: {defs.LLM_CONCURRENT_REQUESTS} # the number of parallel inflight requests that may be made
-    # batch_size: {defs.EMBEDDING_BATCH_SIZE} # the number of documents to send in a single request
-    # batch_max_tokens: {defs.EMBEDDING_BATCH_MAX_TOKENS} # the maximum number of tokens to send in a single request
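For reference, here is a sketch of how the corrected embeddings section of a generated settings.yaml would read once the defs.* placeholders are rendered. The concrete values below (threaded, required, 16, 8191, openai_embedding) are illustrative assumptions standing in for graphrag's defaults, not values taken from this diff; the point of the fix is that batch_size and batch_max_tokens are embedding-pipeline settings and now appear at the embeddings level instead of under the llm block:

```yaml
# Sketch of the rendered INIT_YAML output after this change.
# Numeric and enum defaults here are assumed placeholders.
embeddings:
  ## parallelization: override the global parallelization settings for embeddings
  async_mode: threaded # or asyncio
  # target: required # or all
  # batch_size: 16 # the number of documents to send in a single request
  # batch_max_tokens: 8191 # the maximum number of tokens to send in a single request
  llm:
    api_key: ${GRAPHRAG_API_KEY}
    type: openai_embedding # or azure_openai_embedding
```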