# Hardware Config
hardware:
  gpus_per_node: 4
  gpus_per_server: 8
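
# Two single-server benchmark variants of DeepSeek-R1-0528 (FP4 v2), each
# on 8 GPUs spanning two 4-GPU nodes. The names appear to encode the
# layout: "dep8" = attention data parallelism with 8-way expert
# parallelism, "tep8" = attention tensor parallelism with 8-way expert
# parallelism, "mtpN" = multi-token-prediction draft depth.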
server_configs:
  - name: "r1_fp4_v2_dep8_mtp1"
    model_name: "deepseek_r1_0528_fp4_v2"
    gpus: 8
    gpus_per_node: 4
    trust_remote_code: true
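    # Parallel layout: 8-way tensor and expert parallelism with no pipeline
    # stages, so all 8 GPUs form one TP/EP group. max_num_tokens bounds the
    # scheduler's per-iteration token budget across in-flight requests.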
    tensor_parallel_size: 8
    moe_expert_parallel_size: 8
    pipeline_parallel_size: 1
    max_batch_size: 512
    max_num_tokens: 3136
    attn_backend: "TRTLLM"
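    # Attention runs data-parallel across the 8 ranks. The balance knobs
    # below trade a short batching wait (measured in scheduler iterations)
    # for a more even request load across DP ranks.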
    enable_attention_dp: true
    attention_dp_config:
      batching_wait_iters: 0
      enable_balance: true
      timeout_iters: 60
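    # MoE dispatch via the CUTLASS kernel backend; the TEP8 variant below
    # uses the TRTLLM backend instead.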
    moe_config:
      backend: 'CUTLASS'
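    # Capture CUDA graphs up to batch 512; padding rounds intermediate
    # batch sizes up to a captured size so decode stays on the graph path.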
    cuda_graph_config:
      enable_padding: true
      max_batch_size: 512
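    # FP8 KV cache taking half of free GPU memory. Block reuse is off,
    # presumably so cached prefixes cannot short-circuit prefill and skew
    # the measurements.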
    kv_cache_config:
      dtype: 'fp8'
      enable_block_reuse: false
      free_gpu_memory_fraction: 0.5
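    # Load generator: 32 concurrent streams for 12 iterations against the
    # OpenAI-compatible endpoint, 1024 input / 1024 output tokens, with
    # lengths jittered by the 0.2 random_range_ratio.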
    client_configs:
      - name: "con32_iter12_1k1k"
        concurrency: 32
        iterations: 12
        isl: 1024
        osl: 1024
        random_range_ratio: 0.2
        backend: "openai"
  - name: "r1_fp4_v2_tep8_mtp3"
    model_name: "deepseek_r1_0528_fp4_v2"
    gpus: 8
    gpus_per_node: 4
    trust_remote_code: true
    tensor_parallel_size: 8
    moe_expert_parallel_size: 8
    pipeline_parallel_size: 1
    max_batch_size: 512
    max_num_tokens: 3136
    attn_backend: "TRTLLM"
    enable_attention_dp: false
    moe_config:
      backend: "TRTLLM"
    cuda_graph_config:
      enable_padding: true
      max_batch_size: 512
    kv_cache_config:
      dtype: 'fp8'
      free_gpu_memory_fraction: 0.5
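    # Multi-Token Prediction: three MTP modules draft three extra tokens
    # per step for the target model to verify in parallel.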
    speculative_config:
      decoding_type: 'MTP'
      num_nextn_predict_layers: 3
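    # Identical client load to the DEP8 run, so the two layouts can be
    # compared directly.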
    client_configs:
      - name: "con32_iter12_1k1k"
        concurrency: 32
        iterations: 12
        isl: 1024
        osl: 1024
        random_range_ratio: 0.2
        backend: "openai"