Name | minerva-7bI-q6-llama-inference_176166_1739055910.676393_5299_1 |
Workunit | 51670 |
Created | 16 Feb 2025, 15:36:47 UTC |
Sent | 16 Feb 2025, 15:36:57 UTC |
Report deadline | 23 Feb 2025, 15:36:57 UTC |
Received | 17 Feb 2025, 19:17:45 UTC |
Server state | Over |
Outcome | Success |
Client state | Done |
Exit status | 0 (0x00000000) |
Computer ID | 89 |
Run time | 9 min 40 sec |
CPU time | 37 min 41 sec |
Validate state | Valid |
Credit | 66.61 |
Device peak FLOPS | 75.58 GFLOPS |
Application version | Minerva 7B Instruct Q6 inference via LLama.cpp v4.00 (mt) x86_64-pc-linux-gnu |
Peak working set size | 5.72 GB |
Peak swap size | 5.80 GB |
Peak disk usage | 31.14 KB |
<core_client_version>7.24.1</core_client_version>
<![CDATA[
<stderr_txt>
llama_model_loader: loaded meta data with 51 key-value pairs and 291 tensors from ../../projects/boinc.llmentor.org_LLMentorGrid/minerva-7b-instruct-v1.0-q6_k.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = llama
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Minerva 7b Inst
llama_model_loader: - kv 3: general.finetune str = inst
llama_model_loader: - kv 4: general.basename str = minerva
llama_model_loader: - kv 5: general.size_label str = 7B
llama_model_loader: - kv 6: general.license str = apache-2.0
llama_model_loader: - kv 7: general.base_model.count u32 = 1
llama_model_loader: - kv 8: general.base_model.0.name str = Minerva 7B Base v1.0
llama_model_loader: - kv 9: general.base_model.0.version str = v1.0
llama_model_loader: - kv 10: general.base_model.0.organization str = Sapienzanlp
llama_model_loader: - kv 11: general.base_model.0.repo_url str = https://huggingface.co/sapienzanlp/Mi...
llama_model_loader: - kv 12: general.dataset.count u32 = 3
llama_model_loader: - kv 13: general.dataset.0.name str = Ultrafeedback_Binarized
llama_model_loader: - kv 14: general.dataset.0.organization str = HuggingFaceH4
llama_model_loader: - kv 15: general.dataset.0.repo_url str = https://huggingface.co/HuggingFaceH4/...
llama_model_loader: - kv 16: general.dataset.1.name str = ALERT
llama_model_loader: - kv 17: general.dataset.1.organization str = Babelscape
llama_model_loader: - kv 18: general.dataset.1.repo_url str = https://huggingface.co/Babelscape/ALERT
llama_model_loader: - kv 19: general.dataset.2.name str = Evol Dpo Ita
llama_model_loader: - kv 20: general.dataset.2.organization str = Efederici
llama_model_loader: - kv 21: general.dataset.2.repo_url str = https://huggingface.co/efederici/evol...
llama_model_loader: - kv 22: general.tags arr[str,3] = ["sft", "dpo", "text-generation"]
llama_model_loader: - kv 23: general.languages arr[str,2] = ["it", "en"]
llama_model_loader: - kv 24: llama.block_count u32 = 32
llama_model_loader: - kv 25: llama.context_length u32 = 4096
llama_model_loader: - kv 26: llama.embedding_length u32 = 4096
llama_model_loader: - kv 27: llama.feed_forward_length u32 = 14336
llama_model_loader: - kv 28: llama.attention.head_count u32 = 32
llama_model_loader: - kv 29: llama.attention.head_count_kv u32 = 8
llama_model_loader: - kv 30: llama.rope.freq_base f32 = 10000.000000
llama_model_loader: - kv 31: llama.attention.layer_norm_rms_epsilon f32 = 0.000010
llama_model_loader: - kv 32: llama.attention.key_length u32 = 128
llama_model_loader: - kv 33: llama.attention.value_length u32 = 128
llama_model_loader: - kv 34: general.file_type u32 = 18
llama_model_loader: - kv 35: llama.vocab_size u32 = 51264
llama_model_loader: - kv 36: llama.rope.dimension_count u32 = 128
llama_model_loader: - kv 37: tokenizer.ggml.model str = gpt2
llama_model_loader: - kv 38: tokenizer.ggml.pre str = minerva-7b
llama_model_loader: - kv 39: tokenizer.ggml.tokens arr[str,51264] = ["<s>", "</s>", "<unk>", "!", "\"", "...
llama_model_loader: - kv 40: tokenizer.ggml.token_type arr[i32,51264] = [3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
llama_model_loader: - kv 41: tokenizer.ggml.merges arr[str,50955] = ["Ġ a", "i n", "Ġ Ġ", "e r", "o n"...
llama_model_loader: - kv 42: tokenizer.ggml.bos_token_id u32 = 0
llama_model_loader: - kv 43: tokenizer.ggml.eos_token_id u32 = 51202
llama_model_loader: - kv 44: tokenizer.ggml.unknown_token_id u32 = 2
llama_model_loader: - kv 45: tokenizer.ggml.padding_token_id u32 = 51202
llama_model_loader: - kv 46: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 47: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 48: tokenizer.chat_template str = {% set loop_messages = messages %}{% ...
llama_model_loader: - kv 49: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 50: general.quantization_version u32 = 2
llama_model_loader: - type f32: 65 tensors
llama_model_loader: - type q6_K: 226 tensors
llm_load_vocab: control token: 51201 '<|end_header_id|>' is not marked as EOG
llm_load_vocab: control token: 51200 '<|start_header_id|>' is not marked as EOG
llm_load_vocab: control token: 0 '<s>' is not marked as EOG
llm_load_vocab: control token: 1 '</s>' is not marked as EOG
llm_load_vocab: control token: 2 '<unk>' is not marked as EOG
llm_load_vocab: special tokens cache size = 6
llm_load_vocab: token to piece cache size = 0.3264 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = llama
llm_load_print_meta: vocab type = BPE
llm_load_print_meta: n_vocab = 51264
llm_load_print_meta: n_merges = 50955
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 4096
llm_load_print_meta: n_embd = 4096
llm_load_print_meta: n_layer = 32
llm_load_print_meta: n_head = 32
llm_load_print_meta: n_head_kv = 8
llm_load_print_meta: n_rot = 128
llm_load_print_meta: n_swa = 0
llm_load_print_meta: n_embd_head_k = 128
llm_load_print_meta: n_embd_head_v = 128
llm_load_print_meta: n_gqa = 4
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-05
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 14336
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 0
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 4096
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: ssm_dt_b_c_rms = 0
llm_load_print_meta: model type = 8B
llm_load_print_meta: model ftype = Q6_K
llm_load_print_meta: model params = 7.40 B
llm_load_print_meta: model size = 5.65 GiB (6.56 BPW)
llm_load_print_meta: general.name = Minerva 7b Inst
llm_load_print_meta: BOS token = 0 '<s>'
llm_load_print_meta: EOS token = 51202 '<|eot_id|>'
llm_load_print_meta: EOT token = 51202 '<|eot_id|>'
llm_load_print_meta: UNK token = 2 '<unk>'
llm_load_print_meta: PAD token = 51202 '<|eot_id|>'
llm_load_print_meta: LF token = 129 'Ä'
llm_load_print_meta: EOG token = 51202 '<|eot_id|>'
llm_load_print_meta: max token length = 128
llm_load_tensors: tensor 'token_embd.weight' (q6_K) (and 290 others) cannot be used with preferred buffer type CPU_AARCH64, using CPU instead
llm_load_tensors: CPU_Mapped model buffer size = 5789.55 MiB
.................................................................................................
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 96
llama_new_context_with_model: n_ctx_per_seq = 96
llama_new_context_with_model: n_batch = 33
llama_new_context_with_model: n_ubatch = 33
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (96) < n_ctx_train (4096) -- the full capacity of the model will not be utilized
llama_kv_cache_init: kv_size = 96, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 32
llama_kv_cache_init: layer 0: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 1: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 2: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 3: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 4: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 5: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 6: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 7: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 8: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 9: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 10: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 11: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 12: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 13: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 14: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 15: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 16: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 17: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 18: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 19: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 20: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 21: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 22: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 23: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 24: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 25: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 26: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 27: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 28: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 29: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 30: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 31: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: CPU KV buffer size = 12.00 MiB
llama_new_context_with_model: KV self size = 12.00 MiB, K (f16): 6.00 MiB, V (f16): 6.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.20 MiB
llama_new_context_with_model: CPU compute buffer size = 6.97 MiB
llama_new_context_with_model: graph nodes = 1030
llama_new_context_with_model: graph splits = 1
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 96
llama_new_context_with_model: n_ctx_per_seq = 96
llama_new_context_with_model: n_batch = 37
llama_new_context_with_model: n_ubatch = 37
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (96) < n_ctx_train (4096) -- the full capacity of the model will not be utilized
llama_kv_cache_init: kv_size = 96, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 32
llama_kv_cache_init: layer 0: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 1: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 2: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 3: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 4: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 5: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 6: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 7: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 8: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 9: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 10: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 11: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 12: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 13: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 14: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 15: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 16: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 17: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 18: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 19: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 20: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 21: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 22: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 23: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 24: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 25: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 26: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 27: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 28: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 29: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 30: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 31: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: CPU KV buffer size = 12.00 MiB
llama_new_context_with_model: KV self size = 12.00 MiB, K (f16): 6.00 MiB, V (f16): 6.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.20 MiB
llama_new_context_with_model: CPU compute buffer size = 7.81 MiB
llama_new_context_with_model: graph nodes = 1030
llama_new_context_with_model: graph splits = 1
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 96
llama_new_context_with_model: n_ctx_per_seq = 96
llama_new_context_with_model: n_batch = 58
llama_new_context_with_model: n_ubatch = 58
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (96) < n_ctx_train (4096) -- the full capacity of the model will not be utilized
llama_kv_cache_init: kv_size = 96, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 32
llama_kv_cache_init: layer 0: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 1: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 2: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 3: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 4: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 5: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 6: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 7: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 8: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 9: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 10: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 11: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 12: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 13: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 14: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 15: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 16: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 17: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 18: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 19: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 20: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 21: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 22: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 23: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 24: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 25: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 26: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 27: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 28: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 29: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 30: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 31: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: CPU KV buffer size = 12.00 MiB
llama_new_context_with_model: KV self size = 12.00 MiB, K (f16): 6.00 MiB, V (f16): 6.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.20 MiB
llama_new_context_with_model: CPU compute buffer size = 12.25 MiB
llama_new_context_with_model: graph nodes = 1030
llama_new_context_with_model: graph splits = 1
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 96
llama_new_context_with_model: n_ctx_per_seq = 96
llama_new_context_with_model: n_batch = 50
llama_new_context_with_model: n_ubatch = 50
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (96) < n_ctx_train (4096) -- the full capacity of the model will not be utilized
llama_kv_cache_init: kv_size = 96, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 32
llama_kv_cache_init: layer 0: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 1: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 2: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 3: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 4: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 5: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 6: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 7: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 8: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 9: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 10: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 11: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 12: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 13: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 14: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 15: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 16: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 17: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 18: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 19: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 20: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 21: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 22: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 23: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 24: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 25: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 26: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 27: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 28: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 29: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 30: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 31: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: CPU KV buffer size = 12.00 MiB
llama_new_context_with_model: KV self size = 12.00 MiB, K (f16): 6.00 MiB, V (f16): 6.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.20 MiB
llama_new_context_with_model: CPU compute buffer size = 10.56 MiB
llama_new_context_with_model: graph nodes = 1030
llama_new_context_with_model: graph splits = 1
llama_new_context_with_model: n_seq_max = 1
llama_new_context_with_model: n_ctx = 128
llama_new_context_with_model: n_ctx_per_seq = 128
llama_new_context_with_model: n_batch = 78
llama_new_context_with_model: n_ubatch = 78
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_new_context_with_model: n_ctx_per_seq (128) < n_ctx_train (4096) -- the full capacity of the model will not be utilized
llama_kv_cache_init: kv_size = 128, offload = 1, type_k = 'f16', type_v = 'f16', n_layer = 32
llama_kv_cache_init: layer 0: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 1: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 2: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 3: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 4: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 5: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 6: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 7: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 8: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 9: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 10: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 11: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 12: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 13: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 14: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 15: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 16: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 17: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 18: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 19: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 20: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 21: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 22: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 23: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 24: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 25: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 26: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 27: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 28: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 29: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 30: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: layer 31: n_embd_k_gqa = 1024, n_embd_v_gqa = 1024
llama_kv_cache_init: CPU KV buffer size = 16.00 MiB
llama_new_context_with_model: KV self size = 16.00 MiB, K (f16): 8.00 MiB, V (f16): 8.00 MiB
llama_new_context_with_model: CPU output buffer size = 0.20 MiB
llama_new_context_with_model: CPU compute buffer size = 16.47 MiB
llama_new_context_with_model: graph nodes = 1030
llama_new_context_with_model: graph splits = 1
Processed 5 items
Tokens per second: 1.30
2025-02-18 03:17:34 (72834): called boinc_finish(0)

</stderr_txt>
]]>
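A quick cross-check of the KV-cache figures reported above: with n_layer = 32, n_ctx = 96, n_embd_k_gqa = n_embd_v_gqa = 1024 and f16 (2-byte) cache entries, the K cache needs 32 × 96 × 1024 × 2 bytes = 6 MiB and the V cache the same, which matches the logged "KV self size = 12.00 MiB, K (f16): 6.00 MiB, V (f16): 6.00 MiB". The final context uses n_ctx = 128, scaling the total to 16 MiB, again as reported.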
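For anyone who wants to reproduce this kind of run outside BOINC, the sketch below shows how a GGUF model like the one above can be loaded and given a small CPU context with the llama.cpp C API. It is a minimal illustration, not the LLMentorGrid application's actual source: the function names follow the stock llama.cpp API of roughly this period (the log itself prints llama_new_context_with_model), the file name is taken from the log, and the batch size and thread count are assumptions chosen to mirror the first context above.

```c
/* Minimal sketch, not the project's application code.
 * Assumes the stock llama.cpp C API of roughly this era. */
#include <stdio.h>
#include "llama.h"

int main(void) {
    llama_backend_init();

    struct llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 0;  /* CPU-only run, as in the log */

    struct llama_model *model = llama_load_model_from_file(
        "minerva-7b-instruct-v1.0-q6_k.gguf", mparams);
    if (model == NULL) { fprintf(stderr, "failed to load model\n"); return 1; }

    struct llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx     = 96;  /* n_ctx of the first context in the log */
    cparams.n_batch   = 33;  /* n_batch of the first context in the log */
    cparams.n_ubatch  = 33;
    cparams.n_threads = 4;   /* assumption; CPU time vs. run time above suggests ~4 threads */

    struct llama_context *ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) { fprintf(stderr, "failed to create context\n"); return 1; }

    /* ... tokenize the prompt, call llama_decode() in a loop, sample tokens ... */

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```

Note that the log shows five such context creations with different n_batch values and "Processed 5 items", so the application presumably creates one context per work item, sized to that item's prompt.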