Print tensor dimensions

Juarez Bochi
2024-01-03 17:38:37 -05:00
parent b1f32c4088
commit a7e99574e2
2 changed files with 15 additions and 11 deletions

README.md

@@ -37,14 +37,14 @@ tokenizer.ggml.tokens: [array] [!, ", #, $, %, &, ', (, ), *, +, ,, -, ., /, 0,
 ... many more key-value pairs ...
-q8_0 tensor token_embd.weight @1806176, 131072000 weights, 139264000 bytes
-f32 tensor blk.0.attn_norm.bias @141070176, 2560 weights, 10240 bytes
-f32 tensor blk.0.attn_norm.weight @141080416, 2560 weights, 10240 bytes
-f32 tensor blk.0.attn_qkv.bias @141090656, 7680 weights, 30720 bytes
-q8_0 tensor blk.0.attn_qkv.weight @141121376, 19660800 weights, 20889600 bytes
-f32 tensor blk.0.attn_output.bias @162010976, 2560 weights, 10240 bytes
-q8_0 tensor blk.0.attn_output.weight @162021216, 6553600 weights, 6963200 bytes
-f32 tensor blk.0.ffn_up.bias @168984416, 10240 weights, 40960 bytes
+q8_0 tensor token_embd.weight @1806176, 131072000 weights, dims [2560,51200], 139264000 bytes
+f32 tensor blk.0.attn_norm.bias @141070176, 2560 weights, dims [2560], 10240 bytes
+f32 tensor blk.0.attn_norm.weight @141080416, 2560 weights, dims [2560], 10240 bytes
+f32 tensor blk.0.attn_qkv.bias @141090656, 7680 weights, dims [7680], 30720 bytes
+q8_0 tensor blk.0.attn_qkv.weight @141121376, 19660800 weights, dims [2560,7680], 20889600 bytes
+f32 tensor blk.0.attn_output.bias @162010976, 2560 weights, dims [2560], 10240 bytes
+q8_0 tensor blk.0.attn_output.weight @162021216, 6553600 weights, dims [2560,2560], 6963200 bytes
+f32 tensor blk.0.ffn_up.bias @168984416, 10240 weights, dims [10240], 40960 bytes
 ... many more tensors ...
 ```

gguf-tools.c

@@ -166,13 +166,17 @@ void gguf_tools_show(const char *filename) {
     gguf_tensor tensor;
     uint64_t params = 0;
     while (gguf_get_tensor(ctx,&tensor)) {
-        printf("%s tensor %.*s @%llu, %llu weights, %llu bytes\n",
+        printf("%s tensor %.*s @%llu, %llu weights, dims ",
             gguf_get_tensor_type_name(tensor.type),
             (int)tensor.namelen,
             tensor.name,
             tensor.offset,
-            tensor.num_weights,
-            tensor.bsize);
+            tensor.num_weights);
+        for (uint32_t j = 0; j < tensor.ndim; j++) {
+            printf("%s%llu",(j == 0) ? "[" : ",", tensor.dim[j]);
+        }
+        printf("], %llu bytes\n", tensor.bsize);
         params += tensor.num_weights;
     }
     printf("gguf-tools.info.parameters: %.02fB\n",