
Commit 4f9c43e

minor : warning fixes
1 parent 2c9380d commit 4f9c43e

File tree

examples/main/main.cpp
ggml-metal.m

2 files changed: +12 -7 lines changed


examples/main/main.cpp

Lines changed: 1 addition & 1 deletion
@@ -354,7 +354,7 @@ int main(int argc, char ** argv) {
         if ((int)embd.size() > max_embd_size) {
             auto skipped_tokens = embd.size() - max_embd_size;
             console_set_color(con_st, CONSOLE_COLOR_ERROR);
-            printf("<<input too long: skipped %" PRIu64 " token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
+            printf("<<input too long: skipped %zu token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
             console_set_color(con_st, CONSOLE_COLOR_DEFAULT);
             fflush(stdout);
             embd.resize(max_embd_size);
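
Here `skipped_tokens` is deduced from `embd.size()`, so its type is `size_t` (the vector's `size_type`). `PRIu64` expands to the specifier for `uint64_t`, which on some targets is a different underlying type than `size_t` (e.g. `unsigned long long` vs `unsigned long`) and trips `-Wformat`; `%zu` always matches `size_t`. A minimal standalone sketch of the corrected call, not taken from the repository build:

// Standalone sketch (not part of the commit): %zu matches size_t on every
// platform, whereas PRIu64 matches uint64_t and can trigger -Wformat when
// the two are distinct types.
#include <cstdio>
#include <vector>

int main() {
    std::vector<int> embd(8, 0);
    const size_t max_embd_size = 5;

    if (embd.size() > max_embd_size) {
        auto skipped_tokens = embd.size() - max_embd_size; // size_t
        std::printf("<<input too long: skipped %zu token%s>>\n",
                    skipped_tokens, skipped_tokens != 1 ? "s" : "");
        embd.resize(max_embd_size);
    }
    return 0;
}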

ggml-metal.m

Lines changed: 11 additions & 6 deletions
@@ -256,10 +256,10 @@ bool ggml_metal_add_buffer(
         if (ctx->buffers[ctx->n_buffers].metal == nil) {
             fprintf(stderr, "%s: failed to allocate '%-16s' buffer, size = %8.2f MB\n", __func__, name, aligned_size / 1024.0 / 1024.0);
             return false;
-        } else {
-            fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB\n", __func__, name, aligned_size / 1024.0 / 1024.0);
         }
 
+        fprintf(stderr, "%s: allocated '%-16s' buffer, size = %8.2f MB\n", __func__, name, aligned_size / 1024.0 / 1024.0);
+
         ++ctx->n_buffers;
     }
 
@@ -765,18 +765,23 @@ void ggml_metal_graph_compute(
                     } break;
                 case GGML_OP_ALIBI:
                     {
+                        if (encoder == nil) {
+                            encoder = [command_buffer computeCommandEncoder];
+                        }
+
                         GGML_ASSERT((src0t == GGML_TYPE_F32));
-                        const int n_past = ((int32_t *) src1->data)[0];
+
+                        const int n_past = ((int32_t *) src1->data)[0]; UNUSED(n_past);
                         const int n_head = ((int32_t *) src1->data)[1];
                         const float max_bias = ((float *) src1->data)[2];
+
                         if (__builtin_popcount(n_head) != 1) {
                             GGML_ASSERT(false && "only power-of-two n_head implemented");
                         }
+
                         const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
                         const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
-                        if (encoder == nil) {
-                            encoder = [command_buffer computeCommandEncoder];
-                        }
+
                         [encoder setComputePipelineState:ctx->pipeline_alibi_f32];
                         [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
                         [encoder setBuffer:id_dst offset:offs_dst atIndex:1];
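
In the ALIBI case, `n_past` is read from `src1` on the host side but not passed to the Metal kernel, so the local would trigger `-Wunused-variable`; `UNUSED(n_past)` marks it as intentionally unused. A minimal standalone sketch of the idiom, assuming a `(void)`-cast macro in the style ggml uses (the macro's actual definition is not shown in this diff):

// Standalone sketch (assumed macro definition): casting a value to void
// marks it as intentionally unused and silences -Wunused-variable without
// changing behavior.
#include <cstdio>

#define UNUSED(x) (void)(x)

static void read_alibi_params(const int * data) {
    const int n_past = data[0]; UNUSED(n_past); // read for symmetry, not used here
    const int n_head = data[1];
    std::printf("n_head = %d\n", n_head);
}

int main() {
    const int src1_data[2] = { 0, 8 };
    read_alibi_params(src1_data);
    return 0;
}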
