// AdventOfCode/2020/zig/src/day01.zig
const std = @import("std");
const tokenize = std.mem.tokenize;
const parseInt = std.fmt.parseInt;
const print = std.debug.print;
const data = @embedFile("../data/day01.txt");
/// Parses the embedded puzzle input (one integer per line) and prints the
/// answers for both parts.
pub fn main() !void {
    var it = tokenize(u8, data, "\n");

    // Fixed-capacity buffer; `i` tracks how many entries are filled.
    // AoC day-1 inputs are small, but guard the capacity anyway.
    var buffer: [1000]u16 = undefined;
    var i: usize = 0;
    // The while continue expression advances the count after each stored line.
    while (it.next()) |line| : (i += 1) {
        if (i >= buffer.len) return error.InputTooLarge;
        buffer[i] = try parseInt(u16, line, 10);
    }

    print("Part 1: {d}\n", .{part1(buffer[0..i])});
    print("Part 2: {d}\n", .{part2(buffer[0..i])});
}
/// Finds the pair of distinct entries that sums to 2020 and returns their
/// product. Returns 0 when no such pair exists (including empty input).
fn part1(input: []const u16) u32 {
    for (input) |a, i| {
        // Only look at later entries so each unordered pair is tried once.
        for (input[(i + 1)..]) |b| {
            // Sum in u32 so large u16 entries cannot trip the overflow trap.
            if (@as(u32, a) + b == 2020) {
                return @as(u32, a) * b;
            }
        }
    }
    return 0;
}
/// Finds the triple of distinct entries that sums to 2020 and returns their
/// product. Returns 0 when no such triple exists.
///
/// Fix: the innermost loop previously sliced `input[(j + 1)..]`, but `j`
/// indexes the sub-slice starting at i + 1 — so `c` could revisit entries at
/// or before `a`/`b` and reuse the same element twice. Slicing `rest` keeps
/// all three indices strictly increasing.
fn part2(input: []const u16) u32 {
    for (input) |a, i| {
        const rest = input[(i + 1)..];
        for (rest) |b, j| {
            for (rest[(j + 1)..]) |c| {
                // Sum in u32 so large u16 entries cannot trip the overflow trap.
                if (@as(u32, a) + b + c == 2020) {
                    // With the sum fixed at 2020, the product fits in u32.
                    return @as(u32, a) * b * c;
                }
            }
        }
    }
    return 0;
}