Day 18: Ram Run
Megathread guidelines
- Keep top level comments as only solutions, if you want to say something other than a solution put it in a new post. (replies to comments can be whatever)
- You can send code in code blocks by using three backticks, the code, and then three backticks or use something such as https://topaz.github.io/paste/ if you prefer sending it through a URL
FAQ
- What is this?: Here is a post with a large amount of details: https://programming.dev/post/6637268
- Where do I participate?: https://adventofcode.com/
- Is there a leaderboard for the community?: We have a programming.dev leaderboard with the info on how to join in this post: https://programming.dev/post/6631465
C#
I did flood fill because I normally just do Dijkstra for this kind of stuff. Watching the map print as it flooded was cool, but I had to disable it for part two as it was too slow. I just let it run while I made a cup of tea instead of doing a binary search.
spoiler
// Day 18 "RAM Run": wave-by-wave flood fill over a grid of string cells.
// Go()      — part 1: drops the first 1024 corrupted bytes onto a 71x71 grid,
//             then flood-fills from (0,0) toward (70,70).
// GoPart2() — part 2: linear scan; re-runs the flood fill after each additional
//             byte until FloodFillGrid reports int.MaxValue (exit unreachable).
// FloodFillGrid expands TRAVELED cells one ring per iteration; stepCounter is
// the number of expansion waves needed to reach the target, or int.MaxValue
// when no cell changed in a wave (blocked).
// NOTE(review): input lines are "col,row"; GetInputCoordinates swaps them into
// (row, column) via Coordinate(values[1], values[0]).
// NOTE(review): the input path "\\AdventOfCode2024\\..." is machine-specific.
namespace AoC2024.Day_18;
public class Day18 {
public const string CLEAR = "."; public const string BLOCKED = "#"; public const string TRAVELED = "O"; public void Go() { var testGrid = GenerateGrid(71, 71); PrintGrid(testGrid); var coords = GetInputCoordinates(File.ReadAllText("\\AdventOfCode2024\\AoC\\src\\Day_18\\input.txt")); testGrid = ApplyCoords(testGrid, coords.Take(1024).ToList(), BLOCKED); PrintGrid(testGrid); FloodFillGrid(testGrid, new Coordinate(0,0), new (70,70)); } public void GoPart2() { var testGrid = GenerateGrid(71, 71); PrintGrid(testGrid); var coords = GetInputCoordinates(File.ReadAllText("\\AdventOfCode2024\\AoC\\src\\Day_18\\input.txt")); for (int i = 1; i <= coords.Count; i++) { testGrid = ApplyCoords(testGrid, coords.Take(i).ToList(), BLOCKED); PrintGrid(testGrid); var result = FloodFillGrid(testGrid, new Coordinate(0,0), new (70,70)); if (result.Item2 == int.MaxValue) { var badCoord = coords[i - 1]; Console.WriteLine($"!!!!Coord Number: {i} with a value of ({badCoord.Column},{badCoord.Row}) IS A BLOCKER!!!!"); break; } else if (i%100 == 0) { var goodCoord = coords[i - 1]; Console.WriteLine($"Coord Number: {i} with a value of ({goodCoord.Column},{goodCoord.Row}) allows an exit in {result.Item2} steps"); } } } public List<List<string>> GenerateGrid(int width, int height) { var grid = new List<List<string>>(); for (int i = 0; i < height; i++) { var row = new List<string>(); for (int j = 0; j < width; j++) { row.Add(CLEAR); } grid.Add(row); } return grid; } public void PrintGrid(List<List<string>> grid) { // foreach (var row in grid) // { // foreach (var value in row) // { // Console.Write($" {value} "); // } // Console.WriteLine(); // } } public List<List<string>> ApplyCoords(List<List<string>> grid, List<Coordinate> coordinates, string value) { foreach (var coord in coordinates) { grid[coord.Row][coord.Column] = value; } return grid; } public List<Coordinate> GetInputCoordinates(string input) { var coords = new List<Coordinate>(); foreach (var pair in input.Split(Environment.NewLine, 
StringSplitOptions.RemoveEmptyEntries)) { var values = pair.Split(',', StringSplitOptions.RemoveEmptyEntries); coords.Add(new Coordinate(values[1], values[0])); } return coords; } public (List<List<string>>, int) FloodFillGrid(List<List<string>> grid, Coordinate start, Coordinate target) { var newGrid = grid.Select(list => new List<string>(list)).ToList(); var previousGrid = grid; newGrid[start.Row][start.Column] = TRAVELED; int stepCounter = 0; while (newGrid[target.Row][target.Column] != TRAVELED) { bool valueUpdatedInLoop = false; previousGrid = newGrid; newGrid = newGrid.Select(list => new List<string>(list)).ToList().ToList(); for (var row = 0; row < grid.Count; row++) { for (var column = 0; column < grid[row].Count; column++) { if (previousGrid[row][column] == CLEAR && IsAdjacentEqual(previousGrid, new Coordinate(row,column), TRAVELED)) { newGrid[row][column] = TRAVELED; valueUpdatedInLoop = true; } } } stepCounter++; if (!valueUpdatedInLoop) { return (newGrid,int.MaxValue); } //Console.WriteLine($"Step counter: {stepCounter}"); PrintGrid(newGrid); } return (newGrid,stepCounter); } private bool IsAdjacentEqual(List<List<string>> grid, Coordinate location, string value) { if (location.Row < grid.Count-1 && grid[location.Row+1][location.Column] == value) { return true; } if (location.Column < grid[0].Count-1 && grid[location.Row][location.Column+1] == value) { return true; } if (location.Row > 0 && grid[location.Row-1][location.Column] == value) { return true; } if (location.Column > 0 && grid[location.Row][location.Column-1] == value) { return true; } return false; } public struct Coordinate { public Coordinate(int row, int column) { Row = row; Column = column; } public Coordinate(string row, string column) { Row = int.Parse(row); Column = int.Parse(column); } public int Row { get; set; } public int Column { get; set; } }
}
Haskell
Not really happy with performance, binary search would speed this up a bunch, takes about 1.3 seconds.
Update: Binary search got it to 960 ms.
Code
-- Flood-fill BFS over a Set of visited coordinates.
-- `shortest (w, h) corrupted` grows the visited set one ring at a time from
-- the exit corner (w-1, h-1) until (0, 0) is reached, returning the number of
-- growth steps; Nothing when the set stops growing (exit unreachable).
-- parse reads each "x,y" line as a Haskell pair by wrapping it in parens and
-- calling read. Part 2 answer: removes bytes from the end (iterate init) while
-- no path exists; the blocking byte is recovered with zipWith const against
-- the reversed list, then printed without the surrounding parens (init . tail).
import Data.Maybe import qualified Data.Set as S type Coord = (Int, Int) parse :: String -> [Coord] parse = map (read . ('(' :) . (++ ")")) . takeWhile (not . null) . lines shortest :: Coord -> [Coord] -> Maybe Int shortest (x0, y0) corrupted' = go $ S.singleton (x0 - 1, y0 - 1) where corrupted = S.fromList corrupted' inside (x, y) | x < 0 = False | y < 0 = False | x0 <= x = False | y0 <= y = False | otherwise = True grow cs = S.filter inside $ S.unions $ cs : [ S.mapMonotonic (\(x, y) -> (x + dx, y + dy)) cs | (dx, dy) <- [(-1, 0), (0, -1), (0, 1), (1, 0)] ] go visited | (0, 0) `S.member` visited = Just 0 | otherwise = case grow visited S.\\ corrupted of visited' | S.size visited == S.size visited' -> Nothing | otherwise -> succ <$> go visited' main :: IO () main = do rs <- parse <$> getContents let size = (71, 71) print $ fromJust $ shortest size $ take 1024 rs putStrLn $ init $ tail $ show $ last $ zipWith const (reverse rs) $ takeWhile (isNothing . shortest size) $ iterate init rs
Faster (binary search)
-- Same flood-fill BFS as above, but part 2 now uses `solve2`, a binary search
-- over the prefix length of the corrupted-byte list: when `shortest` on the
-- first x bytes returns Nothing the blocker is at index < x (search [a, x]),
-- otherwise it is at index >= x (search [x, z]). Terminates when the interval
-- has shrunk to one candidate (succ a == z) and returns that byte.
import Data.Maybe import qualified Data.Set as S type Coord = (Int, Int) parse :: String -> [Coord] parse = map (read . ('(' :) . (++ ")")) . takeWhile (not . null) . lines shortest :: Coord -> [Coord] -> Maybe Int shortest (x0, y0) corrupted' = go $ S.singleton (x0 - 1, y0 - 1) where corrupted = S.fromList corrupted' inside (x, y) | x < 0 = False | y < 0 = False | x0 <= x = False | y0 <= y = False | otherwise = True grow cs = S.filter inside $ S.unions $ cs : [ S.mapMonotonic (\(x, y) -> (x + dx, y + dy)) cs | (dx, dy) <- [(-1, 0), (0, -1), (0, 1), (1, 0)] ] go visited | (0, 0) `S.member` visited = Just 0 | otherwise = case grow visited S.\\ corrupted of visited' | S.size visited == S.size visited' -> Nothing | otherwise -> succ <$> go visited' solve2 :: Coord -> [Coord] -> Coord solve2 r0 corrupted = go 0 $ length corrupted where go a z | succ a == z = corrupted !! a | otherwise = let x = (a + z) `div` 2 in case shortest r0 $ take x corrupted of Nothing -> go a x Just _ -> go x z main :: IO () main = do rs <- parse <$> getContents let size = (71, 71) print $ fromJust $ shortest size $ take 1024 rs putStrLn $ init $ tail $ show $ solve2 size rs
Haskell
solution
-- BFS written as a MaybeT-over-RWS computation:
--   Reader  = the corrupted-byte set,
--   State   = (seen set, work queue of (cost, position)),
--   result  = cost when the far corner is reached; pattern-match failure on an
--             empty queue aborts via MaybeT (Nothing = no path).
-- `search` binary-searches prefix lengths: checks both m-1 and m so the exact
-- first failing prefix (pred m) can be reported directly.
-- NOTE(review): part2 indexes the byte list with (!!), so it reports the byte
-- at the index returned by `search`.
import Control.Arrow import Control.Monad import Control.Monad.RWS import Control.Monad.Trans.Maybe import Data.Array (inRange) import Data.Char import Data.Set qualified as S import Text.ParserCombinators.ReadP hiding (get) parse = fst . last . readP_to_S (endBy ((,) <$> num <*> (char ',' *> num)) $ char '\n') where num = read <$> munch1 isDigit bounds = ((0, 0), (70, 70)) bfs :: MaybeT (RWS (S.Set (Int, Int)) () (S.Set (Int, Int), [(Int, (Int, Int))])) Int bfs = do (seen, (c, x) : xs) <- get modify . second $ const xs isCorrupt <- asks (S.member x) when (not (x `S.member` seen) && not isCorrupt && inRange bounds x) $ modify (S.insert x *** (++ ((succ c,) <$> neighbors x))) if x == snd bounds then return c else bfs neighbors (x, y) = [(succ x, y), (pred x, y), (x, succ y), (x, pred y)] findPath = fst . flip (evalRWS (runMaybeT bfs)) (mempty, [(0, (0, 0))]) . S.fromList part1 = findPath . take 1024 search corrupt = go 0 (length corrupt) where go l r = case (findPath $ take (pred m) corrupt, findPath $ take m corrupt) of (Just _, Just _) -> go m r (Just _, Nothing) -> Just $ pred m (Nothing, Nothing) -> go l m where m = (l + r) `div` 2 part2 = liftM2 fmap (!!) search main = getContents >>= print . (part1 &&& part2) . parse
Python
Nobody posted a solution in python for today???
Here is my solver with a little extra to print the Part 2 path. you can totally remove/comment out the printing out of the part 2 path, but it is neat to look at!
Execution time: ~25 milliseconds + an unnecessary ~7 ms to print part 2 path
This is the one where you don’t have it print out the Part 2 path and smaller: [ Paste ]
here is also a faster version that uses binary search instead. but its only a few milliseconds faster.
Execution time: ~21 milliseconds + an unnecessary ~7 ms to print part 2 path
Uiua
I didn’t think I could do this in Uiua this morning, but I gave it some thought while walking the dog and managed to wrangle the data into shape tonight. I factored out the binary chop as that seems like another useful tool to have up my sleeve.
EDIT: goddammit, Kai literally snuck a new RC release out just after I posted this, with a breaking change to how
path
works. Updated version below.
# Day 18 in Uiua, sample data inlined ("x,y" pairs parsed into Data).
Data ← ≡◇(⊜⋕⊸≠@,)°/$"_\n_" "5,4\n4,2\n4,5\n3,0\n2,1\n6,3\n2,4\n1,5\n0,6\n3,3\n2,6\n5,1\n1,2\n5,5\n2,5\n6,5\n1,4\n0,4\n6,4\n1,1\n6,1\n1,0\n0,5\n1,6\n2,0"
# Sample exit corner and part-1 byte count.
End ← 6_6
Count ← 12
# Four cardinal direction offsets.
D₄ ← [1_0 ¯1_0 0_1 0_¯1]
# Neighbor candidates filtered to in-bounds, non-corrupted cells
# (NOTE(review): assumes ∊ tests membership in the dropped-byte prefix — confirm).
Valid ← ▽¬⊸∊:▽⊸(≡/××⊃(≤⊢End|≥0))+D₄¤
# Shortest-path length via the builtin `path`; ∞ when no path exists (⍣ fallback).
BestLen ← ⍣(-1⧻⊢path(Valid|≍End)0_0↙:Data|∞)
# Binary chop over a caller-supplied predicate (here: "BestLen is ∞").
Chop! ← ◌⍢(⨬(⊙◌+1|⊙⊙◌:):⟜^0⌊÷2+,,|>)
&p BestLen Count
&p/$"_,_"⊏:Data-1Chop!(=∞BestLen)Count ⧻Data
spoiler
C#
Part 1 was straightforward Dijkstra with a cost of 1 for each move. Part 2 was a binary search from the number of corrupted bytes given to us for Part 1 (where we know a path can be found) to the total number of corrupted bytes.
// Day 18: breadth-first search over a grid with corrupted cells.
// Part1 — BFS (unit cost per move) from (0,0) to the far corner after the
//         first `num` bytes have fallen; returns -1 when the exit is
//         unreachable (queue drained without reaching `end`).
// Part2 — binary search over the fallen-byte count for the first count that
//         makes Part1 return -1; reports that byte as "col,row".
// NOTE(review): Point, EnqueueRange, GetCardinalMoves and IsInBounds come from
// the project's Common library (not visible here); Point appears to be
// (Row, Col) given the ReceiveInput parsing order (p[1], then p[0]).
using System.Collections.Immutable; using System.Diagnostics; using Common; namespace Day18; static class Program { static void Main() { var start = Stopwatch.GetTimestamp(); var sampleInput = ReceiveInput("sample.txt"); var sampleBounds = new Point(7,7); var programInput = ReceiveInput("input.txt"); var programBounds = new Point(71, 71); Console.WriteLine($"Part 1 sample: {Part1(sampleInput, 12, sampleBounds)}"); Console.WriteLine($"Part 1 input: {Part1(programInput, 1024, programBounds)}"); Console.WriteLine($"Part 2 sample: {Part2(sampleInput, 12, sampleBounds)}"); Console.WriteLine($"Part 2 input: {Part2(programInput, 1024, programBounds)}"); Console.WriteLine($"That took about {Stopwatch.GetElapsedTime(start)}"); } static int Part1(ImmutableArray<Point> input, int num, Point bounds) => FindBestPath( new Point(0, 0), new Point(bounds.Row - 1, bounds.Col - 1), input.Take(num).ToImmutableHashSet(), bounds); static object Part2(ImmutableArray<Point> input, int num, Point bounds) { var start = num; var end = input.Length; while (start != end) { var check = (start + end) / 2; if (Part1(input, check, bounds) < 0) end = check; else start = check + 1; } var lastPoint = input[start - 1]; return $"{lastPoint.Col},{lastPoint.Row}"; } record struct State(Point Location, int Steps); static int FindBestPath(Point start, Point end, ISet<Point> corruptedBytes, Point bounds) { var seenStates = new Dictionary<Point, int>(); var queue = new Queue<State>(); queue.Enqueue(new State(start, 0)); while (queue.TryDequeue(out var state)) { if (state.Location == end) return state.Steps; if (seenStates.TryGetValue(state.Location, out var bestSteps)) { if (state.Steps >= bestSteps) continue; } seenStates[state.Location] = state.Steps; queue.EnqueueRange(state.Location.GetCardinalMoves() .Where(p => p.IsInBounds(bounds) && !corruptedBytes.Contains(p)) .Select(p => new State(p, state.Steps + 1))); } return -1; } static ImmutableArray<Point> ReceiveInput(string file) => 
File.ReadAllLines(file) .Select(l => l.Split(',')) .Select(p => new Point(int.Parse(p[1]), int.Parse(p[0]))) .ToImmutableArray(); }
Rust
Naive approach running BFS after every dropped byte after 1024. Still runs in 50ms. This could be much optimized by using binary search to find the first blocked round and using A* instead of BFS, but I didn’t feel like doing more today.
Solution
use std::collections::VecDeque; use euclid::{default::*, vec2}; fn parse(input: &str) -> Vec<Point2D<i32>> { input .lines() .map(|l| { let (x, y) = l.split_once(',').unwrap(); Point2D::new(x.parse().unwrap(), y.parse().unwrap()) }) .collect() } const BOUNDS: Rect<i32> = Rect::new(Point2D::new(0, 0), Size2D::new(71, 71)); const START: Point2D<i32> = Point2D::new(0, 0); const TARGET: Point2D<i32> = Point2D::new(70, 70); const N_BYTES: usize = 1024; const DIRS: [Vector2D<i32>; 4] = [vec2(1, 0), vec2(0, 1), vec2(-1, 0), vec2(0, -1)]; fn adj( field: &[[bool; BOUNDS.size.width as usize]], v: Point2D<i32>, ) -> impl Iterator<Item = Point2D<i32>> + use<'_> { DIRS.iter() .map(move |&d| v + d) .filter(|&next| BOUNDS.contains(next) && !field[next.y as usize][next.x as usize]) } fn find_path(field: &[[bool; BOUNDS.size.width as usize]]) -> Option<u32> { let mut seen = [[false; BOUNDS.size.width as usize]; BOUNDS.size.height as usize]; let mut q = VecDeque::from([(START, 0)]); seen[START.y as usize][START.x as usize] = true; while let Some((v, dist)) = q.pop_front() { for w in adj(field, v) { if w == TARGET { return Some(dist + 1); } if !seen[w.y as usize][w.x as usize] { seen[w.y as usize][w.x as usize] = true; q.push_back((w, dist + 1)); } } } None } fn part1(input: String) { let bytes = parse(&input); let mut field = [[false; BOUNDS.size.width as usize]; BOUNDS.size.height as usize]; for b in &bytes[..N_BYTES] { field[b.y as usize][b.x as usize] = true; } println!("{}", find_path(&field).unwrap()); } fn part2(input: String) { let bytes = parse(&input); let mut field = [[false; BOUNDS.size.width as usize]; BOUNDS.size.height as usize]; for (i, b) in bytes.iter().enumerate() { field[b.y as usize][b.x as usize] = true; // We already know from part 1 that below N_BYTES there is a path if i > N_BYTES && find_path(&field).is_none() { println!("{},{}", b.x, b.y); break; } } } util::aoc_main!();
Also on github
C
Flood fill for part 1. Little tired so for part 2 I just retry the flood fill every step. Slow by C standards (2s) but I’ll let it brew and come back to it later.
Code
/* Day 18, parts 1+2: iterative flood fill storing distance values in-place.
 * Grid g has a CORR (-1) sentinel border; interior cells hold 0 (unvisited)
 * or the 1-based step distance from the start at g[1][1].
 * flood(): resets previous distances (>1) to 0, then relaxes every interior
 * cell from its 4 neighbors repeatedly until a full sweep changes nothing
 * (dirty == 0). Whole-grid sweeps per dropped byte -- slow but simple.
 * main(): drops bytes one at a time (offset +1 for the border), re-flooding
 * after each; records the part-1 distance after NCORR bytes and stops when
 * the exit cell g[GZ-2][GZ-2] is no longer reachable (<= 0).
 * NOTE(review): MIN and DISCARD are presumably macros from "common.h". */
#include "common.h" #define SAMPLE 0 #define GZ (SAMPLE ? 9 : 73) #define NCORR (SAMPLE ? 12 : 1024) #define CORR -1 int g[GZ][GZ]; static void flood(void) { int x,y, dirty=1, lo; for (y=1; y<GZ-1; y++) for (x=1; x<GZ-1; x++) if (g[y][x] > 1) g[y][x] = 0; while (dirty) { dirty = 0; for (y=1; y<GZ-1; y++) for (x=1; x<GZ-1; x++) { if (g[y][x] == CORR) continue; lo = INT_MAX; if (g[y-1][x] > 0) lo = MIN(lo, g[y-1][x]); if (g[y+1][x] > 0) lo = MIN(lo, g[y+1][x]); if (g[y][x-1] > 0) lo = MIN(lo, g[y][x-1]); if (g[y][x+1] > 0) lo = MIN(lo, g[y][x+1]); if (lo != INT_MAX && (!g[y][x] || g[y][x]>lo+1)) { dirty=1; g[y][x] = lo+1; } } } } int main(int argc, char **argv) { int p1=0, x,y, i; if (argc > 1) DISCARD(freopen(argv[1], "r", stdin)); for (i=0; i<GZ; i++) g[0][i] = g[GZ-1][i] = g[i][0] = g[i][GZ-1] = CORR; g[1][1] = 1; for (i=0; scanf(" %d,%d", &x, &y) == 2; i++) { assert(x >= 0); assert(x < GZ-2); assert(y >= 0); assert(y < GZ-2); g[y+1][x+1] = CORR; flood(); if (i==NCORR-1) p1 = g[GZ-2][GZ-2]-1; if (g[GZ-2][GZ-2] <= 0) { printf("18: %d %d,%d\n", p1, x,y); return 0; } } assert(!"no solution"); return -1; }
Part 2 can be faster if you iteratively remove blocks until there is a path. This is because it is faster to fail to find a path, and the flood fill algorithm does not need to fill as many spots when the map is filled up with more blocks! This drops the part 2 solve to a few milliseconds. Others have taken the binary search option, which is faster still.
Thanks, that’s exactly the sort of insight that I was too tired to have at that point 😅
The other thing I had to change was to make it recursive rather than iterating over the full grid - the latter is fast for large update, but very wasteful for local updates, like removing the points. Virtually instant now!
Code
/* Faster variant: recursive, local flood updates instead of whole-grid sweeps.
 * All npt bytes are read and placed first; then bytes are removed in reverse
 * order (from the end of the list down to P1STEP) and flood() repairs the
 * distances locally around each removed cell, recursing only into neighbors
 * that actually improved. The first removal that makes the exit g[GZ-2][GZ-2]
 * reachable gives the part-2 answer (stored once in p2); the part-1 distance
 * is read after the loop has rolled back to exactly P1STEP bytes.
 * NOTE(review): recursion depth is bounded by the grid area (73x73), fine on
 * default stacks. MIN/DISCARD presumably come from "common.h". */
#include "common.h" #define SAMPLE 0 #define PTZ 3600 #define GZ (SAMPLE ? 9 : 73) #define P1STEP (SAMPLE ? 12 : 1024) #define CORR -1 static int g[GZ][GZ]; static void flood(int x, int y) { int lo=INT_MAX; if (x <= 0 || x >= GZ-1 || y <= 0 || y >= GZ-1 || g[y][x] == CORR) return; if (g[y-1][x] > 0) lo = MIN(lo, g[y-1][x] +1); if (g[y+1][x] > 0) lo = MIN(lo, g[y+1][x] +1); if (g[y][x-1] > 0) lo = MIN(lo, g[y][x-1] +1); if (g[y][x+1] > 0) lo = MIN(lo, g[y][x+1] +1); if (lo != INT_MAX && (!g[y][x] || g[y][x] > lo)) { g[y][x] = lo; flood(x, y-1); flood(x, y+1); flood(x-1, y); flood(x+1, y); } } int main(int argc, char **argv) { static int xs[PTZ], ys[PTZ]; static char p2[32]; int p1=0, npt=0, i; if (argc > 1) DISCARD(freopen(argv[1], "r", stdin)); for (i=0; i<GZ; i++) g[0][i] = g[GZ-1][i] = g[i][0] = g[i][GZ-1] = CORR; for (npt=0; npt<PTZ && scanf(" %d,%d", xs+npt, ys+npt)==2; npt++) { assert(xs[npt] >= 0); assert(xs[npt] < GZ-2); assert(ys[npt] >= 0); assert(ys[npt] < GZ-2); } assert(npt < PTZ); for (i=0; i<npt; i++) g[ys[i]+1][xs[i]+1] = CORR; g[1][1] = 1; flood(2, 1); flood(1, 2); for (i=npt-1; i >= P1STEP; i--) { g[ys[i]+1][xs[i]+1] = 0; flood(xs[i]+1, ys[i]+1); if (!p2[0] && g[GZ-2][GZ-2] > 0) snprintf(p2, sizeof(p2), "%d,%d", xs[i],ys[i]); } p1 = g[GZ-2][GZ-2]-1; printf("18: %d %s\n", p1, p2); return 0; }
Wooo! Instant is so good, I knew you could do it! When I see my Python script getting close to 20 ms, I usually expect my fellow optimized-language peers to be doing it faster. Pretty surprised to see so many varying solutions that ended up being a little slower just because people didn't realize the potential speed gain from failing to find a path.
The first part has a guaranteed path! if you think about a binary search, when there is a path then the block is higher up the list, so we ignore the lower blocks in the list. move to the next “midpoint” to test and just fill and remove blocks as we go to each mid point. So I took the first part as the lower point and moved to a mid point above that.
at least that is how I saw it, when I first looked, but binary search is a little harder to think of than just a simple for loop from the end of the list back. Yet I still got it done! Even included a dead end filler that takes 7 ms to show the final path for Part 2, it was not needed but was a neat inclusion!
Awesome! I understood the idea behind the binary search but thought it wasn’t a good fit for the flood fill. As opposed to something like A* it will give you reachability and cost for every cell (at a cost), but that’s no use when you do repeated searches that are only meant to find a single path. So I was very happy with your suggestion, it fits better with the strengths.
“Virtually instant” btw is measured 0.00 by
time
. I like it when things are fast but I also prefer simpler approaches (that is: loops and arrays) over the really optimized fast stuff. People do really amazing things but the really clever algorithms lean on optimized generic data structures that C lacks. It’s fun though to see how far you can drive loops and arrays! Perhaps next year I’ll pick a compiled language with a rich data structure library and really focus on effectively applying good algorithms and appropriate data structures. Btw, how do you measure performance? I see a lot of people including timing things in their programs but I can’t be bothered. Some people also exclude parsing — which wouldn’t work for me because I try to process the input immediately, if possible.
On the topic about flood fill and other path finding algorithms. I do think your method is quite fast. However, I saw on reddit someone saw Part 2 as more of a tree phenomena called “Crown shyness” where two trees limit their growth to prevent touching each other.
So the idea behind the “Crown shyness” approach is that when you add a block, you find which corner (top right or bottom left) it is connected to (or in union with), until one block connects both corners. So instead of path finding, you are connecting walls to one side. This is also called the “Union-Find algorithm”, and the optimization is that when a block drops, you calculate what it is connected with. You can find some visualizations of it, which make it easier to see. This method is by far more performant, because you can be sure that with all the blocks placed the blocks are all in one union, but as you remove blocks you eventually have two unions appear! That block would be the solution.
Your flood fill is mimicking this closely but instead of union of walls, it is finding if there is a union between the start and end nodes, or top left node with bottom right node. When that wall that blocks the path is placed, it will create two unions for the start and end node.
I think I saw the same! At first I thought it requires pathfinding to see what nodes are connected to the wall, but then someone pointed at disjoint sets and just a glance at Wikipedia made it click right away. What an ingeniously simple but useful data structure! Maybe I’ll reimplement my solution with that - mostly as an exercise for disjoint sets and finding a convenient representation for that in C.
That would be cool af to see in C, let me know if you do. In python, we can built the two sets, and have the convenient function call of
set( [iterate-able object/list/set] ).intersection( [iterate-able object/list/set] )
to see if the two sets touch/intersect, as the block that connects the two sets would be in both sets/lists. The way I would build the two sets would be to start at the final state with all blocks placed and just union-find all the blocks. When we find that a block appears in both sets, we stop that union and proceed with the other unions until we find all the blocks that would appear in both sets. Then we iteratively find the first block that would appear in both sets. In Python the intersection call returns a set, so you can stack the intersect call, like so:
set( [top right union set] ).intersection( [bottom left union set] ).intersection( [ one item list with the current block we are checking ] )
technically you can just save the intersections of the first two sets to save a little time because they would not change.I didn’t think of this until recently, but I also think it is such a simple and elegant solution. Live and learn! 😄
hope you are having a good holiday season!
ah, I exclude loading and reading the file. but since you are pasting it from pasting into the terminal, that is alright.
My main gripe is that I am looking at the performance of the algorithm/functions over the performance of the disk access and read, the startup/end overhead. Python is notorious in having overhead during startup for loading the code and before execution occurs. Why should I measure the performance of the language too harshly? I rather look at how my code performs. In windows, the python overhead adds 30-40 ms, while on Linux, it performs faster with only an overhead of consistent 20 ms. Though, that is just without importing heavy or many libraries. If startup is a concern, then a precompiled non-interpreted language is a better option.(along with the other benefits) This is my reasoning for only measuring my algorithm. I do include parsing the input as that is part of the challenge, but I do see there are reasons not to do that. when you are looking for code that is performant, you want to scientifically remove too many variables. If you are to reuse some code, lets say the input to that function is already parsed and then you just want performance. However, I do measure my parsing because for the AoC, I want to think about what would be faster to parsing and what is a bad parsing.
For AoC, I find a language overhead is not part of the challenge. we should rather learn new languages when we want or use what is comfortable. however, languages like Uiua with a lot of specialty functions is just not worth measuring performance as the main code is just a simple “function call”
I am sure there is a python package/module that includes a fast path finder, too. I just want to challenge myself mostly to learn instead. however, I am finding I would need to start learning rust instead, because my python skills are starting to plateau.
Dart
I knew keeping my search code from day 16 would come in handy, I just didn’t expect it to be so soon.
For Part 2 it finds that same path (laziness on my part), then does a simple binary chop to home in on the last valid path. (was
then searches for the first block that will erm block that path, and re-runs the search after that block has dropped, repeating until blocked. Simple but okay.)90 lines, half of which is my copied search method.
Runs in a couple of seconds which isn’t great, but isn’t bad. Binary chop dropped it to 200ms.
// solve(): shared driver for both parts.
// Part 1: A* (via aStarSearch, heuristic is constant 1 so effectively
// Dijkstra/BFS) from (0,0) to `end` with the first `count` blocks placed;
// returns the cost.
// Part 2: binary chop on the number of placed blocks — cost.first > 0 means a
// path still exists, so search upward; returns the first blocking byte as
// "x,y" (blocks[lo - 1]).
// NOTE(review): fNext/fHeur/fAtEnd close over blocksSofar, so reassigning it
// inside the chop loop changes what aStarSearch sees on each iteration.
import 'dart:math'; import 'package:collection/collection.dart'; import 'package:more/more.dart'; var d4 = <Point<num>>[Point(0, 1), Point(0, -1), Point(1, 0), Point(-1, 0)]; solve(List<String> lines, int count, Point end, bool inPart1) { var blocks = (lines .map((e) => e.split(',').map(int.parse).toList()) .map((p) => Point<num>(p[0], p[1]))).toList(); var blocksSofar = blocks.take(count).toSet(); var start = Point(0, 0); Map<Point, num> fNext(Point here) => { for (var d in d4 .map((d) => d + here) .where((e) => e.x.between(start.x, end.x) && e.y.between(start.y, end.y) && !blocksSofar.contains(e)) .toList()) d: 1 }; int fHeur(Point here) => 1; bool fAtEnd(Point here) => here == end; var cost = aStarSearch<Point>(start, fNext, fHeur, fAtEnd); if (inPart1) return cost.first; var lo = count, hi = blocks.length; while (lo <= hi) { var mid = (lo + hi) ~/ 2; blocksSofar = blocks.take(mid).toSet(); cost = aStarSearch<Point>(start, fNext, fHeur, fAtEnd); (cost.first > 0) ? lo = mid + 1 : hi = mid - 1; } var p = blocks[lo - 1]; return '${p.x},${p.y}'; } part1(lines, count, end) => solve(lines, count, end, true); part2(lines, count, end) => solve(lines, count, end, false);
That search method
// Generic search reused from day 16: behaves as Dijkstra when fHeur is
// constant, A* otherwise. cameFrom is a multimap so multiple equal-cost
// predecessors can be kept (when multiplePaths is set); routes() rebuilds
// every path back to `start` recursively. Returns (-1, []) when no state
// satisfying fAtEnd was ever reached.
/// Returns cost to destination, plus list of routes to destination. /// Does Dijkstra/A* search depending on whether heuristic returns 1 or /// something better. (num, List<List<T>>) aStarSearch<T>(T start, Map<T, num> Function(T) fNext, int Function(T) fHeur, bool Function(T) fAtEnd, {multiplePaths = false}) { var cameFrom = SetMultimap<T, T>.fromEntries([MapEntry(start, start)]); var ends = <T>{}; var front = PriorityQueue<T>((a, b) => fHeur(a).compareTo(fHeur(b))) ..add(start); var cost = <T, num>{start: 0}; while (front.isNotEmpty) { var here = front.removeFirst(); if (fAtEnd(here)) { ends.add(here); continue; } var ns = fNext(here); for (var n in ns.keys) { var nCost = cost[here]! + ns[n]!; if (!cost.containsKey(n) || nCost < cost[n]!) { cost[n] = nCost; front.add(n); cameFrom.removeAll(n); cameFrom[n].add(here); } if (multiplePaths && cost[n] == nCost) cameFrom[n].add(here); } } Iterable<List<T>> routes(T h) sync* { if (h == start) { yield [h]; return; } for (var p in cameFrom[h]) { yield* routes(p).map((e) => e + [h]); } } if (ends.isEmpty) return (-1, []); var minCost = ends.map((e) => cost[e]!).min; ends = ends.where((e) => cost[e]! == minCost).toSet(); return (minCost, ends.fold([], (s, t) => s..addAll(routes(t).toList()))); }
Haskell
I did an easy optimization for part 2, but it’s not too slow without.
Solution
-- findRoute: layer-by-layer BFS over Maps from position to the path taken so
-- far; returns the path to `goal` (Just) or Nothing when the frontier empties.
-- dropAndFindRoutes: drops bytes one at a time after `skip`, and -- the easy
-- part-2 optimization -- only recomputes the route when the newly fallen byte
-- actually lies on the current path (p `elem` r); otherwise the cached route
-- is carried forward unchanged.
-- main: part 1 is the length of the first route; part 2 is the first byte
-- whose drop leaves no route (find (isNothing . snd)).
import Control.Monad import Data.Ix import Data.List import Data.Map qualified as Map import Data.Maybe import Data.Set (Set) import Data.Set qualified as Set readInput :: String -> [(Int, Int)] readInput = map readCoords . lines where readCoords l = let (a, _ : b) = break (== ',') l in (read a, read b) findRoute :: (Int, Int) -> Set (Int, Int) -> Maybe [(Int, Int)] findRoute goal blocked = go Set.empty (Map.singleton (0, 0) []) where go seen paths | Map.null paths = Nothing | otherwise = (paths Map.!? goal) `mplus` let seen' = Set.union seen (Map.keysSet paths) paths' = (`Map.withoutKeys` seen') . foldl' (flip $ uncurry Map.insert) Map.empty . concatMap (\(p, path) -> (,p : path) <$> step p) $ Map.assocs paths in go seen' paths' step (x, y) = do (dx, dy) <- [(0, -1), (0, 1), (-1, 0), (1, 0)] let p' = (x + dx, y + dy) guard $ inRange ((0, 0), goal) p' guard $ p' `Set.notMember` blocked return p' dropAndFindRoutes goal skip bytes = let drops = drop skip $ zip bytes $ drop 1 $ scanl' (flip Set.insert) Set.empty bytes in zip (map fst drops) $ scanl' go (findRoute goal (snd $ head drops)) $ tail drops where go route (p, blocked) = do r <- route if p `elem` r then findRoute goal blocked else route main = do input <- readInput <$> readFile "input18" let routes = dropAndFindRoutes (70, 70) 1024 input print $ length <$> (snd . head) routes print $ fst <$> find (isNothing . snd) routes
Javascript
Reused my logic from Day 16. For part two I manually changed the bytes (
i
on line 271) to narrow in on a solution faster, but this solution should solve it eventually.https://blocks.programming.dev/Zikeji/c8fdef54f78c4fb6a79cf1dc5551ff4d
Haskell
Wasn’t there a pathfinding problem just recently?
Edit: Optimization to avoid recalculating paths all the time
Haskell with lambdas
-- Dijkstra with the priority queue encoded as Map Int (Set (Int, Int)): the
-- key is the path cost, the set holds all frontier positions at that cost.
-- The `walls` map doubles as the explored record (Cost = Wall | Explored n).
-- findPath walks backwards from the exit, at each step picking a neighbor
-- explored at cost n-1. firstFailing (part 2) only re-runs Dijkstra when the
-- newly fallen byte lies on the current path; a returned cost of -1 signals
-- "no route", and the last dropped byte is then the answer.
import Control.Arrow import Control.Monad import Data.Bifunctor hiding (first, second) import Data.Set (Set) import Data.Map (Map) import qualified Data.List as List import qualified Data.Set as Set import qualified Data.Map as Map import qualified Data.Maybe as Maybe parse :: String -> [(Int, Int)] parse = map (join bimap read) . map (break (== ',') >>> second (drop 1)) . filter (/= "") . lines lowerBounds = (0, 0) exitPosition = (70, 70) initialBytes = 1024 adjacent (py, px) = Set.fromDistinctAscList [(py-1, px), (py, px-1), (py, px+1), (py+1, px)] data Cost = Wall | Explored Int deriving (Show, Eq) inBounds (py, px) | py < 0 = False | px < 0 = False | py > fst exitPosition = False | px > snd exitPosition = False | otherwise = True dijkstra :: Map Int (Set (Int, Int)) -> Map (Int, Int) Cost -> (Int, (Int, Int), Map (Int, Int) Cost) dijkstra queue walls | Map.null queue = (-1, (-1, -1), Map.empty) | minPos == exitPosition = (minKey, minPos, walls) | Maybe.isJust (walls Map.!? minPos) = dijkstra remainingQueue' walls | not . inBounds $ minPos = dijkstra remainingQueue' walls | otherwise = dijkstra neighborQueue updatedWalls where ((minKey, posSet), remainingQueue) = Maybe.fromJust . Map.minViewWithKey $ queue (minPos, remainingPosSet) = Maybe.fromJust . Set.minView $ posSet remainingQueue' = if not . 
Set.null $ remainingPosSet then Map.insert minKey remainingPosSet remainingQueue else remainingQueue neighborQueue = List.foldl (\ m n -> Map.insertWith (Set.union) neighborKey (Set.singleton n) m) remainingQueue' neighbors updatedWalls = Map.insert minPos (Explored minKey) walls neighborKey = minKey + 1 neighbors = adjacent minPos isExplored :: Cost -> Bool isExplored Wall = False isExplored (Explored _) = True findPath :: Int -> (Int, Int) -> Map (Int, Int) Cost -> [(Int, Int)] findPath n p ts | p == lowerBounds = [lowerBounds] | n == 0 = error "Out of steps when tracing backwards" | List.null neighbors = error "No matching neighbors when tracing backwards" | otherwise = p : findPath (pred n) (fst . head $ neighbors) ts where neighbors = List.filter ((== Explored (pred n)) . snd) . List.filter (isExplored . snd) . List.map (join (,) >>> second (ts Map.!)) . List.filter inBounds . Set.toList . adjacent $ p runDijkstra = flip zip (repeat Wall) >>> Map.fromList >>> dijkstra (Map.singleton 0 (Set.singleton lowerBounds)) fst3 :: (a, b, c) -> a fst3 (a, _, _) = a thrd :: (a, b, c) -> c thrd (_, _, c) = c part1 = take initialBytes >>> runDijkstra >>> \ (n, _, _) -> n firstFailing :: [(Int, Int)] -> [[(Int, Int)]] -> (Int, Int) firstFailing path (bs:bss) | List.last bs `List.notElem` path = firstFailing path bss | c == (-1) = List.last bs | otherwise = firstFailing (findPath c p ts) bss where (c, p, ts) = runDijkstra bs part2 bs = repeat >>> zip [initialBytes..length bs] >>> map (uncurry take) >>> firstFailing path $ bs where (n, p, ts) = runDijkstra . take 1024 $ bs path = findPath n p ts main = getContents >>= print . (part1 &&& part2) . parse
C#
using QuickGraph;
using QuickGraph.Algorithms.ShortestPath;

namespace aoc24;

/// <summary>
/// Day 18: shortest path through a grid with fallen bytes, using QuickGraph's
/// A* over an implicit lattice graph (vertices are (col, row) tuples).
/// </summary>
public class Day18 : Solver
{
    private int width = 71, height = 71, bytes = 1024;
    private HashSet<(int, int)> fallen_bytes;
    private List<(int, int)> fallen_bytes_in_order;

    private record class Edge((int, int) Source, (int, int) Target) : IEdge<(int, int)>;

    private DelegateVertexAndEdgeListGraph<(int, int), Edge> MakeGraph() => new(GetAllVertices(), GetOutEdges);

    private readonly (int, int)[] directions = [(-1, 0), (0, 1), (1, 0), (0, -1)];

    // Out-edges of a cell: the four cardinal neighbors that are in bounds and
    // not corrupted. Always succeeds (returns true) per the delegate contract.
    private bool GetOutEdges((int, int) arg, out IEnumerable<Edge> result_enumerable)
    {
        List<Edge> result = [];
        foreach (var (dx, dy) in directions)
        {
            var (nx, ny) = (arg.Item1 + dx, arg.Item2 + dy);
            if (nx < 0 || ny < 0 || nx >= width || ny >= height) continue;
            if (fallen_bytes.Contains((nx, ny))) continue;
            result.Add(new(arg, (nx, ny)));
        }
        result_enumerable = result;
        return true;
    }

    private IEnumerable<(int, int)> GetAllVertices()
    {
        for (int i = 0; i < width; i++)
        {
            for (int j = 0; j < height; j++)
            {
                yield return (i, j);
            }
        }
    }

    // Parses "x,y" lines; keeps the full ordered list for part 2 and seeds the
    // obstacle set with the first `bytes` entries for part 1.
    public void Presolve(string input)
    {
        fallen_bytes_in_order = [..input.Trim().Split("\n")
            .Select(line => line.Split(","))
            .Select(pair => (int.Parse(pair[0]), int.Parse(pair[1])))];
        fallen_bytes = [.. fallen_bytes_in_order.Take(bytes)];
    }

    // A* from (0,0) to (width-1, height-1) with unit edge weights; aborts the
    // search as soon as the goal vertex is examined. An unreachable goal keeps
    // its initial distance (larger than any real path length).
    private double Solve()
    {
        var graph = MakeGraph();
        var search = new AStarShortestPathAlgorithm<(int, int), Edge>(graph, _ => 1, vtx => vtx.Item1 + vtx.Item2);
        search.SetRootVertex((0, 0));
        search.ExamineVertex += vertex => {
            // BUGFIX: the goal's second coordinate is height - 1, not width - 1.
            // Harmless while the grid is square (71x71), but wrong otherwise.
            if (vertex.Item1 == width - 1 && vertex.Item2 == height - 1) search.Abort();
        };
        search.Compute();
        return search.Distances[(width - 1, height - 1)];
    }

    public string SolveFirst() => Solve().ToString();

    // Part 2: add bytes one at a time past the initial 1024; the first byte
    // that makes the goal unreachable (distance exceeds the cell count) is
    // reported as "x,y".
    public string SolveSecond()
    {
        foreach (var b in fallen_bytes_in_order[bytes..])
        {
            fallen_bytes.Add(b);
            if (Solve() > width * height) return $"{b.Item1},{b.Item2}";
        }
        throw new Exception("solution not found");
    }
}