diff --git a/data-structure/BST.js b/data-structure/BST.js new file mode 100644 index 00000000..481ced66 --- /dev/null +++ b/data-structure/BST.js @@ -0,0 +1,148 @@ +/** + * BST — Binary Search Tree (unbalanced) + * + * Invariant: for every node, all values in the LEFT subtree are < node.val, + * all values in the RIGHT subtree are > node.val. + * + * 5 + * / \ + * 3 8 + * / \ \ + * 1 4 9 + * + * That invariant is what makes search work: at each step you can throw + * away half the tree. + * + * Worst case: insertions in sorted order create a degenerate tree — + * essentially a linked list, all operations O(n): + * + * 1 + * \ + * 2 + * \ + * 3 ... + * + * To guarantee O(log n) you need self-balancing variants (AVL, Red-Black). + * Those rotate on insert/delete to keep height ≈ log n. They're outside this + * file's scope — start here, level up later. + * + * Delete is the spicy operation. Three cases: + * 1. Leaf → null it out + * 2. One child → splice the child up + * 3. Two children → replace value with in-order successor (min of right + * subtree), then delete that successor + * + * In-order traversal yields values in sorted order — useful for problems + * that need a sorted view without re-sorting. + * + * Space: O(n) + * + * insert O(log n) avg / O(n) worst (skewed) + * search O(log n) avg / O(n) worst + * delete O(log n) avg / O(n) worst + * inOrder O(n) + */ +class BSTNode { + constructor(val) { + this.val = val; + this.left = null; + this.right = null; + } +} + +class BST { + #root = null; + #size = 0; + + insert(val) { + if (this.#root === null) { + this.#root = new BSTNode(val); + this.#size++; + return; + } + let node = this.#root; + while (true) { + if (val === node.val) return; // duplicates ignored + const goLeft = val < node.val; + const child = goLeft ? 
node.left : node.right; + if (child === null) { + const created = new BSTNode(val); + if (goLeft) node.left = created; else node.right = created; + this.#size++; + return; + } + node = child; + } + } + + search(val) { + let node = this.#root; + while (node !== null) { + if (val === node.val) return true; + node = val < node.val ? node.left : node.right; + } + return false; + } + + delete(val) { + const before = this.#size; + this.#root = this.#deleteFrom(this.#root, val); + return this.#size < before; + } + + /** + * Returns array of values in ascending order. + * Iterative in-order traversal — avoids recursion depth issues on deep trees. + */ + inOrder() { + const out = []; + const stack = []; + let node = this.#root; + while (node !== null || stack.length > 0) { + // dive left as far as possible, recording the path + while (node !== null) { + stack.push(node); + node = node.left; + } + node = stack.pop(); + out.push(node.val); + node = node.right; + } + return out; + } + + size() { + return this.#size; + } + + #deleteFrom(node, val) { + if (node === null) return null; + if (val < node.val) { + node.left = this.#deleteFrom(node.left, val); + return node; + } + if (val > node.val) { + node.right = this.#deleteFrom(node.right, val); + return node; + } + // found target — handle the three cases + this.#size--; + if (node.left === null) return node.right; // 0 or 1 child (right only) + if (node.right === null) return node.left; // 1 child (left only) + // 2 children: pull up the in-order successor's value, then delete it. + // We re-increment #size to cancel the early decrement above; the recursive + // call will decrement once when it actually removes the successor leaf. 
+ this.#size++; + const succ = this.#minNode(node.right); + node.val = succ.val; + node.right = this.#deleteFrom(node.right, succ.val); + return node; + } + + #minNode(node) { + while (node.left !== null) node = node.left; + return node; + } +} + +export { BST, BSTNode }; diff --git a/data-structure/DoublyLinkedList.js b/data-structure/DoublyLinkedList.js new file mode 100644 index 00000000..9b86edde --- /dev/null +++ b/data-structure/DoublyLinkedList.js @@ -0,0 +1,109 @@ +/** + * DoublyLinkedList — each node has prev AND next pointers + * + * null ← [a] ⇄ [b] ⇄ [c] → null + * ↑ ↑ + * head tail + * + * Why pay 2× the pointers? Because given a node reference, you can: + * - remove it in O(1) without scanning to find the predecessor + * - walk backward as well as forward + * + * This is exactly what an LRU cache needs: a HashMap maps keys → nodes, + * and the list reorders itself on every access. Without `prev`, the + * "remove a node" step becomes O(n) and the cache is broken. + * + * The `removeNode(node)` method below is the headline feature — it's why + * this structure exists. Everything else is the same as a singly-linked list + * with an extra back-pointer kept in sync. + * + * Sentinel-node trick (used here): keep dummy `head` and `tail` nodes that + * never hold values. Removes all "is this the first/last node?" branches — + * every real node has a real prev and next. Cleaner code, fewer bugs. 
+ * + * [HEAD] ⇄ [a] ⇄ [b] ⇄ [TAIL] + * + * Space: O(n) + * + * addFirst O(1) + * addLast O(1) + * removeFirst O(1) + * removeLast O(1) + * removeNode O(1) ← given a node reference + * size O(1) + */ +class DListNode { + constructor(val) { + this.val = val; + this.prev = null; + this.next = null; + } +} + +class DoublyLinkedList { + #head; // sentinel — not a real value + #tail; // sentinel — not a real value + #size = 0; + + constructor() { + this.#head = new DListNode(null); + this.#tail = new DListNode(null); + this.#head.next = this.#tail; + this.#tail.prev = this.#head; + } + + addFirst(val) { + return this.#insertAfter(this.#head, val); + } + + addLast(val) { + return this.#insertAfter(this.#tail.prev, val); + } + + removeFirst() { + if (this.#size === 0) return undefined; + return this.removeNode(this.#head.next); + } + + removeLast() { + if (this.#size === 0) return undefined; + return this.removeNode(this.#tail.prev); + } + + /** + * O(1) removal — the whole point of the doubly-linked design. + * Caller must own the node reference (e.g. obtained from addFirst/addLast). 
+ */ + removeNode(node) { + node.prev.next = node.next; + node.next.prev = node.prev; + this.#size--; + return node.val; + } + + toArray() { + const out = []; + for (let n = this.#head.next; n !== this.#tail; n = n.next) out.push(n.val); + return out; + } + + size() { + return this.#size; + } + + isEmpty() { + return this.#size === 0; + } + + #insertAfter(prev, val) { + const node = new DListNode(val); + node.prev = prev; + node.next = prev.next; + prev.next.prev = node; + prev.next = node; + this.#size++; + return node; // returned so caller can remove in O(1) later + } +} + +export { DoublyLinkedList, DListNode }; diff --git a/data-structure/Graph.js b/data-structure/Graph.js new file mode 100644 index 00000000..f87619d7 --- /dev/null +++ b/data-structure/Graph.js @@ -0,0 +1,140 @@ +/** + * Graph — adjacency list (undirected by default, supports directed) + * + * Storage: a Map from node → Set of neighbors. + * + * A — B + * | | + * C — D + * + * adj = { + * A: {B, C}, + * B: {A, D}, + * C: {A, D}, + * D: {B, C}, + * } + * + * Adjacency list vs adjacency matrix: + * - matrix: V×V bits, edge lookup O(1), space O(V²) — wasteful for sparse graphs + * - list: neighbors easy to iterate, space O(V + E) — wins when E ≪ V² + * + * Most real graphs (social, road, web) are sparse → list wins. + * + * Why a Set per node, not an Array? `addEdge(a, b)` becomes idempotent in O(1) + * — no duplicate edges from re-adding. Removal is also O(1) instead of O(deg). + * + * Directedness: pass `{ directed: true }` to skip the reverse-edge insert. + * + * BFS = "spread one ring at a time" — finds shortest path in unweighted graphs. + * DFS = "go deep, backtrack" — natural for cycle detection, topo sort, components. + * + * V = vertex count, E = edge count, deg(v) = neighbor count of v. 
+ * + * Space: O(V + E) + * + * addNode O(1) + * addEdge O(1) + * removeEdge O(1) + * removeNode O(deg(v)) + * neighbors O(1) — returns the Set + * bfs / dfs O(V + E) + */ +class Graph { + #adj = new Map(); + #directed; + + constructor({ directed = false } = {}) { + this.#directed = directed; + } + + addNode(v) { + if (!this.#adj.has(v)) this.#adj.set(v, new Set()); + } + + addEdge(u, v) { + this.addNode(u); + this.addNode(v); + this.#adj.get(u).add(v); + if (!this.#directed) this.#adj.get(v).add(u); + } + + removeEdge(u, v) { + this.#adj.get(u)?.delete(v); + if (!this.#directed) this.#adj.get(v)?.delete(u); + } + + removeNode(v) { + if (!this.#adj.has(v)) return; + // remove every incoming edge — costs O(deg) for undirected, + // O(V) worst case for directed (must scan everyone) + if (this.#directed) { + for (const set of this.#adj.values()) set.delete(v); + } else { + for (const u of this.#adj.get(v)) this.#adj.get(u).delete(v); + } + this.#adj.delete(v); + } + + hasNode(v) { + return this.#adj.has(v); + } + + hasEdge(u, v) { + return this.#adj.get(u)?.has(v) ?? false; + } + + neighbors(v) { + return this.#adj.get(v) ?? new Set(); + } + + nodes() { + return [...this.#adj.keys()]; + } + + /** + * BFS from `start`. Returns nodes in visit order. + * Uses a plain array as a queue with a head index — avoids `shift()` O(n). + */ + bfs(start) { + if (!this.#adj.has(start)) return []; + const visited = new Set([start]); + const order = []; + const queue = [start]; + let head = 0; + while (head < queue.length) { + const node = queue[head++]; + order.push(node); + for (const next of this.#adj.get(node)) { + if (!visited.has(next)) { + visited.add(next); + queue.push(next); + } + } + } + return order; + } + + /** + * Iterative DFS using an explicit stack — avoids JS call-stack overflow on + * deep graphs. Visit order matches recursive DFS only if neighbor iteration + * is reversed when pushing (so the first neighbor is popped first). 
+ */ + dfs(start) { + if (!this.#adj.has(start)) return []; + const visited = new Set(); + const order = []; + const stack = [start]; + while (stack.length > 0) { + const node = stack.pop(); + if (visited.has(node)) continue; + visited.add(node); + order.push(node); + // reverse so iteration order matches a recursive `for (n of neighbors)` + const ns = [...this.#adj.get(node)].reverse(); + for (const next of ns) if (!visited.has(next)) stack.push(next); + } + return order; + } +} + +export { Graph }; diff --git a/data-structure/HashMap.js b/data-structure/HashMap.js new file mode 100644 index 00000000..df138985 --- /dev/null +++ b/data-structure/HashMap.js @@ -0,0 +1,129 @@ +/** + * HashMap — separate-chaining hash table + * + * Internal layout: a fixed-size array of buckets, each bucket is itself an + * array of [key, value] pairs (the "chain"). + * + * buckets[0] → [ [k1, v1] ] + * buckets[1] → [ [k7, v7], [k15, v15] ] ← collision chain + * buckets[2] → [ ] + * buckets[3] → [ [k3, v3] ] + * + * index = hash(key) % buckets.length + * + * Two collisions land in the same bucket; we walk the chain to find the + * exact key. Average chain length ≈ size / capacity = "load factor". + * Keep load factor low (<0.75 here) and operations stay O(1) average. + * + * What "rehash" does: when load factor crosses the threshold, double the + * bucket array and re-insert every entry. Costs O(n) but amortizes to O(1) + * per insert because doublings are exponentially spaced. + * + * Why NOT just use the native `Map`? In production, do. This exists so the + * mechanics — hashing, bucketing, collision handling, resizing — are visible. + * + * Hash function caveat: this implementation uses a simple string-based hash + * (djb2 variant). Real hash maps use richer hashing and randomization to + * defeat collision-based DoS attacks. 
+ * + * Space: O(n + capacity) + * + * set O(1) avg, O(n) worst (all keys hash to the same bucket) + * get O(1) avg, O(n) worst + * delete O(1) avg, O(n) worst + * has O(1) avg, O(n) worst + */ +class HashMap { + #buckets; + #capacity; + #size = 0; + static #LOAD_FACTOR = 0.75; + static #INITIAL_CAPACITY = 16; + + constructor() { + this.#capacity = HashMap.#INITIAL_CAPACITY; + this.#buckets = Array.from({ length: this.#capacity }, () => []); + } + + set(key, value) { + const bucket = this.#bucketFor(key); + // walk the chain — update if key already present + for (const pair of bucket) { + if (pair[0] === key) { pair[1] = value; return; } + } + bucket.push([key, value]); + this.#size++; + if (this.#size / this.#capacity > HashMap.#LOAD_FACTOR) this.#rehash(); + } + + get(key) { + const bucket = this.#bucketFor(key); + for (const [k, v] of bucket) { + if (k === key) return v; + } + return undefined; + } + + has(key) { + const bucket = this.#bucketFor(key); + for (const [k] of bucket) { + if (k === key) return true; + } + return false; + } + + delete(key) { + const bucket = this.#bucketFor(key); + for (let i = 0; i < bucket.length; i++) { + if (bucket[i][0] === key) { + // swap-with-last to avoid O(n) splice — order in a chain is irrelevant + bucket[i] = bucket[bucket.length - 1]; + bucket.pop(); + this.#size--; + return true; + } + } + return false; + } + + size() { + return this.#size; + } + + *entries() { + for (const bucket of this.#buckets) { + for (const pair of bucket) yield pair; + } + } + + #bucketFor(key) { + return this.#buckets[this.#hash(key) % this.#capacity]; + } + + /** + * djb2-style hash on the string form of the key. + * Good enough for teaching; production-grade maps use stronger hashing. 
+ */ + #hash(key) { + const s = String(key); + let h = 5381; + for (let i = 0; i < s.length; i++) { + // (h << 5) + h === h * 33; bitwise shift keeps it in i32 range + h = ((h << 5) + h + s.charCodeAt(i)) | 0; + } + return h >>> 0; // unsigned for modulo math + } + + #rehash() { + const old = this.#buckets; + this.#capacity *= 2; + this.#buckets = Array.from({ length: this.#capacity }, () => []); + this.#size = 0; + // re-insert everything — bucket index changes because capacity changed + for (const bucket of old) { + for (const [k, v] of bucket) this.set(k, v); + } + } +} + +export { HashMap }; diff --git a/data-structure/LinkedList.js b/data-structure/LinkedList.js new file mode 100644 index 00000000..83e2bbb6 --- /dev/null +++ b/data-structure/LinkedList.js @@ -0,0 +1,121 @@ +/** + * LinkedList — singly linked + * + * head → [a|·] → [b|·] → [c|null] + * + * Each node holds a value and ONE pointer to the next node. The list ends + * when `next === null`. + * + * Why use one over an array? + * - O(1) insert/remove at the head + * - size grows without re-allocating a contiguous buffer + * - good for problems where you splice nodes mid-list given a reference + * + * Why NOT use one? + * - no random access — `get(i)` is O(i), not O(1) + * - poor cache locality — nodes scatter across memory + * - higher memory per element (pointer overhead) + * + * In practice, most "linked list" LeetCode problems are about pointer + * gymnastics (reverse, detect cycle, merge sorted, etc.) — not because + * linked lists are inherently better, but because they force you to reason + * about references explicitly. 
+ *
+ * Space: O(n)
+ *
+ * prepend          O(1)
+ * append           O(1) — tail pointer kept in sync
+ * removeHead       O(1)
+ * removeAt(n-1)    O(n) — tail removal; must walk to find new tail (no prev pointer)
+ * insertAt         O(n)
+ * removeAt         O(n)
+ * find             O(n)
+ * size             O(1)
+ */
+class ListNode {
+  constructor(val, next = null) {
+    this.val = val;
+    this.next = next;
+  }
+}
+
+class LinkedList {
+  #head = null;
+  #tail = null;
+  #size = 0;
+
+  prepend(val) {
+    this.#head = new ListNode(val, this.#head);
+    if (this.#tail === null) this.#tail = this.#head;
+    this.#size++;
+  }
+
+  append(val) {
+    const node = new ListNode(val);
+    if (this.#tail === null) {
+      this.#head = node;
+      this.#tail = node;
+    } else {
+      this.#tail.next = node;
+      this.#tail = node;
+    }
+    this.#size++;
+  }
+
+  removeHead() {
+    if (this.#head === null) return undefined;
+    const val = this.#head.val;
+    this.#head = this.#head.next;
+    if (this.#head === null) this.#tail = null;
+    this.#size--;
+    return val;
+  }
+
+  insertAt(index, val) {
+    if (index < 0 || index > this.#size) return false;
+    if (index === 0) { this.prepend(val); return true; }
+    if (index === this.#size) { this.append(val); return true; }
+    // walk to node at (index - 1) so we can splice between it and its next
+    let prev = this.#head;
+    for (let i = 0; i < index - 1; i++) prev = prev.next;
+    prev.next = new ListNode(val, prev.next);
+    this.#size++;
+    return true;
+  }
+
+  removeAt(index) {
+    if (index < 0 || index >= this.#size) return undefined;
+    if (index === 0) return this.removeHead();
+    let prev = this.#head;
+    for (let i = 0; i < index - 1; i++) prev = prev.next;
+    const removed = prev.next;
+    prev.next = removed.next;
+    if (removed === this.#tail) this.#tail = prev;
+    this.#size--;
+    return removed.val;
+  }
+
+  find(val) {
+    let i = 0;
+    for (let node = this.#head; node !== null; node = node.next, i++) {
+      if (node.val === val) return i;
+    }
+    return -1;
+  }
+
+  toArray() {
+    const out = [];
+    for (let node = this.#head; node !== null; node = node.next) 
out.push(node.val);
+    return out;
+  }
+
+  size() {
+    return this.#size;
+  }
+
+  isEmpty() {
+    return this.#size === 0;
+  }
+}
+
+export { LinkedList, ListNode };
diff --git a/data-structure/MaxPriorityQueue.js b/data-structure/MaxPriorityQueue.js
new file mode 100644
index 00000000..a0a0db50
--- /dev/null
+++ b/data-structure/MaxPriorityQueue.js
@@ -0,0 +1,122 @@
+/**
+ * MaxPriorityQueue — binary max-heap
+ *
+ * Conceptually a complete binary tree, but stored as a flat array.
+ * No Node objects, no pointers — the tree exists only in the index formula:
+ *
+ *   parent of i  → (i - 1) >> 1
+ *   left child   → 2 * i + 1
+ *   right child  → 2 * i + 2
+ *
+ * This works because a heap is always a COMPLETE binary tree (filled left
+ * to right, no gaps), so indices never have holes. BSTs/AVL/Red-Black trees
+ * can't use this trick — their arbitrary shape breaks the formula, so they
+ * need actual node pointers.
+ *
+ * Array beats node-based for heaps:
+ *   - no pointer overhead per node
+ *   - contiguous memory → CPU cache friendly
+ *
+ * Example — array [6, 4, 5, 2, 3] represents:
+ *
+ *        6(0)
+ *       /    \
+ *     4(1)   5(2)
+ *     /  \
+ *   2(3)  3(4)
+ *
+ * Space: O(n)
+ *
+ * enqueue  O(log n) — bubble up at most tree height
+ * dequeue  O(log n) — sink down at most tree height
+ * front    O(1)     — max is always at index 0
+ * size     O(1)
+ * isEmpty  O(1)
+ */
+class MaxPriorityQueue {
+  #heap = [];
+  #priorityOf;
+
+  /**
+   * @param {object} [opts]
+   * @param {(item: any) => number} [opts.priorityOf] — extracts numeric priority from an item.
+   *   NOTE: do NOT name this `valueOf` — it collides with Object.prototype.valueOf,
+   *   which destructuring picks up via the prototype chain when the option object is empty.
+   */
+  constructor({ priorityOf = (x) => x } = {}) {
+    this.#priorityOf = priorityOf;
+  }
+
+  enqueue(val) {
+    this.#heap.push(val);
+    this.#bubbleUp(this.#heap.length - 1);
+  }
+
+  dequeue() {
+    if (this.#heap.length === 0) return undefined; // empty-queue guard — mirrors MinPriorityQueue
+    // never shifts the array — avoids O(n) cost
+    //
+    // e.g. heap = [6, 4, 5, 2, 3, 1]
+    //
+    // step 1 — swap root with last:  [1, 4, 5, 2, 3, 6]
+    this.#swap(0, this.#heap.length - 1);
+    // step 2 — pop from tail (O(1)): [1, 4, 5, 2, 3]   top = 6
+    const top = this.#heap.pop();
+    // step 3 — sink new root down:   [5, 4, 1, 2, 3]   O(log n)
+    this.#sinkDown(0);
+    return top;
+  }
+
+  front() {
+    return this.#heap[0];
+  }
+
+  size() {
+    return this.#heap.length;
+  }
+
+  isEmpty() {
+    return this.#heap.length === 0;
+  }
+
+  #priority(i) {
+    return this.#priorityOf(this.#heap[i]);
+  }
+
+  #swap(i, j) {
+    [this.#heap[i], this.#heap[j]] = [this.#heap[j], this.#heap[i]];
+  }
+
+  #bubbleUp(i) {
+    while (i > 0) {
+      // tree structure lives in the index formula — no pointers, no nodes
+      // parent of i = (i - 1) / 2   e.g. parent of index 3 → (3-1)>>1 = 1
+      const parent = (i - 1) >> 1;
+      if (this.#priority(parent) >= this.#priority(i)) break;
+      this.#swap(parent, i);
+      i = parent; // "move up" = just change which index we're looking at
+    }
+  }
+
+  #sinkDown(i) {
+    const n = this.#heap.length;
+    while (true) {
+      let largest = i;
+      // left/right child indices are pure arithmetic — the tree only exists in this formula
+      // array: [6, 4, 5, 2, 3] → tree:   6(0)
+      //                                 /    \
+      //                               4(1)   5(2)
+      //                               /  \
+      //                             2(3)  3(4)
+      const l = 2 * i + 1; // left child of i
+      const r = 2 * i + 2; // right child of i
+      if (l < n && this.#priority(l) > this.#priority(largest)) largest = l;
+      if (r < n && this.#priority(r) > this.#priority(largest)) largest = r;
+      if (largest === i) break;
+      this.#swap(i, largest);
+      i = largest; // "move down" = just update i to the child index
+    }
+  }
+}
+
+export { MaxPriorityQueue };
diff --git a/data-structure/MinPriorityQueue.js b/data-structure/MinPriorityQueue.js
new file mode 100644
index 00000000..46cb88ef
--- /dev/null
+++ b/data-structure/MinPriorityQueue.js
@@ -0,0 +1,109 @@
+/**
+ * MinPriorityQueue — binary min-heap
+ *
+ * Same array-as-tree trick as MaxPriorityQueue (see that file for full
+ * heap-vs-pointer-tree explanation). 
The ONLY difference is comparator + * direction: parent ≤ children instead of parent ≥ children. + * + * parent of i → (i - 1) >> 1 + * left child → 2 * i + 1 + * right child → 2 * i + 2 + * + * Example — array [1, 3, 2, 7, 4] represents: + * + * 1(0) + * / \ + * 3(1) 2(2) + * / \ + * 7(3) 4(4) + * + * Common uses (these are why min-heap is the default flavor in algorithms): + * - Dijkstra / A\* — always pop the lowest-cost frontier node + * - merge K sorted lists — always pop the smallest head + * - top-K LARGEST (counterintuitive): keep K elements in a min-heap; if a + * new element exceeds the min, evict the min. Min stays ready to evict. + * + * For min-heap-of-objects, pass `priorityOf` to extract a numeric priority: + * + * new MinPriorityQueue({ priorityOf: (task) => task.deadline }) + * + * (NOT `valueOf` — that name collides with Object.prototype.valueOf, which + * destructuring will pick up via the prototype chain on an empty options object.) + * + * Space: O(n) + * + * enqueue O(log n) — bubble up at most tree height + * dequeue O(log n) — sink down at most tree height + * front O(1) — min is always at index 0 + * size O(1) + * isEmpty O(1) + */ +class MinPriorityQueue { + #heap = []; + #priorityOf; + + constructor({ priorityOf = (x) => x } = {}) { + this.#priorityOf = priorityOf; + } + + enqueue(val) { + this.#heap.push(val); + this.#bubbleUp(this.#heap.length - 1); + } + + dequeue() { + if (this.#heap.length === 0) return undefined; + // swap-with-last + pop avoids O(n) shift + this.#swap(0, this.#heap.length - 1); + const top = this.#heap.pop(); + this.#sinkDown(0); + return top; + } + + front() { + return this.#heap[0]; + } + + size() { + return this.#heap.length; + } + + isEmpty() { + return this.#heap.length === 0; + } + + #priority(i) { + return this.#priorityOf(this.#heap[i]); + } + + #swap(i, j) { + [this.#heap[i], this.#heap[j]] = [this.#heap[j], this.#heap[i]]; + } + + #bubbleUp(i) { + while (i > 0) { + const parent = (i - 1) >> 1; + // 
min-heap: stop when parent already ≤ child + if (this.#priority(parent) <= this.#priority(i)) break; + this.#swap(parent, i); + i = parent; + } + } + + #sinkDown(i) { + const n = this.#heap.length; + while (true) { + let smallest = i; + const l = 2 * i + 1; + const r = 2 * i + 2; + // min-heap: pick the SMALLER child to swap with + if (l < n && this.#priority(l) < this.#priority(smallest)) smallest = l; + if (r < n && this.#priority(r) < this.#priority(smallest)) smallest = r; + if (smallest === i) break; + this.#swap(i, smallest); + i = smallest; + } + } +} + +export { MinPriorityQueue }; diff --git a/data-structure/Queue.js b/data-structure/Queue.js new file mode 100644 index 00000000..92731f61 --- /dev/null +++ b/data-structure/Queue.js @@ -0,0 +1,72 @@ +/** + * Queue — FIFO (First In, First Out) + * + * Backed by a singly-linked list with head + tail pointers. + * + * head → [a] → [b] → [c] ← tail + * ↑ ↑ + * dequeue here enqueue here + * + * Why NOT a plain JS array? `arr.shift()` is O(n) — every dequeue re-indexes + * the entire array. A million-item queue would be quadratic in total work. + * + * Alternatives considered: + * - circular buffer (fixed capacity) — fast but needs resize logic on grow + * - two-stack queue — amortized O(1) but worst-case O(n) on a single op + * - linked list — true O(1) per op, simple invariant; the teaching pick + * + * Common uses: + * - BFS (level-order traversal) + * - task scheduling, request buffering + * - producer/consumer pipelines + * + * Space: O(n) — extra pointer per node vs. 
array + * + * enqueue O(1) — append at tail + * dequeue O(1) — drop head + * front O(1) + * size O(1) — counter kept on every op + * isEmpty O(1) + */ +class Queue { + #head = null; + #tail = null; + #size = 0; + + enqueue(val) { + const node = { val, next: null }; + if (this.#tail === null) { + // empty queue — head and tail both point at the new node + this.#head = node; + this.#tail = node; + } else { + this.#tail.next = node; + this.#tail = node; + } + this.#size++; + } + + dequeue() { + if (this.#head === null) return undefined; + const val = this.#head.val; + this.#head = this.#head.next; + // last item just left — keep tail in sync, otherwise enqueue breaks + if (this.#head === null) this.#tail = null; + this.#size--; + return val; + } + + front() { + return this.#head?.val; + } + + size() { + return this.#size; + } + + isEmpty() { + return this.#size === 0; + } +} + +export { Queue }; diff --git a/data-structure/README.md b/data-structure/README.md new file mode 100644 index 00000000..c36b196f --- /dev/null +++ b/data-structure/README.md @@ -0,0 +1,55 @@ +# Data Structures (JavaScript) + +Hand-rolled implementations for learning. Each file has: + +- Concept explanation with ASCII diagram +- Time/space complexity table at the top +- Inline comments explaining *why* (not *what*) — design tradeoffs, alternatives ruled out, common pitfalls +- Private fields (`#`) for true encapsulation + +These are NOT meant to replace `Map`/`Set`/`Array` in production. They exist to make the underlying machinery visible. 
+ +## When to use what + +| Structure | Use when | Avoid when | File | +|---|---|---|---| +| **Stack** | LIFO order — undo, parens matching, DFS stack | FIFO order needed | [`Stack.js`](./Stack.js) | +| **Queue** | FIFO order — BFS, scheduling, buffers | LIFO or random access | [`Queue.js`](./Queue.js) | +| **LinkedList** | Frequent insert/remove at head; size unknown | Random access by index | [`LinkedList.js`](./LinkedList.js) | +| **DoublyLinkedList** | O(1) remove given a node ref (LRU cache, deque) | Memory tight (2× pointer overhead) | [`DoublyLinkedList.js`](./DoublyLinkedList.js) | +| **HashMap** | Key→value lookups in O(1) avg; learning hashing | Native `Map` does this in prod | [`HashMap.js`](./HashMap.js) | +| **MaxPriorityQueue** | Always pop the largest — Dijkstra (negated), top-K, schedulers | Need sorted iteration of all elements | [`MaxPriorityQueue.js`](./MaxPriorityQueue.js) | +| **MinPriorityQueue** | Always pop the smallest — Dijkstra, A\*, merge-K-sorted | Need sorted iteration of all elements | [`MinPriorityQueue.js`](./MinPriorityQueue.js) | +| **Trie** | Prefix queries, autocomplete, word-search grids | Only need exact-match lookup (use HashMap) | [`Trie.js`](./Trie.js) | +| **BST** | Sorted in-order traversal + dynamic inserts | Worst-case O(n) skew without balancing | [`BST.js`](./BST.js) | +| **Graph** | Edges between entities — BFS/DFS, shortest path, topo sort | Tree-shaped data (use a tree) | [`Graph.js`](./Graph.js) | +| **UnionFind** | Connectivity / grouping — Kruskal's MST, dynamic islands | Need to *split* groups (UF only merges) | [`UnionFind.js`](./UnionFind.js) | + +## Complexity cheat sheet + +| Structure | Insert | Delete | Search | Access by key/index | Notes | +|---|---|---|---|---|---| +| Stack | O(1) | O(1) (top) | O(n) | — | `push`/`pop` only | +| Queue | O(1) | O(1) (front) | O(n) | — | linked-list backed | +| LinkedList | O(1) head / O(n) middle | O(1) head / O(n) middle | O(n) | O(n) | no random access | +| 
DoublyLinkedList | O(1) head/tail | O(1) given node | O(n) | O(n) | O(1) remove given ref | +| HashMap | O(1) avg | O(1) avg | O(1) avg | O(1) avg | O(n) worst (collisions) | +| MaxPQ / MinPQ | O(log n) | O(log n) (root) | O(n) | — | front O(1) | +| Trie | O(L) | O(L) | O(L) | — | L = key length | +| BST | O(log n) avg / O(n) worst | O(log n) avg / O(n) worst | O(log n) avg / O(n) worst | — | unbalanced; AVL/RB needed for guarantee | +| Graph (adj. list) | O(1) edge | O(deg) edge | O(1) node | — | BFS/DFS O(V+E) | +| UnionFind | O(α(n)) | — | O(α(n)) | — | α = inverse Ackermann ≈ constant | + +## Run tests + +```bash +npm run test -- data-structure +``` + +## Style guide for adding new structures + +1. Top-of-file JSDoc with ASCII diagram + complexity table +2. Explain *why* this representation beats alternatives (e.g. heap-as-array vs node-pointers) +3. Use `#privateFields` — encapsulation matters for teaching invariants +4. Comments explain reasoning, not syntax +5. Test file in `tests/` covers happy path + edge cases (empty, single-element, duplicates where relevant) diff --git a/data-structure/Stack.js b/data-structure/Stack.js new file mode 100644 index 00000000..1af67133 --- /dev/null +++ b/data-structure/Stack.js @@ -0,0 +1,55 @@ +/** + * Stack — LIFO (Last In, First Out) + * + * Backed by a JS array. `push`/`pop` on the tail are amortized O(1) because + * V8 grows the underlying buffer geometrically (doubles when full), so the + * average copy cost per push is constant. + * + * top → [ d ] ← push/pop here + * [ c ] + * [ b ] + * bot → [ a ] + * + * Why NOT shift/unshift at the head? Both are O(n) — every element re-indexes. + * Stacks always grow at the tail. 
+ * + * Common uses: + * - parentheses matching, expression evaluation + * - DFS without recursion (avoid call-stack overflow) + * - undo/redo history + * - call frame simulation + * + * Space: O(n) + * + * push O(1) amortized + * pop O(1) + * peek O(1) + * size O(1) + * isEmpty O(1) + */ +class Stack { + #data = []; + + push(val) { + this.#data.push(val); + } + + pop() { + // returns undefined on empty — matches Array.prototype.pop semantics + return this.#data.pop(); + } + + peek() { + return this.#data[this.#data.length - 1]; + } + + size() { + return this.#data.length; + } + + isEmpty() { + return this.#data.length === 0; + } +} + +export { Stack }; diff --git a/data-structure/Trie.js b/data-structure/Trie.js new file mode 100644 index 00000000..a544463a --- /dev/null +++ b/data-structure/Trie.js @@ -0,0 +1,80 @@ +/** + * Trie — prefix tree + * + * Each node represents one character. A path from root to a node spells a + * prefix; a node flagged `isEnd = true` means a word terminates there. + * + * insert: "cat", "car", "cars", "dog" + * + * (root) + * / \ + * c d + * | | + * a o + * / \ \ + * t* r g* + * | + * * + * | + * s* (* = isEnd) + * + * Nodes are NOT 26-element arrays — children live in a Map so keys can be + * any character (Unicode, digits, symbols) without wasting space. + * + * Why use a trie instead of a HashMap of words? + * - HashMap is O(1) for *exact* match but useless for "starts with" + * - Trie is O(L) — proportional to query length, not dictionary size + * - autocomplete, spellcheck, word-search grids all want prefix queries + * + * Tradeoff: memory. Many short words = many shallow nodes. For very large + * dictionaries, a compressed trie (radix tree) is more memory-efficient. + * + * L = length of the word/prefix being queried. 
+ * + * Space: O(total characters across all inserted words) + * + * insert O(L) + * search O(L) — exact word + * startsWith O(L) — any word with this prefix + */ +class TrieNode { + constructor() { + this.children = new Map(); + this.isEnd = false; + } +} + +class Trie { + #root = new TrieNode(); + + insert(word) { + let node = this.#root; + for (const ch of word) { + if (!node.children.has(ch)) node.children.set(ch, new TrieNode()); + node = node.children.get(ch); + } + node.isEnd = true; + } + + search(word) { + const node = this.#walk(word); + // a prefix that lands on a real node ISN'T a word unless flagged + return node !== null && node.isEnd; + } + + startsWith(prefix) { + return this.#walk(prefix) !== null; + } + + #walk(s) { + let node = this.#root; + for (const ch of s) { + const next = node.children.get(ch); + if (next === undefined) return null; + node = next; + } + return node; + } +} + +export { Trie, TrieNode }; diff --git a/data-structure/UnionFind.js b/data-structure/UnionFind.js new file mode 100644 index 00000000..1170aab8 --- /dev/null +++ b/data-structure/UnionFind.js @@ -0,0 +1,103 @@ +/** + * UnionFind (a.k.a. Disjoint Set Union, DSU) + * + * Tracks which group each element belongs to. Two operations: + * + * find(x) → representative ("root") of x's group + * union(a, b) → merge a's group with b's group + * + * Two elements are in the same group iff they have the same root. + * + * parent: [0, 0, 0, 3, 3, 5] + * rank: [1, 0, 0, 1, 0, 0] + * + * 0 3 5 + * / \ | + * 1 2 4 ← three groups: {0,1,2}, {3,4}, {5} + * + * Two optimizations get find/union to nearly O(1): + * + * 1. Path compression — when finding root of x, point every node on the + * path directly at the root. Future finds become O(1). + * + * Before find(2): 0 ← 1 ← 2 After: 0 ← 1 + * ↑ + * 2 + * + * 2. Union by rank — attach the shorter tree under the taller one. Keeps + * tree height ≤ log n even before compression kicks in. 
/*
 * (UnionFind documentation, continued from the previous chunk.)
 *
 * Combined complexity: O(α(n)) per operation, where α is the inverse
 * Ackermann function. For any practical n, α(n) ≤ 4 — effectively constant.
 *
 * What you CAN'T do: split a group. UF only ever merges. If you need to
 * undo unions, you need a different structure (link-cut tree, or rebuild).
 *
 * Common uses:
 *  - Kruskal's MST (cycle detection on edge addition)
 *  - dynamic connectivity / number-of-islands variants
 *  - account merging, equation equality (LeetCode 952, 990)
 *
 * Space: O(n)
 *
 *  find       O(α(n)) ≈ O(1)
 *  union      O(α(n)) ≈ O(1)
 *  connected  O(α(n)) ≈ O(1)
 *  count      O(1) — number of disjoint groups
 */
class UnionFind {
  #parent; // #parent[i] — parent of i; a root points at itself
  #rank;   // #rank[i] — upper bound on the height of i's subtree
  #count;  // number of disjoint groups still alive

  /** @param {number} n - elements 0..n-1, each starting as its own group. */
  constructor(n) {
    this.#parent = Array.from({ length: n }, (_, i) => i);
    this.#rank = new Array(n).fill(0);
    this.#count = n;
  }

  /**
   * Root (representative) of x's group, with full path compression:
   * the first pass climbs to the root, the second re-points every node
   * on the walked path directly at it, so future finds are O(1).
   */
  find(x) {
    let top = x;
    while (this.#parent[top] !== top) {
      top = this.#parent[top];
    }
    let cur = x;
    while (this.#parent[cur] !== top) {
      const up = this.#parent[cur];
      this.#parent[cur] = top;
      cur = up;
    }
    return top;
  }

  /**
   * Merge a's and b's groups, union by rank: the shorter tree is hung
   * under the taller so height stays balanced.
   * Returns true iff a merge actually happened (a and b were in different
   * groups) — callers like Kruskal's algorithm use the return value to
   * detect cycles without re-checking afterwards.
   */
  union(a, b) {
    let rootA = this.find(a);
    let rootB = this.find(b);
    if (rootA === rootB) return false;
    // Make rootA the taller (or equal-height) tree, then hang rootB under it.
    if (this.#rank[rootA] < this.#rank[rootB]) [rootA, rootB] = [rootB, rootA];
    this.#parent[rootB] = rootA;
    // Equal ranks: the merged tree just grew one level taller.
    if (this.#rank[rootA] === this.#rank[rootB]) this.#rank[rootA]++;
    this.#count--;
    return true;
  }

  /** True iff a and b currently share a root. */
  connected(a, b) {
    return this.find(a) === this.find(b);
  }

  /** Number of disjoint groups. */
  count() {
    return this.#count;
  }
}

export { UnionFind };

// ---------------------------------------------------------------------------
// (patch boundary) next hunk in the original diff:
// data-structure/tests/BST.test.js. The head of that file falls in this
// chunk but its last statement is cut mid-expression at the chunk edge, so
// the fragment is preserved verbatim as a comment:
//
// import { BST } from '../BST.js';
//
// describe('BST', () => {
//   it('starts empty', () => {
//     const t = new BST();
//     expect(t.size()).toBe(0);
//     expect(t.inOrder()).toEqual([]);
//     expect(t.search(1)).toBe(false);
//   });
//
//   it('insert + search', () => {
//     const t = new BST();
//     [5, 3, 8, 1, 4, 9].forEach(v => t.insert(v));
//     expect(t.search(4)).toBe(true);
//     expect(t.search(7)).toBe(false);
//     expect(t.size()).toBe(6);
//   });
//
//   it('inOrder yields sorted values', () => {
//     const t = new BST();
//     [5, 3, 8, 1, 4, 9, 7].forEach(v => t.insert(v));
//     expect(t.inOrder()).toEqual([1, 3, 4, 5, 7, 8, 9]);
//   });
//
//   it('duplicates are ignored', () => {
//     const t = new BST();
//     t.insert(1); t.insert(1); t.insert(1);
//     expect(t.size()).toBe(1);
//   });
//
//   it('delete leaf', () => {
//     const t = new BST();
//     [5, 3, 8].forEach(v => t.insert(v));
//     expect(t.delete(3)).toBe(true);
//     expect(t.search(3)).toBe(false);
//     expect(t.inOrder()).toEqual([5, 8]);
//   });
//
//   it('delete node with one child', () => {
//     const t = new BST();
//     [5, 3, 8, 9].forEach(v => t.insert(v));
//     t.delete(8); // 8 has only a right child (9)
//     expect(t.inOrder()).toEqual([3, 5, 9]);
//   });
//
//   it('delete node with two children — uses in-order successor', () => {
//     const t = new BST();
//     [5, 3, 8,            <- cut at chunk boundary; continues in next hunk
1, 4, 7, 9].forEach(v => t.insert(v)); + t.delete(5); // root with two children + expect(t.search(5)).toBe(false); + expect(t.inOrder()).toEqual([1, 3, 4, 7, 8, 9]); + }); + + it('delete on missing value returns false', () => { + const t = new BST(); + t.insert(1); + expect(t.delete(99)).toBe(false); + expect(t.size()).toBe(1); + }); + + it('handles skewed insert (worst case structure)', () => { + const t = new BST(); + for (let i = 0; i < 50; i++) t.insert(i); + expect(t.inOrder()).toEqual(Array.from({ length: 50 }, (_, i) => i)); + }); +}); diff --git a/data-structure/tests/DoublyLinkedList.test.js b/data-structure/tests/DoublyLinkedList.test.js new file mode 100644 index 00000000..10dbc21e --- /dev/null +++ b/data-structure/tests/DoublyLinkedList.test.js @@ -0,0 +1,48 @@ +import { DoublyLinkedList } from '../DoublyLinkedList.js'; + +describe('DoublyLinkedList', () => { + it('starts empty', () => { + const l = new DoublyLinkedList(); + expect(l.isEmpty()).toBe(true); + expect(l.toArray()).toEqual([]); + }); + + it('addFirst prepends, addLast appends', () => { + const l = new DoublyLinkedList(); + l.addLast(2); + l.addLast(3); + l.addFirst(1); + expect(l.toArray()).toEqual([1, 2, 3]); + }); + + it('removeFirst / removeLast drain in correct order', () => { + const l = new DoublyLinkedList(); + l.addLast(1); l.addLast(2); l.addLast(3); + expect(l.removeFirst()).toBe(1); + expect(l.removeLast()).toBe(3); + expect(l.toArray()).toEqual([2]); + }); + + it('removeNode is O(1) given a node reference (LRU pattern)', () => { + const l = new DoublyLinkedList(); + l.addLast('a'); + const nodeB = l.addLast('b'); + l.addLast('c'); + expect(l.removeNode(nodeB)).toBe('b'); + expect(l.toArray()).toEqual(['a', 'c']); + }); + + it('handles drain to empty then refill', () => { + const l = new DoublyLinkedList(); + l.addLast(1); l.removeFirst(); + expect(l.isEmpty()).toBe(true); + l.addLast(2); + expect(l.toArray()).toEqual([2]); + }); + + it('removeFirst/removeLast on empty returns 
undefined', () => { + const l = new DoublyLinkedList(); + expect(l.removeFirst()).toBeUndefined(); + expect(l.removeLast()).toBeUndefined(); + }); +}); diff --git a/data-structure/tests/Graph.test.js b/data-structure/tests/Graph.test.js new file mode 100644 index 00000000..e7ea7c35 --- /dev/null +++ b/data-structure/tests/Graph.test.js @@ -0,0 +1,83 @@ +import { Graph } from '../Graph.js'; + +describe('Graph (undirected)', () => { + it('addEdge auto-creates nodes on both ends', () => { + const g = new Graph(); + g.addEdge('A', 'B'); + expect(g.hasNode('A')).toBe(true); + expect(g.hasNode('B')).toBe(true); + expect(g.hasEdge('A', 'B')).toBe(true); + expect(g.hasEdge('B', 'A')).toBe(true); // undirected → both directions + }); + + it('addEdge is idempotent — duplicate edges collapse', () => { + const g = new Graph(); + g.addEdge('A', 'B'); + g.addEdge('A', 'B'); + expect([...g.neighbors('A')]).toEqual(['B']); + }); + + it('removeEdge clears both directions for undirected', () => { + const g = new Graph(); + g.addEdge('A', 'B'); + g.removeEdge('A', 'B'); + expect(g.hasEdge('A', 'B')).toBe(false); + expect(g.hasEdge('B', 'A')).toBe(false); + }); + + it('removeNode strips all references', () => { + const g = new Graph(); + g.addEdge('A', 'B'); g.addEdge('A', 'C'); g.addEdge('B', 'C'); + g.removeNode('A'); + expect(g.hasNode('A')).toBe(false); + expect(g.hasEdge('B', 'A')).toBe(false); + expect(g.hasEdge('C', 'A')).toBe(false); + }); + + it('bfs returns level-order from start', () => { + const g = new Graph(); + // A - B - C + // | | + // D - E - F + g.addEdge('A', 'B'); g.addEdge('B', 'C'); + g.addEdge('A', 'D'); g.addEdge('D', 'E'); + g.addEdge('E', 'F'); g.addEdge('C', 'F'); + const order = g.bfs('A'); + expect(order[0]).toBe('A'); + expect(order.length).toBe(6); + // A appears before its neighbors, all distance-1 nodes before distance-2 + expect(order.indexOf('A')).toBeLessThan(order.indexOf('B')); + expect(order.indexOf('A')).toBeLessThan(order.indexOf('D')); + }); 
+ + it('dfs visits every reachable node', () => { + const g = new Graph(); + g.addEdge('A', 'B'); g.addEdge('A', 'C'); g.addEdge('B', 'D'); + const order = g.dfs('A'); + expect(order.sort()).toEqual(['A', 'B', 'C', 'D']); + }); + + it('bfs/dfs on disconnected node returns just that node', () => { + const g = new Graph(); + g.addNode('X'); + expect(g.bfs('X')).toEqual(['X']); + expect(g.dfs('X')).toEqual(['X']); + }); +}); + +describe('Graph (directed)', () => { + it('addEdge only inserts one direction', () => { + const g = new Graph({ directed: true }); + g.addEdge('A', 'B'); + expect(g.hasEdge('A', 'B')).toBe(true); + expect(g.hasEdge('B', 'A')).toBe(false); + }); + + it('bfs respects edge direction', () => { + const g = new Graph({ directed: true }); + g.addEdge('A', 'B'); + g.addEdge('B', 'C'); + expect(g.bfs('A')).toEqual(['A', 'B', 'C']); + expect(g.bfs('C')).toEqual(['C']); // C has no outgoing edges + }); +}); diff --git a/data-structure/tests/HashMap.test.js b/data-structure/tests/HashMap.test.js new file mode 100644 index 00000000..b097a644 --- /dev/null +++ b/data-structure/tests/HashMap.test.js @@ -0,0 +1,63 @@ +import { HashMap } from '../HashMap.js'; + +describe('HashMap', () => { + it('starts empty', () => { + const m = new HashMap(); + expect(m.size()).toBe(0); + expect(m.get('x')).toBeUndefined(); + expect(m.has('x')).toBe(false); + }); + + it('set/get round trip', () => { + const m = new HashMap(); + m.set('a', 1); + m.set('b', 2); + expect(m.get('a')).toBe(1); + expect(m.get('b')).toBe(2); + expect(m.size()).toBe(2); + }); + + it('set on existing key updates value, not size', () => { + const m = new HashMap(); + m.set('a', 1); + m.set('a', 99); + expect(m.get('a')).toBe(99); + expect(m.size()).toBe(1); + }); + + it('delete removes entry', () => { + const m = new HashMap(); + m.set('a', 1); m.set('b', 2); + expect(m.delete('a')).toBe(true); + expect(m.has('a')).toBe(false); + expect(m.size()).toBe(1); + expect(m.delete('missing')).toBe(false); + 
}); + + it('survives rehash — insert past load factor threshold', () => { + const m = new HashMap(); + // initial capacity = 16, load factor 0.75 → resize after ~12 entries + for (let i = 0; i < 200; i++) m.set(`k${i}`, i); + for (let i = 0; i < 200; i++) expect(m.get(`k${i}`)).toBe(i); + expect(m.size()).toBe(200); + }); + + it('distinguishes numeric and string keys with same hash', () => { + // 1 and "1" hash to the same bucket (String(key) for hashing) but the + // chain uses strict === to compare keys, so they remain distinct entries. + // Same semantics as native Map. + const m = new HashMap(); + m.set(1, 'one'); + m.set('1', 'one-string'); + expect(m.get(1)).toBe('one'); + expect(m.get('1')).toBe('one-string'); + expect(m.size()).toBe(2); + }); + + it('entries() yields all pairs', () => { + const m = new HashMap(); + m.set('a', 1); m.set('b', 2); m.set('c', 3); + const collected = [...m.entries()].sort(); + expect(collected).toEqual([['a', 1], ['b', 2], ['c', 3]]); + }); +}); diff --git a/data-structure/tests/LinkedList.test.js b/data-structure/tests/LinkedList.test.js new file mode 100644 index 00000000..1b281fd8 --- /dev/null +++ b/data-structure/tests/LinkedList.test.js @@ -0,0 +1,58 @@ +import { LinkedList } from '../LinkedList.js'; + +describe('LinkedList', () => { + it('starts empty', () => { + const l = new LinkedList(); + expect(l.isEmpty()).toBe(true); + expect(l.size()).toBe(0); + expect(l.toArray()).toEqual([]); + }); + + it('append builds list in order', () => { + const l = new LinkedList(); + l.append(1); l.append(2); l.append(3); + expect(l.toArray()).toEqual([1, 2, 3]); + expect(l.size()).toBe(3); + }); + + it('prepend reverses insertion order', () => { + const l = new LinkedList(); + l.prepend(1); l.prepend(2); l.prepend(3); + expect(l.toArray()).toEqual([3, 2, 1]); + }); + + it('insertAt handles head, middle, tail, out-of-range', () => { + const l = new LinkedList(); + l.append(1); l.append(3); + l.insertAt(1, 2); // middle + l.insertAt(0, 
0); // head + l.insertAt(4, 4); // tail (size === 4) + expect(l.toArray()).toEqual([0, 1, 2, 3, 4]); + expect(l.insertAt(-1, 99)).toBe(false); + expect(l.insertAt(99, 99)).toBe(false); + }); + + it('removeAt returns value and updates tail when removing last', () => { + const l = new LinkedList(); + l.append(1); l.append(2); l.append(3); + expect(l.removeAt(2)).toBe(3); + l.append(4); + expect(l.toArray()).toEqual([1, 2, 4]); + }); + + it('removeHead drains correctly', () => { + const l = new LinkedList(); + l.append(1); l.append(2); + expect(l.removeHead()).toBe(1); + expect(l.removeHead()).toBe(2); + expect(l.removeHead()).toBeUndefined(); + expect(l.isEmpty()).toBe(true); + }); + + it('find returns index or -1', () => { + const l = new LinkedList(); + l.append('a'); l.append('b'); l.append('c'); + expect(l.find('b')).toBe(1); + expect(l.find('z')).toBe(-1); + }); +}); diff --git a/data-structure/tests/MaxPriorityQueue.test.js b/data-structure/tests/MaxPriorityQueue.test.js new file mode 100644 index 00000000..1727bc32 --- /dev/null +++ b/data-structure/tests/MaxPriorityQueue.test.js @@ -0,0 +1,58 @@ +import { MaxPriorityQueue } from '../MaxPriorityQueue.js'; + +describe('MaxPriorityQueue', () => { + it('starts empty', () => { + const pq = new MaxPriorityQueue(); + expect(pq.isEmpty()).toBe(true); + expect(pq.front()).toBeUndefined(); + expect(pq.size()).toBe(0); + }); + + it('drains in descending order', () => { + const pq = new MaxPriorityQueue(); + [5, 1, 3, 9, 2, 7].forEach(v => pq.enqueue(v)); + const out = []; + while (!pq.isEmpty()) out.push(pq.dequeue()); + expect(out).toEqual([9, 7, 5, 3, 2, 1]); + }); + + it('front always points at the max', () => { + const pq = new MaxPriorityQueue(); + pq.enqueue(5); + expect(pq.front()).toBe(5); + pq.enqueue(8); + expect(pq.front()).toBe(8); + pq.enqueue(2); + expect(pq.front()).toBe(8); + pq.enqueue(15); + expect(pq.front()).toBe(15); + }); + + it('handles duplicates', () => { + const pq = new MaxPriorityQueue(); + 
[3, 3, 1, 1, 2, 2].forEach(v => pq.enqueue(v)); + const out = []; + while (!pq.isEmpty()) out.push(pq.dequeue()); + expect(out).toEqual([3, 3, 2, 2, 1, 1]); + }); + + it('custom priorityOf — max by extracted priority', () => { + // top-K-largest pattern: keep biggest task by priority field + const pq = new MaxPriorityQueue({ priorityOf: (t) => t.priority }); + pq.enqueue({ name: 'A', priority: 1 }); + pq.enqueue({ name: 'B', priority: 5 }); + pq.enqueue({ name: 'C', priority: 3 }); + expect(pq.dequeue().name).toBe('B'); + expect(pq.dequeue().name).toBe('C'); + expect(pq.dequeue().name).toBe('A'); + }); + + it('handles many random inserts (stress)', () => { + const pq = new MaxPriorityQueue(); + const nums = Array.from({ length: 500 }, () => Math.floor(Math.random() * 10000)); + nums.forEach(v => pq.enqueue(v)); + const out = []; + while (!pq.isEmpty()) out.push(pq.dequeue()); + expect(out).toEqual([...nums].sort((a, b) => b - a)); + }); +}); diff --git a/data-structure/tests/MinPriorityQueue.test.js b/data-structure/tests/MinPriorityQueue.test.js new file mode 100644 index 00000000..cdb21321 --- /dev/null +++ b/data-structure/tests/MinPriorityQueue.test.js @@ -0,0 +1,58 @@ +import { MinPriorityQueue } from '../MinPriorityQueue.js'; + +describe('MinPriorityQueue', () => { + it('starts empty', () => { + const pq = new MinPriorityQueue(); + expect(pq.isEmpty()).toBe(true); + expect(pq.front()).toBeUndefined(); + expect(pq.dequeue()).toBeUndefined(); + }); + + it('drains in ascending order', () => { + const pq = new MinPriorityQueue(); + [5, 1, 3, 9, 2, 7].forEach(v => pq.enqueue(v)); + const out = []; + while (!pq.isEmpty()) out.push(pq.dequeue()); + expect(out).toEqual([1, 2, 3, 5, 7, 9]); + }); + + it('front always points at the min', () => { + const pq = new MinPriorityQueue(); + pq.enqueue(5); + expect(pq.front()).toBe(5); + pq.enqueue(2); + expect(pq.front()).toBe(2); + pq.enqueue(8); + expect(pq.front()).toBe(2); + pq.enqueue(1); + expect(pq.front()).toBe(1); + 
}); + + it('handles duplicates', () => { + const pq = new MinPriorityQueue(); + [3, 3, 1, 1, 2, 2].forEach(v => pq.enqueue(v)); + const out = []; + while (!pq.isEmpty()) out.push(pq.dequeue()); + expect(out).toEqual([1, 1, 2, 2, 3, 3]); + }); + + it('custom priorityOf — sort by extracted priority', () => { + // classic Dijkstra-style usage: object with a numeric priority field + const pq = new MinPriorityQueue({ priorityOf: (t) => t.cost }); + pq.enqueue({ name: 'B', cost: 5 }); + pq.enqueue({ name: 'A', cost: 1 }); + pq.enqueue({ name: 'C', cost: 3 }); + expect(pq.dequeue().name).toBe('A'); + expect(pq.dequeue().name).toBe('C'); + expect(pq.dequeue().name).toBe('B'); + }); + + it('handles many random inserts (stress)', () => { + const pq = new MinPriorityQueue(); + const nums = Array.from({ length: 500 }, () => Math.floor(Math.random() * 10000)); + nums.forEach(v => pq.enqueue(v)); + const out = []; + while (!pq.isEmpty()) out.push(pq.dequeue()); + expect(out).toEqual([...nums].sort((a, b) => a - b)); + }); +}); diff --git a/data-structure/tests/Queue.test.js b/data-structure/tests/Queue.test.js new file mode 100644 index 00000000..b681c898 --- /dev/null +++ b/data-structure/tests/Queue.test.js @@ -0,0 +1,49 @@ +import { Queue } from '../Queue.js'; + +describe('Queue', () => { + it('starts empty', () => { + const q = new Queue(); + expect(q.isEmpty()).toBe(true); + expect(q.size()).toBe(0); + expect(q.front()).toBeUndefined(); + }); + + it('enqueue/dequeue follows FIFO order', () => { + const q = new Queue(); + q.enqueue(1); q.enqueue(2); q.enqueue(3); + expect(q.dequeue()).toBe(1); + expect(q.dequeue()).toBe(2); + expect(q.dequeue()).toBe(3); + expect(q.isEmpty()).toBe(true); + }); + + it('front returns head without removing', () => { + const q = new Queue(); + q.enqueue('a'); q.enqueue('b'); + expect(q.front()).toBe('a'); + expect(q.size()).toBe(2); + }); + + it('dequeue on empty returns undefined', () => { + const q = new Queue(); + 
expect(q.dequeue()).toBeUndefined(); + }); + + it('reuses correctly after fully draining', () => { + // regression: tail must reset to null when the last item is dequeued, + // otherwise the next enqueue corrupts the list + const q = new Queue(); + q.enqueue(1); + q.dequeue(); + q.enqueue(2); + expect(q.front()).toBe(2); + expect(q.size()).toBe(1); + }); + + it('handles many items', () => { + const q = new Queue(); + for (let i = 0; i < 1000; i++) q.enqueue(i); + for (let i = 0; i < 1000; i++) expect(q.dequeue()).toBe(i); + expect(q.isEmpty()).toBe(true); + }); +}); diff --git a/data-structure/tests/Stack.test.js b/data-structure/tests/Stack.test.js new file mode 100644 index 00000000..ba8000a3 --- /dev/null +++ b/data-structure/tests/Stack.test.js @@ -0,0 +1,39 @@ +import { Stack } from '../Stack.js'; + +describe('Stack', () => { + it('starts empty', () => { + const s = new Stack(); + expect(s.isEmpty()).toBe(true); + expect(s.size()).toBe(0); + expect(s.peek()).toBeUndefined(); + }); + + it('push/pop follows LIFO order', () => { + const s = new Stack(); + s.push(1); s.push(2); s.push(3); + expect(s.pop()).toBe(3); + expect(s.pop()).toBe(2); + expect(s.pop()).toBe(1); + expect(s.isEmpty()).toBe(true); + }); + + it('peek returns top without removing', () => { + const s = new Stack(); + s.push('a'); s.push('b'); + expect(s.peek()).toBe('b'); + expect(s.size()).toBe(2); + }); + + it('pop on empty returns undefined', () => { + const s = new Stack(); + expect(s.pop()).toBeUndefined(); + }); + + it('handles mixed types', () => { + const s = new Stack(); + s.push(null); s.push({ x: 1 }); s.push([1, 2]); + expect(s.pop()).toEqual([1, 2]); + expect(s.pop()).toEqual({ x: 1 }); + expect(s.pop()).toBeNull(); + }); +}); diff --git a/data-structure/tests/Trie.test.js b/data-structure/tests/Trie.test.js new file mode 100644 index 00000000..45416891 --- /dev/null +++ b/data-structure/tests/Trie.test.js @@ -0,0 +1,54 @@ +import { Trie } from '../Trie.js'; + +describe('Trie', () => 
{ + it('starts empty — search and startsWith both false', () => { + const t = new Trie(); + expect(t.search('a')).toBe(false); + expect(t.startsWith('a')).toBe(false); + }); + + it('insert + exact search', () => { + const t = new Trie(); + t.insert('cat'); + expect(t.search('cat')).toBe(true); + expect(t.search('ca')).toBe(false); // prefix only — not a word + expect(t.search('cats')).toBe(false); // not inserted + }); + + it('startsWith matches any inserted prefix path', () => { + const t = new Trie(); + t.insert('apple'); + expect(t.startsWith('app')).toBe(true); + expect(t.startsWith('apple')).toBe(true); + expect(t.startsWith('apples')).toBe(false); + expect(t.startsWith('b')).toBe(false); + }); + + it('handles overlapping words sharing prefixes', () => { + const t = new Trie(); + t.insert('car'); + t.insert('cars'); + t.insert('cat'); + expect(t.search('car')).toBe(true); + expect(t.search('cars')).toBe(true); + expect(t.search('cat')).toBe(true); + expect(t.search('ca')).toBe(false); + expect(t.startsWith('ca')).toBe(true); + }); + + it('handles empty string', () => { + const t = new Trie(); + t.insert(''); + expect(t.search('')).toBe(true); + expect(t.startsWith('')).toBe(true); + }); + + it('handles unicode characters', () => { + const t = new Trie(); + t.insert('café'); + t.insert('日本'); + expect(t.search('café')).toBe(true); + expect(t.search('日本')).toBe(true); + expect(t.startsWith('日')).toBe(true); + }); +}); diff --git a/data-structure/tests/UnionFind.test.js b/data-structure/tests/UnionFind.test.js new file mode 100644 index 00000000..dd32bbc5 --- /dev/null +++ b/data-structure/tests/UnionFind.test.js @@ -0,0 +1,47 @@ +import { UnionFind } from '../UnionFind.js'; + +describe('UnionFind', () => { + it('starts with n disjoint singletons', () => { + const uf = new UnionFind(5); + expect(uf.count()).toBe(5); + for (let i = 0; i < 5; i++) expect(uf.find(i)).toBe(i); + }); + + it('union merges groups and decrements count', () => { + const uf = new 
UnionFind(5); + expect(uf.union(0, 1)).toBe(true); + expect(uf.union(2, 3)).toBe(true); + expect(uf.count()).toBe(3); // {0,1}, {2,3}, {4} + }); + + it('union of already-connected returns false', () => { + const uf = new UnionFind(3); + uf.union(0, 1); + expect(uf.union(0, 1)).toBe(false); + expect(uf.count()).toBe(2); + }); + + it('connected reflects transitive closure', () => { + const uf = new UnionFind(4); + uf.union(0, 1); + uf.union(1, 2); + expect(uf.connected(0, 2)).toBe(true); + expect(uf.connected(0, 3)).toBe(false); + }); + + it('handles long chain with path compression', () => { + const uf = new UnionFind(1000); + for (let i = 1; i < 1000; i++) uf.union(i - 1, i); + expect(uf.count()).toBe(1); + for (let i = 0; i < 1000; i++) expect(uf.connected(0, i)).toBe(true); + }); + + it('models number-of-islands style merging', () => { + // edges: (0,1) (1,2) (3,4) — leaves groups {0,1,2}, {3,4}, {5} + const uf = new UnionFind(6); + uf.union(0, 1); uf.union(1, 2); uf.union(3, 4); + expect(uf.count()).toBe(3); + expect(uf.connected(0, 2)).toBe(true); + expect(uf.connected(2, 3)).toBe(false); + }); +});