Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
148 changes: 148 additions & 0 deletions data-structure/BST.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
/**
* BST — Binary Search Tree (unbalanced)
*
* Invariant: for every node, all values in the LEFT subtree are < node.val,
* all values in the RIGHT subtree are > node.val.
*
* 5
* / \
* 3 8
* / \ \
* 1 4 9
*
* That invariant is what makes search work: at each step you can throw
* away half the tree.
*
* Worst case: insertions in sorted order create a degenerate tree —
* essentially a linked list, all operations O(n):
*
* 1
* \
* 2
* \
* 3 ...
*
* To guarantee O(log n) you need self-balancing variants (AVL, Red-Black).
* Those rotate on insert/delete to keep height ≈ log n. They're outside this
* file's scope — start here, level up later.
*
* Delete is the spicy operation. Three cases:
* 1. Leaf → null it out
* 2. One child → splice the child up
* 3. Two children → replace value with in-order successor (min of right
* subtree), then delete that successor
*
* In-order traversal yields values in sorted order — useful for problems
* that need a sorted view without re-sorting.
*
* Space: O(n)
*
* insert O(log n) avg / O(n) worst (skewed)
* search O(log n) avg / O(n) worst
* delete O(log n) avg / O(n) worst
* inOrder O(n)
*/
class BSTNode {
  /** A single tree node: a value plus left/right child links. */
  constructor(val) {
    this.val = val;
    this.left = null;
    this.right = null;
  }
}

class BST {
  #root = null; // top of the tree; null when empty
  #count = 0;   // number of distinct values stored

  /**
   * Add `val` to the tree. Duplicates are silently ignored.
   * O(log n) average, O(n) on a skewed tree.
   */
  insert(val) {
    this.#root = this.#insertInto(this.#root, val);
  }

  /**
   * Recursive insert: returns the (possibly new) subtree root so the
   * parent can re-link it.
   */
  #insertInto(node, val) {
    if (node === null) {
      this.#count++;
      return new BSTNode(val);
    }
    if (val === node.val) return node; // duplicates ignored
    if (val < node.val) {
      node.left = this.#insertInto(node.left, val);
    } else {
      node.right = this.#insertInto(node.right, val);
    }
    return node;
  }

  /**
   * True iff `val` is present. Walks down one branch per step,
   * discarding the other half of the tree each time.
   */
  search(val) {
    for (let cur = this.#root; cur !== null; ) {
      if (cur.val === val) return true;
      cur = val < cur.val ? cur.left : cur.right;
    }
    return false;
  }

  /**
   * Remove `val` if present. Returns true when something was removed —
   * detected by comparing the size before and after.
   */
  delete(val) {
    const sizeBefore = this.#count;
    this.#root = this.#remove(this.#root, val);
    return this.#count < sizeBefore;
  }

  /**
   * Recursive delete; returns the replacement subtree root.
   * Two-children case: overwrite with the in-order successor's value
   * (min of the right subtree), then delete that successor below —
   * only the final physical removal touches #count.
   */
  #remove(node, val) {
    if (node === null) return null; // not found: size unchanged
    if (val < node.val) {
      node.left = this.#remove(node.left, val);
      return node;
    }
    if (val > node.val) {
      node.right = this.#remove(node.right, val);
      return node;
    }
    // target found
    if (node.left !== null && node.right !== null) {
      let succ = node.right;
      while (succ.left !== null) succ = succ.left;
      node.val = succ.val;
      node.right = this.#remove(node.right, succ.val);
      return node;
    }
    // zero or one child: splice the (possibly null) child upward
    this.#count--;
    return node.left ?? node.right;
  }

  /**
   * Values in ascending order. Explicit-stack traversal so a degenerate
   * (linked-list-shaped) tree cannot overflow the call stack.
   */
  inOrder() {
    const values = [];
    const pending = [];
    let cur = this.#root;
    while (cur !== null || pending.length > 0) {
      if (cur !== null) {
        // keep diving left, remembering the path
        pending.push(cur);
        cur = cur.left;
      } else {
        cur = pending.pop();
        values.push(cur.val);
        cur = cur.right;
      }
    }
    return values;
  }

  /** Number of values currently stored. O(1). */
  size() {
    return this.#count;
  }
}

export { BST, BSTNode };
109 changes: 109 additions & 0 deletions data-structure/DoublyLinkedList.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
/**
* DoublyLinkedList — each node has prev AND next pointers
*
* null ← [a] ⇄ [b] ⇄ [c] → null
* ↑ ↑
* head tail
*
* Why pay 2× the pointers? Because given a node reference, you can:
* - remove it in O(1) without scanning to find the predecessor
* - walk backward as well as forward
*
* This is exactly what an LRU cache needs: a HashMap maps keys → nodes,
* and the list reorders itself on every access. Without `prev`, the
* "remove a node" step becomes O(n) and the cache is broken.
*
* The `removeNode(node)` method below is the headline feature — it's why
* this structure exists. Everything else is the same as a singly-linked list
* with an extra back-pointer kept in sync.
*
* Sentinel-node trick (used here): keep dummy `head` and `tail` nodes that
* never hold values. Removes all "is this the first/last node?" branches —
* every real node has a real prev and next. Cleaner code, fewer bugs.
*
* [HEAD] ⇄ [a] ⇄ [b] ⇄ [TAIL]
*
* Space: O(n)
*
* addFirst O(1)
* addLast O(1)
* removeFirst O(1)
* removeLast O(1)
* removeNode O(1) ← given a node reference
* size O(1)
*/
class DListNode {
  /** List node: a value plus both-direction links. */
  constructor(val) {
    this.val = val;
    this.prev = null;
    this.next = null;
  }
}

class DoublyLinkedList {
  #front; // sentinel before the first real node — holds no value
  #back;  // sentinel after the last real node — holds no value
  #count = 0;

  constructor() {
    // Wire the two sentinels to each other so an empty list still has a
    // valid prev/next for every real node inserted later.
    const front = new DListNode(null);
    const back = new DListNode(null);
    front.next = back;
    back.prev = front;
    this.#front = front;
    this.#back = back;
  }

  /** Insert at the head. O(1). Returns the node for later O(1) removal. */
  addFirst(val) {
    return this.#spliceIn(val, this.#front, this.#front.next);
  }

  /** Insert at the tail. O(1). Returns the node for later O(1) removal. */
  addLast(val) {
    return this.#spliceIn(val, this.#back.prev, this.#back);
  }

  /** Remove and return the first value, or undefined when empty. O(1). */
  removeFirst() {
    return this.#count === 0 ? undefined : this.removeNode(this.#front.next);
  }

  /** Remove and return the last value, or undefined when empty. O(1). */
  removeLast() {
    return this.#count === 0 ? undefined : this.removeNode(this.#back.prev);
  }

  /**
   * Unlink `node` in O(1) — the reason this structure exists.
   * The caller must pass a node that belongs to this list (e.g. one
   * returned by addFirst/addLast). Returns the removed value.
   */
  removeNode(node) {
    const { prev, next } = node;
    prev.next = next;
    next.prev = prev;
    this.#count--;
    return node.val;
  }

  /** Snapshot of all values, head to tail. O(n). */
  toArray() {
    const values = [];
    let cur = this.#front.next;
    while (cur !== this.#back) {
      values.push(cur.val);
      cur = cur.next;
    }
    return values;
  }

  /** Number of real (non-sentinel) nodes. O(1). */
  size() {
    return this.#count;
  }

  /** True when the list holds no values. O(1). */
  isEmpty() {
    return this.#count === 0;
  }

  /** Link a new node between `before` and `after`; returns it. */
  #spliceIn(val, before, after) {
    const node = new DListNode(val);
    node.prev = before;
    node.next = after;
    before.next = node;
    after.prev = node;
    this.#count++;
    return node;
  }
}

export { DoublyLinkedList, DListNode };
140 changes: 140 additions & 0 deletions data-structure/Graph.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
/**
* Graph — adjacency list (undirected by default, supports directed)
*
* Storage: a Map from node → Set of neighbors.
*
* A — B
* | |
* C — D
*
* adj = {
* A: {B, C},
* B: {A, D},
* C: {A, D},
* D: {B, C},
* }
*
* Adjacency list vs adjacency matrix:
* - matrix: V×V bits, edge lookup O(1), space O(V²) — wasteful for sparse graphs
* - list: neighbors easy to iterate, space O(V + E) — wins when E ≪ V²
*
* Most real graphs (social, road, web) are sparse → list wins.
*
* Why a Set per node, not an Array? `addEdge(a, b)` becomes idempotent in O(1)
* — no duplicate edges from re-adding. Removal is also O(1) instead of O(deg).
*
* Directedness: pass `{ directed: true }` to skip the reverse-edge insert.
*
* BFS = "spread one ring at a time" — finds shortest path in unweighted graphs.
* DFS = "go deep, backtrack" — natural for cycle detection, topo sort, components.
*
* V = vertex count, E = edge count, deg(v) = neighbor count of v.
*
* Space: O(V + E)
*
* addNode O(1)
* addEdge O(1)
* removeEdge O(1)
* removeNode O(deg(v))
* neighbors O(1) — returns the Set
* bfs / dfs O(V + E)
*/
class Graph {
  #edges = new Map(); // vertex → Set of adjacent vertices
  #isDirected;

  /** @param {{directed?: boolean}} [opts] — directed skips reverse edges. */
  constructor({ directed = false } = {}) {
    this.#isDirected = directed;
  }

  /** Register a vertex; no-op if already present. O(1). */
  addNode(v) {
    if (!this.#edges.has(v)) this.#edges.set(v, new Set());
  }

  /**
   * Connect u → v (and v → u unless directed). Creates missing vertices.
   * Idempotent thanks to the Set storage. O(1).
   */
  addEdge(u, v) {
    this.addNode(u);
    this.addNode(v);
    this.#edges.get(u).add(v);
    if (!this.#isDirected) this.#edges.get(v).add(u);
  }

  /** Drop the u → v edge (both directions when undirected). O(1). */
  removeEdge(u, v) {
    this.#edges.get(u)?.delete(v);
    if (!this.#isDirected) this.#edges.get(v)?.delete(u);
  }

  /**
   * Drop a vertex and every edge touching it.
   * Undirected: O(deg(v)) — only neighbors can point back.
   * Directed: O(V) — any vertex might hold an edge into v, so scan all.
   */
  removeNode(v) {
    if (!this.#edges.has(v)) return;
    if (this.#isDirected) {
      for (const neighborSet of this.#edges.values()) neighborSet.delete(v);
    } else {
      for (const u of this.#edges.get(v)) this.#edges.get(u).delete(v);
    }
    this.#edges.delete(v);
  }

  /** True if vertex v exists. O(1). */
  hasNode(v) {
    return this.#edges.has(v);
  }

  /** True if the edge u → v exists. O(1). */
  hasEdge(u, v) {
    return this.#edges.get(u)?.has(v) ?? false;
  }

  /** The Set of v's neighbors (empty Set for unknown vertices). O(1). */
  neighbors(v) {
    return this.#edges.get(v) ?? new Set();
  }

  /** All vertices as an array. */
  nodes() {
    return [...this.#edges.keys()];
  }

  /**
   * Breadth-first search from `start`; returns vertices in visit order
   * ([] for an unknown start). A growing array with a moving read index
   * stands in for a queue — no O(n) shift() calls.
   */
  bfs(start) {
    if (!this.#edges.has(start)) return [];
    const seen = new Set([start]);
    const visitOrder = [];
    const frontier = [start];
    for (let read = 0; read < frontier.length; read++) {
      const current = frontier[read];
      visitOrder.push(current);
      for (const neighbor of this.#edges.get(current)) {
        if (seen.has(neighbor)) continue;
        seen.add(neighbor);
        frontier.push(neighbor);
      }
    }
    return visitOrder;
  }

  /**
   * Depth-first search from `start`, iterative to dodge call-stack limits.
   * Neighbors are pushed last-first so the first neighbor pops first,
   * matching the order a recursive DFS would visit.
   */
  dfs(start) {
    if (!this.#edges.has(start)) return [];
    const seen = new Set();
    const visitOrder = [];
    const stack = [start];
    while (stack.length > 0) {
      const current = stack.pop();
      if (seen.has(current)) continue;
      seen.add(current);
      visitOrder.push(current);
      const adjacent = [...this.#edges.get(current)];
      for (let i = adjacent.length - 1; i >= 0; i--) {
        if (!seen.has(adjacent[i])) stack.push(adjacent[i]);
      }
    }
    return visitOrder;
  }
}

export { Graph };
Loading