# Variable-size sliding window template:
# grow the window by advancing `end`; shrink from `start` until valid again.
start = 0
for end in range(len(nums)):
    add nums[end]                       # extend window with the new element
    while not valid:                    # window violates the constraint
        remove nums[start]; start += 1  # shrink from the left until valid
    ans = max(ans, end-start+1)         # window [start, end] is now valid
# Fixed-size-k sliding window template:
# once the window reaches size k, evaluate it, then slide right by one.
start = 0
for end in range(len(nums)):
    add nums[end]                       # extend window with the new element
    if end - start + 1 == k:            # window is exactly size k
        if valid: update ans            # evaluate the current window
        remove nums[start]; start += 1  # slide: drop the leftmost element
# 1-D prefix sums: prefix has length n+1 with prefix[0] = 0
prefix[i+1] = prefix[i] + nums[i]  # sum of nums[L..R] = prefix[R+1] - prefix[L]
# Count subarrays summing to k (prefix-sum + hash map, O(n)).
# FIX: `count` was incremented without ever being initialized — add `count = 0`.
count = 0
cur_sum = 0
counts = {0: 1}  # empty prefix occurs once, so a full-prefix hit is counted
for num in nums:
    cur_sum += num
    # each earlier prefix with value cur_sum - k ends a subarray summing to k
    count += counts.get(cur_sum - k, 0)
    counts[cur_sum] = counts.get(cur_sum, 0) + 1
2-D prefix sums (table size (n+1)×(m+1)):
prefix[i+1][j+1] = matrix[i][j] + prefix[i][j+1] + prefix[i+1][j] - prefix[i][j]
Sum of submatrix (r1,c1)..(r2,c2):
query(r1,c1,r2,c2) = prefix[r2+1][c2+1] - prefix[r1][c2+1] - prefix[r2+1][c1] + prefix[r1][c1]
# Binary search: leftmost x in [lo, hi] where monotonic f(x) is True (-1 if none)
res = -1
while lo <= hi:
    mid = (lo+hi)//2
    if f(mid): res=mid; hi=mid-1  # record candidate, keep searching left
    else: lo=mid+1
# Binary search: rightmost x in [lo, hi] where monotonic f(x) is True (-1 if none)
res = -1
while lo <= hi:
    mid = (lo+hi)//2
    if f(mid): res=mid; lo=mid+1  # record candidate, keep searching right
    else: hi=mid-1
Always: l≤r, l=m+1 / r=m-1, define monotonic f(x)
| Problem | f(x) |
|---|---|
| Rotated min | nums[x] ≤ nums[-1] |
| Koko Bananas | can finish at speed x in h hrs? |
| Ship Capacity | can ship w/ cap x in d days? |
| | ↑ Increasing | ↓ Decreasing |
|---|---|---|
| Pop when | new < top | new > top |
| Below | nearest smaller LEFT | nearest larger LEFT |
| Popper | nearest smaller RIGHT | nearest larger RIGHT |
# Next Greater Element — monotonic decreasing stack holding indices
stack = []
for i in range(n):
    # every stacked index whose value is beaten by nums[i] gets its answer now
    while stack and nums[stack[-1]] < nums[i]:
        res[stack.pop()] = nums[i]
    stack.append(i)
Sliding window max/min — maintain deque of candidates, pop from back when new elem is better, pop from front when out of window.
# Floyd's tortoise-and-hare: O(1)-space cycle detection in a linked list.
# Phase 1: detect cycle — fast moves 2 steps per slow's 1; pointers can only
# meet if a cycle exists.
slow = fast = head
while fast and fast.next:
    slow = slow.next
    fast = fast.next.next
    if slow == fast: break
else: return None # loop ran to completion without a meeting → no cycle
# Phase 2: find entry. With F nodes before the cycle, the meeting point lies
# F steps before the entry along the cycle (F = kC - a), so two 1x pointers —
# one from head, one from the meeting point — collide exactly at the entry.
slow2 = head
while slow2 != slow:
    slow2 = slow2.next
    slow = slow.next
return slow # cycle entry node
| Problem | = Blocks |
|---|---|
| Reorder List | split + reverse 2nd + interleave |
| Sort List | split + recurse + merge (mergesort) |
| Merge K | D&C merge pairs / heap |
BST: insert/delete/search O(log n) avg, O(n) worst
Space: balanced→DFS O(log n), BFS O(n) | skewed→DFS O(n), BFS O(1)
| push/pop | O(log n) | peek | O(1) |
| heapify | O(n) | | |
import heapq
heapq.heapify(arr)      # O(n) build
heapq.heappush(arr, x)
heapq.heappop(arr)      # returns min
arr[0]                  # peek
# Max-heap: negate values
Use for: prefix search, wildcard match, grid word search
class TrieNode:
    """One trie node: per-character child links plus an end-of-word flag."""

    def __init__(self):
        # char → TrieNode; a stored word terminates here iff is_end is True
        self.children = dict()
        self.is_end = False
# insert: walk/create nodes, set is_end=True
# search: walk nodes, return node.is_end
# startsWith: walk nodes, return True
TC: O(L) per op | SC: O(N·L) total
def dfs(node):
    """Recursive DFS over `adj`, marking reached nodes in the shared `visited` set."""
    if node in visited:
        return
    visited.add(node)
    for neighbor in adj[node]:
        dfs(neighbor)
# BFS from `node`: mark on enqueue so each vertex enters the queue once
visited.add(node)
q = deque([node])
while q:
    u = q.popleft()
    for nxt in adj[u]:
        if nxt not in visited:
            visited.add(nxt)
            q.append(nxt)
Both: TC O(V+E) | SC O(V)
Ask: self-loops? directed? weighted? neg weights? disconnected? cycles?
def dijkstra(adj, src, n):
    """Single-source shortest paths for non-negative edge weights.

    adj: adjacency list where adj[u] yields (v, weight) pairs.
    Returns a length-n list of distances from src (inf if unreachable).
    Uses lazy deletion instead of decrease-key: stale heap entries are skipped.
    """
    INF = float('inf')
    dist = [INF] * n
    dist[src] = 0
    pq = [(0, src)]  # (best-known cost, node)
    while pq:
        d, u = heapq.heappop(pq)
        if d > dist[u]:
            continue  # stale entry — a shorter path to u was already settled
        for v, w in adj[u]:
            nd = d + w
            if nd < dist[v]:
                dist[v] = nd
                heapq.heappush(pq, (nd, v))
    return dist
TC: O((V+E) log V) | SC: O(V+E)
No decrease-key → lazy deletion (skip stale entries)
Like Dijkstra but: edge weight only (not cumulative dist). Grows MST one edge at a time.
def prim(adj, n):
    """Lazy Prim's MST: returns the total weight of a minimum spanning tree.

    adj: undirected adjacency list where adj[u] yields (v, weight) pairs.
    Grows the tree from node 0, always taking the lightest crossing edge;
    stale heap entries (node already in the tree) are skipped on pop.
    """
    in_tree = set()
    pq = [(0, 0)]  # (edge weight, node); seed with node 0 at cost 0
    total = 0
    while pq and len(in_tree) < n:
        w, u = heapq.heappop(pq)
        if u in in_tree:
            continue  # stale: u was attached via a lighter edge
        in_tree.add(u)
        total += w
        for v, wt in adj[u]:
            if v not in in_tree:
                heapq.heappush(pq, (wt, v))
    return total
TC: O((V+E) log V) | SC: O(V+E)
# Kahn's algorithm: topological order via repeated removal of indegree-0 nodes.
from collections import deque
indegree = [0] * n
# FIX: iterate node indices 0..n-1 — `for u in adj` yields the neighbor LISTS
# themselves under the list-of-lists adjacency convention used in this sheet,
# making adj[u] a type error.
for u in range(n):
    for v in adj[u]:
        indegree[v] += 1
q = deque(u for u in range(n) if indegree[u] == 0)
order = []  # topological order; len(order) < n afterwards ⇒ graph has a cycle
while q:
    u = q.popleft()
    order.append(u)
    for v in adj[u]:
        indegree[v] -= 1
        if indegree[v] == 0:
            q.append(v)
def dfs(u):
    """Post-order DFS: append u to `res` only after all its descendants."""
    visited[u] = True
    for w in adj[u]:
        if not visited[w]:
            dfs(w)
    res.append(u)

# run from every unvisited node to cover disconnected components
for u in range(n):
    if not visited[u]:
        dfs(u)
res.reverse()  # reversed post-order = topological order
TC: O(V+E) | SC: O(V)
def bucket_sort(arr):
    """Sort a list of numbers by scattering into n range buckets.

    Each bucket is sorted individually, then buckets are concatenated in
    order. O(n) average for uniformly distributed input, O(n^2) worst case.
    Returns a new sorted list (the empty input is returned as-is).
    """
    if not arr:
        return arr
    lo, hi = min(arr), max(arr)
    n = len(arr)
    # +1 guards against zero width (all-equal input) and keeps every
    # computed index strictly below n
    width = (hi - lo) / n + 1
    buckets = [[] for _ in range(n)]
    for val in arr:
        buckets[int((val - lo) / width)].append(val)
    out = []
    for bucket in buckets:
        out.extend(sorted(bucket))  # small comparison sort per bucket
    return out
TC: O(n + k) avg, O(n²) worst | SC: O(n + k)
k = # buckets. Best when input uniformly distributed.
# Subsets template: binary recursion — at each index, include it or not
def search(k):
    if k == n:
        process(subset)  # leaf: one complete subset has been built
        return
    subset.append(k)
    search(k + 1)        # branch 1: element k included
    subset.pop()
    search(k + 1)        # branch 2: element k excluded
Min removals for non-overlap = n − max non-overlapping
Top-down: greedy choice → solve remaining subproblem
Prove: swap argument (exchange arg) + contradiction for substructure