/* allPathsSourceTarget
   ====================
 * Given a directed acyclic graph (DAG) of n nodes labeled from 0 to n - 1, find all possible
 * paths from node 0 to node n - 1 and return them in any order.
 * The graph is represented as an adjacency list: graph[i] is a list of all nodes that node i
 * has a directed edge to.
   Example:  node 0 1 2 3
   Input: graph = [[1,2],[3],[3],[]]   Output: [[0,1,3],[0,2,3]]
     -----     -----
     | 0 |--->| 1 |
     -----     -----
       |         |
       v         v
     -----     -----
     | 2 |--->| 3 |
     -----     -----
 * Start DFS from node 0 with a current path containing {0}.
 * At each node, recurse on all its neighbors, adding them to the current path.
 * When we reach the target (n-1), make a copy of the current path and add it to the result.
 * Backtrack by popping the last node to explore other branches.
 * Time: O(N * 2^P) where P is number of paths (worst-case exponential, but n≤15 → acceptable)
 * Space: O(N) recursion stack + O(total output size)
 */
vector<vector<int>> allPathsSourceTarget(vector<vector<int>>& graph)
{
    const int target = int(graph.size()) - 1;
    vector<int> path;
    vector<vector<int>> result;

    // node 0 1 2 3
    // Input: graph = [[1,2],[3],[3],[]]
    function<void(int, vector<int>&)> dfs = [&](int node, vector<int>& path)
    {
        path.push_back(node); // add current node
        if (node == target)
        {
            result.push_back(path); // found a valid path - copy it
        }
        else
        {
            for (int neighbor : graph[node])
            {
                dfs(neighbor, path);
            }
        }
        path.pop_back(); // backtrack
    };

    dfs(0, path);
    return result;
}

/* combinationSum
   ==============
 * Given an array of distinct integers candidates and a target integer target, return a list of
 * all unique combinations of candidates where the chosen numbers sum to target. You may return
 * the combinations in any order.
 * The same number may be chosen from candidates an unlimited number of times. Two combinations
 * are unique if the frequency of at least one of the chosen numbers is different.
   Example: Input: candidates = [2,3,6,7], target = 7   Output: [[2,2,3],[7]]
 * This is a classic backtracking-with-unlimited-repetition problem, very similar to the coin
 * change combinations problem.
 * Pseudo-code
   If target < 0 : not a valid combination so return back.
   If target == 0 : valid combination so store it as an answer.
   Recursive choices: At each step, we have two options for the current number:
     Pick the current element, reduce the target by arr[i] and call recursion with
     (i, target - arr[i]). Keeping the index unchanged allows us to reuse the current element
     until the target is either reached or exceeded.
     Skip the current element, move to the next index and call recursion with (i+1, target).
   Time Complexity: Exponential in worst case.
 * Roughly O(2^{target / min(candidates)}) or bounded by the number of combinations × average
   length.
 * Because of the constraint (<150 combinations) and small target (≤500), it runs efficiently
   in practice.
 * Each path in the recursion tree corresponds to a partial combination.
   Space Complexity:
 * O(target / min(cand)) for recursion depth (max length of combo).
 * Output space: O(number of combinations × average length) — guaranteed small.
 */
// Recursion / Backtracking — build combinations incrementally.
void recurWithIndex(vector& input, vector>& result, size_t index, int remainingSum, vector& combinations) { if (remainingSum == 0) { result.emplace_back(combinations); return; } if (remainingSum < 0) return; for (size_t i = index; i < input.size(); i++) { if (input[i] > remainingSum) // important optimization { break; } combinations.push_back(input[i]); recurWithIndex(input, result, i, remainingSum - input[i], combinations); combinations.pop_back(); // backtrack } } vector> combinationSum(vector& input, int target) { vector> result; vector combinations; int index = 0; recurWithIndex(input, result, index, target, combinations); } /* dailyTemps ========== * Given an array of integers temperatures represents the daily temperatures, return an array answer such that answer[i] is the number of days you have to wait after the ith day to get a warmer temperature. If there is no future day for which this ispossible, keep answer[i] == 0 instead. Example Input: temperatures = [73,74,75,71,69,72,76,73] Output: [1,1,4,2,1,1,0,0] Brute force (show you know it, but say it's too slow) → For each i, scan j = i+1 to n-1, find first j where temperatures[j] > temperatures[i] → O(n²) → TLE on n=10⁵ Optimal approach: Monotonic decreasing stack (stores indices) Maintain a stack of indices in decreasing order of temperatures (from bottom to top). When we find a temperature warmer than the top of the stack → that day solves multiple previous waiting days → pop them all and calculate distances. 
*/ std::vector dailyTemperatures(std::vector& temperatures) { int n = temperatures.size(); vector result(n, 0); // default 0 = no warmer day stack st; // stores indices of temps decreasing for (int i = 0; i < n; ++i) { while (!st.empty() && temperatures[i] > temperatures[st.top()]) { int days = i - st.top(); result[st.top()] = days; st.pop(); } st.push(i); } return result; } /* flattenMultiLevelList ===================== You are given a doubly linked list, which contains nodes that have a next pointer, a previous pointer, and an additional child pointer. This child pointer may or may not point to a separate doubly linked list, also containing these special nodes. These child lists may have one or more children of their own, and so on, to produce a multilevel data structure as shown in the example below. Given the head of the first level of the list, flatten the list so that all the nodes appear in a single-level, doubly linked list. Let curr be a node with a child list. The nodes in the child list should appear after curr and before curr.next in the flattened list. Return the head of the flattened list. The nodes in the list must have all of their child pointers set to null. 
*/ class SolutionStack { Node* flatten(Node* head) { if (head == nullptr) return nullptr; Node* curr = head; stack stk; while (curr != nullptr) { if (curr->child) { // process child, save next if (curr->next) stk.push(curr->next); curr->next = curr->child; curr->child->prev = curr; curr->child = nullptr; } // If we finish traversing child, flatten parent if (curr->next == nullptr && !stk.empty()) { Node* node = stk.top(); curr->next = node; node->prev = curr; stk.pop(); } curr = curr->next; } return head; } }; class SolutionRecursiveDFS { public: Node* flatten(Node* head) { if (!head) return nullptr; // Helper returns the tail of the flattened list starting from curr function dfs = [&](Node* curr, Node* rest) -> Node* { if (!curr) return rest; // Flatten child first, then original next becomes the new "rest" curr->next = dfs(curr->child, dfs(curr->next, rest)); if (curr->next) { curr->next->prev = curr; } curr->child = nullptr; // Important: clear child return curr; }; dfs(head, nullptr); return head; } string name() { return CLASS_NAME(SolutionRecursiveDFS); } }; class SolutionIterativeTailFinding { public: Node* flatten(Node* head) { if (!head) return nullptr; for (Node* curr = head; curr; curr = curr->next) { if (curr->child) { Node* nextSaved = curr->next; // Save sibling // Attach child curr->next = curr->child; curr->child->prev = curr; curr->child = nullptr; // Clear child // Find tail of the child subtree Node* tail = curr->next; while (tail->next) { tail = tail->next; } // Attach saved next after tail tail->next = nextSaved; if (nextSaved) { nextSaved->prev = tail; } } } return head; } string name() { return CLASS_NAME(SolutionIterativeTailFinding); } }; /* groupAnagrams ============= Given an array of strings strs, group the anagrams together. You can return the answer in any order. 
Example 1: Input: strs = ["eat","tea","tan","ate","nat","bat"] Output: [["bat"],["nat","tan"],["ate","eat","tea"]] Explanation: There is no string in strs that can be rearranged to form "bat". The strings "nat" and "tan" are anagrams as they can be rearranged to form each other. The strings "ate", "eat", and "tea" are anagrams as they can be rearranged to form each othe.r Solution most preferred in 2024–2026 interviews: frequency-count key Sorting gives O(k log k) per string. Since input contains only lowercase letters (problem constraint), we can use O(k) key generation: */ struct SolutionMap { vector> groupAnagrams(vector& strs) { unordered_map> anagramGroups; for (auto& str: strs) { string key = str; sort(key.begin(), key.end()); anagramGroups[key].push_back(str); } vector> result; for (auto& [k,v]: anagramGroups) { result.push_back(v); } return result; } string name() { return CLASS_NAME(SolutionMap); } }; struct SolutionStdArray { vector> groupAnagrams(vector& strs) { unordered_map, vector, ArrayHasher> groupsWithArrKey; for (const string& str: strs) { array key{}; for (char c: str) key[c - 'a']++; groupsWithArrKey[key].push_back(str); } vector> result; for (auto& [k,v]: groupsWithArrKey) { result.push_back(v); } return result; } string name() { return CLASS_NAME(SolutionStdArray); } }; struct SolutionBitSet { vector> groupAnagrams(vector& strs) { unordered_map, vector> groups; for (const string& s: strs) { bitset<26> mask; for (char c : s) mask.set(c - 'a'); groups[mask].push_back(s); } vector> result; for (auto& [k,v]: groups) { result.push_back(v); } return result; } string name() { return CLASS_NAME(SolutionBitSet); } }; /* insertInterval ============== You are given an array of non-overlapping intervals intervals where intervals[i] = [start_i, end_i] represent the start and the end of the ith interval and intervals is sorted in ascending order by starti. 
You are also given an interval newInterval = [start, end] that represents the start and end of another interval. Insert newInterval into intervals such that intervals is still sorted in ascending order by starti and intervals still does not have any overlapping intervals (merge overlapping intervals if necessary). Optimal Approach – Greedy One-Pass (O(n) time) Because intervals are sorted and non-overlapping, we can do three clear phases in a single pass: 1. Add all non-overlapping intervals that end before newInterval starts (intervals[i][1] < newInterval[0]) 2. Merge all overlapping intervals into newInterval itself * While current interval overlaps: * newInterval[0] = min(newInterval[0], intervals[i][0]) * newInterval[1] = max(newInterval[1], intervals[i][1]) 3. Add the (possibly merged) newInterval, then add all remaining intervals. This is O(n) time and O(n) space (for the result vector). Why it's optimal: We traverse the list only once. No sorting needed. */ vector> insert(vector>& intervals, vector& newInterval) { if (intervals.size() == 0) return vector> {{newInterval}}; vector> result; // 1. find start interval for merge int i = 0; int begin = 0, end = 1; //example: { {{1,2},{3,5},{6,7},{8,10},{12,16}}, {4,8}, {{1,2},{3,10},{12,16}}}, // find start of merge while (i < (int) intervals.size() && newInterval[begin] > intervals[i][end]) { result.push_back(intervals[i]); i++; } // {1,2} // 2. merge {4,8} while (i < (int)intervals.size() && newInterval[end] >= intervals[i][begin]) { newInterval[begin] = min(newInterval[begin], intervals[i][begin]); newInterval[end] = max(newInterval[end], intervals[i][end]); cout << format("newInterval: {},{}\n", newInterval[begin], newInterval[end]); i++; // {3,5} -> {3,8} // {6,7} -> {3,8} // {8,10} -> {3,10} } result.push_back(newInterval); // 3. 
push back any remaining intervals while (i < (int)intervals.size()) { result.push_back(intervals[i]); i++; } // {12,16} return result; } /* isomorphicStrings ================ Given two strings s and t, determine if they are isomorphic. Two strings s and t are isomorphic if the characters in s can be replaced to get t. All occurrences of a character must be replaced with another character while preserving the order of characters. No two characters may map to the same character, but a character may map to itself. Example : Input: s = "egg", t = "add" Optimal Approach – Two Hash Maps (or arrays for O(1) space) We need a bidirectional mapping: * Every char in s must map to exactly one char in t. * Every char in t must map back to exactly one char in s. If at any position the mapping conflicts, return false. */ bool isIsomorphic(string s, string t) { unordered_map s_to_t; // map of characters in s to to be // replaced to get t. Each s(i) value // must map to the same t(i) value unordered_map t_to_s; // map from t to s, no two chars in s // can map to the same char // s t s t // --- --- --- --- // 012 012 012 012 // e.g. egg -> add foo -> bar for (size_t i = 0; i < s.length(); ++i) { char s_char = s[i]; char t_char = t[i]; // Check mapping from s -> t if (s_to_t.count(s_char)) { if (s_to_t[s_char] != t_char) return false; } else { // insert s_to_t[s_char] = t_char; } // Check mapping from t -> s (to enforce 1-to-1) if (t_to_s.count(t_char)) { if (t_to_s[t_char] != s_char) return false; } else { // insert t_to_s[t_char] = s_char; } } return true; } /* longestCommonPrefix =================== Write a function to find the longest common prefix string amongst an array of strings. If there is no common prefix, return an empty string "". Example 1: Input: strs = ["flower","flow","flight"] Output: "fl" Optimal Approach (Vertical Scanning - Most Common in Interviews) Idea: Take the first string as reference. 
For each character position i in the first string, check if all other strings have the same character at position i. As soon as one string mismatches or ends, stop and return the prefix built so far. This is clean, easy to code, and efficient. Time Complexity: * O(S) where S = sum of all characters in all strings (worst case all chars are checked). * In practice: O(N * M) with N = number of strings, M = min length. * Space Complexity: O(1) extra space (excluding output). Alternative Approach Sort + Compare First & Last (Clever Trick) Sort the array. The longest common prefix must be shared by the first and last string after sorting (lexicographically). Pros: * Very short code. * Cons: Sorting costs O(N log N * M) time → acceptable here (N=200), but vertical scan is usually preferred in interviews. */ class Solution_verticalScan { public: string longestCommonPrefix(vector& strs) { if (strs.size() == 1) return strs[0]; // vertical scanning // ---------------- // flower -> fl // flow -> fl // flight -> fl // // start with flower, scan vertically over other strings. 
string first = strs[0]; // iterate over chars in first string for (size_t i = 0; i < first.size(); i++) { char c = first[i]; // scan chars in other strings for (size_t j = 1; j < strs.size(); ++j) { if (i > strs[j].length() || strs[j][i] != c) return first.substr(0,i); } } return first; } }; class Solution_sort { public: string longestCommonPrefix(vector& strs) { if (strs.size() == 1) return strs[0]; // sort string vector lexigraphically and compare first and last strings sort(strs.begin(), strs.end()); print("sorted", strs); string first = strs[0]; string last = strs[strs.size() - 1]; for (size_t i = 0; i < first.length() && i < last.length(); i++) { if (first[i] != last[i]) return first.substr(0, i); } return first; // while loop // // size_t i = 0; // while (i < first.length() && i < last.length() && first[i] == last[i]) { // ++i; // } // return first.substr(0, i); } }; /* longestSubstr ============ Given a string s, find the length of the longest substring without duplicate characters. Example 1: Input: s = "abcabcbb" Output: 3 Explanation: The answer is "abc", with the length of 3. Note that "bca" and "cab" are also correct answers. Optimal Approach: Sliding Window + Frequency Map (O(n) time, O(1) space) * Use two pointers (left, right) to maintain a valid window [left, right] with no duplicates. * Expand right pointer, add s[right] to a frequency count. * If s[right] count > 1 (duplicate found), shrink from left until no duplicate. * At every step, update max_length = max(max_length, right - left + 1) Since characters are limited (ASCII), we use a fixed-size array int count[128] = {0}; → O(1) space. This is the classic variable-size sliding window for "longest substring with constraint". Why O(n)? Each character is added once (right moves n times) and removed at most once (left moves at most n times total). Amortized O(1) per character. 
*/ int lengthOfLongestSubstring(std::string s) { if (s.empty()) return 0; // int count[128] = {0}; // ASCII, or 256 for safety unordered_map count; int left = 0; int maxLen = 0; int n = s.length(); for (int right = 0; right < n; ++right) { ++count[s[right]]; // add current char // shrink window from left while duplicate exists while (count[s[right]] > 1) { --count[s[left]]; ++left; } maxLen = std::max(maxLen, right - left + 1); cout << "maxLen: " << maxLen << " window left: " << left << " right: " << right << " " << s.substr(left, right - left + 1) << endl; } return maxLen; } /* lruCache ======== All operations (get and put) must run in O(1) average time complexity. Optimal approach (what Bloomberg expects): std::unordered_map for O(1) key → node access. Doubly Linked List (manual nodes with prev/next) to maintain usage order: Head (dummy) → Most Recently Used (MRU) Tail (dummy) → Least Recently Used (LRU) On get or put (if key exists): move the node to the head (right after dummy head) in O(1) using prev/next pointers. On put when full: remove the node before dummy tail, erase from map. This gives O(1) for get/put. 
Space: O(capacity) */ class LRUCache { public: unordered_map nodeCache; Node* head; Node* tail; size_t capacity; // Helper: remove node from doubly linked list (O(1)) void removeNode(Node* node) { // prev node next node // ---- <- ---- <- ---- // | | -> |node| -> | | // ---- ---- ---- node->prev->next = node->next; node->next->prev = node->prev; } // Helper: add node right after head (most recent) void addToFront(Node* newNode) { // dummy // ---- <- ---- <- ---- -> // |head| -> |new | -> |prior| // ---- ---- ---- Node* prior = head->next; prior->prev = newNode; newNode->next = prior; newNode->prev = head; head->next = newNode; } // Helper: move existing node to front void moveToFront(Node* node) { removeNode(node); addToFront(node); } public: LRUCache(size_t capacity) : capacity(capacity) { head = new Node(); tail = new Node(); head->next = tail; tail->prev = head; } // Return the value of the key if the key exists in the cache, otherwise return -1. int get(int key) { if (nodeCache.find(key) == nodeCache.end()) return -1; Node* node = nodeCache[key]; moveToFront(node); return node->value; } // Update the value of the key if the key exists. Otherwise, add the key-value pair to the // cache. If the number of keys exceeds the capacity, evict the least recently used key. 
void put(int key, int value) { // update if exists auto search = nodeCache.find(key); if (search != nodeCache.end()) // key exists, update and make node MRU { Node* node = search->second; node->value = value; moveToFront(node); } else // add key-value pair, check capacity and evict lru key if needed { if (nodeCache.size() == capacity) { Node* lruNode = tail->prev; removeNode(lruNode); // remove from map and delete nodeCache.erase(lruNode->key); delete lruNode; } // add Node* node = new Node(key, value); nodeCache[key] = node; addToFront(node); } } ~LRUCache() { // delete nodes } }; /* maxSlidingWindow ================ You are given an array of integers nums, there is a sliding window of size k which is moving from the very left of the array to the very right. You can only see the k numbers in the window. Each time the sliding window moves right by one position. Return the max sliding window. Example 1: Input: nums = [1,3,-1,-3,5,3,6,7], k = 3 Output: [3,3,5,5,6,7] Explanation: Window position Max --------------- ----- [1 3 -1] -3 5 3 6 7 3 1 [3 -1 -3] 5 3 6 7 3 1 3 [-1 -3 5] 3 6 7 5 1 3 -1 [-3 5 3] 6 7 5 1 3 -1 -3 [5 3 6] 7 6 1 3 -1 -3 5 [3 6 7] 7 LeetCode 239: Sliding Window Maximum is a very common Bloomberg-style interview question (hard difficulty, frequently asked). Fixed sized window problem. Problem recap (you should be able to state this clearly in 30 seconds): Given array nums of length n and integer k, return an array of size n−k+1 where each position i contains the maximum in the sliding window nums[i..i+k-1]. → Time goal: O(n) (or at worst O(n log k)) → Space: preferably O(k) 1. Brute Force – O(n⋅k) – Too Slow vector maxSlidingWindow(vector& nums, int k) { int n = nums.size(); vector ans; for (int i = 0; i <= n - k; ++i) { int mx = *max_element(nums.begin() + i, nums.begin() + i + k); ans.push_back(mx); } return ans; } -> TLE on n = 10^5, k = 5 * 10^4 2. Best Solution: Monotonic Deque (Decreasing) – O(n) We maintain a deque of indices (not values!) 
in strictly decreasing order of nums values. Front of deque = index of current maximum in window → nums[deque.front()] is always the largest in current window Three cleaning rules (done for every new index i): 1. Remove elements out of current window while (!dq.empty() && dq.front() <= i - k) dq.pop_front(); 2. Remove useless (smaller) elements from back — they can never be max while new element is in window while (!dq.empty() && nums[dq.back()] <= nums[i]) dq.pop_back(); (Use ≤ to keep deque smaller and handle duplicates cleanly) 3. Add current index dq.push_back(i); 4. After i ≥ k-1, answer for window ending at i is nums[dq.front()] */ vector maxSlidingWindowDeque(vector& nums, int k) { // maintain a deque of indices (not values!) in strictly decreasing order of nums values. // Front of deque = index of current maximum in window // Remove elements out of current window // while (!dq.empty() && dq.front() <= i - k) dq.pop_front(); // Remove useless (smaller) elements from back — they can // never be max while new element is in window // while (!dq.empty() && nums[dq.back()] <= nums[i]) dq.pop_back(); //(Use ≤ to keep deque smaller and handle duplicates cleanly) // Add current index // dq.push_back(i); // After i ≥ k-1, answer for window ending at i is nums[dq.front()] // Input: nums = [1,3,-1,-3,5,3,6,7], k = 3 int n = nums.size(); vector ans; deque dq; // stores indices, nums[dq.front()] is max for (int i = 0; i < n; ++i) { // 1. Remove indices outside current window [i-k+1, i] if (!dq.empty() && dq.front() == i - k) { dq.pop_front(); } // 2. Remove smaller (or equal) elements than current element from back while (!dq.empty() && nums[dq.back()] <= nums[i]) { dq.pop_back(); } // 3. Add current dq.push_back(i); // 4. 
After first k elements → start collecting answers if (i >= k - 1) { ans.push_back(nums[dq.front()]); } } return ans; } // priority_queue // // Time complexity: O(n log k) worst case // * Each insertion: O(log k) // * Each element can cause multiple pops in worst case (but amortized still O(log k) per element) //→ Total: O(n log k) vector maxSlidingWindowMaxHeap(vector& nums, int k) { int n = nums.size(); vector ans; // max-heap: {value, index} priority_queue> pq; for (int i = 0; i < n; ++i) { pq.emplace(nums[i], i); // Remove invalid (out of window) elements while (!pq.empty() && pq.top().second <= i - k) { pq.pop(); } if (i >= k - 1) { ans.push_back(pq.top().first); } } return ans; } /* meetingRooms ============ Given an array of meeting time intervals intervals where intervals[i] = [start_i, end_i] and start_i < end_i, determine if a person can attend all the meetings without any conflicts. Note: Meetings that touch at the endpoint (e.g., [1,2] and [2,3]) do not overlap — you can attend both. Example 1: Input: intervals = [[0,30],[5,10],[15,20]] Output: false Example 2: Input: intervals = [[7,10],[2,4]] Output: true Optimal Solution Walkthrough (O(n log n) time, O(1) extra space if we can modify input): 1. Sort the intervals by start time. 2. Iterate through the sorted list and check if any two consecutive meetings overlap: if (intervals[i-1][1] > intervals[i][0]) → conflict. That's it. Very clean. Time Complexity: O(n log n) due to sorting Space Complexity: O(1) extra (if sort is in-place) or O(log n) for recursion stack in some implementations. */ bool canAttendMeetings(vector>& intervals) { if (intervals.empty()) return false; if (intervals.size() == 1) return true; // 1.Sort the intervals by start time ranges::sort(intervals); // c++17 sort(intervals.begin(), intervals.end()); int begin = 0, end = 1; // 2. 
Iterate through sorted list and check if any two consecutive meetings overlap for (int i = 1; i < (int) intervals.size(); i++) { if (intervals[i-1][end] > intervals[i][begin]) return false; } return true; } /* meetingRoomsII ============== Now, given the same intervals, return the minimum number of conference rooms required so that no two meetings are in the same room at the same time. Approach A: Min-Heap (Priority Queue) of End Times (most common in interviews) * Sort intervals by start time. * Use a min-heap to keep track of the end times of meetings currently happening. * For each new meeting: * If the earliest-ending meeting has already finished (heap.top() <= current.start), reuse that room (pop). * Else, need a new room. * Always push the current meeting's end time. Track the maximum size of the heap at any time → that's the answer. Approach B: Two Pointers / Sweep Line (also very popular) * Create two arrays: all start times and all end times. * Sort both. * Use two pointers: increment room count on start, decrement on end. * Track current and max rooms. This version often runs slightly faster in practice. Which one to code first? The heap version is more intuitive and directly shows room allocation. */ int minMeetingRooms(vector>& intervals) { if (intervals.empty()) return 0; if (intervals.size() == 1) return 1; // sort intervals sort(intervals.begin(), intervals.end()); // min heap to track end times of meetings priority_queue, greater> pq; int maxRooms = 0; for (const auto& interval: intervals) { int beginTime = interval[0]; int endTime = interval[1]; while (!pq.empty() && beginTime >= pq.top() /* end time */) { pq.pop(); } pq.push(endTime); maxRooms = max(maxRooms, (int)pq.size()); } return maxRooms; } /* mergeIntervals ============== Intervals are unsorted. Leet Code 57. 
Insert Interval has a sorted array of input intervals Given an array of intervals where intervals[i] = [starti, endi], merge all overlapping intervals, and return an array of the *non-overlapping* intervals that cover all the intervals in the input. Example 1: Input: intervals = [[1,3],[2,6],[8,10],[15,18]] Output: [[1,6],[8,10],[15,18]] Explanation: Since intervals [1,3] and [2,6] overlap, merge them into [1,6]. 4. Optimal Approach (Greedy + Sorting) – O(n log n) Key Insight: Sort all intervals by start time. Then do a single linear pass, merging only with the last merged interval. Steps: 1. If intervals is empty, return {}. 2. Sort intervals by starting point (ascending). If starts equal, sort by end doesn't matter much. 3. Initialize a result vector with the first interval. 4. For each subsequent interval: * If it overlaps with the last merged interval (curr_start <= last_end), update last_end = max(last_end, curr_end). * Else, append the current interval as a new merged one. 5. Return the result. This is greedy because once we decide not to merge, all future intervals cannot overlap with previous merged ones (due to sorting). Clarification * After we sort the intervals by their start time, the intervals are now in non-decreasing order of start. * Now, suppose at some point during the linear pass, the current interval does not overlap with the last merged interval in our result. That means: current.start > last_merged.end * Because the array is sorted by start time, every remaining interval in the input (from current+1 to the end) has a start time that is ≥ current.start. Therefore: * future.start ≥ current.start > last_merged.end * This means no future interval can ever overlap with any previously merged interval that we have already finalized in the result. * Once we decide "this interval doesn't overlap with the last one", we can safely commit (finalize) that last merged interval forever — we will never need to go back and merge anything into it later.
That’s why we can greedily append it to the result and move forward. We only ever need to look at the most recently merged interval, not the entire result list." Time Complexity: O(n log n) dominated by sorting + O(n) pass → overall O(n log n) Space Complexity: O(n) for the output (plus O(log n) for sort in practice, or O(1) extra if we sort in-place and reuse). */ std::vector> merge(std::vector>& intervals) { if (intervals.empty()) { return {}; } // sort intervals vec then single liner pass over sorted vector sort(intervals.begin(), intervals.end()); // push first interval into result result.push_back(intervals[0]); int begin = 0, end = 1; for (size_t i = 1; i < intervals.size(); i++) { // back interval[i] merged if (result.back()[end] >= intervals[i][begin]) // merge, e.g. [1,3] [2,6] -> [1,6] { result.back() = {result.back()[begin], intervals[i][end]}; } else // no merge { result.push_back(intervals[i]); } } return result; } /* minStack ============== Design a stack that supports push, pop, top, and retrieving the minimum element in constant time. Implement the MinStack class: MinStack() initializes the stack object. void push(int val) pushes the element val onto the stack. void pop() removes the element on the top of the stack. int top() gets the top element of the stack. int getMin() retrieves the minimum element in the stack. You must implement a solution with O(1) time complexity for each function. Example 1: Input ["MinStack","push","push","push","getMin","pop","top","getMin"] [[],[-2],[0],[-3],[],[],[],[]] Output [null,null,null,null,-3,null,0,-2] "I used two stacks — one for values, one that keeps non-increasing minima." "I push to the min stack only when val <= current min to correctly handle duplicate minimums." "In pop(), I compare before popping the main stack, but because the problem guarantees non-empty stacks, it's safe." "If the problem didn't have that guarantee, I would capture the top value first or add empty checks." 
"Space is O(n) in worst case (strictly decreasing sequence), but often much better in practice." */ class MinStack { public: // Normal stack handles push, pop, top in O(1) stack m_stack; stack m_minStack; MinStack() { } // Only push to min_stack if val <= current min void push(int val) { m_stack.push(val); if (m_minStack.empty() || val <= m_minStack.top()) m_minStack.push(val); } // Only pop from min_stack if we removed the current minimum void pop() { // better to capture stack top int val = m_stack.top(); m_stack.pop(); if (val == m_minStack.top()) m_minStack.pop(); } int top() { return m_stack.top(); } int getMin() { return m_minStack.top(); } }; /* minWindowSubstr =============== Hard Given two strings s and t of lengths m and n respectively, return the minimum window substring of s such that every character in t (including duplicates) is included in the window. If there is no such substring, return the empty string "". The testcases will be generated such that the answer is unique. Example 1: Input: s = "ADOBECODEBANC", t = "ABC" Output: "BANC" Explanation: The minimum window substring "BANC" includes 'A', 'B', and 'C' from string t. Core Idea: Use a dynamic (variable-size) window [left, right] over s. 1. Expand right to add characters until the window contains all required characters from t (with correct multiplicities). 2. Once valid, shrink from left as much as possible while keeping the window valid → this gives a candidate minimum. 3. Repeat until right reaches the end. 4. 
Track the smallest window found Key variables * need[128] or unordered_map — required count for each char in t * window[128] — current count in [left, right] * formed / have — how many unique chars from t are fully satisfied (count matches or exceeds need) * required — total unique chars in t (usually t.size() if duplicates, but actually number of distinct keys) * Track minLen + start index for the best window */ string minWindowOneMap(string s, string t) { unordered_map cnt; for (char c : t) cnt[c]++; int left = 0, minStart = 0, minLen = INT_MAX, count = t.size(); print(cnt, s, 0, left); for (int right = 0; right < (int) s.size(); ++right) { if (cnt[s[right]]-- > 0) { count--; } print(cnt, s, right, left); // still needed this char while (count == 0) { if (right - left + 1 < minLen) { minLen = right - left + 1; minStart = left; } if (cnt[s[left]]++ == 0) count++; // now missing this char again left++; } } return minLen == INT_MAX ? "" : s.substr(minStart, minLen); } /* Optimal Approach: Sliding Window + Two Frequency Maps (O(m + n) Time) This is a classic variable-size sliding window problem. Key Idea: 1. Use a frequency map (need) for characters in t. 2. Use another map (have) for the current window in s. 3. Maintain a counter valid = how many unique characters in the window satisfy the required count from t. 4. Expand the right pointer to include characters. 5. When the window is valid (valid == need.size()), try to shrink from the left while keeping it valid, and track the minimum length window. 
*/ string minWindowTwoMaps(string s, string t) { if (s.empty() || t.empty() || s.length() < t.length()) { return ""; } // Frequency map for t (what we need) unordered_map need; for (char c : t) { need[c]++; } // Frequency map for current window unordered_map have; int left = 0; int minLen = INT_MAX; int minStart = 0; int valid = 0; // how many characters are fully satisfied for (int right = 0; right < (int) s.length(); ++right) { char c = s[right]; have[c]++; // If this character is needed and we now have exactly the required count if (need.count(c) && have[c] == need[c]) { valid++; } // Try to shrink the window from left as much as possible while (valid == (int) need.size() && left <= right) { // Update minimum window if (right - left + 1 < minLen) { minLen = right - left + 1; minStart = left; } // Remove left character char leftChar = s[left]; have[leftChar]--; if (need.count(leftChar) && have[leftChar] < need[leftChar]) { valid--; } left++; } } return (minLen == INT_MAX) ? "" : s.substr(minStart, minLen); } /* Non-overlapping Intervals ========================= Medium Given an array of intervals intervals where intervals[i] = [starti, endi], return the minimum number of intervals you need to remove to make the rest of the intervals non-overlapping. Note that intervals which only touch at a point are non-overlapping. For example, [1, 2] and [2,3] are non-overlapping. Example 1: Input: intervals = [[1,2],[2,3],[3,4],[1,3]] Output: 1 Explanation: [1,3] can be removed and the rest of the intervals are non-overlapping. Example 2: Input: intervals = [[1,2],[1,2],[1,2]] Output: 2 Explanation: You need to remove two [1,2] to make the rest of the intervals non-overlapping. Example 3: Input: intervals = [[1,2],[2,3]] Output: 0 Explanation: You don't need to remove any of the intervals since they're already non-overlapping. The problem is equivalent to: Find the maximum number of non-overlapping intervals you can keep, then removals = total - max_non_overlapping. 
To maximize the number kept: * Sort the intervals by their ending time (ascending). * Greedily pick the interval that ends earliest — this leaves the most room for future intervals. Then iterate * if the next interval starts after or at the current end, keep it and update the end; * else, it overlaps → count as removal. This is a classic greedy on intervals (similar to Activity Selection Problem). Why Sort by End Time? (Discussion Question) * Sorting by start time can lead to suboptimal choices (you might pick a long interval that blocks many short ones). * Ending earliest is optimal because it maximizes future options. * Proof intuition: Suppose you have two overlapping candidates; always better to keep the one that finishes first. Time & Space Complexity * Time: O(n log n) — dominated by sorting. Traversal is O(n). * Space: O(1) extra (ignoring input) or O(log n) if sort is in-place with extra space for comparator. In practice, very efficient for n=1e^5. */ if (intervals.empty()) return 0; // 1. First sort intervals by ending time ascending sort(intervals.begin(), intervals.end(), [](const vector& a, const vector& b) { int end = 1; return a[end] < b[end];}); print("sorted", intervals); int begin = 0, end = 1; int removals = 0; vector current = intervals[0]; resultVector.push_back(intervals[0]); // sorted: [[1,2],[2,3],[1,3],[3,4]] // 2. Iterate for (size_t i = 1; i < intervals.size(); i++) { if (current[end] <= intervals[i][begin]) { current = intervals[i]; resultVector.push_back(intervals[i]); } else removals++; } return removals; } /* LeetCode 51. nQueens Hard The n-queens puzzle is the problem of placing n queens on an n x n chessboard such that no two queens attack each other. Given an integer n, return all distinct solutions to the n-queens puzzle. You may return the answer in any order. Each solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space, respectively. 
/* Example 1:
   -----------------------      -----------------------
   |     |  Q  |     |     |    |     |     |  Q  |     |
   |-----|-----|-----|-----|    |-----|-----|-----|-----|
   |     |     |     |  Q  |    |  Q  |     |     |     |
   |-----|-----|-----|-----|    |-----|-----|-----|-----|
   |  Q  |     |     |     |    |     |     |     |  Q  |
   |-----|-----|-----|-----|    |-----|-----|-----|-----|
   |     |     |  Q  |     |    |     |  Q  |     |     |
   |-----|-----|-----|-----|    |-----|-----|-----|-----|
   Input: n = 4
   Output: [[".Q..","...Q","Q...","..Q."],["..Q.","Q...","...Q",".Q.."]]
   Explanation: There exist two distinct solutions to the 4-queens puzzle as
   shown above.

   For a given row,col another queen cannot be in that row or col as well as
   any adjacent (including diagonal) cell.

   Diagonal indices
   ----------------------
          Col 0   Col 1   Col 2   Col 3
   Row 0  (0,3)   (1,2)   (2,1)   (3,0)
   Row 1  (1,4)   (2,3)   (3,2)   (4,1)
   Row 2  (2,5)   (3,4)   (4,3)   (5,2)
   Row 3  (3,6)   (4,5)   (5,4)   (6,3)
   First number  = diag1 (row + col)         → tracks \ diagonals
                   (main diagonals, top-left to bottom-right)
   Second number = diag2 (row - col + (n-1)) → tracks / diagonals
                   (anti-diagonals, top-right to bottom-left)

   main diagonal, top-left to bottom right = row + col
   ---------------------------------------------------
          Col 0  Col 1  Col 2  Col 3
   Row 0    0      1      2      3
   Row 1    1      2      3      4
   Row 2    2      3      4      5
   Row 3    3      4      5      6

   anti-diagonals, top-right to bottom-left = (row-col) + (n-1)
   ------------------------------------------------------------
          Col 0  Col 1  Col 2  Col 3
   Row 0    3      2      1      0
   Row 1    4      3      2      1
   Row 2    5      4      3      2
   Row 3    6      5      4      3
*/
// Bitmask backtracking for N-Queens.
// Fixes vs. the original:
// * bitset<4> contradicted the comment below (n <= 9 needs up to 17 bits) and
//   silently broke for n > 4 — widened to bitset<32>.
// * the "update diag2" debug print used diag1 instead of diag2.
// * restored stripped template arguments (vector<string>, etc.).
class Solution_bs {
public:
    vector<vector<string>> solveNQueens(int n) {
        vector<vector<string>> result;
        vector<string> board(n, string(n, '.'));
        // bitsets sized to maximum needed (n <= 9 → 2*9-1 = 17 bits; 32 gives headroom)
        backtrack(0, bitset<32>{}, bitset<32>{}, bitset<32>{}, n, board, result);
        return result;
    }

    void backtrack(int row,
                   bitset<32> cols,    // occupied columns
                   bitset<32> diag1,   // main diagonals (row + col)
                   bitset<32> diag2,   // anti-diagonals (row - col + n-1)
                   int n,
                   vector<string>& board,
                   vector<vector<string>>& result) {
        if (row == n) {
            string b;
            for (auto i : board) b += i;
            cout << " result: " << b << endl;
            cout << "-----------------\n";
            result.push_back(board);
            return;
        }

        string s;
        for (auto& i : board) s += i;
        cout << format("backtrack: row: {} cols: {} diag1: {} diag2: {} board: {}\n",
                       row, cols.to_string(), diag1.to_string(), diag2.to_string(), s);

        // Available positions in current row
        bitset<32> available = ~(cols | diag1 | diag2);
        available &= ((1ULL << n) - 1);   // mask only first n bits
        cout << " available curr row: " << available << endl;

        // Try every possible column using bitset
        for (int col = 0; col < n; ++col) {
            if (available[col]) {    // O(1) check with bitset
                // Place queen
                board[row][col] = 'Q';
                string s;
                for (auto& i : board) s += i;
                cout << format("  Place Queen: row={} col={:<28} board: {} -> backtrack\n",
                               row, col, s);

                // Update masks for next row
                cout << "  update cols=" << (cols | (bitset<32>(1) << col)) << endl;
                cout << "  update diag1=" << ((diag1 | (bitset<32>(1) << col)) << 1) << endl;
                // fix: was printing diag1 here instead of diag2
                cout << "  update diag2=" << ((diag2 | (bitset<32>(1) << col)) >> 1) << endl;

                backtrack(row + 1,
                          cols | (bitset<32>(1) << col),
                          (diag1 | (bitset<32>(1) << col)) << 1,
                          (diag2 | (bitset<32>(1) << col)) >> 1,
                          n, board, result);

                string ss;
                for (auto& i : board) ss += i;
                cout << format("  return from backtrack row: {:<24} board: {}\n", row + 1, ss);

                // Backtrack
                board[row][col] = '.';
            }
        }
    }
};
/* Peak Element
   ============
   A peak element is an element that is strictly greater than its neighbors.
   Given a 0-indexed integer array nums, find a peak element, and return its
   index. If the array contains multiple peaks, return the index to any of the
   peaks. You may imagine that nums[-1] = nums[n] = -∞. In other words, an
   element is always considered to be strictly greater than a neighbor that is
   outside the array. You must write an algorithm that runs in O(log n) time.

   Example 1:
   Input: nums = [1,2,3,1]
   Output: 2
   Explanation: 3 is a peak element and your function should return the index
   number 2.
Most Expected Solution in Bloomberg-style Interview: Binary Search (O(log n)) Key Insight Because we only need any peak and the array has the property that adjacent elements differ, we can use binary search by looking at the slope: If nums[mid] < nums[mid+1] → we're on an ascending slope → a peak must exist on the right side (including mid+1) If nums[mid] > nums[mid+1] → we're on a descending slope or at a peak → a peak exists on the left side (including mid) This works beautifully because of the -∞ boundaries and the no-equal-adjacent guarantee. */ int findPeakBinarySearch(const vector& arr) { int left = 0; int right = arr.size() - 1; while (left < right) { int mid = left + (right - left) / 2; if (arr[mid + 1] > arr[mid]) { left = mid + 1; // Ascending: peak on right } else { right = mid; // Descending: peak on left or at mid } } return left; } /* Remove Adjacent Duplicates In String II ====================================== You are given a string s and an integer k, a k duplicate removal consists of choosing k adjacent and equal letters from s and removing them, causing the left and the right side of the deleted substring to concatenate together. We repeatedly make k duplicate removals on s until we no longer can. Return the final string after all such duplicate removals have been made. It is guaranteed that the answer is unique. Example 1: Input: s = "abcd", k = 2 Output: "abcd" Explanation: There's nothing to delete. Example 2: Input: s = "deeedbbcccbdaa", k = 3 Output: "aa" Explanation: First delete "eee" and "ccc", get "ddbbbdaa" Then delete "bbb", get "dddaa" Finally delete "ddd", get "aa" Example 3: Input: s = "pbbcggttciiippooaais", k = 2 Output: "ps" Constraints: 1 <= s.length <= 105 2 <= k <= 104 s only contains lowercase English letters. Hint 1 Use a stack to store the characters, when there are k same characters, delete them. Hint 2 To make it more efficient, use a pair to store the value and the count of each character. 
deeedbbcccbdaa -------------- push ---- d eee -> pop d bb ccc -> pop b -> pop d -> pop a a ==> "aa" */ string removeDuplicates(string s, int k) { vector> stk; // loop over string pushing pair onto stack for (char c: s) { if (!stk.empty() && stk.back().first == c) // same char, pop if count reaches k { if (++(stk.back().second) == k) stk.pop_back(); } else { stk.emplace_back(c, 1); } } string result; for (auto& p: stk) { // create string with cnt of char string s(p.second, p.first); result += s; } return result; } /* Remove Duplicates from Sorted Array II ====================================== Given an integer array nums sorted in non-decreasing order, remove some duplicates in-place such that each unique element appears at most twice. The relative order of the elements should be kept the same. Since it is impossible to change the length of the array in some languages, you must instead have the result be placed in the first part of the array nums. More formally, if there are k elements after removing the duplicates, then the first k elements of nums should hold the final result. It does not matter what you leave beyond the first k elements. Return k after placing the final result in the first k slots of nums. Do not allocate extra space for another array. You must do this by modifying the input array in-place with O(1) extra memory. Custom Judge: The judge will test your solution with the following code: int[] nums = [...]; // Input array int[] expectedNums = [...]; // The expected answer with correct length int k = removeDuplicates(nums); // Calls your implementation assert k == expectedNums.length; for (int i = 0; i < k; i++) { assert nums[i] == expectedNums[i]; } If all assertions pass, then your solution will be accepted. Example 1: Input: nums = [1,1,1,2,2,3] Output: 5, nums = [1,1,2,2,3,_] Explanation: Your function should return k = 5, with the first five elements of nums being 1, 1, 2, 2 and 3 respectively. 
It does not matter what you leave beyond the returned k (hence they are underscores). Example 2: Input: nums = [0,0,1,1,1,1,2,3,3] Output: 7, nums = [0,0,1,1,2,3,3,_,_] Explanation: Your function should return k = 7, with the first seven elements of nums being 0, 0, 1, 1, 2, 3 and 3 respectively. It does not matter what you leave beyond the returned k (hence they are underscores). Optimal Two-Pointer Solution (Most Interview-Friendly) C++ * read pointer iterates through all elements for (size_t i = 2; i < nums.size(); i++) * write pointer k will become new length */ class Solution // : public testing::Test { public: int removeDuplicates(vector& nums) { if (nums.size() <= 2) return nums.size(); int k = 2; for (size_t i = 2; i < nums.size(); i++) { if (nums[k-2] != nums[i]) { nums[k++] = nums[i]; } } return k; } }; /* Reverse List ============ ---- 1. ---- --- --- --- --- head -->| 1 |-->| 2 |-->| 3 |-->| 4 |---> null --- --- --- --- ---- 2. ---- curr | v --- --- --- --- head -->| 1 |-->| 2 |-->| 3 |-->| 4 |---> null --- --- --- --- prev -> null next -> null ---- 3. ---- prev curr next | | | | v v v --- --- --- --- null<-- | 1 | | 2 |-->| 3 |-->| 4 |---> null --- --- --- --- ---- 4. ---- prev curr next | | | v v v --- --- --- --- null<-- | 1 |<--| 2 | | 3 |-->| 4 |---> null --- --- --- --- */ Node* reverseListIterative(Node* head) { Node* curr = head, *prev = nullptr, *next = nullptr; while (curr != nullptr) { // store next next = curr->next; // reverse curr->next = prev; // move to next node prev = curr; curr = next; }; // returns last node return prev; } Node* reverseListRecursiveNoPrint(Node* node) { if (node->next == nullptr) return node; Node* new_head = reverseListRecursiveNoPrint(node->next); // when reverseListRecursive intially returns new_head will be last node in // list and node will be the second to last node in the forward list. 
// Subsequent returns will be prior nodes in reverse // reverse, point next->next to current node node->next->next = node; // This is done so the last node traversed will have next == nullptr node->next = nullptr; // when recusrion ends new_head will be returned return new_head; } /* Stock Maximize ============== Each day, you can either buy one share of WOT, sell any number of shares of WOT that you own, or not make any transaction at all. What is the maximum profit you can obtain with an optimum trading strategy? Example price = [1,2] Buy one share day one, and sell it day two for a profit of 1. Return 1. prices = [2,1] No profit can be made so you do not buy or sell stock those days. Return 0. Efficient Way: For share bought on day i, sell it on the highest price after day i. Traverse bacwards - start from last day and keep track of max price seen so far from right */ long stockmax(vector prices) { long long profit = 0; int sell_price = 0; // future maximum selling price for (int i = (int)prices.size() - 1; i >= 0; --i) { // If current price > sell_price → update sell_price (this becomes a new selling opportunity) if (prices[i] > sell_price) { sell_price = prices[i]; } else // you would buy here and sell later at sell_price → add (sell_price - prices[i]) to profit { profit += (long long)sell_price - prices[i]; } } cout << " profit = " << profit << endl; return profit; } /* Sum of Two Integers =================== carry carry ----------- --------- a ^ b a & b (a&b) << 1 a ^ b a & b (a&b) << 1 2 ^ 2 2 & 2 (2&2) << 1 0 ^ 4 0 & 4 (0&2) << 1 ----- ----- ----------- ----- ----- ---------- 0010 0010 a=0 0000 0000 0010 0010 b=c=4 0100 0100 ==> a = 4, b = carry = 0 ---- ---- -----> ---- ---- 0000 0010 0100 0100 0000 0000 */ int getSum(int a, int b) { while (b != 0) { int carry = (a & b) << 1; // generate carry a = a ^ b; // add without carry b = carry; } return a; } /* Top K Frequent Elements ======================= Given an integer array nums and an integer k, return the 
k most frequent elements. You may return the answer in any order. Example 1: Input: nums = [1,1,1,2,2,3], k = 2 Output: [1,2] Example 3: Input: nums = [1,2,1,2,1,2,3,1,3,2], k = 2 Output: [1,2] algorithm's time complexity must be better than O(n log n), where n is the array's size. We need to return the k most frequent elements in any order. If there are ties in frequency, any of the tied elements is acceptable.” First I count frequencies using a hash map → O(n) time, O(n) space. Then I have two good options: - Build a max-heap of size ≈ number of unique elements and extract k times → O(n + m log m) where m ≤ n is number of unique elements - use a min-heap of size k (better when k << n) Today I’ll go with the max-heap approach because it's very straightforward and readable. (If they ask for optimal → mention bucket sort O(n) solution)” eTime: O(n + m log m) where m ≤ n is number of unique elements Worst case ≈ O(n log n) Space: O(n) for the map + O(m) for the heap Follow-up question they often ask Interviewer: “Can we do better than O(n log n)?” You: “Yes — we can use bucket sort / frequency bucket approach in O(n) time.” */ vector topKFrequent(vector& nums, int k) { vector result; // map of key, count of key - count frequency of each element unordered_map keyToCountMap; for (auto& number: nums) keyToCountMap[number]++; // priority queue with largest at top. // pair = priority_queue, vector>, less>> pq; for (auto& [key,count]: keyToCountMap) { pq.push({count, key}); } int num_elems = 0; while(!pq.empty() && num_elems < k) { auto& p = pq.top(); result.push_back(p.second); pq.pop(); num_elems++; } return result; } /* topKStocksByVol ====================== Design a system that tracks the trading volume of stocks. The system must support two main operations: - addTrade(string stockSymbol, int volume): Updates the total volume traded for a given stock. - getTopK(int k): Returns the k most traded stocks (those with the highest volume) at the current moment. 
This is a classic “maintain dynamic top-K with frequent updates” problem. We need O(log N) updates and O(K) queries (where N = number of distinct stocks). Data Structures chosen unordered_map → stock → current total volume (O(1) average lookup/update). set> → stores {volume, stockSymbol}. Why this beats alternatives priority_queue alone cannot delete arbitrary elements when a stock’s volume changes. Sorting the map on every getTopK would be O(N log N) → too slow for large N. The ordered-set + hashmap pattern is the standard interview solution for “top K with updates”. Time & Space (what you say in the interview) - addTrade → O(log N) (erase + insert on set) - getTopK → O(K) (just walk from begin()) - Space → O(N) */ #include using namespace std; class TopKStocksByVol { public: unordered_map stockToVolume; set, greater<>> volumeSet; // stores {volume, stockSymbol} public: // O(log N) – one erase + one insert on the set + O(1) unordered_map operations void update(const pair& stock) { const string& symbol = stock.first; const long long& vol = stock.second; // long long to avoid // overflow on totals // if stock already exists, remove old entry from ordered set if (stockToVolume.count(symbol)) { long long oldVol = stockToVolume[symbol]; volumeSet.erase({oldVol, symbol}); } stockToVolume[symbol] += vol; // Insert new ordered entry volumeSet.insert({stockToVolume[symbol], symbol}); } /** * Returns the k most traded stocks (highest volume first). * If k > number of stocks, returns all stocks. * Ties are broken by stock symbol (lexicographically). 
*/ vector getTopK(int k) { vector result; if (k <= 0) return result; auto it = volumeSet.begin(); for (int i = 0; i < k && it != volumeSet.end(); ++i, ++it) { result.push_back(it->second); } return result; } }; int main(void) { vector>, int>> testCases { // input - vector {{ {"AAPL", 1000}, {"TSLA", 5000}, {"QCOM", 2000}, {"NVDA", 7000}, {"AMZN", 9000}, {"NFLX", 11000}, {"MSFT", 13000}, {"GOOGL", 15000} }, // output - top K {3}}, {{ {"AAPL", 16000} }, {3}}, }; TradingVolTracker tracker; for (auto& tc: testCases) { for (const auto& stock: get<0>(tc)) tracker.update(stock); vector stocks = tracker.getTopK(get<1>(tc)); for (const auto& i: stocks) cout << i << ", "; cout << endl << "---------------" << endl; } return 0; } /* Top K Stocks Moving Window In a Bloomberg interview, the Moving Window variation of the Top K problem is the "Final Boss" level. It transitions from a simple data structure problem to a Stream Processing problem. 1. The Challenge: Time-Based EvictionUnlike the basic version where volume only goes up, in a Moving Window (e.g., "Top stocks in the last 10 minutes"), volume also goes down as old trades "expire" and fall out of the window. 2. The Data Structure TrioTo handle this efficiently, you need three synchronized structures:StructurePurpos * deque A queue of {symbol, volume, timestamp} to track when trades expire. * unordered_map Maps symbol -> current_window_volume. * set>Keeps stocks sorted by their current window volume. 
*/ class TopKStocksMovingWindow { struct Trade { std::string symbol; int volume; long long timestamp; }; int windowSizeSeconds; std::deque trades; std::unordered_map symbolToVolume; std::set, std::greater<>> sortedStocks; public: MovingWindowTicker(int seconds) : windowSizeSeconds(seconds) {} void addTrade(std::string symbol, int volume, long long timestamp) { cleanup(timestamp); // Remove from set to update volume if (symbolToVolume.count(symbol)) { sortedStocks.erase({symbolToVolume[symbol], symbol}); } symbolToVolume[symbol] += volume; trades.push_back({symbol, volume, timestamp}); // Re-insert updated volume sortedStocks.insert({symbolToVolume[symbol], symbol}); } std::vector getTopK(int k, long long currentTime) { cleanup(currentTime); std::vector result; auto it = sortedStocks.begin(); for (int i = 0; i < k && it != sortedStocks.end(); ++i, ++it) { result.push_back(it->second); } return result; } // Removes trades that are older than (currentTime - windowSizeSeconds) void cleanup(long long currentTime) { while (!trades.empty() && trades.front().timestamp <= currentTime - windowSizeSeconds) { Trade expired = trades.front(); trades.pop_front(); // 1. Remove old entry from set sortedStocks.erase({symbolToVolume[expired.symbol], expired.symbol}); // 2. Subtract volume symbolToVolume[expired.symbol] -= expired.volume; // 3. Re-insert if volume is still > 0, otherwise cleanup map if (symbolToVolume[expired.symbol] > 0) { sortedStocks.insert({symbolToVolume[expired.symbol], expired.symbol}); } else { symbolToVolume.erase(expired.symbol); } } } }; /* TwoSum ====== Given an array of integers nums and an integer target, return indices of the two numbers such that they add up to target. You may assume that each input would have exactly one solution, and you may not use the same element twice. You can return the answer in any order. Example 1: Input: nums = [2,7,11,15], target = 9 Output: [0,1] Explanation: Because nums[0] + nums[1] == 9, we return [0, 1]. 
/* Example 2:
   Input: nums = [3,2,4], target = 6
   Output: [1,2]

   Example 3:
   Input: nums = [3,3], target = 6
   Output: [0,1]

   Constraints:
   2 <= nums.length <= 104
   -109 <= nums[i] <= 109
   -109 <= target <= 109
   Only one valid answer exists.

   Follow-up: Can you come up with an algorithm that is less than O(n2) time
   complexity? */
// (fix: the original `#include` lines had their header names stripped)
#include <iostream>
#include <queue>
#include <unordered_map>
#include <vector>
using namespace std;

class Solution
{
public:
    // One-pass hash-map two-sum: O(n) time, O(n) space.
    // (fixes: restored stripped template arguments; replaced C++20
    // `contains()` + a second operator[] lookup with a single find())
    vector<int> twoSum(const vector<int>& nums, int target)
    {
        // map of seen number -> its index in nums
        unordered_map<int, int> twoSumMap;
        for (int curr_index = 0; curr_index < (int) nums.size(); curr_index++)
        {
            int curr_num = nums[curr_index];
            int difference = target - curr_num;
            auto it = twoSumMap.find(difference);
            if (it != twoSumMap.end())
            {
                return vector<int>{it->second, curr_index};
            }
            twoSumMap[curr_num] = curr_index;
        }
        return vector<int>{};
    }
};
/* Underground System
   ==================
   see undergroundSystem.cpp */
/* Moving Average
   ==============
   std::queue reminder:
     q.push(0); // back pushes 0
     q.push(1); // q = 0 1
     q.push(2); // q = 0 1 2
     q.push(3); // q = 0 1 2 3
     //           ^       ^
     //           |       |
     //         front    back
     assert(q.front() == 0);
     assert(q.back() == 3);
     assert(q.size() == 4);
     q.pop(); // removes the front element, 0 */
// Fixed-size moving average over a stream.
// Fixes vs. the original: the queue-demo statements that sat illegally at
// member scope were folded into the comment above; `queue<double>` template
// argument restored; `sum` was read before ever being initialized.
struct MovingAverage
{
    queue<double> que;    // push inserts at the back, pop removes the front
    size_t windowSize;
    double sum = 0.0;     // fix: was uninitialized

    MovingAverage(size_t size)
    {
        windowSize = size;
    }

    void add(double val)
    {
        sum += val;
        que.push(val);
        if (que.size() > windowSize)
        {
            sum -= que.front();   // evict the oldest sample
            que.pop();
        }
    }

    // Returns 0 until the window has filled (original behavior preserved).
    double getAvg()
    {
        return que.size() == windowSize ? sum / windowSize : 0;
    }
};
/* CRTP Curiously Recurring Template Pattern
   ======================================== */
// (fix: restored the stripped template parameter list and the stripped
// `<T*>` / `<Derived>` arguments)
template <typename T>
class Base
{
public:
    void DoSomething()
    {
        // Dispatch call to exact derived type (static polymorphism)
        static_cast<T*>(this)->DoSomething();
    }
};

class Derived : public Base<Derived>
{
public:
    void DoSomething() { cout << "Derived::DoSomething() called" << endl; }
};
/* Thread Safe SPSC Ring Buffer
   ============================ */
#include <array>
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <map>

struct alignas(64) MarketTick {   // Cache-line aligned
    uint64_t timestamp_ns;
    uint32_t symbol_id;           // Integer ID instead of string
    double price;
    int64_t quantity;
    char side;                    // 'B' or 'S'
};

// Single-producer / single-consumer lock-free ring buffer.
// (fix: restored `template <typename T, size_t Capacity>` and the stripped
// `std::array<T, Capacity>` / `std::atomic<size_t>` arguments)
template <typename T, size_t Capacity>
class SPSCQueue
{
    static_assert((Capacity & (Capacity - 1)) == 0, "Capacity must be power of 2");
    static constexpr size_t Mask = Capacity - 1;

    alignas(64) std::array<T, Capacity> buffer_;
    alignas(64) std::atomic<size_t> head_{0};   // Producer index
    alignas(64) std::atomic<size_t> tail_{0};   // Consumer index

public:
    // Producer side only. Returns false when the buffer is full.
    bool push(const T& item)
    {
        const size_t h = head_.load(std::memory_order_relaxed);
        const size_t t = tail_.load(std::memory_order_acquire);
        if ((h - t) == Capacity) return false;   // full
        buffer_[h & Mask] = item;
        head_.store(h + 1, std::memory_order_release);   // publish the slot
        return true;
    }

    // Consumer side only. Returns false when the buffer is empty.
    bool pop(T& item)
    {
        const size_t t = tail_.load(std::memory_order_relaxed);
        const size_t h = head_.load(std::memory_order_acquire);
        if (t == h) return false;   // empty
        item = buffer_[t & Mask];
        tail_.store(t + 1, std::memory_order_release);
        return true;
    }
    // Optional: batch push/pop for higher throughput
};
/* OrderBook
   ========= */
// Price-level aggregated book. NOTE(review): double as a map key is exact-match
// only — fine for aggregation by identical price, but consider an integer
// price-tick representation in production.
class OrderBook
{
    // bid: descending prices, ask: ascending
    // (fix: restored stripped `<double, int64_t, std::greater<double>>` /
    // `<double, int64_t>` arguments)
    std::map<double, int64_t, std::greater<double>> bids_;   // price -> total qty
    std::map<double, int64_t> asks_;

public:
    void addOrder(bool is_buy, double price, int64_t qty)
    {
        // fix: the original `auto& side = is_buy ? bids_ : asks_;` cannot
        // compile — the two maps have different comparator types, so the
        // ternary has no common type. Branch instead.
        if (is_buy)
            bids_[price] += qty;
        else
            asks_[price] += qty;
    }

    // Get top of book; {0.0, 0} when the bid side is empty.
    std::pair<double, int64_t> bestBid() const
    {
        if (bids_.empty()) return {0.0, 0};
        return {bids_.begin()->first, bids_.begin()->second};
    }
    // Similarly for bestAsk
};