All files CodeCompletionCore.ts

81.79% Statements 638/780
86.17% Branches 106/123
71.42% Functions 10/14
81.79% Lines 638/780

Press n or j to go to the next uncovered block, b, p or k for the previous block.

1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 
528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 7811x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 10x 10x 10x 10x 1x 1x 1x 1x 1x 1x 16184x 16184x 16184x 16184x 16184x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 10x 10x 10x 10x 10x 10x 10x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 27x 27x 27x 27x 27x 27x 27x 27x 27x 27x 27x 27x 7098x 7098x 7021x 7021x 7021x 27x 27x 7021x 7071x 7071x 7071x 7098x     7098x 27x 27x 27x 27x 27x 27x                                                   27x 27x 27x 1x 1x 1x 1x 1x 1x 1x 1x     1x 1x 1x 1x 1x 1x 1x 1x 1x 5413x 24x 24x 5389x 5389x 5413x 1784x 1784x 1784x 37356x 506x 506x 37356x 5413x 3605x 3605x 3605x 99316x 966x 966x 99316x 2639x 3917x 3917x 3917x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 136672x 136672x 1472x 1472x 1472x 1472x 
1472x 2856x 1646x 1646x 1210x 1210x 2856x 1204x 1204x 1204x 2856x 1472x 1472x 268x 268x 268x 268x 268x     268x 1472x 1472x 1472x 135200x 135200x 135200x 1x 1x 1x 1x 1x 1x 1x 1x 1x 16204x 16204x 16204x 16204x 16204x 18140x 18140x 18140x 18140x 18545x 4117x 4117x 4117x 1936x 1936x 1936x 4117x     4117x 18140x 18140x 18140x 16204x 16204x 16204x 1x 1x 1x 1x 1x 1x 1x 1x 1x 173x 173x 173x 173x 173x 173x 173x 173x 16184x 16184x 173x 173x 173x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 54465x 54465x 54465x     54465x 54465x 54465x 8x 8x 8x 8x 54457x 54457x 54465x 70474x 14628x 14628x     14628x 14628x 14628x 14628x 14628x 14628x 14628x 14628x 14628x 2x 2x 2x 2x 14628x 70474x           55846x 39662x 39662x 39662x 55846x         16184x 16184x 16184x 16184x     16184x 16184x 16184x 16184x 16184x 16184x 16184x 70474x 54457x 54457x 54457x 54457x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 1x 181909x 181909x 181909x 181909x 181909x 181909x 181909x 801x 801x 181909x 181108x 24979x     24979x 24979x 24979x 181108x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 181909x 3x 3x 3x 156930x 156930x 181909x 173x 173x 173x 173x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 156930x 181909x 23x 3x 3x 23x 20x 20x 20x 5383x 5383x 5383x 5383x 83272x 83272x 5383x 5383x 5383x 5383x 3915x 4498x 3414x     3414x 442x 442x 3414x 2972x 2972x 2971x 2971x 2972x 3414x 4498x 3915x 5383x 20x 23x 23x 5x 5x 5x 5x 23x 23x 23x 23x 23x 181909x 156907x 156907x 156907x 156907x 156907x 78751x 78751x 78751x 78751x 156907x 78156x 181909x 21374x 21374x 78156x 78156x 78156x 78156x 78156x 78156x 78156x 78156x 78156x 181909x 1002581x 1002581x 1002581x 1002581x 1002581x 1002581x 1002581x             1002581x 1002581x 107691x 107691x 107691x 107691x 894890x 894890x 894890x 894890x 894890x 1002581x 1157981x 1157981x 181882x 181882x 181882x 181882x 120677x 120677x 120677x 120677x 120677x 181882x 181882x 1157981x 1157981x                 1157981x 1157981x 86016x 
86016x 86016x 86016x 86016x 86016x 86016x 86016x 86016x 86016x 1157981x 1157981x                                   1157981x 1157981x 890083x 692512x 692512x 692512x 692512x 692512x 692512x 692512x 197571x 197571x 890083x 197571x     197571x 27x 26x 26x 26x 30x 24x       24x 24x 20x 24x 4x 4x 24x 30x 26x 197571x 197544x 25220x     25220x 25220x 25220x 25220x 25220x 197544x 197571x 890083x 1157981x 1157981x 894890x 78156x 78156x 181909x 21374x 21374x 78156x 78156x 78156x 78156x 78156x 78156x 1x 1x           1x 1x                                                                           1x 1x                     1x 1x  
/*
 * This file is released under the MIT license.
 * Copyright (c) 2016, 2021, Mike Lischke
 *
 * See LICENSE file for more info.
 */
 
/* eslint-disable max-classes-per-file */
 
import { Parser, Vocabulary, Token, TokenStream, ParserRuleContext } from "antlr4ts";
import {
    ATN, ATNState, ATNStateType, Transition, TransitionType, PredicateTransition, RuleTransition, RuleStartState,
    PrecedencePredicateTransition,
} from "antlr4ts/atn";
import { IntervalSet } from "antlr4ts/misc/IntervalSet";
 
/** A plain list of token types. */
export type TokenList = number[];

/** A plain list of rule indexes. */
export type RuleList = number[];

/** A rule candidate: the token index where the rule started plus the rule call path that led to it. */
export interface CandidateRule {
    startTokenIndex: number;
    ruleList: RuleList;
}

/** A single rule invocation, recorded together with the token index at which it began. */
export interface RuleWithStartToken {
    startTokenIndex: number;
    ruleIndex: number;
}

/** A call stack of rule invocations, each with its starting token index. */
export type RuleWithStartTokenList = RuleWithStartToken[];
 
/**
 * All the candidates which have been found. Tokens and rules are separated.
 * Token entries include a list of tokens that directly follow them (see also the "following" member in the
 * FollowSetWithPath class).
 * Rule entries include the index of the starting token within the evaluated rule, along with a call stack of rules
 * found during evaluation.
 */
export class CandidatesCollection {
    public tokens: Map<number, TokenList> = new Map();
    public rules: Map<number, CandidateRule> = new Map();
}
 
/**
 * A record for a follow set along with the path at which this set was found.
 * If there is only a single symbol in the interval set then we also collect and store tokens which follow
 * this symbol directly in its rule (i.e. there is no intermediate rule transition). Only single label transitions
 * are considered. This is useful if you have a chain of tokens which can be suggested as a whole, because there is
 * a fixed sequence in the grammar.
 */
class FollowSetWithPath {
    public intervals: IntervalSet;
    public path: RuleList = [];
    public following: TokenList = [];
}
 
/**
 * A list of follow sets (for a given state number) + all of them combined for quick hit tests + whether they are
 * exhaustive (false if subsequent yet-unprocessed rules could add further tokens to the follow set, true otherwise).
 * This data is static in nature (because the used ATN states are part of a static struct: the ATN).
 * Hence it can be shared between all C3 instances, however it depends on the actual parser class (type).
 */
class FollowSetsHolder {
    public sets: FollowSetWithPath[];
    public combined: IntervalSet;
    public isExhaustive: boolean;
}
 
/** Maps an ATN state number to the follow sets computed for that state. */
type FollowSetsPerState = Map<number, FollowSetsHolder>;

/** Token stream position info after a rule was processed: the set of reachable token list indexes. */
type RuleEndStatus = Set<number>;

/** An entry in the ATN-walk pipeline: a state to process plus the token list position to process it at. */
interface IPipelineEntry {
    state: ATNState;
    tokenListIndex: number;
}
 
// The main class for doing the collection process.
export class CodeCompletionCore {
    // Follow sets cached per parser class name (see processRule), shared between all instances for that parser type.
    private static followSetsByATN = new Map<string, FollowSetsPerState>();

    // Human readable names for ATN state types, indexed by ATNStateType. Used for debug descriptions.
    private static atnStateTypeMap: string[] = [
        "invalid",
        "basic",
        "rule start",
        "block start",
        "plus block start",
        "star block start",
        "token start",
        "rule stop",
        "block end",
        "star loop back",
        "star loop entry",
        "plus loop back",
        "loop end",
    ];

    // Debugging options. Print human readable ATN state and other info.

    // Not dependent on showDebugOutput. Prints the collected rules + tokens to terminal.
    public showResult = false;

    // Enables printing ATN state info to terminal.
    public showDebugOutput = false;

    // Only relevant when showDebugOutput is true. Enables transition printing for a state.
    public debugOutputWithTransitions = false;

    // Also depends on showDebugOutput. Enables call stack printing for each rule recursion.
    public showRuleStack = false;

    // Tailoring of the result:
    // Tokens which should not appear in the candidates set.
    public ignoredTokens: Set<number>;

    // Rules which replace any candidate token they contain.
    // This allows to return descriptive rules (e.g. className, instead of ID/identifier).
    public preferredRules: Set<number>;

    // Specify if preferred rules should translated top-down (higher index rule returns first) or
    // bottom-up (lower index rule returns first).
    public translateRulesTopDown = false;

    // References cached from the parser in the constructor.
    private parser: Parser;
    private atn: ATN;
    private vocabulary: Vocabulary;
    private ruleNames: string[];
    // Per-invocation state, (re)initialized in collectCandidates.
    private tokens: Token[];
    private precedenceStack: number[];

    private tokenStartIndex = 0;
    private statesProcessed = 0;

    // A mapping of rule index + token stream position to end token positions.
    // A rule which has been visited before with the same input position will always produce the same output positions.
    private shortcutMap: Map<number, Map<number, RuleEndStatus>> = new Map();

    // The collected candidates (rules and tokens).
    private candidates: CandidatesCollection = new CandidatesCollection();
 
    /**
     * Creates a new completion core for the given parser, caching its ATN, vocabulary and rule names.
     * The ignored-token and preferred-rule sets start empty; callers customize them before collecting candidates.
     *
     * @param parser The parser to use for candidate collection.
     */
    public constructor(parser: Parser) {
        this.parser = parser;
        this.atn = parser.atn;
        this.vocabulary = parser.vocabulary;
        this.ruleNames = parser.ruleNames;
        this.ignoredTokens = new Set();
        this.preferredRules = new Set();
    }
 
    /**
     * This is the main entry point. The caret token index specifies the token stream index for the token which
     * currently covers the caret (or any other position you want to get code completion candidates for).
     * Optionally you can pass in a parser rule context which limits the ATN walk to only that or called rules.
     * This can significantly speed up the retrieval process but might miss some candidates (if they are outside of
     * the given context).
     *
     * @param caretTokenIndex The index of the token at the caret position.
     * @param context An option parser rule context to limit the search space.
     * @returns The collection of completion candidates.
     */
    public collectCandidates(caretTokenIndex: number, context?: ParserRuleContext): CandidatesCollection {
        // Reset all per-invocation state so the instance can be reused.
        this.shortcutMap.clear();
        this.candidates.rules.clear();
        this.candidates.tokens.clear();
        this.statesProcessed = 0;
        this.precedenceStack = [];

        this.tokenStartIndex = context ? context.start.tokenIndex : 0;
        const tokenStream: TokenStream = this.parser.inputStream;

        // Collect default-channel tokens from the start index up to and including the first unhidden token
        // on or after the caret (or EOF, whichever comes first).
        this.tokens = [];
        let offset = this.tokenStartIndex;
        while (true) {
            const token = tokenStream.get(offset++);
            if (token.channel === Token.DEFAULT_CHANNEL) {
                this.tokens.push(token);

                if (token.tokenIndex >= caretTokenIndex || token.type === Token.EOF) {
                    break;
                }
            }

            // Do not check for the token index here, as we want to end with the first unhidden token on or after
            // the caret.
            if (token.type === Token.EOF) {
                break;
            }
        }

        const callStack: RuleWithStartTokenList = [];
        const startRule = context ? context.ruleIndex : 0;
        // Walk the ATN starting at the chosen rule; this fills this.candidates as a side effect.
        this.processRule(this.atn.ruleToStartState[startRule], 0, callStack, 0, 0);

        if (this.showResult) {
            // Debug output: dump the collected rules with their paths, then the collected tokens.
            console.log(`States processed: ${this.statesProcessed}`);
            console.log("\n\nCollected rules:\n");
            for (const rule of this.candidates.rules) {
                let path = "";
                for (const token of rule[1].ruleList) {
                    path += this.ruleNames[token] + " ";
                }
                console.log(this.ruleNames[rule[0]] + ", path: ", path);
            }

            const sortedTokens: Set<string> = new Set();
            for (const token of this.candidates.tokens) {
                let value = this.vocabulary.getDisplayName(token[0]);
                for (const following of token[1]) {
                    value += " " + this.vocabulary.getDisplayName(following);
                }
                sortedTokens.add(value);
            }

            console.log("\n\nCollected tokens:\n");
            for (const symbol of sortedTokens) {
                console.log(symbol);
            }
            console.log("\n\n");
        }

        return this.candidates;
    }
 
    /**
     * Checks if the predicate associated with the given transition evaluates to true.
     *
     * @param transition The transition to check.
     * @returns the evaluation result of the predicate.
     */
    private checkPredicate(transition: PredicateTransition): boolean {
        // Predicates are evaluated without a real rule context, as we have none during the ATN walk.
        const emptyContext = ParserRuleContext.emptyContext();

        return transition.predicate.eval(this.parser, emptyContext);
    }
 
    /**
     * Walks the rule chain upwards or downwards (depending on translateRulesTopDown) to see if that matches any of the
     * preferred rules. If found, that rule is added to the collection candidates and true is returned.
     *
     * @param ruleWithStartTokenList The list to convert.
     * @returns true if any of the stack entries was converted.
     */
    private translateStackToRuleIndex(ruleWithStartTokenList: RuleWithStartTokenList): boolean {
        if (this.preferredRules.size === 0) {
            return false;
        }

        // Walk the rule stack in the configured direction. Top-down (lowest to highest rule level) prioritizes
        // a lower preferred rule nested inside a higher one; bottom-up (highest to lowest) prioritizes a higher
        // preferred rule containing a lower one.
        const count = ruleWithStartTokenList.length;
        for (let step = 0; step < count; ++step) {
            const index = this.translateRulesTopDown ? count - 1 - step : step;
            if (this.translateToRuleIndex(index, ruleWithStartTokenList)) {
                return true;
            }
        }

        return false;
    }
 
    /**
     * Given the index of a rule from a rule chain, check if that matches any of the preferred rules. If it matches,
     * that rule is added to the collection candidates and true is returned.
     *
     * @param i The rule index.
     * @param ruleWithStartTokenList The list to check.
     * @returns true if the specified rule is in the list of preferred rules.
     */
    private translateToRuleIndex(i: number, ruleWithStartTokenList: RuleWithStartTokenList): boolean {
        const { ruleIndex, startTokenIndex } = ruleWithStartTokenList[i];
        if (!this.preferredRules.has(ruleIndex)) {
            return false;
        }

        // The rule path consists of all rules that lead to this entry (excluding the entry itself).
        const path = ruleWithStartTokenList.slice(0, i).map((entry) => entry.ruleIndex);

        // Only add the rule if no candidate with the same rule index and the same path exists yet.
        let addNew = true;
        for (const [candidateIndex, candidateRule] of this.candidates.rules) {
            if (candidateIndex === ruleIndex
                && candidateRule.ruleList.length === path.length
                && path.every((v, j) => v === candidateRule.ruleList[j])) {
                addNew = false;
                break;
            }
        }

        if (addNew) {
            this.candidates.rules.set(ruleIndex, {
                startTokenIndex,
                ruleList: path,
            });
            if (this.showDebugOutput) {
                console.log("=====> collected: ", this.ruleNames[ruleIndex]);
            }
        }

        return true;
    }
 
    /**
     * This method follows the given transition and collects all symbols within the same rule that directly follow it
     * without intermediate transitions to other rules and only if there is a single symbol for a transition.
     *
     * @param transition The transition from which to start.
     * @returns A list of token types.
     */
    private getFollowingTokens(transition: Transition): number[] {
        const tokens: number[] = [];

        // Depth-first walk over ATOM transitions only, so we never leave the current rule.
        const statesToVisit: ATNState[] = [transition.target];
        while (statesToVisit.length > 0) {
            const current = statesToVisit.pop();
            if (!current) {
                continue;
            }

            for (const candidate of current.getTransitions()) {
                if (candidate.serializationType !== TransitionType.ATOM) {
                    continue;
                }

                if (candidate.isEpsilon) {
                    statesToVisit.push(candidate.target);
                } else {
                    const labels = candidate.label!.toArray();
                    // Only single-symbol, non-ignored transitions extend the chain.
                    if (labels.length === 1 && !this.ignoredTokens.has(labels[0])) {
                        tokens.push(labels[0]);
                        statesToVisit.push(candidate.target);
                    }
                }
            }
        }

        return tokens;
    }
 
    /**
     * Entry point for the recursive follow set collection function.
     *
     * @param start Start state.
     * @param stop Stop state.
     * @returns Follow sets.
     */
    private determineFollowSets(start: ATNState, stop: ATNState): FollowSetsHolder {
        const sets: FollowSetWithPath[] = [];
        const isExhaustive = this.collectFollowSets(start, stop, sets, [], []);

        // Sets are split by path to allow translating them to preferred rules. But for quick hit tests
        // it is also useful to have a set with all symbols combined.
        const combined = new IntervalSet();
        sets.forEach((set) => {
            combined.addAll(set.intervals);
        });

        return { sets, isExhaustive, combined };
    }
 
    /**
     * Collects possible tokens which could be matched following the given ATN state. This is essentially the same
     * algorithm as used in the LL1Analyzer class, but here we consider predicates also and use no parser rule context.
     *
     * @param s The state to continue from.
     * @param stopState The state which ends the collection routine.
     * @param followSets A pass through parameter to add found sets to.
     * @param stateStack A stack to avoid endless recursions.
     * @param ruleStack The current rule stack.
     * @returns true if the follow sets is exhaustive, i.e. we terminated before the rule end was reached, so no
     * subsequent rules could add tokens
     */
    private collectFollowSets(s: ATNState, stopState: ATNState, followSets: FollowSetWithPath[], stateStack: ATNState[],
        ruleStack: number[]): boolean {

        // Already seen on the current path? Then this is a loop in the ATN - stop here.
        if (stateStack.find((x) => x === s)) {
            return true;
        }
        stateStack.push(s);

        // Reaching the stop state or any rule stop state means the rule end is reachable from here,
        // so the follow set is non-exhaustive (callers may contribute further tokens).
        if (s === stopState || s.stateType === ATNStateType.RULE_STOP) {
            stateStack.pop();

            return false;
        }

        let isExhaustive = true;
        for (const transition of s.getTransitions()) {
            if (transition.serializationType === TransitionType.RULE) {
                const ruleTransition: RuleTransition = transition as RuleTransition;
                // Skip rules we are already processing (avoids infinite rule recursion).
                if (ruleStack.indexOf(ruleTransition.target.ruleIndex) !== -1) {
                    continue;
                }

                ruleStack.push(ruleTransition.target.ruleIndex);
                const ruleFollowSetsIsExhaustive = this.collectFollowSets(
                    transition.target, stopState, followSets, stateStack, ruleStack);
                ruleStack.pop();

                // If the subrule had an epsilon transition to the rule end, the tokens added to
                // the follow set are non-exhaustive and we should continue processing subsequent transitions post-rule
                if (!ruleFollowSetsIsExhaustive) {
                    const nextStateFollowSetsIsExhaustive = this.collectFollowSets(
                        ruleTransition.followState, stopState, followSets, stateStack, ruleStack);
                    isExhaustive &&= nextStateFollowSetsIsExhaustive;
                }

            } else if (transition.serializationType === TransitionType.PREDICATE) {
                // Only follow predicate transitions whose predicate currently evaluates to true.
                if (this.checkPredicate(transition as PredicateTransition)) {
                    const nextStateFollowSetsIsExhaustive = this.collectFollowSets(
                        transition.target, stopState, followSets, stateStack, ruleStack);
                    isExhaustive &&= nextStateFollowSetsIsExhaustive;
                }
            } else if (transition.isEpsilon) {
                const nextStateFollowSetsIsExhaustive = this.collectFollowSets(
                    transition.target, stopState, followSets, stateStack, ruleStack);
                isExhaustive &&= nextStateFollowSetsIsExhaustive;
            } else if (transition.serializationType === TransitionType.WILDCARD) {
                // A wildcard matches any user token type, so record the full token range.
                const set = new FollowSetWithPath();
                set.intervals = IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType);
                set.path = ruleStack.slice();
                followSets.push(set);
            } else {
                let label = transition.label;
                if (label && label.size > 0) {
                    if (transition.serializationType === TransitionType.NOT_SET) {
                        // A NOT set matches everything except the listed symbols - take the complement.
                        label = label.complement(IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType));
                    }
                    const set = new FollowSetWithPath();
                    set.intervals = label;
                    set.path = ruleStack.slice();
                    set.following = this.getFollowingTokens(transition);
                    followSets.push(set);
                }
            }
        }
        stateStack.pop();

        return isExhaustive;
    }
 
    /**
     * Walks the ATN for a single rule only. It returns the token stream position for each path that could be matched
     * in this rule.
     * The result can be empty in case we hit only non-epsilon transitions that didn't match the current input or if we
     * hit the caret position.
     *
     * @param startState The start state.
     * @param tokenListIndex The token index we are currently at.
     * @param callStack The stack that indicates where in the ATN we are currently.
     * @param precedence The current precedence level.
     * @param indentation A value to determine the current indentation when doing debug prints.
     * @returns the set of token stream indexes (which depend on the ways that had to be taken).
     */
    private processRule(startState: RuleStartState, tokenListIndex: number, callStack: RuleWithStartTokenList,
        precedence: number, indentation: number): RuleEndStatus {
 
        // Start with rule specific handling before going into the ATN walk.
 
        // Check first if we've taken this path with the same input before.
        let positionMap = this.shortcutMap.get(startState.ruleIndex);
        if (!positionMap) {
            positionMap = new Map();
            this.shortcutMap.set(startState.ruleIndex, positionMap);
        } else {
            if (positionMap.has(tokenListIndex)) {
                if (this.showDebugOutput) {
                    console.log("=====> shortcut");
                }
 
                return positionMap.get(tokenListIndex)!;
            }
        }
 
        const result: RuleEndStatus = new Set<number>();
 
        // For rule start states we determine and cache the follow set, which gives us 3 advantages:
        // 1) We can quickly check if a symbol would be matched when we follow that rule. We can so check in advance
        //    and can save us all the intermediate steps if there is no match.
        // 2) We'll have all symbols that are collectable already together when we are at the caret on rule enter.
        // 3) We get this lookup for free with any 2nd or further visit of the same rule, which often happens
        //    in non trivial grammars, especially with (recursive) expressions and of course when invoking code
        //    completion multiple times.
        let setsPerState = CodeCompletionCore.followSetsByATN.get(this.parser.constructor.name);
        if (!setsPerState) {
            setsPerState = new Map();
            CodeCompletionCore.followSetsByATN.set(this.parser.constructor.name, setsPerState);
        }
 
        let followSets = setsPerState.get(startState.stateNumber);
        if (!followSets) {
            const stop = this.atn.ruleToStopState[startState.ruleIndex];
            followSets = this.determineFollowSets(startState, stop);
            setsPerState.set(startState.stateNumber, followSets);
        }
 
        // Get the token index where our rule starts from our (possibly filtered) token list
        const startTokenIndex = this.tokens[tokenListIndex].tokenIndex;
 
        callStack.push({
            startTokenIndex,
            ruleIndex: startState.ruleIndex,
        });
 
        if (tokenListIndex >= this.tokens.length - 1) { // At caret?
            if (this.preferredRules.has(startState.ruleIndex)) {
                // No need to go deeper when collecting entries and we reach a rule that we want to collect anyway.
                this.translateStackToRuleIndex(callStack);
            } else {
                // Convert all follow sets to either single symbols or their associated preferred rule and add
                // the result to our candidates list.
                for (const set of followSets.sets) {
                    const fullPath = callStack.slice();
 
                    // Rules derived from our followSet will always start at the same token as our current rule
                    const followSetPath = set.path.map((path) => ({
                        startTokenIndex,
                        ruleIndex: path,
                    }));
 
                    fullPath.push(...followSetPath);
                    if (!this.translateStackToRuleIndex(fullPath)) {
                        for (const symbol of set.intervals.toArray()) {
                            if (!this.ignoredTokens.has(symbol)) {
                                if (this.showDebugOutput) {
                                    console.log("=====> collected: ", this.vocabulary.getDisplayName(symbol));
                                }
                                if (!this.candidates.tokens.has(symbol)) {
                                    // Following is empty if there is more than one entry in the set.
                                    this.candidates.tokens.set(symbol, set.following);
                                } else {
                                    // More than one following list for the same symbol.
                                    if (this.candidates.tokens.get(symbol) !== set.following) {
                                        this.candidates.tokens.set(symbol, []);
                                    }
                                }
                            }
                        }
                    }
                }
            }
 
            if (!followSets.isExhaustive) {
                // If we're at the caret but the follow sets is non-exhaustive (empty or all tokens are optional),
                // we should continue to collect tokens following this rule
                result.add(tokenListIndex);
            }
 
            callStack.pop();
 
            return result;
 
        } else {
            // Process the rule if we either could pass it without consuming anything (epsilon transition)
            // or if the current input symbol will be matched somewhere after this entry point.
            // Otherwise stop here.
            const currentSymbol = this.tokens[tokenListIndex].type;
            if (followSets.isExhaustive && !followSets.combined.contains(currentSymbol)) {
                callStack.pop();
 
                return result;
            }
        }
 
        if (startState.isPrecedenceRule) {
            this.precedenceStack.push(precedence);
        }
 
        // The current state execution pipeline contains all yet-to-be-processed ATN states in this rule.
        // For each such state we store the token index + a list of rules that lead to it.
        const statePipeline: IPipelineEntry[] = [];
        let currentEntry;
 
        // Bootstrap the pipeline.
        statePipeline.push({ state: startState, tokenListIndex });
 
        while (statePipeline.length > 0) {
            currentEntry = statePipeline.pop()!;
            ++this.statesProcessed;
 
            const currentSymbol = this.tokens[currentEntry.tokenListIndex].type;
 
            const atCaret = currentEntry.tokenListIndex >= this.tokens.length - 1;
            if (this.showDebugOutput) {
                this.printDescription(indentation, currentEntry.state, this.generateBaseDescription(currentEntry.state),
                    currentEntry.tokenListIndex);
                if (this.showRuleStack) {
                    this.printRuleState(callStack);
                }
            }
 
            if (currentEntry.state.stateType === ATNStateType.RULE_STOP) {
                // Record the token index we are at, to report it to the caller.
                result.add(currentEntry.tokenListIndex);
                continue;
            }
 
            const transitions = currentEntry.state.getTransitions();
 
            // We simulate here the same precedence handling as the parser does, which uses hard coded values.
            // For rules that are not left recursive this value is ignored (since there is no precedence transition).
            for (const transition of transitions) {
                switch (transition.serializationType) {
                    case TransitionType.RULE: {
                        const ruleTransition = transition as RuleTransition;
                        const endStatus = this.processRule(transition.target as RuleStartState,
                            currentEntry.tokenListIndex, callStack, ruleTransition.precedence, indentation + 1);
                        for (const position of endStatus) {
                            statePipeline.push({
                                state: (<RuleTransition>transition).followState,
                                tokenListIndex: position,
                            });
                        }
                        break;
                    }
 
                    case TransitionType.PREDICATE: {
                        if (this.checkPredicate(transition as PredicateTransition)) {
                            statePipeline.push({
                                state: transition.target,
                                tokenListIndex: currentEntry.tokenListIndex,
                            });
                        }
                        break;
                    }
 
                    case TransitionType.PRECEDENCE: {
                        const predTransition = transition as PrecedencePredicateTransition;
                        if (predTransition.precedence >= this.precedenceStack[this.precedenceStack.length - 1]) {
                            statePipeline.push({
                                state: transition.target,
                                tokenListIndex: currentEntry.tokenListIndex,
                            });
                        }
 
                        break;
                    }
 
                    case TransitionType.WILDCARD: {
                        if (atCaret) {
                            if (!this.translateStackToRuleIndex(callStack)) {
                                for (const token of IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType)
                                    .toArray()) {
                                    if (!this.ignoredTokens.has(token)) {
                                        this.candidates.tokens.set(token, []);
                                    }
                                }
                            }
                        } else {
                            statePipeline.push({
                                state: transition.target,
                                tokenListIndex: currentEntry.tokenListIndex + 1,
                            });
                        }
                        break;
                    }
 
                    default: {
                        if (transition.isEpsilon) {
                            // Jump over simple states with a single outgoing epsilon transition.
                            statePipeline.push({
                                state: transition.target,
                                tokenListIndex: currentEntry.tokenListIndex,
                            });
                            continue;
                        }
 
                        let set = transition.label;
                        if (set && set.size > 0) {
                            if (transition.serializationType === TransitionType.NOT_SET) {
                                set = set.complement(IntervalSet.of(Token.MIN_USER_TOKEN_TYPE, this.atn.maxTokenType));
                            }
                            if (atCaret) {
                                if (!this.translateStackToRuleIndex(callStack)) {
                                    const list = set.toArray();
                                    const addFollowing = list.length === 1;
                                    for (const symbol of list) {
                                        if (!this.ignoredTokens.has(symbol)) {
                                            if (this.showDebugOutput) {
                                                console.log("=====> collected: ",
                                                    this.vocabulary.getDisplayName(symbol));
                                            }
 
                                            if (addFollowing) {
                                                this.candidates.tokens.set(symbol, this.getFollowingTokens(transition));
                                            } else {
                                                this.candidates.tokens.set(symbol, []);
                                            }
                                        }
                                    }
                                }
                            } else {
                                if (set.contains(currentSymbol)) {
                                    if (this.showDebugOutput) {
                                        console.log("=====> consumed: ", this.vocabulary.getDisplayName(currentSymbol));
                                    }
                                    statePipeline.push({
                                        state: transition.target,
                                        tokenListIndex: currentEntry.tokenListIndex + 1,
                                    });
                                }
                            }
                        }
                    }
                }
            }
        }
 
        callStack.pop();
        if (startState.isPrecedenceRule) {
            this.precedenceStack.pop();
        }
 
        // Cache the result, for later lookup to avoid duplicate walks.
        positionMap.set(tokenListIndex, result);
 
        return result;
    }
 
    private generateBaseDescription(state: ATNState): string {
        // Builds a short human-readable tag for an ATN state, of the form
        // "[<number> <type>] in <rule name>", used in debug output.
        let stateValue: string | number;
        if (state.stateNumber === ATNState.INVALID_STATE_NUMBER) {
            stateValue = "Invalid";
        } else {
            stateValue = state.stateNumber;
        }

        const typeName = CodeCompletionCore.atnStateTypeMap[state.stateType];
        const ruleName = this.ruleNames[state.ruleIndex];

        return `[${stateValue} ${typeName}] in ${ruleName}`;
    }
 
    private printDescription(indentation: number, state: ATNState, baseDescription: string, tokenIndex: number) {
        // Logs one indented debug line for the given ATN state. When
        // debugOutputWithTransitions is enabled, each outgoing transition is
        // appended on its own (further indented) line, showing its label
        // symbols and target state.
        const indent = "  ".repeat(indentation);

        let transitionDescription = "";
        if (this.debugOutputWithTransitions) {
            for (const transition of state.getTransitions()) {
                const symbols: number[] = transition.label ? transition.label.toArray() : [];

                let labels: string;
                if (symbols.length > 2) {
                    // Only print start and end symbols to avoid large lists in debug output.
                    labels = this.vocabulary.getDisplayName(symbols[0]) + " .. " +
                        this.vocabulary.getDisplayName(symbols[symbols.length - 1]);
                } else {
                    labels = symbols.map((symbol) => this.vocabulary.getDisplayName(symbol)).join(", ");
                }

                if (labels.length === 0) {
                    // No label symbols means an epsilon transition.
                    labels = "ε";
                }

                transitionDescription += `\n${indent}\t(${labels}) [${transition.target.stateNumber} ` +
                    `${CodeCompletionCore.atnStateTypeMap[transition.target.stateType]}] in ` +
                    `${this.ruleNames[transition.target.ruleIndex]}`;
            }
        }

        // Double angle brackets mark the caret position (at or past the last token).
        const tokenMarker = tokenIndex >= this.tokens.length - 1
            ? `<<${this.tokenStartIndex + tokenIndex}>> `
            : `<${this.tokenStartIndex + tokenIndex}> `;

        console.log(indent + tokenMarker + "Current state: " + baseDescription + transitionDescription);
    }
 
    private printRuleState(stack: RuleWithStartTokenList) {
        // Logs the rule name of every entry on the given rule call stack,
        // one per line, or a placeholder if the stack is empty.
        if (stack.length === 0) {
            console.log("<empty stack>");

            return;
        }

        stack.forEach((entry) => {
            console.log(this.ruleNames[entry.ruleIndex]);
        });
    }
 
}