class YARP::LexCompat::Heredoc::DedentingHeredoc

Dedenting heredocs are a little more complicated than plain heredocs. Ripper outputs on_ignored_sp tokens for the whitespace that is being removed from the output, while YARP only modifies the node itself and keeps the token the same. This simplifies YARP, but makes comparing against Ripper much harder because there is a length mismatch.

Fortunately, we already have to pull out the heredoc tokens in order to insert them into the stream in the correct order. As such, we can do some extra manipulation on the tokens to make them match Ripper’s output by mirroring the dedent logic that Ripper uses.
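
For example, lexing a small squiggly heredoc with Ripper shows the stripped indentation surfacing as its own token, which is the shape this class has to reproduce. This is only an illustration; the output below is abridged and the lexer states are elided.

require "ripper"

# Ripper reports the two spaces removed by <<~ as an explicit :on_ignored_sp
# token. YARP instead keeps "  foo\n" as a single content token and records
# the dedent on the heredoc node.
pp Ripper.lex("<<~TEXT\n  foo\nTEXT\n")
# => [[[1, 0], :on_heredoc_beg, "<<~TEXT", ...],
#     ...,
#     [[2, 0], :on_ignored_sp, "  ", ...],
#     [[2, 2], :on_tstring_content, "foo\n", ...],
#     [[3, 0], :on_heredoc_end, "TEXT\n", ...]]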

Constants

TAB_WIDTH

The tab stop width used when measuring leading whitespace: a tab character advances the width to the next multiple of this value rather than counting as a single column.

Attributes

dedent [R]
dedent_next [R]
embexpr_balance [R]
tokens [R]

Public Class Methods

new()
# File yarp/lex_compat.rb, line 354
def initialize
  @tokens = []
  @dedent_next = true
  @dedent = nil
  @embexpr_balance = 0
end

Public Instance Methods

<<(token)

As tokens are coming in, we track the minimum amount of common leading whitespace on plain string content tokens. This allows us to later remove that amount of whitespace from the beginning of each line.

# File yarp/lex_compat.rb, line 364
def <<(token)
  case token.event
  when :on_embexpr_beg, :on_heredoc_beg
    @embexpr_balance += 1
  when :on_embexpr_end, :on_heredoc_end
    @embexpr_balance -= 1
  when :on_tstring_content
    if embexpr_balance == 0
      token.value.split(/(?<=\n)/).each_with_index do |line, index|
        next if line.strip.empty? && line.end_with?("\n")
        next if !(dedent_next || index > 0)

        leading = line[/\A(\s*)\n?/, 1]
        next_dedent = 0

        leading.each_char do |char|
          if char == "\t"
            next_dedent = next_dedent - (next_dedent % TAB_WIDTH) + TAB_WIDTH
          else
            next_dedent += 1
          end
        end

        @dedent = [dedent, next_dedent].compact.min
      end
    end
  end

  @dedent_next = token.event == :on_tstring_content && embexpr_balance == 0
  tokens << token
end
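
As a rough standalone sketch of the width measurement above (TAB_STOP = 8 is an assumption standing in for the class's TAB_WIDTH constant): a tab advances the count to the next tab stop instead of adding a single column, and the minimum width across all eligible lines becomes dedent.

# Sketch only; not part of the class. Measures the visual width of a line's
# leading whitespace the same way << does above.
TAB_STOP = 8

def leading_width(line)
  width = 0
  line[/\A(\s*)\n?/, 1].each_char do |char|
    if char == "\t"
      # Jump to the next multiple of the tab stop.
      width = width - (width % TAB_STOP) + TAB_STOP
    else
      width += 1
    end
  end
  width
end

leading_width("    foo\n")  # => 4
leading_width("\tfoo\n")    # => 8
leading_width("  \tfoo\n")  # => 8 (the tab jumps to the next tab stop)
leading_width("\t  foo\n")  # => 10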

to_a()
# File yarp/lex_compat.rb, line 396
def to_a
  # If every line in the heredoc is blank, we still need to split up the
  # string content token into multiple tokens.
  if dedent.nil?
    results = []
    embexpr_balance = 0

    tokens.each do |token|
      case token.event
      when :on_embexpr_beg, :on_heredoc_beg
        embexpr_balance += 1
        results << token
      when :on_embexpr_end, :on_heredoc_end
        embexpr_balance -= 1
        results << token
      when :on_tstring_content
        if embexpr_balance == 0
          lineno = token[0][0]
          column = token[0][1]

          token.value.split(/(?<=\n)/).each_with_index do |value, index|
            column = 0 if index > 0
            results << Token.new([[lineno, column], :on_tstring_content, value, token.state])
            lineno += 1
          end
        else
          results << token
        end
      else
        results << token
      end
    end

    return results
  end

  # Otherwise, we're going to run through each token in the list and
  # insert on_ignored_sp tokens for the amount of dedent that we need to
  # perform. We also need to remove the dedent from the beginning of
  # each line of plain string content tokens.
  results = []
  dedent_next = true
  embexpr_balance = 0

  tokens.each do |token|
    # Notice that the structure of this conditional largely matches the
    # whitespace calculation we performed above. This is because
    # checking if the subsequent token needs to be dedented is common to
    # both the dedent calculation and the ignored_sp insertion.
    case token.event
    when :on_embexpr_beg
      embexpr_balance += 1
      results << token
    when :on_embexpr_end
      embexpr_balance -= 1
      results << token
    when :on_tstring_content
      if embexpr_balance == 0
        # Here we're going to split the string on newlines, but maintain
        # the newlines in the resulting array. We'll do that with a look
        # behind assertion.
        splits = token.value.split(/(?<=\n)/)
        index = 0

        while index < splits.length
          line = splits[index]
          lineno = token[0][0] + index
          column = token[0][1]

          # Blank lines do not count toward common leading whitespace
          # calculation and do not need to be dedented.
          if dedent_next || index > 0
            column = 0
          end

          # If the dedent is 0 and we're not supposed to dedent the next
          # line or this line doesn't start with whitespace, then we
          # should concatenate the rest of the string to match ripper.
          if dedent == 0 && (!dedent_next || !line.start_with?(/\s/))
            line = splits[index..].join
            index = splits.length
          end

          # If we are supposed to dedent this line or if this is not the
          # first line of the string and this line isn't entirely blank,
          # then we need to insert an on_ignored_sp token and remove the
          # dedent from the beginning of the line.
          if (dedent > 0) && (dedent_next || index > 0)
            deleting = 0
            deleted_chars = []

            # Gather up all of the characters that we're going to
            # delete, stopping when you hit a character that would put
            # you over the dedent amount.
            line.each_char.with_index do |char, i|
              case char
              when "\r"
                if line.chars[i + 1] == "\n"
                  break
                end
              when "\n"
                break
              when "\t"
                deleting = deleting - (deleting % TAB_WIDTH) + TAB_WIDTH
              else
                deleting += 1
              end

              break if deleting > dedent
              deleted_chars << char
            end

            # If we have something to delete, then delete it from the
            # string and insert an on_ignored_sp token.
            if deleted_chars.any?
              ignored = deleted_chars.join
              line.delete_prefix!(ignored)

              results << Token.new([[lineno, 0], :on_ignored_sp, ignored, token[3]])
              column = ignored.length
            end
          end

          results << Token.new([[lineno, column], token[1], line, token[3]]) unless line.empty?
          index += 1
        end
      else
        results << token
      end
    else
      results << token
    end

    dedent_next =
      ((token.event == :on_tstring_content) || (token.event == :on_heredoc_end)) &&
      embexpr_balance == 0
  end

  results
end
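
As a rough usage sketch, the class can be driven by hand with a single content token. This assumes Token resolves to YARP::LexCompat::Token (the Ripper-style token wrapper used throughout lex_compat) and uses nil as a placeholder lexer state; in real use both come from the surrounding token stream. << measures the dedent, and to_a reapplies it as on_ignored_sp tokens:

heredoc = YARP::LexCompat::Heredoc::DedentingHeredoc.new

# A content token spanning two lines of the heredoc body, starting on line 2.
# The nil lexer state is a placeholder for this sketch.
content = YARP::LexCompat::Token.new(
  [[2, 0], :on_tstring_content, "  foo\n    bar\n", nil]
)

heredoc << content
heredoc.dedent # => 2, the common leading whitespace of the two lines

heredoc.to_a.each { |token| p [token[0], token.event, token.value] }
# [[2, 0], :on_ignored_sp, "  "]
# [[2, 2], :on_tstring_content, "foo\n"]
# [[3, 0], :on_ignored_sp, "  "]
# [[3, 2], :on_tstring_content, "  bar\n"]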