#!/usr/bin/env python
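"""Extracts javadoc-style comments from Java source read on stdin,
pairing each class and method with its declaration and the block
comment that precedes it."""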

import sparse
import sys

class JavaDocParser(sparse.SimpleParser, object):
  def __init__(self):
    """sets up the JavaParser"""
    sparse.SimpleParser.__init__(self)
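    # treat braces and semicolons as significant tokens, and teach the
    # tokenizer Java's line and block comment delimiters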
    self.defaulttokenlist.extend("{};")
    self.startcommenttokens = ["//", "/*"]
    self.endcommenttokens = {"//": "\n", "/*": "*/"}
    self.standardtokenizers = [self.commenttokenize, self.stringtokenize,
        self.removewhitespace, self.separatetokens]

  def keeptogether(self, input):
    """checks whether a token should be kept together"""
    # don't retokenize strings or comments
    return sparse.SimpleParser.keeptogether(self, input) or self.iscommenttoken(input)

  def iscommenttoken(self, input):
    """returns whether the given token is a comment token"""
    for startcommenttoken in self.startcommenttokens:
      if input.startswith(startcommenttoken):
        return True
    return False

  def stringtokenize(self, input):
    """makes strings in input into tokens... but keeps comment tokens together"""
    if self.iscommenttoken(input):
      return [input]
    return sparse.SimpleParser.stringtokenize(self, input)

  def commenttokenize(self, input):
    """makes comments in the input into tokens"""
    if sparse.SimpleParser.keeptogether(self, input): return [input]
    tokens = []
    incomment = False
    laststart = 0
    endcommenttoken = None
    pos = 0
    # scan with a while loop so the position can jump past multi-character
    # delimiters instead of rescanning them; otherwise the trailing "/" of
    # "*/" could be misread as the start of a "//" comment
    while pos < len(input):
      if incomment:
        if input[pos:pos+len(endcommenttoken)] == endcommenttoken:
          # include the end delimiter in the comment token and skip past it
          pos += len(endcommenttoken)
          if pos > laststart: tokens.append(input[laststart:pos])
          incomment, laststart = False, pos
        else:
          pos += 1
      else:
        for startcommenttoken in self.startcommenttokens:
          if input[pos:pos+len(startcommenttoken)] == startcommenttoken:
            # emit any pending plain text before the comment as its own token
            if pos > laststart: tokens.append(input[laststart:pos])
            incomment, laststart = True, pos
            endcommenttoken = self.endcommenttokens[startcommenttoken]
            pos += len(startcommenttoken)
            break
        else:
          pos += 1
    if laststart < len(input): tokens.append(input[laststart:])
    return tokens

  def parse(self, source):
    """tokenizes and analyzes the java source"""
    self.tokenize(source)
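    # single pass over the token stream: comments collects (name, comment)
    # pairs, nestedclasses stacks (classname, nestlevel) for the enclosing
    # classes, and nestlevel tracks the current brace depth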
    self.comments = []
    lastcomment = None
    lastend = 0
    expectingclass = False
    nestedclasses = []
    nestlevel = 0
    varinit = False
    for pos, token in enumerate(self.tokens):
      # a member sits one brace level inside the most recently opened class
      directlybelowclass = nestedclasses and (nestedclasses[-1][1] == nestlevel-1)
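      # remember the most recent block comment; it may document the
      # declaration that follows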
      if self.iscommenttoken(token) and token.startswith("/*"):
        lastcomment = token
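      # the token after the class keyword is the class name; record it
      # together with any javadoc comment that preceded the declaration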
      if expectingclass:
        self.comments.append((token, lastcomment))
        nestedclasses.append((token, nestlevel))
        expectingclass = False
      elif token == "class":
        expectingclass = True
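      # track brace depth so we know where class bodies begin and end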
      if token == "{":
        nestlevel += 1
      if token == "}":
        nestlevel -= 1
        if nestedclasses:
          if directlybelowclass:
            nestedclasses.pop()
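      # an opening parenthesis directly inside a class body that is not
      # part of an initializer marks a method declaration; gather the
      # declaration from the last statement boundary to the closing paren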
      if token == "(" and directlybelowclass and not varinit:
        functionname = self.tokens[pos-1]
        declstart = lastend+1
        while self.iscommenttoken(self.tokens[declstart]):
          declstart += 1
        declend = declstart
        while self.tokens[declend] != ')':
          declend += 1
        decl = " ".join(self.tokens[declstart:declend+1])
        self.comments.append(((nestedclasses[-1][0], functionname), decl + "\n" + (lastcomment or "")))
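      # after "=" we are in a field initializer, so a following "(" is a
      # call or constructor invocation, not a method declaration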
      if token == "=" and directlybelowclass:
        varinit = True
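      # statement and brace boundaries reset the pending comment and the
      # start of the next declaration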
      if token in ["{", "}", ";"]:
        lastcomment = None
        lastend = pos
        varinit = False

if __name__ == "__main__":
  parser = JavaDocParser()
  source = sys.stdin.read()
  parser.parse(source)

