[glassfish-toplink-essentials] Initial import (#824976).

gil gil at fedoraproject.org
Mon Jul 2 15:32:04 UTC 2012


commit 7143c140f2c9887d0a178d6dc7620c0e22b19df0
Author: gil <puntogil at libero.it>
Date:   Mon Jul 2 15:33:47 2012 +0200

    Initial import (#824976).

 .gitignore                                         |    2 +
 glassfish-entity-persistence-build.patch           |   17 +
 ...ce-2.0.41-agent-remove-manifest-classpath.patch |    7 +
 glassfish-persistence-2.0.41-jdk7.patch            |   21 +
 ...sfish-persistence-2.0.41-use_system_antlr.patch |48011 ++++++++++++++++++++
 glassfish-toplink-essentials.spec                  |  122 +
 sources                                            |    2 +
 7 files changed, 48182 insertions(+), 0 deletions(-)
---
diff --git a/.gitignore b/.gitignore
index e69de29..1852176 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1,2 @@
+/glassfish-bootstrap.tar.gz
+/glassfish-persistence-v2-b46-src.zip
diff --git a/glassfish-entity-persistence-build.patch b/glassfish-entity-persistence-build.patch
new file mode 100644
index 0000000..05d7eec
--- /dev/null
+++ b/glassfish-entity-persistence-build.patch
@@ -0,0 +1,17 @@
+--- glassfish/entity-persistence/build.xml.sav	2008-02-26 18:01:43.000000000 +0100
++++ glassfish/entity-persistence/build.xml	2008-02-26 18:06:35.000000000 +0100
+@@ -182,8 +182,12 @@
+   
+     <!-- javadocs -->
+     <target name="docs" depends="init">
+-        <mkdir dir="${release.javadocs.dir}"/>
+-<!-- TODO - create javadoc target -->
++        <mkdir dir="${build.dir}/javadoc"/>
++         <javadoc sourcepath="${src.dir}"
++                  packagenames="persistence.antlr,oracle.toplink"
++                  destdir="${build.dir}/javadoc"
++                  classpath="${javaee.jar}"/>
++
+     </target>
+ 
+     <!-- Run ANTLR. -->
diff --git a/glassfish-persistence-2.0.41-agent-remove-manifest-classpath.patch b/glassfish-persistence-2.0.41-agent-remove-manifest-classpath.patch
new file mode 100644
index 0000000..ffd57fd
--- /dev/null
+++ b/glassfish-persistence-2.0.41-agent-remove-manifest-classpath.patch
@@ -0,0 +1,7 @@
+--- glassfish/entity-persistence/toplink-essentials-agent.mf	2007-02-15 01:39:08.000000000 +0100
++++ glassfish/entity-persistence/toplink-essentials-agent.mf-gil	2012-05-24 17:05:27.294145430 +0200
+@@ -1,4 +1,3 @@
+-Class-Path: toplink-essentials.jar
+ Extension-Name: oracle.toplink.essentials.agent
+ Specification-Title: Java Persistence API
+ Specification-Version: 1.0
diff --git a/glassfish-persistence-2.0.41-jdk7.patch b/glassfish-persistence-2.0.41-jdk7.patch
new file mode 100644
index 0000000..2ec789c
--- /dev/null
+++ b/glassfish-persistence-2.0.41-jdk7.patch
@@ -0,0 +1,21 @@
+--- glassfish/entity-persistence/src/java/oracle/toplink/essentials/internal/ejb/cmp3/jdbc/base/DataSourceImpl.java	2007-01-09 02:14:12.000000000 +0100
++++ glassfish/entity-persistence/src/java/oracle/toplink/essentials/internal/ejb/cmp3/jdbc/base/DataSourceImpl.java-gil	2012-02-02 11:42:08.045005179 +0100
+@@ -26,6 +26,8 @@
+ import java.sql.SQLException;
+ import java.sql.DriverManager;
+ import javax.sql.DataSource;
++import java.sql.SQLFeatureNotSupportedException;
++import java.util.logging.Logger;
+ import oracle.toplink.essentials.internal.ejb.cmp3.transaction.base.TransactionManagerImpl;
+ 
+ /**
+@@ -162,4 +164,9 @@
+     public boolean isWrapperFor(Class<?> iface) throws SQLException {
+         return false;
+     }
++
++    public Logger getParentLogger() throws SQLFeatureNotSupportedException {
++        throw new SQLFeatureNotSupportedException("Not implemented method getParentLogger()");
++    }
++
+ }
diff --git a/glassfish-persistence-2.0.41-use_system_antlr.patch b/glassfish-persistence-2.0.41-use_system_antlr.patch
new file mode 100644
index 0000000..1111e56
--- /dev/null
+++ b/glassfish-persistence-2.0.41-use_system_antlr.patch
@@ -0,0 +1,48011 @@
+diff -Nru glassfish/entity-persistence/build.xml glassfish-gil/entity-persistence/build.xml
+--- glassfish/entity-persistence/build.xml	2012-05-24 17:51:52.505918327 +0200
++++ glassfish-gil/entity-persistence/build.xml	2012-05-24 17:40:05.000000000 +0200
+@@ -122,6 +122,7 @@
+                failonerror="true">
+             <classpath>
+                 <pathelement location="${javaee.jar}"/>
++                <pathelement location="/usr/share/java/antlr.jar"/>
+             </classpath>
+         </javac>
+         <delete file="${component.classes.dir}/oracle/toplink/essentials/Version.java"/>
+@@ -184,18 +185,23 @@
+     <target name="docs" depends="init">
+         <mkdir dir="${build.dir}/javadoc"/>
+          <javadoc sourcepath="${src.dir}"
+-                  packagenames="persistence.antlr,oracle.toplink"
+-                  destdir="${build.dir}/javadoc"
+-                  classpath="${javaee.jar}"/>
+-
++                  packagenames="oracle.toplink.*"
++                  destdir="${build.dir}/javadoc">
++            <classpath>
++                <pathelement location="${javaee.jar}"/>
++                <pathelement location="/usr/share/java/antlr.jar"/>
++                <pathelement location="/usr/share/java/geronimo-jpa.jar"/>
++                <pathelement location="/usr/share/java/geronimo-jta.jar"/>
++            </classpath>
++	  </javadoc>
+     </target>
+ 
+     <!-- Run ANTLR. -->
+-    <target name="antlr-generate" depends="compile.persistence.antlr">
+-	    <java classname="persistence.antlr.Tool" fork="true" dir="${query.antlr.dir}">
++    <target name="antlr-generate">
++         <java classname="antlr.Tool" fork="true" dir="${query.antlr.dir}">
+          <arg value="EJBQLParser.g"/>
+          <classpath>
+-           <pathelement location="${component.classes.dir}"/>
++           <pathelement location="/usr/share/java/antlr.jar"/>
+          </classpath>
+        </java>
+     </target>
+@@ -238,7 +244,7 @@
+              <fileset dir="${component.classes.dir}">
+                 <include name="oracle/toplink/essentials/**/*.class"/>
+                 <include name="oracle/toplink/libraries/**/*.class"/>
+-                <include name="persistence/antlr/**/*.class"/>
++                <!--include name="persistence/antlr/**/*.class"/-->
+                 <include name="javax/persistence/**/*.class"/>
+                 <include name="orm*.xsd"/>
+                 <include name="persistence*.xsd"/>
+@@ -269,7 +275,7 @@
+             <fileset dir="${src.dir}">
+                 <include name="oracle/toplink/essentials/**/*.java"/>
+                 <include name="oracle/toplink/libraries/**/*.java"/>
+-                <include name="persistence/antlr/**/*.java"/>
++                <!--include name="persistence/antlr/**/*.java"/-->
+             </fileset>
+             <fileset dir="${basedir}/../persistence-api/src/java">
+                 <include name="javax/persistence/**/*.java"/>
+diff -Nru glassfish/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/antlr273/EJBQLParserBuilder.java glassfish-gil/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/antlr273/EJBQLParserBuilder.java
+--- glassfish/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/antlr273/EJBQLParserBuilder.java	2007-01-04 15:31:36.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/antlr273/EJBQLParserBuilder.java	2012-05-24 17:40:06.000000000 +0200
+@@ -21,7 +21,7 @@
+ // Copyright (c) 1998, 2007, Oracle. All rights reserved.  
+ package oracle.toplink.essentials.internal.parsing.ejbql.antlr273;
+ 
+-import persistence.antlr.TokenBuffer;
++import antlr.TokenBuffer;
+ 
+ //java imports
+ import java.io.StringReader;
+diff -Nru glassfish/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/EJBQLParser.java glassfish-gil/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/EJBQLParser.java
+--- glassfish/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/EJBQLParser.java	2007-01-04 15:31:35.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/oracle/toplink/essentials/internal/parsing/ejbql/EJBQLParser.java	2012-05-24 17:40:06.000000000 +0200
+@@ -25,19 +25,19 @@
+ import java.util.ArrayList;
+ 
+ // Third party (ANLTR) stuff
+-import persistence.antlr.ANTLRException;
+-import persistence.antlr.LLkParser;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.MismatchedTokenException;
+-import persistence.antlr.NoViableAltException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.ParserSharedInputState;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.Token;
+-import persistence.antlr.TokenBuffer;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamRecognitionException;
++import antlr.ANTLRException;
++import antlr.LLkParser;
++import antlr.MismatchedCharException;
++import antlr.MismatchedTokenException;
++import antlr.NoViableAltException;
++import antlr.NoViableAltForCharException;
++import antlr.ParserSharedInputState;
++import antlr.RecognitionException;
++import antlr.Token;
++import antlr.TokenBuffer;
++import antlr.TokenStream;
++import antlr.TokenStreamException;
++import antlr.TokenStreamRecognitionException;
+ 
+ //toplink imports
+ import oracle.toplink.essentials.exceptions.EJBQLException;
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ActionElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ActionElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ActionElement.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ActionElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,32 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class ActionElement extends AlternativeElement {
+-    protected String actionText;
+-    protected boolean isSemPred = false;
+-
+-
+-    public ActionElement(Grammar g, Token t) {
+-        super(g);
+-        actionText = t.getText();
+-        line = t.getLine();
+-        column = t.getColumn();
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        return " " + actionText + (isSemPred?"?":"");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexer.java	2006-02-08 22:31:08.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,2545 +0,0 @@
+-// $ANTLR : "action.g" -> "ActionLexer.java"$
+-
+-	package persistence.antlr.actions.cpp;
+-
+-import java.io.InputStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.TokenStreamRecognitionException;
+-import persistence.antlr.CharStreamException;
+-import persistence.antlr.CharStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import java.io.Reader;
+-import java.util.Hashtable;
+-import persistence.antlr.CharScanner;
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.ByteBuffer;
+-import persistence.antlr.CharBuffer;
+-import persistence.antlr.Token;
+-import persistence.antlr.CommonToken;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.ANTLRHashString;
+-import persistence.antlr.LexerSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.SemanticException;
+-
+-	import java.io.StringReader;
+-	import persistence.antlr.collections.impl.Vector;
+-	import persistence.antlr.*;
+-
+-/** Perform the following translations:
+-
+-    AST related translations
+-
+-	##				-> currentRule_AST
+-	#(x,y,z)		-> codeGenerator.getASTCreateString(vector-of(x,y,z))
+-	#[x]			-> codeGenerator.getASTCreateString(x)
+-	#x				-> codeGenerator.mapTreeId(x)
+-
+-	Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts.
+-
+-    Text related translations
+-
+-	$append(x)	  -> text.append(x)
+-	$setText(x)	  -> text.setLength(_begin); text.append(x)
+-	$getText		  -> new String(text.getBuffer(),_begin,text.length()-_begin)
+-	$setToken(x)  -> _token = x
+-	$setType(x)	  -> _ttype = x
+-   $FOLLOW(r)    -> FOLLOW set name for rule r (optional arg)
+-   $FIRST(r)     -> FIRST set name for rule r (optional arg)
+- */
+-public class ActionLexer extends persistence.antlr.CharScanner implements ActionLexerTokenTypes, TokenStream
+- {
+-
+-	protected RuleBlock currentRule;
+-	protected CodeGenerator generator;
+-	protected int lineOffset = 0;
+-	private Tool antlrTool;	// The ANTLR tool
+-	ActionTransInfo transInfo;
+-
+- 	public ActionLexer(String s, RuleBlock currentRule,
+-							 CodeGenerator generator,
+-							 ActionTransInfo transInfo )
+-	{
+-		this(new StringReader(s));
+-		this.currentRule = currentRule;
+-		this.generator = generator;
+-		this.transInfo = transInfo;
+-	}
+-
+-	public void setLineOffset(int lineOffset)
+-	{
+-		setLine(lineOffset);
+-	}
+-
+-	public void setTool(Tool tool)
+-	{
+-		this.antlrTool = tool;
+-	}
+-
+-	public void reportError(RecognitionException e)
+-	{
+-		antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn());
+-	}
+-
+-	public void reportError(String s)
+-	{
+-		antlrTool.error(s,getFilename(),getLine(),getColumn());
+-	}
+-
+-	public void reportWarning(String s)
+-	{
+-		if ( getFilename()==null )
+-			antlrTool.warning(s);
+-		else
+-			antlrTool.warning(s,getFilename(),getLine(),getColumn());
+-	}
+-public ActionLexer(InputStream in) {
+-	this(new ByteBuffer(in));
+-}
+-public ActionLexer(Reader in) {
+-	this(new CharBuffer(in));
+-}
+-public ActionLexer(InputBuffer ib) {
+-	this(new LexerSharedInputState(ib));
+-}
+-public ActionLexer(LexerSharedInputState state) {
+-	super(state);
+-	caseSensitiveLiterals = true;
+-	setCaseSensitive(true);
+-	literals = new Hashtable();
+-}
+-
+-public Token nextToken() throws TokenStreamException {
+-	Token theRetToken=null;
+-tryAgain:
+-	for (;;) {
+-		Token _token = null;
+-		int _ttype = Token.INVALID_TYPE;
+-		resetText();
+-		try {   // for char stream error handling
+-			try {   // for lexical error handling
+-				if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) {
+-					mACTION(true);
+-					theRetToken=_returnToken;
+-				}
+-				else {
+-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+-				_ttype = _returnToken.getType();
+-				_returnToken.setType(_ttype);
+-				return _returnToken;
+-			}
+-			catch (RecognitionException e) {
+-				throw new TokenStreamRecognitionException(e);
+-			}
+-		}
+-		catch (CharStreamException cse) {
+-			if ( cse instanceof CharStreamIOException ) {
+-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+-			}
+-			else {
+-				throw new TokenStreamException(cse.getMessage());
+-			}
+-		}
+-	}
+-}
+-
+-	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ACTION;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt502=0;
+-		_loop502:
+-		do {
+-			switch ( LA(1)) {
+-			case '#':
+-			{
+-				mAST_ITEM(false);
+-				break;
+-			}
+-			case '$':
+-			{
+-				mTEXT_ITEM(false);
+-				break;
+-			}
+-			default:
+-				if ((_tokenSet_0.member(LA(1)))) {
+-					mSTUFF(false);
+-				}
+-			else {
+-				if ( _cnt502>=1 ) { break _loop502; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			}
+-			_cnt502++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-/** stuff in between #(...) and #id items
+- * Allow the escaping of the # for C preprocessor stuff.
+- */
+-	protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STUFF;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '\n':
+-		{
+-			match('\n');
+-			newline();
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+-				mCOMMENT(false);
+-			}
+-			else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-				match("\r\n");
+-				newline();
+-			}
+-			else if ((LA(1)=='\\') && (LA(2)=='#') && (true)) {
+-				match('\\');
+-				match('#');
+-				text.setLength(_begin); text.append("#");
+-			}
+-			else if ((LA(1)=='/') && (_tokenSet_1.member(LA(2)))) {
+-				match('/');
+-				{
+-				match(_tokenSet_1);
+-				}
+-			}
+-			else if ((LA(1)=='\r') && (true) && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((_tokenSet_2.member(LA(1))) && (true) && (true)) {
+-				{
+-				match(_tokenSet_2);
+-				}
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_ITEM;
+-		int _saveIndex;
+-		Token t=null;
+-		Token id=null;
+-		Token ctor=null;
+-		
+-		if ((LA(1)=='#') && (LA(2)=='(')) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mTREE(true);
+-			t=_returnToken;
+-		}
+-		else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				mWS(false);
+-				break;
+-			}
+-			case ':':  case 'A':  case 'B':  case 'C':
+-			case 'D':  case 'E':  case 'F':  case 'G':
+-			case 'H':  case 'I':  case 'J':  case 'K':
+-			case 'L':  case 'M':  case 'N':  case 'O':
+-			case 'P':  case 'Q':  case 'R':  case 'S':
+-			case 'T':  case 'U':  case 'V':  case 'W':
+-			case 'X':  case 'Y':  case 'Z':  case '_':
+-			case 'a':  case 'b':  case 'c':  case 'd':
+-			case 'e':  case 'f':  case 'g':  case 'h':
+-			case 'i':  case 'j':  case 'k':  case 'l':
+-			case 'm':  case 'n':  case 'o':  case 'p':
+-			case 'q':  case 'r':  case 's':  case 't':
+-			case 'u':  case 'v':  case 'w':  case 'x':
+-			case 'y':  case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			mID(true);
+-			id=_returnToken;
+-			
+-						String idt = id.getText();
+-						String mapped = generator.mapTreeId(id.getText(), transInfo);
+-			
+-						// verify that it's not a preprocessor macro...
+-						if( mapped!=null && ! idt.equals( mapped ) )
+-						{
+-							text.setLength(_begin); text.append(mapped);
+-						}
+-						else
+-						{
+-							if(idt.equals("if") ||
+-								idt.equals("define") ||
+-								idt.equals("ifdef") ||
+-								idt.equals("ifndef") ||
+-								idt.equals("else") ||
+-								idt.equals("elif") ||
+-								idt.equals("endif") ||
+-								idt.equals("warning") ||
+-								idt.equals("error") ||
+-								idt.equals("ident") ||
+-								idt.equals("pragma") ||
+-								idt.equals("include"))
+-							{
+-								text.setLength(_begin); text.append("#"+idt);
+-							}
+-						}
+-					
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (true) && (true)) {
+-				mWS(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-			{
+-			if ((LA(1)=='=') && (true) && (true)) {
+-				mVAR_ASSIGN(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else if ((LA(1)=='#') && (LA(2)=='[')) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mAST_CONSTRUCTOR(true);
+-			ctor=_returnToken;
+-		}
+-		else if ((LA(1)=='#') && (LA(2)=='#')) {
+-			match("##");
+-			
+-						if( currentRule != null )
+-						{
+-							String r = currentRule.getRuleName()+"_AST";
+-							text.setLength(_begin); text.append(r);
+-			
+-							if ( transInfo!=null ) {
+-								transInfo.refRuleRoot=r;	// we ref root of tree
+-							}
+-						}
+-						else
+-						{
+-							reportWarning("\"##\" not valid in this context");
+-							text.setLength(_begin); text.append("##");
+-						}
+-					
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (true) && (true)) {
+-				mWS(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-			{
+-			if ((LA(1)=='=') && (true) && (true)) {
+-				mVAR_ASSIGN(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ITEM;
+-		int _saveIndex;
+-		Token a1=null;
+-		Token a2=null;
+-		Token a3=null;
+-		Token a4=null;
+-		Token a5=null;
+-		Token a6=null;
+-		
+-		if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='O')) {
+-			match("$FOLLOW");
+-			{
+-			if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a5=_returnToken;
+-				match(')');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			
+-						String rule = currentRule.getRuleName();
+-						if ( a5!=null ) {
+-							rule = a5.getText();
+-						}
+-						String setName = generator.getFOLLOWBitSet(rule, 1);
+-						// System.out.println("FOLLOW("+rule+")="+setName);
+-						if ( setName==null ) {
+-							reportError("$FOLLOW("+rule+")"+
+-										": unknown rule or bad lookahead computation");
+-						}
+-						else {
+-							text.setLength(_begin); text.append(setName);
+-						}
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) {
+-			match("$FIRST");
+-			{
+-			if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a6=_returnToken;
+-				match(')');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			
+-						String rule = currentRule.getRuleName();
+-						if ( a6!=null ) {
+-							rule = a6.getText();
+-						}
+-						String setName = generator.getFIRSTBitSet(rule, 1);
+-						// System.out.println("FIRST("+rule+")="+setName);
+-						if ( setName==null ) {
+-							reportError("$FIRST("+rule+")"+
+-										": unknown rule or bad lookahead computation");
+-						}
+-						else {
+-							text.setLength(_begin); text.append(setName);
+-						}
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='a')) {
+-			match("$append");
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				mWS(false);
+-				break;
+-			}
+-			case '(':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match('(');
+-			mTEXT_ARG(true);
+-			a1=_returnToken;
+-			match(')');
+-			
+-						String t = "text += "+a1.getText();
+-						text.setLength(_begin); text.append(t);
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='s')) {
+-			match("$set");
+-			{
+-			if ((LA(1)=='T') && (LA(2)=='e')) {
+-				match("Text");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a2=_returnToken;
+-				match(')');
+-				
+-								String t;
+-								t = "{ text.erase(_begin); text += "+a2.getText()+"; }";
+-								text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else if ((LA(1)=='T') && (LA(2)=='o')) {
+-				match("Token");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a3=_returnToken;
+-				match(')');
+-				
+-								String t="_token = "+a3.getText();
+-								text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else if ((LA(1)=='T') && (LA(2)=='y')) {
+-				match("Type");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a4=_returnToken;
+-				match(')');
+-				
+-								String t="_ttype = "+a4.getText();
+-								text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='g')) {
+-			match("$getText");
+-			
+-						text.setLength(_begin); text.append("text.substr(_begin,text.length()-_begin)");
+-					
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMENT;
+-		int _saveIndex;
+-		
+-		if ((LA(1)=='/') && (LA(2)=='/')) {
+-			mSL_COMMENT(false);
+-		}
+-		else if ((LA(1)=='/') && (LA(2)=='*')) {
+-			mML_COMMENT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STRING;
+-		int _saveIndex;
+-		
+-		match('"');
+-		{
+-		_loop599:
+-		do {
+-			if ((LA(1)=='\\')) {
+-				mESC(false);
+-			}
+-			else if ((_tokenSet_7.member(LA(1)))) {
+-				matchNot('"');
+-			}
+-			else {
+-				break _loop599;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('"');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CHAR;
+-		int _saveIndex;
+-		
+-		match('\'');
+-		{
+-		if ((LA(1)=='\\')) {
+-			mESC(false);
+-		}
+-		else if ((_tokenSet_8.member(LA(1)))) {
+-			matchNot('\'');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		match('\'');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE;
+-		int _saveIndex;
+-		Token t=null;
+-		Token t2=null;
+-		
+-			StringBuffer buf = new StringBuffer();
+-			int n=0;
+-			Vector terms = new Vector(10);
+-		
+-		
+-		_saveIndex=text.length();
+-		match('(');
+-		text.setLength(_saveIndex);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case '"':  case '#':  case '(':  case ':':
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case '[':  case '_':
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		mTREE_ELEMENT(true);
+-		text.setLength(_saveIndex);
+-		t=_returnToken;
+-		
+-					terms.appendElement(
+-						generator.processStringForASTConstructor(t.getText())
+-											 );
+-				
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ')':  case ',':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop528:
+-		do {
+-			if ((LA(1)==',')) {
+-				_saveIndex=text.length();
+-				match(',');
+-				text.setLength(_saveIndex);
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case '"':  case '#':  case '(':  case ':':
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':  case '[':  case '_':
+-				case 'a':  case 'b':  case 'c':  case 'd':
+-				case 'e':  case 'f':  case 'g':  case 'h':
+-				case 'i':  case 'j':  case 'k':  case 'l':
+-				case 'm':  case 'n':  case 'o':  case 'p':
+-				case 'q':  case 'r':  case 's':  case 't':
+-				case 'u':  case 'v':  case 'w':  case 'x':
+-				case 'y':  case 'z':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				_saveIndex=text.length();
+-				mTREE_ELEMENT(true);
+-				text.setLength(_saveIndex);
+-				t2=_returnToken;
+-				
+-								terms.appendElement(
+-									generator.processStringForASTConstructor(t2.getText())
+-														  );
+-							
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case ')':  case ',':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop528;
+-			}
+-			
+-		} while (true);
+-		}
+-		text.setLength(_begin); text.append(generator.getASTCreateString(terms));
+-		_saveIndex=text.length();
+-		match(')');
+-		text.setLength(_saveIndex);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt619=0;
+-		_loop619:
+-		do {
+-			if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)==' ') && (true) && (true)) {
+-				match(' ');
+-			}
+-			else if ((LA(1)=='\t') && (true) && (true)) {
+-				match('\t');
+-			}
+-			else if ((LA(1)=='\r') && (true) && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((LA(1)=='\n') && (true) && (true)) {
+-				match('\n');
+-				newline();
+-			}
+-			else {
+-				if ( _cnt619>=1 ) { break _loop619; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt619++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			matchRange('a','z');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':
+-		{
+-			matchRange('A','Z');
+-			break;
+-		}
+-		case '_':
+-		{
+-			match('_');
+-			break;
+-		}
+-		case ':':
+-		{
+-			match("::");
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop585:
+-		do {
+-			if ((_tokenSet_9.member(LA(1))) && (true) && (true)) {
+-				{
+-				switch ( LA(1)) {
+-				case 'a':  case 'b':  case 'c':  case 'd':
+-				case 'e':  case 'f':  case 'g':  case 'h':
+-				case 'i':  case 'j':  case 'k':  case 'l':
+-				case 'm':  case 'n':  case 'o':  case 'p':
+-				case 'q':  case 'r':  case 's':  case 't':
+-				case 'u':  case 'v':  case 'w':  case 'x':
+-				case 'y':  case 'z':
+-				{
+-					matchRange('a','z');
+-					break;
+-				}
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':
+-				{
+-					matchRange('A','Z');
+-					break;
+-				}
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':
+-				{
+-					matchRange('0','9');
+-					break;
+-				}
+-				case '_':
+-				{
+-					match('_');
+-					break;
+-				}
+-				case ':':
+-				{
+-					match("::");
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop585;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = VAR_ASSIGN;
+-		int _saveIndex;
+-		
+-		match('=');
+-		
+-					// inform the code generator that an assignment was done to
+-					// AST root for the rule if invoker set refRuleRoot.
+-					if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) {
+-						transInfo.assignToRoot=true;
+-					}
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_CONSTRUCTOR;
+-		int _saveIndex;
+-		Token x=null;
+-		Token y=null;
+-		
+-		_saveIndex=text.length();
+-		match('[');
+-		text.setLength(_saveIndex);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case '"':  case '#':  case '(':  case '0':
+-		case '1':  case '2':  case '3':  case '4':
+-		case '5':  case '6':  case '7':  case '8':
+-		case '9':  case ':':  case 'A':  case 'B':
+-		case 'C':  case 'D':  case 'E':  case 'F':
+-		case 'G':  case 'H':  case 'I':  case 'J':
+-		case 'K':  case 'L':  case 'M':  case 'N':
+-		case 'O':  case 'P':  case 'Q':  case 'R':
+-		case 'S':  case 'T':  case 'U':  case 'V':
+-		case 'W':  case 'X':  case 'Y':  case 'Z':
+-		case '[':  case '_':  case 'a':  case 'b':
+-		case 'c':  case 'd':  case 'e':  case 'f':
+-		case 'g':  case 'h':  case 'i':  case 'j':
+-		case 'k':  case 'l':  case 'm':  case 'n':
+-		case 'o':  case 'p':  case 'q':  case 'r':
+-		case 's':  case 't':  case 'u':  case 'v':
+-		case 'w':  case 'x':  case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		mAST_CTOR_ELEMENT(true);
+-		text.setLength(_saveIndex);
+-		x=_returnToken;
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ',':  case ']':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ',':
+-		{
+-			_saveIndex=text.length();
+-			match(',');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case '"':  case '#':  case '(':  case '0':
+-			case '1':  case '2':  case '3':  case '4':
+-			case '5':  case '6':  case '7':  case '8':
+-			case '9':  case ':':  case 'A':  case 'B':
+-			case 'C':  case 'D':  case 'E':  case 'F':
+-			case 'G':  case 'H':  case 'I':  case 'J':
+-			case 'K':  case 'L':  case 'M':  case 'N':
+-			case 'O':  case 'P':  case 'Q':  case 'R':
+-			case 'S':  case 'T':  case 'U':  case 'V':
+-			case 'W':  case 'X':  case 'Y':  case 'Z':
+-			case '[':  case '_':  case 'a':  case 'b':
+-			case 'c':  case 'd':  case 'e':  case 'f':
+-			case 'g':  case 'h':  case 'i':  case 'j':
+-			case 'k':  case 'l':  case 'm':  case 'n':
+-			case 'o':  case 'p':  case 'q':  case 'r':
+-			case 's':  case 't':  case 'u':  case 'v':
+-			case 'w':  case 'x':  case 'y':  case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			_saveIndex=text.length();
+-			mAST_CTOR_ELEMENT(true);
+-			text.setLength(_saveIndex);
+-			y=_returnToken;
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ']':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			break;
+-		}
+-		case ']':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		match(']');
+-		text.setLength(_saveIndex);
+-		
+-		//			System.out.println("AST_CONSTRUCTOR: "+((x==null)?"null":x.getText())+
+-		//									 ", "+((y==null)?"null":y.getText()));
+-					String ys = generator.processStringForASTConstructor(x.getText());
+-		
+-					// the second does not need processing coz it's a string
+-					// (eg second param of astFactory.create(x,y)
+-					if ( y!=null )
+-						ys += ","+y.getText();
+-		
+-					text.setLength(_begin); text.append( generator.getASTCreateString(null,ys) );
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			mWS(false);
+-			break;
+-		}
+-		case '"':  case '$':  case '\'':  case '+':
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':  case ':':  case 'A':
+-		case 'B':  case 'C':  case 'D':  case 'E':
+-		case 'F':  case 'G':  case 'H':  case 'I':
+-		case 'J':  case 'K':  case 'L':  case 'M':
+-		case 'N':  case 'O':  case 'P':  case 'Q':
+-		case 'R':  case 'S':  case 'T':  case 'U':
+-		case 'V':  case 'W':  case 'X':  case 'Y':
+-		case 'Z':  case '_':  case 'a':  case 'b':
+-		case 'c':  case 'd':  case 'e':  case 'f':
+-		case 'g':  case 'h':  case 'i':  case 'j':
+-		case 'k':  case 'l':  case 'm':  case 'n':
+-		case 'o':  case 'p':  case 'q':  case 'r':
+-		case 's':  case 't':  case 'u':  case 'v':
+-		case 'w':  case 'x':  case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		int _cnt559=0;
+-		_loop559:
+-		do {
+-			if ((_tokenSet_10.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mTEXT_ARG_ELEMENT(false);
+-				{
+-				if ((_tokenSet_4.member(LA(1))) && (_tokenSet_11.member(LA(2))) && (true)) {
+-					mWS(false);
+-				}
+-				else if ((_tokenSet_11.member(LA(1))) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else {
+-				if ( _cnt559>=1 ) { break _loop559; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt559++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		boolean was_mapped;
+-		
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			mTREE(false);
+-			break;
+-		}
+-		case '[':
+-		{
+-			mAST_CONSTRUCTOR(false);
+-			break;
+-		}
+-		case ':':  case 'A':  case 'B':  case 'C':
+-		case 'D':  case 'E':  case 'F':  case 'G':
+-		case 'H':  case 'I':  case 'J':  case 'K':
+-		case 'L':  case 'M':  case 'N':  case 'O':
+-		case 'P':  case 'Q':  case 'R':  case 'S':
+-		case 'T':  case 'U':  case 'V':  case 'W':
+-		case 'X':  case 'Y':  case 'Z':  case '_':
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='#') && (LA(2)=='(')) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				mTREE(false);
+-			}
+-			else if ((LA(1)=='#') && (LA(2)=='[')) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				mAST_CONSTRUCTOR(false);
+-			}
+-			else if ((LA(1)=='#') && (_tokenSet_12.member(LA(2)))) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				was_mapped=mID_ELEMENT(true);
+-				id=_returnToken;
+-					// RK: I have a queer feeling that this maptreeid is redundant..
+-							if ( ! was_mapped )
+-							{
+-								String t = generator.mapTreeId(id.getText(), null);
+-				//				System.out.println("mapped: "+id.getText()+" -> "+t);
+-								if ( t!=null ) {
+-									text.setLength(_begin); text.append(t);
+-								}
+-							}
+-						
+-			}
+-			else if ((LA(1)=='#') && (LA(2)=='#')) {
+-				match("##");
+-				
+-							if( currentRule != null )
+-							{
+-								String t = currentRule.getRuleName()+"_AST";
+-								text.setLength(_begin); text.append(t);
+-							}
+-							else
+-							{
+-								reportError("\"##\" not valid in this context");
+-								text.setLength(_begin); text.append("##");
+-							}
+-						
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-/** An ID_ELEMENT can be a func call, array ref, simple var,
+- *  or AST label ref.
+- */
+-	protected final boolean  mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		boolean mapped=false;
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		{
+-		if ((_tokenSet_4.member(LA(1))) && (_tokenSet_13.member(LA(2))) && (true)) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_13.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '(':  case '<':
+-		{
+-			{
+-			switch ( LA(1)) {
+-			case '<':
+-			{
+-				match('<');
+-				{
+-				_loop542:
+-				do {
+-					if ((_tokenSet_14.member(LA(1)))) {
+-						matchNot('>');
+-					}
+-					else {
+-						break _loop542;
+-					}
+-					
+-				} while (true);
+-				}
+-				match('>');
+-				break;
+-			}
+-			case '(':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match('(');
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_15.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-			}
+-			else if ((_tokenSet_15.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '"':  case '#':  case '\'':  case '(':
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':  case ':':  case 'A':
+-			case 'B':  case 'C':  case 'D':  case 'E':
+-			case 'F':  case 'G':  case 'H':  case 'I':
+-			case 'J':  case 'K':  case 'L':  case 'M':
+-			case 'N':  case 'O':  case 'P':  case 'Q':
+-			case 'R':  case 'S':  case 'T':  case 'U':
+-			case 'V':  case 'W':  case 'X':  case 'Y':
+-			case 'Z':  case '[':  case '_':  case 'a':
+-			case 'b':  case 'c':  case 'd':  case 'e':
+-			case 'f':  case 'g':  case 'h':  case 'i':
+-			case 'j':  case 'k':  case 'l':  case 'm':
+-			case 'n':  case 'o':  case 'p':  case 'q':
+-			case 'r':  case 's':  case 't':  case 'u':
+-			case 'v':  case 'w':  case 'x':  case 'y':
+-			case 'z':
+-			{
+-				mARG(false);
+-				{
+-				_loop547:
+-				do {
+-					if ((LA(1)==',')) {
+-						match(',');
+-						{
+-						switch ( LA(1)) {
+-						case '\t':  case '\n':  case '\r':  case ' ':
+-						{
+-							_saveIndex=text.length();
+-							mWS(false);
+-							text.setLength(_saveIndex);
+-							break;
+-						}
+-						case '"':  case '#':  case '\'':  case '(':
+-						case '0':  case '1':  case '2':  case '3':
+-						case '4':  case '5':  case '6':  case '7':
+-						case '8':  case '9':  case ':':  case 'A':
+-						case 'B':  case 'C':  case 'D':  case 'E':
+-						case 'F':  case 'G':  case 'H':  case 'I':
+-						case 'J':  case 'K':  case 'L':  case 'M':
+-						case 'N':  case 'O':  case 'P':  case 'Q':
+-						case 'R':  case 'S':  case 'T':  case 'U':
+-						case 'V':  case 'W':  case 'X':  case 'Y':
+-						case 'Z':  case '[':  case '_':  case 'a':
+-						case 'b':  case 'c':  case 'd':  case 'e':
+-						case 'f':  case 'g':  case 'h':  case 'i':
+-						case 'j':  case 'k':  case 'l':  case 'm':
+-						case 'n':  case 'o':  case 'p':  case 'q':
+-						case 'r':  case 's':  case 't':  case 'u':
+-						case 'v':  case 'w':  case 'x':  case 'y':
+-						case 'z':
+-						{
+-							break;
+-						}
+-						default:
+-						{
+-							throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-						}
+-						}
+-						}
+-						mARG(false);
+-					}
+-					else {
+-						break _loop547;
+-					}
+-					
+-				} while (true);
+-				}
+-				break;
+-			}
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match(')');
+-			break;
+-		}
+-		case '[':
+-		{
+-			{
+-			int _cnt552=0;
+-			_loop552:
+-			do {
+-				if ((LA(1)=='[')) {
+-					match('[');
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case '"':  case '#':  case '\'':  case '(':
+-					case '0':  case '1':  case '2':  case '3':
+-					case '4':  case '5':  case '6':  case '7':
+-					case '8':  case '9':  case ':':  case 'A':
+-					case 'B':  case 'C':  case 'D':  case 'E':
+-					case 'F':  case 'G':  case 'H':  case 'I':
+-					case 'J':  case 'K':  case 'L':  case 'M':
+-					case 'N':  case 'O':  case 'P':  case 'Q':
+-					case 'R':  case 'S':  case 'T':  case 'U':
+-					case 'V':  case 'W':  case 'X':  case 'Y':
+-					case 'Z':  case '[':  case '_':  case 'a':
+-					case 'b':  case 'c':  case 'd':  case 'e':
+-					case 'f':  case 'g':  case 'h':  case 'i':
+-					case 'j':  case 'k':  case 'l':  case 'm':
+-					case 'n':  case 'o':  case 'p':  case 'q':
+-					case 'r':  case 's':  case 't':  case 'u':
+-					case 'v':  case 'w':  case 'x':  case 'y':
+-					case 'z':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					mARG(false);
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case ']':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					match(']');
+-				}
+-				else {
+-					if ( _cnt552>=1 ) { break _loop552; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				_cnt552++;
+-			} while (true);
+-			}
+-			break;
+-		}
+-		case '.':
+-		{
+-			match('.');
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		case ':':
+-		{
+-			match("::");
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='-') && (LA(2)=='>') && (_tokenSet_12.member(LA(3)))) {
+-				match("->");
+-				mID_ELEMENT(false);
+-			}
+-			else if ((_tokenSet_16.member(LA(1))) && (true) && (true)) {
+-				
+-								mapped = true;
+-								String t = generator.mapTreeId(id.getText(), transInfo);
+-				//				System.out.println("mapped: "+id.getText()+" -> "+t);
+-								if ( t!=null ) {
+-									text.setLength(_begin); text.append(t);
+-								}
+-							
+-				{
+-				if (((_tokenSet_17.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) {
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						mWS(false);
+-						break;
+-					}
+-					case '=':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					mVAR_ASSIGN(false);
+-				}
+-				else if ((_tokenSet_18.member(LA(1))) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-		return mapped;
+-	}
+-	
+-/** The arguments of a #[...] constructor are text, token type,
+- *  or a tree.
+- */
+-	protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_CTOR_ELEMENT;
+-		int _saveIndex;
+-		
+-		if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-			mSTRING(false);
+-		}
+-		else if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			mTREE_ELEMENT(false);
+-		}
+-		else if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-			mINT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt610=0;
+-		_loop610:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt610>=1 ) { break _loop610; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt610++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ARG;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			mINT_OR_FLOAT(false);
+-			break;
+-		}
+-		default:
+-			if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				mTREE_ELEMENT(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				mSTRING(false);
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop580:
+-		do {
+-			if ((_tokenSet_20.member(LA(1))) && (_tokenSet_21.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '*':  case '+':  case '-':  case '/':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case '+':
+-				{
+-					match('+');
+-					break;
+-				}
+-				case '-':
+-				{
+-					match('-');
+-					break;
+-				}
+-				case '*':
+-				{
+-					match('*');
+-					break;
+-				}
+-				case '/':
+-				{
+-					match('/');
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '"':  case '#':  case '\'':  case '(':
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':  case ':':  case 'A':
+-				case 'B':  case 'C':  case 'D':  case 'E':
+-				case 'F':  case 'G':  case 'H':  case 'I':
+-				case 'J':  case 'K':  case 'L':  case 'M':
+-				case 'N':  case 'O':  case 'P':  case 'Q':
+-				case 'R':  case 'S':  case 'T':  case 'U':
+-				case 'V':  case 'W':  case 'X':  case 'Y':
+-				case 'Z':  case '[':  case '_':  case 'a':
+-				case 'b':  case 'c':  case 'd':  case 'e':
+-				case 'f':  case 'g':  case 'h':  case 'i':
+-				case 'j':  case 'k':  case 'l':  case 'm':
+-				case 'n':  case 'o':  case 'p':  case 'q':
+-				case 'r':  case 's':  case 't':  case 'u':
+-				case 'v':  case 'w':  case 'x':  case 'y':
+-				case 'z':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				mARG(false);
+-			}
+-			else {
+-				break _loop580;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG_ELEMENT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case ':':  case 'A':  case 'B':  case 'C':
+-		case 'D':  case 'E':  case 'F':  case 'G':
+-		case 'H':  case 'I':  case 'J':  case 'K':
+-		case 'L':  case 'M':  case 'N':  case 'O':
+-		case 'P':  case 'Q':  case 'R':  case 'S':
+-		case 'T':  case 'U':  case 'V':  case 'W':
+-		case 'X':  case 'Y':  case 'Z':  case '_':
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			mINT_OR_FLOAT(false);
+-			break;
+-		}
+-		case '$':
+-		{
+-			mTEXT_ITEM(false);
+-			break;
+-		}
+-		case '+':
+-		{
+-			match('+');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG_ID_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		{
+-		if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && (true)) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_22.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			match('(');
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-			}
+-			else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			_loop568:
+-			do {
+-				if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-					mTEXT_ARG(false);
+-					{
+-					_loop567:
+-					do {
+-						if ((LA(1)==',')) {
+-							match(',');
+-							mTEXT_ARG(false);
+-						}
+-						else {
+-							break _loop567;
+-						}
+-						
+-					} while (true);
+-					}
+-				}
+-				else {
+-					break _loop568;
+-				}
+-				
+-			} while (true);
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match(')');
+-			break;
+-		}
+-		case '[':
+-		{
+-			{
+-			int _cnt573=0;
+-			_loop573:
+-			do {
+-				if ((LA(1)=='[')) {
+-					match('[');
+-					{
+-					if ((_tokenSet_4.member(LA(1))) && (_tokenSet_24.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-					}
+-					else if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-					}
+-					else {
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					
+-					}
+-					mTEXT_ARG(false);
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case ']':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					match(']');
+-				}
+-				else {
+-					if ( _cnt573>=1 ) { break _loop573; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				_cnt573++;
+-			} while (true);
+-			}
+-			break;
+-		}
+-		case '.':
+-		{
+-			match('.');
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '-':
+-		{
+-			match("->");
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		default:
+-			if ((LA(1)==':') && (LA(2)==':') && (_tokenSet_12.member(LA(3)))) {
+-				match("::");
+-				mTEXT_ARG_ID_ELEMENT(false);
+-			}
+-			else if ((_tokenSet_11.member(LA(1))) && (true) && (true)) {
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT_OR_FLOAT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt613=0;
+-		_loop613:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt613>=1 ) { break _loop613; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt613++;
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='L') && (_tokenSet_26.member(LA(2))) && (true)) {
+-			match('L');
+-		}
+-		else if ((LA(1)=='l') && (_tokenSet_26.member(LA(2))) && (true)) {
+-			match('l');
+-		}
+-		else if ((LA(1)=='.')) {
+-			match('.');
+-			{
+-			_loop616:
+-			do {
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_26.member(LA(2))) && (true)) {
+-					mDIGIT(false);
+-				}
+-				else {
+-					break _loop616;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		else if ((_tokenSet_26.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SL_COMMENT;
+-		int _saveIndex;
+-		
+-		match("//");
+-		{
+-		_loop590:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop590;
+-			if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop590;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-			match("\r\n");
+-		}
+-		else if ((LA(1)=='\n')) {
+-			match('\n');
+-		}
+-		else if ((LA(1)=='\r') && (true) && (true)) {
+-			match('\r');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		newline();
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ML_COMMENT;
+-		int _saveIndex;
+-		
+-		match("/*");
+-		{
+-		_loop594:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop594;
+-			if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\n');
+-				newline();
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop594;
+-			}
+-			
+-		} while (true);
+-		}
+-		match("*/");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ESC;
+-		int _saveIndex;
+-		
+-		match('\\');
+-		{
+-		switch ( LA(1)) {
+-		case 'n':
+-		{
+-			match('n');
+-			break;
+-		}
+-		case 'r':
+-		{
+-			match('r');
+-			break;
+-		}
+-		case 't':
+-		{
+-			match('t');
+-			break;
+-		}
+-		case 'v':
+-		{
+-			match('v');
+-			break;
+-		}
+-		case 'b':
+-		{
+-			match('b');
+-			break;
+-		}
+-		case 'f':
+-		{
+-			match('f');
+-			break;
+-		}
+-		case '"':
+-		{
+-			match('"');
+-			break;
+-		}
+-		case '\'':
+-		{
+-			match('\'');
+-			break;
+-		}
+-		case '\\':
+-		{
+-			match('\\');
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		{
+-			{
+-			matchRange('0','3');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mDIGIT(false);
+-				{
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-					mDIGIT(false);
+-				}
+-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case '4':  case '5':  case '6':  case '7':
+-		{
+-			{
+-			matchRange('4','7');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mDIGIT(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = DIGIT;
+-		int _saveIndex;
+-		
+-		matchRange('0','9');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = new long[8];
+-		data[0]=-103079215112L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = new long[8];
+-		data[0]=-145135534866440L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = new long[8];
+-		data[0]=-141407503262728L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = { 288230380446688768L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = { 1103806604800L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	private static final long[] mk_tokenSet_6() {
+-		long[] data = { 576189812881499648L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+-	private static final long[] mk_tokenSet_7() {
+-		long[] data = new long[8];
+-		data[0]=-17179869192L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+-	private static final long[] mk_tokenSet_8() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+-	private static final long[] mk_tokenSet_9() {
+-		long[] data = { 576179277326712832L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
+-	private static final long[] mk_tokenSet_10() {
+-		long[] data = { 576188709074894848L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
+-	private static final long[] mk_tokenSet_11() {
+-		long[] data = { 576208504579171840L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11());
+-	private static final long[] mk_tokenSet_12() {
+-		long[] data = { 288230376151711744L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12());
+-	private static final long[] mk_tokenSet_13() {
+-		long[] data = { 3747275269732312576L, 671088640L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13());
+-	private static final long[] mk_tokenSet_14() {
+-		long[] data = new long[8];
+-		data[0]=-4611686018427387912L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14());
+-	private static final long[] mk_tokenSet_15() {
+-		long[] data = { 576183181451994624L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15());
+-	private static final long[] mk_tokenSet_16() {
+-		long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16());
+-	private static final long[] mk_tokenSet_17() {
+-		long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17());
+-	private static final long[] mk_tokenSet_18() {
+-		long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18());
+-	private static final long[] mk_tokenSet_19() {
+-		long[] data = { 288231527202947072L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19());
+-	private static final long[] mk_tokenSet_20() {
+-		long[] data = { 189120294954496L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20());
+-	private static final long[] mk_tokenSet_21() {
+-		long[] data = { 576370098428716544L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21());
+-	private static final long[] mk_tokenSet_22() {
+-		long[] data = { 576315157207066112L, 576460746666278910L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22());
+-	private static final long[] mk_tokenSet_23() {
+-		long[] data = { 576190912393127424L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23());
+-	private static final long[] mk_tokenSet_24() {
+-		long[] data = { 576188713369871872L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24());
+-	private static final long[] mk_tokenSet_25() {
+-		long[] data = { 576459193230304768L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25());
+-	private static final long[] mk_tokenSet_26() {
+-		long[] data = { 576388824486127104L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_26 = new BitSet(mk_tokenSet_26());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexerTokenTypes.java glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexerTokenTypes.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexerTokenTypes.java	2006-02-08 22:31:09.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/cpp/ActionLexerTokenTypes.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,33 +0,0 @@
+-// $ANTLR : "action.g" -> "ActionLexer.java"$
+-
+-	package persistence.antlr.actions.cpp;
+-
+-public interface ActionLexerTokenTypes {
+-	int EOF = 1;
+-	int NULL_TREE_LOOKAHEAD = 3;
+-	int ACTION = 4;
+-	int STUFF = 5;
+-	int AST_ITEM = 6;
+-	int TEXT_ITEM = 7;
+-	int TREE = 8;
+-	int TREE_ELEMENT = 9;
+-	int AST_CONSTRUCTOR = 10;
+-	int AST_CTOR_ELEMENT = 11;
+-	int ID_ELEMENT = 12;
+-	int TEXT_ARG = 13;
+-	int TEXT_ARG_ELEMENT = 14;
+-	int TEXT_ARG_ID_ELEMENT = 15;
+-	int ARG = 16;
+-	int ID = 17;
+-	int VAR_ASSIGN = 18;
+-	int COMMENT = 19;
+-	int SL_COMMENT = 20;
+-	int ML_COMMENT = 21;
+-	int CHAR = 22;
+-	int STRING = 23;
+-	int ESC = 24;
+-	int DIGIT = 25;
+-	int INT = 26;
+-	int INT_OR_FLOAT = 27;
+-	int WS = 28;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexer.java	2006-02-08 22:31:09.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,2574 +0,0 @@
+-// $ANTLR : "action.g" -> "ActionLexer.java"$
+-
+-	package persistence.antlr.actions.csharp;
+-
+-import java.io.InputStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.TokenStreamRecognitionException;
+-import persistence.antlr.CharStreamException;
+-import persistence.antlr.CharStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import java.io.Reader;
+-import java.util.Hashtable;
+-import persistence.antlr.CharScanner;
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.ByteBuffer;
+-import persistence.antlr.CharBuffer;
+-import persistence.antlr.Token;
+-import persistence.antlr.CommonToken;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.ANTLRHashString;
+-import persistence.antlr.LexerSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.SemanticException;
+-
+-	import java.io.StringReader;
+-	import persistence.antlr.collections.impl.Vector;
+-	import persistence.antlr.*;
+-
+-/** Perform the following translations:
+-
+-    AST related translations
+-
+-	##				-> currentRule_AST
+-	#(x,y,z)		-> codeGenerator.getASTCreateString(vector-of(x,y,z))
+-	#[x]			-> codeGenerator.getASTCreateString(x)
+-	#x				-> codeGenerator.mapTreeId(x)
+-
+-	Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts.
+-
+-    Text related translations
+-
+-	$append(x)	  -> text.append(x)
+-	$setText(x)	  -> text.setLength(_begin); text.append(x)
+-	$getText	  -> new String(text.getBuffer(),_begin,text.length()-_begin)
+-	$setToken(x)  -> _token = x
+-	$setType(x)	  -> _ttype = x
+-    $FOLLOW(r)    -> FOLLOW set name for rule r (optional arg)
+-    $FIRST(r)     -> FIRST set name for rule r (optional arg)
+- */
+-public class ActionLexer extends persistence.antlr.CharScanner implements ActionLexerTokenTypes, TokenStream
+- {
+-
+-	protected RuleBlock currentRule;
+-	protected CodeGenerator generator;
+-	protected int lineOffset = 0;
+-	private Tool antlrTool;	// The ANTLR tool
+-	ActionTransInfo transInfo;
+-
+- 	public ActionLexer( String s, RuleBlock currentRule,
+-						CodeGenerator generator,
+-						ActionTransInfo transInfo ) 
+-	{
+-		this(new StringReader(s));
+-		this.currentRule = currentRule;
+-		this.generator = generator;
+-		this.transInfo = transInfo;
+-	}
+-
+-	public void setLineOffset(int lineOffset) 
+-	{
+-		setLine(lineOffset);
+-	}
+-
+-	public void setTool(Tool tool) 
+-	{
+-		this.antlrTool = tool;
+-	}
+-
+-	public void reportError(RecognitionException e)
+-	{
+-		antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn());
+-	}
+-
+-	public void reportError(String s)
+-	{
+-		antlrTool.error(s,getFilename(),getLine(),getColumn());
+-	}
+-
+-	public void reportWarning(String s)
+-	{
+-		if ( getFilename()==null )
+-			antlrTool.warning(s);
+-		else
+-			antlrTool.warning(s,getFilename(),getLine(),getColumn());
+-	}
+-public ActionLexer(InputStream in) {
+-	this(new ByteBuffer(in));
+-}
+-public ActionLexer(Reader in) {
+-	this(new CharBuffer(in));
+-}
+-public ActionLexer(InputBuffer ib) {
+-	this(new LexerSharedInputState(ib));
+-}
+-public ActionLexer(LexerSharedInputState state) {
+-	super(state);
+-	caseSensitiveLiterals = true;
+-	setCaseSensitive(true);
+-	literals = new Hashtable();
+-}
+-
+-public Token nextToken() throws TokenStreamException {
+-	Token theRetToken=null;
+-tryAgain:
+-	for (;;) {
+-		Token _token = null;
+-		int _ttype = Token.INVALID_TYPE;
+-		resetText();
+-		try {   // for char stream error handling
+-			try {   // for lexical error handling
+-				if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) {
+-					mACTION(true);
+-					theRetToken=_returnToken;
+-				}
+-				else {
+-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+-				_ttype = _returnToken.getType();
+-				_returnToken.setType(_ttype);
+-				return _returnToken;
+-			}
+-			catch (RecognitionException e) {
+-				throw new TokenStreamRecognitionException(e);
+-			}
+-		}
+-		catch (CharStreamException cse) {
+-			if ( cse instanceof CharStreamIOException ) {
+-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+-			}
+-			else {
+-				throw new TokenStreamException(cse.getMessage());
+-			}
+-		}
+-	}
+-}
+-
+-	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ACTION;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt623=0;
+-		_loop623:
+-		do {
+-			switch ( LA(1)) {
+-			case '#':
+-			{
+-				mAST_ITEM(false);
+-				break;
+-			}
+-			case '$':
+-			{
+-				mTEXT_ITEM(false);
+-				break;
+-			}
+-			default:
+-				if ((_tokenSet_0.member(LA(1)))) {
+-					mSTUFF(false);
+-				}
+-			else {
+-				if ( _cnt623>=1 ) { break _loop623; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			}
+-			_cnt623++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-/** stuff in between #(...) and #id items
+- * Allow the escaping of the # for C# preprocessor stuff.
+- */
+-	protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STUFF;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '\n':
+-		{
+-			match('\n');
+-			newline();
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+-				mCOMMENT(false);
+-			}
+-			else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-				match("\r\n");
+-				newline();
+-			}
+-			else if ((LA(1)=='\\') && (LA(2)=='#') && (true)) {
+-				match('\\');
+-				match('#');
+-				text.setLength(_begin); text.append("#");
+-			}
+-			else if ((LA(1)=='/') && (_tokenSet_1.member(LA(2)))) {
+-				match('/');
+-				{
+-				match(_tokenSet_1);
+-				}
+-			}
+-			else if ((LA(1)=='\r') && (true) && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((_tokenSet_2.member(LA(1))) && (true) && (true)) {
+-				{
+-				match(_tokenSet_2);
+-				}
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_ITEM;
+-		int _saveIndex;
+-		Token t=null;
+-		Token id=null;
+-		Token ctor=null;
+-		
+-		if ((LA(1)=='#') && (LA(2)=='(')) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mTREE(true);
+-			t=_returnToken;
+-		}
+-		else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				mWS(false);
+-				break;
+-			}
+-			case 'A':  case 'B':  case 'C':  case 'D':
+-			case 'E':  case 'F':  case 'G':  case 'H':
+-			case 'I':  case 'J':  case 'K':  case 'L':
+-			case 'M':  case 'N':  case 'O':  case 'P':
+-			case 'Q':  case 'R':  case 'S':  case 'T':
+-			case 'U':  case 'V':  case 'W':  case 'X':
+-			case 'Y':  case 'Z':  case '_':  case 'a':
+-			case 'b':  case 'c':  case 'd':  case 'e':
+-			case 'f':  case 'g':  case 'h':  case 'i':
+-			case 'j':  case 'k':  case 'l':  case 'm':
+-			case 'n':  case 'o':  case 'p':  case 'q':
+-			case 'r':  case 's':  case 't':  case 'u':
+-			case 'v':  case 'w':  case 'x':  case 'y':
+-			case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			mID(true);
+-			id=_returnToken;
+-			
+-						String idt = id.getText();
+-						String mapped = generator.mapTreeId(id.getText(), transInfo);
+-			
+-						// verify that it's not a preprocessor macro...
+-						if ( (mapped != null) && !idt.equals(mapped) )
+-						{
+-							text.setLength(_begin); text.append(mapped);
+-						}
+-						else
+-						{
+-							if (idt.equals("define")	||
+-								idt.equals("undef")		||
+-								idt.equals("if")		||
+-								idt.equals("elif")		||
+-								idt.equals("else")		||
+-								idt.equals("endif")		||
+-								idt.equals("line")		||
+-								idt.equals("error")		||
+-								idt.equals("warning")	||
+-								idt.equals("region")	||
+-								idt.equals("endregion"))
+-							{
+-								text.setLength(_begin); text.append("#"+idt);
+-							}
+-						}
+-					
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (true) && (true)) {
+-				mWS(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-			{
+-			if ((LA(1)=='=') && (true) && (true)) {
+-				mVAR_ASSIGN(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else if ((LA(1)=='#') && (LA(2)=='[')) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mAST_CONSTRUCTOR(true);
+-			ctor=_returnToken;
+-		}
+-		else if ((LA(1)=='#') && (LA(2)=='#')) {
+-			match("##");
+-			
+-						if( currentRule != null )
+-						{
+-							String r = currentRule.getRuleName()+"_AST";
+-							text.setLength(_begin); text.append(r);
+-			
+-							if ( transInfo!=null )  {
+-								transInfo.refRuleRoot=r;	// we ref root of tree
+-							}
+-						}
+-						else
+-						{
+-							reportWarning("\"##\" not valid in this context");
+-							text.setLength(_begin); text.append("##");
+-						}
+-					
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (true) && (true)) {
+-				mWS(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-			{
+-			if ((LA(1)=='=') && (true) && (true)) {
+-				mVAR_ASSIGN(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ITEM;
+-		int _saveIndex;
+-		Token a1=null;
+-		Token a2=null;
+-		Token a3=null;
+-		Token a4=null;
+-		Token a5=null;
+-		Token a6=null;
+-		
+-		if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='O')) {
+-			match("$FOLLOW");
+-			{
+-			if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a5=_returnToken;
+-				match(')');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			
+-						String rule = currentRule.getRuleName();
+-						if ( a5!=null ) {
+-							rule = a5.getText();
+-						}
+-						String setName = generator.getFOLLOWBitSet(rule, 1);
+-						// System.out.println("FOLLOW("+rule+")="+setName);
+-						if ( setName==null ) {
+-							reportError("$FOLLOW("+rule+")"+
+-										": unknown rule or bad lookahead computation");
+-						}
+-						else {
+-							text.setLength(_begin); text.append(setName);
+-						}
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) {
+-			match("$FIRST");
+-			{
+-			if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a6=_returnToken;
+-				match(')');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			
+-						String rule = currentRule.getRuleName();
+-						if ( a6!=null ) {
+-							rule = a6.getText();
+-						}
+-						String setName = generator.getFIRSTBitSet(rule, 1);
+-						// System.out.println("FIRST("+rule+")="+setName);
+-						if ( setName==null ) {
+-							reportError("$FIRST("+rule+")"+
+-										": unknown rule or bad lookahead computation");
+-						}
+-						else {
+-							text.setLength(_begin); text.append(setName);
+-						}
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='a')) {
+-			match("$append");
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				mWS(false);
+-				break;
+-			}
+-			case '(':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match('(');
+-			mTEXT_ARG(true);
+-			a1=_returnToken;
+-			match(')');
+-			
+-						String t = "text.Append("+a1.getText()+")";
+-						text.setLength(_begin); text.append(t);
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='s')) {
+-			match("$set");
+-			{
+-			if ((LA(1)=='T') && (LA(2)=='e')) {
+-				match("Text");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a2=_returnToken;
+-				match(')');
+-				
+-							String t;
+-							t = "text.Length = _begin; text.Append("+a2.getText()+")";
+-							text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else if ((LA(1)=='T') && (LA(2)=='o')) {
+-				match("Token");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a3=_returnToken;
+-				match(')');
+-				
+-							String t="_token = "+a3.getText();
+-							text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else if ((LA(1)=='T') && (LA(2)=='y')) {
+-				match("Type");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a4=_returnToken;
+-				match(')');
+-				
+-							String t="_ttype = "+a4.getText();
+-							text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='g')) {
+-			match("$getText");
+-			
+-						text.setLength(_begin); text.append("text.ToString(_begin, text.Length-_begin)");
+-					
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMENT;
+-		int _saveIndex;
+-		
+-		if ((LA(1)=='/') && (LA(2)=='/')) {
+-			mSL_COMMENT(false);
+-		}
+-		else if ((LA(1)=='/') && (LA(2)=='*')) {
+-			mML_COMMENT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STRING;
+-		int _saveIndex;
+-		
+-		match('"');
+-		{
+-		_loop720:
+-		do {
+-			if ((LA(1)=='\\')) {
+-				mESC(false);
+-			}
+-			else if ((_tokenSet_7.member(LA(1)))) {
+-				matchNot('"');
+-			}
+-			else {
+-				break _loop720;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('"');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CHAR;
+-		int _saveIndex;
+-		
+-		match('\'');
+-		{
+-		if ((LA(1)=='\\')) {
+-			mESC(false);
+-		}
+-		else if ((_tokenSet_8.member(LA(1)))) {
+-			matchNot('\'');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		match('\'');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE;
+-		int _saveIndex;
+-		Token t=null;
+-		Token t2=null;
+-		
+-			StringBuffer buf = new StringBuffer();
+-			int n=0;
+-			Vector terms = new Vector(10);
+-		
+-		
+-		_saveIndex=text.length();
+-		match('(');
+-		text.setLength(_saveIndex);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case '"':  case '#':  case '(':  case 'A':
+-		case 'B':  case 'C':  case 'D':  case 'E':
+-		case 'F':  case 'G':  case 'H':  case 'I':
+-		case 'J':  case 'K':  case 'L':  case 'M':
+-		case 'N':  case 'O':  case 'P':  case 'Q':
+-		case 'R':  case 'S':  case 'T':  case 'U':
+-		case 'V':  case 'W':  case 'X':  case 'Y':
+-		case 'Z':  case '[':  case '_':  case 'a':
+-		case 'b':  case 'c':  case 'd':  case 'e':
+-		case 'f':  case 'g':  case 'h':  case 'i':
+-		case 'j':  case 'k':  case 'l':  case 'm':
+-		case 'n':  case 'o':  case 'p':  case 'q':
+-		case 'r':  case 's':  case 't':  case 'u':
+-		case 'v':  case 'w':  case 'x':  case 'y':
+-		case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		mTREE_ELEMENT(true);
+-		text.setLength(_saveIndex);
+-		t=_returnToken;
+-		
+-					terms.appendElement(
+-						generator.processStringForASTConstructor(t.getText())
+-											 );
+-				
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ')':  case ',':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop649:
+-		do {
+-			if ((LA(1)==',')) {
+-				_saveIndex=text.length();
+-				match(',');
+-				text.setLength(_saveIndex);
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case '"':  case '#':  case '(':  case 'A':
+-				case 'B':  case 'C':  case 'D':  case 'E':
+-				case 'F':  case 'G':  case 'H':  case 'I':
+-				case 'J':  case 'K':  case 'L':  case 'M':
+-				case 'N':  case 'O':  case 'P':  case 'Q':
+-				case 'R':  case 'S':  case 'T':  case 'U':
+-				case 'V':  case 'W':  case 'X':  case 'Y':
+-				case 'Z':  case '[':  case '_':  case 'a':
+-				case 'b':  case 'c':  case 'd':  case 'e':
+-				case 'f':  case 'g':  case 'h':  case 'i':
+-				case 'j':  case 'k':  case 'l':  case 'm':
+-				case 'n':  case 'o':  case 'p':  case 'q':
+-				case 'r':  case 's':  case 't':  case 'u':
+-				case 'v':  case 'w':  case 'x':  case 'y':
+-				case 'z':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				_saveIndex=text.length();
+-				mTREE_ELEMENT(true);
+-				text.setLength(_saveIndex);
+-				t2=_returnToken;
+-				
+-								terms.appendElement(
+-									generator.processStringForASTConstructor(t2.getText())
+-														  );
+-							
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case ')':  case ',':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop649;
+-			}
+-			
+-		} while (true);
+-		}
+-		text.setLength(_begin); text.append(generator.getASTCreateString(terms));
+-		_saveIndex=text.length();
+-		match(')');
+-		text.setLength(_saveIndex);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt740=0;
+-		_loop740:
+-		do {
+-			if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)==' ') && (true) && (true)) {
+-				match(' ');
+-			}
+-			else if ((LA(1)=='\t') && (true) && (true)) {
+-				match('\t');
+-			}
+-			else if ((LA(1)=='\r') && (true) && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((LA(1)=='\n') && (true) && (true)) {
+-				match('\n');
+-				newline();
+-			}
+-			else {
+-				if ( _cnt740>=1 ) { break _loop740; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt740++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			matchRange('a','z');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':
+-		{
+-			matchRange('A','Z');
+-			break;
+-		}
+-		case '_':
+-		{
+-			match('_');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop706:
+-		do {
+-			if ((_tokenSet_9.member(LA(1))) && (true) && (true)) {
+-				{
+-				switch ( LA(1)) {
+-				case 'a':  case 'b':  case 'c':  case 'd':
+-				case 'e':  case 'f':  case 'g':  case 'h':
+-				case 'i':  case 'j':  case 'k':  case 'l':
+-				case 'm':  case 'n':  case 'o':  case 'p':
+-				case 'q':  case 'r':  case 's':  case 't':
+-				case 'u':  case 'v':  case 'w':  case 'x':
+-				case 'y':  case 'z':
+-				{
+-					matchRange('a','z');
+-					break;
+-				}
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':
+-				{
+-					matchRange('A','Z');
+-					break;
+-				}
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':
+-				{
+-					matchRange('0','9');
+-					break;
+-				}
+-				case '_':
+-				{
+-					match('_');
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop706;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = VAR_ASSIGN;
+-		int _saveIndex;
+-		
+-		match('=');
+-		
+-					// inform the code generator that an assignment was done to
+-					// AST root for the rule if invoker set refRuleRoot.
+-					if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) {
+-						transInfo.assignToRoot=true;
+-					}
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_CONSTRUCTOR;
+-		int _saveIndex;
+-		Token x=null;
+-		Token y=null;
+-		Token z=null;
+-		
+-		_saveIndex=text.length();
+-		match('[');
+-		text.setLength(_saveIndex);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case '"':  case '#':  case '(':  case '0':
+-		case '1':  case '2':  case '3':  case '4':
+-		case '5':  case '6':  case '7':  case '8':
+-		case '9':  case 'A':  case 'B':  case 'C':
+-		case 'D':  case 'E':  case 'F':  case 'G':
+-		case 'H':  case 'I':  case 'J':  case 'K':
+-		case 'L':  case 'M':  case 'N':  case 'O':
+-		case 'P':  case 'Q':  case 'R':  case 'S':
+-		case 'T':  case 'U':  case 'V':  case 'W':
+-		case 'X':  case 'Y':  case 'Z':  case '[':
+-		case '_':  case 'a':  case 'b':  case 'c':
+-		case 'd':  case 'e':  case 'f':  case 'g':
+-		case 'h':  case 'i':  case 'j':  case 'k':
+-		case 'l':  case 'm':  case 'n':  case 'o':
+-		case 'p':  case 'q':  case 'r':  case 's':
+-		case 't':  case 'u':  case 'v':  case 'w':
+-		case 'x':  case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		mAST_CTOR_ELEMENT(true);
+-		text.setLength(_saveIndex);
+-		x=_returnToken;
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ',':  case ']':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		if ((LA(1)==',') && (_tokenSet_10.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-			_saveIndex=text.length();
+-			match(',');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case '"':  case '#':  case '(':  case '0':
+-			case '1':  case '2':  case '3':  case '4':
+-			case '5':  case '6':  case '7':  case '8':
+-			case '9':  case 'A':  case 'B':  case 'C':
+-			case 'D':  case 'E':  case 'F':  case 'G':
+-			case 'H':  case 'I':  case 'J':  case 'K':
+-			case 'L':  case 'M':  case 'N':  case 'O':
+-			case 'P':  case 'Q':  case 'R':  case 'S':
+-			case 'T':  case 'U':  case 'V':  case 'W':
+-			case 'X':  case 'Y':  case 'Z':  case '[':
+-			case '_':  case 'a':  case 'b':  case 'c':
+-			case 'd':  case 'e':  case 'f':  case 'g':
+-			case 'h':  case 'i':  case 'j':  case 'k':
+-			case 'l':  case 'm':  case 'n':  case 'o':
+-			case 'p':  case 'q':  case 'r':  case 's':
+-			case 't':  case 'u':  case 'v':  case 'w':
+-			case 'x':  case 'y':  case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			_saveIndex=text.length();
+-			mAST_CTOR_ELEMENT(true);
+-			text.setLength(_saveIndex);
+-			y=_returnToken;
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ',':  case ']':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-		}
+-		else if ((LA(1)==','||LA(1)==']') && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ',':
+-		{
+-			_saveIndex=text.length();
+-			match(',');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case '"':  case '#':  case '(':  case '0':
+-			case '1':  case '2':  case '3':  case '4':
+-			case '5':  case '6':  case '7':  case '8':
+-			case '9':  case 'A':  case 'B':  case 'C':
+-			case 'D':  case 'E':  case 'F':  case 'G':
+-			case 'H':  case 'I':  case 'J':  case 'K':
+-			case 'L':  case 'M':  case 'N':  case 'O':
+-			case 'P':  case 'Q':  case 'R':  case 'S':
+-			case 'T':  case 'U':  case 'V':  case 'W':
+-			case 'X':  case 'Y':  case 'Z':  case '[':
+-			case '_':  case 'a':  case 'b':  case 'c':
+-			case 'd':  case 'e':  case 'f':  case 'g':
+-			case 'h':  case 'i':  case 'j':  case 'k':
+-			case 'l':  case 'm':  case 'n':  case 'o':
+-			case 'p':  case 'q':  case 'r':  case 's':
+-			case 't':  case 'u':  case 'v':  case 'w':
+-			case 'x':  case 'y':  case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			_saveIndex=text.length();
+-			mAST_CTOR_ELEMENT(true);
+-			text.setLength(_saveIndex);
+-			z=_returnToken;
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ']':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			break;
+-		}
+-		case ']':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		match(']');
+-		text.setLength(_saveIndex);
+-		
+-					String args = generator.processStringForASTConstructor(x.getText());
+-		
+-					// the second does not need processing coz it's a string
+-					// (eg second param of astFactory.create(x,y)
+-					if ( y!=null )
+-						args += ","+y.getText();
+-					if ( z!=null )
+-						args += ","+z.getText();
+-		
+-					text.setLength(_begin); text.append(generator.getASTCreateString(null,args));
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			mWS(false);
+-			break;
+-		}
+-		case '"':  case '$':  case '\'':  case '+':
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':  case 'A':  case 'B':
+-		case 'C':  case 'D':  case 'E':  case 'F':
+-		case 'G':  case 'H':  case 'I':  case 'J':
+-		case 'K':  case 'L':  case 'M':  case 'N':
+-		case 'O':  case 'P':  case 'Q':  case 'R':
+-		case 'S':  case 'T':  case 'U':  case 'V':
+-		case 'W':  case 'X':  case 'Y':  case 'Z':
+-		case '_':  case 'a':  case 'b':  case 'c':
+-		case 'd':  case 'e':  case 'f':  case 'g':
+-		case 'h':  case 'i':  case 'j':  case 'k':
+-		case 'l':  case 'm':  case 'n':  case 'o':
+-		case 'p':  case 'q':  case 'r':  case 's':
+-		case 't':  case 'u':  case 'v':  case 'w':
+-		case 'x':  case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		int _cnt680=0;
+-		_loop680:
+-		do {
+-			if ((_tokenSet_11.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mTEXT_ARG_ELEMENT(false);
+-				{
+-				if ((_tokenSet_4.member(LA(1))) && (_tokenSet_12.member(LA(2))) && (true)) {
+-					mWS(false);
+-				}
+-				else if ((_tokenSet_12.member(LA(1))) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else {
+-				if ( _cnt680>=1 ) { break _loop680; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt680++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		boolean was_mapped;
+-		
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			mTREE(false);
+-			break;
+-		}
+-		case '[':
+-		{
+-			mAST_CONSTRUCTOR(false);
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case '_':  case 'a':
+-		case 'b':  case 'c':  case 'd':  case 'e':
+-		case 'f':  case 'g':  case 'h':  case 'i':
+-		case 'j':  case 'k':  case 'l':  case 'm':
+-		case 'n':  case 'o':  case 'p':  case 'q':
+-		case 'r':  case 's':  case 't':  case 'u':
+-		case 'v':  case 'w':  case 'x':  case 'y':
+-		case 'z':
+-		{
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='#') && (LA(2)=='(')) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				mTREE(false);
+-			}
+-			else if ((LA(1)=='#') && (LA(2)=='[')) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				mAST_CONSTRUCTOR(false);
+-			}
+-			else if ((LA(1)=='#') && (_tokenSet_13.member(LA(2)))) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				was_mapped=mID_ELEMENT(true);
+-				id=_returnToken;
+-					// RK: I have a queer feeling that this maptreeid is redundant..
+-							if ( ! was_mapped )
+-							{
+-								String t = generator.mapTreeId(id.getText(), null);
+-								if ( t!=null ) {
+-									text.setLength(_begin); text.append(t);
+-								}
+-							}
+-						
+-			}
+-			else if ((LA(1)=='#') && (LA(2)=='#')) {
+-				match("##");
+-				
+-							if( currentRule != null )
+-							{
+-								String t = currentRule.getRuleName()+"_AST";
+-								text.setLength(_begin); text.append(t);
+-							}
+-							else
+-							{
+-								reportError("\"##\" not valid in this context");
+-								text.setLength(_begin); text.append("##");
+-							}
+-						
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-/** An ID_ELEMENT can be a func call, array ref, simple var,
+- *  or AST label ref.
+- */
+-	protected final boolean  mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		boolean mapped=false;
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		{
+-		if ((_tokenSet_4.member(LA(1))) && (_tokenSet_14.member(LA(2))) && (true)) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_14.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			match('(');
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_15.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-			}
+-			else if ((_tokenSet_15.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '"':  case '#':  case '\'':  case '(':
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':  case 'A':  case 'B':
+-			case 'C':  case 'D':  case 'E':  case 'F':
+-			case 'G':  case 'H':  case 'I':  case 'J':
+-			case 'K':  case 'L':  case 'M':  case 'N':
+-			case 'O':  case 'P':  case 'Q':  case 'R':
+-			case 'S':  case 'T':  case 'U':  case 'V':
+-			case 'W':  case 'X':  case 'Y':  case 'Z':
+-			case '[':  case '_':  case 'a':  case 'b':
+-			case 'c':  case 'd':  case 'e':  case 'f':
+-			case 'g':  case 'h':  case 'i':  case 'j':
+-			case 'k':  case 'l':  case 'm':  case 'n':
+-			case 'o':  case 'p':  case 'q':  case 'r':
+-			case 's':  case 't':  case 'u':  case 'v':
+-			case 'w':  case 'x':  case 'y':  case 'z':
+-			{
+-				mARG(false);
+-				{
+-				_loop668:
+-				do {
+-					if ((LA(1)==',')) {
+-						match(',');
+-						{
+-						switch ( LA(1)) {
+-						case '\t':  case '\n':  case '\r':  case ' ':
+-						{
+-							_saveIndex=text.length();
+-							mWS(false);
+-							text.setLength(_saveIndex);
+-							break;
+-						}
+-						case '"':  case '#':  case '\'':  case '(':
+-						case '0':  case '1':  case '2':  case '3':
+-						case '4':  case '5':  case '6':  case '7':
+-						case '8':  case '9':  case 'A':  case 'B':
+-						case 'C':  case 'D':  case 'E':  case 'F':
+-						case 'G':  case 'H':  case 'I':  case 'J':
+-						case 'K':  case 'L':  case 'M':  case 'N':
+-						case 'O':  case 'P':  case 'Q':  case 'R':
+-						case 'S':  case 'T':  case 'U':  case 'V':
+-						case 'W':  case 'X':  case 'Y':  case 'Z':
+-						case '[':  case '_':  case 'a':  case 'b':
+-						case 'c':  case 'd':  case 'e':  case 'f':
+-						case 'g':  case 'h':  case 'i':  case 'j':
+-						case 'k':  case 'l':  case 'm':  case 'n':
+-						case 'o':  case 'p':  case 'q':  case 'r':
+-						case 's':  case 't':  case 'u':  case 'v':
+-						case 'w':  case 'x':  case 'y':  case 'z':
+-						{
+-							break;
+-						}
+-						default:
+-						{
+-							throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-						}
+-						}
+-						}
+-						mARG(false);
+-					}
+-					else {
+-						break _loop668;
+-					}
+-					
+-				} while (true);
+-				}
+-				break;
+-			}
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match(')');
+-			break;
+-		}
+-		case '[':
+-		{
+-			{
+-			int _cnt673=0;
+-			_loop673:
+-			do {
+-				if ((LA(1)=='[')) {
+-					match('[');
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case '"':  case '#':  case '\'':  case '(':
+-					case '0':  case '1':  case '2':  case '3':
+-					case '4':  case '5':  case '6':  case '7':
+-					case '8':  case '9':  case 'A':  case 'B':
+-					case 'C':  case 'D':  case 'E':  case 'F':
+-					case 'G':  case 'H':  case 'I':  case 'J':
+-					case 'K':  case 'L':  case 'M':  case 'N':
+-					case 'O':  case 'P':  case 'Q':  case 'R':
+-					case 'S':  case 'T':  case 'U':  case 'V':
+-					case 'W':  case 'X':  case 'Y':  case 'Z':
+-					case '[':  case '_':  case 'a':  case 'b':
+-					case 'c':  case 'd':  case 'e':  case 'f':
+-					case 'g':  case 'h':  case 'i':  case 'j':
+-					case 'k':  case 'l':  case 'm':  case 'n':
+-					case 'o':  case 'p':  case 'q':  case 'r':
+-					case 's':  case 't':  case 'u':  case 'v':
+-					case 'w':  case 'x':  case 'y':  case 'z':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					mARG(false);
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case ']':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					match(']');
+-				}
+-				else {
+-					if ( _cnt673>=1 ) { break _loop673; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				_cnt673++;
+-			} while (true);
+-			}
+-			break;
+-		}
+-		case '.':
+-		{
+-			match('.');
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='-') && (LA(2)=='>') && (_tokenSet_13.member(LA(3)))) {
+-				match("->");
+-				mID_ELEMENT(false);
+-			}
+-			else if ((_tokenSet_16.member(LA(1))) && (true) && (true)) {
+-				
+-								mapped = true;
+-								String t = generator.mapTreeId(id.getText(), transInfo);
+-				//				System.out.println("mapped: "+id.getText()+" -> "+t);
+-								if ( t!=null ) {
+-									text.setLength(_begin); text.append(t);
+-								}
+-							
+-				{
+-				if (((_tokenSet_17.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) {
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						mWS(false);
+-						break;
+-					}
+-					case '=':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					mVAR_ASSIGN(false);
+-				}
+-				else if ((_tokenSet_18.member(LA(1))) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-		return mapped;
+-	}
+-	
+-/** The arguments of a #[...] constructor are text, token type,
+- *  or a tree.
+- */
+-	protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_CTOR_ELEMENT;
+-		int _saveIndex;
+-		
+-		if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-			mSTRING(false);
+-		}
+-		else if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			mTREE_ELEMENT(false);
+-		}
+-		else if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-			mINT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt731=0;
+-		_loop731:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt731>=1 ) { break _loop731; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt731++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ARG;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			mINT_OR_FLOAT(false);
+-			break;
+-		}
+-		default:
+-			if ((_tokenSet_19.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				mTREE_ELEMENT(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				mSTRING(false);
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop701:
+-		do {
+-			if ((_tokenSet_20.member(LA(1))) && (_tokenSet_21.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '*':  case '+':  case '-':  case '/':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case '+':
+-				{
+-					match('+');
+-					break;
+-				}
+-				case '-':
+-				{
+-					match('-');
+-					break;
+-				}
+-				case '*':
+-				{
+-					match('*');
+-					break;
+-				}
+-				case '/':
+-				{
+-					match('/');
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '"':  case '#':  case '\'':  case '(':
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':  case 'A':  case 'B':
+-				case 'C':  case 'D':  case 'E':  case 'F':
+-				case 'G':  case 'H':  case 'I':  case 'J':
+-				case 'K':  case 'L':  case 'M':  case 'N':
+-				case 'O':  case 'P':  case 'Q':  case 'R':
+-				case 'S':  case 'T':  case 'U':  case 'V':
+-				case 'W':  case 'X':  case 'Y':  case 'Z':
+-				case '[':  case '_':  case 'a':  case 'b':
+-				case 'c':  case 'd':  case 'e':  case 'f':
+-				case 'g':  case 'h':  case 'i':  case 'j':
+-				case 'k':  case 'l':  case 'm':  case 'n':
+-				case 'o':  case 'p':  case 'q':  case 'r':
+-				case 's':  case 't':  case 'u':  case 'v':
+-				case 'w':  case 'x':  case 'y':  case 'z':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				mARG(false);
+-			}
+-			else {
+-				break _loop701;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG_ELEMENT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case '_':  case 'a':
+-		case 'b':  case 'c':  case 'd':  case 'e':
+-		case 'f':  case 'g':  case 'h':  case 'i':
+-		case 'j':  case 'k':  case 'l':  case 'm':
+-		case 'n':  case 'o':  case 'p':  case 'q':
+-		case 'r':  case 's':  case 't':  case 'u':
+-		case 'v':  case 'w':  case 'x':  case 'y':
+-		case 'z':
+-		{
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			mINT_OR_FLOAT(false);
+-			break;
+-		}
+-		case '$':
+-		{
+-			mTEXT_ITEM(false);
+-			break;
+-		}
+-		case '+':
+-		{
+-			match('+');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG_ID_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		{
+-		if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && (true)) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_22.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			match('(');
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-			}
+-			else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			_loop689:
+-			do {
+-				if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-					mTEXT_ARG(false);
+-					{
+-					_loop688:
+-					do {
+-						if ((LA(1)==',')) {
+-							match(',');
+-							mTEXT_ARG(false);
+-						}
+-						else {
+-							break _loop688;
+-						}
+-						
+-					} while (true);
+-					}
+-				}
+-				else {
+-					break _loop689;
+-				}
+-				
+-			} while (true);
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match(')');
+-			break;
+-		}
+-		case '[':
+-		{
+-			{
+-			int _cnt694=0;
+-			_loop694:
+-			do {
+-				if ((LA(1)=='[')) {
+-					match('[');
+-					{
+-					if ((_tokenSet_4.member(LA(1))) && (_tokenSet_24.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-					}
+-					else if ((_tokenSet_24.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-					}
+-					else {
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					
+-					}
+-					mTEXT_ARG(false);
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case ']':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					match(']');
+-				}
+-				else {
+-					if ( _cnt694>=1 ) { break _loop694; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				_cnt694++;
+-			} while (true);
+-			}
+-			break;
+-		}
+-		case '.':
+-		{
+-			match('.');
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '-':
+-		{
+-			match("->");
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		case '"':  case '$':  case '\'':  case ')':
+-		case '+':  case ',':  case '0':  case '1':
+-		case '2':  case '3':  case '4':  case '5':
+-		case '6':  case '7':  case '8':  case '9':
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case ']':  case '_':
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT_OR_FLOAT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt734=0;
+-		_loop734:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt734>=1 ) { break _loop734; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt734++;
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='L') && (_tokenSet_26.member(LA(2))) && (true)) {
+-			match('L');
+-		}
+-		else if ((LA(1)=='l') && (_tokenSet_26.member(LA(2))) && (true)) {
+-			match('l');
+-		}
+-		else if ((LA(1)=='.')) {
+-			match('.');
+-			{
+-			_loop737:
+-			do {
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_26.member(LA(2))) && (true)) {
+-					mDIGIT(false);
+-				}
+-				else {
+-					break _loop737;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		else if ((_tokenSet_26.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SL_COMMENT;
+-		int _saveIndex;
+-		
+-		match("//");
+-		{
+-		_loop711:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop711;
+-			if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop711;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-			match("\r\n");
+-		}
+-		else if ((LA(1)=='\n')) {
+-			match('\n');
+-		}
+-		else if ((LA(1)=='\r') && (true) && (true)) {
+-			match('\r');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		newline();
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ML_COMMENT;
+-		int _saveIndex;
+-		
+-		match("/*");
+-		{
+-		_loop715:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop715;
+-			if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\n');
+-				newline();
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop715;
+-			}
+-			
+-		} while (true);
+-		}
+-		match("*/");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ESC;
+-		int _saveIndex;
+-		
+-		match('\\');
+-		{
+-		switch ( LA(1)) {
+-		case 'n':
+-		{
+-			match('n');
+-			break;
+-		}
+-		case 'r':
+-		{
+-			match('r');
+-			break;
+-		}
+-		case 't':
+-		{
+-			match('t');
+-			break;
+-		}
+-		case 'b':
+-		{
+-			match('b');
+-			break;
+-		}
+-		case 'f':
+-		{
+-			match('f');
+-			break;
+-		}
+-		case '"':
+-		{
+-			match('"');
+-			break;
+-		}
+-		case '\'':
+-		{
+-			match('\'');
+-			break;
+-		}
+-		case '\\':
+-		{
+-			match('\\');
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		{
+-			{
+-			matchRange('0','3');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mDIGIT(false);
+-				{
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-					mDIGIT(false);
+-				}
+-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case '4':  case '5':  case '6':  case '7':
+-		{
+-			{
+-			matchRange('4','7');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mDIGIT(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = DIGIT;
+-		int _saveIndex;
+-		
+-		matchRange('0','9');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = new long[8];
+-		data[0]=-103079215112L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = new long[8];
+-		data[0]=-145135534866440L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = new long[8];
+-		data[0]=-141407503262728L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = { 4294977024L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = { 1103806604800L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	private static final long[] mk_tokenSet_6() {
+-		long[] data = { 287959436729787904L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+-	private static final long[] mk_tokenSet_7() {
+-		long[] data = new long[8];
+-		data[0]=-17179869192L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+-	private static final long[] mk_tokenSet_8() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+-	private static final long[] mk_tokenSet_9() {
+-		long[] data = { 287948901175001088L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
+-	private static final long[] mk_tokenSet_10() {
+-		long[] data = { 287950056521213440L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
+-	private static final long[] mk_tokenSet_11() {
+-		long[] data = { 287958332923183104L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11());
+-	private static final long[] mk_tokenSet_12() {
+-		long[] data = { 287978128427460096L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12());
+-	private static final long[] mk_tokenSet_13() {
+-		long[] data = { 0L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13());
+-	private static final long[] mk_tokenSet_14() {
+-		long[] data = { 2306123388973753856L, 671088640L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14());
+-	private static final long[] mk_tokenSet_15() {
+-		long[] data = { 287952805300282880L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15());
+-	private static final long[] mk_tokenSet_16() {
+-		long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16());
+-	private static final long[] mk_tokenSet_17() {
+-		long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17());
+-	private static final long[] mk_tokenSet_18() {
+-		long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18());
+-	private static final long[] mk_tokenSet_19() {
+-		long[] data = { 1151051235328L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19());
+-	private static final long[] mk_tokenSet_20() {
+-		long[] data = { 189120294954496L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20());
+-	private static final long[] mk_tokenSet_21() {
+-		long[] data = { 288139722277004800L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21());
+-	private static final long[] mk_tokenSet_22() {
+-		long[] data = { 288084781055354368L, 576460746666278910L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22());
+-	private static final long[] mk_tokenSet_23() {
+-		long[] data = { 287960536241415680L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23());
+-	private static final long[] mk_tokenSet_24() {
+-		long[] data = { 287958337218160128L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24());
+-	private static final long[] mk_tokenSet_25() {
+-		long[] data = { 288228817078593024L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25());
+-	private static final long[] mk_tokenSet_26() {
+-		long[] data = { 288158448334415360L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_26 = new BitSet(mk_tokenSet_26());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexerTokenTypes.java glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexerTokenTypes.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexerTokenTypes.java	2006-02-08 22:31:09.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/csharp/ActionLexerTokenTypes.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,33 +0,0 @@
+-// $ANTLR : "action.g" -> "ActionLexer.java"$
+-
+-	package persistence.antlr.actions.csharp;
+-
+-public interface ActionLexerTokenTypes {
+-	int EOF = 1;
+-	int NULL_TREE_LOOKAHEAD = 3;
+-	int ACTION = 4;
+-	int STUFF = 5;
+-	int AST_ITEM = 6;
+-	int TEXT_ITEM = 7;
+-	int TREE = 8;
+-	int TREE_ELEMENT = 9;
+-	int AST_CONSTRUCTOR = 10;
+-	int AST_CTOR_ELEMENT = 11;
+-	int ID_ELEMENT = 12;
+-	int TEXT_ARG = 13;
+-	int TEXT_ARG_ELEMENT = 14;
+-	int TEXT_ARG_ID_ELEMENT = 15;
+-	int ARG = 16;
+-	int ID = 17;
+-	int VAR_ASSIGN = 18;
+-	int COMMENT = 19;
+-	int SL_COMMENT = 20;
+-	int ML_COMMENT = 21;
+-	int CHAR = 22;
+-	int STRING = 23;
+-	int ESC = 24;
+-	int DIGIT = 25;
+-	int INT = 26;
+-	int INT_OR_FLOAT = 27;
+-	int WS = 28;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexer.java	2006-02-08 22:31:10.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,2470 +0,0 @@
+-// $ANTLR : "action.g" -> "ActionLexer.java"$
+-
+-package persistence.antlr.actions.java;
+-
+-import java.io.InputStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.TokenStreamRecognitionException;
+-import persistence.antlr.CharStreamException;
+-import persistence.antlr.CharStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import java.io.Reader;
+-import java.util.Hashtable;
+-import persistence.antlr.CharScanner;
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.ByteBuffer;
+-import persistence.antlr.CharBuffer;
+-import persistence.antlr.Token;
+-import persistence.antlr.CommonToken;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.ANTLRHashString;
+-import persistence.antlr.LexerSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.SemanticException;
+-
+-import java.io.StringReader;
+-import persistence.antlr.collections.impl.Vector;
+-import persistence.antlr.*;
+-
+-/** Perform the following translations:
+-
+- AST related translations
+-
+-   ##          -> currentRule_AST
+-   #(x,y,z)    -> codeGenerator.getASTCreateString(vector-of(x,y,z))
+-   #[x]        -> codeGenerator.getASTCreateString(x)
+-   #x          -> codeGenerator.mapTreeId(x)
+-
+-   Inside context of #(...), you can ref (x,y,z), [x], and x as shortcuts.
+-
+- Text related translations
+-
+-   $append(x)     -> text.append(x)
+-   $setText(x)    -> text.setLength(_begin); text.append(x)
+-   $getText       -> new String(text.getBuffer(),_begin,text.length()-_begin)
+-   $setToken(x)   -> _token = x
+-   $setType(x)    -> _ttype = x
+-   $FOLLOW(r)     -> FOLLOW set name for rule r (optional arg)
+-   $FIRST(r)      -> FIRST set name for rule r (optional arg)
+- */
+-public class ActionLexer extends persistence.antlr.CharScanner implements ActionLexerTokenTypes, TokenStream
+- {
+-
+-	protected RuleBlock currentRule;
+-	protected CodeGenerator generator;
+-	protected int lineOffset = 0;
+-	private Tool antlrTool;	// The ANTLR tool
+-	ActionTransInfo transInfo;
+-
+- 	public ActionLexer( String s,
+-						RuleBlock currentRule,
+-						CodeGenerator generator,
+-						ActionTransInfo transInfo) {
+-		this(new StringReader(s));
+-		this.currentRule = currentRule;
+-		this.generator = generator;
+-		this.transInfo = transInfo;
+-	}
+-
+-	public void setLineOffset(int lineOffset) {
+-		// this.lineOffset = lineOffset;
+-		setLine(lineOffset);
+-	}
+-
+-	public void setTool(Tool tool) {
+-		this.antlrTool = tool;
+-	}
+-
+-	public void reportError(RecognitionException e)
+-	{
+-		antlrTool.error("Syntax error in action: "+e,getFilename(),getLine(),getColumn());
+-	}
+-
+-	public void reportError(String s)
+-	{
+-		antlrTool.error(s,getFilename(),getLine(),getColumn());
+-	}
+-
+-	public void reportWarning(String s)
+-	{
+-		if ( getFilename()==null ) {
+-			antlrTool.warning(s);
+-		}
+-		else {
+-			antlrTool.warning(s,getFilename(),getLine(), getColumn());
+-		}
+-	}
+-public ActionLexer(InputStream in) {
+-	this(new ByteBuffer(in));
+-}
+-public ActionLexer(Reader in) {
+-	this(new CharBuffer(in));
+-}
+-public ActionLexer(InputBuffer ib) {
+-	this(new LexerSharedInputState(ib));
+-}
+-public ActionLexer(LexerSharedInputState state) {
+-	super(state);
+-	caseSensitiveLiterals = true;
+-	setCaseSensitive(true);
+-	literals = new Hashtable();
+-}
+-
+-public Token nextToken() throws TokenStreamException {
+-	Token theRetToken=null;
+-tryAgain:
+-	for (;;) {
+-		Token _token = null;
+-		int _ttype = Token.INVALID_TYPE;
+-		resetText();
+-		try {   // for char stream error handling
+-			try {   // for lexical error handling
+-				if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff'))) {
+-					mACTION(true);
+-					theRetToken=_returnToken;
+-				}
+-				else {
+-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+-				_ttype = _returnToken.getType();
+-				_returnToken.setType(_ttype);
+-				return _returnToken;
+-			}
+-			catch (RecognitionException e) {
+-				throw new TokenStreamRecognitionException(e);
+-			}
+-		}
+-		catch (CharStreamException cse) {
+-			if ( cse instanceof CharStreamIOException ) {
+-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+-			}
+-			else {
+-				throw new TokenStreamException(cse.getMessage());
+-			}
+-		}
+-	}
+-}
+-
+-	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ACTION;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt382=0;
+-		_loop382:
+-		do {
+-			switch ( LA(1)) {
+-			case '#':
+-			{
+-				mAST_ITEM(false);
+-				break;
+-			}
+-			case '$':
+-			{
+-				mTEXT_ITEM(false);
+-				break;
+-			}
+-			default:
+-				if ((_tokenSet_0.member(LA(1)))) {
+-					mSTUFF(false);
+-				}
+-			else {
+-				if ( _cnt382>=1 ) { break _loop382; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			}
+-			_cnt382++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSTUFF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STUFF;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '\n':
+-		{
+-			match('\n');
+-			newline();
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+-				mCOMMENT(false);
+-			}
+-			else if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-				match("\r\n");
+-				newline();
+-			}
+-			else if ((LA(1)=='/') && (_tokenSet_1.member(LA(2)))) {
+-				match('/');
+-				{
+-				match(_tokenSet_1);
+-				}
+-			}
+-			else if ((LA(1)=='\r') && (true) && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((_tokenSet_2.member(LA(1)))) {
+-				{
+-				match(_tokenSet_2);
+-				}
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mAST_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_ITEM;
+-		int _saveIndex;
+-		Token t=null;
+-		Token id=null;
+-		Token ctor=null;
+-		
+-		if ((LA(1)=='#') && (LA(2)=='(')) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mTREE(true);
+-			t=_returnToken;
+-		}
+-		else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mID(true);
+-			id=_returnToken;
+-			
+-					String idt = id.getText();
+-					String var = generator.mapTreeId(idt,transInfo);
+-					if ( var!=null ) {
+-						text.setLength(_begin); text.append(var);
+-					}
+-					
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (true) && (true)) {
+-				mWS(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-			{
+-			if ((LA(1)=='=') && (true) && (true)) {
+-				mVAR_ASSIGN(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else if ((LA(1)=='#') && (LA(2)=='[')) {
+-			_saveIndex=text.length();
+-			match('#');
+-			text.setLength(_saveIndex);
+-			mAST_CONSTRUCTOR(true);
+-			ctor=_returnToken;
+-		}
+-		else if ((LA(1)=='#') && (LA(2)=='#')) {
+-			match("##");
+-			
+-					String r=currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(r);
+-					if ( transInfo!=null ) {
+-						transInfo.refRuleRoot=r;	// we ref root of tree
+-					}
+-					
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (true) && (true)) {
+-				mWS(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-			{
+-			if ((LA(1)=='=') && (true) && (true)) {
+-				mVAR_ASSIGN(false);
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ITEM(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ITEM;
+-		int _saveIndex;
+-		Token a1=null;
+-		Token a2=null;
+-		Token a3=null;
+-		Token a4=null;
+-		Token a5=null;
+-		Token a6=null;
+-		
+-		if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='O')) {
+-			match("$FOLLOW");
+-			{
+-			if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a5=_returnToken;
+-				match(')');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			
+-						String rule = currentRule.getRuleName();
+-						if ( a5!=null ) {
+-							rule = a5.getText();
+-						}
+-						String setName = generator.getFOLLOWBitSet(rule, 1);
+-						// System.out.println("FOLLOW("+rule+")="+setName);
+-						if ( setName==null ) {
+-							reportError("$FOLLOW("+rule+")"+
+-										": unknown rule or bad lookahead computation");
+-						}
+-						else {
+-							text.setLength(_begin); text.append(setName);
+-						}
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='F') && (LA(3)=='I')) {
+-			match("$FIRST");
+-			{
+-			if ((_tokenSet_5.member(LA(1))) && (_tokenSet_6.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a6=_returnToken;
+-				match(')');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			
+-						String rule = currentRule.getRuleName();
+-						if ( a6!=null ) {
+-							rule = a6.getText();
+-						}
+-						String setName = generator.getFIRSTBitSet(rule, 1);
+-						// System.out.println("FIRST("+rule+")="+setName);
+-						if ( setName==null ) {
+-							reportError("$FIRST("+rule+")"+
+-										": unknown rule or bad lookahead computation");
+-						}
+-						else {
+-							text.setLength(_begin); text.append(setName);
+-						}
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='a')) {
+-			match("$append");
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				mWS(false);
+-				break;
+-			}
+-			case '(':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match('(');
+-			mTEXT_ARG(true);
+-			a1=_returnToken;
+-			match(')');
+-			
+-						String t = "text.append("+a1.getText()+")";
+-						text.setLength(_begin); text.append(t);
+-					
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='s')) {
+-			match("$set");
+-			{
+-			if ((LA(1)=='T') && (LA(2)=='e')) {
+-				match("Text");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a2=_returnToken;
+-				match(')');
+-				
+-							String t;
+-							t = "text.setLength(_begin); text.append("+a2.getText()+")";
+-							text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else if ((LA(1)=='T') && (LA(2)=='o')) {
+-				match("Token");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a3=_returnToken;
+-				match(')');
+-				
+-							String t="_token = "+a3.getText();
+-							text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else if ((LA(1)=='T') && (LA(2)=='y')) {
+-				match("Type");
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '(':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('(');
+-				mTEXT_ARG(true);
+-				a4=_returnToken;
+-				match(')');
+-				
+-							String t="_ttype = "+a4.getText();
+-							text.setLength(_begin); text.append(t);
+-							
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-		}
+-		else if ((LA(1)=='$') && (LA(2)=='g')) {
+-			match("$getText");
+-			
+-						text.setLength(_begin); text.append("new String(text.getBuffer(),_begin,text.length()-_begin)");
+-					
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMENT;
+-		int _saveIndex;
+-		
+-		if ((LA(1)=='/') && (LA(2)=='/')) {
+-			mSL_COMMENT(false);
+-		}
+-		else if ((LA(1)=='/') && (LA(2)=='*')) {
+-			mML_COMMENT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STRING;
+-		int _saveIndex;
+-		
+-		match('"');
+-		{
+-		_loop478:
+-		do {
+-			if ((LA(1)=='\\')) {
+-				mESC(false);
+-			}
+-			else if ((_tokenSet_7.member(LA(1)))) {
+-				matchNot('"');
+-			}
+-			else {
+-				break _loop478;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('"');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCHAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CHAR;
+-		int _saveIndex;
+-		
+-		match('\'');
+-		{
+-		if ((LA(1)=='\\')) {
+-			mESC(false);
+-		}
+-		else if ((_tokenSet_8.member(LA(1)))) {
+-			matchNot('\'');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		match('\'');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTREE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE;
+-		int _saveIndex;
+-		Token t=null;
+-		Token t2=null;
+-		
+-			StringBuffer buf = new StringBuffer();
+-			int n=0;
+-			Vector terms = new Vector(10);
+-		
+-		
+-		_saveIndex=text.length();
+-		match('(');
+-		text.setLength(_saveIndex);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case '"':  case '#':  case '(':  case 'A':
+-		case 'B':  case 'C':  case 'D':  case 'E':
+-		case 'F':  case 'G':  case 'H':  case 'I':
+-		case 'J':  case 'K':  case 'L':  case 'M':
+-		case 'N':  case 'O':  case 'P':  case 'Q':
+-		case 'R':  case 'S':  case 'T':  case 'U':
+-		case 'V':  case 'W':  case 'X':  case 'Y':
+-		case 'Z':  case '[':  case '_':  case 'a':
+-		case 'b':  case 'c':  case 'd':  case 'e':
+-		case 'f':  case 'g':  case 'h':  case 'i':
+-		case 'j':  case 'k':  case 'l':  case 'm':
+-		case 'n':  case 'o':  case 'p':  case 'q':
+-		case 'r':  case 's':  case 't':  case 'u':
+-		case 'v':  case 'w':  case 'x':  case 'y':
+-		case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		mTREE_ELEMENT(true);
+-		text.setLength(_saveIndex);
+-		t=_returnToken;
+-		terms.appendElement(t.getText());
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ')':  case ',':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop407:
+-		do {
+-			if ((LA(1)==',')) {
+-				_saveIndex=text.length();
+-				match(',');
+-				text.setLength(_saveIndex);
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case '"':  case '#':  case '(':  case 'A':
+-				case 'B':  case 'C':  case 'D':  case 'E':
+-				case 'F':  case 'G':  case 'H':  case 'I':
+-				case 'J':  case 'K':  case 'L':  case 'M':
+-				case 'N':  case 'O':  case 'P':  case 'Q':
+-				case 'R':  case 'S':  case 'T':  case 'U':
+-				case 'V':  case 'W':  case 'X':  case 'Y':
+-				case 'Z':  case '[':  case '_':  case 'a':
+-				case 'b':  case 'c':  case 'd':  case 'e':
+-				case 'f':  case 'g':  case 'h':  case 'i':
+-				case 'j':  case 'k':  case 'l':  case 'm':
+-				case 'n':  case 'o':  case 'p':  case 'q':
+-				case 'r':  case 's':  case 't':  case 'u':
+-				case 'v':  case 'w':  case 'x':  case 'y':
+-				case 'z':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				_saveIndex=text.length();
+-				mTREE_ELEMENT(true);
+-				text.setLength(_saveIndex);
+-				t2=_returnToken;
+-				terms.appendElement(t2.getText());
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case ')':  case ',':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop407;
+-			}
+-			
+-		} while (true);
+-		}
+-		text.setLength(_begin); text.append(generator.getASTCreateString(terms));
+-		_saveIndex=text.length();
+-		match(')');
+-		text.setLength(_saveIndex);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			matchRange('a','z');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':
+-		{
+-			matchRange('A','Z');
+-			break;
+-		}
+-		case '_':
+-		{
+-			match('_');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop464:
+-		do {
+-			if ((_tokenSet_9.member(LA(1))) && (true) && (true)) {
+-				{
+-				switch ( LA(1)) {
+-				case 'a':  case 'b':  case 'c':  case 'd':
+-				case 'e':  case 'f':  case 'g':  case 'h':
+-				case 'i':  case 'j':  case 'k':  case 'l':
+-				case 'm':  case 'n':  case 'o':  case 'p':
+-				case 'q':  case 'r':  case 's':  case 't':
+-				case 'u':  case 'v':  case 'w':  case 'x':
+-				case 'y':  case 'z':
+-				{
+-					matchRange('a','z');
+-					break;
+-				}
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':
+-				{
+-					matchRange('A','Z');
+-					break;
+-				}
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':
+-				{
+-					matchRange('0','9');
+-					break;
+-				}
+-				case '_':
+-				{
+-					match('_');
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop464;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt498=0;
+-		_loop498:
+-		do {
+-			if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)==' ') && (true) && (true)) {
+-				match(' ');
+-			}
+-			else if ((LA(1)=='\t') && (true) && (true)) {
+-				match('\t');
+-			}
+-			else if ((LA(1)=='\r') && (true) && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((LA(1)=='\n') && (true) && (true)) {
+-				match('\n');
+-				newline();
+-			}
+-			else {
+-				if ( _cnt498>=1 ) { break _loop498; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt498++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mVAR_ASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = VAR_ASSIGN;
+-		int _saveIndex;
+-		
+-		match('=');
+-		
+-				// inform the code generator that an assignment was done to
+-				// AST root for the rule if invoker set refRuleRoot.
+-				if ( LA(1)!='=' && transInfo!=null && transInfo.refRuleRoot!=null ) {
+-					transInfo.assignToRoot=true;
+-				}
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mAST_CONSTRUCTOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_CONSTRUCTOR;
+-		int _saveIndex;
+-		Token x=null;
+-		Token y=null;
+-		Token z=null;
+-		
+-		_saveIndex=text.length();
+-		match('[');
+-		text.setLength(_saveIndex);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case '"':  case '#':  case '(':  case '0':
+-		case '1':  case '2':  case '3':  case '4':
+-		case '5':  case '6':  case '7':  case '8':
+-		case '9':  case 'A':  case 'B':  case 'C':
+-		case 'D':  case 'E':  case 'F':  case 'G':
+-		case 'H':  case 'I':  case 'J':  case 'K':
+-		case 'L':  case 'M':  case 'N':  case 'O':
+-		case 'P':  case 'Q':  case 'R':  case 'S':
+-		case 'T':  case 'U':  case 'V':  case 'W':
+-		case 'X':  case 'Y':  case 'Z':  case '[':
+-		case '_':  case 'a':  case 'b':  case 'c':
+-		case 'd':  case 'e':  case 'f':  case 'g':
+-		case 'h':  case 'i':  case 'j':  case 'k':
+-		case 'l':  case 'm':  case 'n':  case 'o':
+-		case 'p':  case 'q':  case 'r':  case 's':
+-		case 't':  case 'u':  case 'v':  case 'w':
+-		case 'x':  case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		mAST_CTOR_ELEMENT(true);
+-		text.setLength(_saveIndex);
+-		x=_returnToken;
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ',':  case ']':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		if ((LA(1)==',') && (_tokenSet_10.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-			_saveIndex=text.length();
+-			match(',');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case '"':  case '#':  case '(':  case '0':
+-			case '1':  case '2':  case '3':  case '4':
+-			case '5':  case '6':  case '7':  case '8':
+-			case '9':  case 'A':  case 'B':  case 'C':
+-			case 'D':  case 'E':  case 'F':  case 'G':
+-			case 'H':  case 'I':  case 'J':  case 'K':
+-			case 'L':  case 'M':  case 'N':  case 'O':
+-			case 'P':  case 'Q':  case 'R':  case 'S':
+-			case 'T':  case 'U':  case 'V':  case 'W':
+-			case 'X':  case 'Y':  case 'Z':  case '[':
+-			case '_':  case 'a':  case 'b':  case 'c':
+-			case 'd':  case 'e':  case 'f':  case 'g':
+-			case 'h':  case 'i':  case 'j':  case 'k':
+-			case 'l':  case 'm':  case 'n':  case 'o':
+-			case 'p':  case 'q':  case 'r':  case 's':
+-			case 't':  case 'u':  case 'v':  case 'w':
+-			case 'x':  case 'y':  case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			_saveIndex=text.length();
+-			mAST_CTOR_ELEMENT(true);
+-			text.setLength(_saveIndex);
+-			y=_returnToken;
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ',':  case ']':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-		}
+-		else if ((LA(1)==','||LA(1)==']') && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ',':
+-		{
+-			_saveIndex=text.length();
+-			match(',');
+-			text.setLength(_saveIndex);
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case '"':  case '#':  case '(':  case '0':
+-			case '1':  case '2':  case '3':  case '4':
+-			case '5':  case '6':  case '7':  case '8':
+-			case '9':  case 'A':  case 'B':  case 'C':
+-			case 'D':  case 'E':  case 'F':  case 'G':
+-			case 'H':  case 'I':  case 'J':  case 'K':
+-			case 'L':  case 'M':  case 'N':  case 'O':
+-			case 'P':  case 'Q':  case 'R':  case 'S':
+-			case 'T':  case 'U':  case 'V':  case 'W':
+-			case 'X':  case 'Y':  case 'Z':  case '[':
+-			case '_':  case 'a':  case 'b':  case 'c':
+-			case 'd':  case 'e':  case 'f':  case 'g':
+-			case 'h':  case 'i':  case 'j':  case 'k':
+-			case 'l':  case 'm':  case 'n':  case 'o':
+-			case 'p':  case 'q':  case 'r':  case 's':
+-			case 't':  case 'u':  case 'v':  case 'w':
+-			case 'x':  case 'y':  case 'z':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			_saveIndex=text.length();
+-			mAST_CTOR_ELEMENT(true);
+-			text.setLength(_saveIndex);
+-			z=_returnToken;
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ']':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			break;
+-		}
+-		case ']':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_saveIndex=text.length();
+-		match(']');
+-		text.setLength(_saveIndex);
+-		
+-				String args = x.getText();
+-				if ( y!=null ) {
+-					args += ","+y.getText();
+-				}
+-				if ( z!=null ) {
+-					args += ","+z.getText();
+-				}
+-				text.setLength(_begin); text.append(generator.getASTCreateString(null,args));
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			mWS(false);
+-			break;
+-		}
+-		case '"':  case '$':  case '\'':  case '+':
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':  case 'A':  case 'B':
+-		case 'C':  case 'D':  case 'E':  case 'F':
+-		case 'G':  case 'H':  case 'I':  case 'J':
+-		case 'K':  case 'L':  case 'M':  case 'N':
+-		case 'O':  case 'P':  case 'Q':  case 'R':
+-		case 'S':  case 'T':  case 'U':  case 'V':
+-		case 'W':  case 'X':  case 'Y':  case 'Z':
+-		case '_':  case 'a':  case 'b':  case 'c':
+-		case 'd':  case 'e':  case 'f':  case 'g':
+-		case 'h':  case 'i':  case 'j':  case 'k':
+-		case 'l':  case 'm':  case 'n':  case 'o':
+-		case 'p':  case 'q':  case 'r':  case 's':
+-		case 't':  case 'u':  case 'v':  case 'w':
+-		case 'x':  case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		int _cnt438=0;
+-		_loop438:
+-		do {
+-			if ((_tokenSet_11.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mTEXT_ARG_ELEMENT(false);
+-				{
+-				if ((_tokenSet_4.member(LA(1))) && (_tokenSet_12.member(LA(2))) && (true)) {
+-					mWS(false);
+-				}
+-				else if ((_tokenSet_12.member(LA(1))) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else {
+-				if ( _cnt438>=1 ) { break _loop438; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt438++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTREE_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		boolean was_mapped;
+-		
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			mTREE(false);
+-			break;
+-		}
+-		case '[':
+-		{
+-			mAST_CONSTRUCTOR(false);
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case '_':  case 'a':
+-		case 'b':  case 'c':  case 'd':  case 'e':
+-		case 'f':  case 'g':  case 'h':  case 'i':
+-		case 'j':  case 'k':  case 'l':  case 'm':
+-		case 'n':  case 'o':  case 'p':  case 'q':
+-		case 'r':  case 's':  case 't':  case 'u':
+-		case 'v':  case 'w':  case 'x':  case 'y':
+-		case 'z':
+-		{
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='#') && (LA(2)=='(')) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				mTREE(false);
+-			}
+-			else if ((LA(1)=='#') && (LA(2)=='[')) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				mAST_CONSTRUCTOR(false);
+-			}
+-			else if ((LA(1)=='#') && (_tokenSet_3.member(LA(2)))) {
+-				_saveIndex=text.length();
+-				match('#');
+-				text.setLength(_saveIndex);
+-				was_mapped=mID_ELEMENT(true);
+-				id=_returnToken;
+-					// RK: I have a queer feeling that this maptreeid is redundant
+-							if( ! was_mapped )
+-							{
+-								String t = generator.mapTreeId(id.getText(), null);
+-								text.setLength(_begin); text.append(t);
+-							}
+-						
+-			}
+-			else if ((LA(1)=='#') && (LA(2)=='#')) {
+-				match("##");
+-				String t = currentRule.getRuleName()+"_AST"; text.setLength(_begin); text.append(t);
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-/** An ID_ELEMENT can be a func call, array ref, simple var,
+- *  or AST label ref.
+- */
+-	protected final boolean  mID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		boolean mapped=false;
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		{
+-		if ((_tokenSet_4.member(LA(1))) && (_tokenSet_13.member(LA(2))) && (true)) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_13.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			match('(');
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_14.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-			}
+-			else if ((_tokenSet_14.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '"':  case '#':  case '\'':  case '(':
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':  case 'A':  case 'B':
+-			case 'C':  case 'D':  case 'E':  case 'F':
+-			case 'G':  case 'H':  case 'I':  case 'J':
+-			case 'K':  case 'L':  case 'M':  case 'N':
+-			case 'O':  case 'P':  case 'Q':  case 'R':
+-			case 'S':  case 'T':  case 'U':  case 'V':
+-			case 'W':  case 'X':  case 'Y':  case 'Z':
+-			case '[':  case '_':  case 'a':  case 'b':
+-			case 'c':  case 'd':  case 'e':  case 'f':
+-			case 'g':  case 'h':  case 'i':  case 'j':
+-			case 'k':  case 'l':  case 'm':  case 'n':
+-			case 'o':  case 'p':  case 'q':  case 'r':
+-			case 's':  case 't':  case 'u':  case 'v':
+-			case 'w':  case 'x':  case 'y':  case 'z':
+-			{
+-				mARG(false);
+-				{
+-				_loop426:
+-				do {
+-					if ((LA(1)==',')) {
+-						match(',');
+-						{
+-						switch ( LA(1)) {
+-						case '\t':  case '\n':  case '\r':  case ' ':
+-						{
+-							_saveIndex=text.length();
+-							mWS(false);
+-							text.setLength(_saveIndex);
+-							break;
+-						}
+-						case '"':  case '#':  case '\'':  case '(':
+-						case '0':  case '1':  case '2':  case '3':
+-						case '4':  case '5':  case '6':  case '7':
+-						case '8':  case '9':  case 'A':  case 'B':
+-						case 'C':  case 'D':  case 'E':  case 'F':
+-						case 'G':  case 'H':  case 'I':  case 'J':
+-						case 'K':  case 'L':  case 'M':  case 'N':
+-						case 'O':  case 'P':  case 'Q':  case 'R':
+-						case 'S':  case 'T':  case 'U':  case 'V':
+-						case 'W':  case 'X':  case 'Y':  case 'Z':
+-						case '[':  case '_':  case 'a':  case 'b':
+-						case 'c':  case 'd':  case 'e':  case 'f':
+-						case 'g':  case 'h':  case 'i':  case 'j':
+-						case 'k':  case 'l':  case 'm':  case 'n':
+-						case 'o':  case 'p':  case 'q':  case 'r':
+-						case 's':  case 't':  case 'u':  case 'v':
+-						case 'w':  case 'x':  case 'y':  case 'z':
+-						{
+-							break;
+-						}
+-						default:
+-						{
+-							throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-						}
+-						}
+-						}
+-						mARG(false);
+-					}
+-					else {
+-						break _loop426;
+-					}
+-					
+-				} while (true);
+-				}
+-				break;
+-			}
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match(')');
+-			break;
+-		}
+-		case '[':
+-		{
+-			{
+-			int _cnt431=0;
+-			_loop431:
+-			do {
+-				if ((LA(1)=='[')) {
+-					match('[');
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case '"':  case '#':  case '\'':  case '(':
+-					case '0':  case '1':  case '2':  case '3':
+-					case '4':  case '5':  case '6':  case '7':
+-					case '8':  case '9':  case 'A':  case 'B':
+-					case 'C':  case 'D':  case 'E':  case 'F':
+-					case 'G':  case 'H':  case 'I':  case 'J':
+-					case 'K':  case 'L':  case 'M':  case 'N':
+-					case 'O':  case 'P':  case 'Q':  case 'R':
+-					case 'S':  case 'T':  case 'U':  case 'V':
+-					case 'W':  case 'X':  case 'Y':  case 'Z':
+-					case '[':  case '_':  case 'a':  case 'b':
+-					case 'c':  case 'd':  case 'e':  case 'f':
+-					case 'g':  case 'h':  case 'i':  case 'j':
+-					case 'k':  case 'l':  case 'm':  case 'n':
+-					case 'o':  case 'p':  case 'q':  case 'r':
+-					case 's':  case 't':  case 'u':  case 'v':
+-					case 'w':  case 'x':  case 'y':  case 'z':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					mARG(false);
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case ']':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					match(']');
+-				}
+-				else {
+-					if ( _cnt431>=1 ) { break _loop431; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				_cnt431++;
+-			} while (true);
+-			}
+-			break;
+-		}
+-		case '.':
+-		{
+-			match('.');
+-			mID_ELEMENT(false);
+-			break;
+-		}
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		case ')':  case '*':  case '+':  case ',':
+-		case '-':  case '/':  case '=':  case ']':
+-		{
+-			
+-							mapped = true;
+-							String t = generator.mapTreeId(id.getText(), transInfo);
+-							text.setLength(_begin); text.append(t);
+-						
+-			{
+-			if (((_tokenSet_15.member(LA(1))) && (_tokenSet_16.member(LA(2))) && (true))&&(transInfo!=null && transInfo.refRuleRoot!=null)) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '=':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				mVAR_ASSIGN(false);
+-			}
+-			else if ((_tokenSet_17.member(LA(1))) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-		return mapped;
+-	}
+-	
+-/** The arguments of a #[...] constructor are text, token type,
+- *  or a tree.
+- */
+-	protected final void mAST_CTOR_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = AST_CTOR_ELEMENT;
+-		int _saveIndex;
+-		
+-		if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-			mSTRING(false);
+-		}
+-		else if ((_tokenSet_18.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			mTREE_ELEMENT(false);
+-		}
+-		else if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-			mINT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt489=0;
+-		_loop489:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt489>=1 ) { break _loop489; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt489++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mARG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ARG;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			mINT_OR_FLOAT(false);
+-			break;
+-		}
+-		default:
+-			if ((_tokenSet_18.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				mTREE_ELEMENT(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				mSTRING(false);
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop459:
+-		do {
+-			if ((_tokenSet_19.member(LA(1))) && (_tokenSet_20.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '*':  case '+':  case '-':  case '/':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case '+':
+-				{
+-					match('+');
+-					break;
+-				}
+-				case '-':
+-				{
+-					match('-');
+-					break;
+-				}
+-				case '*':
+-				{
+-					match('*');
+-					break;
+-				}
+-				case '/':
+-				{
+-					match('/');
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '"':  case '#':  case '\'':  case '(':
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':  case 'A':  case 'B':
+-				case 'C':  case 'D':  case 'E':  case 'F':
+-				case 'G':  case 'H':  case 'I':  case 'J':
+-				case 'K':  case 'L':  case 'M':  case 'N':
+-				case 'O':  case 'P':  case 'Q':  case 'R':
+-				case 'S':  case 'T':  case 'U':  case 'V':
+-				case 'W':  case 'X':  case 'Y':  case 'Z':
+-				case '[':  case '_':  case 'a':  case 'b':
+-				case 'c':  case 'd':  case 'e':  case 'f':
+-				case 'g':  case 'h':  case 'i':  case 'j':
+-				case 'k':  case 'l':  case 'm':  case 'n':
+-				case 'o':  case 'p':  case 'q':  case 'r':
+-				case 's':  case 't':  case 'u':  case 'v':
+-				case 'w':  case 'x':  case 'y':  case 'z':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				mARG(false);
+-			}
+-			else {
+-				break _loop459;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG_ELEMENT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case '_':  case 'a':
+-		case 'b':  case 'c':  case 'd':  case 'e':
+-		case 'f':  case 'g':  case 'h':  case 'i':
+-		case 'j':  case 'k':  case 'l':  case 'm':
+-		case 'n':  case 'o':  case 'p':  case 'q':
+-		case 'r':  case 's':  case 't':  case 'u':
+-		case 'v':  case 'w':  case 'x':  case 'y':
+-		case 'z':
+-		{
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR(false);
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			mINT_OR_FLOAT(false);
+-			break;
+-		}
+-		case '$':
+-		{
+-			mTEXT_ITEM(false);
+-			break;
+-		}
+-		case '+':
+-		{
+-			match('+');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mTEXT_ARG_ID_ELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TEXT_ARG_ID_ELEMENT;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		{
+-		if ((_tokenSet_4.member(LA(1))) && (_tokenSet_21.member(LA(2))) && (true)) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_21.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '(':
+-		{
+-			match('(');
+-			{
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_22.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-			}
+-			else if ((_tokenSet_22.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			_loop447:
+-			do {
+-				if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-					mTEXT_ARG(false);
+-					{
+-					_loop446:
+-					do {
+-						if ((LA(1)==',')) {
+-							match(',');
+-							mTEXT_ARG(false);
+-						}
+-						else {
+-							break _loop446;
+-						}
+-						
+-					} while (true);
+-					}
+-				}
+-				else {
+-					break _loop447;
+-				}
+-				
+-			} while (true);
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				_saveIndex=text.length();
+-				mWS(false);
+-				text.setLength(_saveIndex);
+-				break;
+-			}
+-			case ')':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			match(')');
+-			break;
+-		}
+-		case '[':
+-		{
+-			{
+-			int _cnt452=0;
+-			_loop452:
+-			do {
+-				if ((LA(1)=='[')) {
+-					match('[');
+-					{
+-					if ((_tokenSet_4.member(LA(1))) && (_tokenSet_23.member(LA(2))) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-					}
+-					else if ((_tokenSet_23.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-					}
+-					else {
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					
+-					}
+-					mTEXT_ARG(false);
+-					{
+-					switch ( LA(1)) {
+-					case '\t':  case '\n':  case '\r':  case ' ':
+-					{
+-						_saveIndex=text.length();
+-						mWS(false);
+-						text.setLength(_saveIndex);
+-						break;
+-					}
+-					case ']':
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-					}
+-					}
+-					}
+-					match(']');
+-				}
+-				else {
+-					if ( _cnt452>=1 ) { break _loop452; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				
+-				_cnt452++;
+-			} while (true);
+-			}
+-			break;
+-		}
+-		case '.':
+-		{
+-			match('.');
+-			mTEXT_ARG_ID_ELEMENT(false);
+-			break;
+-		}
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		case '"':  case '$':  case '\'':  case ')':
+-		case '+':  case ',':  case '0':  case '1':
+-		case '2':  case '3':  case '4':  case '5':
+-		case '6':  case '7':  case '8':  case '9':
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':  case ']':  case '_':
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mINT_OR_FLOAT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT_OR_FLOAT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt492=0;
+-		_loop492:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_24.member(LA(2))) && (true)) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt492>=1 ) { break _loop492; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt492++;
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='L') && (_tokenSet_25.member(LA(2))) && (true)) {
+-			match('L');
+-		}
+-		else if ((LA(1)=='l') && (_tokenSet_25.member(LA(2))) && (true)) {
+-			match('l');
+-		}
+-		else if ((LA(1)=='.')) {
+-			match('.');
+-			{
+-			_loop495:
+-			do {
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && (_tokenSet_25.member(LA(2))) && (true)) {
+-					mDIGIT(false);
+-				}
+-				else {
+-					break _loop495;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		else if ((_tokenSet_25.member(LA(1))) && (true) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SL_COMMENT;
+-		int _saveIndex;
+-		
+-		match("//");
+-		{
+-		_loop469:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='\n'||LA(1)=='\r') && (true) && (true)) break _loop469;
+-			if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop469;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='\r') && (LA(2)=='\n') && (true)) {
+-			match("\r\n");
+-		}
+-		else if ((LA(1)=='\n')) {
+-			match('\n');
+-		}
+-		else if ((LA(1)=='\r') && (true) && (true)) {
+-			match('\r');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		newline();
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ML_COMMENT;
+-		int _saveIndex;
+-		
+-		match("/*");
+-		{
+-		_loop473:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='*') && (LA(2)=='/') && (true)) break _loop473;
+-			if ((LA(1)=='\r') && (LA(2)=='\n') && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((LA(1)=='\n') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				match('\n');
+-				newline();
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && ((LA(3) >= '\u0003' && LA(3) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop473;
+-			}
+-			
+-		} while (true);
+-		}
+-		match("*/");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ESC;
+-		int _saveIndex;
+-		
+-		match('\\');
+-		{
+-		switch ( LA(1)) {
+-		case 'n':
+-		{
+-			match('n');
+-			break;
+-		}
+-		case 'r':
+-		{
+-			match('r');
+-			break;
+-		}
+-		case 't':
+-		{
+-			match('t');
+-			break;
+-		}
+-		case 'b':
+-		{
+-			match('b');
+-			break;
+-		}
+-		case 'f':
+-		{
+-			match('f');
+-			break;
+-		}
+-		case '"':
+-		{
+-			match('"');
+-			break;
+-		}
+-		case '\'':
+-		{
+-			match('\'');
+-			break;
+-		}
+-		case '\\':
+-		{
+-			match('\\');
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		{
+-			{
+-			matchRange('0','3');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mDIGIT(false);
+-				{
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-					mDIGIT(false);
+-				}
+-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case '4':  case '5':  case '6':  case '7':
+-		{
+-			{
+-			matchRange('4','7');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')) && (true)) {
+-				mDIGIT(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = DIGIT;
+-		int _saveIndex;
+-		
+-		matchRange('0','9');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = new long[8];
+-		data[0]=-103079215112L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = new long[8];
+-		data[0]=-145135534866440L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = new long[8];
+-		data[0]=-141407503262728L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = { 0L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = { 1103806604800L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	private static final long[] mk_tokenSet_6() {
+-		long[] data = { 287959436729787904L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+-	private static final long[] mk_tokenSet_7() {
+-		long[] data = new long[8];
+-		data[0]=-17179869192L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+-	private static final long[] mk_tokenSet_8() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+-	private static final long[] mk_tokenSet_9() {
+-		long[] data = { 287948901175001088L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
+-	private static final long[] mk_tokenSet_10() {
+-		long[] data = { 287950056521213440L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
+-	private static final long[] mk_tokenSet_11() {
+-		long[] data = { 287958332923183104L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11());
+-	private static final long[] mk_tokenSet_12() {
+-		long[] data = { 287978128427460096L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_12 = new BitSet(mk_tokenSet_12());
+-	private static final long[] mk_tokenSet_13() {
+-		long[] data = { 2306123388973753856L, 671088640L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_13 = new BitSet(mk_tokenSet_13());
+-	private static final long[] mk_tokenSet_14() {
+-		long[] data = { 287952805300282880L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_14 = new BitSet(mk_tokenSet_14());
+-	private static final long[] mk_tokenSet_15() {
+-		long[] data = { 2305843013508670976L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_15 = new BitSet(mk_tokenSet_15());
+-	private static final long[] mk_tokenSet_16() {
+-		long[] data = { 2306051920717948416L, 536870912L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_16 = new BitSet(mk_tokenSet_16());
+-	private static final long[] mk_tokenSet_17() {
+-		long[] data = { 208911504254464L, 536870912L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_17 = new BitSet(mk_tokenSet_17());
+-	private static final long[] mk_tokenSet_18() {
+-		long[] data = { 1151051235328L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_18 = new BitSet(mk_tokenSet_18());
+-	private static final long[] mk_tokenSet_19() {
+-		long[] data = { 189120294954496L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_19 = new BitSet(mk_tokenSet_19());
+-	private static final long[] mk_tokenSet_20() {
+-		long[] data = { 288139722277004800L, 576460746129407998L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_20 = new BitSet(mk_tokenSet_20());
+-	private static final long[] mk_tokenSet_21() {
+-		long[] data = { 288049596683265536L, 576460746666278910L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_21 = new BitSet(mk_tokenSet_21());
+-	private static final long[] mk_tokenSet_22() {
+-		long[] data = { 287960536241415680L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_22 = new BitSet(mk_tokenSet_22());
+-	private static final long[] mk_tokenSet_23() {
+-		long[] data = { 287958337218160128L, 576460745995190270L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_23 = new BitSet(mk_tokenSet_23());
+-	private static final long[] mk_tokenSet_24() {
+-		long[] data = { 288228817078593024L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_24 = new BitSet(mk_tokenSet_24());
+-	private static final long[] mk_tokenSet_25() {
+-		long[] data = { 288158448334415360L, 576460746532061182L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_25 = new BitSet(mk_tokenSet_25());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexerTokenTypes.java glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexerTokenTypes.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexerTokenTypes.java	2006-02-08 22:31:10.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/actions/java/ActionLexerTokenTypes.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,33 +0,0 @@
+-// $ANTLR : "action.g" -> "ActionLexer.java"$
+-
+-package persistence.antlr.actions.java;
+-
+-public interface ActionLexerTokenTypes {
+-	int EOF = 1;
+-	int NULL_TREE_LOOKAHEAD = 3;
+-	int ACTION = 4;
+-	int STUFF = 5;
+-	int AST_ITEM = 6;
+-	int TEXT_ITEM = 7;
+-	int TREE = 8;
+-	int TREE_ELEMENT = 9;
+-	int AST_CONSTRUCTOR = 10;
+-	int AST_CTOR_ELEMENT = 11;
+-	int ID_ELEMENT = 12;
+-	int TEXT_ARG = 13;
+-	int TEXT_ARG_ELEMENT = 14;
+-	int TEXT_ARG_ID_ELEMENT = 15;
+-	int ARG = 16;
+-	int ID = 17;
+-	int VAR_ASSIGN = 18;
+-	int COMMENT = 19;
+-	int SL_COMMENT = 20;
+-	int ML_COMMENT = 21;
+-	int CHAR = 22;
+-	int STRING = 23;
+-	int ESC = 24;
+-	int DIGIT = 25;
+-	int INT = 26;
+-	int INT_OR_FLOAT = 27;
+-	int WS = 28;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ActionTransInfo.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ActionTransInfo.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ActionTransInfo.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ActionTransInfo.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,22 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**
+- * This class contains information about how an action
+- * was translated (using the AST conversion rules).
+- */
+-public class ActionTransInfo {
+-    public boolean assignToRoot = false;	// somebody did a "#rule = "
+-    public String refRuleRoot = null;		// somebody referenced #rule; string is translated var
+-    public String followSetName = null;		// somebody referenced $FOLLOW; string is the name of the lookahead set
+-
+-    public String toString() {
+-        return "assignToRoot:" + assignToRoot + ", refRuleRoot:"
+-				+ refRuleRoot + ", FOLLOW Set:" + followSetName;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/AlternativeBlock.java glassfish-gil/entity-persistence/src/java/persistence/antlr/AlternativeBlock.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/AlternativeBlock.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/AlternativeBlock.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,225 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-/**A list of alternatives */
+-class AlternativeBlock extends AlternativeElement {
+-    protected String initAction = null;	// string for init action {...}
+-    protected Vector alternatives;	// Contains Alternatives
+-
+-    protected String label;			// can label a looping block to break out of it.
+-
+-    protected int alti, altj;		// which alts are being compared at the moment with
+-    // deterministic()?
+-    protected int analysisAlt;		// which alt are we computing look on?  Must be alti or altj
+-
+-    protected boolean hasAnAction = false;	// does any alt have an action?
+-    protected boolean hasASynPred = false;	// does any alt have a syntactic predicate?
+-
+-    protected int ID = 0;				// used to generate unique variables
+-    protected static int nblks;	// how many blocks have we allocated?
+-    boolean not = false;				// true if block is inverted.
+-
+-    boolean greedy = true;			// Blocks are greedy by default
+-    boolean greedySet = false;		// but, if not explicitly greedy, warning might be generated
+-
+-    protected boolean doAutoGen = true;	// false if no AST (or text) to be generated for block
+-
+-    protected boolean warnWhenFollowAmbig = true; // warn when an empty path or exit path
+-
+-    protected boolean generateAmbigWarnings = true;  // the general warning "shut-up" mechanism
+-    // conflicts with alt of subrule.
+-    // Turning this off will suppress stuff
+-    // like the if-then-else ambig.
+-
+-    public AlternativeBlock(Grammar g) {
+-        super(g);
+-        alternatives = new Vector(5);
+-        this.not = false;
+-        nblks++;
+-        ID = nblks;
+-    }
+-
+-    public AlternativeBlock(Grammar g, Token start, boolean not) {
+-        super(g, start);
+-        alternatives = new Vector(5);
+-//		this.line = start.getLine();
+-//		this.column = start.getColumn();
+-        this.not = not;
+-        nblks++;
+-        ID = nblks;
+-    }
+-
+-    public void addAlternative(Alternative alt) {
+-        alternatives.appendElement(alt);
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Alternative getAlternativeAt(int i) {
+-        return (Alternative)alternatives.elementAt(i);
+-    }
+-
+-    public Vector getAlternatives() {
+-        return alternatives;
+-    }
+-
+-    public boolean getAutoGen() {
+-        return doAutoGen;
+-    }
+-
+-    public String getInitAction() {
+-        return initAction;
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public void prepareForAnalysis() {
+-        for (int i = 0; i < alternatives.size(); i++) {
+-            // deterministic() uses an alternative cache and sets lookahead depth
+-            Alternative a = (Alternative)alternatives.elementAt(i);
+-            a.cache = new Lookahead[grammar.maxk + 1];
+-            a.lookaheadDepth = GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT;
+-        }
+-    }
+-
+-    /**Walk the syntactic predicate and, for a rule ref R, remove
+-     * the ref from the list of FOLLOW references for R (stored
+-     * in the symbol table.
+-     */
+-    public void removeTrackingOfRuleRefs(Grammar g) {
+-        for (int i = 0; i < alternatives.size(); i++) {
+-            Alternative alt = getAlternativeAt(i);
+-            AlternativeElement elem = alt.head;
+-            while (elem != null) {
+-                if (elem instanceof RuleRefElement) {
+-                    RuleRefElement rr = (RuleRefElement)elem;
+-                    RuleSymbol rs = (RuleSymbol)g.getSymbol(rr.targetRule);
+-                    if (rs == null) {
+-                        grammar.antlrTool.error("rule " + rr.targetRule + " referenced in (...)=>, but not defined");
+-                    }
+-                    else {
+-                        rs.references.removeElement(rr);
+-                    }
+-                }
+-                else if (elem instanceof AlternativeBlock) {// recurse into subrules
+-                    ((AlternativeBlock)elem).removeTrackingOfRuleRefs(g);
+-                }
+-                elem = elem.next;
+-            }
+-        }
+-    }
+-
+-    public void setAlternatives(Vector v) {
+-        alternatives = v;
+-    }
+-
+-    public void setAutoGen(boolean doAutoGen_) {
+-        doAutoGen = doAutoGen_;
+-    }
+-
+-    public void setInitAction(String initAction_) {
+-        initAction = initAction_;
+-    }
+-
+-    public void setLabel(String label_) {
+-        label = label_;
+-    }
+-
+-    public void setOption(Token key, Token value) {
+-        if (key.getText().equals("warnWhenFollowAmbig")) {
+-            if (value.getText().equals("true")) {
+-                warnWhenFollowAmbig = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                warnWhenFollowAmbig = false;
+-            }
+-            else {
+-                grammar.antlrTool.error("Value for warnWhenFollowAmbig must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-        }
+-        else if (key.getText().equals("generateAmbigWarnings")) {
+-            if (value.getText().equals("true")) {
+-                generateAmbigWarnings = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                generateAmbigWarnings = false;
+-            }
+-            else {
+-                grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-        }
+-        else if (key.getText().equals("greedy")) {
+-            if (value.getText().equals("true")) {
+-                greedy = true;
+-                greedySet = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                greedy = false;
+-                greedySet = true;
+-            }
+-            else {
+-                grammar.antlrTool.error("Value for greedy must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-        }
+-        else {
+-            grammar.antlrTool.error("Invalid subrule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn());
+-        }
+-    }
+-
+-    public String toString() {
+-        String s = " (";
+-        if (initAction != null) {
+-            s += initAction;
+-        }
+-        for (int i = 0; i < alternatives.size(); i++) {
+-            Alternative alt = getAlternativeAt(i);
+-            Lookahead cache[] = alt.cache;
+-            int k = alt.lookaheadDepth;
+-            // dump lookahead set
+-            if (k == GrammarAnalyzer.LOOKAHEAD_DEPTH_INIT) {
+-            }
+-            else if (k == GrammarAnalyzer.NONDETERMINISTIC) {
+-                s += "{?}:";
+-            }
+-            else {
+-                s += " {";
+-                for (int j = 1; j <= k; j++) {
+-                    s += cache[j].toString(",", grammar.tokenManager.getVocabulary());
+-                    if (j < k && cache[j + 1] != null) s += ";";
+-                }
+-                s += "}:";
+-            }
+-            // dump alternative including pred (if any)
+-            AlternativeElement p = alt.head;
+-            String pred = alt.semPred;
+-            if (pred != null) {
+-                s += pred;
+-            }
+-            while (p != null) {
+-                s += p;
+-                p = p.next;
+-            }
+-            if (i < (alternatives.size() - 1)) {
+-                s += " |";
+-            }
+-        }
+-        s += " )";
+-        return s;
+-    }
+-
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/AlternativeElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/AlternativeElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/AlternativeElement.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/AlternativeElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,42 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-abstract class AlternativeElement extends GrammarElement {
+-    AlternativeElement next;
+-    protected int autoGenType = AUTO_GEN_NONE;
+-
+-    protected String enclosingRuleName;
+-
+-    public AlternativeElement(Grammar g) {
+-        super(g);
+-    }
+-
+-    public AlternativeElement(Grammar g, Token start) {
+-        super(g, start);
+-    }
+-
+-    public AlternativeElement(Grammar g, Token start, int autoGenType_) {
+-        super(g, start);
+-        autoGenType = autoGenType_;
+-    }
+-
+-    public int getAutoGenType() {
+-        return autoGenType;
+-    }
+-
+-    public void setAutoGenType(int a) {
+-        autoGenType = a;
+-    }
+-
+-    public String getLabel() {
+-        return null;
+-    }
+-
+-    public void setLabel(String label) {
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Alternative.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Alternative.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Alternative.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Alternative.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,72 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-
+-/** Intermediate data class holds information about an alternative */
+-class Alternative {
+-    // Tracking alternative linked list
+-    AlternativeElement head;   // head of alt element list
+-    AlternativeElement tail;  // last element added
+-
+-    // Syntactic predicate block if non-null
+-    protected SynPredBlock synPred;
+-    // Semantic predicate action if non-null
+-    protected String semPred;
+-    // Exception specification if non-null
+-    protected ExceptionSpec exceptionSpec;
+-    // Init action if non-null;
+-    protected Lookahead[] cache;	// lookahead for alt.  Filled in by
+-    // deterministic() only!!!!!!!  Used for
+-    // code gen after calls to deterministic()
+-    // and used by deterministic for (...)*, (..)+,
+-    // and (..)? blocks.  1..k
+-    protected int lookaheadDepth;	// each alt has different look depth possibly.
+-    // depth can be NONDETERMINISTIC too.
+-    // 0..n-1
+-// If non-null, Tree specification ala -> A B C (not implemented)
+-    protected Token treeSpecifier = null;
+-    // True of AST generation is on for this alt
+-    private boolean doAutoGen;
+-
+-
+-    public Alternative() {
+-    }
+-
+-    public Alternative(AlternativeElement firstElement) {
+-        addElement(firstElement);
+-    }
+-
+-    public void addElement(AlternativeElement e) {
+-        // Link the element into the list
+-        if (head == null) {
+-            head = tail = e;
+-        }
+-        else {
+-            tail.next = e;
+-            tail = e;
+-        }
+-    }
+-
+-    public boolean atStart() {
+-        return head == null;
+-    }
+-
+-    public boolean getAutoGen() {
+-        // Don't build an AST if there is a tree-rewrite-specifier
+-        return doAutoGen && treeSpecifier == null;
+-    }
+-
+-    public Token getTreeSpecifier() {
+-        return treeSpecifier;
+-    }
+-
+-    public void setAutoGen(boolean doAutoGen_) {
+-        doAutoGen = doAutoGen_;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRError.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRError.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRError.java	2006-08-31 00:34:02.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRError.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,25 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class ANTLRError extends Error {
+-
+-    /**
+-     * ANTLRError constructor comment.
+-     */
+-    public ANTLRError() {
+-        super();
+-    }
+-
+-    /**
+-     * ANTLRError constructor comment.
+-     * @param s java.lang.String
+-     */
+-    public ANTLRError(String s) {
+-        super(s);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRException.java	2006-08-31 00:34:02.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,18 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class ANTLRException extends Exception {
+-
+-    public ANTLRException() {
+-        super();
+-    }
+-
+-    public ANTLRException(String s) {
+-        super(s);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRGrammarParseBehavior.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRGrammarParseBehavior.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRGrammarParseBehavior.java	2006-08-31 00:34:02.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRGrammarParseBehavior.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,123 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-public interface ANTLRGrammarParseBehavior {
+-    public void abortGrammar();
+-
+-    public void beginAlt(boolean doAST_);
+-
+-    public void beginChildList();
+-
+-    // Exception handling
+-    public void beginExceptionGroup();
+-
+-    public void beginExceptionSpec(Token label);
+-
+-    public void beginSubRule(Token label, Token start, boolean not);
+-
+-    // Trees
+-    public void beginTree(Token tok) throws SemanticException;
+-
+-    public void defineRuleName(Token r, String access, boolean ruleAST, String docComment) throws SemanticException;
+-
+-    public void defineToken(Token tokname, Token tokliteral);
+-
+-    public void endAlt();
+-
+-    public void endChildList();
+-
+-    public void endExceptionGroup();
+-
+-    public void endExceptionSpec();
+-
+-    public void endGrammar();
+-
+-    public void endOptions();
+-
+-    public void endRule(String r);
+-
+-    public void endSubRule();
+-
+-    public void endTree();
+-
+-    public void hasError();
+-
+-    public void noASTSubRule();
+-
+-    public void oneOrMoreSubRule();
+-
+-    public void optionalSubRule();
+-
+-    public void refAction(Token action);
+-
+-    public void refArgAction(Token action);
+-
+-    public void setUserExceptions(String thr);
+-
+-    public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule);
+-
+-    public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule);
+-
+-    public void refElementOption(Token option, Token value);
+-
+-    public void refTokensSpecElementOption(Token tok, Token option, Token value);
+-
+-    public void refExceptionHandler(Token exTypeAndName, Token action);
+-
+-    public void refHeaderAction(Token name, Token act);
+-
+-    public void refInitAction(Token action);
+-
+-    public void refMemberAction(Token act);
+-
+-    public void refPreambleAction(Token act);
+-
+-    public void refReturnAction(Token returnAction);
+-
+-    public void refRule(Token idAssign, Token r, Token label, Token arg, int autoGenType);
+-
+-    public void refSemPred(Token pred);
+-
+-    public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule);
+-
+-    public void refToken(Token assignId, Token t, Token label, Token args,
+-                         boolean inverted, int autoGenType, boolean lastInRule);
+-
+-    public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule);
+-
+-    // Tree specifiers
+-    public void refTreeSpecifier(Token treeSpec);
+-
+-    public void refWildcard(Token t, Token label, int autoGenType);
+-
+-    public void setArgOfRuleRef(Token argaction);
+-
+-    public void setCharVocabulary(BitSet b);
+-
+-    // Options
+-    public void setFileOption(Token key, Token value, String filename);
+-
+-    public void setGrammarOption(Token key, Token value);
+-
+-    public void setRuleOption(Token key, Token value);
+-
+-    public void setSubruleOption(Token key, Token value);
+-
+-    public void startLexer(String file, Token name, String superClass, String doc);
+-
+-    // Flow control for grammars
+-    public void startParser(String file, Token name, String superClass, String doc);
+-
+-    public void startTreeWalker(String file, Token name, String superClass, String doc);
+-
+-    public void synPred();
+-
+-    public void zeroOrMoreSubRule();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRHashString.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRHashString.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRHashString.java	2006-08-31 00:34:02.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRHashString.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,106 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-// class implements a String-like object whose sole purpose is to be
+-// entered into a lexer HashTable.  It uses a lexer object to get
+-// information about case sensitivity.
+-
+-public class ANTLRHashString {
+-    // only one of s or buf is non-null
+-    private String s;
+-    private char[] buf;
+-    private int len;
+-    private CharScanner lexer;
+-    private static final int prime = 151;
+-
+-
+-    public ANTLRHashString(char[] buf, int length, CharScanner lexer) {
+-        this.lexer = lexer;
+-        setBuffer(buf, length);
+-    }
+-
+-    // Hash strings constructed this way are unusable until setBuffer or setString are called.
+-    public ANTLRHashString(CharScanner lexer) {
+-        this.lexer = lexer;
+-    }
+-
+-    public ANTLRHashString(String s, CharScanner lexer) {
+-        this.lexer = lexer;
+-        setString(s);
+-    }
+-
+-    private final char charAt(int index) {
+-        return (s != null) ? s.charAt(index) : buf[index];
+-    }
+-
+-    // Return true if o is an ANTLRHashString equal to this.
+-    public boolean equals(Object o) {
+-        if (!(o instanceof ANTLRHashString) && !(o instanceof String)) {
+-            return false;
+-        }
+-
+-        ANTLRHashString s;
+-        if (o instanceof String) {
+-            s = new ANTLRHashString((String)o, lexer);
+-        }
+-        else {
+-            s = (ANTLRHashString)o;
+-        }
+-        int l = length();
+-        if (s.length() != l) {
+-            return false;
+-        }
+-        if (lexer.getCaseSensitiveLiterals()) {
+-            for (int i = 0; i < l; i++) {
+-                if (charAt(i) != s.charAt(i)) {
+-                    return false;
+-                }
+-            }
+-        }
+-        else {
+-            for (int i = 0; i < l; i++) {
+-                if (lexer.toLower(charAt(i)) != lexer.toLower(s.charAt(i))) {
+-                    return false;
+-                }
+-            }
+-        }
+-        return true;
+-    }
+-
+-    public int hashCode() {
+-        int hashval = 0;
+-        int l = length();
+-
+-        if (lexer.getCaseSensitiveLiterals()) {
+-            for (int i = 0; i < l; i++) {
+-                hashval = hashval * prime + charAt(i);
+-            }
+-        }
+-        else {
+-            for (int i = 0; i < l; i++) {
+-                hashval = hashval * prime + lexer.toLower(charAt(i));
+-            }
+-        }
+-        return hashval;
+-    }
+-
+-    private final int length() {
+-        return (s != null) ? s.length() : len;
+-    }
+-
+-    public void setBuffer(char[] buf, int length) {
+-        this.buf = buf;
+-        this.len = length;
+-        s = null;
+-    }
+-
+-    public void setString(String s) {
+-        this.s = s;
+-        buf = null;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRLexer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRLexer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRLexer.java	2006-02-08 22:30:32.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRLexer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,1468 +0,0 @@
+-// $ANTLR : "antlr.g" -> "ANTLRLexer.java"$
+-
+-package persistence.antlr;
+-
+-import java.io.InputStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.TokenStreamRecognitionException;
+-import persistence.antlr.CharStreamException;
+-import persistence.antlr.CharStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import java.io.Reader;
+-import java.util.Hashtable;
+-import persistence.antlr.CharScanner;
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.ByteBuffer;
+-import persistence.antlr.CharBuffer;
+-import persistence.antlr.Token;
+-import persistence.antlr.CommonToken;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.ANTLRHashString;
+-import persistence.antlr.LexerSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.SemanticException;
+-
+-public class ANTLRLexer extends persistence.antlr.CharScanner implements ANTLRTokenTypes, TokenStream
+- {
+-
+-	/**Convert 'c' to an integer char value. */
+-	public static int escapeCharValue(String cs) {
+-		//System.out.println("escapeCharValue("+cs+")");
+-		if ( cs.charAt(1)!='\\' ) return 0;
+-		switch ( cs.charAt(2) ) {
+-		case 'b' : return '\b';
+-		case 'r' : return '\r';
+-		case 't' : return '\t';
+-		case 'n' : return '\n';
+-		case 'f' : return '\f';
+-		case '"' : return '\"';
+-		case '\'' :return '\'';
+-		case '\\' :return '\\';
+-
+-		case 'u' :
+-			// Unicode char
+-			if (cs.length() != 8) {
+-				return 0;
+-			}
+-			else {
+-				return
+-					Character.digit(cs.charAt(3), 16) * 16 * 16 * 16 +
+-					Character.digit(cs.charAt(4), 16) * 16 * 16 +
+-					Character.digit(cs.charAt(5), 16) * 16 +
+-					Character.digit(cs.charAt(6), 16);
+-			}
+-
+-		case '0' :
+-		case '1' :
+-		case '2' :
+-		case '3' :
+-			if ( cs.length()>5 && Character.isDigit(cs.charAt(4)) ) {
+-				return (cs.charAt(2)-'0')*8*8 + (cs.charAt(3)-'0')*8 + (cs.charAt(4)-'0');
+-			}
+-			if ( cs.length()>4 && Character.isDigit(cs.charAt(3)) ) {
+-				return (cs.charAt(2)-'0')*8 + (cs.charAt(3)-'0');
+-			}
+-			return cs.charAt(2)-'0';
+-
+-		case '4' :
+-		case '5' :
+-		case '6' :
+-		case '7' :
+-			if ( cs.length()>4 && Character.isDigit(cs.charAt(3)) ) {
+-				return (cs.charAt(2)-'0')*8 + (cs.charAt(3)-'0');
+-			}
+-			return cs.charAt(2)-'0';
+-
+-		default :
+-			return 0;
+-		}
+-	}
+-
+-	public static int tokenTypeForCharLiteral(String lit) {
+-		if ( lit.length()>3 ) {  // does char contain escape?
+-			return escapeCharValue(lit);
+-		}
+-		else {
+-			return lit.charAt(1);
+-		}
+-	}
+-public ANTLRLexer(InputStream in) {
+-	this(new ByteBuffer(in));
+-}
+-public ANTLRLexer(Reader in) {
+-	this(new CharBuffer(in));
+-}
+-public ANTLRLexer(InputBuffer ib) {
+-	this(new LexerSharedInputState(ib));
+-}
+-public ANTLRLexer(LexerSharedInputState state) {
+-	super(state);
+-	caseSensitiveLiterals = true;
+-	setCaseSensitive(true);
+-	literals = new Hashtable();
+-	literals.put(new ANTLRHashString("public", this), new Integer(31));
+-	literals.put(new ANTLRHashString("class", this), new Integer(10));
+-	literals.put(new ANTLRHashString("header", this), new Integer(5));
+-	literals.put(new ANTLRHashString("throws", this), new Integer(37));
+-	literals.put(new ANTLRHashString("lexclass", this), new Integer(9));
+-	literals.put(new ANTLRHashString("catch", this), new Integer(40));
+-	literals.put(new ANTLRHashString("private", this), new Integer(32));
+-	literals.put(new ANTLRHashString("options", this), new Integer(51));
+-	literals.put(new ANTLRHashString("extends", this), new Integer(11));
+-	literals.put(new ANTLRHashString("protected", this), new Integer(30));
+-	literals.put(new ANTLRHashString("TreeParser", this), new Integer(13));
+-	literals.put(new ANTLRHashString("Parser", this), new Integer(29));
+-	literals.put(new ANTLRHashString("Lexer", this), new Integer(12));
+-	literals.put(new ANTLRHashString("returns", this), new Integer(35));
+-	literals.put(new ANTLRHashString("charVocabulary", this), new Integer(18));
+-	literals.put(new ANTLRHashString("tokens", this), new Integer(4));
+-	literals.put(new ANTLRHashString("exception", this), new Integer(39));
+-}
+-
+-public Token nextToken() throws TokenStreamException {
+-	Token theRetToken=null;
+-tryAgain:
+-	for (;;) {
+-		Token _token = null;
+-		int _ttype = Token.INVALID_TYPE;
+-		resetText();
+-		try {   // for char stream error handling
+-			try {   // for lexical error handling
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '/':
+-				{
+-					mCOMMENT(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '<':
+-				{
+-					mOPEN_ELEMENT_OPTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '>':
+-				{
+-					mCLOSE_ELEMENT_OPTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ',':
+-				{
+-					mCOMMA(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '?':
+-				{
+-					mQUESTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '#':
+-				{
+-					mTREE_BEGIN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '(':
+-				{
+-					mLPAREN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ')':
+-				{
+-					mRPAREN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ':':
+-				{
+-					mCOLON(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '*':
+-				{
+-					mSTAR(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '+':
+-				{
+-					mPLUS(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ';':
+-				{
+-					mSEMI(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '^':
+-				{
+-					mCARET(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '!':
+-				{
+-					mBANG(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '|':
+-				{
+-					mOR(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '~':
+-				{
+-					mNOT_OP(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '}':
+-				{
+-					mRCURLY(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '\'':
+-				{
+-					mCHAR_LITERAL(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '"':
+-				{
+-					mSTRING_LITERAL(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':
+-				{
+-					mINT(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '[':
+-				{
+-					mARG_ACTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '{':
+-				{
+-					mACTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':
+-				{
+-					mTOKEN_REF(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case 'a':  case 'b':  case 'c':  case 'd':
+-				case 'e':  case 'f':  case 'g':  case 'h':
+-				case 'i':  case 'j':  case 'k':  case 'l':
+-				case 'm':  case 'n':  case 'o':  case 'p':
+-				case 'q':  case 'r':  case 's':  case 't':
+-				case 'u':  case 'v':  case 'w':  case 'x':
+-				case 'y':  case 'z':
+-				{
+-					mRULE_REF(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				default:
+-					if ((LA(1)=='=') && (LA(2)=='>')) {
+-						mIMPLIES(true);
+-						theRetToken=_returnToken;
+-					}
+-					else if ((LA(1)=='.') && (LA(2)=='.')) {
+-						mRANGE(true);
+-						theRetToken=_returnToken;
+-					}
+-					else if ((LA(1)=='=') && (true)) {
+-						mASSIGN(true);
+-						theRetToken=_returnToken;
+-					}
+-					else if ((LA(1)=='.') && (true)) {
+-						mWILDCARD(true);
+-						theRetToken=_returnToken;
+-					}
+-				else {
+-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				}
+-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+-				_ttype = _returnToken.getType();
+-				_returnToken.setType(_ttype);
+-				return _returnToken;
+-			}
+-			catch (RecognitionException e) {
+-				throw new TokenStreamRecognitionException(e);
+-			}
+-		}
+-		catch (CharStreamException cse) {
+-			if ( cse instanceof CharStreamIOException ) {
+-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+-			}
+-			else {
+-				throw new TokenStreamException(cse.getMessage());
+-			}
+-		}
+-	}
+-}
+-
+-	public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case ' ':
+-		{
+-			match(' ');
+-			break;
+-		}
+-		case '\t':
+-		{
+-			match('\t');
+-			break;
+-		}
+-		case '\n':
+-		{
+-			match('\n');
+-			newline();
+-			break;
+-		}
+-		default:
+-			if ((LA(1)=='\r') && (LA(2)=='\n')) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)=='\r') && (true)) {
+-				match('\r');
+-				newline();
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_ttype = Token.SKIP;
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMENT;
+-		int _saveIndex;
+-		Token t=null;
+-		
+-		{
+-		if ((LA(1)=='/') && (LA(2)=='/')) {
+-			mSL_COMMENT(false);
+-		}
+-		else if ((LA(1)=='/') && (LA(2)=='*')) {
+-			mML_COMMENT(true);
+-			t=_returnToken;
+-			_ttype = t.getType();
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		if ( _ttype != DOC_COMMENT ) _ttype = Token.SKIP;
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SL_COMMENT;
+-		int _saveIndex;
+-		
+-		match("//");
+-		{
+-		_loop153:
+-		do {
+-			if ((_tokenSet_0.member(LA(1)))) {
+-				{
+-				match(_tokenSet_0);
+-				}
+-			}
+-			else {
+-				break _loop153;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		if ((LA(1)=='\r') && (LA(2)=='\n')) {
+-			match('\r');
+-			match('\n');
+-		}
+-		else if ((LA(1)=='\r') && (true)) {
+-			match('\r');
+-		}
+-		else if ((LA(1)=='\n')) {
+-			match('\n');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		newline();
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ML_COMMENT;
+-		int _saveIndex;
+-		
+-		match("/*");
+-		{
+-		if (((LA(1)=='*') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&( LA(2)!='/' )) {
+-			match('*');
+-			_ttype = DOC_COMMENT;
+-		}
+-		else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		{
+-		_loop159:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='*') && (LA(2)=='/')) break _loop159;
+-			if ((LA(1)=='\r') && (LA(2)=='\n')) {
+-				match('\r');
+-				match('\n');
+-				newline();
+-			}
+-			else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				match('\r');
+-				newline();
+-			}
+-			else if ((_tokenSet_0.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				{
+-				match(_tokenSet_0);
+-				}
+-			}
+-			else if ((LA(1)=='\n')) {
+-				match('\n');
+-				newline();
+-			}
+-			else {
+-				break _loop159;
+-			}
+-			
+-		} while (true);
+-		}
+-		match("*/");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mOPEN_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = OPEN_ELEMENT_OPTION;
+-		int _saveIndex;
+-		
+-		match('<');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCLOSE_ELEMENT_OPTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CLOSE_ELEMENT_OPTION;
+-		int _saveIndex;
+-		
+-		match('>');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCOMMA(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMA;
+-		int _saveIndex;
+-		
+-		match(',');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mQUESTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = QUESTION;
+-		int _saveIndex;
+-		
+-		match('?');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mTREE_BEGIN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TREE_BEGIN;
+-		int _saveIndex;
+-		
+-		match("#(");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = LPAREN;
+-		int _saveIndex;
+-		
+-		match('(');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RPAREN;
+-		int _saveIndex;
+-		
+-		match(')');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCOLON(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COLON;
+-		int _saveIndex;
+-		
+-		match(':');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSTAR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STAR;
+-		int _saveIndex;
+-		
+-		match('*');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mPLUS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = PLUS;
+-		int _saveIndex;
+-		
+-		match('+');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ASSIGN;
+-		int _saveIndex;
+-		
+-		match('=');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mIMPLIES(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = IMPLIES;
+-		int _saveIndex;
+-		
+-		match("=>");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSEMI(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SEMI;
+-		int _saveIndex;
+-		
+-		match(';');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCARET(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CARET;
+-		int _saveIndex;
+-		
+-		match('^');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mBANG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = BANG;
+-		int _saveIndex;
+-		
+-		match('!');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mOR(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = OR;
+-		int _saveIndex;
+-		
+-		match('|');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mWILDCARD(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WILDCARD;
+-		int _saveIndex;
+-		
+-		match('.');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRANGE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RANGE;
+-		int _saveIndex;
+-		
+-		match("..");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mNOT_OP(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = NOT_OP;
+-		int _saveIndex;
+-		
+-		match('~');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRCURLY(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RCURLY;
+-		int _saveIndex;
+-		
+-		match('}');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CHAR_LITERAL;
+-		int _saveIndex;
+-		
+-		match('\'');
+-		{
+-		if ((LA(1)=='\\')) {
+-			mESC(false);
+-		}
+-		else if ((_tokenSet_1.member(LA(1)))) {
+-			matchNot('\'');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		match('\'');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ESC;
+-		int _saveIndex;
+-		
+-		match('\\');
+-		{
+-		switch ( LA(1)) {
+-		case 'n':
+-		{
+-			match('n');
+-			break;
+-		}
+-		case 'r':
+-		{
+-			match('r');
+-			break;
+-		}
+-		case 't':
+-		{
+-			match('t');
+-			break;
+-		}
+-		case 'b':
+-		{
+-			match('b');
+-			break;
+-		}
+-		case 'f':
+-		{
+-			match('f');
+-			break;
+-		}
+-		case 'w':
+-		{
+-			match('w');
+-			break;
+-		}
+-		case 'a':
+-		{
+-			match('a');
+-			break;
+-		}
+-		case '"':
+-		{
+-			match('"');
+-			break;
+-		}
+-		case '\'':
+-		{
+-			match('\'');
+-			break;
+-		}
+-		case '\\':
+-		{
+-			match('\\');
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		{
+-			{
+-			matchRange('0','3');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchRange('0','7');
+-				{
+-				if (((LA(1) >= '0' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-					matchRange('0','7');
+-				}
+-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case '4':  case '5':  case '6':  case '7':
+-		{
+-			{
+-			matchRange('4','7');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '7')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchRange('0','7');
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case 'u':
+-		{
+-			match('u');
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSTRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STRING_LITERAL;
+-		int _saveIndex;
+-		
+-		match('"');
+-		{
+-		_loop184:
+-		do {
+-			if ((LA(1)=='\\')) {
+-				mESC(false);
+-			}
+-			else if ((_tokenSet_2.member(LA(1)))) {
+-				matchNot('"');
+-			}
+-			else {
+-				break _loop184;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('"');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = XDIGIT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			matchRange('0','9');
+-			break;
+-		}
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':
+-		{
+-			matchRange('a','f');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':
+-		{
+-			matchRange('A','F');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = DIGIT;
+-		int _saveIndex;
+-		
+-		matchRange('0','9');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt196=0;
+-		_loop196:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-				matchRange('0','9');
+-			}
+-			else {
+-				if ( _cnt196>=1 ) { break _loop196; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt196++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ARG_ACTION;
+-		int _saveIndex;
+-		
+-		mNESTED_ARG_ACTION(false);
+-		setText(StringUtils.stripFrontBack(getText(), "[", "]"));
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mNESTED_ARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = NESTED_ARG_ACTION;
+-		int _saveIndex;
+-		
+-		match('[');
+-		{
+-		_loop200:
+-		do {
+-			switch ( LA(1)) {
+-			case '[':
+-			{
+-				mNESTED_ARG_ACTION(false);
+-				break;
+-			}
+-			case '\n':
+-			{
+-				match('\n');
+-				newline();
+-				break;
+-			}
+-			case '\'':
+-			{
+-				mCHAR_LITERAL(false);
+-				break;
+-			}
+-			case '"':
+-			{
+-				mSTRING_LITERAL(false);
+-				break;
+-			}
+-			default:
+-				if ((LA(1)=='\r') && (LA(2)=='\n')) {
+-					match('\r');
+-					match('\n');
+-					newline();
+-				}
+-				else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-					match('\r');
+-					newline();
+-				}
+-				else if ((_tokenSet_3.member(LA(1)))) {
+-					matchNot(']');
+-				}
+-			else {
+-				break _loop200;
+-			}
+-			}
+-		} while (true);
+-		}
+-		match(']');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ACTION;
+-		int _saveIndex;
+-		int actionLine=getLine(); int actionColumn = getColumn();
+-		
+-		mNESTED_ACTION(false);
+-		{
+-		if ((LA(1)=='?')) {
+-			match('?');
+-			_ttype = SEMPRED;
+-		}
+-		else {
+-		}
+-		
+-		}
+-		
+-					if ( _ttype==ACTION ) {
+-						setText(StringUtils.stripFrontBack(getText(), "{", "}"));
+-					}
+-					else {
+-						setText(StringUtils.stripFrontBack(getText(), "{", "}?"));
+-					}
+-					CommonToken t = new CommonToken(_ttype,new String(text.getBuffer(),_begin,text.length()-_begin));
+-					t.setLine(actionLine);			// set action line to start
+-					t.setColumn(actionColumn);
+-					_token = t;
+-				
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mNESTED_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = NESTED_ACTION;
+-		int _saveIndex;
+-		
+-		match('{');
+-		{
+-		_loop206:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='}') && (true)) break _loop206;
+-			if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				{
+-				if ((LA(1)=='\r') && (LA(2)=='\n')) {
+-					match('\r');
+-					match('\n');
+-					newline();
+-				}
+-				else if ((LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-					match('\r');
+-					newline();
+-				}
+-				else if ((LA(1)=='\n')) {
+-					match('\n');
+-					newline();
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if ((LA(1)=='{') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mNESTED_ACTION(false);
+-			}
+-			else if ((LA(1)=='\'') && (_tokenSet_4.member(LA(2)))) {
+-				mCHAR_LITERAL(false);
+-			}
+-			else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+-				mCOMMENT(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mSTRING_LITERAL(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop206;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('}');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mTOKEN_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = TOKEN_REF;
+-		int _saveIndex;
+-		
+-		matchRange('A','Z');
+-		{
+-		_loop209:
+-		do {
+-			switch ( LA(1)) {
+-			case 'a':  case 'b':  case 'c':  case 'd':
+-			case 'e':  case 'f':  case 'g':  case 'h':
+-			case 'i':  case 'j':  case 'k':  case 'l':
+-			case 'm':  case 'n':  case 'o':  case 'p':
+-			case 'q':  case 'r':  case 's':  case 't':
+-			case 'u':  case 'v':  case 'w':  case 'x':
+-			case 'y':  case 'z':
+-			{
+-				matchRange('a','z');
+-				break;
+-			}
+-			case 'A':  case 'B':  case 'C':  case 'D':
+-			case 'E':  case 'F':  case 'G':  case 'H':
+-			case 'I':  case 'J':  case 'K':  case 'L':
+-			case 'M':  case 'N':  case 'O':  case 'P':
+-			case 'Q':  case 'R':  case 'S':  case 'T':
+-			case 'U':  case 'V':  case 'W':  case 'X':
+-			case 'Y':  case 'Z':
+-			{
+-				matchRange('A','Z');
+-				break;
+-			}
+-			case '_':
+-			{
+-				match('_');
+-				break;
+-			}
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':
+-			{
+-				matchRange('0','9');
+-				break;
+-			}
+-			default:
+-			{
+-				break _loop209;
+-			}
+-			}
+-		} while (true);
+-		}
+-		_ttype = testLiteralsTable(_ttype);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RULE_REF;
+-		int _saveIndex;
+-		
+-			int t=0;
+-		
+-		
+-		t=mINTERNAL_RULE_REF(false);
+-		_ttype=t;
+-		{
+-		if (( true )&&(t==LITERAL_options)) {
+-			mWS_LOOP(false);
+-			{
+-			if ((LA(1)=='{')) {
+-				match('{');
+-				_ttype = OPTIONS;
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else if (( true )&&(t==LITERAL_tokens)) {
+-			mWS_LOOP(false);
+-			{
+-			if ((LA(1)=='{')) {
+-				match('{');
+-				_ttype = TOKENS;
+-			}
+-			else {
+-			}
+-			
+-			}
+-		}
+-		else {
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final int  mINTERNAL_RULE_REF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int t;
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INTERNAL_RULE_REF;
+-		int _saveIndex;
+-		
+-			t = RULE_REF;
+-		
+-		
+-		matchRange('a','z');
+-		{
+-		_loop219:
+-		do {
+-			switch ( LA(1)) {
+-			case 'a':  case 'b':  case 'c':  case 'd':
+-			case 'e':  case 'f':  case 'g':  case 'h':
+-			case 'i':  case 'j':  case 'k':  case 'l':
+-			case 'm':  case 'n':  case 'o':  case 'p':
+-			case 'q':  case 'r':  case 's':  case 't':
+-			case 'u':  case 'v':  case 'w':  case 'x':
+-			case 'y':  case 'z':
+-			{
+-				matchRange('a','z');
+-				break;
+-			}
+-			case 'A':  case 'B':  case 'C':  case 'D':
+-			case 'E':  case 'F':  case 'G':  case 'H':
+-			case 'I':  case 'J':  case 'K':  case 'L':
+-			case 'M':  case 'N':  case 'O':  case 'P':
+-			case 'Q':  case 'R':  case 'S':  case 'T':
+-			case 'U':  case 'V':  case 'W':  case 'X':
+-			case 'Y':  case 'Z':
+-			{
+-				matchRange('A','Z');
+-				break;
+-			}
+-			case '_':
+-			{
+-				match('_');
+-				break;
+-			}
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':
+-			{
+-				matchRange('0','9');
+-				break;
+-			}
+-			default:
+-			{
+-				break _loop219;
+-			}
+-			}
+-		} while (true);
+-		}
+-		t = testLiteralsTable(t);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-		return t;
+-	}
+-	
+-	protected final void mWS_LOOP(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS_LOOP;
+-		int _saveIndex;
+-		
+-		{
+-		_loop216:
+-		do {
+-			switch ( LA(1)) {
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			{
+-				mWS(false);
+-				break;
+-			}
+-			case '/':
+-			{
+-				mCOMMENT(false);
+-				break;
+-			}
+-			default:
+-			{
+-				break _loop216;
+-			}
+-			}
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mWS_OPT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS_OPT;
+-		int _saveIndex;
+-		
+-		{
+-		if ((_tokenSet_5.member(LA(1)))) {
+-			mWS(false);
+-		}
+-		else {
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = new long[8];
+-		data[0]=-9224L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = new long[8];
+-		data[0]=-17179869192L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = new long[8];
+-		data[0]=-566935692296L;
+-		data[1]=-671088641L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRParser.java	2006-02-08 22:30:33.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,2978 +0,0 @@
+-// $ANTLR : "antlr.g" -> "ANTLRParser.java"$
+-
+-package persistence.antlr;
+-
+-import persistence.antlr.TokenBuffer;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import persistence.antlr.LLkParser;
+-import persistence.antlr.Token;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltException;
+-import persistence.antlr.MismatchedTokenException;
+-import persistence.antlr.SemanticException;
+-import persistence.antlr.ParserSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-
+-import java.util.Enumeration;
+-import java.io.DataInputStream;
+-import java.io.InputStream;
+-import java.io.FileInputStream;
+-import java.io.IOException;
+-
+-public class ANTLRParser extends persistence.antlr.LLkParser       implements ANTLRTokenTypes
+- {
+-
+-	private static final boolean DEBUG_PARSER = false;
+-
+-	ANTLRGrammarParseBehavior behavior;
+-	Tool antlrTool;
+-	protected int blockNesting= -1;
+-
+-	public ANTLRParser(
+-		TokenBuffer tokenBuf,
+-		ANTLRGrammarParseBehavior behavior_,
+-		Tool tool_
+-	) {
+-		super(tokenBuf, 1);
+-		tokenNames = _tokenNames;
+-		behavior = behavior_;
+-		antlrTool = tool_;
+-	}
+-
+-        public void reportError(String s) {
+-            antlrTool.error(s, getFilename(), -1, -1);
+-        }
+-
+-        public void reportError(RecognitionException e) {
+-            reportError(e, e.getErrorMessage());
+-        }
+-
+-        public void reportError(RecognitionException e, String s) {
+-            antlrTool.error(s, e.getFilename(), e.getLine(), e.getColumn());
+-        }
+-
+-        public void reportWarning(String s) {
+-            antlrTool.warning(s, getFilename(), -1, -1);
+-        }
+-
+-	private boolean lastInRule() throws TokenStreamException {
+-		if ( blockNesting==0 && (LA(1)==SEMI || LA(1)==LITERAL_exception || LA(1)==OR) ) {
+-			return true;
+-		}
+-		return false;
+-	}
+-
+-	private void checkForMissingEndRule(Token label) {
+-		if ( label.getColumn()==1 ) {
+-			antlrTool.warning("did you forget to terminate previous rule?", getFilename(), label.getLine(), label.getColumn());
+-		}
+-	}
+-
+-protected ANTLRParser(TokenBuffer tokenBuf, int k) {
+-  super(tokenBuf,k);
+-  tokenNames = _tokenNames;
+-}
+-
+-public ANTLRParser(TokenBuffer tokenBuf) {
+-  this(tokenBuf,2);
+-}
+-
+-protected ANTLRParser(TokenStream lexer, int k) {
+-  super(lexer,k);
+-  tokenNames = _tokenNames;
+-}
+-
+-public ANTLRParser(TokenStream lexer) {
+-  this(lexer,2);
+-}
+-
+-public ANTLRParser(ParserSharedInputState state) {
+-  super(state,2);
+-  tokenNames = _tokenNames;
+-}
+-
+-	public final void grammar() throws RecognitionException, TokenStreamException {
+-		
+-		Token  n = null;
+-		Token  h = null;
+-		
+-		try {      // for error handling
+-			{
+-			_loop4:
+-			do {
+-				if ((LA(1)==LITERAL_header)) {
+-					if ( inputState.guessing==0 ) {
+-						
+-									n = null;	// RK: prevent certain orders of header actions
+-													// overwriting eachother.
+-								
+-					}
+-					match(LITERAL_header);
+-					{
+-					switch ( LA(1)) {
+-					case STRING_LITERAL:
+-					{
+-						n = LT(1);
+-						match(STRING_LITERAL);
+-						break;
+-					}
+-					case ACTION:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					h = LT(1);
+-					match(ACTION);
+-					if ( inputState.guessing==0 ) {
+-						
+-									// store the header action
+-									// FIXME: 'n' should be checked for validity
+-									behavior.refHeaderAction(n,h);
+-								
+-					}
+-				}
+-				else {
+-					break _loop4;
+-				}
+-				
+-			} while (true);
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case OPTIONS:
+-			{
+-				fileOptionsSpec();
+-				break;
+-			}
+-			case EOF:
+-			case ACTION:
+-			case DOC_COMMENT:
+-			case LITERAL_lexclass:
+-			case LITERAL_class:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			_loop7:
+-			do {
+-				if (((LA(1) >= ACTION && LA(1) <= LITERAL_class))) {
+-					classDef();
+-				}
+-				else {
+-					break _loop7;
+-				}
+-				
+-			} while (true);
+-			}
+-			match(Token.EOF_TYPE);
+-		}
+-		catch (RecognitionException ex) {
+-			if (inputState.guessing==0) {
+-				
+-				reportError(ex, "rule grammar trapped:\n"+ex.toString());
+-						consumeUntil(EOF);
+-					
+-			} else {
+-				throw ex;
+-			}
+-		}
+-	}
+-	
+-	public final void fileOptionsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token idTok; Token value;
+-		
+-		match(OPTIONS);
+-		{
+-		_loop18:
+-		do {
+-			if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+-				idTok=id();
+-				match(ASSIGN);
+-				value=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.setFileOption(idTok, value,getInputState().filename);
+-				}
+-				match(SEMI);
+-			}
+-			else {
+-				break _loop18;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-	public final void classDef() throws RecognitionException, TokenStreamException {
+-		
+-		Token  a = null;
+-		Token  d = null;
+-		String doc=null;
+-		
+-		try {      // for error handling
+-			{
+-			switch ( LA(1)) {
+-			case ACTION:
+-			{
+-				a = LT(1);
+-				match(ACTION);
+-				if ( inputState.guessing==0 ) {
+-					behavior.refPreambleAction(a);
+-				}
+-				break;
+-			}
+-			case DOC_COMMENT:
+-			case LITERAL_lexclass:
+-			case LITERAL_class:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case DOC_COMMENT:
+-			{
+-				d = LT(1);
+-				match(DOC_COMMENT);
+-				if ( inputState.guessing==0 ) {
+-					doc=d.getText();
+-				}
+-				break;
+-			}
+-			case LITERAL_lexclass:
+-			case LITERAL_class:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			boolean synPredMatched13 = false;
+-			if (((LA(1)==LITERAL_lexclass||LA(1)==LITERAL_class) && (LA(2)==TOKEN_REF||LA(2)==RULE_REF))) {
+-				int _m13 = mark();
+-				synPredMatched13 = true;
+-				inputState.guessing++;
+-				try {
+-					{
+-					switch ( LA(1)) {
+-					case LITERAL_lexclass:
+-					{
+-						match(LITERAL_lexclass);
+-						break;
+-					}
+-					case LITERAL_class:
+-					{
+-						match(LITERAL_class);
+-						id();
+-						match(LITERAL_extends);
+-						match(LITERAL_Lexer);
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-				}
+-				catch (RecognitionException pe) {
+-					synPredMatched13 = false;
+-				}
+-				rewind(_m13);
+-				inputState.guessing--;
+-			}
+-			if ( synPredMatched13 ) {
+-				lexerSpec(doc);
+-			}
+-			else {
+-				boolean synPredMatched15 = false;
+-				if (((LA(1)==LITERAL_class) && (LA(2)==TOKEN_REF||LA(2)==RULE_REF))) {
+-					int _m15 = mark();
+-					synPredMatched15 = true;
+-					inputState.guessing++;
+-					try {
+-						{
+-						match(LITERAL_class);
+-						id();
+-						match(LITERAL_extends);
+-						match(LITERAL_TreeParser);
+-						}
+-					}
+-					catch (RecognitionException pe) {
+-						synPredMatched15 = false;
+-					}
+-					rewind(_m15);
+-					inputState.guessing--;
+-				}
+-				if ( synPredMatched15 ) {
+-					treeParserSpec(doc);
+-				}
+-				else if ((LA(1)==LITERAL_class) && (LA(2)==TOKEN_REF||LA(2)==RULE_REF)) {
+-					parserSpec(doc);
+-				}
+-				else {
+-					throw new NoViableAltException(LT(1), getFilename());
+-				}
+-				}
+-				}
+-				rules();
+-				if ( inputState.guessing==0 ) {
+-					behavior.endGrammar();
+-				}
+-			}
+-			catch (RecognitionException ex) {
+-				if (inputState.guessing==0) {
+-					
+-							if ( ex instanceof NoViableAltException ) {
+-								NoViableAltException e = (NoViableAltException)ex;
+-								// RK: These probably generate inconsequent error messages...
+-								// have to see how this comes out..
+-								if ( e.token.getType()==DOC_COMMENT ) {
+-									reportError(ex, "JAVADOC comments may only prefix rules and grammars");
+-								}
+-								else {
+-									reportError(ex, "rule classDef trapped:\n"+ex.toString());
+-								}
+-							}
+-							else {
+-								reportError(ex, "rule classDef trapped:\n"+ex.toString());
+-							}
+-							behavior.abortGrammar();
+-							boolean consuming = true;
+-							// consume everything until the next class definition or EOF
+-							while (consuming) {
+-								consume();
+-								switch(LA(1)) {
+-								case LITERAL_class:
+-								case LITERAL_lexclass:
+-								case EOF:
+-									consuming = false;
+-									break;
+-								}
+-							}
+-						
+-				} else {
+-					throw ex;
+-				}
+-			}
+-		}
+-		
+-	public final  Token  id() throws RecognitionException, TokenStreamException {
+-		 Token idTok ;
+-		
+-		Token  a = null;
+-		Token  b = null;
+-		idTok = null;
+-		
+-		switch ( LA(1)) {
+-		case TOKEN_REF:
+-		{
+-			a = LT(1);
+-			match(TOKEN_REF);
+-			if ( inputState.guessing==0 ) {
+-				idTok = a;
+-			}
+-			break;
+-		}
+-		case RULE_REF:
+-		{
+-			b = LT(1);
+-			match(RULE_REF);
+-			if ( inputState.guessing==0 ) {
+-				idTok = b;
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		return idTok ;
+-	}
+-	
+-	public final void lexerSpec(
+-		String doc
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  lc = null;
+-		Token  a = null;
+-		
+-			Token idTok;
+-			String sup=null;
+-		
+-		
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_lexclass:
+-		{
+-			lc = LT(1);
+-			match(LITERAL_lexclass);
+-			idTok=id();
+-			if ( inputState.guessing==0 ) {
+-				
+-								antlrTool.warning("lexclass' is deprecated; use 'class X extends Lexer'",
+-												 getFilename(), lc.getLine(), lc.getColumn());
+-				//				System.out.println("warning: line " + lc.getLine() + ": 'lexclass' is deprecated; use 'class X extends Lexer'");
+-							
+-			}
+-			break;
+-		}
+-		case LITERAL_class:
+-		{
+-			match(LITERAL_class);
+-			idTok=id();
+-			match(LITERAL_extends);
+-			match(LITERAL_Lexer);
+-			{
+-			switch ( LA(1)) {
+-			case LPAREN:
+-			{
+-				sup=superClass();
+-				break;
+-			}
+-			case SEMI:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.startLexer(getFilename(), idTok,sup,doc);
+-		}
+-		match(SEMI);
+-		{
+-		switch ( LA(1)) {
+-		case OPTIONS:
+-		{
+-			lexerOptionsSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case TOKENS:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endOptions();
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case TOKENS:
+-		{
+-			tokensSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ACTION:
+-		{
+-			a = LT(1);
+-			match(ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refMemberAction(a);
+-			}
+-			break;
+-		}
+-		case DOC_COMMENT:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-	}
+-	
+-	public final void treeParserSpec(
+-		String doc
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  a = null;
+-		
+-			Token idTok;
+-			String sup=null;
+-		
+-		
+-		match(LITERAL_class);
+-		idTok=id();
+-		match(LITERAL_extends);
+-		match(LITERAL_TreeParser);
+-		{
+-		switch ( LA(1)) {
+-		case LPAREN:
+-		{
+-			sup=superClass();
+-			break;
+-		}
+-		case SEMI:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.startTreeWalker(getFilename(), idTok,sup,doc);
+-		}
+-		match(SEMI);
+-		{
+-		switch ( LA(1)) {
+-		case OPTIONS:
+-		{
+-			treeParserOptionsSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case TOKENS:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endOptions();
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case TOKENS:
+-		{
+-			tokensSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ACTION:
+-		{
+-			a = LT(1);
+-			match(ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refMemberAction(a);
+-			}
+-			break;
+-		}
+-		case DOC_COMMENT:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-	}
+-	
+-	public final void parserSpec(
+-		String doc
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  a = null;
+-		
+-			Token idTok;
+-			String sup=null;
+-		
+-		
+-		match(LITERAL_class);
+-		idTok=id();
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_extends:
+-		{
+-			match(LITERAL_extends);
+-			match(LITERAL_Parser);
+-			{
+-			switch ( LA(1)) {
+-			case LPAREN:
+-			{
+-				sup=superClass();
+-				break;
+-			}
+-			case SEMI:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			break;
+-		}
+-		case SEMI:
+-		{
+-			if ( inputState.guessing==0 ) {
+-				
+-							antlrTool.warning("use 'class X extends Parser'", getFilename(), idTok.getLine(), idTok.getColumn());
+-				//			System.out.println("warning: line " +
+-				//				idTok.getLine() + ": use 'class X extends Parser'");
+-							
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.startParser(getFilename(), idTok, sup, doc);
+-		}
+-		match(SEMI);
+-		{
+-		switch ( LA(1)) {
+-		case OPTIONS:
+-		{
+-			parserOptionsSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case TOKENS:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endOptions();
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case TOKENS:
+-		{
+-			tokensSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ACTION:
+-		{
+-			a = LT(1);
+-			match(ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refMemberAction(a);
+-			}
+-			break;
+-		}
+-		case DOC_COMMENT:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-	}
+-	
+-	public final void rules() throws RecognitionException, TokenStreamException {
+-		
+-		
+-		{
+-		int _cnt68=0;
+-		_loop68:
+-		do {
+-			if ((_tokenSet_0.member(LA(1))) && (_tokenSet_1.member(LA(2)))) {
+-				rule();
+-			}
+-			else {
+-				if ( _cnt68>=1 ) { break _loop68; } else {throw new NoViableAltException(LT(1), getFilename());}
+-			}
+-			
+-			_cnt68++;
+-		} while (true);
+-		}
+-	}
+-	
+-	public final  Token  optionValue() throws RecognitionException, TokenStreamException {
+-		 Token retval ;
+-		
+-		Token  sl = null;
+-		Token  cl = null;
+-		Token  il = null;
+-		retval = null;
+-		
+-		switch ( LA(1)) {
+-		case TOKEN_REF:
+-		case RULE_REF:
+-		{
+-			retval=qualifiedID();
+-			break;
+-		}
+-		case STRING_LITERAL:
+-		{
+-			sl = LT(1);
+-			match(STRING_LITERAL);
+-			if ( inputState.guessing==0 ) {
+-				retval = sl;
+-			}
+-			break;
+-		}
+-		case CHAR_LITERAL:
+-		{
+-			cl = LT(1);
+-			match(CHAR_LITERAL);
+-			if ( inputState.guessing==0 ) {
+-				retval = cl;
+-			}
+-			break;
+-		}
+-		case INT:
+-		{
+-			il = LT(1);
+-			match(INT);
+-			if ( inputState.guessing==0 ) {
+-				retval = il;
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		return retval ;
+-	}
+-	
+-	public final void parserOptionsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token idTok; Token value;
+-		
+-		match(OPTIONS);
+-		{
+-		_loop21:
+-		do {
+-			if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+-				idTok=id();
+-				match(ASSIGN);
+-				value=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.setGrammarOption(idTok, value);
+-				}
+-				match(SEMI);
+-			}
+-			else {
+-				break _loop21;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-	public final void treeParserOptionsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token idTok; Token value;
+-		
+-		match(OPTIONS);
+-		{
+-		_loop24:
+-		do {
+-			if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+-				idTok=id();
+-				match(ASSIGN);
+-				value=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.setGrammarOption(idTok, value);
+-				}
+-				match(SEMI);
+-			}
+-			else {
+-				break _loop24;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-	public final void lexerOptionsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token idTok; Token value; BitSet b;
+-		
+-		match(OPTIONS);
+-		{
+-		_loop27:
+-		do {
+-			switch ( LA(1)) {
+-			case LITERAL_charVocabulary:
+-			{
+-				match(LITERAL_charVocabulary);
+-				match(ASSIGN);
+-				b=charSet();
+-				match(SEMI);
+-				if ( inputState.guessing==0 ) {
+-					behavior.setCharVocabulary(b);
+-				}
+-				break;
+-			}
+-			case TOKEN_REF:
+-			case RULE_REF:
+-			{
+-				idTok=id();
+-				match(ASSIGN);
+-				value=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.setGrammarOption(idTok, value);
+-				}
+-				match(SEMI);
+-				break;
+-			}
+-			default:
+-			{
+-				break _loop27;
+-			}
+-			}
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-	public final  BitSet  charSet() throws RecognitionException, TokenStreamException {
+-		 BitSet b ;
+-		
+-		
+-			b = null;
+-			BitSet tmpSet = null;
+-		
+-		
+-		b=setBlockElement();
+-		{
+-		_loop34:
+-		do {
+-			if ((LA(1)==OR)) {
+-				match(OR);
+-				tmpSet=setBlockElement();
+-				if ( inputState.guessing==0 ) {
+-					b.orInPlace(tmpSet);
+-				}
+-			}
+-			else {
+-				break _loop34;
+-			}
+-			
+-		} while (true);
+-		}
+-		return b ;
+-	}
+-	
+-	public final void subruleOptionsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token idTok; Token value;
+-		
+-		match(OPTIONS);
+-		{
+-		_loop30:
+-		do {
+-			if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+-				idTok=id();
+-				match(ASSIGN);
+-				value=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.setSubruleOption(idTok, value);
+-				}
+-				match(SEMI);
+-			}
+-			else {
+-				break _loop30;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-/** Match a.b.c.d qualified ids; WILDCARD here is overloaded as
+- *  id separator; that is, I need a reference to the '.' token.
+- */
+-	public final Token  qualifiedID() throws RecognitionException, TokenStreamException {
+-		Token qidTok=null;
+-		
+-		
+-			StringBuffer buf = new StringBuffer(30);
+-			Token a;
+-		
+-		
+-		a=id();
+-		if ( inputState.guessing==0 ) {
+-			buf.append(a.getText());
+-		}
+-		{
+-		_loop144:
+-		do {
+-			if ((LA(1)==WILDCARD)) {
+-				match(WILDCARD);
+-				a=id();
+-				if ( inputState.guessing==0 ) {
+-					buf.append('.'); buf.append(a.getText());
+-				}
+-			}
+-			else {
+-				break _loop144;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			
+-					 // can use either TOKEN_REF or RULE_REF; should
+-					 // really create a QID or something instead.
+-					 qidTok = new CommonToken(TOKEN_REF, buf.toString());
+-					 qidTok.setLine(a.getLine());
+-					
+-		}
+-		return qidTok;
+-	}
+-	
+-	public final  BitSet  setBlockElement() throws RecognitionException, TokenStreamException {
+-		 BitSet b ;
+-		
+-		Token  c1 = null;
+-		Token  c2 = null;
+-		
+-			b = null;
+-			int rangeMin = 0;
+-		
+-		
+-		c1 = LT(1);
+-		match(CHAR_LITERAL);
+-		if ( inputState.guessing==0 ) {
+-			
+-					rangeMin = ANTLRLexer.tokenTypeForCharLiteral(c1.getText());
+-					b = BitSet.of(rangeMin);
+-				
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case RANGE:
+-		{
+-			match(RANGE);
+-			c2 = LT(1);
+-			match(CHAR_LITERAL);
+-			if ( inputState.guessing==0 ) {
+-				
+-							int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(c2.getText());
+-							if (rangeMax < rangeMin) {
+-								antlrTool.error("Malformed range line ", getFilename(), c1.getLine(), c1.getColumn());
+-							}
+-							for (int i = rangeMin+1; i <= rangeMax; i++) {
+-								b.add(i);
+-							}
+-						
+-			}
+-			break;
+-		}
+-		case SEMI:
+-		case OR:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		return b ;
+-	}
+-	
+-	public final void tokensSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token  t1 = null;
+-		Token  s1 = null;
+-		Token  s3 = null;
+-		
+-		match(TOKENS);
+-		{
+-		int _cnt43=0;
+-		_loop43:
+-		do {
+-			if ((LA(1)==STRING_LITERAL||LA(1)==TOKEN_REF)) {
+-				{
+-				switch ( LA(1)) {
+-				case TOKEN_REF:
+-				{
+-					if ( inputState.guessing==0 ) {
+-						s1=null;
+-					}
+-					t1 = LT(1);
+-					match(TOKEN_REF);
+-					{
+-					switch ( LA(1)) {
+-					case ASSIGN:
+-					{
+-						match(ASSIGN);
+-						s1 = LT(1);
+-						match(STRING_LITERAL);
+-						break;
+-					}
+-					case SEMI:
+-					case OPEN_ELEMENT_OPTION:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					if ( inputState.guessing==0 ) {
+-						behavior.defineToken(t1, s1);
+-					}
+-					{
+-					switch ( LA(1)) {
+-					case OPEN_ELEMENT_OPTION:
+-					{
+-						tokensSpecOptions(t1);
+-						break;
+-					}
+-					case SEMI:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					break;
+-				}
+-				case STRING_LITERAL:
+-				{
+-					s3 = LT(1);
+-					match(STRING_LITERAL);
+-					if ( inputState.guessing==0 ) {
+-						behavior.defineToken(null, s3);
+-					}
+-					{
+-					switch ( LA(1)) {
+-					case OPEN_ELEMENT_OPTION:
+-					{
+-						tokensSpecOptions(s3);
+-						break;
+-					}
+-					case SEMI:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltException(LT(1), getFilename());
+-				}
+-				}
+-				}
+-				match(SEMI);
+-			}
+-			else {
+-				if ( _cnt43>=1 ) { break _loop43; } else {throw new NoViableAltException(LT(1), getFilename());}
+-			}
+-			
+-			_cnt43++;
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-	public final void tokensSpecOptions(
+-		Token t
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		
+-			Token o=null, v=null;
+-		
+-		
+-		match(OPEN_ELEMENT_OPTION);
+-		o=id();
+-		match(ASSIGN);
+-		v=optionValue();
+-		if ( inputState.guessing==0 ) {
+-			behavior.refTokensSpecElementOption(t,o,v);
+-		}
+-		{
+-		_loop46:
+-		do {
+-			if ((LA(1)==SEMI)) {
+-				match(SEMI);
+-				o=id();
+-				match(ASSIGN);
+-				v=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.refTokensSpecElementOption(t,o,v);
+-				}
+-			}
+-			else {
+-				break _loop46;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(CLOSE_ELEMENT_OPTION);
+-	}
+-	
+-	public final String  superClass() throws RecognitionException, TokenStreamException {
+-		String sup;
+-		
+-		sup=null;
+-		
+-		match(LPAREN);
+-		if ( inputState.guessing==0 ) {
+-			
+-						sup = LT(1).getText();
+-						sup = StringUtils.stripFrontBack(sup, "\"", "\"");
+-						
+-		}
+-		{
+-		match(STRING_LITERAL);
+-		}
+-		match(RPAREN);
+-		return sup;
+-	}
+-	
+-	public final void rule() throws RecognitionException, TokenStreamException {
+-		
+-		Token  d = null;
+-		Token  p1 = null;
+-		Token  p2 = null;
+-		Token  p3 = null;
+-		Token  aa = null;
+-		Token  rt = null;
+-		Token  a = null;
+-		
+-			String access="public";
+-			Token idTok;
+-			String doc=null;
+-			boolean ruleAutoGen = true;
+-			blockNesting = -1;	// block increments, so -1 to make rule at level 0
+-		
+-		
+-		{
+-		switch ( LA(1)) {
+-		case DOC_COMMENT:
+-		{
+-			d = LT(1);
+-			match(DOC_COMMENT);
+-			if ( inputState.guessing==0 ) {
+-				doc=d.getText();
+-			}
+-			break;
+-		}
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_protected:
+-		{
+-			p1 = LT(1);
+-			match(LITERAL_protected);
+-			if ( inputState.guessing==0 ) {
+-				access=p1.getText();
+-			}
+-			break;
+-		}
+-		case LITERAL_public:
+-		{
+-			p2 = LT(1);
+-			match(LITERAL_public);
+-			if ( inputState.guessing==0 ) {
+-				access=p2.getText();
+-			}
+-			break;
+-		}
+-		case LITERAL_private:
+-		{
+-			p3 = LT(1);
+-			match(LITERAL_private);
+-			if ( inputState.guessing==0 ) {
+-				access=p3.getText();
+-			}
+-			break;
+-		}
+-		case TOKEN_REF:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		idTok=id();
+-		{
+-		switch ( LA(1)) {
+-		case BANG:
+-		{
+-			match(BANG);
+-			if ( inputState.guessing==0 ) {
+-				ruleAutoGen = false;
+-			}
+-			break;
+-		}
+-		case ACTION:
+-		case OPTIONS:
+-		case ARG_ACTION:
+-		case LITERAL_returns:
+-		case COLON:
+-		case LITERAL_throws:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			
+-					behavior.defineRuleName(idTok, access, ruleAutoGen, doc);
+-				
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ARG_ACTION:
+-		{
+-			aa = LT(1);
+-			match(ARG_ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refArgAction(aa);
+-			}
+-			break;
+-		}
+-		case ACTION:
+-		case OPTIONS:
+-		case LITERAL_returns:
+-		case COLON:
+-		case LITERAL_throws:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_returns:
+-		{
+-			match(LITERAL_returns);
+-			rt = LT(1);
+-			match(ARG_ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refReturnAction(rt);
+-			}
+-			break;
+-		}
+-		case ACTION:
+-		case OPTIONS:
+-		case COLON:
+-		case LITERAL_throws:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_throws:
+-		{
+-			throwsSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case OPTIONS:
+-		case COLON:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case OPTIONS:
+-		{
+-			ruleOptionsSpec();
+-			break;
+-		}
+-		case ACTION:
+-		case COLON:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case ACTION:
+-		{
+-			a = LT(1);
+-			match(ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refInitAction(a);
+-			}
+-			break;
+-		}
+-		case COLON:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		match(COLON);
+-		block();
+-		match(SEMI);
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_exception:
+-		{
+-			exceptionGroup();
+-			break;
+-		}
+-		case EOF:
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case LITERAL_lexclass:
+-		case LITERAL_class:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endRule(idTok.getText());
+-		}
+-	}
+-	
+-	public final void throwsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		
+-			String t=null;
+-			Token a,b;
+-		
+-		
+-		match(LITERAL_throws);
+-		a=id();
+-		if ( inputState.guessing==0 ) {
+-			t=a.getText();
+-		}
+-		{
+-		_loop84:
+-		do {
+-			if ((LA(1)==COMMA)) {
+-				match(COMMA);
+-				b=id();
+-				if ( inputState.guessing==0 ) {
+-					t+=","+b.getText();
+-				}
+-			}
+-			else {
+-				break _loop84;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.setUserExceptions(t);	
+-		}
+-	}
+-	
+-	public final void ruleOptionsSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token idTok; Token value;
+-		
+-		match(OPTIONS);
+-		{
+-		_loop81:
+-		do {
+-			if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF)) {
+-				idTok=id();
+-				match(ASSIGN);
+-				value=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.setRuleOption(idTok, value);
+-				}
+-				match(SEMI);
+-			}
+-			else {
+-				break _loop81;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(RCURLY);
+-	}
+-	
+-	public final void block() throws RecognitionException, TokenStreamException {
+-		
+-		
+-		if ( inputState.guessing==0 ) {
+-			blockNesting++;
+-		}
+-		alternative();
+-		{
+-		_loop87:
+-		do {
+-			if ((LA(1)==OR)) {
+-				match(OR);
+-				alternative();
+-			}
+-			else {
+-				break _loop87;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			blockNesting--;
+-		}
+-	}
+-	
+-	public final void exceptionGroup() throws RecognitionException, TokenStreamException {
+-		
+-		
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginExceptionGroup();
+-		}
+-		{
+-		int _cnt95=0;
+-		_loop95:
+-		do {
+-			if ((LA(1)==LITERAL_exception)) {
+-				exceptionSpec();
+-			}
+-			else {
+-				if ( _cnt95>=1 ) { break _loop95; } else {throw new NoViableAltException(LT(1), getFilename());}
+-			}
+-			
+-			_cnt95++;
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endExceptionGroup();
+-		}
+-	}
+-	
+-	public final void alternative() throws RecognitionException, TokenStreamException {
+-		
+-		boolean altAutoGen = true;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case BANG:
+-		{
+-			match(BANG);
+-			if ( inputState.guessing==0 ) {
+-				altAutoGen=false;
+-			}
+-			break;
+-		}
+-		case STRING_LITERAL:
+-		case ACTION:
+-		case SEMI:
+-		case CHAR_LITERAL:
+-		case OR:
+-		case TOKEN_REF:
+-		case LPAREN:
+-		case RPAREN:
+-		case LITERAL_exception:
+-		case RULE_REF:
+-		case NOT_OP:
+-		case SEMPRED:
+-		case TREE_BEGIN:
+-		case WILDCARD:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginAlt(altAutoGen);
+-		}
+-		{
+-		_loop91:
+-		do {
+-			if ((_tokenSet_2.member(LA(1)))) {
+-				element();
+-			}
+-			else {
+-				break _loop91;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case LITERAL_exception:
+-		{
+-			exceptionSpecNoLabel();
+-			break;
+-		}
+-		case SEMI:
+-		case OR:
+-		case RPAREN:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endAlt();
+-		}
+-	}
+-	
+-	public final void element() throws RecognitionException, TokenStreamException {
+-		
+-		
+-		elementNoOptionSpec();
+-		{
+-		switch ( LA(1)) {
+-		case OPEN_ELEMENT_OPTION:
+-		{
+-			elementOptionSpec();
+-			break;
+-		}
+-		case STRING_LITERAL:
+-		case ACTION:
+-		case SEMI:
+-		case CHAR_LITERAL:
+-		case OR:
+-		case TOKEN_REF:
+-		case LPAREN:
+-		case RPAREN:
+-		case LITERAL_exception:
+-		case RULE_REF:
+-		case NOT_OP:
+-		case SEMPRED:
+-		case TREE_BEGIN:
+-		case WILDCARD:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-	}
+-	
+-	public final void exceptionSpecNoLabel() throws RecognitionException, TokenStreamException {
+-		
+-		
+-		match(LITERAL_exception);
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginExceptionSpec(null);
+-		}
+-		{
+-		_loop102:
+-		do {
+-			if ((LA(1)==LITERAL_catch)) {
+-				exceptionHandler();
+-			}
+-			else {
+-				break _loop102;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endExceptionSpec();
+-		}
+-	}
+-	
+-	public final void exceptionSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token  aa = null;
+-		Token labelAction = null;
+-		
+-		match(LITERAL_exception);
+-		{
+-		switch ( LA(1)) {
+-		case ARG_ACTION:
+-		{
+-			aa = LT(1);
+-			match(ARG_ACTION);
+-			if ( inputState.guessing==0 ) {
+-				labelAction = aa;
+-			}
+-			break;
+-		}
+-		case EOF:
+-		case ACTION:
+-		case DOC_COMMENT:
+-		case LITERAL_lexclass:
+-		case LITERAL_class:
+-		case TOKEN_REF:
+-		case LITERAL_protected:
+-		case LITERAL_public:
+-		case LITERAL_private:
+-		case LITERAL_exception:
+-		case LITERAL_catch:
+-		case RULE_REF:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginExceptionSpec(labelAction);
+-		}
+-		{
+-		_loop99:
+-		do {
+-			if ((LA(1)==LITERAL_catch)) {
+-				exceptionHandler();
+-			}
+-			else {
+-				break _loop99;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endExceptionSpec();
+-		}
+-	}
+-	
+-	public final void exceptionHandler() throws RecognitionException, TokenStreamException {
+-		
+-		Token  a1 = null;
+-		Token  a2 = null;
+-		Token exType; Token exName;
+-		
+-		match(LITERAL_catch);
+-		a1 = LT(1);
+-		match(ARG_ACTION);
+-		a2 = LT(1);
+-		match(ACTION);
+-		if ( inputState.guessing==0 ) {
+-			behavior.refExceptionHandler(a1, a2);
+-		}
+-	}
+-	
+-	public final void elementNoOptionSpec() throws RecognitionException, TokenStreamException {
+-		
+-		Token  rr = null;
+-		Token  aa = null;
+-		Token  tr = null;
+-		Token  aa2 = null;
+-		Token  r2 = null;
+-		Token  aa3 = null;
+-		Token  a = null;
+-		Token  p = null;
+-		
+-			Token label = null;
+-			Token assignId = null;
+-			Token args = null;
+-			int autoGen = GrammarElement.AUTO_GEN_NONE;
+-		
+-		
+-		switch ( LA(1)) {
+-		case ACTION:
+-		{
+-			a = LT(1);
+-			match(ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refAction(a);
+-			}
+-			break;
+-		}
+-		case SEMPRED:
+-		{
+-			p = LT(1);
+-			match(SEMPRED);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refSemPred(p);
+-			}
+-			break;
+-		}
+-		case TREE_BEGIN:
+-		{
+-			tree();
+-			break;
+-		}
+-		default:
+-			if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==ASSIGN)) {
+-				assignId=id();
+-				match(ASSIGN);
+-				{
+-				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==COLON)) {
+-					label=id();
+-					match(COLON);
+-					if ( inputState.guessing==0 ) {
+-						checkForMissingEndRule(label);
+-					}
+-				}
+-				else if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (_tokenSet_3.member(LA(2)))) {
+-				}
+-				else {
+-					throw new NoViableAltException(LT(1), getFilename());
+-				}
+-				
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case RULE_REF:
+-				{
+-					rr = LT(1);
+-					match(RULE_REF);
+-					{
+-					switch ( LA(1)) {
+-					case ARG_ACTION:
+-					{
+-						aa = LT(1);
+-						match(ARG_ACTION);
+-						if ( inputState.guessing==0 ) {
+-							args=aa;
+-						}
+-						break;
+-					}
+-					case STRING_LITERAL:
+-					case ACTION:
+-					case SEMI:
+-					case CHAR_LITERAL:
+-					case OR:
+-					case TOKEN_REF:
+-					case OPEN_ELEMENT_OPTION:
+-					case LPAREN:
+-					case RPAREN:
+-					case BANG:
+-					case LITERAL_exception:
+-					case RULE_REF:
+-					case NOT_OP:
+-					case SEMPRED:
+-					case TREE_BEGIN:
+-					case WILDCARD:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					{
+-					switch ( LA(1)) {
+-					case BANG:
+-					{
+-						match(BANG);
+-						if ( inputState.guessing==0 ) {
+-							autoGen = GrammarElement.AUTO_GEN_BANG;
+-						}
+-						break;
+-					}
+-					case STRING_LITERAL:
+-					case ACTION:
+-					case SEMI:
+-					case CHAR_LITERAL:
+-					case OR:
+-					case TOKEN_REF:
+-					case OPEN_ELEMENT_OPTION:
+-					case LPAREN:
+-					case RPAREN:
+-					case LITERAL_exception:
+-					case RULE_REF:
+-					case NOT_OP:
+-					case SEMPRED:
+-					case TREE_BEGIN:
+-					case WILDCARD:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					if ( inputState.guessing==0 ) {
+-						behavior.refRule(assignId, rr, label, args, autoGen);
+-					}
+-					break;
+-				}
+-				case TOKEN_REF:
+-				{
+-					tr = LT(1);
+-					match(TOKEN_REF);
+-					{
+-					switch ( LA(1)) {
+-					case ARG_ACTION:
+-					{
+-						aa2 = LT(1);
+-						match(ARG_ACTION);
+-						if ( inputState.guessing==0 ) {
+-							args=aa2;
+-						}
+-						break;
+-					}
+-					case STRING_LITERAL:
+-					case ACTION:
+-					case SEMI:
+-					case CHAR_LITERAL:
+-					case OR:
+-					case TOKEN_REF:
+-					case OPEN_ELEMENT_OPTION:
+-					case LPAREN:
+-					case RPAREN:
+-					case LITERAL_exception:
+-					case RULE_REF:
+-					case NOT_OP:
+-					case SEMPRED:
+-					case TREE_BEGIN:
+-					case WILDCARD:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					if ( inputState.guessing==0 ) {
+-						behavior.refToken(assignId, tr, label, args, false, autoGen, lastInRule());
+-					}
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltException(LT(1), getFilename());
+-				}
+-				}
+-				}
+-			}
+-			else if ((_tokenSet_4.member(LA(1))) && (_tokenSet_5.member(LA(2)))) {
+-				{
+-				if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==COLON)) {
+-					label=id();
+-					match(COLON);
+-					if ( inputState.guessing==0 ) {
+-						checkForMissingEndRule(label);
+-					}
+-				}
+-				else if ((_tokenSet_4.member(LA(1))) && (_tokenSet_6.member(LA(2)))) {
+-				}
+-				else {
+-					throw new NoViableAltException(LT(1), getFilename());
+-				}
+-				
+-				}
+-				{
+-				switch ( LA(1)) {
+-				case RULE_REF:
+-				{
+-					r2 = LT(1);
+-					match(RULE_REF);
+-					{
+-					switch ( LA(1)) {
+-					case ARG_ACTION:
+-					{
+-						aa3 = LT(1);
+-						match(ARG_ACTION);
+-						if ( inputState.guessing==0 ) {
+-							args=aa3;
+-						}
+-						break;
+-					}
+-					case STRING_LITERAL:
+-					case ACTION:
+-					case SEMI:
+-					case CHAR_LITERAL:
+-					case OR:
+-					case TOKEN_REF:
+-					case OPEN_ELEMENT_OPTION:
+-					case LPAREN:
+-					case RPAREN:
+-					case BANG:
+-					case LITERAL_exception:
+-					case RULE_REF:
+-					case NOT_OP:
+-					case SEMPRED:
+-					case TREE_BEGIN:
+-					case WILDCARD:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					{
+-					switch ( LA(1)) {
+-					case BANG:
+-					{
+-						match(BANG);
+-						if ( inputState.guessing==0 ) {
+-							autoGen = GrammarElement.AUTO_GEN_BANG;
+-						}
+-						break;
+-					}
+-					case STRING_LITERAL:
+-					case ACTION:
+-					case SEMI:
+-					case CHAR_LITERAL:
+-					case OR:
+-					case TOKEN_REF:
+-					case OPEN_ELEMENT_OPTION:
+-					case LPAREN:
+-					case RPAREN:
+-					case LITERAL_exception:
+-					case RULE_REF:
+-					case NOT_OP:
+-					case SEMPRED:
+-					case TREE_BEGIN:
+-					case WILDCARD:
+-					{
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					if ( inputState.guessing==0 ) {
+-						behavior.refRule(assignId, r2, label, args, autoGen);
+-					}
+-					break;
+-				}
+-				case NOT_OP:
+-				{
+-					match(NOT_OP);
+-					{
+-					switch ( LA(1)) {
+-					case CHAR_LITERAL:
+-					case TOKEN_REF:
+-					{
+-						notTerminal(label);
+-						break;
+-					}
+-					case LPAREN:
+-					{
+-						ebnf(label,true);
+-						break;
+-					}
+-					default:
+-					{
+-						throw new NoViableAltException(LT(1), getFilename());
+-					}
+-					}
+-					}
+-					break;
+-				}
+-				case LPAREN:
+-				{
+-					ebnf(label,false);
+-					break;
+-				}
+-				default:
+-					if ((LA(1)==STRING_LITERAL||LA(1)==CHAR_LITERAL||LA(1)==TOKEN_REF) && (LA(2)==RANGE)) {
+-						range(label);
+-					}
+-					else if ((_tokenSet_7.member(LA(1))) && (_tokenSet_8.member(LA(2)))) {
+-						terminal(label);
+-					}
+-				else {
+-					throw new NoViableAltException(LT(1), getFilename());
+-				}
+-				}
+-				}
+-			}
+-		else {
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-	}
+-	
+-	public final void elementOptionSpec() throws RecognitionException, TokenStreamException {
+-		
+-		
+-			Token o=null, v=null;
+-		
+-		
+-		match(OPEN_ELEMENT_OPTION);
+-		o=id();
+-		match(ASSIGN);
+-		v=optionValue();
+-		if ( inputState.guessing==0 ) {
+-			behavior.refElementOption(o,v);
+-		}
+-		{
+-		_loop108:
+-		do {
+-			if ((LA(1)==SEMI)) {
+-				match(SEMI);
+-				o=id();
+-				match(ASSIGN);
+-				v=optionValue();
+-				if ( inputState.guessing==0 ) {
+-					behavior.refElementOption(o,v);
+-				}
+-			}
+-			else {
+-				break _loop108;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(CLOSE_ELEMENT_OPTION);
+-	}
+-	
+-	public final void range(
+-		 Token label 
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  crLeft = null;
+-		Token  crRight = null;
+-		Token  t = null;
+-		Token  u = null;
+-		Token  v = null;
+-		Token  w = null;
+-		
+-			Token trLeft=null;
+-			Token trRight=null;
+-			int autoGen=GrammarElement.AUTO_GEN_NONE;
+-		
+-		
+-		switch ( LA(1)) {
+-		case CHAR_LITERAL:
+-		{
+-			crLeft = LT(1);
+-			match(CHAR_LITERAL);
+-			match(RANGE);
+-			crRight = LT(1);
+-			match(CHAR_LITERAL);
+-			{
+-			switch ( LA(1)) {
+-			case BANG:
+-			{
+-				match(BANG);
+-				if ( inputState.guessing==0 ) {
+-					autoGen = GrammarElement.AUTO_GEN_BANG;
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			case ACTION:
+-			case SEMI:
+-			case CHAR_LITERAL:
+-			case OR:
+-			case TOKEN_REF:
+-			case OPEN_ELEMENT_OPTION:
+-			case LPAREN:
+-			case RPAREN:
+-			case LITERAL_exception:
+-			case RULE_REF:
+-			case NOT_OP:
+-			case SEMPRED:
+-			case TREE_BEGIN:
+-			case WILDCARD:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			if ( inputState.guessing==0 ) {
+-				behavior.refCharRange(crLeft, crRight, label, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		case STRING_LITERAL:
+-		case TOKEN_REF:
+-		{
+-			{
+-			switch ( LA(1)) {
+-			case TOKEN_REF:
+-			{
+-				t = LT(1);
+-				match(TOKEN_REF);
+-				if ( inputState.guessing==0 ) {
+-					trLeft=t;
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			{
+-				u = LT(1);
+-				match(STRING_LITERAL);
+-				if ( inputState.guessing==0 ) {
+-					trLeft=u;
+-				}
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			match(RANGE);
+-			{
+-			switch ( LA(1)) {
+-			case TOKEN_REF:
+-			{
+-				v = LT(1);
+-				match(TOKEN_REF);
+-				if ( inputState.guessing==0 ) {
+-					trRight=v;
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			{
+-				w = LT(1);
+-				match(STRING_LITERAL);
+-				if ( inputState.guessing==0 ) {
+-					trRight=w;
+-				}
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			autoGen=ast_type_spec();
+-			if ( inputState.guessing==0 ) {
+-				behavior.refTokenRange(trLeft, trRight, label, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-	}
+-	
+-	public final void terminal(
+-		 Token label 
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  cl = null;
+-		Token  tr = null;
+-		Token  aa = null;
+-		Token  sl = null;
+-		Token  wi = null;
+-		
+-			int autoGen=GrammarElement.AUTO_GEN_NONE;
+-			Token args=null;
+-		
+-		
+-		switch ( LA(1)) {
+-		case CHAR_LITERAL:
+-		{
+-			cl = LT(1);
+-			match(CHAR_LITERAL);
+-			{
+-			switch ( LA(1)) {
+-			case BANG:
+-			{
+-				match(BANG);
+-				if ( inputState.guessing==0 ) {
+-					autoGen = GrammarElement.AUTO_GEN_BANG;
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			case ACTION:
+-			case SEMI:
+-			case CHAR_LITERAL:
+-			case OR:
+-			case TOKEN_REF:
+-			case OPEN_ELEMENT_OPTION:
+-			case LPAREN:
+-			case RPAREN:
+-			case LITERAL_exception:
+-			case RULE_REF:
+-			case NOT_OP:
+-			case SEMPRED:
+-			case TREE_BEGIN:
+-			case WILDCARD:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			if ( inputState.guessing==0 ) {
+-				behavior.refCharLiteral(cl, label, false, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		case TOKEN_REF:
+-		{
+-			tr = LT(1);
+-			match(TOKEN_REF);
+-			autoGen=ast_type_spec();
+-			{
+-			switch ( LA(1)) {
+-			case ARG_ACTION:
+-			{
+-				aa = LT(1);
+-				match(ARG_ACTION);
+-				if ( inputState.guessing==0 ) {
+-					args=aa;
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			case ACTION:
+-			case SEMI:
+-			case CHAR_LITERAL:
+-			case OR:
+-			case TOKEN_REF:
+-			case OPEN_ELEMENT_OPTION:
+-			case LPAREN:
+-			case RPAREN:
+-			case LITERAL_exception:
+-			case RULE_REF:
+-			case NOT_OP:
+-			case SEMPRED:
+-			case TREE_BEGIN:
+-			case WILDCARD:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			if ( inputState.guessing==0 ) {
+-				behavior.refToken(null, tr, label, args, false, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		case STRING_LITERAL:
+-		{
+-			sl = LT(1);
+-			match(STRING_LITERAL);
+-			autoGen=ast_type_spec();
+-			if ( inputState.guessing==0 ) {
+-				behavior.refStringLiteral(sl, label, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		case WILDCARD:
+-		{
+-			wi = LT(1);
+-			match(WILDCARD);
+-			autoGen=ast_type_spec();
+-			if ( inputState.guessing==0 ) {
+-				behavior.refWildcard(wi, label, autoGen);
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-	}
+-	
+-	public final void notTerminal(
+-		 Token label 
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  cl = null;
+-		Token  tr = null;
+-		int autoGen=GrammarElement.AUTO_GEN_NONE;
+-		
+-		switch ( LA(1)) {
+-		case CHAR_LITERAL:
+-		{
+-			cl = LT(1);
+-			match(CHAR_LITERAL);
+-			{
+-			switch ( LA(1)) {
+-			case BANG:
+-			{
+-				match(BANG);
+-				if ( inputState.guessing==0 ) {
+-					autoGen = GrammarElement.AUTO_GEN_BANG;
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			case ACTION:
+-			case SEMI:
+-			case CHAR_LITERAL:
+-			case OR:
+-			case TOKEN_REF:
+-			case OPEN_ELEMENT_OPTION:
+-			case LPAREN:
+-			case RPAREN:
+-			case LITERAL_exception:
+-			case RULE_REF:
+-			case NOT_OP:
+-			case SEMPRED:
+-			case TREE_BEGIN:
+-			case WILDCARD:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			if ( inputState.guessing==0 ) {
+-				behavior.refCharLiteral(cl, label, true, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		case TOKEN_REF:
+-		{
+-			tr = LT(1);
+-			match(TOKEN_REF);
+-			autoGen=ast_type_spec();
+-			if ( inputState.guessing==0 ) {
+-				behavior.refToken(null, tr, label, null, true, autoGen, lastInRule());
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-	}
+-	
+-	public final void ebnf(
+-		 Token label, boolean not 
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  lp = null;
+-		Token  aa = null;
+-		Token  ab = null;
+-		
+-		lp = LT(1);
+-		match(LPAREN);
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginSubRule(label, lp, not);
+-		}
+-		{
+-		if ((LA(1)==OPTIONS)) {
+-			subruleOptionsSpec();
+-			{
+-			switch ( LA(1)) {
+-			case ACTION:
+-			{
+-				aa = LT(1);
+-				match(ACTION);
+-				if ( inputState.guessing==0 ) {
+-					behavior.refInitAction(aa);
+-				}
+-				break;
+-			}
+-			case COLON:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			match(COLON);
+-		}
+-		else if ((LA(1)==ACTION) && (LA(2)==COLON)) {
+-			ab = LT(1);
+-			match(ACTION);
+-			if ( inputState.guessing==0 ) {
+-				behavior.refInitAction(ab);
+-			}
+-			match(COLON);
+-		}
+-		else if ((_tokenSet_9.member(LA(1))) && (_tokenSet_10.member(LA(2)))) {
+-		}
+-		else {
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		
+-		}
+-		block();
+-		match(RPAREN);
+-		{
+-		switch ( LA(1)) {
+-		case STRING_LITERAL:
+-		case ACTION:
+-		case SEMI:
+-		case CHAR_LITERAL:
+-		case OR:
+-		case TOKEN_REF:
+-		case OPEN_ELEMENT_OPTION:
+-		case LPAREN:
+-		case RPAREN:
+-		case BANG:
+-		case LITERAL_exception:
+-		case RULE_REF:
+-		case NOT_OP:
+-		case SEMPRED:
+-		case TREE_BEGIN:
+-		case QUESTION:
+-		case STAR:
+-		case PLUS:
+-		case WILDCARD:
+-		{
+-			{
+-			switch ( LA(1)) {
+-			case QUESTION:
+-			{
+-				match(QUESTION);
+-				if ( inputState.guessing==0 ) {
+-					behavior.optionalSubRule();
+-				}
+-				break;
+-			}
+-			case STAR:
+-			{
+-				match(STAR);
+-				if ( inputState.guessing==0 ) {
+-					behavior.zeroOrMoreSubRule();
+-				}
+-				break;
+-			}
+-			case PLUS:
+-			{
+-				match(PLUS);
+-				if ( inputState.guessing==0 ) {
+-					behavior.oneOrMoreSubRule();
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			case ACTION:
+-			case SEMI:
+-			case CHAR_LITERAL:
+-			case OR:
+-			case TOKEN_REF:
+-			case OPEN_ELEMENT_OPTION:
+-			case LPAREN:
+-			case RPAREN:
+-			case BANG:
+-			case LITERAL_exception:
+-			case RULE_REF:
+-			case NOT_OP:
+-			case SEMPRED:
+-			case TREE_BEGIN:
+-			case WILDCARD:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case BANG:
+-			{
+-				match(BANG);
+-				if ( inputState.guessing==0 ) {
+-					behavior.noASTSubRule();
+-				}
+-				break;
+-			}
+-			case STRING_LITERAL:
+-			case ACTION:
+-			case SEMI:
+-			case CHAR_LITERAL:
+-			case OR:
+-			case TOKEN_REF:
+-			case OPEN_ELEMENT_OPTION:
+-			case LPAREN:
+-			case RPAREN:
+-			case LITERAL_exception:
+-			case RULE_REF:
+-			case NOT_OP:
+-			case SEMPRED:
+-			case TREE_BEGIN:
+-			case WILDCARD:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			break;
+-		}
+-		case IMPLIES:
+-		{
+-			match(IMPLIES);
+-			if ( inputState.guessing==0 ) {
+-				behavior.synPred();
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endSubRule();
+-		}
+-	}
+-	
+-	public final void tree() throws RecognitionException, TokenStreamException {
+-		
+-		Token  lp = null;
+-		
+-		lp = LT(1);
+-		match(TREE_BEGIN);
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginTree(lp);
+-		}
+-		rootNode();
+-		if ( inputState.guessing==0 ) {
+-			behavior.beginChildList();
+-		}
+-		{
+-		int _cnt122=0;
+-		_loop122:
+-		do {
+-			if ((_tokenSet_2.member(LA(1)))) {
+-				element();
+-			}
+-			else {
+-				if ( _cnt122>=1 ) { break _loop122; } else {throw new NoViableAltException(LT(1), getFilename());}
+-			}
+-			
+-			_cnt122++;
+-		} while (true);
+-		}
+-		if ( inputState.guessing==0 ) {
+-			behavior.endChildList();
+-		}
+-		match(RPAREN);
+-		if ( inputState.guessing==0 ) {
+-			behavior.endTree();
+-		}
+-	}
+-	
+-	public final void rootNode() throws RecognitionException, TokenStreamException {
+-		
+-		Token label = null;
+-		
+-		{
+-		if ((LA(1)==TOKEN_REF||LA(1)==RULE_REF) && (LA(2)==COLON)) {
+-			label=id();
+-			match(COLON);
+-			if ( inputState.guessing==0 ) {
+-				checkForMissingEndRule(label);
+-			}
+-		}
+-		else if ((_tokenSet_7.member(LA(1))) && (_tokenSet_11.member(LA(2)))) {
+-		}
+-		else {
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		
+-		}
+-		terminal(label);
+-	}
+-	
+-	public final  int  ast_type_spec() throws RecognitionException, TokenStreamException {
+-		 int autoGen ;
+-		
+-		autoGen = GrammarElement.AUTO_GEN_NONE;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case CARET:
+-		{
+-			match(CARET);
+-			if ( inputState.guessing==0 ) {
+-				autoGen = GrammarElement.AUTO_GEN_CARET;
+-			}
+-			break;
+-		}
+-		case BANG:
+-		{
+-			match(BANG);
+-			if ( inputState.guessing==0 ) {
+-				autoGen = GrammarElement.AUTO_GEN_BANG;
+-			}
+-			break;
+-		}
+-		case STRING_LITERAL:
+-		case ACTION:
+-		case SEMI:
+-		case CHAR_LITERAL:
+-		case OR:
+-		case TOKEN_REF:
+-		case OPEN_ELEMENT_OPTION:
+-		case LPAREN:
+-		case RPAREN:
+-		case ARG_ACTION:
+-		case LITERAL_exception:
+-		case RULE_REF:
+-		case NOT_OP:
+-		case SEMPRED:
+-		case TREE_BEGIN:
+-		case WILDCARD:
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltException(LT(1), getFilename());
+-		}
+-		}
+-		}
+-		return autoGen ;
+-	}
+-	
+-	
+-	public static final String[] _tokenNames = {
+-		"<0>",
+-		"EOF",
+-		"<2>",
+-		"NULL_TREE_LOOKAHEAD",
+-		"\"tokens\"",
+-		"\"header\"",
+-		"STRING_LITERAL",
+-		"ACTION",
+-		"DOC_COMMENT",
+-		"\"lexclass\"",
+-		"\"class\"",
+-		"\"extends\"",
+-		"\"Lexer\"",
+-		"\"TreeParser\"",
+-		"OPTIONS",
+-		"ASSIGN",
+-		"SEMI",
+-		"RCURLY",
+-		"\"charVocabulary\"",
+-		"CHAR_LITERAL",
+-		"INT",
+-		"OR",
+-		"RANGE",
+-		"TOKENS",
+-		"TOKEN_REF",
+-		"OPEN_ELEMENT_OPTION",
+-		"CLOSE_ELEMENT_OPTION",
+-		"LPAREN",
+-		"RPAREN",
+-		"\"Parser\"",
+-		"\"protected\"",
+-		"\"public\"",
+-		"\"private\"",
+-		"BANG",
+-		"ARG_ACTION",
+-		"\"returns\"",
+-		"COLON",
+-		"\"throws\"",
+-		"COMMA",
+-		"\"exception\"",
+-		"\"catch\"",
+-		"RULE_REF",
+-		"NOT_OP",
+-		"SEMPRED",
+-		"TREE_BEGIN",
+-		"QUESTION",
+-		"STAR",
+-		"PLUS",
+-		"IMPLIES",
+-		"CARET",
+-		"WILDCARD",
+-		"\"options\"",
+-		"WS",
+-		"COMMENT",
+-		"SL_COMMENT",
+-		"ML_COMMENT",
+-		"ESC",
+-		"DIGIT",
+-		"XDIGIT",
+-		"NESTED_ARG_ACTION",
+-		"NESTED_ACTION",
+-		"WS_LOOP",
+-		"INTERNAL_RULE_REF",
+-		"WS_OPT"
+-	};
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = { 2206556225792L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = { 2472844214400L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = { 1158885407195328L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = { 1159461236965568L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = { 1132497128128576L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = { 1722479914074304L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	private static final long[] mk_tokenSet_6() {
+-		long[] data = { 1722411194597568L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+-	private static final long[] mk_tokenSet_7() {
+-		long[] data = { 1125899924144192L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+-	private static final long[] mk_tokenSet_8() {
+-		long[] data = { 1722411190386880L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+-	private static final long[] mk_tokenSet_9() {
+-		long[] data = { 1159444023476416L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
+-	private static final long[] mk_tokenSet_10() {
+-		long[] data = { 2251345007067328L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
+-	private static final long[] mk_tokenSet_11() {
+-		long[] data = { 1721861130420416L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_11 = new BitSet(mk_tokenSet_11());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRStringBuffer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRStringBuffer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRStringBuffer.java	2006-08-31 00:34:02.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRStringBuffer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,81 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-// Implementation of a StringBuffer-like object that does not have the
+-// unfortunate side-effect of creating Strings with very large buffers.
+-
+-public class ANTLRStringBuffer {
+-    protected char[] buffer = null;
+-    protected int length = 0;		// length and also where to store next char
+-
+-
+-    public ANTLRStringBuffer() {
+-        buffer = new char[50];
+-    }
+-
+-    public ANTLRStringBuffer(int n) {
+-        buffer = new char[n];
+-    }
+-
+-    public final void append(char c) {
+-        // This would normally be  an "ensureCapacity" method, but inlined
+-        // here for speed.
+-        if (length >= buffer.length) {
+-            // Compute a new length that is at least double old length
+-            int newSize = buffer.length;
+-            while (length >= newSize) {
+-                newSize *= 2;
+-            }
+-            // Allocate new array and copy buffer
+-            char[] newBuffer = new char[newSize];
+-            for (int i = 0; i < length; i++) {
+-                newBuffer[i] = buffer[i];
+-            }
+-            buffer = newBuffer;
+-        }
+-        buffer[length] = c;
+-        length++;
+-    }
+-
+-    public final void append(String s) {
+-        for (int i = 0; i < s.length(); i++) {
+-            append(s.charAt(i));
+-        }
+-    }
+-
+-    public final char charAt(int index) {
+-        return buffer[index];
+-    }
+-
+-    final public char[] getBuffer() {
+-        return buffer;
+-    }
+-
+-    public final int length() {
+-        return length;
+-    }
+-
+-    public final void setCharAt(int index, char ch) {
+-        buffer[index] = ch;
+-    }
+-
+-    public final void setLength(int newLength) {
+-        if (newLength < length) {
+-            length = newLength;
+-        }
+-        else {
+-            while (newLength > length) {
+-                append('\0');
+-            }
+-        }
+-    }
+-
+-    public final String toString() {
+-        return new String(buffer, 0, length);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokdefLexer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokdefLexer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokdefLexer.java	2006-02-08 22:30:33.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokdefLexer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,664 +0,0 @@
+-// $ANTLR : "tokdef.g" -> "ANTLRTokdefLexer.java"$
+- package persistence.antlr; 
+-import java.io.InputStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.TokenStreamRecognitionException;
+-import persistence.antlr.CharStreamException;
+-import persistence.antlr.CharStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import java.io.Reader;
+-import java.util.Hashtable;
+-import persistence.antlr.CharScanner;
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.ByteBuffer;
+-import persistence.antlr.CharBuffer;
+-import persistence.antlr.Token;
+-import persistence.antlr.CommonToken;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.ANTLRHashString;
+-import persistence.antlr.LexerSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.SemanticException;
+-
+-public class ANTLRTokdefLexer extends persistence.antlr.CharScanner implements ANTLRTokdefParserTokenTypes, TokenStream
+- {
+-public ANTLRTokdefLexer(InputStream in) {
+-	this(new ByteBuffer(in));
+-}
+-public ANTLRTokdefLexer(Reader in) {
+-	this(new CharBuffer(in));
+-}
+-public ANTLRTokdefLexer(InputBuffer ib) {
+-	this(new LexerSharedInputState(ib));
+-}
+-public ANTLRTokdefLexer(LexerSharedInputState state) {
+-	super(state);
+-	caseSensitiveLiterals = true;
+-	setCaseSensitive(true);
+-	literals = new Hashtable();
+-}
+-
+-public Token nextToken() throws TokenStreamException {
+-	Token theRetToken=null;
+-tryAgain:
+-	for (;;) {
+-		Token _token = null;
+-		int _ttype = Token.INVALID_TYPE;
+-		resetText();
+-		try {   // for char stream error handling
+-			try {   // for lexical error handling
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '(':
+-				{
+-					mLPAREN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ')':
+-				{
+-					mRPAREN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '=':
+-				{
+-					mASSIGN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '"':
+-				{
+-					mSTRING(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':  case 'a':  case 'b':
+-				case 'c':  case 'd':  case 'e':  case 'f':
+-				case 'g':  case 'h':  case 'i':  case 'j':
+-				case 'k':  case 'l':  case 'm':  case 'n':
+-				case 'o':  case 'p':  case 'q':  case 'r':
+-				case 's':  case 't':  case 'u':  case 'v':
+-				case 'w':  case 'x':  case 'y':  case 'z':
+-				{
+-					mID(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '0':  case '1':  case '2':  case '3':
+-				case '4':  case '5':  case '6':  case '7':
+-				case '8':  case '9':
+-				{
+-					mINT(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				default:
+-					if ((LA(1)=='/') && (LA(2)=='/')) {
+-						mSL_COMMENT(true);
+-						theRetToken=_returnToken;
+-					}
+-					else if ((LA(1)=='/') && (LA(2)=='*')) {
+-						mML_COMMENT(true);
+-						theRetToken=_returnToken;
+-					}
+-				else {
+-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				}
+-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+-				_ttype = _returnToken.getType();
+-				_returnToken.setType(_ttype);
+-				return _returnToken;
+-			}
+-			catch (RecognitionException e) {
+-				throw new TokenStreamRecognitionException(e);
+-			}
+-		}
+-		catch (CharStreamException cse) {
+-			if ( cse instanceof CharStreamIOException ) {
+-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+-			}
+-			else {
+-				throw new TokenStreamException(cse.getMessage());
+-			}
+-		}
+-	}
+-}
+-
+-	public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case ' ':
+-		{
+-			match(' ');
+-			break;
+-		}
+-		case '\t':
+-		{
+-			match('\t');
+-			break;
+-		}
+-		case '\r':
+-		{
+-			match('\r');
+-			{
+-			if ((LA(1)=='\n')) {
+-				match('\n');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			newline();
+-			break;
+-		}
+-		case '\n':
+-		{
+-			match('\n');
+-			newline();
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_ttype = Token.SKIP;
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SL_COMMENT;
+-		int _saveIndex;
+-		
+-		match("//");
+-		{
+-		_loop234:
+-		do {
+-			if ((_tokenSet_0.member(LA(1)))) {
+-				{
+-				match(_tokenSet_0);
+-				}
+-			}
+-			else {
+-				break _loop234;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '\n':
+-		{
+-			match('\n');
+-			break;
+-		}
+-		case '\r':
+-		{
+-			match('\r');
+-			{
+-			if ((LA(1)=='\n')) {
+-				match('\n');
+-			}
+-			else {
+-			}
+-			
+-			}
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		_ttype = Token.SKIP; newline();
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ML_COMMENT;
+-		int _saveIndex;
+-		
+-		match("/*");
+-		{
+-		_loop239:
+-		do {
+-			if ((LA(1)=='*') && (_tokenSet_1.member(LA(2)))) {
+-				match('*');
+-				matchNot('/');
+-			}
+-			else if ((LA(1)=='\n')) {
+-				match('\n');
+-				newline();
+-			}
+-			else if ((_tokenSet_2.member(LA(1)))) {
+-				matchNot('*');
+-			}
+-			else {
+-				break _loop239;
+-			}
+-			
+-		} while (true);
+-		}
+-		match("*/");
+-		_ttype = Token.SKIP;
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = LPAREN;
+-		int _saveIndex;
+-		
+-		match('(');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RPAREN;
+-		int _saveIndex;
+-		
+-		match(')');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mASSIGN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ASSIGN;
+-		int _saveIndex;
+-		
+-		match('=');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSTRING(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STRING;
+-		int _saveIndex;
+-		
+-		match('"');
+-		{
+-		_loop245:
+-		do {
+-			if ((LA(1)=='\\')) {
+-				mESC(false);
+-			}
+-			else if ((_tokenSet_3.member(LA(1)))) {
+-				matchNot('"');
+-			}
+-			else {
+-				break _loop245;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('"');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ESC;
+-		int _saveIndex;
+-		
+-		match('\\');
+-		{
+-		switch ( LA(1)) {
+-		case 'n':
+-		{
+-			match('n');
+-			break;
+-		}
+-		case 'r':
+-		{
+-			match('r');
+-			break;
+-		}
+-		case 't':
+-		{
+-			match('t');
+-			break;
+-		}
+-		case 'b':
+-		{
+-			match('b');
+-			break;
+-		}
+-		case 'f':
+-		{
+-			match('f');
+-			break;
+-		}
+-		case '"':
+-		{
+-			match('"');
+-			break;
+-		}
+-		case '\'':
+-		{
+-			match('\'');
+-			break;
+-		}
+-		case '\\':
+-		{
+-			match('\\');
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		{
+-			{
+-			matchRange('0','3');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mDIGIT(false);
+-				{
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-					mDIGIT(false);
+-				}
+-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case '4':  case '5':  case '6':  case '7':
+-		{
+-			{
+-			matchRange('4','7');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mDIGIT(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case 'u':
+-		{
+-			match('u');
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = DIGIT;
+-		int _saveIndex;
+-		
+-		matchRange('0','9');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = XDIGIT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			matchRange('0','9');
+-			break;
+-		}
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':
+-		{
+-			matchRange('a','f');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':
+-		{
+-			matchRange('A','F');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			matchRange('a','z');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':
+-		{
+-			matchRange('A','Z');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop258:
+-		do {
+-			switch ( LA(1)) {
+-			case 'a':  case 'b':  case 'c':  case 'd':
+-			case 'e':  case 'f':  case 'g':  case 'h':
+-			case 'i':  case 'j':  case 'k':  case 'l':
+-			case 'm':  case 'n':  case 'o':  case 'p':
+-			case 'q':  case 'r':  case 's':  case 't':
+-			case 'u':  case 'v':  case 'w':  case 'x':
+-			case 'y':  case 'z':
+-			{
+-				matchRange('a','z');
+-				break;
+-			}
+-			case 'A':  case 'B':  case 'C':  case 'D':
+-			case 'E':  case 'F':  case 'G':  case 'H':
+-			case 'I':  case 'J':  case 'K':  case 'L':
+-			case 'M':  case 'N':  case 'O':  case 'P':
+-			case 'Q':  case 'R':  case 'S':  case 'T':
+-			case 'U':  case 'V':  case 'W':  case 'X':
+-			case 'Y':  case 'Z':
+-			{
+-				matchRange('A','Z');
+-				break;
+-			}
+-			case '_':
+-			{
+-				match('_');
+-				break;
+-			}
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':
+-			{
+-				matchRange('0','9');
+-				break;
+-			}
+-			default:
+-			{
+-				break _loop258;
+-			}
+-			}
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mINT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = INT;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt261=0;
+-		_loop261:
+-		do {
+-			if (((LA(1) >= '0' && LA(1) <= '9'))) {
+-				mDIGIT(false);
+-			}
+-			else {
+-				if ( _cnt261>=1 ) { break _loop261; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt261++;
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = new long[8];
+-		data[0]=-9224L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = new long[8];
+-		data[0]=-140737488355336L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = new long[8];
+-		data[0]=-4398046512136L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = new long[8];
+-		data[0]=-17179869192L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParser.java	2006-02-08 22:30:34.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,252 +0,0 @@
+-// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
+- package persistence.antlr; 
+-import persistence.antlr.TokenBuffer;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import persistence.antlr.LLkParser;
+-import persistence.antlr.Token;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltException;
+-import persistence.antlr.MismatchedTokenException;
+-import persistence.antlr.SemanticException;
+-import persistence.antlr.ParserSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-
+-/** Simple lexer/parser for reading token definition files
+-  in support of the import/export vocab option for grammars.
+- */
+-public class ANTLRTokdefParser extends persistence.antlr.LLkParser       implements ANTLRTokdefParserTokenTypes
+- {
+-
+-	// This chunk of error reporting code provided by Brian Smith
+-
+-    private persistence.antlr.Tool antlrTool;
+-
+-    /** In order to make it so existing subclasses don't break, we won't require
+-     * that the persistence.antlr.Tool instance be passed as a constructor element. Instead,
+-     * the persistence.antlr.Tool instance should register itself via {@link #initTool(antlr.Tool)}
+-     * @throws IllegalStateException if a tool has already been registered
+-     * @since 2.7.2
+-     */
+-    public void setTool(persistence.antlr.Tool tool) {
+-        if (antlrTool == null) {
+-            antlrTool = tool;
+-		}
+-        else {
+-            throw new IllegalStateException("persistence.antlr.Tool already registered");
+-		}
+-    }
+-
+-    /** @since 2.7.2 */
+-    protected persistence.antlr.Tool getTool() {
+-        return antlrTool;
+-    }
+-
+-    /** Delegates the error message to the tool if any was registered via
+-     *  {@link #initTool(persistence.antlr.Tool)}
+-     *  @since 2.7.2
+-     */
+-    public void reportError(String s) {
+-        if (getTool() != null) {
+-            getTool().error(s, getFilename(), -1, -1);
+-		}
+-        else {
+-            super.reportError(s);
+-		}
+-    }
+-
+-    /** Delegates the error message to the tool if any was registered via
+-     *  {@link #initTool(persistence.antlr.Tool)}
+-     *  @since 2.7.2
+-     */
+-    public void reportError(RecognitionException e) {
+-        if (getTool() != null) {
+-            getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn());
+-		}
+-        else {
+-            super.reportError(e);
+-		}
+-    }
+-
+-    /** Delegates the warning message to the tool if any was registered via
+-     *  {@link #initTool(persistence.antlr.Tool)}
+-     *  @since 2.7.2
+-     */
+-    public void reportWarning(String s) {
+-        if (getTool() != null) {
+-            getTool().warning(s, getFilename(), -1, -1);
+-		}
+-        else {
+-            super.reportWarning(s);
+-		}
+-    }
+-
+-protected ANTLRTokdefParser(TokenBuffer tokenBuf, int k) {
+-  super(tokenBuf,k);
+-  tokenNames = _tokenNames;
+-}
+-
+-public ANTLRTokdefParser(TokenBuffer tokenBuf) {
+-  this(tokenBuf,3);
+-}
+-
+-protected ANTLRTokdefParser(TokenStream lexer, int k) {
+-  super(lexer,k);
+-  tokenNames = _tokenNames;
+-}
+-
+-public ANTLRTokdefParser(TokenStream lexer) {
+-  this(lexer,3);
+-}
+-
+-public ANTLRTokdefParser(ParserSharedInputState state) {
+-  super(state,3);
+-  tokenNames = _tokenNames;
+-}
+-
+-	public final void file(
+-		ImportVocabTokenManager tm
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  name = null;
+-		
+-		try {      // for error handling
+-			name = LT(1);
+-			match(ID);
+-			{
+-			_loop225:
+-			do {
+-				if ((LA(1)==ID||LA(1)==STRING)) {
+-					line(tm);
+-				}
+-				else {
+-					break _loop225;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_0);
+-		}
+-	}
+-	
+-	public final void line(
+-		ImportVocabTokenManager tm
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  s1 = null;
+-		Token  lab = null;
+-		Token  s2 = null;
+-		Token  id = null;
+-		Token  para = null;
+-		Token  id2 = null;
+-		Token  i = null;
+-		Token t=null; Token s=null;
+-		
+-		try {      // for error handling
+-			{
+-			if ((LA(1)==STRING)) {
+-				s1 = LT(1);
+-				match(STRING);
+-				s = s1;
+-			}
+-			else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==STRING)) {
+-				lab = LT(1);
+-				match(ID);
+-				t = lab;
+-				match(ASSIGN);
+-				s2 = LT(1);
+-				match(STRING);
+-				s = s2;
+-			}
+-			else if ((LA(1)==ID) && (LA(2)==LPAREN)) {
+-				id = LT(1);
+-				match(ID);
+-				t=id;
+-				match(LPAREN);
+-				para = LT(1);
+-				match(STRING);
+-				match(RPAREN);
+-			}
+-			else if ((LA(1)==ID) && (LA(2)==ASSIGN) && (LA(3)==INT)) {
+-				id2 = LT(1);
+-				match(ID);
+-				t=id2;
+-			}
+-			else {
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			
+-			}
+-			match(ASSIGN);
+-			i = LT(1);
+-			match(INT);
+-			
+-					Integer value = Integer.valueOf(i.getText());
+-					// if literal found, define as a string literal
+-					if ( s!=null ) {
+-						tm.define(s.getText(), value.intValue());
+-						// if label, then label the string and map label to token symbol also
+-						if ( t!=null ) {
+-							StringLiteralSymbol sl =
+-								(StringLiteralSymbol) tm.getTokenSymbol(s.getText());
+-							sl.setLabel(t.getText());
+-							tm.mapToTokenSymbol(t.getText(), sl);
+-						}
+-					}
+-					// define token (not a literal)
+-					else if ( t!=null ) {
+-						tm.define(t.getText(), value.intValue());
+-						if ( para!=null ) {
+-							TokenSymbol ts = tm.getTokenSymbol(t.getText());
+-							ts.setParaphrase(
+-								para.getText()
+-							);
+-						}
+-					}
+-					
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_1);
+-		}
+-	}
+-	
+-	
+-	public static final String[] _tokenNames = {
+-		"<0>",
+-		"EOF",
+-		"<2>",
+-		"NULL_TREE_LOOKAHEAD",
+-		"ID",
+-		"STRING",
+-		"ASSIGN",
+-		"LPAREN",
+-		"RPAREN",
+-		"INT",
+-		"WS",
+-		"SL_COMMENT",
+-		"ML_COMMENT",
+-		"ESC",
+-		"DIGIT",
+-		"XDIGIT"
+-	};
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = { 2L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = { 50L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParserTokenTypes.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParserTokenTypes.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParserTokenTypes.java	2006-02-08 22:30:34.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokdefParserTokenTypes.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,18 +0,0 @@
+-// $ANTLR : "tokdef.g" -> "ANTLRTokdefParser.java"$
+- package persistence.antlr; 
+-public interface ANTLRTokdefParserTokenTypes {
+-	int EOF = 1;
+-	int NULL_TREE_LOOKAHEAD = 3;
+-	int ID = 4;
+-	int STRING = 5;
+-	int ASSIGN = 6;
+-	int LPAREN = 7;
+-	int RPAREN = 8;
+-	int INT = 9;
+-	int WS = 10;
+-	int SL_COMMENT = 11;
+-	int ML_COMMENT = 12;
+-	int ESC = 13;
+-	int DIGIT = 14;
+-	int XDIGIT = 15;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokenTypes.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokenTypes.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ANTLRTokenTypes.java	2006-02-08 22:30:34.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ANTLRTokenTypes.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,68 +0,0 @@
+-// $ANTLR : "antlr.g" -> "ANTLRLexer.java"$
+-
+-package persistence.antlr;
+-
+-public interface ANTLRTokenTypes {
+-	int EOF = 1;
+-	int NULL_TREE_LOOKAHEAD = 3;
+-	int LITERAL_tokens = 4;
+-	int LITERAL_header = 5;
+-	int STRING_LITERAL = 6;
+-	int ACTION = 7;
+-	int DOC_COMMENT = 8;
+-	int LITERAL_lexclass = 9;
+-	int LITERAL_class = 10;
+-	int LITERAL_extends = 11;
+-	int LITERAL_Lexer = 12;
+-	int LITERAL_TreeParser = 13;
+-	int OPTIONS = 14;
+-	int ASSIGN = 15;
+-	int SEMI = 16;
+-	int RCURLY = 17;
+-	int LITERAL_charVocabulary = 18;
+-	int CHAR_LITERAL = 19;
+-	int INT = 20;
+-	int OR = 21;
+-	int RANGE = 22;
+-	int TOKENS = 23;
+-	int TOKEN_REF = 24;
+-	int OPEN_ELEMENT_OPTION = 25;
+-	int CLOSE_ELEMENT_OPTION = 26;
+-	int LPAREN = 27;
+-	int RPAREN = 28;
+-	int LITERAL_Parser = 29;
+-	int LITERAL_protected = 30;
+-	int LITERAL_public = 31;
+-	int LITERAL_private = 32;
+-	int BANG = 33;
+-	int ARG_ACTION = 34;
+-	int LITERAL_returns = 35;
+-	int COLON = 36;
+-	int LITERAL_throws = 37;
+-	int COMMA = 38;
+-	int LITERAL_exception = 39;
+-	int LITERAL_catch = 40;
+-	int RULE_REF = 41;
+-	int NOT_OP = 42;
+-	int SEMPRED = 43;
+-	int TREE_BEGIN = 44;
+-	int QUESTION = 45;
+-	int STAR = 46;
+-	int PLUS = 47;
+-	int IMPLIES = 48;
+-	int CARET = 49;
+-	int WILDCARD = 50;
+-	int LITERAL_options = 51;
+-	int WS = 52;
+-	int COMMENT = 53;
+-	int SL_COMMENT = 54;
+-	int ML_COMMENT = 55;
+-	int ESC = 56;
+-	int DIGIT = 57;
+-	int XDIGIT = 58;
+-	int NESTED_ARG_ACTION = 59;
+-	int NESTED_ACTION = 60;
+-	int WS_LOOP = 61;
+-	int INTERNAL_RULE_REF = 62;
+-	int WS_OPT = 63;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ASTFactory.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTFactory.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ASTFactory.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTFactory.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,402 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-import persistence.antlr.collections.impl.ASTArray;
+-
+-import java.util.Hashtable;
+-import java.lang.reflect.Constructor;
+-
+-/** AST Support code shared by TreeParser and Parser.
+- *  We use delegation to share code (and have only one
+- *  bit of code to maintain) rather than subclassing
+- *  or superclassing (forces AST support code to be
+- *  loaded even when you don't want to do AST stuff).
+- *
+- *  Typically, setASTNodeType is used to specify the
+- *  homogeneous type of node to create, but you can override
+- *  create to make heterogeneous nodes etc...
+- */
+-public class ASTFactory {
+-    /** Name of AST class to create during tree construction.
+-     *  Null implies that the create method should create
+-     *  a default AST type such as CommonAST.  This is for
+-	 *  homogeneous nodes.
+-     */
+-    protected String theASTNodeType = null;
+-    protected Class theASTNodeTypeClass = null;
+-
+-	/** How to specify the classname to create for a particular
+-	 *  token type.  Note that ANTLR allows you to say, for example,
+-	 *
+-	    tokens {
+-         PLUS<AST=PLUSNode>;
+-         ...
+-        }
+-	 *
+-	 *  and it tracks everything statically.  #[PLUS] will make you
+-	 *  a PLUSNode w/o use of this table.
+-	 *
+-	 *  For tokens that ANTLR cannot track statically like #[i],
+-	 *  you can use this table to map PLUS (Integer) -> PLUSNode (Class)
+-	 *  etc... ANTLR sets the class map from the tokens {...} section
+-	 *  via the ASTFactory(Hashtable) ctor in persistence.antlr.Parser.
+-	 */
+-	protected Hashtable tokenTypeToASTClassMap = null;
+-
+-	public ASTFactory() {
+-	}
+-
+-	/** Create factory with a specific mapping from token type
+-	 *  to Java AST node type.  Your subclasses of ASTFactory
+-	 *  can override and reuse the map stuff.
+-	 */
+-	public ASTFactory(Hashtable tokenTypeToClassMap) {
+-		setTokenTypeToASTClassMap(tokenTypeToClassMap);
+-	}
+-
+-	/** Specify an "override" for the Java AST object created for a
+-	 *  specific token.  It is provided as a convenience so
+-	 *  you can specify node types dynamically.  ANTLR sets
+-	 *  the token type mapping automatically from the tokens{...}
+-	 *  section, but you can change that mapping with this method.
+-	 *  ANTLR does it's best to statically determine the node
+-	 *  type for generating parsers, but it cannot deal with
+-	 *  dynamic values like #[LT(1)].  In this case, it relies
+-	 *  on the mapping.  Beware differences in the tokens{...}
+-	 *  section and what you set via this method.  Make sure
+-	 *  they are the same.
+-	 *
+-	 *  Set className to null to remove the mapping.
+-	 *
+-	 *  @since 2.7.2
+-	 */
+-	public void setTokenTypeASTNodeType(int tokenType, String className)
+-		throws IllegalArgumentException
+-	{
+-		if ( tokenTypeToASTClassMap==null ) {
+-			tokenTypeToASTClassMap = new Hashtable();
+-		}
+-		if ( className==null ) {
+-			tokenTypeToASTClassMap.remove(new Integer(tokenType));
+-			return;
+-		}
+-		Class c = null;
+-		try {
+-			c = Class.forName(className);
+-			tokenTypeToASTClassMap.put(new Integer(tokenType), c);
+-		}
+-		catch (Exception e) {
+-			throw new IllegalArgumentException("Invalid class, "+className);
+-		}
+-	}
+-
+-	/** For a given token type, what is the AST node object type to create
+-	 *  for it?
+-	 *  @since 2.7.2
+-	 */
+-	public Class getASTNodeType(int tokenType) {
+-		// try node specific class
+-		if ( tokenTypeToASTClassMap!=null ) {
+-			Class c = (Class)tokenTypeToASTClassMap.get(new Integer(tokenType));
+-			if ( c!=null ) {
+-				return c;
+-			}
+-		}
+-
+-		// try a global specified class
+-		if (theASTNodeTypeClass != null) {
+-			return theASTNodeTypeClass;
+-		}
+-
+-		// default to the common type
+-		return CommonAST.class;
+-	}
+-
+-    /** Add a child to the current AST */
+-    public void addASTChild(ASTPair currentAST, AST child) {
+-        if (child != null) {
+-            if (currentAST.root == null) {
+-                // Make new child the current root
+-                currentAST.root = child;
+-            }
+-            else {
+-                if (currentAST.child == null) {
+-                    // Add new child to current root
+-                    currentAST.root.setFirstChild(child);
+-                }
+-                else {
+-                    currentAST.child.setNextSibling(child);
+-                }
+-            }
+-            // Make new child the current child
+-            currentAST.child = child;
+-            currentAST.advanceChildToEnd();
+-        }
+-    }
+-
+-    /** Create a new empty AST node; if the user did not specify
+-     *  an AST node type, then create a default one: CommonAST.
+-     */
+-    public AST create() {
+-		return create(Token.INVALID_TYPE);
+-    }
+-
+-    public AST create(int type) {
+-		Class c = getASTNodeType(type);
+-		AST t = create(c);
+-		if ( t!=null ) {
+-			t.initialize(type, "");
+-		}
+-		return t;
+-	}
+-
+-	public AST create(int type, String txt) {
+-        AST t = create(type);
+-		if ( t!=null ) {
+-			t.initialize(type, txt);
+-		}
+-        return t;
+-    }
+-
+-	/** Create an AST node with the token type and text passed in, but
+-	 *  with a specific Java object type. Typically called when you
+-	 *  say @[PLUS,"+",PLUSNode] in an antlr action.
+-	 *  @since 2.7.2
+-	 */
+-	public AST create(int type, String txt, String className) {
+-        AST t = create(className);
+-		if ( t!=null ) {
+-			t.initialize(type, txt);
+-		}
+-        return t;
+-    }
+-
+-    /** Create a new empty AST node; if the user did not specify
+-     *  an AST node type, then create a default one: CommonAST.
+-     */
+-    public AST create(AST tr) {
+-        if (tr == null) return null;		// create(null) == null
+-        AST t = create(tr.getType());
+-		if ( t!=null ) {
+-			t.initialize(tr);
+-		}
+-        return t;
+-    }
+-
+-	public AST create(Token tok) {
+-        AST t = create(tok.getType());
+-		if ( t!=null ) {
+-			t.initialize(tok);
+-		}
+-        return t;
+-    }
+-
+-	/** ANTLR generates reference to this when you reference a token
+-	 *  that has a specified heterogeneous AST node type.  This is
+-	 *  also a special case node creation routine for backward
+-	 *  compatibility.  Before, ANTLR generated "new T(tokenObject)"
+-	 *  and so I must call the appropriate constructor not T().
+-	 *
+-	 * @since 2.7.2
+-	 */
+-	public AST create(Token tok, String className) {
+-        AST t = createUsingCtor(tok,className);
+-        return t;
+-    }
+-
+-	/**
+-	 * @since 2.7.2
+-	 */
+-	public AST create(String className) {
+-		Class c = null;
+-		try {
+-			c = Class.forName(className);
+-		}
+-		catch (Exception e) {
+-			throw new IllegalArgumentException("Invalid class, "+className);
+-		}
+-		return create(c);
+-	}
+-
+-	/**
+-	 * @since 2.7.2
+-	 */
+-	protected AST createUsingCtor(Token token, String className) {
+-		Class c = null;
+-		AST t = null;
+-		try {
+-			c = Class.forName(className);
+-			Class[] tokenArgType = new Class[] { persistence.antlr.Token.class };
+-			try {
+-				Constructor ctor = c.getConstructor(tokenArgType);
+-				t = (AST)ctor.newInstance(new Object[]{token}); // make a new one
+-			}
+-			catch (NoSuchMethodException e){
+-				// just do the regular thing if you can't find the ctor
+-				// Your AST must have default ctor to use this.
+-				t = create(c);
+-				if ( t!=null ) {
+-					t.initialize(token);
+-				}
+-			}
+-		}
+-		catch (Exception e) {
+-			throw new IllegalArgumentException("Invalid class or can't make instance, "+className);
+-		}
+-		return t;
+-	}
+-
+-	/**
+-	 * @since 2.7.2
+-	 */
+-	protected AST create(Class c) {
+-		AST t = null;
+-		try {
+-			t = (AST)c.newInstance(); // make a new one
+-		}
+-		catch (Exception e) {
+-			error("Can't create AST Node " + c.getName());
+-			return null;
+-		}
+-        return t;
+-    }
+-
+-    /** Copy a single node with same Java AST objec type.
+-	 *  Ignore the tokenType->Class mapping since you know
+-	 *  the type of the node, t.getClass(), and doing a dup.
+-	 *
+-	 *  clone() is not used because we want all AST creation
+-	 *  to go thru the factory so creation can be
+-     *  tracked.  Returns null if t is null.
+-     */
+-    public AST dup(AST t) {
+-		if ( t==null ) {
+-			return null;
+-		}
+-		AST dup_t = create(t.getClass());
+-		dup_t.initialize(t);
+-		return dup_t;
+-    }
+-
+-    /** Duplicate tree including siblings of root. */
+-    public AST dupList(AST t) {
+-        AST result = dupTree(t);            // if t == null, then result==null
+-        AST nt = result;
+-        while (t != null) {						// for each sibling of the root
+-            t = t.getNextSibling();
+-            nt.setNextSibling(dupTree(t));	// dup each subtree, building new tree
+-            nt = nt.getNextSibling();
+-        }
+-        return result;
+-    }
+-
+-    /**Duplicate a tree, assuming this is a root node of a tree--
+-     * duplicate that node and what's below; ignore siblings of root node.
+-     */
+-    public AST dupTree(AST t) {
+-        AST result = dup(t);		// make copy of root
+-        // copy all children of root.
+-        if (t != null) {
+-            result.setFirstChild(dupList(t.getFirstChild()));
+-        }
+-        return result;
+-    }
+-
+-    /** Make a tree from a list of nodes.  The first element in the
+-     *  array is the root.  If the root is null, then the tree is
+-     *  a simple list not a tree.  Handles null children nodes correctly.
+-     *  For example, build(a, b, null, c) yields tree (a b c).  build(null,a,b)
+-     *  yields tree (nil a b).
+-     */
+-    public AST make(AST[] nodes) {
+-        if (nodes == null || nodes.length == 0) return null;
+-        AST root = nodes[0];
+-        AST tail = null;
+-        if (root != null) {
+-            root.setFirstChild(null);	// don't leave any old pointers set
+-        }
+-        // link in children;
+-        for (int i = 1; i < nodes.length; i++) {
+-            if (nodes[i] == null) continue;	// ignore null nodes
+-            if (root == null) {
+-                // Set the root and set it up for a flat list
+-                root = tail = nodes[i];
+-            }
+-            else if (tail == null) {
+-                root.setFirstChild(nodes[i]);
+-                tail = root.getFirstChild();
+-            }
+-            else {
+-                tail.setNextSibling(nodes[i]);
+-                tail = tail.getNextSibling();
+-            }
+-            // Chase tail to last sibling
+-            while (tail.getNextSibling() != null) {
+-                tail = tail.getNextSibling();
+-            }
+-        }
+-        return root;
+-    }
+-
+-    /** Make a tree from a list of nodes, where the nodes are contained
+-     * in an ASTArray object
+-     */
+-    public AST make(ASTArray nodes) {
+-        return make(nodes.array);
+-    }
+-
+-    /** Make an AST the root of current AST */
+-    public void makeASTRoot(ASTPair currentAST, AST root) {
+-        if (root != null) {
+-            // Add the current root as a child of new root
+-            root.addChild(currentAST.root);
+-            // The new current child is the last sibling of the old root
+-            currentAST.child = currentAST.root;
+-            currentAST.advanceChildToEnd();
+-            // Set the new root
+-            currentAST.root = root;
+-        }
+-    }
+-
+-    public void setASTNodeClass(String t) {
+-        theASTNodeType = t;
+-        try {
+-            theASTNodeTypeClass = Class.forName(t); // get class def
+-        }
+-        catch (Exception e) {
+-            // either class not found,
+-            // class is interface/abstract, or
+-            // class or initializer is not accessible.
+-            error("Can't find/access AST Node type" + t);
+-        }
+-    }
+-
+-    /** Specify the type of node to create during tree building.
+-     * 	@deprecated since 2.7.1
+-     */
+-    public void setASTNodeType(String t) {
+-        setASTNodeClass(t);
+-    }
+-
+-	public Hashtable getTokenTypeToASTClassMap() {
+-		return tokenTypeToASTClassMap;
+-	}
+-
+-	public void setTokenTypeToASTClassMap(Hashtable tokenTypeToClassMap) {
+-		this.tokenTypeToASTClassMap = tokenTypeToClassMap;
+-	}
+-
+-    /** To change where error messages go, can subclass/override this method
+-     *  and then setASTFactory in Parser and TreeParser.  This method removes
+-     *  a prior dependency on class persistence.antlr.Tool.
+-     */
+-    public void error(String e) {
+-        System.err.println(e);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ASTIterator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTIterator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ASTIterator.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTIterator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,74 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-public class ASTIterator {
+-    protected AST cursor = null;
+-    protected AST original = null;
+-
+-
+-    public ASTIterator(AST t) {
+-        original = cursor = t;
+-    }
+-
+-    /** Is 'sub' a subtree of 't' beginning at the root? */
+-    public boolean isSubtree(AST t, AST sub) {
+-        AST sibling;
+-
+-        // the empty tree is always a subset of any tree.
+-        if (sub == null) {
+-            return true;
+-        }
+-
+-        // if the tree is empty, return true if the subtree template is too.
+-        if (t == null) {
+-            if (sub != null) return false;
+-            return true;
+-        }
+-
+-        // Otherwise, start walking sibling lists.  First mismatch, return false.
+-        for (sibling = t;
+-             sibling != null && sub != null;
+-             sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) {
+-            // as a quick optimization, check roots first.
+-            if (sibling.getType() != sub.getType()) return false;
+-            // if roots match, do full match test on children.
+-            if (sibling.getFirstChild() != null) {
+-                if (!isSubtree(sibling.getFirstChild(), sub.getFirstChild())) return false;
+-            }
+-        }
+-        return true;
+-    }
+-
+-    /** Find the next subtree with structure and token types equal to
+-     * those of 'template'.
+-     */
+-    public AST next(AST template) {
+-        AST t = null;
+-        AST sibling = null;
+-
+-        if (cursor == null) {	// do nothing if no tree to work on
+-            return null;
+-        }
+-
+-        // Start walking sibling list looking for subtree matches.
+-        for (; cursor != null; cursor = cursor.getNextSibling()) {
+-            // as a quick optimization, check roots first.
+-            if (cursor.getType() == template.getType()) {
+-                // if roots match, do full match test on children.
+-                if (cursor.getFirstChild() != null) {
+-                    if (isSubtree(cursor.getFirstChild(), template.getFirstChild())) {
+-                        return cursor;
+-                    }
+-                }
+-            }
+-        }
+-        return t;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ASTNULLType.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTNULLType.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ASTNULLType.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTNULLType.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,106 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-import persistence.antlr.collections.ASTEnumeration;
+-import persistence.antlr.Token;
+-
+-/** There is only one instance of this class **/
+-public class ASTNULLType implements AST {
+-    public void addChild(AST c) {
+-    }
+-
+-    public boolean equals(AST t) {
+-        return false;
+-    }
+-
+-    public boolean equalsList(AST t) {
+-        return false;
+-    }
+-
+-    public boolean equalsListPartial(AST t) {
+-        return false;
+-    }
+-
+-    public boolean equalsTree(AST t) {
+-        return false;
+-    }
+-
+-    public boolean equalsTreePartial(AST t) {
+-        return false;
+-    }
+-
+-    public ASTEnumeration findAll(AST tree) {
+-        return null;
+-    }
+-
+-    public ASTEnumeration findAllPartial(AST subtree) {
+-        return null;
+-    }
+-
+-    public AST getFirstChild() {
+-        return this;
+-    }
+-
+-    public AST getNextSibling() {
+-        return this;
+-    }
+-
+-    public String getText() {
+-        return "<ASTNULL>";
+-    }
+-
+-    public int getType() {
+-        return Token.NULL_TREE_LOOKAHEAD;
+-    }
+-
+-    public int getLine() {
+-        return 0;
+-    }
+-
+-    public int getColumn() {
+-        return 0;
+-    }
+-
+-	public int getNumberOfChildren() {
+-		return 0;
+-	}
+-
+-    public void initialize(int t, String txt) {
+-    }
+-
+-    public void initialize(AST t) {
+-    }
+-
+-    public void initialize(Token t) {
+-    }
+-
+-    public void setFirstChild(AST c) {
+-    }
+-
+-    public void setNextSibling(AST n) {
+-    }
+-
+-    public void setText(String text) {
+-    }
+-
+-    public void setType(int ttype) {
+-    }
+-
+-    public String toString() {
+-        return getText();
+-    }
+-
+-    public String toStringList() {
+-        return getText();
+-    }
+-
+-    public String toStringTree() {
+-        return getText();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ASTPair.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTPair.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ASTPair.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTPair.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,42 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-/** ASTPair:  utility class used for manipulating a pair of ASTs
+- * representing the current AST root and current AST sibling.
+- * This exists to compensate for the lack of pointers or 'var'
+- * arguments in Java.
+- */
+-public class ASTPair {
+-    public AST root;		// current root of tree
+-    public AST child;		// current child to which siblings are added
+-
+-    /** Make sure that child is the last sibling */
+-    public final void advanceChildToEnd() {
+-        if (child != null) {
+-            while (child.getNextSibling() != null) {
+-                child = child.getNextSibling();
+-            }
+-        }
+-    }
+-
+-    /** Copy an ASTPair.  Don't call it clone() because we want type-safety */
+-    public ASTPair copy() {
+-        ASTPair tmp = new ASTPair();
+-        tmp.root = root;
+-        tmp.child = child;
+-        return tmp;
+-    }
+-
+-    public String toString() {
+-        String r = root == null ? "null" : root.getText();
+-        String c = child == null ? "null" : child.getText();
+-        return "[" + r + "," + c + "]";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ASTVisitor.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTVisitor.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ASTVisitor.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ASTVisitor.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,13 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-public interface ASTVisitor {
+-    public void visit(AST node);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/BaseAST.java glassfish-gil/entity-persistence/src/java/persistence/antlr/BaseAST.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/BaseAST.java	2006-08-31 00:34:03.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/BaseAST.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,484 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-import persistence.antlr.collections.ASTEnumeration;
+-import persistence.antlr.collections.impl.ASTEnumerator;
+-import persistence.antlr.collections.impl.Vector;
+-
+-import java.io.Serializable;
+-import java.io.IOException;
+-import java.io.Writer;
+-
+-/**
+- * A Child-Sibling Tree.
+- *
+- * A tree with PLUS at the root and with two children 3 and 4 is
+- * structured as:
+- *
+- *		PLUS
+- *		  |
+- *		  3 -- 4
+- *
+- * and can be specified easily in LISP notation as
+- *
+- * (PLUS 3 4)
+- *
+- * where every '(' starts a new subtree.
+- *
+- * These trees are particular useful for translators because of
+- * the flexibility of the children lists.  They are also very easy
+- * to walk automatically, whereas trees with specific children
+- * reference fields can't easily be walked automatically.
+- *
+- * This class contains the basic support for an AST.
+- * Most people will create ASTs that are subclasses of
+- * BaseAST or of CommonAST.
+- */
+-public abstract class BaseAST implements AST, Serializable {
+-    protected BaseAST down;
+-    protected BaseAST right;
+-
+-    private static boolean verboseStringConversion = false;
+-    private static String[] tokenNames = null;
+-
+-    /**Add a node to the end of the child list for this node */
+-    public void addChild(AST node) {
+-        if (node == null) return;
+-        BaseAST t = this.down;
+-        if (t != null) {
+-            while (t.right != null) {
+-                t = t.right;
+-            }
+-            t.right = (BaseAST)node;
+-        }
+-        else {
+-            this.down = (BaseAST)node;
+-        }
+-    }
+-
+-	/** How many children does this node have? */
+-    public int getNumberOfChildren() {
+-        BaseAST t = this.down;
+-		int n = 0;
+-        if (t != null) {
+-			n = 1;
+-			while (t.right != null) {
+-                t = t.right;
+-				n++;
+-            }
+-			return n;
+-        }
+-		return n;
+-    }
+-
+-    private void doWorkForFindAll(Vector v, AST target, boolean partialMatch) {
+-        AST sibling;
+-
+-        // Start walking sibling lists, looking for matches.
+-        siblingWalk:
+-        for (sibling = this;
+-             sibling != null;
+-             sibling = sibling.getNextSibling()) {
+-            if ((partialMatch && sibling.equalsTreePartial(target)) ||
+-                (!partialMatch && sibling.equalsTree(target))) {
+-                v.appendElement(sibling);
+-            }
+-            // regardless of match or not, check any children for matches
+-            if (sibling.getFirstChild() != null) {
+-                ((BaseAST)sibling.getFirstChild()).doWorkForFindAll(v, target, partialMatch);
+-            }
+-        }
+-    }
+-
+-    /** Is node t equal to this in terms of token type and text? */
+-    public boolean equals(AST t) {
+-        if (t == null) return false;
+-        return this.getText().equals(t.getText()) &&
+-            this.getType() == t.getType();
+-    }
+-
+-    /** Is t an exact structural and equals() match of this tree.  The
+-     *  'this' reference is considered the start of a sibling list.
+-     */
+-    public boolean equalsList(AST t) {
+-        AST sibling;
+-
+-        // the empty tree is not a match of any non-null tree.
+-        if (t == null) {
+-            return false;
+-        }
+-
+-        // Otherwise, start walking sibling lists.  First mismatch, return false.
+-        for (sibling = this;
+-			 sibling != null && t != null;
+-			 sibling = sibling.getNextSibling(), t = t.getNextSibling())
+-		{
+-            // as a quick optimization, check roots first.
+-            if (!sibling.equals(t)) {
+-                return false;
+-            }
+-            // if roots match, do full list match test on children.
+-            if (sibling.getFirstChild() != null) {
+-                if (!sibling.getFirstChild().equalsList(t.getFirstChild())) {
+-                    return false;
+-                }
+-            }
+-            // sibling has no kids, make sure t doesn't either
+-            else if (t.getFirstChild() != null) {
+-                return false;
+-            }
+-        }
+-        if (sibling == null && t == null) {
+-            return true;
+-        }
+-        // one sibling list has more than the other
+-        return false;
+-    }
+-
+-    /** Is 'sub' a subtree of this list?
+-     *  The siblings of the root are NOT ignored.
+-     */
+-    public boolean equalsListPartial(AST sub) {
+-        AST sibling;
+-
+-        // the empty tree is always a subset of any tree.
+-        if (sub == null) {
+-            return true;
+-        }
+-
+-        // Otherwise, start walking sibling lists.  First mismatch, return false.
+-        for (sibling = this;
+-             sibling != null && sub != null;
+-             sibling = sibling.getNextSibling(), sub = sub.getNextSibling()) {
+-            // as a quick optimization, check roots first.
+-            if (!sibling.equals(sub)) return false;
+-            // if roots match, do partial list match test on children.
+-            if (sibling.getFirstChild() != null) {
+-                if (!sibling.getFirstChild().equalsListPartial(sub.getFirstChild())) return false;
+-            }
+-        }
+-        if (sibling == null && sub != null) {
+-            // nothing left to match in this tree, but subtree has more
+-            return false;
+-        }
+-        // either both are null or sibling has more, but subtree doesn't
+-        return true;
+-    }
+-
+-    /** Is tree rooted at 'this' equal to 't'?  The siblings
+-     *  of 'this' are ignored.
+-     */
+-    public boolean equalsTree(AST t) {
+-        // check roots first.
+-        if (!this.equals(t)) return false;
+-        // if roots match, do full list match test on children.
+-        if (this.getFirstChild() != null) {
+-            if (!this.getFirstChild().equalsList(t.getFirstChild())) return false;
+-        }
+-        // sibling has no kids, make sure t doesn't either
+-        else if (t.getFirstChild() != null) {
+-            return false;
+-        }
+-        return true;
+-    }
+-
+-    /** Is 't' a subtree of the tree rooted at 'this'?  The siblings
+-     *  of 'this' are ignored.
+-     */
+-    public boolean equalsTreePartial(AST sub) {
+-        // the empty tree is always a subset of any tree.
+-        if (sub == null) {
+-            return true;
+-        }
+-
+-        // check roots first.
+-        if (!this.equals(sub)) return false;
+-        // if roots match, do full list partial match test on children.
+-        if (this.getFirstChild() != null) {
+-            if (!this.getFirstChild().equalsListPartial(sub.getFirstChild())) return false;
+-        }
+-        return true;
+-    }
+-
+-    /** Walk the tree looking for all exact subtree matches.  Return
+-     *  an ASTEnumerator that lets the caller walk the list
+-     *  of subtree roots found herein.
+-     */
+-    public ASTEnumeration findAll(AST target) {
+-        Vector roots = new Vector(10);
+-        AST sibling;
+-
+-        // the empty tree cannot result in an enumeration
+-        if (target == null) {
+-            return null;
+-        }
+-
+-        doWorkForFindAll(roots, target, false);  // find all matches recursively
+-
+-        return new ASTEnumerator(roots);
+-    }
+-
+-    /** Walk the tree looking for all subtrees.  Return
+-     *  an ASTEnumerator that lets the caller walk the list
+-     *  of subtree roots found herein.
+-     */
+-    public ASTEnumeration findAllPartial(AST sub) {
+-        Vector roots = new Vector(10);
+-        AST sibling;
+-
+-        // the empty tree cannot result in an enumeration
+-        if (sub == null) {
+-            return null;
+-        }
+-
+-        doWorkForFindAll(roots, sub, true);  // find all matches recursively
+-
+-        return new ASTEnumerator(roots);
+-    }
+-
+-    /** Get the first child of this node; null if not children */
+-    public AST getFirstChild() {
+-        return down;
+-    }
+-
+-    /** Get the next sibling in line after this one */
+-    public AST getNextSibling() {
+-        return right;
+-    }
+-
+-    /** Get the token text for this node */
+-    public String getText() {
+-        return "";
+-    }
+-
+-    /** Get the token type for this node */
+-    public int getType() {
+-        return 0;
+-    }
+-
+-    public int getLine() {
+-        return 0;
+-    }
+-
+-    public int getColumn() {
+-        return 0;
+-    }
+-
+-    public abstract void initialize(int t, String txt);
+-
+-    public abstract void initialize(AST t);
+-
+-    public abstract void initialize(Token t);
+-
+-    /** Remove all children */
+-    public void removeChildren() {
+-        down = null;
+-    }
+-
+-    public void setFirstChild(AST c) {
+-        down = (BaseAST)c;
+-    }
+-
+-    public void setNextSibling(AST n) {
+-        right = (BaseAST)n;
+-    }
+-
+-    /** Set the token text for this node */
+-    public void setText(String text) {
+-    }
+-
+-    /** Set the token type for this node */
+-    public void setType(int ttype) {
+-    }
+-
+-    public static void setVerboseStringConversion(boolean verbose, String[] names) {
+-        verboseStringConversion = verbose;
+-        tokenNames = names;
+-    }
+-
+-    /** Return an array of strings that maps token ID to it's text. @since 2.7.3 */
+-    public static String[] getTokenNames() {
+-        return tokenNames;
+-    }
+-
+-    public String toString() {
+-        StringBuffer b = new StringBuffer();
+-        // if verbose and type name not same as text (keyword probably)
+-        if (verboseStringConversion &&
+-            !getText().equalsIgnoreCase(tokenNames[getType()]) &&
+-            !getText().equalsIgnoreCase(StringUtils.stripFrontBack(tokenNames[getType()], "\"", "\""))) {
+-            b.append('[');
+-            b.append(getText());
+-            b.append(",<");
+-            b.append(tokenNames[getType()]);
+-            b.append(">]");
+-            return b.toString();
+-        }
+-        return getText();
+-    }
+-
+-    /** Print out a child-sibling tree in LISP notation */
+-    public String toStringList() {
+-        AST t = this;
+-        String ts = "";
+-        if (t.getFirstChild() != null) ts += " (";
+-        ts += " " + this.toString();
+-        if (t.getFirstChild() != null) {
+-            ts += ((BaseAST)t.getFirstChild()).toStringList();
+-        }
+-        if (t.getFirstChild() != null) ts += " )";
+-        if (t.getNextSibling() != null) {
+-            ts += ((BaseAST)t.getNextSibling()).toStringList();
+-        }
+-        return ts;
+-    }
+-
+-    public String toStringTree() {
+-        AST t = this;
+-        String ts = "";
+-        if (t.getFirstChild() != null) ts += " (";
+-        ts += " " + this.toString();
+-        if (t.getFirstChild() != null) {
+-            ts += ((BaseAST)t.getFirstChild()).toStringList();
+-        }
+-        if (t.getFirstChild() != null) ts += " )";
+-        return ts;
+-    }
+-
+-    public static String decode(String text) {
+-        char c, c1, c2, c3, c4, c5;
+-        StringBuffer n = new StringBuffer();
+-        for (int i = 0; i < text.length(); i++) {
+-            c = text.charAt(i);
+-            if (c == '&') {
+-                c1 = text.charAt(i + 1);
+-                c2 = text.charAt(i + 2);
+-                c3 = text.charAt(i + 3);
+-                c4 = text.charAt(i + 4);
+-                c5 = text.charAt(i + 5);
+-
+-                if (c1 == 'a' && c2 == 'm' && c3 == 'p' && c4 == ';') {
+-                    n.append("&");
+-                    i += 5;
+-                }
+-                else if (c1 == 'l' && c2 == 't' && c3 == ';') {
+-                    n.append("<");
+-                    i += 4;
+-                }
+-                else if (c1 == 'g' && c2 == 't' && c3 == ';') {
+-                    n.append(">");
+-                    i += 4;
+-                }
+-                else if (c1 == 'q' && c2 == 'u' && c3 == 'o' &&
+-                    c4 == 't' && c5 == ';') {
+-                    n.append("\"");
+-                    i += 6;
+-                }
+-                else if (c1 == 'a' && c2 == 'p' && c3 == 'o' &&
+-                    c4 == 's' && c5 == ';') {
+-                    n.append("'");
+-                    i += 6;
+-                }
+-                else
+-                    n.append("&");
+-            }
+-            else
+-                n.append(c);
+-        }
+-        return new String(n);
+-    }
+-
+-    public static String encode(String text) {
+-        char c;
+-        StringBuffer n = new StringBuffer();
+-        for (int i = 0; i < text.length(); i++) {
+-            c = text.charAt(i);
+-            switch (c) {
+-                case '&':
+-                    {
+-                        n.append("&amp;");
+-                        break;
+-                    }
+-                case '<':
+-                    {
+-                        n.append("&lt;");
+-                        break;
+-                    }
+-                case '>':
+-                    {
+-                        n.append("&gt;");
+-                        break;
+-                    }
+-                case '"':
+-                    {
+-                        n.append("&quot;");
+-                        break;
+-                    }
+-                case '\'':
+-                    {
+-                        n.append("&apos;");
+-                        break;
+-                    }
+-                default :
+-                    {
+-                        n.append(c);
+-                        break;
+-                    }
+-            }
+-        }
+-        return new String(n);
+-    }
+-
+-    public void xmlSerializeNode(Writer out)
+-        throws IOException {
+-        StringBuffer buf = new StringBuffer(100);
+-        buf.append("<");
+-        buf.append(getClass().getName() + " ");
+-        buf.append("text=\"" + encode(getText()) + "\" type=\"" +
+-                   getType() + "\"/>");
+-        out.write(buf.toString());
+-    }
+-
+-    public void xmlSerializeRootOpen(Writer out)
+-        throws IOException {
+-        StringBuffer buf = new StringBuffer(100);
+-        buf.append("<");
+-        buf.append(getClass().getName() + " ");
+-        buf.append("text=\"" + encode(getText()) + "\" type=\"" +
+-                   getType() + "\">\n");
+-        out.write(buf.toString());
+-    }
+-
+-    public void xmlSerializeRootClose(Writer out)
+-        throws IOException {
+-        out.write("</" + getClass().getName() + ">\n");
+-    }
+-
+-    public void xmlSerialize(Writer out) throws IOException {
+-        // print out this node and all siblings
+-        for (AST node = this;
+-             node != null;
+-             node = node.getNextSibling()) {
+-            if (node.getFirstChild() == null) {
+-                // print guts (class name, attributes)
+-                ((BaseAST)node).xmlSerializeNode(out);
+-            }
+-            else {
+-                ((BaseAST)node).xmlSerializeRootOpen(out);
+-
+-                // print children
+-                ((BaseAST)node.getFirstChild()).xmlSerialize(out);
+-
+-                // print end tag
+-                ((BaseAST)node).xmlSerializeRootClose(out);
+-            }
+-        }
+-    }
+-
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/BlockContext.java glassfish-gil/entity-persistence/src/java/persistence/antlr/BlockContext.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/BlockContext.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/BlockContext.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,31 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**BlockContext stores the information needed when creating an
+- * alternative (list of elements).  Entering a subrule requires
+- * that we save this state as each block of alternatives
+- * requires state such as "tail of current alternative."
+- */
+-class BlockContext {
+-    AlternativeBlock block; // current block of alternatives
+-    int altNum;				// which alt are we accepting 0..n-1
+-    BlockEndElement blockEnd; // used if nested
+-
+-
+-    public void addAlternativeElement(AlternativeElement e) {
+-        currentAlt().addElement(e);
+-    }
+-
+-    public Alternative currentAlt() {
+-        return (Alternative)block.alternatives.elementAt(altNum);
+-    }
+-
+-    public AlternativeElement currentElement() {
+-        return currentAlt().tail;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/BlockEndElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/BlockEndElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/BlockEndElement.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/BlockEndElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,30 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**All alternative blocks are "terminated" by BlockEndElements unless
+- * they are rule blocks (in which case they use RuleEndElement).
+- */
+-class BlockEndElement extends AlternativeElement {
+-    protected boolean[] lock;	// for analysis; used to avoid infinite loops
+-    protected AlternativeBlock block;// ending blocks know what block they terminate
+-
+-
+-    public BlockEndElement(Grammar g) {
+-        super(g);
+-        lock = new boolean[g.maxk + 1];
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        //return " [BlkEnd]";
+-        return "";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/BlockWithImpliedExitPath.java glassfish-gil/entity-persistence/src/java/persistence/antlr/BlockWithImpliedExitPath.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/BlockWithImpliedExitPath.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/BlockWithImpliedExitPath.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,23 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-abstract class BlockWithImpliedExitPath extends AlternativeBlock {
+-    protected int exitLookaheadDepth;	// lookahead needed to handle optional path
+-    /** lookahead to bypass block; set
+-     * by deterministic().  1..k of Lookahead
+-     */
+-    protected Lookahead[] exitCache = new Lookahead[grammar.maxk + 1];
+-
+-    public BlockWithImpliedExitPath(Grammar g) {
+-        super(g);
+-    }
+-
+-    public BlockWithImpliedExitPath(Grammar g, Token start) {
+-        super(g, start, false);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ByteBuffer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ByteBuffer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ByteBuffer.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ByteBuffer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,52 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A Stream of characters fed to the lexer from a InputStream that can
+- * be rewound via mark()/rewind() methods.
+- * <p>
+- * A dynamic array is used to buffer up all the input characters.  Normally,
+- * "k" characters are stored in the buffer.  More characters may be stored during
+- * guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
+- * Consumption of characters is deferred.  In other words, reading the next
+- * character is not done by conume(), but deferred until needed by LA or LT.
+- * <p>
+- *
+- * @see persistence.antlr.CharQueue
+- */
+-// SAS: added this class to handle Binary input w/ FileInputStream
+-
+-import java.io.InputStream;
+-import java.io.IOException;
+-
+-public class ByteBuffer extends InputBuffer {
+-
+-    // char source
+-    transient InputStream input;
+-
+-
+-    /** Create a character buffer */
+-    public ByteBuffer(InputStream input_) {
+-        super();
+-        input = input_;
+-    }
+-
+-    /** Ensure that the character buffer is sufficiently full */
+-    public void fill(int amount) throws CharStreamException {
+-        try {
+-            syncConsume();
+-            // Fill the buffer sufficiently to hold needed characters
+-            while (queue.nbrEntries < amount + markerOffset) {
+-                // Append the next character
+-                queue.append((char)input.read());
+-            }
+-        }
+-        catch (IOException io) {
+-            throw new CharStreamIOException(io);
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharBuffer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharBuffer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharBuffer.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharBuffer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,52 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A Stream of characters fed to the lexer from a InputStream that can
+- * be rewound via mark()/rewind() methods.
+- * <p>
+- * A dynamic array is used to buffer up all the input characters.  Normally,
+- * "k" characters are stored in the buffer.  More characters may be stored during
+- * guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
+- * Consumption of characters is deferred.  In other words, reading the next
+- * character is not done by conume(), but deferred until needed by LA or LT.
+- * <p>
+- *
+- * @see persistence.antlr.CharQueue
+- */
+-
+-import java.io.Reader; // SAS: changed to properly read text files
+-import java.io.IOException;
+-
+-// SAS: Move most functionality into InputBuffer -- just the file-specific
+-//      stuff is in here
+-
+-public class CharBuffer extends InputBuffer {
+-    // char source
+-    transient Reader input;
+-
+-    /** Create a character buffer */
+-    public CharBuffer(Reader input_) { // SAS: for proper text i/o
+-        super();
+-        input = input_;
+-    }
+-
+-    /** Ensure that the character buffer is sufficiently full */
+-    public void fill(int amount) throws CharStreamException {
+-        try {
+-            syncConsume();
+-            // Fill the buffer sufficiently to hold needed characters
+-            while (queue.nbrEntries < amount + markerOffset) {
+-                // Append the next character
+-                queue.append((char)input.read());
+-            }
+-        }
+-        catch (IOException io) {
+-            throw new CharStreamIOException(io);
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharFormatter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharFormatter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharFormatter.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharFormatter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,22 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** Interface used by BitSet to format elements of the set when
+- * converting to string
+- */
+-public interface CharFormatter {
+-
+-
+-    public String escapeChar(int c, boolean forCharLiteral);
+-
+-    public String escapeString(String s);
+-
+-    public String literalChar(int c);
+-
+-    public String literalString(String s);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharLiteralElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharLiteralElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharLiteralElement.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharLiteralElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,28 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class CharLiteralElement extends GrammarAtom {
+-
+-
+-    public CharLiteralElement(LexerGrammar g, Token t, boolean inverted, int autoGenType) {
+-        super(g, t, AUTO_GEN_NONE);
+-        tokenType = ANTLRLexer.tokenTypeForCharLiteral(t.getText());
+-        g.charVocabulary.add(tokenType);
+-        line = t.getLine();
+-        not = inverted;
+-        this.autoGenType = autoGenType;
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharQueue.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharQueue.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharQueue.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharQueue.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,94 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** A circular buffer object used by CharBuffer */
+-public class CharQueue {
+-    /** Physical circular buffer of tokens */
+-    protected char[] buffer;
+-    /** buffer.length-1 for quick modulos */
+-    private int sizeLessOne;
+-    /** physical index of front token */
+-    private int offset;
+-    /** number of tokens in the queue */
+-    protected int nbrEntries;
+-
+-    public CharQueue(int minSize) {
+-        // Find first power of 2 >= to requested size
+-        int size;
+-        if ( minSize<0 ) {
+-            init(16); // pick some value for them
+-            return;
+-        }
+-        // check for overflow
+-        if ( minSize>=(Integer.MAX_VALUE/2) ) {
+-            init(Integer.MAX_VALUE); // wow that's big.
+-            return;
+-        }
+-        for (size = 2; size < minSize; size *= 2) {
+-        }
+-        init(size);
+-    }
+-
+-    /** Add token to end of the queue
+-     * @param tok The token to add
+-     */
+-    public final void append(char tok) {
+-        if (nbrEntries == buffer.length) {
+-            expand();
+-        }
+-        buffer[(offset + nbrEntries) & sizeLessOne] = tok;
+-        nbrEntries++;
+-    }
+-
+-    /** Fetch a token from the queue by index
+-     * @param idx The index of the token to fetch, where zero is the token at the front of the queue
+-     */
+-    public final char elementAt(int idx) {
+-        return buffer[(offset + idx) & sizeLessOne];
+-    }
+-
+-    /** Expand the token buffer by doubling its capacity */
+-    private final void expand() {
+-        char[] newBuffer = new char[buffer.length * 2];
+-        // Copy the contents to the new buffer
+-        // Note that this will store the first logical item in the
+-        // first physical array element.
+-        for (int i = 0; i < buffer.length; i++) {
+-            newBuffer[i] = elementAt(i);
+-        }
+-        // Re-initialize with new contents, keep old nbrEntries
+-        buffer = newBuffer;
+-        sizeLessOne = buffer.length - 1;
+-        offset = 0;
+-    }
+-
+-    /** Initialize the queue.
+-     * @param size The initial size of the queue
+-     */
+-    public void init(int size) {
+-        // Allocate buffer
+-        buffer = new char[size];
+-        // Other initialization
+-        sizeLessOne = size - 1;
+-        offset = 0;
+-        nbrEntries = 0;
+-    }
+-
+-    /** Clear the queue. Leaving the previous buffer alone.
+-     */
+-    public final void reset() {
+-        offset = 0;
+-        nbrEntries = 0;
+-    }
+-
+-    /** Remove char from front of queue */
+-    public final void removeFirst() {
+-        offset = (offset + 1) & sizeLessOne;
+-        nbrEntries--;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharRangeElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharRangeElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharRangeElement.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharRangeElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,53 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class CharRangeElement extends AlternativeElement {
+-    String label;
+-    protected char begin = 0;
+-    protected char end = 0;
+-    protected String beginText;
+-    protected String endText;
+-
+-
+-    public CharRangeElement(LexerGrammar g, Token t1, Token t2, int autoGenType) {
+-        super(g);
+-        begin = (char)ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
+-        beginText = t1.getText();
+-        end = (char)ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
+-        endText = t2.getText();
+-        line = t1.getLine();
+-        // track which characters are referenced in the grammar
+-        for (int i = begin; i <= end; i++) {
+-            g.charVocabulary.add(i);
+-        }
+-        this.autoGenType = autoGenType;
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public void setLabel(String label_) {
+-        label = label_;
+-    }
+-
+-    public String toString() {
+-        if (label != null)
+-            return " " + label + ":" + beginText + ".." + endText;
+-        else
+-            return " " + beginText + ".." + endText;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharScanner.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharScanner.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharScanner.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharScanner.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,407 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-import java.io.IOException;
+-
+-public abstract class CharScanner implements TokenStream {
+-    static final char NO_CHAR = 0;
+-    public static final char EOF_CHAR = (char)-1;
+-    protected ANTLRStringBuffer text; // text of current token
+-
+-    protected boolean saveConsumedInput = true; // does consume() save characters?
+-    protected Class tokenObjectClass; // what kind of tokens to create?
+-    protected boolean caseSensitive = true;
+-    protected boolean caseSensitiveLiterals = true;
+-    protected Hashtable literals; // set by subclass
+-
+-    /** Tab chars are handled by tab() according to this value; override
+-     *  method to do anything weird with tabs.
+-     */
+-    protected int tabsize = 8;
+-
+-    protected Token _returnToken = null; // used to return tokens w/o using return val.
+-
+-    // Hash string used so we don't new one every time to check literals table
+-    protected ANTLRHashString hashString;
+-
+-    protected LexerSharedInputState inputState;
+-
+-    /** Used during filter mode to indicate that path is desired.
+-     *  A subsequent scan error will report an error as usual if
+-     *  acceptPath=true;
+-     */
+-    protected boolean commitToPath = false;
+-
+-    /** Used to keep track of indentdepth for traceIn/Out */
+-    protected int traceDepth = 0;
+-
+-    public CharScanner() {
+-        text = new ANTLRStringBuffer();
+-        hashString = new ANTLRHashString(this);
+-        setTokenObjectClass("persistence.antlr.CommonToken");
+-    }
+-
+-    public CharScanner(InputBuffer cb) { // SAS: use generic buffer
+-        this();
+-        inputState = new LexerSharedInputState(cb);
+-    }
+-
+-    public CharScanner(LexerSharedInputState sharedState) {
+-        this();
+-        inputState = sharedState;
+-    }
+-
+-    public void append(char c) {
+-        if (saveConsumedInput) {
+-            text.append(c);
+-        }
+-    }
+-
+-    public void append(String s) {
+-        if (saveConsumedInput) {
+-            text.append(s);
+-        }
+-    }
+-
+-    public void commit() {
+-        inputState.input.commit();
+-    }
+-
+-    public void consume() throws CharStreamException {
+-        if (inputState.guessing == 0) {
+-            char c = LA(1);
+-            if (caseSensitive) {
+-                append(c);
+-            }
+-            else {
+-                // use input.LA(), not LA(), to get original case
+-                // CharScanner.LA() would toLower it.
+-                append(inputState.input.LA(1));
+-            }
+-            if (c == '\t') {
+-                tab();
+-            }
+-            else {
+-                inputState.column++;
+-            }
+-        }
+-        inputState.input.consume();
+-    }
+-
+-    /** Consume chars until one matches the given char */
+-    public void consumeUntil(int c) throws CharStreamException {
+-        while (LA(1) != EOF_CHAR && LA(1) != c) {
+-            consume();
+-        }
+-    }
+-
+-    /** Consume chars until one matches the given set */
+-    public void consumeUntil(BitSet set) throws CharStreamException {
+-        while (LA(1) != EOF_CHAR && !set.member(LA(1))) {
+-            consume();
+-        }
+-    }
+-
+-    public boolean getCaseSensitive() {
+-        return caseSensitive;
+-    }
+-
+-    public final boolean getCaseSensitiveLiterals() {
+-        return caseSensitiveLiterals;
+-    }
+-
+-    public int getColumn() {
+-        return inputState.column;
+-    }
+-
+-    public void setColumn(int c) {
+-        inputState.column = c;
+-    }
+-
+-    public boolean getCommitToPath() {
+-        return commitToPath;
+-    }
+-
+-    public String getFilename() {
+-        return inputState.filename;
+-    }
+-
+-    public InputBuffer getInputBuffer() {
+-        return inputState.input;
+-    }
+-
+-    public LexerSharedInputState getInputState() {
+-        return inputState;
+-    }
+-
+-    public void setInputState(LexerSharedInputState state) {
+-        inputState = state;
+-    }
+-
+-    public int getLine() {
+-        return inputState.line;
+-    }
+-
+-    /** return a copy of the current text buffer */
+-    public String getText() {
+-        return text.toString();
+-    }
+-
+-    public Token getTokenObject() {
+-        return _returnToken;
+-    }
+-
+-    public char LA(int i) throws CharStreamException {
+-        if (caseSensitive) {
+-            return inputState.input.LA(i);
+-        }
+-        else {
+-            return toLower(inputState.input.LA(i));
+-        }
+-    }
+-
+-    protected Token makeToken(int t) {
+-        try {
+-            Token tok = (Token)tokenObjectClass.newInstance();
+-            tok.setType(t);
+-            tok.setColumn(inputState.tokenStartColumn);
+-            tok.setLine(inputState.tokenStartLine);
+-            // tracking real start line now: tok.setLine(inputState.line);
+-            return tok;
+-        }
+-        catch (InstantiationException ie) {
+-            panic("can't instantiate token: " + tokenObjectClass);
+-        }
+-        catch (IllegalAccessException iae) {
+-            panic("Token class is not accessible" + tokenObjectClass);
+-        }
+-        return Token.badToken;
+-    }
+-
+-    public int mark() {
+-        return inputState.input.mark();
+-    }
+-
+-    public void match(char c) throws MismatchedCharException, CharStreamException {
+-        if (LA(1) != c) {
+-            throw new MismatchedCharException(LA(1), c, false, this);
+-        }
+-        consume();
+-    }
+-
+-    public void match(BitSet b) throws MismatchedCharException, CharStreamException {
+-        if (!b.member(LA(1))) {
+-            throw new MismatchedCharException(LA(1), b, false, this);
+-        }
+-        else {
+-            consume();
+-        }
+-    }
+-
+-    public void match(String s) throws MismatchedCharException, CharStreamException {
+-        int len = s.length();
+-        for (int i = 0; i < len; i++) {
+-            if (LA(1) != s.charAt(i)) {
+-                throw new MismatchedCharException(LA(1), s.charAt(i), false, this);
+-            }
+-            consume();
+-        }
+-    }
+-
+-    public void matchNot(char c) throws MismatchedCharException, CharStreamException {
+-        if (LA(1) == c) {
+-            throw new MismatchedCharException(LA(1), c, true, this);
+-        }
+-        consume();
+-    }
+-
+-    public void matchRange(char c1, char c2) throws MismatchedCharException, CharStreamException {
+-        if (LA(1) < c1 || LA(1) > c2) throw new MismatchedCharException(LA(1), c1, c2, false, this);
+-        consume();
+-    }
+-
+-    public void newline() {
+-        inputState.line++;
+-        inputState.column = 1;
+-    }
+-
+-    /** advance the current column number by an appropriate amount
+-     *  according to tab size. This method is called from consume().
+-     */
+-    public void tab() {
+-        int c = getColumn();
+-		int nc = ( ((c-1)/tabsize) + 1) * tabsize + 1;  // calculate tab stop
+-		setColumn( nc );
+-    }
+-
+-	public void setTabSize( int size ) {
+-	  	tabsize = size;
+-	}
+-
+-    public int getTabSize() {
+-        return tabsize;
+-    }
+-
+-    /** @see #panic(String)
+-     */
+-    public void panic() {
+-        System.err.println("CharScanner: panic");
+-        System.exit(1);
+-    }
+-
+-    /** This method is executed by ANTLR internally when it detected an illegal
+-     *  state that cannot be recovered from.
+-     *  The default implementation of this method calls
+-     *  {@link java.lang.System.exit(int)} and writes directly to
+-     *  {@link java.lang.System.err)} , which is usually not appropriate when
+-     *  a translator is embedded into a larger application. <em>It is highly
+-     *  recommended that this method be overridden to handle the error in a
+-     *  way appropriate for your application (e.g. throw an unchecked
+-     *  exception)</em>.
+-     */
+-    public void panic(String s) {
+-        System.err.println("CharScanner; panic: " + s);
+-        System.exit(1);
+-    }
+-
+-    /** Parser error-reporting function can be overridden in subclass */
+-    public void reportError(RecognitionException ex) {
+-        System.err.println(ex);
+-    }
+-
+-    /** Parser error-reporting function can be overridden in subclass */
+-    public void reportError(String s) {
+-        if (getFilename() == null) {
+-            System.err.println("error: " + s);
+-        }
+-        else {
+-            System.err.println(getFilename() + ": error: " + s);
+-        }
+-    }
+-
+-    /** Parser warning-reporting function can be overridden in subclass */
+-    public void reportWarning(String s) {
+-        if (getFilename() == null) {
+-            System.err.println("warning: " + s);
+-        }
+-        else {
+-            System.err.println(getFilename() + ": warning: " + s);
+-        }
+-    }
+-
+-    public void resetText() {
+-        text.setLength(0);
+-        inputState.tokenStartColumn = inputState.column;
+-        inputState.tokenStartLine = inputState.line;
+-    }
+-
+-    public void rewind(int pos) {
+-		 inputState.input.rewind(pos);
+-		 // RK: should not be here, it is messing up column calculation
+-		 // setColumn(inputState.tokenStartColumn);
+-    }
+-
+-    public void setCaseSensitive(boolean t) {
+-        caseSensitive = t;
+-    }
+-
+-    public void setCommitToPath(boolean commit) {
+-        commitToPath = commit;
+-    }
+-
+-    public void setFilename(String f) {
+-        inputState.filename = f;
+-    }
+-
+-    public void setLine(int line) {
+-        inputState.line = line;
+-    }
+-
+-    public void setText(String s) {
+-        resetText();
+-        text.append(s);
+-    }
+-
+-    public void setTokenObjectClass(String cl) {
+-        try {
+-            tokenObjectClass = Class.forName(cl);
+-        }
+-        catch (ClassNotFoundException ce) {
+-            panic("ClassNotFoundException: " + cl);
+-        }
+-    }
+-
+-    // Test the token text against the literals table
+-    // Override this method to perform a different literals test
+-    public int testLiteralsTable(int ttype) {
+-        hashString.setBuffer(text.getBuffer(), text.length());
+-        Integer literalsIndex = (Integer)literals.get(hashString);
+-        if (literalsIndex != null) {
+-            ttype = literalsIndex.intValue();
+-        }
+-        return ttype;
+-    }
+-
+-    /** Test the text passed in against the literals table
+-     * Override this method to perform a different literals test
+-     * This is used primarily when you want to test a portion of
+-     * a token.
+-     */
+-    public int testLiteralsTable(String text, int ttype) {
+-        ANTLRHashString s = new ANTLRHashString(text, this);
+-        Integer literalsIndex = (Integer)literals.get(s);
+-        if (literalsIndex != null) {
+-            ttype = literalsIndex.intValue();
+-        }
+-        return ttype;
+-    }
+-
+-    // Override this method to get more specific case handling
+-    public char toLower(char c) {
+-        return Character.toLowerCase(c);
+-    }
+-
+-    public void traceIndent() {
+-        for (int i = 0; i < traceDepth; i++)
+-            System.out.print(" ");
+-    }
+-
+-    public void traceIn(String rname) throws CharStreamException {
+-        traceDepth += 1;
+-        traceIndent();
+-        System.out.println("> lexer " + rname + "; c==" + LA(1));
+-    }
+-
+-    public void traceOut(String rname) throws CharStreamException {
+-        traceIndent();
+-        System.out.println("< lexer " + rname + "; c==" + LA(1));
+-        traceDepth -= 1;
+-    }
+-
+-    /** This method is called by YourLexer.nextToken() when the lexer has
+-     *  hit EOF condition.  EOF is NOT a character.
+-     *  This method is not called if EOF is reached during
+-     *  syntactic predicate evaluation or during evaluation
+-     *  of normal lexical rules, which presumably would be
+-     *  an IOException.  This traps the "normal" EOF condition.
+-     *
+-     *  uponEOF() is called after the complete evaluation of
+-     *  the previous token and only if your parser asks
+-     *  for another token beyond that last non-EOF token.
+-     *
+-     *  You might want to throw token or char stream exceptions
+-     *  like: "Heh, premature eof" or a retry stream exception
+-     *  ("I found the end of this file, go back to referencing file").
+-     */
+-    public void uponEOF() throws TokenStreamException, CharStreamException {
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharStreamException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharStreamException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharStreamException.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharStreamException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,20 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**
+- * Anything that goes wrong while generating a stream of characters
+- */
+-public class CharStreamException extends ANTLRException {
+-    /**
+-     * CharStreamException constructor comment.
+-     * @param s java.lang.String
+-     */
+-    public CharStreamException(String s) {
+-        super(s);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CharStreamIOException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CharStreamIOException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CharStreamIOException.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CharStreamIOException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,21 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.IOException;
+-
+-/**
+- * Wrap an IOException in a CharStreamException
+- */
+-public class CharStreamIOException extends CharStreamException {
+-    public IOException io;
+-
+-    public CharStreamIOException(IOException io) {
+-        super(io.getMessage());
+-        this.io = io;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CodeGenerator.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,662 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.PrintWriter;
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-import persistence.antlr.collections.impl.Vector;
+-import persistence.antlr.collections.impl.BitSet;
+-
+-/**A generic ANTLR code generator.  All code generators
+- * Derive from this class.
+- *
+- * <p>
+- * A CodeGenerator knows about a Grammar data structure and
+- * a grammar analyzer.  The Grammar is walked to generate the
+- * appropriate code for both a parser and lexer (if present).
+- * This interface may change slightly so that the lexer is
+- * itself living inside of a Grammar object (in which case,
+- * this class generates only one recognizer).  The main method
+- * to call is <tt>gen()</tt>, which initiates all code gen.
+- *
+- * <p>
+- * The interaction of the code generator with the analyzer is
+- * simple: each subrule block calls deterministic() before generating
+- * code for the block.  Method deterministic() sets lookahead caches
+- * in each Alternative object.  Technically, a code generator
+- * doesn't need the grammar analyzer if all lookahead analysis
+- * is done at runtime, but this would result in a slower parser.
+- *
+- * <p>
+- * This class provides a set of support utilities to handle argument
+- * list parsing and so on.
+- *
+- * @author  Terence Parr, John Lilley
+- * @version 2.00a
+- * @see     persistence.antlr.JavaCodeGenerator
+- * @see     persistence.antlr.DiagnosticCodeGenerator
+- * @see     persistence.antlr.LLkAnalyzer
+- * @see     persistence.antlr.Grammar
+- * @see     persistence.antlr.AlternativeElement
+- * @see     persistence.antlr.Lookahead
+- */
+-public abstract class CodeGenerator {
+-    protected persistence.antlr.Tool antlrTool;
+-
+-    /** Current tab indentation for code output */
+-    protected int tabs = 0;
+-
+-    /** Current output Stream */
+-    transient protected PrintWriter currentOutput; // SAS: for proper text i/o
+-
+-    /** The grammar for which we generate code */
+-    protected Grammar grammar = null;
+-
+-    /** List of all bitsets that must be dumped.  These are Vectors of BitSet. */
+-    protected Vector bitsetsUsed;
+-
+-    /** The grammar behavior */
+-    protected DefineGrammarSymbols behavior;
+-
+-    /** The LLk analyzer */
+-    protected LLkGrammarAnalyzer analyzer;
+-
+-    /** Object used to format characters in the target language.
+-     * subclass must initialize this to the language-specific formatter
+-     */
+-    protected CharFormatter charFormatter;
+-
+-    /** Use option "codeGenDebug" to generate debugging output */
+-    protected boolean DEBUG_CODE_GENERATOR = false;
+-
+-    /** Default values for code-generation thresholds */
+-    protected static final int DEFAULT_MAKE_SWITCH_THRESHOLD = 2;
+-    protected static final int DEFAULT_BITSET_TEST_THRESHOLD = 4;
+-
+-    /** If there are more than 8 long words to init in a bitset,
+-     *  try to optimize it; e.g., detect runs of -1L and 0L.
+-     */
+-    protected static final int BITSET_OPTIMIZE_INIT_THRESHOLD = 8;
+-
+-    /** This is a hint for the language-specific code generator.
+-     * A switch() or language-specific equivalent will be generated instead
+-     * of a series of if/else statements for blocks with number of alternates
+-     * greater than or equal to this number of non-predicated LL(1) alternates.
+-     * This is modified by the grammar option "codeGenMakeSwitchThreshold"
+-     */
+-    protected int makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD;
+-
+-    /** This is a hint for the language-specific code generator.
+-     * A bitset membership test will be generated instead of an
+-     * ORed series of LA(k) comparisions for lookahead sets with
+-     * degree greater than or equal to this value.
+-     * This is modified by the grammar option "codeGenBitsetTestThreshold"
+-     */
+-    protected int bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD;
+-
+-    private static boolean OLD_ACTION_TRANSLATOR = true;
+-
+-    public static String TokenTypesFileSuffix = "TokenTypes";
+-    public static String TokenTypesFileExt = ".txt";
+-
+-    /** Construct code generator base class */
+-    public CodeGenerator() {
+-    }
+-
+-    /** Output a String to the currentOutput stream.
+-     * Ignored if string is null.
+-     * @param s The string to output
+-     */
+-    protected void _print(String s) {
+-        if (s != null) {
+-            currentOutput.print(s);
+-        }
+-    }
+-
+-    /** Print an action without leading tabs, attempting to
+-     * preserve the current indentation level for multi-line actions
+-     * Ignored if string is null.
+-     * @param s The action string to output
+-     */
+-    protected void _printAction(String s) {
+-        if (s == null) {
+-            return;
+-        }
+-
+-        // Skip leading newlines, tabs and spaces
+-        int start = 0;
+-        while (start < s.length() && Character.isSpaceChar(s.charAt(start))) {
+-            start++;
+-        }
+-
+-        // Skip leading newlines, tabs and spaces
+-        int end = s.length() - 1;
+-        while (end > start && Character.isSpaceChar(s.charAt(end))) {
+-            end--;
+-        }
+-
+-        char c = 0;
+-        for (int i = start; i <= end;) {
+-            c = s.charAt(i);
+-            i++;
+-            boolean newline = false;
+-            switch (c) {
+-                case '\n':
+-                    newline = true;
+-                    break;
+-                case '\r':
+-                    if (i <= end && s.charAt(i) == '\n') {
+-                        i++;
+-                    }
+-                    newline = true;
+-                    break;
+-                default:
+-                    currentOutput.print(c);
+-                    break;
+-            }
+-            if (newline) {
+-                currentOutput.println();
+-                printTabs();
+-                // Absorb leading whitespace
+-                while (i <= end && Character.isSpaceChar(s.charAt(i))) {
+-                    i++;
+-                }
+-                newline = false;
+-            }
+-        }
+-        currentOutput.println();
+-    }
+-
+-    /** Output a String followed by newline, to the currentOutput stream.
+-     * Ignored if string is null.
+-     * @param s The string to output
+-     */
+-    protected void _println(String s) {
+-        if (s != null) {
+-            currentOutput.println(s);
+-        }
+-    }
+-
+-    /** Test if a set element array represents a contiguous range.
+-     * @param elems The array of elements representing the set, usually from BitSet.toArray().
+-     * @return true if the elements are a contiguous range (with two or more).
+-     */
+-    public static boolean elementsAreRange(int[] elems) {
+-        if (elems.length == 0) {
+-            return false;
+-        }
+-        int begin = elems[0];
+-        int end = elems[elems.length - 1];
+-        if (elems.length <= 2) {
+-            // Not enough elements for a range expression
+-            return false;
+-        }
+-        if (end - begin + 1 > elems.length) {
+-            // The set does not represent a contiguous range
+-            return false;
+-        }
+-        int v = begin + 1;
+-        for (int i = 1; i < elems.length - 1; i++) {
+-            if (v != elems[i]) {
+-                // The set does not represent a contiguous range
+-                return false;
+-            }
+-            v++;
+-        }
+-        return true;
+-    }
+-
+-    /** Get the identifier portion of an argument-action token.
+-     * The ID of an action is assumed to be a trailing identifier.
+-     * Specific code-generators may want to override this
+-     * if the language has unusual declaration syntax.
+-     * @param t The action token
+-     * @return A string containing the text of the identifier
+-     */
+-    protected String extractIdOfAction(Token t) {
+-        return extractIdOfAction(t.getText(), t.getLine(), t.getColumn());
+-    }
+-
+-    /** Get the identifier portion of an argument-action.
+-     * The ID of an action is assumed to be a trailing identifier.
+-     * Specific code-generators may want to override this
+-     * if the language has unusual declaration syntax.
+-     * @param s The action text
+-     * @param line Line used for error reporting.
+-     * @param column Line used for error reporting.
+-     * @return A string containing the text of the identifier
+-     */
+-    protected String extractIdOfAction(String s, int line, int column) {
+-        s = removeAssignmentFromDeclaration(s);
+-        // Search back from the end for a non alphanumeric.  That marks the
+-        // beginning of the identifier
+-        for (int i = s.length() - 2; i >= 0; i--) {
+-            // TODO: make this work for language-independent identifiers?
+-            if (!Character.isLetterOrDigit(s.charAt(i)) && s.charAt(i) != '_') {
+-                // Found end of type part
+-                return s.substring(i + 1);
+-            }
+-        }
+-        // Something is bogus, but we cannot parse the language-specific
+-        // actions any better.  The compiler will have to catch the problem.
+-        antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column);
+-        return "";
+-    }
+-
+-    /** Get the type string out of an argument-action token.
+-     * The type of an action is assumed to precede a trailing identifier
+-     * Specific code-generators may want to override this
+-     * if the language has unusual declaration syntax.
+-     * @param t The action token
+-     * @return A string containing the text of the type
+-     */
+-    protected String extractTypeOfAction(Token t) {
+-        return extractTypeOfAction(t.getText(), t.getLine(), t.getColumn());
+-    }
+-
+-    /** Get the type portion of an argument-action.
+-     * The type of an action is assumed to precede a trailing identifier
+-     * Specific code-generators may want to override this
+-     * if the language has unusual declaration syntax.
+-     * @param s The action text
+-     * @param line Line used for error reporting.
+-     * @return A string containing the text of the type
+-     */
+-    protected String extractTypeOfAction(String s, int line, int column) {
+-        s = removeAssignmentFromDeclaration(s);
+-        // Search back from the end for a non alphanumeric.  That marks the
+-        // beginning of the identifier
+-        for (int i = s.length() - 2; i >= 0; i--) {
+-            // TODO: make this work for language-independent identifiers?
+-            if (!Character.isLetterOrDigit(s.charAt(i)) && s.charAt(i) != '_') {
+-                // Found end of type part
+-                return s.substring(0, i + 1);
+-            }
+-        }
+-        // Something is bogus, but we cannot parse the language-specific
+-        // actions any better.  The compiler will have to catch the problem.
+-        antlrTool.warning("Ill-formed action", grammar.getFilename(), line, column);
+-        return "";
+-    }
+-
+-    /** Generate the code for all grammars
+-     */
+-    public abstract void gen();
+-
+-    /** Generate code for the given grammar element.
+-     * @param action The {...} action to generate
+-     */
+-    public abstract void gen(ActionElement action);
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The "x|y|z|..." block to generate
+-     */
+-    public abstract void gen(AlternativeBlock blk);
+-
+-    /** Generate code for the given grammar element.
+-     * @param end The block-end element to generate.  Block-end
+-     * elements are synthesized by the grammar parser to represent
+-     * the end of a block.
+-     */
+-    public abstract void gen(BlockEndElement end);
+-
+-    /** Generate code for the given grammar element.
+-     * @param atom The character literal reference to generate
+-     */
+-    public abstract void gen(CharLiteralElement atom);
+-
+-    /** Generate code for the given grammar element.
+-     * @param r The character-range reference to generate
+-     */
+-    public abstract void gen(CharRangeElement r);
+-
+-    /** Generate the code for a parser */
+-    public abstract void gen(LexerGrammar g) throws IOException;
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)+ block to generate
+-     */
+-    public abstract void gen(OneOrMoreBlock blk);
+-
+-    /** Generate the code for a parser */
+-    public abstract void gen(ParserGrammar g) throws IOException;
+-
+-    /** Generate code for the given grammar element.
+-     * @param rr The rule-reference to generate
+-     */
+-    public abstract void gen(RuleRefElement rr);
+-
+-    /** Generate code for the given grammar element.
+-     * @param atom The string-literal reference to generate
+-     */
+-    public abstract void gen(StringLiteralElement atom);
+-
+-    /** Generate code for the given grammar element.
+-     * @param r The token-range reference to generate
+-     */
+-    public abstract void gen(TokenRangeElement r);
+-
+-    /** Generate code for the given grammar element.
+-     * @param atom The token-reference to generate
+-     */
+-    public abstract void gen(TokenRefElement atom);
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The tree to generate code for.
+-     */
+-    public abstract void gen(TreeElement t);
+-
+-    /** Generate the code for a parser */
+-    public abstract void gen(TreeWalkerGrammar g) throws IOException;
+-
+-    /** Generate code for the given grammar element.
+-     * @param wc The wildcard element to generate
+-     */
+-    public abstract void gen(WildcardElement wc);
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)* block to generate
+-     */
+-    public abstract void gen(ZeroOrMoreBlock blk);
+-
+-    /** Generate the token types as a text file for persistence across shared lexer/parser */
+-    protected void genTokenInterchange(TokenManager tm) throws IOException {
+-        // Open the token output Java file and set the currentOutput stream
+-        String fName = tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt;
+-        currentOutput = antlrTool.openOutputFile(fName);
+-
+-        println("// $ANTLR " + antlrTool.version + ": " +
+-                antlrTool.fileMinusPath(antlrTool.grammarFile) +
+-                " -> " +
+-                fName +
+-                "$");
+-
+-        tabs = 0;
+-
+-        // Header
+-        println(tm.getName() + "    // output token vocab name");
+-
+-        // Generate a definition for each token type
+-        Vector v = tm.getVocabulary();
+-        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-            String s = (String)v.elementAt(i);
+-            if (DEBUG_CODE_GENERATOR) {
+-                System.out.println("gen persistence file entry for: " + s);
+-            }
+-            if (s != null && !s.startsWith("<")) {
+-                // if literal, find label
+-                if (s.startsWith("\"")) {
+-                    StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
+-                    if (sl != null && sl.label != null) {
+-                        print(sl.label + "=");
+-                    }
+-                    println(s + "=" + i);
+-                }
+-                else {
+-                    print(s);
+-                    // check for a paraphrase
+-                    TokenSymbol ts = (TokenSymbol)tm.getTokenSymbol(s);
+-                    if (ts == null) {
+-                        antlrTool.warning("undefined token symbol: " + s);
+-                    }
+-                    else {
+-                        if (ts.getParaphrase() != null) {
+-                            print("(" + ts.getParaphrase() + ")");
+-                        }
+-                    }
+-                    println("=" + i);
+-                }
+-            }
+-        }
+-
+-        // Close the tokens output file
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Process a string for an simple expression for use in xx/action.g
+-     * it is used to cast simple tokens/references to the right type for
+-     * the generated language.
+-     * @param str A String.
+-     */
+-    public String processStringForASTConstructor(String str) {
+-        return str;
+-    }
+-
+-    /** Get a string for an expression to generate creation of an AST subtree.
+-     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
+-     */
+-    public abstract String getASTCreateString(Vector v);
+-
+-    /** Get a string for an expression to generate creating of an AST node
+-     * @param str The text of the arguments to the AST construction
+-     */
+-    public abstract String getASTCreateString(GrammarAtom atom, String str);
+-
+-    /** Given the index of a bitset in the bitset list, generate a unique name.
+-     * Specific code-generators may want to override this
+-     * if the language does not allow '_' or numerals in identifiers.
+-     * @param index  The index of the bitset in the bitset list.
+-     */
+-    protected String getBitsetName(int index) {
+-        return "_tokenSet_" + index;
+-    }
+-
+-    public static String encodeLexerRuleName(String id) {
+-        return "m" + id;
+-    }
+-
+-    public static String decodeLexerRuleName(String id) {
+-        if ( id==null ) {
+-            return null;
+-        }
+-        return id.substring(1,id.length());
+-    }
+-
+-    /** Map an identifier to it's corresponding tree-node variable.
+-     * This is context-sensitive, depending on the rule and alternative
+-     * being generated
+-     * @param id The identifier name to map
+-     * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned.
+-     * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
+-     */
+-    public abstract String mapTreeId(String id, ActionTransInfo tInfo);
+-
+-    /** Add a bitset to the list of bitsets to be generated.
+-     * if the bitset is already in the list, ignore the request.
+-     * Always adds the bitset to the end of the list, so the
+-     * caller can rely on the position of bitsets in the list.
+-     * The returned position can be used to format the bitset
+-     * name, since it is invariant.
+-     * @param p Bit set to mark for code generation
+-     * @param forParser true if the bitset is used for the parser, false for the lexer
+-     * @return The position of the bitset in the list.
+-     */
+-    protected int markBitsetForGen(BitSet p) {
+-        // Is the bitset (or an identical one) already marked for gen?
+-        for (int i = 0; i < bitsetsUsed.size(); i++) {
+-            BitSet set = (BitSet)bitsetsUsed.elementAt(i);
+-            if (p.equals(set)) {
+-                // Use the identical one already stored
+-                return i;
+-            }
+-        }
+-
+-        // Add the new bitset
+-        bitsetsUsed.appendElement(p.clone());
+-        return bitsetsUsed.size() - 1;
+-    }
+-
+-    /** Output tab indent followed by a String, to the currentOutput stream.
+-     * Ignored if string is null.
+-     * @param s The string to output.
+-     */
+-    protected void print(String s) {
+-        if (s != null) {
+-            printTabs();
+-            currentOutput.print(s);
+-        }
+-    }
+-
+-    /** Print an action with leading tabs, attempting to
+-     * preserve the current indentation level for multi-line actions
+-     * Ignored if string is null.
+-     * @param s The action string to output
+-     */
+-    protected void printAction(String s) {
+-        if (s != null) {
+-            printTabs();
+-            _printAction(s);
+-        }
+-    }
+-
+-    /** Output tab indent followed by a String followed by newline,
+-     * to the currentOutput stream.  Ignored if string is null.
+-     * @param s The string to output
+-     */
+-    protected void println(String s) {
+-        if (s != null) {
+-            printTabs();
+-            currentOutput.println(s);
+-        }
+-    }
+-
+-    /** Output the current tab indentation.  This outputs the number of tabs
+-     * indicated by the "tabs" variable to the currentOutput stream.
+-     */
+-    protected void printTabs() {
+-        for (int i = 1; i <= tabs; i++) {
+-            currentOutput.print("\t");
+-        }
+-    }
+-
+-    /** Lexically process $ and # references within the action.
+-     *  This will replace #id and #(...) with the appropriate
+-     *  function calls and/or variables etc...
+-     */
+-    protected abstract String processActionForSpecialSymbols(String actionStr,
+-															 int line,
+-															 RuleBlock currentRule,
+-															 ActionTransInfo tInfo);
+-
+-	public String getFOLLOWBitSet(String ruleName, int k) {
+-		GrammarSymbol rs = grammar.getSymbol(ruleName);
+-		if ( !(rs instanceof RuleSymbol) ) {
+-			return null;
+-		}
+-		RuleBlock blk = ((RuleSymbol)rs).getBlock();
+-        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(k, blk.endNode);
+-		String followSetName = getBitsetName(markBitsetForGen(follow.fset));
+-		return followSetName;
+-    }
+-
+-	public String getFIRSTBitSet(String ruleName, int k) {
+-		GrammarSymbol rs = grammar.getSymbol(ruleName);
+-		if ( !(rs instanceof RuleSymbol) ) {
+-			return null;
+-		}
+-		RuleBlock blk = ((RuleSymbol)rs).getBlock();
+-        Lookahead first = grammar.theLLkAnalyzer.look(k, blk);
+-		String firstSetName = getBitsetName(markBitsetForGen(first.fset));
+-		return firstSetName;
+-    }
+-
+-    /**
+-     * Remove the assignment portion of a declaration, if any.
+-     * @param d the declaration
+-     * @return the declaration without any assignment portion
+-     */
+-    protected String removeAssignmentFromDeclaration(String d) {
+-        // If d contains an equal sign, then it's a declaration
+-        // with an initialization.  Strip off the initialization part.
+-        if (d.indexOf('=') >= 0) d = d.substring(0, d.indexOf('=')).trim();
+-        return d;
+-    }
+-
+-    /** Set all fields back like one just created */
+-    private void reset() {
+-        tabs = 0;
+-        // Allocate list of bitsets tagged for code generation
+-        bitsetsUsed = new Vector();
+-        currentOutput = null;
+-        grammar = null;
+-        DEBUG_CODE_GENERATOR = false;
+-        makeSwitchThreshold = DEFAULT_MAKE_SWITCH_THRESHOLD;
+-        bitsetTestThreshold = DEFAULT_BITSET_TEST_THRESHOLD;
+-    }
+-
+-    public static String reverseLexerRuleName(String id) {
+-        return id.substring(1, id.length());
+-    }
+-
+-    public void setAnalyzer(LLkGrammarAnalyzer analyzer_) {
+-        analyzer = analyzer_;
+-    }
+-
+-    public void setBehavior(DefineGrammarSymbols behavior_) {
+-        behavior = behavior_;
+-    }
+-
+-    /** Set a grammar for the code generator to use */
+-    protected void setGrammar(Grammar g) {
+-        reset();
+-        grammar = g;
+-        // Lookup make-switch threshold in the grammar generic options
+-        if (grammar.hasOption("codeGenMakeSwitchThreshold")) {
+-            try {
+-                makeSwitchThreshold = grammar.getIntegerOption("codeGenMakeSwitchThreshold");
+-                //System.out.println("setting codeGenMakeSwitchThreshold to " + makeSwitchThreshold);
+-            }
+-            catch (NumberFormatException e) {
+-                Token tok = grammar.getOption("codeGenMakeSwitchThreshold");
+-                antlrTool.error(
+-                    "option 'codeGenMakeSwitchThreshold' must be an integer",
+-                    grammar.getClassName(),
+-                    tok.getLine(), tok.getColumn()
+-                );
+-            }
+-        }
+-
+-        // Lookup bitset-test threshold in the grammar generic options
+-        if (grammar.hasOption("codeGenBitsetTestThreshold")) {
+-            try {
+-                bitsetTestThreshold = grammar.getIntegerOption("codeGenBitsetTestThreshold");
+-                //System.out.println("setting codeGenBitsetTestThreshold to " + bitsetTestThreshold);
+-            }
+-            catch (NumberFormatException e) {
+-                Token tok = grammar.getOption("codeGenBitsetTestThreshold");
+-                antlrTool.error(
+-                    "option 'codeGenBitsetTestThreshold' must be an integer",
+-                    grammar.getClassName(),
+-                    tok.getLine(), tok.getColumn()
+-                );
+-            }
+-        }
+-
+-        // Lookup debug code-gen in the grammar generic options
+-        if (grammar.hasOption("codeGenDebug")) {
+-            Token t = grammar.getOption("codeGenDebug");
+-            if (t.getText().equals("true")) {
+-                //System.out.println("setting code-generation debug ON");
+-                DEBUG_CODE_GENERATOR = true;
+-            }
+-            else if (t.getText().equals("false")) {
+-                //System.out.println("setting code-generation debug OFF");
+-                DEBUG_CODE_GENERATOR = false;
+-            }
+-            else {
+-                antlrTool.error("option 'codeGenDebug' must be true or false", grammar.getClassName(), t.getLine(), t.getColumn());
+-            }
+-        }
+-    }
+-
+-    public void setTool(Tool tool) {
+-        antlrTool = tool;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/ASTEnumeration.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/ASTEnumeration.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/ASTEnumeration.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/ASTEnumeration.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,13 +0,0 @@
+-package persistence.antlr.collections;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public interface ASTEnumeration {
+-    public boolean hasMoreNodes();
+-
+-    public AST nextNode();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/AST.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/AST.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/AST.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/AST.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,76 +0,0 @@
+-package persistence.antlr.collections;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.Token;
+-
+-/** Minimal AST node interface used by ANTLR AST generation
+- * and tree-walker.
+- */
+-public interface AST {
+-    /** Add a (rightmost) child to this node */
+-    public void addChild(AST c);
+-
+-    public boolean equals(AST t);
+-
+-    public boolean equalsList(AST t);
+-
+-    public boolean equalsListPartial(AST t);
+-
+-    public boolean equalsTree(AST t);
+-
+-    public boolean equalsTreePartial(AST t);
+-
+-    public ASTEnumeration findAll(AST tree);
+-
+-    public ASTEnumeration findAllPartial(AST subtree);
+-
+-    /** Get the first child of this node; null if no children */
+-    public AST getFirstChild();
+-
+-    /** Get	the next sibling in line after this one */
+-    public AST getNextSibling();
+-
+-    /** Get the token text for this node */
+-    public String getText();
+-
+-    /** Get the token type for this node */
+-    public int getType();
+-
+-    /** @since 2.7.3 Need for error handling */
+-    public int getLine();
+-
+-    /** @since 2.7.3 Need for error handling */
+-    public int getColumn();
+-
+-	/** Get number of children of this node; if leaf, returns 0 */
+-	public int getNumberOfChildren();
+-
+-    public void initialize(int t, String txt);
+-
+-    public void initialize(AST t);
+-
+-    public void initialize(Token t);
+-
+-    /** Set the first child of a node. */
+-    public void setFirstChild(AST c);
+-
+-    /** Set the next sibling after this one. */
+-    public void setNextSibling(AST n);
+-
+-    /** Set the token text for this node */
+-    public void setText(String text);
+-
+-    /** Set the token type for this node */
+-    public void setType(int ttype);
+-
+-    public String toString();
+-
+-    public String toStringList();
+-
+-    public String toStringTree();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/Enumerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/Enumerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/Enumerator.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/Enumerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,22 +0,0 @@
+-package persistence.antlr.collections;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public interface Enumerator {
+-    /**Return the element under the cursor; return null if !valid() or
+-     * if called before first next() call.
+-     */
+-    public Object cursor();
+-
+-    /**Return the next element in the enumeration; first call to next()
+-     * returns the first element.
+-     */
+-    public Object next();
+-
+-    /**Any more elements in the enumeration? */
+-    public boolean valid();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/ASTArray.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/ASTArray.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/ASTArray.java	2006-08-31 00:34:14.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/ASTArray.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,29 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-/** ASTArray is a class that allows ANTLR to
+- * generate code that can create and initialize an array
+- * in one expression, like:
+- *    (new ASTArray(3)).add(x).add(y).add(z)
+- */
+-public class ASTArray {
+-    public int size = 0;
+-    public AST[] array;
+-
+-
+-    public ASTArray(int capacity) {
+-        array = new AST[capacity];
+-    }
+-
+-    public ASTArray add(AST node) {
+-        array[size++] = node;
+-        return this;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/ASTEnumerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/ASTEnumerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/ASTEnumerator.java	2006-08-31 00:34:14.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/ASTEnumerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,39 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.Vector;
+-import persistence.antlr.collections.ASTEnumeration;
+-import persistence.antlr.collections.AST;
+-
+-import java.util.NoSuchElementException;
+-
+-public class ASTEnumerator implements persistence.antlr.collections.ASTEnumeration {
+-    /** The list of root nodes for subtrees that match */
+-    VectorEnumerator nodes;
+-    int i = 0;
+-
+-
+-    public ASTEnumerator(Vector v) {
+-        nodes = new VectorEnumerator(v);
+-    }
+-
+-    public boolean hasMoreNodes() {
+-        synchronized (nodes) {
+-            return i <= nodes.vector.lastElement;
+-        }
+-    }
+-
+-    public persistence.antlr.collections.AST nextNode() {
+-        synchronized (nodes) {
+-            if (i <= nodes.vector.lastElement) {
+-                return (AST)nodes.vector.data[i++];
+-            }
+-            throw new NoSuchElementException("ASTEnumerator");
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/BitSet.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/BitSet.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/BitSet.java	2006-08-31 00:34:14.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/BitSet.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,482 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.CharFormatter;
+-
+-/**A BitSet to replace java.util.BitSet.
+- * Primary differences are that most set operators return new sets
+- * as opposed to oring and anding "in place".  Further, a number of
+- * operations were added.  I cannot contain a BitSet because there
+- * is no way to access the internal bits (which I need for speed)
+- * and, because it is final, I cannot subclass to add functionality.
+- * Consider defining set degree.  Without access to the bits, I must
+- * call a method n times to test the ith bit...ack!
+- *
+- * Also seems like or() from util is wrong when size of incoming set is bigger
+- * than this.bits.length.
+- *
+- * @author Terence Parr
+- * @author <br><a href="mailto:pete at yamuna.demon.co.uk">Pete Wells</a>
+- */
+-public class BitSet implements Cloneable {
+-    protected final static int BITS = 64;    // number of bits / long
+-    protected final static int NIBBLE = 4;
+-    protected final static int LOG_BITS = 6; // 2^6 == 64
+-
+-    /* We will often need to do a mod operator (i mod nbits).  Its
+-     * turns out that, for powers of two, this mod operation is
+-     * same as (i & (nbits-1)).  Since mod is slow, we use a
+-     * precomputed mod mask to do the mod instead.
+-     */
+-    protected final static int MOD_MASK = BITS - 1;
+-
+-    /** The actual data bits */
+-    protected long bits[];
+-
+-    /** Construct a bitset of size one word (64 bits) */
+-    public BitSet() {
+-        this(BITS);
+-    }
+-
+-    /** Construction from a static array of longs */
+-    public BitSet(long[] bits_) {
+-        bits = bits_;
+-    }
+-
+-    /** Construct a bitset given the size
+-     * @param nbits The size of the bitset in bits
+-     */
+-    public BitSet(int nbits) {
+-        bits = new long[((nbits - 1) >> LOG_BITS) + 1];
+-    }
+-
+-    /** or this element into this set (grow as necessary to accommodate) */
+-    public void add(int el) {
+-        //System.out.println("add("+el+")");
+-        int n = wordNumber(el);
+-        //System.out.println("word number is "+n);
+-        //System.out.println("bits.length "+bits.length);
+-        if (n >= bits.length) {
+-            growToInclude(el);
+-        }
+-        bits[n] |= bitMask(el);
+-    }
+-
+-    public BitSet and(BitSet a) {
+-        BitSet s = (BitSet)this.clone();
+-        s.andInPlace(a);
+-        return s;
+-    }
+-
+-    public void andInPlace(BitSet a) {
+-        int min = Math.min(bits.length, a.bits.length);
+-        for (int i = min - 1; i >= 0; i--) {
+-            bits[i] &= a.bits[i];
+-        }
+-        // clear all bits in this not present in a (if this bigger than a).
+-        for (int i = min; i < bits.length; i++) {
+-            bits[i] = 0;
+-        }
+-    }
+-
+-    private final static long bitMask(int bitNumber) {
+-        int bitPosition = bitNumber & MOD_MASK; // bitNumber mod BITS
+-        return 1L << bitPosition;
+-    }
+-
+-    public void clear() {
+-        for (int i = bits.length - 1; i >= 0; i--) {
+-            bits[i] = 0;
+-        }
+-    }
+-
+-    public void clear(int el) {
+-        int n = wordNumber(el);
+-        if (n >= bits.length) {	// grow as necessary to accommodate
+-            growToInclude(el);
+-        }
+-        bits[n] &= ~bitMask(el);
+-    }
+-
+-    public Object clone() {
+-        BitSet s;
+-        try {
+-            s = (BitSet)super.clone();
+-            s.bits = new long[bits.length];
+-            System.arraycopy(bits, 0, s.bits, 0, bits.length);
+-        }
+-        catch (CloneNotSupportedException e) {
+-            throw new InternalError();
+-        }
+-        return s;
+-    }
+-
+-    public int degree() {
+-        int deg = 0;
+-        for (int i = bits.length - 1; i >= 0; i--) {
+-            long word = bits[i];
+-            if (word != 0L) {
+-                for (int bit = BITS - 1; bit >= 0; bit--) {
+-                    if ((word & (1L << bit)) != 0) {
+-                        deg++;
+-                    }
+-                }
+-            }
+-        }
+-        return deg;
+-    }
+-
+-    /** code "inherited" from java.util.BitSet */
+-    public boolean equals(Object obj) {
+-        if ((obj != null) && (obj instanceof BitSet)) {
+-            BitSet set = (BitSet)obj;
+-
+-            int n = Math.min(bits.length, set.bits.length);
+-            for (int i = n; i-- > 0;) {
+-                if (bits[i] != set.bits[i]) {
+-                    return false;
+-                }
+-            }
+-            if (bits.length > n) {
+-                for (int i = bits.length; i-- > n;) {
+-                    if (bits[i] != 0) {
+-                        return false;
+-                    }
+-                }
+-            }
+-            else if (set.bits.length > n) {
+-                for (int i = set.bits.length; i-- > n;) {
+-                    if (set.bits[i] != 0) {
+-                        return false;
+-                    }
+-                }
+-            }
+-            return true;
+-        }
+-        return false;
+-    }
+-
+-    /** Find ranges in a set element array.  @param elems The array of
+-     * elements representing the set, usually from Bit Set.toArray().
+-     * @return Vector of ranges.
+-     */
+-    public static Vector getRanges(int[] elems) {
+-        if (elems.length == 0) {
+-            return null;
+-        }
+-        int begin = elems[0];
+-        int end = elems[elems.length - 1];
+-        if (elems.length <= 2) {
+-            // Not enough elements for a range expression
+-            return null;
+-        }
+-
+-        Vector ranges = new Vector(5);
+-        // look for ranges
+-        for (int i = 0; i < elems.length - 2; i++) {
+-            int lastInRange;
+-            lastInRange = elems.length - 1;
+-            for (int j = i + 1; j < elems.length; j++) {
+-                if (elems[j] != elems[j - 1] + 1) {
+-                    lastInRange = j - 1;
+-                    break;
+-                }
+-            }
+-            // found a range
+-            if (lastInRange - i > 2) {
+-                ranges.appendElement(new IntRange(elems[i], elems[lastInRange]));
+-            }
+-        }
+-        return ranges;
+-    }
+-
+-    /**
+-     * Grows the set to a larger number of bits.
+-     * @param bit element that must fit in set
+-     */
+-    public void growToInclude(int bit) {
+-        int newSize = Math.max(bits.length << 1, numWordsToHold(bit));
+-        long newbits[] = new long[newSize];
+-        System.arraycopy(bits, 0, newbits, 0, bits.length);
+-        bits = newbits;
+-    }
+-
+-    public boolean member(int el) {
+-        int n = wordNumber(el);
+-        if (n >= bits.length) return false;
+-        return (bits[n] & bitMask(el)) != 0;
+-    }
+-
+-    public boolean nil() {
+-        for (int i = bits.length - 1; i >= 0; i--) {
+-            if (bits[i] != 0) return false;
+-        }
+-        return true;
+-    }
+-
+-    public BitSet not() {
+-        BitSet s = (BitSet)this.clone();
+-        s.notInPlace();
+-        return s;
+-    }
+-
+-    public void notInPlace() {
+-        for (int i = bits.length - 1; i >= 0; i--) {
+-            bits[i] = ~bits[i];
+-        }
+-    }
+-
+-    /** complement bits in the range 0..maxBit. */
+-    public void notInPlace(int maxBit) {
+-        notInPlace(0, maxBit);
+-    }
+-
+-    /** complement bits in the range minBit..maxBit.*/
+-    public void notInPlace(int minBit, int maxBit) {
+-        // make sure that we have room for maxBit
+-        growToInclude(maxBit);
+-        for (int i = minBit; i <= maxBit; i++) {
+-            int n = wordNumber(i);
+-            bits[n] ^= bitMask(i);
+-        }
+-    }
+-
+-    private final int numWordsToHold(int el) {
+-        return (el >> LOG_BITS) + 1;
+-    }
+-
+-    public static BitSet of(int el) {
+-        BitSet s = new BitSet(el + 1);
+-        s.add(el);
+-        return s;
+-    }
+-
+-    /** return this | a in a new set */
+-    public BitSet or(BitSet a) {
+-        BitSet s = (BitSet)this.clone();
+-        s.orInPlace(a);
+-        return s;
+-    }
+-
+-    public void orInPlace(BitSet a) {
+-        // If this is smaller than a, grow this first
+-        if (a.bits.length > bits.length) {
+-            setSize(a.bits.length);
+-        }
+-        int min = Math.min(bits.length, a.bits.length);
+-        for (int i = min - 1; i >= 0; i--) {
+-            bits[i] |= a.bits[i];
+-        }
+-    }
+-
+-    // remove this element from this set
+-    public void remove(int el) {
+-        int n = wordNumber(el);
+-        if (n >= bits.length) {
+-            growToInclude(el);
+-        }
+-        bits[n] &= ~bitMask(el);
+-    }
+-
+-    /**
+-     * Sets the size of a set.
+-     * @param nwords how many words the new set should be
+-     */
+-    private void setSize(int nwords) {
+-        long newbits[] = new long[nwords];
+-        int n = Math.min(nwords, bits.length);
+-        System.arraycopy(bits, 0, newbits, 0, n);
+-        bits = newbits;
+-    }
+-
+-    public int size() {
+-        return bits.length << LOG_BITS; // num words * bits per word
+-    }
+-
+-    /** return how much space is being used by the bits array not
+-     *  how many actually have member bits on.
+-     */
+-    public int lengthInLongWords() {
+-        return bits.length;
+-    }
+-
+-    /**Is this contained within a? */
+-    public boolean subset(BitSet a) {
+-        if (a == null || !(a instanceof BitSet)) return false;
+-        return this.and(a).equals(this);
+-    }
+-
+-    /**Subtract the elements of 'a' from 'this' in-place.
+-     * Basically, just turn off all bits of 'this' that are in 'a'.
+-     */
+-    public void subtractInPlace(BitSet a) {
+-        if (a == null) return;
+-        // for all words of 'a', turn off corresponding bits of 'this'
+-        for (int i = 0; i < bits.length && i < a.bits.length; i++) {
+-            bits[i] &= ~a.bits[i];
+-        }
+-    }
+-
+-    public int[] toArray() {
+-        int[] elems = new int[degree()];
+-        int en = 0;
+-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+-            if (member(i)) {
+-                elems[en++] = i;
+-            }
+-        }
+-        return elems;
+-    }
+-
+-    public long[] toPackedArray() {
+-        return bits;
+-    }
+-
+-    public String toString() {
+-        return toString(",");
+-    }
+-
+-    /** Transform a bit set into a string by formatting each element as an integer
+-     * @separator The string to put in between elements
+-     * @return A commma-separated list of values
+-     */
+-    public String toString(String separator) {
+-        String str = "";
+-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+-            if (member(i)) {
+-                if (str.length() > 0) {
+-                    str += separator;
+-                }
+-                str = str + i;
+-            }
+-        }
+-        return str;
+-    }
+-
+-    /** Transform a bit set into a string of characters.
+-     * @separator The string to put in between elements
+-     * @param formatter An object implementing the CharFormatter interface.
+-     * @return A commma-separated list of character constants.
+-     */
+-    public String toString(String separator, CharFormatter formatter) {
+-        String str = "";
+-
+-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+-            if (member(i)) {
+-                if (str.length() > 0) {
+-                    str += separator;
+-                }
+-                str = str + formatter.literalChar(i);
+-            }
+-        }
+-        return str;
+-    }
+-
+-    /**Create a string representation where instead of integer elements, the
+-     * ith element of vocabulary is displayed instead.  Vocabulary is a Vector
+-     * of Strings.
+-     * @separator The string to put in between elements
+-     * @return A commma-separated list of character constants.
+-     */
+-    public String toString(String separator, Vector vocabulary) {
+-        if (vocabulary == null) {
+-            return toString(separator);
+-        }
+-        String str = "";
+-        for (int i = 0; i < (bits.length << LOG_BITS); i++) {
+-            if (member(i)) {
+-                if (str.length() > 0) {
+-                    str += separator;
+-                }
+-                if (i >= vocabulary.size()) {
+-                    str += "<bad element " + i + ">";
+-                }
+-                else if (vocabulary.elementAt(i) == null) {
+-                    str += "<" + i + ">";
+-                }
+-                else {
+-                    str += (String)vocabulary.elementAt(i);
+-                }
+-            }
+-        }
+-        return str;
+-    }
+-
+-    /**
+-     * Dump a comma-separated list of the words making up the bit set.
+-     * Split each 64 bit number into two more manageable 32 bit numbers.
+-     * This generates a comma-separated list of C++-like unsigned long constants.
+-     */
+-    public String toStringOfHalfWords() {
+-        String s = new String();
+-        for (int i = 0; i < bits.length; i++) {
+-            if (i != 0) s += ", ";
+-            long tmp = bits[i];
+-            tmp &= 0xFFFFFFFFL;
+-            s += (tmp + "UL");
+-            s += ", ";
+-            tmp = bits[i] >>> 32;
+-            tmp &= 0xFFFFFFFFL;
+-            s += (tmp + "UL");
+-        }
+-        return s;
+-    }
+-
+-    /**
+-     * Dump a comma-separated list of the words making up the bit set.
+-     * This generates a comma-separated list of Java-like long int constants.
+-     */
+-    public String toStringOfWords() {
+-        String s = new String();
+-        for (int i = 0; i < bits.length; i++) {
+-            if (i != 0) s += ", ";
+-            s += (bits[i] + "L");
+-        }
+-        return s;
+-    }
+-
+-    /** Print out the bit set but collapse char ranges. */
+-    public String toStringWithRanges(String separator, CharFormatter formatter) {
+-        String str = "";
+-        int[] elems = this.toArray();
+-        if (elems.length == 0) {
+-            return "";
+-        }
+-        // look for ranges
+-        int i = 0;
+-        while (i < elems.length) {
+-            int lastInRange;
+-            lastInRange = 0;
+-            for (int j = i + 1; j < elems.length; j++) {
+-                if (elems[j] != elems[j - 1] + 1) {
+-                    break;
+-                }
+-                lastInRange = j;
+-            }
+-            // found a range
+-            if (str.length() > 0) {
+-                str += separator;
+-            }
+-            if (lastInRange - i >= 2) {
+-                str += formatter.literalChar(elems[i]);
+-                str += "..";
+-                str += formatter.literalChar(elems[lastInRange]);
+-                i = lastInRange;	// skip past end of range for next range
+-            }
+-            else {	// no range, just print current char and move on
+-                str += formatter.literalChar(elems[i]);
+-            }
+-            i++;
+-        }
+-        return str;
+-    }
+-
+-    private final static int wordNumber(int bit) {
+-        return bit >> LOG_BITS; // bit / BITS
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/IndexedVector.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/IndexedVector.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/IndexedVector.java	2006-08-31 00:34:14.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/IndexedVector.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,79 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-/**
+- * A simple indexed vector: a normal vector except that you must
+- * specify a key when adding an element.  This allows fast lookup
+- * and allows the order of specification to be preserved.
+- */
+-public class IndexedVector {
+-    protected Vector elements;
+-    protected Hashtable index;
+-
+-
+-    /**
+-     * IndexedVector constructor comment.
+-     */
+-    public IndexedVector() {
+-        elements = new Vector(10);
+-        index = new Hashtable(10);
+-    }
+-
+-    /**
+-     * IndexedVector constructor comment.
+-     * @param size int
+-     */
+-    public IndexedVector(int size) {
+-        elements = new Vector(size);
+-        index = new Hashtable(size);
+-    }
+-
+-    public synchronized void appendElement(Object key, Object value) {
+-        elements.appendElement(value);
+-        index.put(key, value);
+-    }
+-
+-    /**
+-     * Returns the element at the specified index.
+-     * @param index the index of the desired element
+-     * @exception ArrayIndexOutOfBoundsException If an invalid
+-     * index was given.
+-     */
+-    public Object elementAt(int i) {
+-        return elements.elementAt(i);
+-    }
+-
+-    public Enumeration elements() {
+-        return elements.elements();
+-    }
+-
+-    public Object getElement(Object key) {
+-        Object o = index.get(key);
+-        return o;
+-    }
+-
+-    /** remove element referred to by key NOT value; return false if not found. */
+-    public synchronized boolean removeElement(Object key) {
+-        Object value = index.get(key);
+-        if (value == null) {
+-            return false;
+-        }
+-        index.remove(key);
+-        elements.removeElement(value);
+-        return false;
+-    }
+-
+-    public int size() {
+-        return elements.size();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/IntRange.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/IntRange.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/IntRange.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/IntRange.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,21 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class IntRange {
+-    int begin, end;
+-
+-
+-    public IntRange(int begin, int end) {
+-        this.begin = begin;
+-        this.end = end;
+-    }
+-
+-    public String toString() {
+-        return begin + ".." + end;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/LLCell.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/LLCell.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/LLCell.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/LLCell.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,24 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A linked list cell, which contains a ref to the object and next cell.
+- * The data,next members are public to this class, but not outside the
+- * collections.impl package.
+- *
+- * @author Terence Parr
+- * <a href=http://www.MageLang.com>MageLang Institute</a>
+- */
+-class LLCell {
+-    Object data;
+-    LLCell next;
+-
+-
+-    public LLCell(Object o) {
+-        data = o;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/LLEnumeration.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/LLEnumeration.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/LLEnumeration.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/LLEnumeration.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,53 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.List;
+-import persistence.antlr.collections.Stack;
+-
+-import java.util.Enumeration;
+-import java.util.NoSuchElementException;
+-
+-import persistence.antlr.collections.impl.LLCell;
+-
+-/**An enumeration of a LList.  Maintains a cursor through the list.
+- * bad things would happen if the list changed via another thread
+- * while we were walking this list.
+- */
+-final class LLEnumeration implements Enumeration {
+-    LLCell cursor;
+-    LList list;
+-
+-
+-    /**Create an enumeration attached to a LList*/
+-    public LLEnumeration(LList l) {
+-        list = l;
+-        cursor = list.head;
+-    }
+-
+-    /** Return true/false depending on whether there are more
+-     * elements to enumerate.
+-     */
+-    public boolean hasMoreElements() {
+-        if (cursor != null)
+-            return true;
+-        else
+-            return false;
+-    }
+-
+-    /**Get the next element in the enumeration.  Destructive in that
+-     * the returned element is removed from the enumeration.  This
+-     * does not affect the list itself.
+-     * @return the next object in the enumeration.
+-     */
+-    public Object nextElement() {
+-        if (!hasMoreElements()) throw new NoSuchElementException();
+-        LLCell p = cursor;
+-        cursor = cursor.next;
+-        return p.data;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/LList.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/LList.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/LList.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/LList.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,133 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.List;
+-import persistence.antlr.collections.Stack;
+-
+-import java.util.Enumeration;
+-import java.util.NoSuchElementException;
+-
+-import persistence.antlr.collections.impl.LLCell;
+-
+-/**A Linked List Implementation (not thread-safe for simplicity)
+- * (adds to the tail) (has an enumeration)
+- */
+-public class LList implements List, Stack {
+-    protected LLCell head = null, tail = null;
+-    protected int length = 0;
+-
+-
+-    /** Add an object to the end of the list.
+-     * @param o the object to add
+-     */
+-    public void add(Object o) {
+-        append(o);
+-    }
+-
+-    /** Append an object to the end of the list.
+-     * @param o the object to append
+-     */
+-    public void append(Object o) {
+-        LLCell n = new LLCell(o);
+-        if (length == 0) {
+-            head = tail = n;
+-            length = 1;
+-        }
+-        else {
+-            tail.next = n;
+-            tail = n;
+-            length++;
+-        }
+-    }
+-
+-    /**Delete the object at the head of the list.
+-     * @return the object found at the head of the list.
+-     * @exception NoSuchElementException if the list is empty.
+-     */
+-    protected Object deleteHead() throws NoSuchElementException {
+-        if (head == null) throw new NoSuchElementException();
+-        Object o = head.data;
+-        head = head.next;
+-        length--;
+-        return o;
+-    }
+-
+-    /**Get the ith element in the list.
+-     * @param i the index (from 0) of the requested element.
+-     * @return the object at index i
+-     * NoSuchElementException is thrown if i out of range
+-     */
+-    public Object elementAt(int i) throws NoSuchElementException {
+-        int j = 0;
+-        for (LLCell p = head; p != null; p = p.next) {
+-            if (i == j) return p.data;
+-            j++;
+-        }
+-        throw new NoSuchElementException();
+-    }
+-
+-    /**Return an enumeration of the list elements */
+-    public Enumeration elements() {
+-        return new LLEnumeration(this);
+-    }
+-
+-    /** How high is the stack? */
+-    public int height() {
+-        return length;
+-    }
+-
+-    /** Answers whether or not an object is contained in the list
+-     * @param o the object to test for inclusion.
+-     * @return true if object is contained else false.
+-     */
+-    public boolean includes(Object o) {
+-        for (LLCell p = head; p != null; p = p.next) {
+-            if (p.data.equals(o)) return true;
+-        }
+-        return false;
+-    }
+-    // The next two methods make LLQueues and LLStacks easier.
+-
+-    /** Insert an object at the head of the list.
+-     * @param o the object to add
+-     */
+-    protected void insertHead(Object o) {
+-        LLCell c = head;
+-        head = new LLCell(o);
+-        head.next = c;
+-        length++;
+-        if (tail == null) tail = head;
+-    }
+-
+-    /**Return the length of the list.*/
+-    public int length() {
+-        return length;
+-    }
+-
+-    /** Pop the top element of the stack off.
+-     * @return the top of stack that was popped off.
+-     * @exception NoSuchElementException if the stack is empty.
+-     */
+-    public Object pop() throws NoSuchElementException {
+-        Object o = deleteHead();
+-        return o;
+-    }
+-    // Satisfy the Stack interface now.
+-
+-    /** Push an object onto the stack.
+-     * @param o the object to push
+-     */
+-    public void push(Object o) {
+-        insertHead(o);
+-    }
+-
+-    public Object top() throws NoSuchElementException {
+-        if (head == null) throw new NoSuchElementException();
+-        return head.data;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumeration.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumeration.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumeration.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumeration.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,40 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-import java.util.NoSuchElementException;
+-
+-import persistence.antlr.collections.Enumerator;
+-
+-// based on java.lang.Vector; returns any null indices between non-null ones.
+-
+-class VectorEnumeration implements Enumeration {
+-    Vector vector;
+-    int i;
+-
+-
+-    VectorEnumeration(Vector v) {
+-        vector = v;
+-        i = 0;
+-    }
+-
+-    public boolean hasMoreElements() {
+-        synchronized (vector) {
+-            return i <= vector.lastElement;
+-        }
+-    }
+-
+-    public Object nextElement() {
+-        synchronized (vector) {
+-            if (i <= vector.lastElement) {
+-                return vector.data[i++];
+-            }
+-            throw new NoSuchElementException("VectorEnumerator");
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumerator.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/VectorEnumerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,40 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-import java.util.NoSuchElementException;
+-
+-import persistence.antlr.collections.Enumerator;
+-
+-// based on java.lang.Vector; returns any null indices between non-null ones.
+-
+-class VectorEnumerator implements Enumeration {
+-    Vector vector;
+-    int i;
+-
+-
+-    VectorEnumerator(Vector v) {
+-        vector = v;
+-        i = 0;
+-    }
+-
+-    public boolean hasMoreElements() {
+-        synchronized (vector) {
+-            return i <= vector.lastElement;
+-        }
+-    }
+-
+-    public Object nextElement() {
+-        synchronized (vector) {
+-            if (i <= vector.lastElement) {
+-                return vector.data[i++];
+-            }
+-            throw new NoSuchElementException("VectorEnumerator");
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/Vector.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/Vector.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/impl/Vector.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/impl/Vector.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,120 +0,0 @@
+-package persistence.antlr.collections.impl;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-import java.util.NoSuchElementException;
+-
+-import persistence.antlr.collections.Enumerator;
+-
+-public class Vector implements Cloneable {
+-    protected Object[] data;
+-    protected int lastElement = -1;
+-
+-    public Vector() {
+-        this(10);
+-    }
+-
+-    public Vector(int size) {
+-        data = new Object[size];
+-    }
+-
+-    public synchronized void appendElement(Object o) {
+-        ensureCapacity(lastElement + 2);
+-        data[++lastElement] = o;
+-    }
+-
+-    /**
+-     * Returns the current capacity of the vector.
+-     */
+-    public int capacity() {
+-        return data.length;
+-    }
+-
+-    public Object clone() {
+-        Vector v = null;
+-        try {
+-            v = (Vector)super.clone();
+-        }
+-        catch (CloneNotSupportedException e) {
+-            System.err.println("cannot clone Vector.super");
+-            return null;
+-        }
+-        v.data = new Object[size()];
+-        System.arraycopy(data, 0, v.data, 0, size());
+-        return v;
+-    }
+-
+-    /**
+-     * Returns the element at the specified index.
+-     * @param index the index of the desired element
+-     * @exception ArrayIndexOutOfBoundsException If an invalid
+-     * index was given.
+-     */
+-    public synchronized Object elementAt(int i) {
+-        if (i >= data.length) {
+-            throw new ArrayIndexOutOfBoundsException(i + " >= " + data.length);
+-        }
+-        if (i < 0) {
+-            throw new ArrayIndexOutOfBoundsException(i + " < 0 ");
+-        }
+-        return data[i];
+-    }
+-
+-    public synchronized Enumeration elements() {
+-        return new VectorEnumerator(this);
+-    }
+-
+-    public synchronized void ensureCapacity(int minIndex) {
+-        if (minIndex + 1 > data.length) {
+-            Object oldData[] = data;
+-            int n = data.length * 2;
+-            if (minIndex + 1 > n) {
+-                n = minIndex + 1;
+-            }
+-            data = new Object[n];
+-            System.arraycopy(oldData, 0, data, 0, oldData.length);
+-        }
+-    }
+-
+-    public synchronized boolean removeElement(Object o) {
+-        // find element
+-        int i;
+-        for (i = 0; i <= lastElement && data[i] != o; i++) {
+-            ;
+-        }
+-        if (i <= lastElement) { // if found it
+-            data[i] = null;		// kill ref for GC
+-            int above = lastElement - i;
+-            if (above > 0) {
+-                System.arraycopy(data, i + 1, data, i, above);
+-            }
+-            lastElement--;
+-            return true;
+-        }
+-        else {
+-            return false;
+-        }
+-    }
+-
+-    public synchronized void setElementAt(Object obj, int i) {
+-        if (i >= data.length) {
+-            throw new ArrayIndexOutOfBoundsException(i + " >= " + data.length);
+-        }
+-        data[i] = obj;
+-        // track last element in the vector so we can append things
+-        if (i > lastElement) {
+-            lastElement = i;
+-        }
+-    }
+-
+-    // return number of slots in the vector; e.g., you can set
+-    // the 30th element and size() will return 31.
+-    public int size() {
+-        return lastElement + 1;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/List.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/List.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/List.java	2006-08-31 00:34:13.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/List.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,27 +0,0 @@
+-package persistence.antlr.collections;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-import java.util.NoSuchElementException;
+-
+-/**A simple List interface that describes operations
+- * on a list.
+- */
+-public interface List {
+-    public void add(Object o); // can insert at head or append.
+-
+-    public void append(Object o);
+-
+-    public Object elementAt(int index) throws NoSuchElementException;
+-
+-    public Enumeration elements();
+-
+-    public boolean includes(Object o);
+-
+-    public int length();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/collections/Stack.java glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/Stack.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/collections/Stack.java	2006-08-31 00:34:13.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/collections/Stack.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,25 +0,0 @@
+-package persistence.antlr.collections;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.NoSuchElementException;
+-
+-/** A simple stack definition; restrictive in that you cannot
+- * access arbitrary stack elements.
+- *
+- * @author Terence Parr
+- * <a href=http://www.MageLang.com>MageLang Institute</a>
+- */
+-public interface Stack {
+-    public int height();
+-
+-    public Object pop() throws NoSuchElementException;
+-
+-    public void push(Object o);
+-
+-    public Object top() throws NoSuchElementException;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CommonAST.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonAST.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CommonAST.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonAST.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,58 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-/** Common AST node implementation */
+-public class CommonAST extends BaseAST {
+-    int ttype = Token.INVALID_TYPE;
+-    String text;
+-
+-
+-    /** Get the token text for this node */
+-    public String getText() {
+-        return text;
+-    }
+-
+-    /** Get the token type for this node */
+-    public int getType() {
+-        return ttype;
+-    }
+-
+-    public void initialize(int t, String txt) {
+-        setType(t);
+-        setText(txt);
+-    }
+-
+-    public void initialize(AST t) {
+-        setText(t.getText());
+-        setType(t.getType());
+-    }
+-
+-    public CommonAST() {
+-    }
+-
+-    public CommonAST(Token tok) {
+-        initialize(tok);
+-    }
+-
+-    public void initialize(Token tok) {
+-        setText(tok.getText());
+-        setType(tok.getType());
+-    }
+-
+-    /** Set the token text for this node */
+-    public void setText(String text_) {
+-        text = text_;
+-    }
+-
+-    /** Set the token type for this node */
+-    public void setType(int ttype_) {
+-        ttype = ttype_;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CommonASTWithHiddenTokens.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonASTWithHiddenTokens.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CommonASTWithHiddenTokens.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonASTWithHiddenTokens.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,37 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** A CommonAST whose initialization copies hidden token
+- *  information from the Token used to create a node.
+- */
+-public class CommonASTWithHiddenTokens extends CommonAST {
+-    protected CommonHiddenStreamToken hiddenBefore, hiddenAfter; // references to hidden tokens
+-
+-    public CommonASTWithHiddenTokens() {
+-        super();
+-    }
+-
+-    public CommonASTWithHiddenTokens(Token tok) {
+-        super(tok);
+-    }
+-
+-    public CommonHiddenStreamToken getHiddenAfter() {
+-        return hiddenAfter;
+-    }
+-
+-    public CommonHiddenStreamToken getHiddenBefore() {
+-        return hiddenBefore;
+-    }
+-
+-    public void initialize(Token tok) {
+-        CommonHiddenStreamToken t = (CommonHiddenStreamToken)tok;
+-        super.initialize(t);
+-        hiddenBefore = t.getHiddenBefore();
+-        hiddenAfter = t.getHiddenAfter();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CommonHiddenStreamToken.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonHiddenStreamToken.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CommonHiddenStreamToken.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonHiddenStreamToken.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,40 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class CommonHiddenStreamToken extends CommonToken {
+-    protected CommonHiddenStreamToken hiddenBefore;
+-    protected CommonHiddenStreamToken hiddenAfter;
+-
+-    public CommonHiddenStreamToken() {
+-        super();
+-    }
+-
+-    public CommonHiddenStreamToken(int t, String txt) {
+-        super(t, txt);
+-    }
+-
+-    public CommonHiddenStreamToken(String s) {
+-        super(s);
+-    }
+-
+-    public CommonHiddenStreamToken getHiddenAfter() {
+-        return hiddenAfter;
+-    }
+-
+-    public CommonHiddenStreamToken getHiddenBefore() {
+-        return hiddenBefore;
+-    }
+-
+-    protected void setHiddenAfter(CommonHiddenStreamToken t) {
+-        hiddenAfter = t;
+-    }
+-
+-    protected void setHiddenBefore(CommonHiddenStreamToken t) {
+-        hiddenBefore = t;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CommonToken.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonToken.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CommonToken.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CommonToken.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,55 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class CommonToken extends Token {
+-    // most tokens will want line and text information
+-    protected int line;
+-    protected String text = null;
+-    protected int col;
+-
+-    public CommonToken() {
+-    }
+-
+-    public CommonToken(int t, String txt) {
+-        type = t;
+-        setText(txt);
+-    }
+-
+-    public CommonToken(String s) {
+-        text = s;
+-    }
+-
+-    public int getLine() {
+-        return line;
+-    }
+-
+-    public String getText() {
+-        return text;
+-    }
+-
+-    public void setLine(int l) {
+-        line = l;
+-    }
+-
+-    public void setText(String s) {
+-        text = s;
+-    }
+-
+-    public String toString() {
+-        return "[\"" + getText() + "\",<" + type + ">,line=" + line + ",col=" + col + "]";
+-    }
+-
+-    /** Return token's start column */
+-    public int getColumn() {
+-        return col;
+-    }
+-
+-    public void setColumn(int c) {
+-        col = c;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CppBlockFinishingInfo.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CppBlockFinishingInfo.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CppBlockFinishingInfo.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CppBlockFinishingInfo.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,34 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-// C++ code generator by Pete Wells: pete at yamuna.demon.co.uk
+-
+-class CppBlockFinishingInfo {
+-	String postscript;		// what to generate to terminate block
+-	boolean generatedSwitch;// did block finish with "default:" of switch?
+-	boolean generatedAnIf;
+-
+-	/** When generating an if or switch, end-of-token lookahead sets
+-	 *  will become the else or default clause, don't generate an
+-	 *  error clause in this case.
+-	 */
+-	boolean needAnErrorClause;
+-
+-
+-	public CppBlockFinishingInfo() {
+-		postscript=null;
+-		generatedSwitch=false;
+-		needAnErrorClause = true;
+-	}
+-	public CppBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) {
+-		postscript = ps;
+-		generatedSwitch = genS;
+-		this.generatedAnIf = generatedAnIf;
+-		needAnErrorClause = n;
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CppCharFormatter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CppCharFormatter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CppCharFormatter.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CppCharFormatter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,95 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-// C++ code generator by Pete Wells: pete at yamuna.demon.co.uk
+-
+-class CppCharFormatter implements CharFormatter {
+-
+-	/** Given a character value, return a string representing the character
+-	 * that can be embedded inside a string literal or character literal
+-	 * This works for Java/C/C++ code-generation and languages with compatible
+-	 * special-character-escapment.
+-	 *
+-	 * Used internally in CppCharFormatter and in
+-	 * CppCodeGenerator.converJavaToCppString.
+-	 *
+-	 * @param c   The character of interest.
+-	 * @param forCharLiteral  true to escape for char literal, false for string literal
+-	 */
+-	public String escapeChar(int c, boolean forCharLiteral) {
+-		// System.out.println("CppCharFormatter.escapeChar("+c+")");
+-		switch (c) {
+-		case '\n' : return "\\n";
+-		case '\t' : return "\\t";
+-		case '\r' : return "\\r";
+-		case '\\' : return "\\\\";
+-		case '\'' : return forCharLiteral ? "\\'" : "'";
+-		case '"' :  return forCharLiteral ? "\"" : "\\\"";
+-		default :
+-			if ( c < ' ' || c > 126 )
+-			{
+-				if (c > 255)
+-				{
+-					String s = Integer.toString(c,16);
+-					// put leading zeroes in front of the thing..
+-					while( s.length() < 4 )
+-						s = '0' + s;
+-					return "\\u" + s;
+-				}
+-				else {
+-					return "\\" + Integer.toString(c,8);
+-				}
+-			}
+-			else {
+-				return String.valueOf((char)c);
+-			}
+-		}
+-	}
+-
+-	/** Converts a String into a representation that can be use as a literal
+-	 * when surrounded by double-quotes.
+-	 *
+-	 * Used for escaping semantic predicate strings for exceptions.
+-	 *
+-	 * @param s The String to be changed into a literal
+-	 */
+-	public String escapeString(String s)
+-	{
+-		String retval = new String();
+-		for (int i = 0; i < s.length(); i++)
+-			retval += escapeChar(s.charAt(i), false);
+-
+-		return retval;
+-	}
+-
+-	/** Given a character value, return a string representing the character
+-	 * literal that can be recognized by the target language compiler.
+-	 * This works for languages that use single-quotes for character literals.
+-	 * @param c The character of interest.
+-	 */
+-	public String literalChar(int c) {
+-		String ret = "0x"+Integer.toString(c,16);
+-		if( c >= 0 && c <= 126 )
+-			ret += " /* '"+escapeChar(c,true)+"' */ ";
+-		return ret;
+-	}
+-
+-	/** Converts a String into a string literal
+-	 * This works for languages that use double-quotes for string literals.
+-	 * Code-generators for languages should override this method.
+-	 *
+-	 * Used for the generation of the tables with token names
+-	 *
+-	 * @param s The String to be changed into a literal
+-	 */
+-	public String literalString(String s)
+-	{
+-		return "\"" + escapeString(s) + "\"";
+-	}
+-
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CppCodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CppCodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CppCodeGenerator.java	2006-08-31 00:34:05.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CppCodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,4673 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-// C++ code generator by Pete Wells: pete at yamuna.demon.co.uk
+-// #line generation contributed by: Ric Klaren <klaren at cs.utwente.nl>
+-
+-import java.util.Enumeration;
+-import java.util.Hashtable;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-import java.io.PrintWriter; //SAS: changed for proper text file io
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-/** Generate MyParser.cpp, MyParser.hpp, MyLexer.cpp, MyLexer.hpp
+- * and MyParserTokenTypes.hpp
+- */
+-public class CppCodeGenerator extends CodeGenerator {
+-	boolean DEBUG_CPP_CODE_GENERATOR = false;
+-	// non-zero if inside syntactic predicate generation
+-	protected int syntacticPredLevel = 0;
+-
+-	// Are we generating ASTs (for parsers and tree parsers) right now?
+-	protected boolean genAST = false;
+-
+-	// Are we saving the text consumed (for lexers) right now?
+-	protected boolean saveText = false;
+-
+-	// Generate #line's
+-	protected boolean genHashLines = true;
+-	// Generate constructors or not
+-	protected boolean noConstructors = false;
+-
+-	// Used to keep track of lineno in output
+-	protected int outputLine;
+-	protected String outputFile;
+-
+-	// Grammar parameters set up to handle different grammar classes.
+-	// These are used to get instanceof tests out of code generation
+-	boolean usingCustomAST = false;
+-	String labeledElementType;
+-	String labeledElementASTType; // mostly the same as labeledElementType except in parsers
+-	String labeledElementASTInit;
+-	String labeledElementInit;
+-	String commonExtraArgs;
+-	String commonExtraParams;
+-	String commonLocalVars;
+-	String lt1Value;
+-	String exceptionThrown;
+-	String throwNoViable;
+-
+-	// Tracks the rule being generated.  Used for mapTreeId
+-	RuleBlock currentRule;
+-	// Tracks the rule or labeled subrule being generated.  Used for AST generation.
+-	String currentASTResult;
+-	// Mapping between the ids used in the current alt, and the
+-	// names of variables used to represent their AST values.
+-	Hashtable treeVariableMap = new Hashtable();
+-
+-	/** Used to keep track of which AST variables have been defined in a rule
+-	 * (except for the #rule_name and #rule_name_in var's
+-	 */
+-	Hashtable declaredASTVariables = new Hashtable();
+-
+-	// Count of unnamed generated variables
+-	int astVarNumber = 1;
+-	// Special value used to mark duplicate in treeVariableMap
+-	protected static final String NONUNIQUE = new String();
+-
+-	public static final int caseSizeThreshold = 127; // ascii is max
+-
+-	private Vector semPreds;
+-
+-	// Used to keep track of which (heterogeneous AST types are used)
+-	// which need to be set in the ASTFactory of the generated parser
+-	private Vector astTypes;
+-
+-	private static String namespaceStd   = "ANTLR_USE_NAMESPACE(std)";
+-	private static String namespaceAntlr = "ANTLR_USE_NAMESPACE(antlr)";
+-	private static NameSpace nameSpace = null;
+-
+-	private static final String preIncludeCpp  = "pre_include_cpp";
+-	private static final String preIncludeHpp  = "pre_include_hpp";
+-	private static final String postIncludeCpp = "post_include_cpp";
+-	private static final String postIncludeHpp = "post_include_hpp";
+-
+-	/** Create a C++ code-generator using the given Grammar.
+-	 * The caller must still call setTool, setBehavior, and setAnalyzer
+-	 * before generating code.
+-	 */
+-	public CppCodeGenerator() {
+-		super();
+-		charFormatter = new CppCharFormatter();
+-	}
+-	/** Adds a semantic predicate string to the sem pred vector
+-	    These strings will be used to build an array of sem pred names
+-	    when building a debugging parser.  This method should only be
+-	    called when the debug option is specified
+-	 */
+-	protected int addSemPred(String predicate) {
+-		semPreds.appendElement(predicate);
+-		return semPreds.size()-1;
+-	}
+-	public void exitIfError()
+-	{
+-		if (antlrTool.hasError())
+-		{
+-			antlrTool.fatalError("Exiting due to errors.");
+-		}
+-	}
+-	protected int countLines( String s )
+-	{
+-		int lines = 0;
+-		for( int i = 0; i < s.length(); i++ )
+-		{
+-			if( s.charAt(i) == '\n' )
+-				lines++;
+-		}
+-		return lines;
+-	}
+-	/** Output a String to the currentOutput stream.
+-	 * Ignored if string is null.
+-	 * @param s The string to output
+-	 */
+-	protected void _print(String s)
+-	{
+-		if (s != null)
+-		{
+-			outputLine += countLines(s);
+-			currentOutput.print(s);
+-		}
+-	}
+-	/** Print an action without leading tabs, attempting to
+-	 * preserve the current indentation level for multi-line actions
+-	 * Ignored if string is null.
+-	 * @param s The action string to output
+-	 */
+-	protected void _printAction(String s)
+-	{
+-		if (s != null)
+-		{
+-			outputLine += countLines(s)+1;
+-			super._printAction(s);
+-		}
+-	}
+-	/** Print an action stored in a token surrounded by #line stuff */
+-	public void printAction(Token t)
+-	{
+-		if (t != null)
+-		{
+-			genLineNo(t.getLine());
+-			printTabs();
+-			_printAction(processActionForSpecialSymbols(t.getText(), t.getLine(),
+-																null, null) );
+-			genLineNo2();
+-		}
+-	}
+-	/** Print a header action by #line stuff also process any tree construction
+-	 * @param name The name of the header part
+-	 */
+-	public void printHeaderAction(String name)
+-	{
+-		Token a = (persistence.antlr.Token)behavior.headerActions.get(name);
+-		if (a != null)
+-		{
+-			genLineNo(a.getLine());
+-			println(processActionForSpecialSymbols(a.getText(), a.getLine(),
+-																null, null) );
+-			genLineNo2();
+-		}
+-	}
+-	/** Output a String followed by newline, to the currentOutput stream.
+-	 * Ignored if string is null.
+-	 * @param s The string to output
+-	 */
+-	protected void _println(String s) {
+-		if (s != null) {
+-			outputLine += countLines(s)+1;
+-			currentOutput.println(s);
+-		}
+-	}
+-	/** Output tab indent followed by a String followed by newline,
+-	 * to the currentOutput stream.  Ignored if string is null.
+-	 * @param s The string to output
+-	 */
+-	protected void println(String s) {
+-		if (s != null) {
+-			printTabs();
+-			outputLine += countLines(s)+1;
+-			currentOutput.println(s);
+-		}
+-	}
+-
+-	/** Generate a #line or // line depending on options */
+-	public void genLineNo(int line) {
+-		if ( line == 0 ) {
+-			line++;
+-		}
+-		if( genHashLines )
+-			_println("#line "+line+" \""+antlrTool.fileMinusPath(antlrTool.grammarFile)+"\"");
+-	}
+-
+-	/** Generate a #line or // line depending on options */
+-	public void genLineNo(GrammarElement el)
+-	{
+-		if( el != null )
+-			genLineNo(el.getLine());
+-	}
+-	/** Generate a #line or // line depending on options */
+-	public void genLineNo(Token t)
+-	{
+-		if (t != null)
+-			genLineNo(t.getLine());
+-	}
+-	/** Generate a #line or // line depending on options */
+-	public void genLineNo2()
+-	{
+-		if( genHashLines )
+-		{
+-			_println("#line "+(outputLine+1)+" \""+outputFile+"\"");
+-		}
+-	}
+-	/** Sanitize a string coming from antlr's lexer to something that's ok
+-	 * Also bomb out on multibyte char attempts.
+-	 * The bombing out on mb char's is a bit crude but alas.
+-	 */
+-	private String convertJavaToCppString( String s )
+-	{
+-		String ret = new String();
+-
+-		int i = 0;
+-		int val;
+-		while ( i < s.length() )
+-		{
+-			if( s.charAt(i) == '\\' )
+-			{
+-				// deal with escaped junk
+-				switch ( s.charAt(i+1) ) {
+-				case 'b' :
+-				case 'r' :
+-				case 't' :
+-				case 'n' :
+-				case 'f' :
+-				case '"' :
+-				case '\'' :
+-				case '\\' :
+-					ret += "\\"+s.charAt(i+1);
+-					i+=2;
+-					continue;
+-
+-				case 'u' :
+-					// Unicode char \u1234
+-					val = Character.digit(s.charAt(i+2), 16) * 16 * 16 * 16 +
+-						Character.digit(s.charAt(i+3), 16) * 16 * 16 +
+-						Character.digit(s.charAt(i+4), 16) * 16 +
+-						Character.digit(s.charAt(i+5), 16);
+-					i += 6;
+-					break;
+-
+-				case '0' :					// \123
+-				case '1' :
+-				case '2' :
+-				case '3' :
+-					if( Character.isDigit(s.charAt(i+2)) )
+-					{
+-						if( Character.isDigit(s.charAt(i+3)) )
+-						{
+-							val = (s.charAt(i+1)-'0')*8*8 + (s.charAt(i+2)-'0')*8 +
+-								(s.charAt(i+3)-'0');
+-							i += 4;
+-						}
+-						else
+-						{
+-							val = (s.charAt(i+1)-'0')*8 + (s.charAt(i+2)-'0');
+-							i += 3;
+-						}
+-					}
+-					else
+-					{
+-						val = s.charAt(i+1)-'0';
+-						i += 2;
+-					}
+-					break;
+-
+-				case '4' :
+-				case '5' :
+-				case '6' :
+-				case '7' :
+-					if ( Character.isDigit(s.charAt(i+2)) )
+-					{
+-						val = (s.charAt(i+1)-'0')*8 + (s.charAt(i+2)-'0');
+-						i += 3;
+-					}
+-					else
+-					{
+-						val = s.charAt(i+1)-'0';
+-						i += 2;
+-					}
+-				default:
+-					antlrTool.error("Unhandled escape in string constant: '"+s+"'");
+-					val = 0;
+-				}
+-				if( val >= ' ' && val <= 126 )		// just concat if printable
+-					ret += (char)val;
+-				else if( val > 255 )				// abort if multibyte
+-					antlrTool.error("Multibyte character found in string constant: '"+s+"'");
+-				else
+-					ret += charFormatter.escapeChar(val,true);
+-			}
+-			else if( s.charAt(i) >= ' ' && s.charAt(i) <= 126 )
+-				ret += s.charAt(i++);
+-			else
+-				ret += charFormatter.escapeChar(s.charAt(i++),true);
+-		}
+-		// System.out.println("convertJavaToCppString: "+s+" -> "+ret);
+-		return ret;
+-	}
+-	/** Generate the parser, lexer, treeparser, and token types in C++
+-	 */
+-	public void gen() {
+-		// Do the code generation
+-		try {
+-			// Loop over all grammars
+-			Enumeration grammarIter = behavior.grammars.elements();
+-			while (grammarIter.hasMoreElements()) {
+-				Grammar g = (Grammar)grammarIter.nextElement();
+-				if ( g.debuggingOutput ) {
+-					antlrTool.error(g.getFilename()+": C++ mode does not support -debug");
+-				}
+-				// Connect all the components to each other
+-				g.setGrammarAnalyzer(analyzer);
+-				g.setCodeGenerator(this);
+-				analyzer.setGrammar(g);
+-				// To get right overloading behavior across hetrogeneous grammars
+-				setupGrammarParameters(g);
+-				g.generate();
+-				exitIfError();
+-			}
+-
+-			// Loop over all token managers (some of which are lexers)
+-			Enumeration tmIter = behavior.tokenManagers.elements();
+-			while (tmIter.hasMoreElements()) {
+-				TokenManager tm = (TokenManager)tmIter.nextElement();
+-				if (!tm.isReadOnly()) {
+-					// Write the token manager tokens as C++
+-					// this must appear before genTokenInterchange so that
+-					// labels are set on string literals
+-					genTokenTypes(tm);
+-					// Write the token manager tokens as plain text
+-					genTokenInterchange(tm);
+-				}
+-				exitIfError();
+-			}
+-		}
+-		catch (IOException e) {
+-			antlrTool.reportException(e, null);
+-		}
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The {...} action to generate
+-	 */
+-	public void gen(ActionElement action) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genAction("+action+")");
+-		if ( action.isSemPred ) {
+-			genSemPred(action.actionText, action.line);
+-		}
+-		else {
+-			if ( grammar.hasSyntacticPredicate ) {
+-				println("if ( inputState->guessing==0 ) {");
+-				tabs++;
+-			}
+-
+-			ActionTransInfo tInfo = new ActionTransInfo();
+-			String actionStr = processActionForSpecialSymbols(action.actionText,
+-																			  action.getLine(),
+-																			  currentRule, tInfo);
+-
+-			if ( tInfo.refRuleRoot!=null ) {
+-				// Somebody referenced "#rule", make sure translated var is valid
+-				// assignment to #rule is left as a ref also, meaning that assignments
+-				// with no other refs like "#rule = foo();" still forces this code to be
+-				// generated (unnecessarily).
+-				println(tInfo.refRuleRoot + " = "+labeledElementASTType+"(currentAST.root);");
+-			}
+-
+-			// dump the translated action
+-			genLineNo(action);
+-			printAction(actionStr);
+-			genLineNo2();
+-
+-			if ( tInfo.assignToRoot ) {
+-				// Somebody did a "#rule=", reset internal currentAST.root
+-				println("currentAST.root = "+tInfo.refRuleRoot+";");
+-				// reset the child pointer too to be last sibling in sibling list
+-				// now use if else in stead of x ? y : z to shut CC 4.2 up.
+-				println("if ( "+tInfo.refRuleRoot+"!="+labeledElementASTInit+" &&");
+-				tabs++;
+-				println(tInfo.refRuleRoot+"->getFirstChild() != "+labeledElementASTInit+" )");
+-				println("  currentAST.child = "+tInfo.refRuleRoot+"->getFirstChild();");
+-			  	tabs--;
+-				println("else");
+-				tabs++;
+-				println("currentAST.child = "+tInfo.refRuleRoot+";");
+-				tabs--;
+-				println("currentAST.advanceChildToEnd();");
+-			}
+-
+-			if ( grammar.hasSyntacticPredicate ) {
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param blk The "x|y|z|..." block to generate
+-	 */
+-	public void gen(AlternativeBlock blk) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen("+blk+")");
+-		println("{");
+-		genBlockPreamble(blk);
+-		genBlockInitAction(blk);
+-
+-		// Tell AST generation to build subrule result
+-		String saveCurrentASTResult = currentASTResult;
+-		if (blk.getLabel() != null) {
+-			currentASTResult = blk.getLabel();
+-		}
+-
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-		CppBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
+-		genBlockFinish(howToFinish, throwNoViable);
+-
+-		println("}");
+-
+-		// Restore previous AST generation
+-		currentASTResult = saveCurrentASTResult;
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The block-end element to generate.  Block-end
+-	 * elements are synthesized by the grammar parser to represent
+-	 * the end of a block.
+-	 */
+-	public void gen(BlockEndElement end) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")");
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The character literal reference to generate
+-	 */
+-	public void gen(CharLiteralElement atom) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
+-			System.out.println("genChar("+atom+")");
+-
+-		if ( atom.getLabel()!=null ) {
+-			println(atom.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		boolean oldsaveText = saveText;
+-		saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
+-		genMatch(atom);
+-		saveText = oldsaveText;
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The character-range reference to generate
+-	 */
+-	public void gen(CharRangeElement r) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
+-			System.out.println("genCharRangeElement("+r.beginText+".."+r.endText+")");
+-
+-		if ( r.getLabel()!=null  && syntacticPredLevel == 0) {
+-			println(r.getLabel() + " = " + lt1Value + ";");
+-		}
+-		// Correctly take care of saveIndex stuff...
+-		boolean save = ( grammar instanceof LexerGrammar &&
+-							  ( !saveText ||
+-								 r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG )
+-						   );
+-		if (save)
+-         println("_saveIndex=text.length();");
+-
+-		if( grammar instanceof LexerGrammar )
+-			println("matchRange("+convertJavaToCppString(r.beginText)+","+convertJavaToCppString(r.endText)+");");
+-		else
+-			println("matchRange("+r.beginText+","+r.endText+");");
+-
+-		if (save)
+-         println("text.setLength(_saveIndex);");
+-	}
+-	/** Generate the lexer C++ files */
+-	public  void gen(LexerGrammar g) throws IOException {
+-		// If debugging, create a new sempred vector for this grammar
+-		if (g.debuggingOutput)
+-			semPreds = new Vector();
+-
+-		if( g.charVocabulary.size() > 256 )
+-			antlrTool.warning(g.getFilename()+": C++ mode does not support more than 8 bit characters (vocabulary size now: "+g.charVocabulary.size()+")");
+-
+-		setGrammar(g);
+-		if (!(grammar instanceof LexerGrammar)) {
+-			antlrTool.panic("Internal error generating lexer");
+-		}
+-
+-		genBody(g);
+-		genInclude(g);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The (...)+ block to generate
+-	 */
+-	public void gen(OneOrMoreBlock blk) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen+("+blk+")");
+-		String label;
+-		String cnt;
+-		println("{ // ( ... )+");
+-		genBlockPreamble(blk);
+-		if ( blk.getLabel() != null ) {
+-			cnt = "_cnt_"+blk.getLabel();
+-		}
+-		else {
+-			cnt = "_cnt" + blk.ID;
+-		}
+-		println("int "+cnt+"=0;");
+-		if ( blk.getLabel() != null ) {
+-			label = blk.getLabel();
+-		}
+-		else {
+-			label = "_loop" + blk.ID;
+-		}
+-
+-		println("for (;;) {");
+-		tabs++;
+-		// generate the init action for ()+ ()* inside the loop
+-		// this allows us to do usefull EOF checking...
+-		genBlockInitAction(blk);
+-
+-		// Tell AST generation to build subrule result
+-		String saveCurrentASTResult = currentASTResult;
+-		if (blk.getLabel() != null) {
+-			currentASTResult = blk.getLabel();
+-		}
+-
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-		// generate exit test if greedy set to false
+-		// and an alt is ambiguous with exit branch
+-		// or when lookahead derived purely from end-of-file
+-		// Lookahead analysis stops when end-of-file is hit,
+-		// returning set {epsilon}.  Since {epsilon} is not
+-		// ambig with any real tokens, no error is reported
+-		// by deterministic() routines and we have to check
+-		// for the case where the lookahead depth didn't get
+-		// set to NONDETERMINISTIC (this only happens when the
+-		// FOLLOW contains real atoms + epsilon).
+-		boolean generateNonGreedyExitPath = false;
+-		int nonGreedyExitDepth = grammar.maxk;
+-
+-		if ( !blk.greedy &&
+-			 blk.exitLookaheadDepth<=grammar.maxk &&
+-			 blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
+-		{
+-			generateNonGreedyExitPath = true;
+-			nonGreedyExitDepth = blk.exitLookaheadDepth;
+-		}
+-		else if ( !blk.greedy &&
+-				  blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
+-		{
+-			generateNonGreedyExitPath = true;
+-		}
+-
+-		// generate exit test if greedy set to false
+-		// and an alt is ambiguous with exit branch
+-		if ( generateNonGreedyExitPath ) {
+-			if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) {
+-				System.out.println("nongreedy (...)+ loop; exit depth is "+
+-								   blk.exitLookaheadDepth);
+-			}
+-			String predictExit =
+-				getLookaheadTestExpression(blk.exitCache,
+-										   nonGreedyExitDepth);
+-			println("// nongreedy exit test");
+-			println("if ( "+cnt+">=1 && "+predictExit+") goto "+label+";");
+-		}
+-
+-		CppBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
+-		genBlockFinish(
+-			howToFinish,
+-			"if ( "+cnt+">=1 ) { goto "+label+"; } else {" + throwNoViable + "}"
+-		);
+-
+-		println(cnt+"++;");
+-		tabs--;
+-		println("}");
+-		println(label+":;");
+-		println("}  // ( ... )+");
+-
+-		// Restore previous AST generation
+-		currentASTResult = saveCurrentASTResult;
+-	}
+-	/** Generate the parser C++ file */
+-	public void gen(ParserGrammar g) throws IOException {
+-
+-		// if debugging, set up a new vector to keep track of sempred
+-		//   strings for this grammar
+-		if (g.debuggingOutput)
+-			semPreds = new Vector();
+-
+-		setGrammar(g);
+-		if (!(grammar instanceof ParserGrammar)) {
+-			antlrTool.panic("Internal error generating parser");
+-		}
+-
+-		genBody(g);
+-		genInclude(g);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The rule-reference to generate
+-	 */
+-	public void gen(RuleRefElement rr)
+-	{
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRR("+rr+")");
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-		if (rs == null || !rs.isDefined())
+-		{
+-			// Is this redundant???
+-			antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			return;
+-		}
+-		if (!(rs instanceof RuleSymbol))
+-		{
+-			// Is this redundant???
+-			antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			return;
+-		}
+-
+-		genErrorTryForElement(rr);
+-
+-		// AST value for labeled rule refs in tree walker.
+-		// This is not AST construction;  it is just the input tree node value.
+-		if ( grammar instanceof TreeWalkerGrammar &&
+-			rr.getLabel() != null &&
+-			syntacticPredLevel == 0 )
+-		{
+-			println(rr.getLabel() + " = (_t == ASTNULL) ? "+labeledElementASTInit+" : "+lt1Value+";");
+-		}
+-
+-		// if in lexer and ! on rule ref or alt or rule, save buffer index to
+-		// kill later
+-		if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) )
+-		{
+-			println("_saveIndex = text.length();");
+-		}
+-
+-		// Process return value assignment if any
+-		printTabs();
+-		if (rr.idAssign != null)
+-		{
+-			// Warn if the rule has no return type
+-			if (rs.block.returnAction == null)
+-			{
+-				antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-			_print(rr.idAssign + "=");
+-		} else {
+-			// Warn about return value if any, but not inside syntactic predicate
+-			if ( !(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
+-			{
+-				antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-		}
+-
+-		// Call the rule
+-		GenRuleInvocation(rr);
+-
+-		// if in lexer and ! on element or alt or rule, save buffer index to kill later
+-		if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-			println("text.erase(_saveIndex);");
+-		}
+-
+-		// if not in a syntactic predicate
+-		if (syntacticPredLevel == 0)
+-		{
+-			boolean doNoGuessTest = (
+-				grammar.hasSyntacticPredicate &&
+-				(
+-					grammar.buildAST && rr.getLabel() != null ||
+-					(genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
+-				)
+-			);
+-
+-			if (doNoGuessTest) {
+-				println("if (inputState->guessing==0) {");
+-				tabs++;
+-			}
+-
+-			if (grammar.buildAST && rr.getLabel() != null)
+-			{
+-				// always gen variable for rule return on labeled rules
+-				// RK: hmm do I know here if the returnAST needs a cast ?
+-				println(rr.getLabel() + "_AST = returnAST;");
+-			}
+-
+-			if (genAST)
+-			{
+-				switch (rr.getAutoGenType())
+-				{
+-				case GrammarElement.AUTO_GEN_NONE:
+-					if( usingCustomAST )
+-						println("astFactory->addASTChild(currentAST, "+namespaceAntlr+"RefAST(returnAST));");
+-					else
+-						println("astFactory->addASTChild( currentAST, returnAST );");
+-					break;
+-				case GrammarElement.AUTO_GEN_CARET:
+-					// FIXME: RK: I'm not so sure this should be an error..
+-					// I think it might actually work and be usefull at times.
+-					antlrTool.error("Internal: encountered ^ after rule reference");
+-					break;
+-				default:
+-					break;
+-				}
+-			}
+-
+-			// if a lexer and labeled, Token label defined at rule level, just set it here
+-			if ( grammar instanceof LexerGrammar && rr.getLabel() != null )
+-			{
+-				println(rr.getLabel()+"=_returnToken;");
+-			}
+-
+-			if (doNoGuessTest)
+-			{
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-		genErrorCatchForElement(rr);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The string-literal reference to generate
+-	 */
+-	public void gen(StringLiteralElement atom) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genString("+atom+")");
+-
+-		// Variable declarations for labeled elements
+-		if (atom.getLabel()!=null && syntacticPredLevel == 0) {
+-			println(atom.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(atom);
+-
+-		// is there a bang on the literal?
+-		boolean oldsaveText = saveText;
+-		saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
+-
+-		// matching
+-		genMatch(atom);
+-
+-		saveText = oldsaveText;
+-
+-		// tack on tree cursor motion if doing a tree walker
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("_t = _t->getNextSibling();");
+-		}
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The token-range reference to generate
+-	 */
+-	public void gen(TokenRangeElement r) {
+-		genErrorTryForElement(r);
+-		if ( r.getLabel()!=null  && syntacticPredLevel == 0) {
+-			println(r.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(r);
+-
+-		// match
+-		println("matchRange("+r.beginText+","+r.endText+");");
+-		genErrorCatchForElement(r);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The token-reference to generate
+-	 */
+-	public void gen(TokenRefElement atom) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genTokenRef("+atom+")");
+-		if ( grammar instanceof LexerGrammar ) {
+-			antlrTool.panic("Token reference found in lexer");
+-		}
+-		genErrorTryForElement(atom);
+-		// Assign Token value to token label variable
+-		if ( atom.getLabel()!=null && syntacticPredLevel == 0) {
+-			println(atom.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(atom);
+-		// matching
+-		genMatch(atom);
+-		genErrorCatchForElement(atom);
+-
+-		// tack on tree cursor motion if doing a tree walker
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("_t = _t->getNextSibling();");
+-		}
+-	}
+-	public void gen(TreeElement t) {
+-		// save AST cursor
+-		println(labeledElementType+" __t" + t.ID + " = _t;");
+-
+-		// If there is a label on the root, then assign that to the variable
+-		if (t.root.getLabel() != null) {
+-			println(t.root.getLabel() + " = (_t == ASTNULL) ? "+labeledElementASTInit+" : _t;");
+-		}
+-
+-		// check for invalid modifiers ! and ^ on tree element roots
+-		if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
+-			antlrTool.error("Suffixing a root node with '!' is not implemented",
+-						  grammar.getFilename(), t.getLine(), t.getColumn());
+-			t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
+-		}
+-		if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
+-			antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
+-							 grammar.getFilename(), t.getLine(), t.getColumn());
+-			t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
+-		}
+-
+-		// Generate AST variables
+-		genElementAST(t.root);
+-		if (grammar.buildAST) {
+-			// Save the AST construction state
+-			println(namespaceAntlr+"ASTPair __currentAST" + t.ID + " = currentAST;");
+-			// Make the next item added a child of the TreeElement root
+-			println("currentAST.root = currentAST.child;");
+-			println("currentAST.child = "+labeledElementASTInit+";");
+-		}
+-
+-		// match root
+-		if ( t.root instanceof WildcardElement ) {
+-			println("if ( _t == ASTNULL ) throw "+namespaceAntlr+"MismatchedTokenException();");
+-		}
+-		else {
+-			genMatch(t.root);
+-		}
+-		// move to list of children
+-		println("_t = _t->getFirstChild();");
+-
+-		// walk list of children, generating code for each
+-		for (int i=0; i<t.getAlternatives().size(); i++) {
+-			Alternative a = t.getAlternativeAt(i);
+-			AlternativeElement e = a.head;
+-			while ( e != null ) {
+-				e.generate();
+-				e = e.next;
+-			}
+-		}
+-
+-		if (grammar.buildAST) {
+-			// restore the AST construction state to that just after the
+-			// tree root was added
+-			println("currentAST = __currentAST" + t.ID + ";");
+-		}
+-		// restore AST cursor
+-		println("_t = __t" + t.ID + ";");
+-		// move cursor to sibling of tree just parsed
+-		println("_t = _t->getNextSibling();");
+-	}
+-	/** Generate the tree-parser C++ files */
+-	public void gen(TreeWalkerGrammar g) throws IOException {
+-		setGrammar(g);
+-		if (!(grammar instanceof TreeWalkerGrammar)) {
+-			antlrTool.panic("Internal error generating tree-walker");
+-		}
+-
+-		genBody(g);
+-		genInclude(g);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param wc The wildcard element to generate
+-	 */
+-	public void gen(WildcardElement wc) {
+-		// Variable assignment for labeled elements
+-		if (wc.getLabel()!=null && syntacticPredLevel == 0) {
+-			println(wc.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(wc);
+-		// Match anything but EOF
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("if ( _t == "+labeledElementASTInit+" ) throw "+namespaceAntlr+"MismatchedTokenException();");
+-		}
+-		else if (grammar instanceof LexerGrammar) {
+-			if ( grammar instanceof LexerGrammar &&
+-					(!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-				println("_saveIndex = text.length();");
+-			}
+-			println("matchNot(EOF/*_CHAR*/);");
+-			if ( grammar instanceof LexerGrammar &&
+-					(!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-				println("text.erase(_saveIndex);");      // kill text atom put in buffer
+-			}
+-		}
+-		else {
+-			println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
+-		}
+-
+-		// tack on tree cursor motion if doing a tree walker
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("_t = _t->getNextSibling();");
+-		}
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The (...)* block to generate
+-	 */
+-	public void gen(ZeroOrMoreBlock blk) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen*("+blk+")");
+-		println("{ // ( ... )*");
+-		genBlockPreamble(blk);
+-		String label;
+-		if ( blk.getLabel() != null ) {
+-			label = blk.getLabel();
+-		}
+-		else {
+-			label = "_loop" + blk.ID;
+-		}
+-		println("for (;;) {");
+-		tabs++;
+-		// generate the init action for ()+ ()* inside the loop
+-		// this allows us to do usefull EOF checking...
+-		genBlockInitAction(blk);
+-
+-		// Tell AST generation to build subrule result
+-		String saveCurrentASTResult = currentASTResult;
+-		if (blk.getLabel() != null) {
+-			currentASTResult = blk.getLabel();
+-		}
+-
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-		// generate exit test if greedy set to false
+-		// and an alt is ambiguous with exit branch
+-		// or when lookahead derived purely from end-of-file
+-		// Lookahead analysis stops when end-of-file is hit,
+-		// returning set {epsilon}.  Since {epsilon} is not
+-		// ambig with any real tokens, no error is reported
+-		// by deterministic() routines and we have to check
+-		// for the case where the lookahead depth didn't get
+-		// set to NONDETERMINISTIC (this only happens when the
+-		// FOLLOW contains real atoms + epsilon).
+-		boolean generateNonGreedyExitPath = false;
+-		int nonGreedyExitDepth = grammar.maxk;
+-
+-		if ( !blk.greedy &&
+-			 blk.exitLookaheadDepth<=grammar.maxk &&
+-			 blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
+-		{
+-			generateNonGreedyExitPath = true;
+-			nonGreedyExitDepth = blk.exitLookaheadDepth;
+-		}
+-		else if ( !blk.greedy &&
+-				  blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
+-		{
+-			generateNonGreedyExitPath = true;
+-		}
+-		if ( generateNonGreedyExitPath ) {
+-			if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) {
+-				System.out.println("nongreedy (...)* loop; exit depth is "+
+-								   blk.exitLookaheadDepth);
+-			}
+-			String predictExit =
+-				getLookaheadTestExpression(blk.exitCache,
+-										   nonGreedyExitDepth);
+-			println("// nongreedy exit test");
+-			println("if ("+predictExit+") goto "+label+";");
+-		}
+-
+-		CppBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
+-		genBlockFinish(howToFinish, "goto " + label + ";");
+-
+-		tabs--;
+-		println("}");
+-		println(label+":;");
+-		println("} // ( ... )*");
+-
+-		// Restore previous AST generation
+-		currentASTResult = saveCurrentASTResult;
+-	}
+-	/** Generate an alternative.
+-	  * @param alt  The alternative to generate
+-	  * @param blk The block to which the alternative belongs
+-	  */
+-	protected void genAlt(Alternative alt, AlternativeBlock blk)
+-	{
+-		// Save the AST generation state, and set it to that of the alt
+-		boolean savegenAST = genAST;
+-		genAST = genAST && alt.getAutoGen();
+-
+-		boolean oldsaveTest = saveText;
+-		saveText = saveText && alt.getAutoGen();
+-
+-		// Reset the variable name map for the alternative
+-		Hashtable saveMap = treeVariableMap;
+-		treeVariableMap = new Hashtable();
+-
+-		// Generate try block around the alt for  error handling
+-		if (alt.exceptionSpec != null) {
+-			println("try {      // for error handling");
+-			tabs++;
+-		}
+-
+-		AlternativeElement elem = alt.head;
+-		while ( !(elem instanceof BlockEndElement) ) {
+-			elem.generate(); // alt can begin with anything. Ask target to gen.
+-			elem = elem.next;
+-		}
+-
+-		if ( genAST)
+-		{
+-			if (blk instanceof RuleBlock)
+-			{
+-				// Set the AST return value for the rule
+-				RuleBlock rblk = (RuleBlock)blk;
+-				if( usingCustomAST )
+-					println(rblk.getRuleName() + "_AST = "+labeledElementASTType+"(currentAST.root);");
+-				else
+-					println(rblk.getRuleName() + "_AST = currentAST.root;");
+-			}
+-			else if (blk.getLabel() != null) {
+-				// ### future: also set AST value for labeled subrules.
+-				// println(blk.getLabel() + "_AST = "+labeledElementASTType+"(currentAST.root);");
+-				antlrTool.warning("Labeled subrules are not implemented", grammar.getFilename(), blk.getLine(), blk.getColumn());
+-			}
+-		}
+-
+-		if (alt.exceptionSpec != null)
+-		{
+-			// close try block
+-			tabs--;
+-			println("}");
+-			genErrorHandler(alt.exceptionSpec);
+-		}
+-
+-		genAST = savegenAST;
+-		saveText = oldsaveTest;
+-
+-		treeVariableMap = saveMap;
+-	}
+-	/** Generate all the bitsets to be used in the parser or lexer
+-	 * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
+-	 * and the BitSet object declarations like
+-	 * "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
+-	 * Note that most languages do not support object initialization inside a
+-	 * class definition, so other code-generators may have to separate the
+-	 * bitset declarations from the initializations (e.g., put the
+-	 * initializations in the generated constructor instead).
+-	 * @param bitsetList The list of bitsets to generate.
+-	 * @param maxVocabulary Ensure that each generated bitset can contain at
+-	 *        least this value.
+-	 * @param prefix string glued in from of bitset names used for namespace
+-	 *        qualifications.
+-	 */
+-	protected void genBitsets(
+-		Vector bitsetList,
+-		int maxVocabulary,
+-		String prefix
+-	)
+-	{
+-		TokenManager tm = grammar.tokenManager;
+-
+-		println("");
+-
+-		for (int i = 0; i < bitsetList.size(); i++)
+-		{
+-			BitSet p = (BitSet)bitsetList.elementAt(i);
+-			// Ensure that generated BitSet is large enough for vocabulary
+-			p.growToInclude(maxVocabulary);
+-
+-			// initialization data
+-			println(
+-				"const unsigned long " + prefix + getBitsetName(i) + "_data_" + "[] = { " +
+-				p.toStringOfHalfWords() +
+-				" };"
+-			);
+-
+-			// Dump the contents of the bitset in readable format...
+-			String t = "// ";
+-			for( int j = 0; j < tm.getVocabulary().size(); j++ )
+-			{
+-				if ( p.member( j ) )
+-				{
+-					if ( (grammar instanceof LexerGrammar) )
+-						t += tm.getVocabulary().elementAt(j)+" ";
+-					else
+-						t += tm.getTokenStringAt(j)+" ";
+-
+-					if( t.length() > 70 )
+-					{
+-						println(t);
+-						t = "// ";
+-					}
+-				}
+-			}
+-			if ( t != "// " )
+-				println(t);
+-
+-			// BitSet object
+-			println(
+-				"const "+namespaceAntlr+"BitSet " + prefix + getBitsetName(i) + "(" +
+-				getBitsetName(i) + "_data_," + p.size()/32 +
+-				");"
+-			);
+-		}
+-	}
+-	protected void genBitsetsHeader(
+-		Vector bitsetList,
+-		int maxVocabulary
+-	) {
+-		println("");
+-		for (int i = 0; i < bitsetList.size(); i++)
+-		{
+-			BitSet p = (BitSet)bitsetList.elementAt(i);
+-			// Ensure that generated BitSet is large enough for vocabulary
+-			p.growToInclude(maxVocabulary);
+-			// initialization data
+-			println("static const unsigned long " + getBitsetName(i) + "_data_" + "[];");
+-			// BitSet object
+-			println("static const "+namespaceAntlr+"BitSet " + getBitsetName(i) + ";");
+-		}
+-	}
+-	/** Generate the finish of a block, using a combination of the info
+-	 * returned from genCommonBlock() and the action to perform when
+-	 * no alts were taken
+-	 * @param howToFinish The return of genCommonBlock()
+-	 * @param noViableAction What to generate when no alt is taken
+-	 */
+-	private void genBlockFinish(CppBlockFinishingInfo howToFinish, String noViableAction)
+-	{
+-		if (howToFinish.needAnErrorClause &&
+-			 (howToFinish.generatedAnIf || howToFinish.generatedSwitch)) {
+-			if ( howToFinish.generatedAnIf ) {
+-				println("else {");
+-			}
+-			else {
+-				println("{");
+-			}
+-			tabs++;
+-			println(noViableAction);
+-			tabs--;
+-			println("}");
+-		}
+-
+-		if ( howToFinish.postscript!=null ) {
+-			println(howToFinish.postscript);
+-		}
+-	}
+-	/** Generate the initaction for a block, which may be a RuleBlock or a
+-	 * plain AlternativeBLock.
+-	 * @blk The block for which the preamble is to be generated.
+-	 */
+-	protected void genBlockInitAction( AlternativeBlock blk )
+-	{
+-		// dump out init action
+-		if ( blk.initAction!=null ) {
+-			genLineNo(blk);
+-			printAction(processActionForSpecialSymbols(blk.initAction, blk.line,
+-																	 currentRule, null) );
+-			genLineNo2();
+-		}
+-	}
+-	/** Generate the header for a block, which may be a RuleBlock or a
+-	 * plain AlternativeBlock. This generates any variable declarations
+-	 * and syntactic-predicate-testing variables.
+-	 * @blk The block for which the preamble is to be generated.
+-	 */
+-	protected void genBlockPreamble(AlternativeBlock blk) {
+-		// define labels for rule blocks.
+-		if ( blk instanceof RuleBlock ) {
+-			RuleBlock rblk = (RuleBlock)blk;
+-			if ( rblk.labeledElements!=null ) {
+-				for (int i=0; i<rblk.labeledElements.size(); i++) {
+-
+-					AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
+-					//System.out.println("looking at labeled element: "+a);
+-					// Variables for labeled rule refs and subrules are different than
+-					// variables for grammar atoms.  This test is a little tricky because
+-					// we want to get all rule refs and ebnf, but not rule blocks or
+-					// syntactic predicates
+-					if (
+-						a instanceof RuleRefElement ||
+-						a instanceof AlternativeBlock &&
+-						!(a instanceof RuleBlock) &&
+-						!(a instanceof SynPredBlock) )
+-					{
+-						if ( !(a instanceof RuleRefElement) &&
+-							  ((AlternativeBlock)a).not &&
+-							  analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
+-						) {
+-							// Special case for inverted subrules that will be
+-							// inlined. Treat these like token or char literal
+-							// references
+-							println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-							if (grammar.buildAST) {
+-								genASTDeclaration( a );
+-							}
+-						}
+-						else
+-						{
+-							if (grammar.buildAST)
+-							{
+-								// Always gen AST variables for labeled elements,
+-								// even if the element itself is marked with !
+-								genASTDeclaration( a );
+-							}
+-							if ( grammar instanceof LexerGrammar )
+-								println(namespaceAntlr+"RefToken "+a.getLabel()+";");
+-
+-							if (grammar instanceof TreeWalkerGrammar) {
+-								// always generate rule-ref variables for tree walker
+-								println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-							}
+-						}
+-					}
+-					else
+-					{
+-						// It is a token or literal reference.  Generate the
+-						// correct variable type for this grammar
+-						println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-						// In addition, generate *_AST variables if building ASTs
+-						if (grammar.buildAST)
+-						{
+-							if (a instanceof GrammarAtom &&
+-								 ((GrammarAtom)a).getASTNodeType() != null )
+-							{
+-								GrammarAtom ga = (GrammarAtom)a;
+-								genASTDeclaration( a, "Ref"+ga.getASTNodeType() );
+-							}
+-							else
+-							{
+-								genASTDeclaration( a );
+-							}
+-						}
+-					}
+-				}
+-			}
+-		}
+-	}
+-	public void genBody(LexerGrammar g) throws IOException
+-	{
+-		outputFile = grammar.getClassName() + ".cpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-		//SAS: changed for proper text file io
+-
+-		genAST = false;	// no way to gen trees.
+-		saveText = true;	// save consumed characters.
+-
+-		tabs=0;
+-
+-		// Generate header common to all C++ output files
+-		genHeader(outputFile);
+-
+-		printHeaderAction(preIncludeCpp);
+-		// Generate header specific to lexer C++ file
+-		println("#include \"" + grammar.getClassName() + ".hpp\"");
+-		println("#include <antlr/CharBuffer.hpp>");
+-		println("#include <antlr/TokenStreamException.hpp>");
+-		println("#include <antlr/TokenStreamIOException.hpp>");
+-		println("#include <antlr/TokenStreamRecognitionException.hpp>");
+-		println("#include <antlr/CharStreamException.hpp>");
+-		println("#include <antlr/CharStreamIOException.hpp>");
+-		println("#include <antlr/NoViableAltForCharException.hpp>");
+-		if (grammar.debuggingOutput)
+-			println("#include <antlr/DebuggingInputBuffer.hpp>");
+-		println("");
+-		printHeaderAction(postIncludeCpp);
+-
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-
+-		// Generate user-defined lexer file preamble
+-		printAction(grammar.preambleAction);
+-
+-		// Generate lexer class definition
+-		String sup=null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-		}
+-		else {
+-			sup = grammar.getSuperClass();
+-			if (sup.lastIndexOf('.') != -1)
+-				sup = sup.substring(sup.lastIndexOf('.')+1);
+-			sup = namespaceAntlr + sup;
+-		}
+-
+-		if( noConstructors )
+-		{
+-			println("#if 0");
+-			println("// constructor creation turned of with 'noConstructor' option");
+-		}
+-		//
+-		// Generate the constructor from InputStream
+-		//
+-		println(grammar.getClassName() + "::" + grammar.getClassName() + "(" + namespaceStd + "istream& in)");
+-		tabs++;
+-		// if debugging, wrap the input buffer in a debugger
+-		if (grammar.debuggingOutput)
+-			println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(new "+namespaceAntlr+"CharBuffer(in)),"+g.caseSensitive+")");
+-		else
+-			println(": " + sup + "(new "+namespaceAntlr+"CharBuffer(in),"+g.caseSensitive+")");
+-		tabs--;
+-		println("{");
+-		tabs++;
+-
+-		// if debugging, set up array variables and call user-overridable
+-		//   debugging setup method
+-		if ( grammar.debuggingOutput ) {
+-			println("setRuleNames(_ruleNames);");
+-			println("setSemPredNames(_semPredNames);");
+-			println("setupDebugging();");
+-		}
+-
+-//		println("setCaseSensitive("+g.caseSensitive+");");
+-		println("initLiterals();");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		// Generate the constructor from InputBuffer
+-		println(grammar.getClassName() + "::" + grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib)");
+-		tabs++;
+-		// if debugging, wrap the input buffer in a debugger
+-		if (grammar.debuggingOutput)
+-			println(": " + sup + "(new "+namespaceAntlr+"DebuggingInputBuffer(ib),"+g.caseSensitive+")");
+-		else
+-			println(": " + sup + "(ib,"+g.caseSensitive+")");
+-		tabs--;
+-		println("{");
+-		tabs++;
+-
+-		// if debugging, set up array variables and call user-overridable
+-		//   debugging setup method
+-		if ( grammar.debuggingOutput ) {
+-			println("setRuleNames(_ruleNames);");
+-			println("setSemPredNames(_semPredNames);");
+-			println("setupDebugging();");
+-		}
+-
+-//		println("setCaseSensitive("+g.caseSensitive+");");
+-		println("initLiterals();");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		// Generate the constructor from LexerSharedInputState
+-		println(grammar.getClassName() + "::" + grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state)");
+-		tabs++;
+-		println(": " + sup + "(state,"+g.caseSensitive+")");
+-		tabs--;
+-		println("{");
+-		tabs++;
+-
+-		// if debugging, set up array variables and call user-overridable
+-		//   debugging setup method
+-		if ( grammar.debuggingOutput ) {
+-			println("setRuleNames(_ruleNames);");
+-			println("setSemPredNames(_semPredNames);");
+-			println("setupDebugging();");
+-		}
+-
+-//		println("setCaseSensitive("+g.caseSensitive+");");
+-		println("initLiterals();");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		if( noConstructors )
+-		{
+-			println("// constructor creation turned of with 'noConstructor' option");
+-			println("#endif");
+-		}
+-
+-		println("void " + grammar.getClassName() + "::initLiterals()");
+-		println("{");
+-		tabs++;
+-		// Generate the initialization of the map
+-		// containing the string literals used in the lexer
+-		// The literals variable itself is in CharScanner
+-		Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
+-		while ( keys.hasMoreElements() ) {
+-			String key = (String)keys.nextElement();
+-			if ( key.charAt(0) != '"' ) {
+-				continue;
+-			}
+-			TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
+-			if ( sym instanceof StringLiteralSymbol ) {
+-				StringLiteralSymbol s = (StringLiteralSymbol)sym;
+-				println("literals["+s.getId()+"] = "+s.getTokenType()+";");
+-			}
+-		}
+-
+-		// Generate the setting of various generated options.
+-		tabs--;
+-		println("}");
+-
+-		Enumeration ids;
+-		// generate the rule name array for debugging
+-		if (grammar.debuggingOutput) {
+-			println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
+-			tabs++;
+-
+-			ids = grammar.rules.elements();
+-			int ruleNum=0;
+-			while ( ids.hasMoreElements() ) {
+-				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-				if ( sym instanceof RuleSymbol)
+-					println("\""+((RuleSymbol)sym).getId()+"\",");
+-			}
+-			println("0");
+-			tabs--;
+-			println("};");
+-		}
+-
+-		// Generate nextToken() rule.
+-		// nextToken() is a synthetic lexer rule that is the implicit OR of all
+-		// user-defined lexer rules.
+-		genNextToken();
+-
+-		// Generate code for each rule in the lexer
+-		ids = grammar.rules.elements();
+-		int ruleNum=0;
+-		while ( ids.hasMoreElements() ) {
+-			RuleSymbol sym = (RuleSymbol) ids.nextElement();
+-			// Don't generate the synthetic rules
+-			if (!sym.getId().equals("mnextToken")) {
+-				genRule(sym, false, ruleNum++, grammar.getClassName() + "::");
+-			}
+-			exitIfError();
+-		}
+-
+-		// Generate the semantic predicate map for debugging
+-		if (grammar.debuggingOutput)
+-			genSemPredMap(grammar.getClassName() + "::");
+-
+-		// Generate the bitsets used throughout the lexer
+-		genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size(), grammar.getClassName() + "::" );
+-
+-		println("");
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the lexer output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	public void genInitFactory( Grammar g )
+-	{
+-		// Generate the method to initialize an ASTFactory when we're
+-		// building AST's
+-		String param_name = "factory ";
+-		if( ! g.buildAST )
+-			param_name = "";
+-
+-		println("void "+ g.getClassName() + "::initializeASTFactory( "+namespaceAntlr+"ASTFactory& "+param_name+")");
+-		println("{");
+-		tabs++;
+-
+-		if( g.buildAST )
+-		{
+-			// print out elements collected...
+-			Enumeration e = astTypes.elements();
+-			while( e.hasMoreElements() )
+-				println((String)e.nextElement());
+-
+-			println("factory.setMaxNodeType("+grammar.tokenManager.maxTokenType()+");");
+-		}
+-		tabs--;
+-		println("}");
+-	}
+-	// FIXME: and so why are we passing here a g param while inside
+-	// we merrily use the global grammar.
+-	public void genBody(ParserGrammar g) throws IOException
+-	{
+-		// Open the output stream for the parser and set the currentOutput
+-		outputFile = grammar.getClassName() + ".cpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-
+-		genAST = grammar.buildAST;
+-
+-		tabs = 0;
+-
+-		// Generate the header common to all output files.
+-		genHeader(outputFile);
+-
+-		printHeaderAction(preIncludeCpp);
+-
+-		// Generate header for the parser
+-		println("#include \"" + grammar.getClassName() + ".hpp\"");
+-		println("#include <antlr/NoViableAltException.hpp>");
+-		println("#include <antlr/SemanticException.hpp>");
+-		println("#include <antlr/ASTFactory.hpp>");
+-
+-		printHeaderAction(postIncludeCpp);
+-
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-
+-		// Output the user-defined parser preamble
+-		printAction(grammar.preambleAction);
+-
+-		String sup=null;
+-		if ( grammar.superClass!=null )
+-			sup = grammar.superClass;
+-		else {
+-			sup = grammar.getSuperClass();
+-			if (sup.lastIndexOf('.') != -1)
+-				sup = sup.substring(sup.lastIndexOf('.')+1);
+-			sup = namespaceAntlr + sup;
+-		}
+-
+-		// set up an array of all the rule names so the debugger can
+-		// keep track of them only by number -- less to store in tree...
+-		if (grammar.debuggingOutput) {
+-			println("const char* "+grammar.getClassName()+"::_ruleNames[] = {");
+-			tabs++;
+-
+-			Enumeration ids = grammar.rules.elements();
+-			int ruleNum=0;
+-			while ( ids.hasMoreElements() ) {
+-				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-				if ( sym instanceof RuleSymbol)
+-					println("\""+((RuleSymbol)sym).getId()+"\",");
+-			}
+-			println("0");
+-			tabs--;
+-			println("};");
+-		}
+-
+-		// Generate _initialize function
+-		// disabled since it isn't used anymore..
+-
+-//		println("void " + grammar.getClassName() + "::_initialize(void)");
+-//		println("{");
+-//		tabs++;
+-
+-		// if debugging, set up arrays and call the user-overridable
+-		//   debugging setup method
+-//		if ( grammar.debuggingOutput ) {
+-//			println("setRuleNames(_ruleNames);");
+-//			println("setSemPredNames(_semPredNames);");
+-//			println("setupDebugging();");
+-//		}
+-//		tabs--;
+-//		println("}");
+-		if( noConstructors )
+-		{
+-			println("#if 0");
+-			println("// constructor creation turned of with 'noConstructor' option");
+-		}
+-
+-		// Generate parser class constructor from TokenBuffer
+-		print(grammar.getClassName() + "::" + grammar.getClassName());
+-		println("("+namespaceAntlr+"TokenBuffer& tokenBuf, int k)");
+-		println(": " + sup + "(tokenBuf,k)");
+-		println("{");
+-//		tabs++;
+-//		println("_initialize();");
+-//		tabs--;
+-		println("}");
+-		println("");
+-
+-		print(grammar.getClassName() + "::" + grammar.getClassName());
+-		println("("+namespaceAntlr+"TokenBuffer& tokenBuf)");
+-		println(": " + sup + "(tokenBuf," + grammar.maxk + ")");
+-		println("{");
+-//		tabs++;
+-//		println("_initialize();");
+-//		tabs--;
+-		println("}");
+-		println("");
+-
+-		// Generate parser class constructor from TokenStream
+-		print(grammar.getClassName() + "::" + grammar.getClassName());
+-		println("("+namespaceAntlr+"TokenStream& lexer, int k)");
+-		println(": " + sup + "(lexer,k)");
+-		println("{");
+-//		tabs++;
+-//		println("_initialize();");
+-//		tabs--;
+-		println("}");
+-		println("");
+-
+-		print(grammar.getClassName() + "::" + grammar.getClassName());
+-		println("("+namespaceAntlr+"TokenStream& lexer)");
+-		println(": " + sup + "(lexer," + grammar.maxk + ")");
+-		println("{");
+-//		tabs++;
+-//		println("_initialize();");
+-//		tabs--;
+-		println("}");
+-		println("");
+-
+-		print(grammar.getClassName() + "::" + grammar.getClassName());
+-		println("(const "+namespaceAntlr+"ParserSharedInputState& state)");
+-		println(": " + sup + "(state," + grammar.maxk + ")");
+-		println("{");
+-//		tabs++;
+-//		println("_initialize();");
+-//		tabs--;
+-		println("}");
+-		println("");
+-
+-		if( noConstructors )
+-		{
+-			println("// constructor creation turned of with 'noConstructor' option");
+-			println("#endif");
+-		}
+-
+-		astTypes = new Vector();
+-
+-		// Generate code for each rule in the grammar
+-		Enumeration ids = grammar.rules.elements();
+-		int ruleNum=0;
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-			if ( sym instanceof RuleSymbol) {
+-				RuleSymbol rs = (RuleSymbol)sym;
+-				genRule(rs, rs.references.size()==0, ruleNum++, grammar.getClassName() + "::");
+-			}
+-			exitIfError();
+-		}
+-
+-		genInitFactory( g );
+-
+-		// Generate the token names
+-		genTokenStrings(grammar.getClassName() + "::");
+-
+-		// Generate the bitsets used throughout the grammar
+-		genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType(), grammar.getClassName() + "::" );
+-
+-		// Generate the semantic predicate map for debugging
+-		if (grammar.debuggingOutput)
+-			genSemPredMap(grammar.getClassName() + "::");
+-
+-		// Close class definition
+-		println("");
+-		println("");
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the parser output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	public void genBody(TreeWalkerGrammar g) throws IOException
+-	{
+-		// Open the output stream for the parser and set the currentOutput
+-		outputFile = grammar.getClassName() + ".cpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-		//SAS: changed for proper text file io
+-
+-		genAST = grammar.buildAST;
+-		tabs = 0;
+-
+-		// Generate the header common to all output files.
+-		genHeader(outputFile);
+-
+-		printHeaderAction(preIncludeCpp);
+-
+-		// Generate header for the parser
+-		println("#include \"" + grammar.getClassName() + ".hpp\"");
+-		println("#include <antlr/Token.hpp>");
+-		println("#include <antlr/AST.hpp>");
+-		println("#include <antlr/NoViableAltException.hpp>");
+-		println("#include <antlr/MismatchedTokenException.hpp>");
+-		println("#include <antlr/SemanticException.hpp>");
+-		println("#include <antlr/BitSet.hpp>");
+-
+-		printHeaderAction(postIncludeCpp);
+-
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-
+-		// Output the user-defined parser premamble
+-		printAction(grammar.preambleAction);
+-
+-		// Generate parser class definition
+-		String sup = null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-		}
+-		else {
+-			sup = grammar.getSuperClass();
+-			if (sup.lastIndexOf('.') != -1)
+-				sup = sup.substring(sup.lastIndexOf('.')+1);
+-			sup = namespaceAntlr + sup;
+-		}
+-		if( noConstructors )
+-		{
+-			println("#if 0");
+-			println("// constructor creation turned of with 'noConstructor' option");
+-		}
+-
+-		// Generate default parser class constructor
+-		println(grammar.getClassName() + "::" + grammar.getClassName() + "()");
+-		println("\t: "+namespaceAntlr+"TreeParser() {");
+-		tabs++;
+-//		println("setTokenNames(_tokenNames);");
+-		tabs--;
+-		println("}");
+-
+-		if( noConstructors )
+-		{
+-			println("// constructor creation turned of with 'noConstructor' option");
+-			println("#endif");
+-		}
+-		println("");
+-
+-		astTypes = new Vector();
+-
+-		// Generate code for each rule in the grammar
+-		Enumeration ids = grammar.rules.elements();
+-		int ruleNum=0;
+-		String ruleNameInits = "";
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-			if ( sym instanceof RuleSymbol) {
+-				RuleSymbol rs = (RuleSymbol)sym;
+-				genRule(rs, rs.references.size()==0, ruleNum++, grammar.getClassName() + "::");
+-			}
+-			exitIfError();
+-		}
+-
+-		// Generate the ASTFactory initialization function
+-		genInitFactory( grammar );
+-		// Generate the token names
+-		genTokenStrings(grammar.getClassName() + "::");
+-
+-		// Generate the bitsets used throughout the grammar
+-		genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType(), grammar.getClassName() + "::" );
+-
+-		// Close class definition
+-		println("");
+-		println("");
+-
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the parser output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	/** Generate a series of case statements that implement a BitSet test.
+-	 * @param p The Bitset for which cases are to be generated
+-	 */
+-	protected void genCases(BitSet p) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genCases("+p+")");
+-		int[] elems;
+-
+-		elems = p.toArray();
+-		// Wrap cases four-per-line for lexer, one-per-line for parser
+-		int wrap = 1; //(grammar instanceof LexerGrammar) ? 4 : 1;
+-		int j=1;
+-		boolean startOfLine = true;
+-		for (int i = 0; i < elems.length; i++) {
+-			if (j==1) {
+-				print("");
+-			} else {
+-				_print("  ");
+-			}
+-			_print("case " + getValueString(elems[i]) + ":");
+-
+-			if (j==wrap) {
+-				_println("");
+-				startOfLine = true;
+-				j=1;
+-			}
+-			else {
+-				j++;
+-				startOfLine = false;
+-			}
+-		}
+-		if (!startOfLine) {
+-			_println("");
+-		}
+-	}
+-	/** Generate common code for a block of alternatives; return a postscript
+-	 * that needs to be generated at the end of the block.  Other routines
+-	 * may append else-clauses and such for error checking before the postfix
+-	 * is generated.
+-	 * If the grammar is a lexer, then generate alternatives in an order where
+-	 * alternatives requiring deeper lookahead are generated first, and
+-	 * EOF in the lookahead set reduces the depth of the lookahead.
+-	 * @param blk The block to generate
+-	 * @param noTestForSingle If true, then it does not generate a test for a single alternative.
+-	 */
+-	public CppBlockFinishingInfo genCommonBlock(
+-		AlternativeBlock blk,
+-		boolean noTestForSingle )
+-	{
+-		int nIF=0;
+-		boolean createdLL1Switch = false;
+-		int closingBracesOfIFSequence = 0;
+-		CppBlockFinishingInfo finishingInfo = new CppBlockFinishingInfo();
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genCommonBlk("+blk+")");
+-
+-		// Save the AST generation state, and set it to that of the block
+-		boolean savegenAST = genAST;
+-		genAST = genAST && blk.getAutoGen();
+-
+-		boolean oldsaveTest = saveText;
+-		saveText = saveText && blk.getAutoGen();
+-
+-		// Is this block inverted?  If so, generate special-case code
+-		if ( blk.not &&
+-			analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar) )
+-		{
+-			Lookahead p = analyzer.look(1, blk);
+-			// Variable assignment for labeled elements
+-			if (blk.getLabel() != null && syntacticPredLevel == 0) {
+-				println(blk.getLabel() + " = " + lt1Value + ";");
+-			}
+-
+-			// AST
+-			genElementAST(blk);
+-
+-			String astArgs="";
+-			if (grammar instanceof TreeWalkerGrammar) {
+-				if( usingCustomAST )
+-					astArgs=namespaceAntlr+"RefAST"+"(_t),";
+-				else
+-					astArgs="_t,";
+-			}
+-
+-			// match the bitset for the alternative
+-			println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
+-
+-			// tack on tree cursor motion if doing a tree walker
+-			if (grammar instanceof TreeWalkerGrammar)
+-			{
+-				println("_t = _t->getNextSibling();");
+-			}
+-			return finishingInfo;
+-		}
+-
+-		// Special handling for single alt
+-		if (blk.getAlternatives().size() == 1)
+-		{
+-			Alternative alt = blk.getAlternativeAt(0);
+-			// Generate a warning if there is a synPred for single alt.
+-			if (alt.synPred != null)
+-			{
+-				antlrTool.warning(
+-								 "Syntactic predicate superfluous for single alternative",
+-								 grammar.getFilename(),
+-								 blk.getAlternativeAt(0).synPred.getLine(),
+-								 blk.getAlternativeAt(0).synPred.getColumn()
+-				);
+-			}
+-			if (noTestForSingle)
+-			{
+-				if (alt.semPred != null)
+-				{
+-					// Generate validating predicate
+-					genSemPred(alt.semPred, blk.line);
+-				}
+-				genAlt(alt, blk);
+-				return finishingInfo;
+-			}
+-		}
+-
+-		// count number of simple LL(1) cases; only do switch for
+-		// many LL(1) cases (no preds, no end of token refs)
+-		// We don't care about exit paths for (...)*, (...)+
+-		// because we don't explicitly have a test for them
+-		// as an alt in the loop.
+-		//
+-		// Also, we now count how many unicode lookahead sets
+-		// there are--they must be moved to DEFAULT or ELSE
+-		// clause.
+-
+-		int nLL1 = 0;
+-		for (int i=0; i<blk.getAlternatives().size(); i++)
+-		{
+-			Alternative a = blk.getAlternativeAt(i);
+-			if ( suitableForCaseExpression(a) )
+-				nLL1++;
+-		}
+-
+-		// do LL(1) cases
+-		if ( nLL1 >= makeSwitchThreshold )
+-		{
+-			// Determine the name of the item to be compared
+-			String testExpr = lookaheadString(1);
+-			createdLL1Switch = true;
+-			// when parsing trees, convert null to valid tree node with NULL lookahead
+-			if ( grammar instanceof TreeWalkerGrammar )
+-			{
+-				println("if (_t == "+labeledElementASTInit+" )");
+-				tabs++;
+-				println("_t = ASTNULL;");
+-				tabs--;
+-			}
+-			println("switch ( "+testExpr+") {");
+-			for (int i=0; i<blk.alternatives.size(); i++)
+-			{
+-				Alternative alt = blk.getAlternativeAt(i);
+-				// ignore any non-LL(1) alts, predicated alts or end-of-token alts
+-				// or end-of-token alts for case expressions
+-				if ( !suitableForCaseExpression(alt) )
+-				{
+-					continue;
+-				}
+-				Lookahead p = alt.cache[1];
+-				if (p.fset.degree() == 0 && !p.containsEpsilon())
+-				{
+-					antlrTool.warning("Alternate omitted due to empty prediction set",
+-						grammar.getFilename(),
+-						alt.head.getLine(), alt.head.getColumn());
+-				}
+-				else
+-				{
+-					genCases(p.fset);
+-					println("{");
+-					tabs++;
+-					genAlt(alt, blk);
+-					println("break;");
+-					tabs--;
+-					println("}");
+-				}
+-			}
+-			println("default:");
+-			tabs++;
+-		}
+-
+-		// do non-LL(1) and nondeterministic cases
+-		// This is tricky in the lexer, because of cases like:
+-		//     STAR : '*' ;
+-		//     ASSIGN_STAR : "*=";
+-		// Since nextToken is generated without a loop, then the STAR will
+-		// have end-of-token as it's lookahead set for LA(2).  So, we must generate the
+-		// alternatives containing trailing end-of-token in their lookahead sets *after*
+-		// the alternatives without end-of-token.  This implements the usual
+-		// lexer convention that longer matches come before shorter ones, e.g.
+-		// "*=" matches ASSIGN_STAR not STAR
+-		//
+-		// For non-lexer grammars, this does not sort the alternates by depth
+-		// Note that alts whose lookahead is purely end-of-token at k=1 end up
+-		// as default or else clauses.
+-		int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
+-		for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
+-			if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("checking depth "+altDepth);
+-			for (int i=0; i<blk.alternatives.size(); i++) {
+-				Alternative alt = blk.getAlternativeAt(i);
+-				if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genAlt: "+i);
+-				// if we made a switch above, ignore what we already took care
+-				// of.  Specifically, LL(1) alts with no preds
+-				// that do not have end-of-token in their prediction set
+-				if ( createdLL1Switch &&
+-					 suitableForCaseExpression(alt) )
+-				{
+-					if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
+-						System.out.println("ignoring alt because it was in the switch");
+-					continue;
+-				}
+-				String e;
+-
+-				boolean unpredicted = false;
+-
+-				if (grammar instanceof LexerGrammar) {
+-					// Calculate the "effective depth" of the alt, which is the max
+-					// depth at which cache[depth]!=end-of-token
+-					int effectiveDepth = alt.lookaheadDepth;
+-					if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC)
+-					{
+-						// use maximum lookahead
+-						effectiveDepth = grammar.maxk;
+-					}
+-					while ( effectiveDepth >= 1 &&
+-							 alt.cache[effectiveDepth].containsEpsilon() )
+-					{
+-						effectiveDepth--;
+-					}
+-					// Ignore alts whose effective depth is other than the ones we
+-					// are generating for this iteration.
+-					if (effectiveDepth != altDepth)
+-					{
+-						if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR )
+-							System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
+-						continue;
+-					}
+-					unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
+-					e = getLookaheadTestExpression(alt, effectiveDepth);
+-				}
+-				else
+-				{
+-					unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
+-					e = getLookaheadTestExpression(alt, grammar.maxk);
+-				}
+-
+-				// Was it a big unicode range that forced unsuitability
+-				// for a case expression?
+-				if ( alt.cache[1].fset.degree() > caseSizeThreshold &&
+-					  suitableForCaseExpression(alt))
+-				{
+-					if ( nIF==0 )
+-					{
+-						// generate this only for the first if the elseif's
+-						// are covered by this one
+-						if ( grammar instanceof TreeWalkerGrammar ) {
+-							println("if (_t == "+labeledElementASTInit+" )");
+-							tabs++;
+-							println("_t = ASTNULL;");
+-							tabs--;
+-						}
+-						println("if " + e + " {");
+-					}
+-					else
+-						println("else if " + e + " {");
+-				}
+-				else if (unpredicted &&
+-							alt.semPred==null &&
+-							alt.synPred==null)
+-				{
+-					// The alt has empty prediction set and no
+-					// predicate to help out.  if we have not
+-					// generated a previous if, just put {...} around
+-					// the end-of-token clause
+-					if ( nIF==0 ) {
+-						println("{");
+-					}
+-					else {
+-						println("else {");
+-					}
+-					finishingInfo.needAnErrorClause = false;
+-				}
+-				else
+-				{
+-					// check for sem and syn preds
+-					// Add any semantic predicate expression to the lookahead test
+-					if ( alt.semPred != null ) {
+-						// if debugging, wrap the evaluation of the predicate in a method
+-						//
+-						// translate $ and # references
+-						ActionTransInfo tInfo = new ActionTransInfo();
+-						String actionStr = processActionForSpecialSymbols(alt.semPred,
+-																		  blk.line,
+-																		  currentRule,
+-																		  tInfo);
+-						// ignore translation info...we don't need to do anything with it.
+-
+-						// call that will inform SemanticPredicateListeners of the
+-						// result
+-						if ( grammar.debuggingOutput &&
+-							  ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar))
+-							 )
+-							e = "("+e+"&& fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.PREDICTING,"+ //FIXME
+-									addSemPred(charFormatter.escapeString(actionStr))+","+actionStr+"))";
+-						else
+-							e = "("+e+"&&("+actionStr +"))";
+-					}
+-
+-					// Generate any syntactic predicates
+-					if ( nIF>0 ) {
+-						if ( alt.synPred != null ) {
+-							println("else {");
+-							tabs++;
+-							genSynPred( alt.synPred, e );
+-							closingBracesOfIFSequence++;
+-						}
+-						else {
+-							println("else if " + e + " {");
+-						}
+-					}
+-					else {
+-						if ( alt.synPred != null ) {
+-							genSynPred( alt.synPred, e );
+-						}
+-						else {
+-							// when parsing trees, convert null to valid tree node
+-							// with NULL lookahead.
+-							if ( grammar instanceof TreeWalkerGrammar ) {
+-								println("if (_t == "+labeledElementASTInit+" )");
+-								tabs++;
+-								println("_t = ASTNULL;");
+-								tabs--;
+-							}
+-							println("if " + e + " {");
+-						}
+-					}
+-
+-				}
+-
+-				nIF++;
+-				tabs++;
+-				genAlt(alt, blk);
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-		String ps = "";
+-		for (int i=1; i<=closingBracesOfIFSequence; i++) {
+-			tabs--; // does JavaCodeGenerator need this?
+-			ps+="}";
+-		}
+-
+-		// Restore the AST generation state
+-		genAST = savegenAST;
+-
+-		// restore save text state
+-		saveText=oldsaveTest;
+-
+-		// Return the finishing info.
+-		if ( createdLL1Switch ) {
+-			tabs--;
+-			finishingInfo.postscript = ps+"}";
+-			finishingInfo.generatedSwitch = true;
+-			finishingInfo.generatedAnIf = nIF>0;
+-			//return new CppBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
+-
+-		}
+-		else {
+-			finishingInfo.postscript = ps;
+-			finishingInfo.generatedSwitch = false;
+-			finishingInfo.generatedAnIf = nIF>0;
+-			//return new CppBlockFinishingInfo(ps, false,nIF>0);
+-		}
+-		return finishingInfo;
+-	}
+-
+-	private static boolean suitableForCaseExpression(Alternative a) {
+-		return a.lookaheadDepth == 1 &&
+-			a.semPred == null &&
+-			!a.cache[1].containsEpsilon() &&
+-			a.cache[1].fset.degree()<=caseSizeThreshold;
+-	}
+-
+-	/** Generate code to link an element reference into the AST
+-	 */
+-	private void genElementAST(AlternativeElement el) {
+-
+-		// handle case where you're not building trees, but are in tree walker.
+-		// Just need to get labels set up.
+-		if ( grammar instanceof TreeWalkerGrammar && !grammar.buildAST )
+-		{
+-			String elementRef;
+-			String astName;
+-
+-			// Generate names and declarations of the AST variable(s)
+-			if (el.getLabel() == null)
+-			{
+-				elementRef = lt1Value;
+-				// Generate AST variables for unlabeled stuff
+-				astName = "tmp" + astVarNumber + "_AST";
+-				astVarNumber++;
+-				// Map the generated AST variable in the alternate
+-				mapTreeVariable(el, astName);
+-				// Generate an "input" AST variable also
+-				println(labeledElementASTType+" "+astName+"_in = "+elementRef+";");
+-			}
+-			return;
+-		}
+-
+-		if (grammar.buildAST && syntacticPredLevel == 0)
+-		{
+-			boolean needASTDecl =
+-				( genAST && (el.getLabel() != null ||
+-				  el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG ));
+-
+-			// RK: if we have a grammar element always generate the decl
+-			// since some guy can access it from an action and we can't
+-			// peek ahead (well not without making a mess).
+-			// I'd prefer taking this out.
+-			if( el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
+-				 (el instanceof TokenRefElement) )
+-				needASTDecl = true;
+-
+-			boolean doNoGuessTest =
+-				( grammar.hasSyntacticPredicate && needASTDecl );
+-
+-			String elementRef;
+-			String astNameBase;
+-
+-			// Generate names and declarations of the AST variable(s)
+-			if (el.getLabel() != null)
+-			{
+-				// if the element is labeled use that name...
+-				elementRef = el.getLabel();
+-				astNameBase = el.getLabel();
+-			}
+-			else
+-			{
+-				// else generate a temporary name...
+-				elementRef = lt1Value;
+-				// Generate AST variables for unlabeled stuff
+-				astNameBase = "tmp" + astVarNumber;
+-				astVarNumber++;
+-			}
+-
+-			// Generate the declaration if required.
+-			if ( needASTDecl )
+-			{
+-				if ( el instanceof GrammarAtom )
+-				{
+-					GrammarAtom ga = (GrammarAtom)el;
+-					if ( ga.getASTNodeType()!=null )
+-					{
+-						genASTDeclaration( el, astNameBase, "Ref"+ga.getASTNodeType() );
+-//						println("Ref"+ga.getASTNodeType()+" " + astName + ";");
+-					}
+-					else
+-					{
+-						genASTDeclaration( el, astNameBase, labeledElementASTType );
+-//						println(labeledElementASTType+" " + astName + " = "+labeledElementASTInit+";");
+-					}
+-				}
+-				else
+-				{
+-					genASTDeclaration( el, astNameBase, labeledElementASTType );
+-//					println(labeledElementASTType+" " + astName + " = "+labeledElementASTInit+";");
+-				}
+-			}
+-
+-			// for convenience..
+-			String astName = astNameBase + "_AST";
+-
+-			// Map the generated AST variable in the alternate
+-			mapTreeVariable(el, astName);
+-			if (grammar instanceof TreeWalkerGrammar)
+-			{
+-				// Generate an "input" AST variable also
+-				println(labeledElementASTType+" " + astName + "_in = "+labeledElementASTInit+";");
+-			}
+-
+-			// Enclose actions with !guessing
+-			if (doNoGuessTest) {
+-				println("if ( inputState->guessing == 0 ) {");
+-				tabs++;
+-			}
+-
+-			// if something has a label assume it will be used
+-			// so we must initialize the RefAST
+-			if (el.getLabel() != null)
+-			{
+-				if ( el instanceof GrammarAtom )
+-				{
+-					println(astName + " = "+
+-							  getASTCreateString((GrammarAtom)el,elementRef) + ";");
+-				}
+-				else
+-				{
+-					println(astName + " = "+
+-							  getASTCreateString(elementRef) + ";");
+-				}
+-			}
+-
+-			// if it has no label but a declaration exists initialize it.
+-			if( el.getLabel() == null && needASTDecl )
+-			{
+-				elementRef = lt1Value;
+-				if ( el instanceof GrammarAtom )
+-				{
+-					println(astName + " = "+
+-							  getASTCreateString((GrammarAtom)el,elementRef) + ";");
+-				}
+-				else
+-				{
+-					println(astName + " = "+
+-							  getASTCreateString(elementRef) + ";");
+-				}
+-				// Map the generated AST variable in the alternate
+-				if (grammar instanceof TreeWalkerGrammar)
+-				{
+-					// set "input" AST variable also
+-					println(astName + "_in = " + elementRef + ";");
+-				}
+-			}
+-
+-			if (genAST)
+-			{
+-				switch (el.getAutoGenType())
+-				{
+-				case GrammarElement.AUTO_GEN_NONE:
+-					if( usingCustomAST ||
+-						 (el instanceof GrammarAtom &&
+-						  ((GrammarAtom)el).getASTNodeType() != null) )
+-						println("astFactory->addASTChild(currentAST, "+namespaceAntlr+"RefAST("+ astName + "));");
+-					else
+-						println("astFactory->addASTChild(currentAST, "+ astName + ");");
+-					//						println("astFactory.addASTChild(currentAST, "+namespaceAntlr+"RefAST(" + astName + "));");
+-					break;
+-				case GrammarElement.AUTO_GEN_CARET:
+-					if( usingCustomAST ||
+-						 (el instanceof GrammarAtom &&
+-						 ((GrammarAtom)el).getASTNodeType() != null) )
+-						println("astFactory->makeASTRoot(currentAST, "+namespaceAntlr+"RefAST(" + astName + "));");
+-					else
+-						println("astFactory->makeASTRoot(currentAST, " + astName + ");");
+-					break;
+-				default:
+-					break;
+-				}
+-			}
+-			if (doNoGuessTest)
+-			{
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-	}
+-	/** Close the try block and generate catch phrases
+-	 * if the element has a labeled handler in the rule
+-	 */
+-	private void genErrorCatchForElement(AlternativeElement el) {
+-		if (el.getLabel() == null) return;
+-		String r = el.enclosingRuleName;
+-		if ( grammar instanceof LexerGrammar ) {
+-			r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
+-		}
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
+-		if (rs == null) {
+-			antlrTool.panic("Enclosing rule not found!");
+-		}
+-		ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
+-		if (ex != null) {
+-			tabs--;
+-			println("}");
+-			genErrorHandler(ex);
+-		}
+-	}
+-	/** Generate the catch phrases for a user-specified error handler */
+-	private void genErrorHandler(ExceptionSpec ex)
+-	{
+-		// Each ExceptionHandler in the ExceptionSpec is a separate catch
+-		for (int i = 0; i < ex.handlers.size(); i++)
+-		{
+-			ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
+-			// Generate catch phrase
+-			println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
+-			tabs++;
+-			if (grammar.hasSyntacticPredicate) {
+-				println("if (inputState->guessing==0) {");
+-				tabs++;
+-			}
+-
+-			// When not guessing, execute user handler action
+-			ActionTransInfo tInfo = new ActionTransInfo();
+-			genLineNo(handler.action);
+-			printAction(
+-				processActionForSpecialSymbols( handler.action.getText(),
+-														 handler.action.getLine(),
+-														 currentRule, tInfo )
+-			);
+-			genLineNo2();
+-
+-			if (grammar.hasSyntacticPredicate)
+-			{
+-				tabs--;
+-				println("} else {");
+-				tabs++;
+-				// When guessing, rethrow exception
+-				println("throw;");
+-				tabs--;
+-				println("}");
+-			}
+-			// Close catch phrase
+-			tabs--;
+-			println("}");
+-		}
+-	}
+-	/** Generate a try { opening if the element has a labeled handler in the rule */
+-	private void genErrorTryForElement(AlternativeElement el) {
+-		if (el.getLabel() == null) return;
+-		String r = el.enclosingRuleName;
+-		if ( grammar instanceof LexerGrammar ) {
+-			r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
+-		}
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
+-		if (rs == null) {
+-			antlrTool.panic("Enclosing rule not found!");
+-		}
+-		ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
+-		if (ex != null) {
+-			println("try { // for error handling");
+-			tabs++;
+-		}
+-	}
+-	/** Generate a header that is common to all C++ files */
+-	protected void genHeader(String fileName)
+-	{
+-		println("/* $ANTLR "+antlrTool.version+": "+
+-				"\""+antlrTool.fileMinusPath(antlrTool.grammarFile)+"\""+
+-				" -> "+
+-				"\""+fileName+"\"$ */");
+-	}
+-
+-	// these are unique to C++ mode
+-	public void genInclude(LexerGrammar g) throws IOException
+-	{
+-		outputFile = grammar.getClassName() + ".hpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-		//SAS: changed for proper text file io
+-
+-		genAST = false;	// no way to gen trees.
+-		saveText = true;	// save consumed characters.
+-
+-		tabs=0;
+-
+-		// Generate a guard wrapper
+-		println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
+-		println("#define INC_"+grammar.getClassName()+"_hpp_");
+-		println("");
+-
+-		printHeaderAction(preIncludeHpp);
+-
+-		println("#include <antlr/config.hpp>");
+-
+-		// Generate header common to all C++ output files
+-		genHeader(outputFile);
+-
+-		// Generate header specific to lexer header file
+-		println("#include <antlr/CommonToken.hpp>");
+-		println("#include <antlr/InputBuffer.hpp>");
+-		println("#include <antlr/BitSet.hpp>");
+-		println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");
+-
+-		// Find the name of the super class
+-		String sup=null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-
+-			println("\n// Include correct superclass header with a header statement for example:");
+-			println("// header \"post_include_hpp\" {");
+-			println("// #include \""+sup+".hpp\"");
+-			println("// }");
+-			println("// Or....");
+-			println("// header {");
+-			println("// #include \""+sup+".hpp\"");
+-			println("// }\n");
+-		}
+-		else {
+-			sup = grammar.getSuperClass();
+-			if (sup.lastIndexOf('.') != -1)
+-				sup = sup.substring(sup.lastIndexOf('.')+1);
+-			println("#include <antlr/"+sup+".hpp>");
+-			sup = namespaceAntlr + sup;
+-		}
+-
+-		// Do not use printAction because we assume tabs==0
+-		printHeaderAction(postIncludeHpp);
+-
+-		if (nameSpace != null)
+-			   nameSpace.emitDeclarations(currentOutput);
+-
+-		printHeaderAction("");
+-
+-		// print javadoc comment if any
+-		if ( grammar.comment!=null ) {
+-			_println(grammar.comment);
+-		}
+-
+-		// Generate lexer class definition
+-		print("class CUSTOM_API " + grammar.getClassName() + " : public " + sup);
+-		println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
+-
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-		if ( tsuffix != null ) {
+-			String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-			if ( suffix != null ) {
+-				print(", "+suffix);  // must be an interface name for Java
+-			}
+-		}
+-		println("{");
+-
+-		// Generate user-defined lexer class members
+-		if (grammar.classMemberAction != null) {
+-			genLineNo(grammar.classMemberAction);
+-			print(
+-				processActionForSpecialSymbols(grammar.classMemberAction.getText(),
+-														 grammar.classMemberAction.getLine(),
+-														 currentRule, null)
+-			);
+-			genLineNo2();
+-		}
+-
+-		// Generate initLiterals() method
+-		tabs=0;
+-		println("private:");
+-		tabs=1;
+-		println("void initLiterals();");
+-
+-		// Generate getCaseSensitiveLiterals() method
+-		tabs=0;
+-		println("public:");
+-		tabs=1;
+-		println("bool getCaseSensitiveLiterals() const");
+-		println("{");
+-		tabs++;
+-		println("return "+g.caseSensitiveLiterals + ";");
+-		tabs--;
+-		println("}");
+-
+-		// Make constructors public
+-		tabs=0;
+-		println("public:");
+-		tabs=1;
+-
+-		if( noConstructors )
+-		{
+-			tabs = 0;
+-			println("#if 0");
+-			println("// constructor creation turned of with 'noConstructor' option");
+-			tabs = 1;
+-		}
+-
+-		// Generate the constructor from std::istream
+-		println(grammar.getClassName() + "(" + namespaceStd + "istream& in);");
+-
+-		// Generate the constructor from InputBuffer
+-		println(grammar.getClassName() + "("+namespaceAntlr+"InputBuffer& ib);");
+-
+-		println(grammar.getClassName() + "(const "+namespaceAntlr+"LexerSharedInputState& state);");
+-		if( noConstructors )
+-		{
+-			tabs = 0;
+-			println("// constructor creation turned of with 'noConstructor' option");
+-			println("#endif");
+-			tabs = 1;
+-		}
+-
+-		// Generate nextToken() rule.
+-		// nextToken() is a synthetic lexer rule that is the implicit OR of all
+-		// user-defined lexer rules.
+-		println(namespaceAntlr+"RefToken nextToken();");
+-
+-		// Generate code for each rule in the lexer
+-		Enumeration ids = grammar.rules.elements();
+-		while ( ids.hasMoreElements() ) {
+-			RuleSymbol sym = (RuleSymbol) ids.nextElement();
+-			// Don't generate the synthetic rules
+-			if (!sym.getId().equals("mnextToken")) {
+-				genRuleHeader(sym, false);
+-			}
+-			exitIfError();
+-		}
+-
+-		// Make the rest private
+-		tabs=0;
+-		println("private:");
+-		tabs=1;
+-
+-		// generate the rule name array for debugging
+-		if ( grammar.debuggingOutput ) {
+-			println("static const char* _ruleNames[];");
+-		}
+-
+-		// Generate the semantic predicate map for debugging
+-		if (grammar.debuggingOutput)
+-			println("static const char* _semPredNames[];");
+-
+-		// Generate the bitsets used throughout the lexer
+-		genBitsetsHeader(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());
+-
+-		tabs=0;
+-		println("};");
+-		println("");
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Generate a guard wrapper
+-		println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");
+-
+-		// Close the lexer output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	public void genInclude(ParserGrammar g) throws IOException
+-	{
+-		// Open the output stream for the parser and set the currentOutput
+-		outputFile = grammar.getClassName() + ".hpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-		//SAS: changed for proper text file io
+-
+-		genAST = grammar.buildAST;
+-
+-		tabs = 0;
+-
+-		// Generate a guard wrapper
+-		println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
+-		println("#define INC_"+grammar.getClassName()+"_hpp_");
+-		println("");
+-		printHeaderAction(preIncludeHpp);
+-		println("#include <antlr/config.hpp>");
+-
+-		// Generate the header common to all output files.
+-		genHeader(outputFile);
+-
+-		// Generate header for the parser
+-		println("#include <antlr/TokenStream.hpp>");
+-		println("#include <antlr/TokenBuffer.hpp>");
+-		println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");
+-
+-		// Generate parser class definition
+-		String sup=null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-			println("\n// Include correct superclass header with a header statement for example:");
+-			println("// header \"post_include_hpp\" {");
+-			println("// #include \""+sup+".hpp\"");
+-			println("// }");
+-			println("// Or....");
+-			println("// header {");
+-			println("// #include \""+sup+".hpp\"");
+-			println("// }\n");
+-		}
+-		else {
+-			sup = grammar.getSuperClass();
+-			if (sup.lastIndexOf('.') != -1)
+-				sup = sup.substring(sup.lastIndexOf('.')+1);
+-			println("#include <antlr/"+sup+".hpp>");
+-			sup = namespaceAntlr + sup;
+-		}
+-		println("");
+-
+-		// Do not use printAction because we assume tabs==0
+-		printHeaderAction(postIncludeHpp);
+-
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-
+-		printHeaderAction("");
+-
+-		// print javadoc comment if any
+-		if ( grammar.comment!=null ) {
+-			_println(grammar.comment);
+-		}
+-
+-		// generate the actual class definition
+-		print("class CUSTOM_API " + grammar.getClassName() + " : public " + sup);
+-		println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
+-
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-		if ( tsuffix != null ) {
+-			String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-			if ( suffix != null )
+-				print(", "+suffix);  // must be an interface name for Java
+-		}
+-		println("{");
+-
+-		// set up an array of all the rule names so the debugger can
+-		// keep track of them only by number -- less to store in tree...
+-		if (grammar.debuggingOutput) {
+-			println("public: static const char* _ruleNames[];");
+-		}
+-		// Generate user-defined parser class members
+-		if (grammar.classMemberAction != null) {
+-			genLineNo(grammar.classMemberAction.getLine());
+-			print(
+-				processActionForSpecialSymbols(grammar.classMemberAction.getText(),
+-														 grammar.classMemberAction.getLine(),
+-														 currentRule, null)
+-			);
+-			genLineNo2();
+-		}
+-		println("public:");
+-		tabs = 1;
+-		println("void initializeASTFactory( "+namespaceAntlr+"ASTFactory& factory );");
+-//		println("// called from constructors");
+-//		println("void _initialize( void );");
+-
+-		// Generate parser class constructor from TokenBuffer
+-		tabs=0;
+-		if( noConstructors )
+-		{
+-			println("#if 0");
+-			println("// constructor creation turned of with 'noConstructor' option");
+-		}
+-		println("protected:");
+-		tabs=1;
+-		println(grammar.getClassName() + "("+namespaceAntlr+"TokenBuffer& tokenBuf, int k);");
+-		tabs=0;
+-		println("public:");
+-		tabs=1;
+-		println(grammar.getClassName() + "("+namespaceAntlr+"TokenBuffer& tokenBuf);");
+-
+-		// Generate parser class constructor from TokenStream
+-		tabs=0;
+-		println("protected:");
+-		tabs=1;
+-		println(grammar.getClassName()+"("+namespaceAntlr+"TokenStream& lexer, int k);");
+-		tabs=0;
+-		println("public:");
+-		tabs=1;
+-		println(grammar.getClassName()+"("+namespaceAntlr+"TokenStream& lexer);");
+-
+-		println(grammar.getClassName()+"(const "+namespaceAntlr+"ParserSharedInputState& state);");
+-		if( noConstructors )
+-		{
+-			tabs = 0;
+-			println("// constructor creation turned of with 'noConstructor' option");
+-			println("#endif");
+-			tabs = 1;
+-		}
+-
+-		println("int getNumTokens() const");
+-		println("{"); tabs++;
+-		println("return "+grammar.getClassName()+"::NUM_TOKENS;");
+-		tabs--; println("}");
+-		println("const char* getTokenName( int type ) const");
+-		println("{"); tabs++;
+-		println("if( type > getNumTokens() ) return 0;");
+-		println("return "+grammar.getClassName()+"::tokenNames[type];");
+-		tabs--; println("}");
+-		println("const char* const* getTokenNames() const");
+-		println("{"); tabs++;
+-		println("return "+grammar.getClassName()+"::tokenNames;");
+-		tabs--; println("}");
+-
+-		// Generate code for each rule in the grammar
+-		Enumeration ids = grammar.rules.elements();
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-			if ( sym instanceof RuleSymbol) {
+-				RuleSymbol rs = (RuleSymbol)sym;
+-				genRuleHeader(rs, rs.references.size()==0);
+-			}
+-			exitIfError();
+-		}
+-
+-		// RK: when we are using a custom ast override Parser::getAST to return
+-		// the custom AST type. Ok, this does not work anymore with newer
+-		// compilers gcc 3.2.x and up. The reference counter is probably
+-		// getting in the way.
+-		// So now we just patch the return type back to RefAST
+-		tabs = 0; println("public:"); tabs = 1;
+-		println(namespaceAntlr+"RefAST getAST()");
+-		println("{");
+-		if( usingCustomAST )
+-		{
+-			tabs++;
+-			println("return "+namespaceAntlr+"RefAST(returnAST);");
+-			tabs--;
+-		}
+-		else
+-		{
+-			tabs++;
+-			println("return returnAST;");
+-			tabs--;
+-		}
+-		println("}");
+-		println("");
+-
+-		tabs=0; println("protected:"); tabs=1;
+-		println(labeledElementASTType+" returnAST;");
+-
+-		// Make the rest private
+-		tabs=0;
+-		println("private:");
+-		tabs=1;
+-
+-		// Generate the token names
+-		println("static const char* tokenNames[];");
+-		// and how many there are of them
+-		_println("#ifndef NO_STATIC_CONSTS");
+-		println("static const int NUM_TOKENS = "+grammar.tokenManager.getVocabulary().size()+";");
+-		_println("#else");
+-		println("enum {");
+-		println("\tNUM_TOKENS = "+grammar.tokenManager.getVocabulary().size());
+-		println("};");
+-		_println("#endif");
+-
+-		// Generate the bitsets used throughout the grammar
+-		genBitsetsHeader(bitsetsUsed, grammar.tokenManager.maxTokenType());
+-
+-		// Generate the semantic predicate map for debugging
+-		if (grammar.debuggingOutput)
+-			println("static const char* _semPredNames[];");
+-
+-		// Close class definition
+-		tabs=0;
+-		println("};");
+-		println("");
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Generate a guard wrapper
+-		println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");
+-
+-		// Close the parser output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	public void genInclude(TreeWalkerGrammar g) throws IOException
+-	{
+-		// Open the output stream for the parser and set the currentOutput
+-		outputFile = grammar.getClassName() + ".hpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-		//SAS: changed for proper text file io
+-
+-		genAST = grammar.buildAST;
+-		tabs = 0;
+-
+-		// Generate a guard wrapper
+-		println("#ifndef INC_"+grammar.getClassName()+"_hpp_");
+-		println("#define INC_"+grammar.getClassName()+"_hpp_");
+-		println("");
+-		printHeaderAction(preIncludeHpp);
+-		println("#include <antlr/config.hpp>");
+-		println("#include \"" + grammar.tokenManager.getName() + TokenTypesFileSuffix+".hpp\"");
+-
+-		// Generate the header common to all output files.
+-		genHeader(outputFile);
+-
+-		// Find the name of the super class
+-		String sup=null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-			println("\n// Include correct superclass header with a header statement for example:");
+-			println("// header \"post_include_hpp\" {");
+-			println("// #include \""+sup+".hpp\"");
+-			println("// }");
+-			println("// Or....");
+-			println("// header {");
+-			println("// #include \""+sup+".hpp\"");
+-			println("// }\n");
+-		}
+-		else {
+-			sup = grammar.getSuperClass();
+-			if (sup.lastIndexOf('.') != -1)
+-				sup = sup.substring(sup.lastIndexOf('.')+1);
+-			println("#include <antlr/"+sup+".hpp>");
+-			sup = namespaceAntlr + sup;
+-		}
+-		println("");
+-
+-		// Generate header for the parser
+-		//
+-		// Do not use printAction because we assume tabs==0
+-		printHeaderAction(postIncludeHpp);
+-
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-
+-		printHeaderAction("");
+-
+-		// print javadoc comment if any
+-		if ( grammar.comment!=null ) {
+-			_println(grammar.comment);
+-		}
+-
+-		// Generate parser class definition
+-		print("class CUSTOM_API " + grammar.getClassName() + " : public "+sup);
+-		println(", public " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
+-
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-		if ( tsuffix != null ) {
+-			String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-			if ( suffix != null ) {
+-				print(", "+suffix);  // must be an interface name for Java
+-			}
+-		}
+-		println("{");
+-
+-		// Generate user-defined parser class members
+-		if (grammar.classMemberAction != null) {
+-			genLineNo(grammar.classMemberAction.getLine());
+-			print(
+-					processActionForSpecialSymbols(grammar.classMemberAction.getText(),
+-															 grammar.classMemberAction.getLine(),
+-															 currentRule, null)
+-					);
+-			genLineNo2();
+-		}
+-
+-		// Generate default parser class constructor
+-		tabs=0;
+-		println("public:");
+-
+-		if( noConstructors )
+-		{
+-			println("#if 0");
+-			println("// constructor creation turned of with 'noConstructor' option");
+-		}
+-		tabs=1;
+-		println(grammar.getClassName() + "();");
+-		if( noConstructors )
+-		{
+-			tabs = 0;
+-			println("#endif");
+-			tabs = 1;
+-		}
+-
+-		// Generate declaration for the initializeFactory method
+-		println("static void initializeASTFactory( "+namespaceAntlr+"ASTFactory& factory );");
+-
+-		println("int getNumTokens() const");
+-		println("{"); tabs++;
+-		println("return "+grammar.getClassName()+"::NUM_TOKENS;");
+-		tabs--; println("}");
+-		println("const char* getTokenName( int type ) const");
+-		println("{"); tabs++;
+-		println("if( type > getNumTokens() ) return 0;");
+-		println("return "+grammar.getClassName()+"::tokenNames[type];");
+-		tabs--; println("}");
+-		println("const char* const* getTokenNames() const");
+-		println("{"); tabs++;
+-		println("return "+grammar.getClassName()+"::tokenNames;");
+-		tabs--; println("}");
+-
+-		// Generate code for each rule in the grammar
+-		Enumeration ids = grammar.rules.elements();
+-		String ruleNameInits = "";
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-			if ( sym instanceof RuleSymbol) {
+-				RuleSymbol rs = (RuleSymbol)sym;
+-				genRuleHeader(rs, rs.references.size()==0);
+-			}
+-			exitIfError();
+-		}
+-		tabs = 0; println("public:"); tabs = 1;
+-		println(namespaceAntlr+"RefAST getAST()");
+-		println("{");
+-		if( usingCustomAST )
+-		{
+-			tabs++;
+-			println("return "+namespaceAntlr+"RefAST(returnAST);");
+-			tabs--;
+-		}
+-		else
+-		{
+-			tabs++;
+-			println("return returnAST;");
+-			tabs--;
+-		}
+-		println("}");
+-		println("");
+-
+-		tabs=0; println("protected:"); tabs=1;
+-		println(labeledElementASTType+" returnAST;");
+-		println(labeledElementASTType+" _retTree;");
+-
+-		// Make the rest private
+-		tabs=0;
+-		println("private:");
+-		tabs=1;
+-
+-		// Generate the token names
+-		println("static const char* tokenNames[];");
+-		// and how many there are of them
+-		_println("#ifndef NO_STATIC_CONSTS");
+-		println("static const int NUM_TOKENS = "+grammar.tokenManager.getVocabulary().size()+";");
+-		_println("#else");
+-		println("enum {");
+-		println("\tNUM_TOKENS = "+grammar.tokenManager.getVocabulary().size());
+-		println("};");
+-		_println("#endif");
+-
+-		// Generate the bitsets used throughout the grammar
+-		genBitsetsHeader(bitsetsUsed, grammar.tokenManager.maxTokenType());
+-
+-		// Close class definition
+-		tabs=0;
+-		println("};");
+-		println("");
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Generate a guard wrapper
+-		println("#endif /*INC_"+grammar.getClassName()+"_hpp_*/");
+-
+-		// Close the parser output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	/// for convenience
+-	protected void genASTDeclaration( AlternativeElement el ) {
+-		genASTDeclaration( el, labeledElementASTType );
+-	}
+-	/// for convenience
+-	protected void genASTDeclaration( AlternativeElement el, String node_type ) {
+-		genASTDeclaration( el, el.getLabel(), node_type );
+-	}
+-	/// Generate (if not already done) a declaration for the AST for el.
+-	protected void genASTDeclaration( AlternativeElement el, String var_name, String node_type ) {
+-		// already declared?
+-		if( declaredASTVariables.contains(el) )
+-			return;
+-
+-		String init = labeledElementASTInit;
+-
+-		if (el instanceof GrammarAtom &&
+-			 ((GrammarAtom)el).getASTNodeType() != null )
+-			init = "Ref"+((GrammarAtom)el).getASTNodeType()+"("+labeledElementASTInit+")";
+-
+-		// emit code
+-		println(node_type+" " + var_name + "_AST = "+init+";");
+-
+-		// mark as declared
+-		declaredASTVariables.put(el, el);
+-	}
+-	private void genLiteralsTest() {
+-		println("_ttype = testLiteralsTable(_ttype);");
+-	}
+-	private void genLiteralsTestForPartialToken() {
+-		println("_ttype = testLiteralsTable(text.substr(_begin, text.length()-_begin),_ttype);");
+-	}
+-	protected void genMatch(BitSet b) {
+-	}
+-	protected void genMatch(GrammarAtom atom) {
+-		if ( atom instanceof StringLiteralElement ) {
+-			if ( grammar instanceof LexerGrammar ) {
+-				genMatchUsingAtomText(atom);
+-			}
+-			else {
+-				genMatchUsingAtomTokenType(atom);
+-			}
+-		}
+-		else if ( atom instanceof CharLiteralElement ) {
+-			if ( grammar instanceof LexerGrammar ) {
+-				genMatchUsingAtomText(atom);
+-			}
+-			else {
+-				antlrTool.error("cannot ref character literals in grammar: "+atom);
+-			}
+-		}
+-		else if ( atom instanceof TokenRefElement ) {
+-			genMatchUsingAtomTokenType(atom);
+-		} else if (atom instanceof WildcardElement) {
+-			gen((WildcardElement)atom);
+-		}
+-	}
+-	protected void genMatchUsingAtomText(GrammarAtom atom) {
+-		// match() for trees needs the _t cursor
+-		String astArgs="";
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			if( usingCustomAST )
+-				astArgs=namespaceAntlr+"RefAST"+"(_t),";
+-			else
+-				astArgs="_t,";
+-		}
+-
+-		// if in lexer and ! on element, save buffer index to kill later
+-		if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-			println("_saveIndex = text.length();");
+-		}
+-
+-		print(atom.not ? "matchNot(" : "match(");
+-		_print(astArgs);
+-
+-		// print out what to match
+-		if (atom.atomText.equals("EOF")) {
+-			// horrible hack to handle EOF case
+-			_print(namespaceAntlr+"Token::EOF_TYPE");
+-		}
+-		else
+-		{
+-			if( grammar instanceof LexerGrammar )	// lexer needs special handling
+-			{
+-				String cppstring = convertJavaToCppString( atom.atomText );
+-				_print(cppstring);
+-			}
+-			else
+-				_print(atom.atomText);
+-		}
+-
+-		_println(");");
+-
+-		if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-			println("text.erase(_saveIndex);");      // kill text atom put in buffer
+-		}
+-	}
+-	protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
+-		// match() for trees needs the _t cursor
+-		String astArgs="";
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			if( usingCustomAST )
+-				astArgs=namespaceAntlr+"RefAST"+"(_t),";
+-			else
+-				astArgs="_t,";
+-		}
+-
+-		// If the literal can be mangled, generate the symbolic constant instead
+-		String s = astArgs + getValueString(atom.getType());
+-
+-		// matching
+-		println( (atom.not ? "matchNot(" : "match(") + s + ");");
+-	}
+-	/** Generate the nextToken() rule.
+-	 * nextToken() is a synthetic lexer rule that is the implicit OR of all
+-	 * user-defined lexer rules.
+-	 * @param RuleBlock
+-	 */
+-	public void genNextToken() {
+-		// Are there any public rules?  If not, then just generate a
+-		// fake nextToken().
+-		boolean hasPublicRules = false;
+-		for (int i = 0; i < grammar.rules.size(); i++) {
+-			RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
+-			if ( rs.isDefined() && rs.access.equals("public") ) {
+-				hasPublicRules = true;
+-				break;
+-			}
+-		}
+-		if (!hasPublicRules) {
+-			println("");
+-			println(namespaceAntlr+"RefToken "+grammar.getClassName()+"::nextToken() { return "+namespaceAntlr+"RefToken(new "+namespaceAntlr+"CommonToken("+namespaceAntlr+"Token::EOF_TYPE, \"\")); }");
+-			println("");
+-			return;
+-		}
+-
+-		// Create the synthesized nextToken() rule
+-		RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
+-		// Define the nextToken rule symbol
+-		RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
+-		nextTokenRs.setDefined();
+-		nextTokenRs.setBlock(nextTokenBlk);
+-		nextTokenRs.access = "private";
+-		grammar.define(nextTokenRs);
+-		// Analyze the nextToken rule
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
+-
+-		// Generate the next token rule
+-		String filterRule=null;
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			filterRule = ((LexerGrammar)grammar).filterRule;
+-		}
+-
+-		println("");
+-		println(namespaceAntlr+"RefToken "+grammar.getClassName()+"::nextToken()");
+-		println("{");
+-		tabs++;
+-		println(namespaceAntlr+"RefToken theRetToken;");
+-		println("for (;;) {");
+-		tabs++;
+-		println(namespaceAntlr+"RefToken theRetToken;");
+-		println("int _ttype = "+namespaceAntlr+"Token::INVALID_TYPE;");
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			println("setCommitToPath(false);");
+-			if ( filterRule!=null ) {
+-				// Here's a good place to ensure that the filter rule actually exists
+-				if ( !grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule)) ) {
+-					grammar.antlrTool.error("Filter rule "+filterRule+" does not exist in this lexer");
+-				}
+-				else {
+-					RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
+-					if ( !rs.isDefined() ) {
+-						grammar.antlrTool.error("Filter rule "+filterRule+" does not exist in this lexer");
+-					}
+-					else if ( rs.access.equals("public") ) {
+-						grammar.antlrTool.error("Filter rule "+filterRule+" must be protected");
+-					}
+-				}
+-				println("int _m;");
+-				println("_m = mark();");
+-			}
+-		}
+-		println("resetText();");
+-
+-		// Generate try around whole thing to trap scanner errors
+-		println("try {   // for lexical and char stream error handling");
+-		tabs++;
+-
+-		// Test for public lexical rules with empty paths
+-		for (int i=0; i<nextTokenBlk.getAlternatives().size(); i++) {
+-			Alternative a = nextTokenBlk.getAlternativeAt(i);
+-			if ( a.cache[1].containsEpsilon() ) {
+-				antlrTool.warning("found optional path in nextToken()");
+-			}
+-		}
+-
+-		// Generate the block
+-		String newline = System.getProperty("line.separator");
+-		CppBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
+-		String errFinish = "if (LA(1)==EOF_CHAR)"+newline+
+-			"\t\t\t\t{"+newline+"\t\t\t\t\tuponEOF();"+newline+
+-			"\t\t\t\t\t_returnToken = makeToken("+namespaceAntlr+"Token::EOF_TYPE);"+
+-			newline+"\t\t\t\t}";
+-		errFinish += newline+"\t\t\t\t";
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			if ( filterRule==null ) {
+-				errFinish += "else {consume(); goto tryAgain;}";
+-			}
+-			else {
+-				errFinish += "else {"+newline+
+-						"\t\t\t\t\tcommit();"+newline+
+-						"\t\t\t\t\ttry {m"+filterRule+"(false);}"+newline+
+-						"\t\t\t\t\tcatch("+namespaceAntlr+"RecognitionException& e) {"+newline+
+-						"\t\t\t\t\t	// catastrophic failure"+newline+
+-						"\t\t\t\t\t	reportError(e);"+newline+
+-						"\t\t\t\t\t	consume();"+newline+
+-						"\t\t\t\t\t}"+newline+
+- 						"\t\t\t\t\tgoto tryAgain;"+newline+
+- 						"\t\t\t\t}";
+-			}
+-		}
+-		else {
+-			errFinish += "else {"+throwNoViable+"}";
+-		}
+-		genBlockFinish(howToFinish, errFinish);
+-
+-		// at this point a valid token has been matched, undo "mark" that was done
+-		if ( ((LexerGrammar)grammar).filterMode && filterRule!=null ) {
+-			println("commit();");
+-		}
+-
+-		// Generate literals test if desired
+-		// make sure _ttype is set first; note _returnToken must be
+-		// non-null as the rule was required to create it.
+-		println("if ( !_returnToken )"+newline+
+-				  "\t\t\t\tgoto tryAgain; // found SKIP token"+newline);
+-		println("_ttype = _returnToken->getType();");
+-		if ( ((LexerGrammar)grammar).getTestLiterals()) {
+-			genLiteralsTest();
+-		}
+-
+-		// return token created by rule reference in switch
+-		println("_returnToken->setType(_ttype);");
+-		println("return _returnToken;");
+-
+-		// Close try block
+-		tabs--;
+-		println("}");
+-		println("catch ("+namespaceAntlr+"RecognitionException& e) {");
+-		tabs++;
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			if ( filterRule==null ) {
+-				println("if ( !getCommitToPath() ) {");
+-				tabs++;
+-				println("consume();");
+-				println("goto tryAgain;");
+-				tabs--;
+-				println("}");
+-			}
+-			else {
+-				println("if ( !getCommitToPath() ) {");
+-				tabs++;
+-				println("rewind(_m);");
+-				println("resetText();");
+-				println("try {m"+filterRule+"(false);}");
+-				println("catch("+namespaceAntlr+"RecognitionException& ee) {");
+-				println("	// horrendous failure: error in filter rule");
+-				println("	reportError(ee);");
+-				println("	consume();");
+-				println("}");
+-				// println("goto tryAgain;");
+-				tabs--;
+-				println("}");
+-				println("else");
+-			}
+-		}
+-		if ( nextTokenBlk.getDefaultErrorHandler() ) {
+-			println("{");
+-			tabs++;
+-			println("reportError(e);");
+-			println("consume();");
+-			tabs--;
+-			println("}");
+-		}
+-		else {
+-		    // pass on to invoking routine
+-          tabs++;
+-		    println("throw "+namespaceAntlr+"TokenStreamRecognitionException(e);");
+-			 tabs--;
+-		}
+-
+-		// close CharStreamException try
+-		tabs--;
+-		println("}");
+-		println("catch ("+namespaceAntlr+"CharStreamIOException& csie) {");
+-		println("\tthrow "+namespaceAntlr+"TokenStreamIOException(csie.io);");
+-		println("}");
+-		println("catch ("+namespaceAntlr+"CharStreamException& cse) {");
+-		println("\tthrow "+namespaceAntlr+"TokenStreamException(cse.getMessage());");
+-		println("}");
+-
+-		// close for-loop
+-		_println("tryAgain:;");
+-		tabs--;
+-		println("}");
+-
+-		// close method nextToken
+-		tabs--;
+-		println("}");
+-		println("");
+-	}
+-	/** Gen a named rule block.
+-	 * ASTs are generated for each element of an alternative unless
+-	 * the rule or the alternative have a '!' modifier.
+-	 *
+-	 * If an alternative defeats the default tree construction, it
+-	 * must set <rule>_AST to the root of the returned AST.
+-	 *
+-	 * Each alternative that does automatic tree construction, builds
+-	 * up root and child list pointers in an ASTPair structure.
+-	 *
+-	 * A rule finishes by setting the returnAST variable from the
+-	 * ASTPair.
+-	 *
+-	 * @param rule The name of the rule to generate
+-	 * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
+-	*/
+-	public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, String prefix) {
+-//		tabs=1; // JavaCodeGenerator needs this
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")");
+-		if ( !s.isDefined() ) {
+-			antlrTool.error("undefined rule: "+ s.getId());
+-			return;
+-		}
+-
+-		// Generate rule return type, name, arguments
+-		RuleBlock rblk = s.getBlock();
+-
+-		currentRule = rblk;
+-		currentASTResult = s.getId();
+-
+-		// clear list of declared ast variables..
+-		declaredASTVariables.clear();
+-
+-		// Save the AST generation state, and set it to that of the rule
+-		boolean savegenAST = genAST;
+-		genAST = genAST && rblk.getAutoGen();
+-
+-		// boolean oldsaveTest = saveText;
+-		saveText = rblk.getAutoGen();
+-
+-		// print javadoc comment if any
+-		if ( s.comment!=null ) {
+-			_println(s.comment);
+-		}
+-
+-		// Gen method return type (note lexer return action set at rule creation)
+-		if (rblk.returnAction != null)
+-		{
+-			// Has specified return value
+-			_print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
+-		} else {
+-			// No specified return value
+-			_print("void ");
+-		}
+-
+-		// Gen method name
+-		_print(prefix + s.getId() + "(");
+-
+-		// Additional rule parameters common to all rules for this grammar
+-		_print(commonExtraParams);
+-		if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
+-			_print(",");
+-		}
+-
+-		// Gen arguments
+-		if (rblk.argAction != null)
+-		{
+-			// Has specified arguments
+-			_println("");
+-// FIXME: make argAction also a token? Hmmmmm
+-//			genLineNo(rblk);
+-			tabs++;
+-
+-			// Process arguments for default arguments
+-			// newer gcc's don't accept these in two places (header/cpp)
+-			//
+-			// Old appraoch with StringBuffer gave trouble with gcj.
+-			//
+-			// RK: Actually this breaks with string default arguments containing
+-			// a comma's or equal signs. Then again the old StringBuffer method
+-			// suffered from the same.
+-			String oldarg = rblk.argAction;
+-			String newarg = "";
+-
+-			String comma = "";
+-			int eqpos = oldarg.indexOf( '=' );
+-			if( eqpos != -1 )
+-			{
+-				int cmpos = 0;
+-				while( cmpos != -1 )
+-				{
+-					newarg = newarg + comma + oldarg.substring( 0, eqpos ).trim();
+-					comma = ", ";
+-					cmpos = oldarg.indexOf( ',', eqpos );
+-					if( cmpos != -1 )
+-					{
+-						// cut off part we just handled
+-						oldarg = oldarg.substring( cmpos+1 ).trim();
+-						eqpos = oldarg.indexOf( '=' );
+-					}
+-				}
+-			}
+-			else
+-				newarg = oldarg;
+-
+-			println( newarg );
+-
+-//			println(rblk.argAction);
+-			tabs--;
+-			print(") ");
+-//			genLineNo2();	// gcc gives error on the brace... hope it works for the others too
+-		} else {
+-			// No specified arguments
+-			_print(") ");
+-		}
+-		_println("{");
+-		tabs++;
+-
+-		if (grammar.traceRules) {
+-			if ( grammar instanceof TreeWalkerGrammar ) {
+-				if ( usingCustomAST )
+-					println("Tracer traceInOut(this,\""+ s.getId() +"\","+namespaceAntlr+"RefAST"+"(_t));");
+-				else
+-					println("Tracer traceInOut(this,\""+ s.getId() +"\",_t);");
+-			}
+-			else {
+-				println("Tracer traceInOut(this, \""+ s.getId() +"\");");
+-			}
+-		}
+-
+-		// Convert return action to variable declaration
+-		if (rblk.returnAction != null)
+-		{
+-			genLineNo(rblk);
+-			println(rblk.returnAction + ";");
+-			genLineNo2();
+-		}
+-
+-		// print out definitions needed by rules for various grammar types
+-		if (!commonLocalVars.equals(""))
+-			println(commonLocalVars);
+-
+-		if ( grammar instanceof LexerGrammar ) {
+-			// RK: why is this here? It seems not supported in the rest of the
+-			// tool.
+-			// lexer rule default return value is the rule's token name
+-			// This is a horrible hack to support the built-in EOF lexer rule.
+-			if (s.getId().equals("mEOF"))
+-				println("_ttype = "+namespaceAntlr+"Token::EOF_TYPE;");
+-			else
+-				println("_ttype = "+ s.getId().substring(1)+";");
+-			println("int _saveIndex;");		// used for element! (so we can kill text matched for element)
+-/*
+-			println("boolean old_saveConsumedInput=saveConsumedInput;");
+-			if ( !rblk.getAutoGen() ) {      // turn off "save input" if ! on rule
+-				println("saveConsumedInput=false;");
+-			}
+-*/
+-		}
+-
+-		// if debugging, write code to mark entry to the rule
+-		if ( grammar.debuggingOutput)
+-		    if (grammar instanceof ParserGrammar)
+-				println("fireEnterRule(" + ruleNum + ",0);");
+-			else if (grammar instanceof LexerGrammar)
+-				println("fireEnterRule(" + ruleNum + ",_ttype);");
+-
+-		// Generate trace code if desired
+-//		if ( grammar.debuggingOutput || grammar.traceRules) {
+-//			println("try { // debugging");
+-//			tabs++;
+-//		}
+-
+-		// Initialize AST variables
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			// "Input" value for rule
+-//			println(labeledElementASTType+" " + s.getId() + "_AST_in = "+labeledElementASTType+"(_t);");
+-			println(labeledElementASTType+" " + s.getId() + "_AST_in = (_t == ASTNULL) ? "+labeledElementASTInit+" : _t;");
+-		}
+-		if (grammar.buildAST) {
+-			// Parser member used to pass AST returns from rule invocations
+-			println("returnAST = "+labeledElementASTInit+";");
+-			// Tracks AST construction
+-			println(namespaceAntlr+"ASTPair currentAST;"); // = new ASTPair();");
+-			// User-settable return value for rule.
+-			println(labeledElementASTType+" " + s.getId() + "_AST = "+labeledElementASTInit+";");
+-		}
+-
+-		genBlockPreamble(rblk);
+-		genBlockInitAction(rblk);
+-		println("");
+-
+-		// Search for an unlabeled exception specification attached to the rule
+-		ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
+-
+-		// Generate try block around the entire rule for  error handling
+-		if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
+-			println("try {      // for error handling");
+-			tabs++;
+-		}
+-
+-		// Generate the alternatives
+-		if ( rblk.alternatives.size()==1 )
+-		{
+-			// One alternative -- use simple form
+-			Alternative alt = rblk.getAlternativeAt(0);
+-			String pred = alt.semPred;
+-			if ( pred!=null )
+-				genSemPred(pred, currentRule.line);
+-			if (alt.synPred != null) {
+-				antlrTool.warning(
+-					"Syntactic predicate ignored for single alternative",
+-					grammar.getFilename(),
+-					alt.synPred.getLine(),
+-					alt.synPred.getColumn()
+-				);
+-			}
+-			genAlt(alt, rblk);
+-		}
+-		else
+-		{
+-			// Multiple alternatives -- generate complex form
+-			boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
+-
+-			CppBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
+-			genBlockFinish(howToFinish, throwNoViable);
+-		}
+-
+-		// Generate catch phrase for error handling
+-		if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
+-			// Close the try block
+-			tabs--;
+-			println("}");
+-		}
+-
+-		// Generate user-defined or default catch phrases
+-		if (unlabeledUserSpec != null)
+-		{
+-			genErrorHandler(unlabeledUserSpec);
+-		}
+-		else if (rblk.getDefaultErrorHandler())
+-		{
+-			// Generate default catch phrase
+-			println("catch (" + exceptionThrown + "& ex) {");
+-			tabs++;
+-			// Generate code to handle error if not guessing
+-			if (grammar.hasSyntacticPredicate) {
+-				println("if( inputState->guessing == 0 ) {");
+-				tabs++;
+-			}
+-			println("reportError(ex);");
+-			if ( !(grammar instanceof TreeWalkerGrammar) )
+-			{
+-				// Generate code to consume until token in k==1 follow set
+-				Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
+-				String followSetName = getBitsetName(markBitsetForGen(follow.fset));
+-				println("consume();");
+-				println("consumeUntil(" + followSetName + ");");
+-			}
+-			else
+-			{
+-				// Just consume one token
+-				println("if ( _t != "+labeledElementASTInit+" )");
+-				tabs++;
+-				println("_t = _t->getNextSibling();");
+-				tabs--;
+-			}
+-			if (grammar.hasSyntacticPredicate)
+-			{
+-				tabs--;
+-				// When guessing, rethrow exception
+-				println("} else {");
+-				tabs++;
+-				println("throw;");
+-				tabs--;
+-				println("}");
+-			}
+-			// Close catch phrase
+-			tabs--;
+-			println("}");
+-		}
+-
+-		// Squirrel away the AST "return" value
+-		if (grammar.buildAST) {
+-			println("returnAST = " + s.getId() + "_AST;");
+-		}
+-
+-		// Set return tree value for tree walkers
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("_retTree = _t;");
+-		}
+-
+-		// Generate literals test for lexer rules so marked
+-		if (rblk.getTestLiterals()) {
+-			if ( s.access.equals("protected") ) {
+-				genLiteralsTestForPartialToken();
+-			}
+-			else {
+-				genLiteralsTest();
+-			}
+-		}
+-
+-		// if doing a lexer rule, dump code to create token if necessary
+-		if ( grammar instanceof LexerGrammar ) {
+-			println("if ( _createToken && _token=="+namespaceAntlr+"nullToken && _ttype!="+namespaceAntlr+"Token::SKIP ) {");
+-			println("   _token = makeToken(_ttype);");
+-			println("   _token->setText(text.substr(_begin, text.length()-_begin));");
+-			println("}");
+-			println("_returnToken = _token;");
+-			// It should be easy for an optimizing compiler to realize this does nothing
+-			// but it avoids the warning about the variable being unused.
+-			println("_saveIndex=0;");
+-		}
+-
+-		// Gen the return statement if there is one (lexer has hard-wired return action)
+-		if (rblk.returnAction != null) {
+-			println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
+-		}
+-
+-//		if ( grammar.debuggingOutput || grammar.traceRules) {
+-////			tabs--;
+-////			println("} finally { // debugging");
+-////			tabs++;
+-//
+-//			// Generate trace code if desired
+-//			if ( grammar.debuggingOutput)
+-//				if (grammar instanceof ParserGrammar)
+-//					println("fireExitRule(" + ruleNum + ",0);");
+-//				else if (grammar instanceof LexerGrammar)
+-//					println("fireExitRule(" + ruleNum + ",_ttype);");
+-//
+-////			if (grammar.traceRules) {
+-////				if ( grammar instanceof TreeWalkerGrammar ) {
+-////					println("traceOut(\""+ s.getId() +"\",_t);");
+-////				}
+-////				else {
+-////					println("traceOut(\""+ s.getId() +"\");");
+-////				}
+-////			}
+-////
+-////			tabs--;
+-////			println("}");
+-//		}
+-
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		// Restore the AST generation state
+-		genAST = savegenAST;
+-
+-		// restore char save state
+-		// saveText = oldsaveTest;
+-	}
+-	public void genRuleHeader(RuleSymbol s, boolean startSymbol) {
+-		tabs=1;
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("genRuleHeader("+ s.getId() +")");
+-		if ( !s.isDefined() ) {
+-			antlrTool.error("undefined rule: "+ s.getId());
+-			return;
+-		}
+-
+-		// Generate rule return type, name, arguments
+-		RuleBlock rblk = s.getBlock();
+-		currentRule = rblk;
+-		currentASTResult = s.getId();
+-
+-		// Save the AST generation state, and set it to that of the rule
+-		boolean savegenAST = genAST;
+-		genAST = genAST && rblk.getAutoGen();
+-
+-		// boolean oldsaveTest = saveText;
+-		saveText = rblk.getAutoGen();
+-
+-		// Gen method access
+-		print(s.access + ": ");
+-
+-		// Gen method return type (note lexer return action set at rule creation)
+-		if (rblk.returnAction != null)
+-		{
+-			// Has specified return value
+-			_print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
+-		} else {
+-			// No specified return value
+-			_print("void ");
+-		}
+-
+-		// Gen method name
+-		_print(s.getId() + "(");
+-
+-		// Additional rule parameters common to all rules for this grammar
+-		_print(commonExtraParams);
+-		if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
+-			_print(",");
+-		}
+-
+-		// Gen arguments
+-		if (rblk.argAction != null)
+-		{
+-			// Has specified arguments
+-			_println("");
+-			tabs++;
+-			println(rblk.argAction);
+-			tabs--;
+-			print(")");
+-		} else {
+-			// No specified arguments
+-			_print(")");
+-		}
+-		_println(";");
+-
+-		tabs--;
+-
+-		// Restore the AST generation state
+-		genAST = savegenAST;
+-
+-		// restore char save state
+-		// saveText = oldsaveTest;
+-	}
+-	private void GenRuleInvocation(RuleRefElement rr) {
+-		// dump rule name
+-		_print(rr.targetRule + "(");
+-
+-		// lexers must tell rule if it should set _returnToken
+-		if ( grammar instanceof LexerGrammar ) {
+-			// if labeled, could access Token, so tell rule to create
+-			if ( rr.getLabel() != null ) {
+-				_print("true");
+-			}
+-			else {
+-				_print("false");
+-			}
+-			if (commonExtraArgs.length() != 0 || rr.args!=null ) {
+-				_print(",");
+-			}
+-		}
+-
+-		// Extra arguments common to all rules for this grammar
+-		_print(commonExtraArgs);
+-		if (commonExtraArgs.length() != 0 && rr.args!=null ) {
+-			_print(",");
+-		}
+-
+-		// Process arguments to method, if any
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-		if (rr.args != null)
+-		{
+-			// When not guessing, execute user arg action
+-			ActionTransInfo tInfo = new ActionTransInfo();
+-			// FIXME: fix line number passed to processActionForTreeSpecifiers here..
+-			// this one might be a bit off..
+-			String args = processActionForSpecialSymbols(rr.args, rr.line,
+-																		currentRule, tInfo);
+-			if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null )
+-			{
+-				antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #"+
+-					currentRule.getRuleName()+" on line "+rr.getLine());
+-			}
+-			_print(args);
+-
+-			// Warn if the rule accepts no arguments
+-			if (rs.block.argAction == null)
+-			{
+-				antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments",
+-					grammar.getFilename(),
+-					rr.getLine(), rr.getColumn());
+-			}
+-		}
+-		else
+-		{
+-			// For C++, no warning if rule has parameters, because there may be default
+-			// values for all of the parameters
+-			//if (rs.block.argAction != null) {
+-			//	tool.warning("Missing parameters on reference to rule "+rr.targetRule, rr.getLine());
+-			//}
+-		}
+-		_println(");");
+-
+-		// move down to the first child while parsing
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("_t = _retTree;");
+-		}
+-	}
+-	protected void genSemPred(String pred, int line) {
+-		// translate $ and # references
+-		ActionTransInfo tInfo = new ActionTransInfo();
+-		pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
+-		// ignore translation info...we don't need to do anything with it.
+-		String escapedPred = charFormatter.escapeString(pred);
+-
+-		// if debugging, wrap the semantic predicate evaluation in a method
+-		// that can tell SemanticPredicateListeners the result
+-		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-			  (grammar instanceof LexerGrammar)))
+-			pred = "fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.VALIDATING," //FIXME
+-				+ addSemPred(escapedPred) + "," + pred + ")";
+-		println("if (!(" + pred + "))");
+-		tabs++;
+-		println("throw "+namespaceAntlr+"SemanticException(\"" + escapedPred + "\");");
+-		tabs--;
+-	}
+-	/** Write an array of Strings which are the semantic predicate
+-	 *  expressions.  The debugger will reference them by number only
+-	 */
+-	protected void genSemPredMap(String prefix) {
+-		Enumeration e = semPreds.elements();
+-		println("const char* " + prefix + "_semPredNames[] = {");
+-		tabs++;
+-		while(e.hasMoreElements())
+-			println("\""+e.nextElement()+"\",");
+-		println("0");
+-		tabs--;
+-		println("};");
+-	}
+-	protected void genSynPred(SynPredBlock blk, String lookaheadExpr) {
+-		if ( DEBUG_CODE_GENERATOR || DEBUG_CPP_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");
+-
+-		// Dump synpred result variable
+-		println("bool synPredMatched" + blk.ID + " = false;");
+-		// Gen normal lookahead test
+-		println("if (" + lookaheadExpr + ") {");
+-		tabs++;
+-
+-		// Save input state
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println(labeledElementType + " __t" + blk.ID + " = _t;");
+-		}
+-		else {
+-			println("int _m" + blk.ID + " = mark();");
+-		}
+-
+-		// Once inside the try, assume synpred works unless exception caught
+-		println("synPredMatched" + blk.ID + " = true;");
+-		println("inputState->guessing++;");
+-
+-		// if debugging, tell listeners that a synpred has started
+-		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-			 (grammar instanceof LexerGrammar))) {
+-			println("fireSyntacticPredicateStarted();");
+-		}
+-
+-		syntacticPredLevel++;
+-		println("try {");
+-		tabs++;
+-		gen((AlternativeBlock)blk);		// gen code to test predicate
+-		tabs--;
+-		//println("System.out.println(\"pred "+blk+" succeeded\");");
+-		println("}");
+-		println("catch (" + exceptionThrown + "& pe) {");
+-		tabs++;
+-		println("synPredMatched"+blk.ID+" = false;");
+-		//println("System.out.println(\"pred "+blk+" failed\");");
+-		tabs--;
+-		println("}");
+-
+-		// Restore input state
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("_t = __t"+blk.ID+";");
+-		}
+-		else {
+-			println("rewind(_m"+blk.ID+");");
+-		}
+-
+-		println("inputState->guessing--;");
+-
+-		// if debugging, tell listeners how the synpred turned out
+-		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-		     (grammar instanceof LexerGrammar))) {
+-			println("if (synPredMatched" + blk.ID +")");
+-			println("  fireSyntacticPredicateSucceeded();");
+-			println("else");
+-			println("  fireSyntacticPredicateFailed();");
+-		}
+-
+-		syntacticPredLevel--;
+-		tabs--;
+-
+-		// Close lookahead test
+-		println("}");
+-
+-		// Test synpred result
+-		println("if ( synPredMatched"+blk.ID+" ) {");
+-	}
+-	/** Generate a static array containing the names of the tokens,
+-	 * indexed by the token type values.  This static array is used
+-	 * to format error messages so that the token identifers or literal
+-	 * strings are displayed instead of the token numbers.
+-	 *
+-	 * If a lexical rule has a paraphrase, use it rather than the
+-	 * token label.
+-	 */
+-	public void genTokenStrings(String prefix) {
+-		// Generate a string for each token.  This creates a static
+-		// array of Strings indexed by token type.
+-//		println("");
+-		println("const char* " + prefix + "tokenNames[] = {");
+-		tabs++;
+-
+-		// Walk the token vocabulary and generate a Vector of strings
+-		// from the tokens.
+-		Vector v = grammar.tokenManager.getVocabulary();
+-		for (int i = 0; i < v.size(); i++)
+-		{
+-			String s = (String)v.elementAt(i);
+-			if (s == null)
+-			{
+-				s = "<"+String.valueOf(i)+">";
+-			}
+-			if ( !s.startsWith("\"") && !s.startsWith("<") ) {
+-				TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
+-				if ( ts!=null && ts.getParaphrase()!=null ) {
+-					s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
+-				}
+-			}
+-			print(charFormatter.literalString(s));
+-			_println(",");
+-		}
+-		println("0");
+-
+-		// Close the string array initailizer
+-		tabs--;
+-		println("};");
+-	}
+-	/** Generate the token types C++ file */
+-	protected void genTokenTypes(TokenManager tm) throws IOException {
+-		// Open the token output header file and set the currentOutput stream
+-		outputFile = tm.getName() + TokenTypesFileSuffix+".hpp";
+-		outputLine = 1;
+-		currentOutput = antlrTool.openOutputFile(outputFile);
+-		//SAS: changed for proper text file io
+-
+-		tabs = 0;
+-
+-		// Generate a guard wrapper
+-		println("#ifndef INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_");
+-		println("#define INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_");
+-		println("");
+-
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-
+-		// Generate the header common to all C++ files
+-		genHeader(outputFile);
+-
+-		// Encapsulate the definitions in an interface.  This can be done
+-		// because they are all constants.
+-		println("");
+-		println("#ifndef CUSTOM_API");
+-		println("# define CUSTOM_API");
+-		println("#endif");
+-		println("");
+-		// In the case that the .hpp is included from C source (flexLexer!)
+-		// we just turn things into a plain enum
+-		println("#ifdef __cplusplus");
+-		println("struct CUSTOM_API " + tm.getName() + TokenTypesFileSuffix+" {");
+-		println("#endif");
+-		tabs++;
+-		println("enum {");
+-		tabs++;
+-
+-		// Generate a definition for each token type
+-		Vector v = tm.getVocabulary();
+-
+-		// Do special tokens manually
+-		println("EOF_ = " + Token.EOF_TYPE + ",");
+-
+-		// Move the other special token to the end, so we can solve
+-		// the superfluous comma problem easily
+-
+-		for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-			String s = (String)v.elementAt(i);
+-			if (s != null) {
+-				if ( s.startsWith("\"") ) {
+-					// a string literal
+-					StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
+-					if ( sl==null ) {
+-						antlrTool.panic("String literal "+s+" not in symbol table");
+-					}
+-					else if ( sl.label != null ) {
+-						println(sl.label + " = " + i + ",");
+-					}
+-					else {
+-						String mangledName = mangleLiteral(s);
+-						if (mangledName != null) {
+-							// We were able to create a meaningful mangled token name
+-							println(mangledName + " = " + i + ",");
+-							// if no label specified, make the label equal to the mangled name
+-							sl.label = mangledName;
+-						}
+-						else {
+-							println("// " + s + " = " + i);
+-						}
+-					}
+-				}
+-				else if ( !s.startsWith("<") ) {
+-					println(s + " = " + i + ",");
+-				}
+-			}
+-		}
+-
+-		// Moved from above
+-		println("NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD);
+-
+-		// Close the enum
+-		tabs--;
+-		println("};");
+-
+-		// Close the interface
+-		tabs--;
+-		println("#ifdef __cplusplus");
+-		println("};");
+-		println("#endif");
+-
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Generate a guard wrapper
+-		println("#endif /*INC_"+tm.getName()+TokenTypesFileSuffix+"_hpp_*/");
+-
+-		// Close the tokens output file
+-		currentOutput.close();
+-		currentOutput = null;
+-		exitIfError();
+-	}
+-	/** Process a string for an simple expression for use in xx/action.g
+-	 * it is used to cast simple tokens/references to the right type for
+-	 * the generated language. Basically called for every element in
+-	 * the vector to getASTCreateString(vector V)
+-	 * @param str A String.
+-	 */
+-	public String processStringForASTConstructor( String str )
+-	{
+-		if( usingCustomAST &&
+-			((grammar instanceof TreeWalkerGrammar) ||
+-			 (grammar instanceof ParserGrammar))  &&
+-			!(grammar.tokenManager.tokenDefined(str) ) )
+-		{
+-//			System.out.println("processStringForASTConstructor: "+str+" with cast");
+-			return namespaceAntlr+"RefAST("+str+")";
+-		}
+-		else
+-		{
+-//			System.out.println("processStringForASTConstructor: "+str);
+-			return str;
+-		}
+-	}
+-	/** Get a string for an expression to generate creation of an AST subtree.
+-	  * @param v A Vector of String, where each element is an expression
+-	  * in the target language yielding an AST node.
+-	  */
+-	public String getASTCreateString(Vector v) {
+-		if (v.size() == 0) {
+-			return "";
+-		}
+-		StringBuffer buf = new StringBuffer();
+-		// the labeledElementASTType here can probably be a cast or nothing
+-		// in the case of ! usingCustomAST
+-		buf.append(labeledElementASTType+
+-					"(astFactory->make((new "+namespaceAntlr+
+-					  "ASTArray("+v.size()+"))");
+-		for (int i = 0; i < v.size(); i++) {
+-			buf.append("->add("+ v.elementAt(i) + ")");
+-		}
+-		buf.append("))");
+-		return buf.toString();
+-	}
+-	/** Get a string for an expression to generate creating of an AST node
+-	 * @param str The arguments to the AST constructor
+-	 */
+-	public String getASTCreateString(GrammarAtom atom, String str) {
+-		if ( atom!=null && atom.getASTNodeType() != null ) {
+-
+-			// this atom is using a heterogeneous AST type.
+-			// make note of the factory needed to generate it..
+-			// later this is inserted into the initializeFactory method.
+-			astTypes.appendElement("factory.registerFactory("+
+-									  atom.getType() + ", \""+atom.getASTNodeType()+
+-									  "\", "+atom.getASTNodeType()+"::factory);");
+-
+-			// after above init the factory knows what to generate...
+-			return "astFactory->create("+str+")";
+-		}
+-		else
+-		{
+-			// FIXME: This is *SO* ugly! but it will have to do for now...
+-			// 2.7.2 will have better I hope
+-			// this is due to the usage of getASTCreateString from inside
+-			// actions/cpp/action.g
+-			boolean is_constructor = false;
+-			if( str.indexOf(',') != -1 )
+-				is_constructor = grammar.tokenManager.tokenDefined(str.substring(0,str.indexOf(',')));
+-
+-//			System.out.println("getAstCreateString(as): "+str+" "+grammar.tokenManager.tokenDefined(str));
+-			if( usingCustomAST &&
+-			   (grammar instanceof TreeWalkerGrammar) &&
+-				!(grammar.tokenManager.tokenDefined(str) ) &&
+-				! is_constructor )
+-				return "astFactory->create("+namespaceAntlr+"RefAST("+str+"))";
+-			else
+-				return "astFactory->create("+str+")";
+-		}
+-	}
+-
+-	/** Get a string for an expression to generate creating of an AST node
+-	 * @param str The arguments to the AST constructor
+-	 */
+-	public String getASTCreateString(String str) {
+-//		System.out.println("getAstCreateString(str): "+str+" "+grammar.tokenManager.tokenDefined(str));
+-		if( usingCustomAST )
+-			return labeledElementASTType+"(astFactory->create("+namespaceAntlr+"RefAST("+str+")))";
+-		else
+-			return "astFactory->create("+str+")";
+-	}
+-
+-	protected String getLookaheadTestExpression(Lookahead[] look, int k) {
+-		StringBuffer e = new StringBuffer(100);
+-		boolean first = true;
+-
+-		e.append("(");
+-		for (int i = 1; i <= k; i++) {
+-			BitSet p = look[i].fset;
+-			if (!first) {
+-				e.append(") && (");
+-			}
+-			first = false;
+-
+-			// Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
+-			// There is no way to predict what that token would be.  Just
+-			// allow anything instead.
+-			if (look[i].containsEpsilon()) {
+-				e.append("true");
+-			} else {
+-				e.append(getLookaheadTestTerm(i, p));
+-			}
+-		}
+-		e.append(")");
+-
+-		return e.toString();
+-	}
+-	/** Generate a lookahead test expression for an alternate.  This
+-	 * will be a series of tests joined by '&&' and enclosed by '()',
+-	 * the number of such tests being determined by the depth of the lookahead.
+-	 */
+-	protected String getLookaheadTestExpression(Alternative alt, int maxDepth) {
+-		int depth = alt.lookaheadDepth;
+-		if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
+-			// if the decision is nondeterministic, do the best we can: LL(k)
+-			// any predicates that are around will be generated later.
+-			depth = grammar.maxk;
+-		}
+-
+-		if ( maxDepth==0 ) {
+-			// empty lookahead can result from alt with sem pred
+-			// that can see end of token.  E.g., A : {pred}? ('a')? ;
+-			return "true";
+-		}
+-
+-/*
+-boolean first = true;
+-		for (int i=1; i<=depth && i<=maxDepth; i++) {
+-			BitSet p = alt.cache[i].fset;
+-			if (!first) {
+-				e.append(") && (");
+-			}
+-			first = false;
+-
+-			// Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
+-			// There is no way to predict what that token would be.  Just
+-			// allow anything instead.
+-			if ( alt.cache[i].containsEpsilon() ) {
+-				e.append("true");
+-			}
+-			else {
+-				e.append(getLookaheadTestTerm(i, p));
+-			}
+-		}
+-
+-		e.append(")");
+-*/
+-
+-		return "(" + getLookaheadTestExpression(alt.cache,depth) + ")";
+-	}
+-	/**Generate a depth==1 lookahead test expression given the BitSet.
+-	 * This may be one of:
+-	 * 1) a series of 'x==X||' tests
+-	 * 2) a range test using >= && <= where possible,
+-	 * 3) a bitset membership test for complex comparisons
+-	 * @param k The lookahead level
+-	 * @param p The lookahead set for level k
+-	 */
+-	protected String getLookaheadTestTerm(int k, BitSet p) {
+-		// Determine the name of the item to be compared
+-		String ts = lookaheadString(k);
+-
+-		// Generate a range expression if possible
+-		int[] elems = p.toArray();
+-		if (elementsAreRange(elems)) {
+-			return getRangeExpression(k, elems);
+-		}
+-
+-		// Generate a bitset membership test if possible
+-		StringBuffer e;
+-		int degree = p.degree();
+-		if ( degree == 0 ) {
+-			return "true";
+-		}
+-
+-		if (degree >= bitsetTestThreshold) {
+-			int bitsetIdx = markBitsetForGen(p);
+-			return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
+-		}
+-
+-		// Otherwise, generate the long-winded series of "x==X||" tests
+-		e = new StringBuffer();
+-		for (int i = 0; i < elems.length; i++) {
+-			// Get the compared-to item (token or character value)
+-			String cs = getValueString(elems[i]);
+-
+-			// Generate the element comparison
+-			if( i > 0 ) e.append(" || ");
+-			e.append(ts);
+-			e.append(" == ");
+-			e.append(cs);
+-		}
+-		return e.toString();
+-	}
+-	/** Return an expression for testing a contiguous renage of elements
+-	 * @param k The lookahead level
+-	 * @param elems The elements representing the set, usually from BitSet.toArray().
+-	 * @return String containing test expression.
+-	 */
+-	public String getRangeExpression(int k, int[] elems) {
+-		if (!elementsAreRange(elems)) {
+-			antlrTool.panic("getRangeExpression called with non-range");
+-		}
+-		int begin = elems[0];
+-		int end = elems[elems.length-1];
+-		return
+-			"(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
+-			  lookaheadString(k) + " <= " + getValueString(end) + ")";
+-	}
+-	/** getValueString: get a string representation of a token or char value
+-	 * @param value The token or char value
+-	 */
+-	private String getValueString(int value) {
+-		String cs;
+-		if ( grammar instanceof LexerGrammar ) {
+-			cs = charFormatter.literalChar(value);
+-		}
+-		else
+-		{
+-			TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
+-			if ( ts == null ) {
+-				return ""+value; // return token type as string
+-				// tool.panic("vocabulary for token type " + value + " is null");
+-			}
+-			String tId = ts.getId();
+-			if ( ts instanceof StringLiteralSymbol ) {
+-				// if string literal, use predefined label if any
+-				// if no predefined, try to mangle into LITERAL_xxx.
+-				// if can't mangle, use int value as last resort
+-				StringLiteralSymbol sl = (StringLiteralSymbol)ts;
+-				String label = sl.getLabel();
+-				if ( label!=null ) {
+-					cs = label;
+-				}
+-				else {
+-					cs = mangleLiteral(tId);
+-					if (cs == null) {
+-						cs = String.valueOf(value);
+-					}
+-				}
+-			}
+-			else {
+-				if ( tId.equals("EOF") )
+-					cs = namespaceAntlr+"Token::EOF_TYPE";
+-				else
+-					cs = tId;
+-			}
+-		}
+-		return cs;
+-	}
+-	/**Is the lookahead for this alt empty? */
+-	protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
+-		int depth = alt.lookaheadDepth;
+-		if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
+-			depth = grammar.maxk;
+-		}
+-		for (int i=1; i<=depth && i<=maxDepth; i++) {
+-			BitSet p = alt.cache[i].fset;
+-			if (p.degree() != 0) {
+-				return false;
+-			}
+-		}
+-		return true;
+-	}
+-	private String lookaheadString(int k) {
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			return "_t->getType()";
+-		}
+-		return "LA(" + k + ")";
+-	}
+-	/** Mangle a string literal into a meaningful token name.  This is
+-	  * only possible for literals that are all characters.  The resulting
+-	  * mangled literal name is literalsPrefix with the text of the literal
+-	  * appended.
+-	  * @return A string representing the mangled literal, or null if not possible.
+-	  */
+-	private String mangleLiteral(String s) {
+-		String mangled = antlrTool.literalsPrefix;
+-		for (int i = 1; i < s.length()-1; i++) {
+-			if (!Character.isLetter(s.charAt(i)) &&
+-				 s.charAt(i) != '_') {
+-				return null;
+-			}
+-			mangled += s.charAt(i);
+-		}
+-		if ( antlrTool.upperCaseMangledLiterals ) {
+-			mangled = mangled.toUpperCase();
+-		}
+-		return mangled;
+-	}
+-	/** Map an identifier to it's corresponding tree-node variable.
+-	  * This is context-sensitive, depending on the rule and alternative
+-	  * being generated
+-	  * @param idParam The identifier name to map
+-	  * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
+-	  */
+-	public String mapTreeId(String idParam, ActionTransInfo transInfo) {
+-		// if not in an action of a rule, nothing to map.
+-		if ( currentRule==null ) return idParam;
+-//		System.out.print("mapTreeId: "+idParam+" "+currentRule.getRuleName()+" ");
+-
+-		boolean in_var = false;
+-		String id = idParam;
+-		if (grammar instanceof TreeWalkerGrammar)
+-		{
+-//			RK: hmmm this seems odd. If buildAST is false it translates
+-//			#rulename_in to 'rulename_in' else to 'rulename_AST_in' which indeed
+-//			exists. disabling for now.. and hope it doesn't blow up somewhere.
+-			if ( !grammar.buildAST )
+-			{
+-				in_var = true;
+-//				System.out.println("in_var1");
+-			}
+-			// If the id ends with "_in", then map it to the input variable
+-//			else
+-			if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3)
+-			{
+-				// Strip off the "_in"
+-				id = id.substring(0, id.length()-3);
+-				in_var = true;
+-//				System.out.println("in_var2");
+-			}
+-		}
+-//		System.out.print(in_var+"\t");
+-
+-		// Check the rule labels.  If id is a label, then the output
+-		// variable is label_AST, and the input variable is plain label.
+-		for (int i = 0; i < currentRule.labeledElements.size(); i++)
+-		{
+-			AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
+-			if (elt.getLabel().equals(id))
+-			{
+-//				if( in_var )
+-//					System.out.println("returning (vec) "+(in_var ? id : id + "_AST"));
+-				return in_var ? id : id + "_AST";
+-			}
+-		}
+-
+-		// Failing that, check the id-to-variable map for the alternative.
+-		// If the id is in the map, then output variable is the name in the
+-		// map, and input variable is name_in
+-		String s = (String)treeVariableMap.get(id);
+-		if (s != null)
+-		{
+-			if (s == NONUNIQUE)
+-			{
+-//				if( in_var )
+-//					System.out.println("returning null (nonunique)");
+-				// There is more than one element with this id
+-				antlrTool.error("Ambiguous reference to AST element "+id+
+-								" in rule "+currentRule.getRuleName());
+-				return null;
+-			}
+-			else if (s.equals(currentRule.getRuleName()))
+-			{
+-				// a recursive call to the enclosing rule is
+-				// ambiguous with the rule itself.
+-//				if( in_var )
+-//					System.out.println("returning null (rulename)");
+-				antlrTool.error("Ambiguous reference to AST element "+id+
+-								" in rule "+currentRule.getRuleName());
+-				return null;
+-			}
+-			else
+-			{
+-//				if( in_var )
+-//				System.out.println("returning "+(in_var?s+"_in":s));
+-				return in_var ? s + "_in" : s;
+-			}
+-		}
+-
+-//		System.out.println("Last check: "+id+" == "+currentRule.getRuleName());
+-		// Failing that, check the rule name itself.  Output variable
+-		// is rule_AST; input variable is rule_AST_in (treeparsers).
+-		if( id.equals(currentRule.getRuleName()) )
+-		{
+-			String r = in_var ? id + "_AST_in" : id + "_AST";
+-			if ( transInfo!=null ) {
+-				if ( !in_var ) {
+-					transInfo.refRuleRoot = r;
+-				}
+-			}
+-//			if( in_var )
+-//				System.out.println("returning (r) "+r);
+-			return r;
+-		}
+-		else
+-		{
+-//			if( in_var )
+-//			System.out.println("returning (last) "+id);
+-			// id does not map to anything -- return itself.
+-			return id;
+-		}
+-	}
+-	/** Given an element and the name of an associated AST variable,
+-	  * create a mapping between the element "name" and the variable name.
+-	  */
+-	private void mapTreeVariable(AlternativeElement e, String name)
+-	{
+-		// For tree elements, defer to the root
+-		if (e instanceof TreeElement) {
+-			mapTreeVariable( ((TreeElement)e).root, name);
+-			return;
+-		}
+-
+-		// Determine the name of the element, if any, for mapping purposes
+-		String elName = null;
+-
+-		// Don't map labeled items
+-		if (e.getLabel() == null) {
+-			if (e instanceof TokenRefElement) {
+-				// use the token id
+-				elName = ((TokenRefElement)e).atomText;
+-			}
+-			else if (e instanceof RuleRefElement) {
+-				// use the rule name
+-				elName = ((RuleRefElement)e).targetRule;
+-			}
+-		}
+-		// Add the element to the tree variable map if it has a name
+-		if (elName != null) {
+-			if (treeVariableMap.get(elName) != null) {
+-				// Name is already in the map -- mark it as duplicate
+-				treeVariableMap.remove(elName);
+-				treeVariableMap.put(elName, NONUNIQUE);
+-			}
+-			else {
+-				treeVariableMap.put(elName, name);
+-			}
+-		}
+-	}
+-
+-	/** Lexically process tree-specifiers in the action.
+-	 * This will replace #id and #(...) with the appropriate
+-	 * function calls and/or variables.
+-	 */
+-	protected String processActionForSpecialSymbols(String actionStr,
+-																	int line,
+-																	RuleBlock currentRule,
+-																	ActionTransInfo tInfo)
+-	{
+-		if ( actionStr==null || actionStr.length()==0 )
+-			return null;
+-
+-		// The action trans info tells us (at the moment) whether an
+-		// assignment was done to the rule's tree root.
+-		if (grammar==null)
+-			return actionStr;
+-
+-		if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
+-			 grammar instanceof TreeWalkerGrammar ||
+-			 ((grammar instanceof LexerGrammar ||
+-				grammar instanceof ParserGrammar)
+-			  	&& actionStr.indexOf('$') != -1) )
+-		{
+-			// Create a lexer to read an action and return the translated version
+-			persistence.antlr.actions.cpp.ActionLexer lexer =
+-				new persistence.antlr.actions.cpp.ActionLexer(actionStr, currentRule, this, tInfo);
+-			lexer.setLineOffset(line);
+-			lexer.setFilename(grammar.getFilename());
+-			lexer.setTool(antlrTool);
+-
+-			try {
+-				lexer.mACTION(true);
+-				actionStr = lexer.getTokenObject().getText();
+-				// System.out.println("action translated: "+actionStr);
+-				// System.out.println("trans info is "+tInfo);
+-			}
+-			catch (RecognitionException ex) {
+-				lexer.reportError(ex);
+-				return actionStr;
+-			}
+-			catch (TokenStreamException tex) {
+-				antlrTool.panic("Error reading action:"+actionStr);
+-				return actionStr;
+-			}
+-			catch (CharStreamException io) {
+-				antlrTool.panic("Error reading action:"+actionStr);
+-				return actionStr;
+-			}
+-		}
+-		return actionStr;
+-	}
+-
+-	private String fixNameSpaceOption( String ns )
+-	{
+-		ns = StringUtils.stripFrontBack(ns,"\"","\"");
+-		if( ns.length() > 2 &&
+-			 !ns.substring(ns.length()-2, ns.length()).equals("::") )
+-		ns += "::";
+-		return ns;
+-	}
+-
+-	private void setupGrammarParameters(Grammar g) {
+-		if (g instanceof ParserGrammar ||
+-			 g instanceof LexerGrammar  ||
+-			 g instanceof TreeWalkerGrammar
+-			)
+-		{
+-			/* RK: options also have to be added to Grammar.java and for options
+-			 * on the file level entries have to be defined in
+-			 * DefineGrammarSymbols.java and passed around via 'globals' in
+-			 * antlrTool.java
+-			 */
+-			if( antlrTool.nameSpace != null )
+-				nameSpace = antlrTool.nameSpace;
+-
+-			if( antlrTool.namespaceStd != null )
+-				namespaceStd = fixNameSpaceOption(antlrTool.namespaceStd);
+-
+-			if( antlrTool.namespaceAntlr != null )
+-				namespaceAntlr = fixNameSpaceOption(antlrTool.namespaceAntlr);
+-
+-			genHashLines = antlrTool.genHashLines;
+-
+-			/* let grammar level options override filelevel ones...
+-			 */
+-			if( g.hasOption("namespace") ) {
+-				Token t = g.getOption("namespace");
+-				if( t != null ) {
+-					nameSpace = new NameSpace(t.getText());
+-				}
+-			}
+-			if( g.hasOption("namespaceAntlr") ) {
+-				Token t = g.getOption("namespaceAntlr");
+-				if( t != null ) {
+-					String ns = StringUtils.stripFrontBack(t.getText(),"\"","\"");
+-					if ( ns != null ) {
+-						if( ns.length() > 2 &&
+-							 !ns.substring(ns.length()-2, ns.length()).equals("::") )
+-							ns += "::";
+-						namespaceAntlr = ns;
+-					}
+-				}
+-			}
+-			if( g.hasOption("namespaceStd") ) {
+-				Token t = g.getOption("namespaceStd");
+-				if( t != null ) {
+-					String ns = StringUtils.stripFrontBack(t.getText(),"\"","\"");
+-					if ( ns != null ) {
+-						if( ns.length() > 2 &&
+-							 !ns.substring(ns.length()-2, ns.length()).equals("::") )
+-							ns += "::";
+-						namespaceStd = ns;
+-					}
+-				}
+-			}
+-			if( g.hasOption("genHashLines") ) {
+-				Token t = g.getOption("genHashLines");
+-				if( t != null ) {
+-					String val = StringUtils.stripFrontBack(t.getText(),"\"","\"");
+-					genHashLines = val.equals("true");
+-				}
+-			}
+-			noConstructors = antlrTool.noConstructors;	// get the default
+-			if( g.hasOption("noConstructors") ) {
+-				Token t = g.getOption("noConstructors");
+-				if( (t != null) && !(t.getText().equals("true") || t.getText().equals("false")))
+-					antlrTool.error("noConstructors option must be true or false", antlrTool.getGrammarFile(), t.getLine(), t.getColumn());
+-				noConstructors = t.getText().equals("true");
+-			}
+-		}
+-		if (g instanceof ParserGrammar) {
+-			labeledElementASTType = namespaceAntlr+"RefAST";
+-			labeledElementASTInit = namespaceAntlr+"nullAST";
+-			if ( g.hasOption("ASTLabelType") ) {
+-				Token tsuffix = g.getOption("ASTLabelType");
+-				if ( tsuffix != null ) {
+-					String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-					if ( suffix != null ) {
+-						usingCustomAST = true;
+-						labeledElementASTType = suffix;
+-						labeledElementASTInit = suffix+"("+namespaceAntlr+"nullAST)";
+-					}
+-				}
+-			}
+-			labeledElementType = namespaceAntlr+"RefToken ";
+-			labeledElementInit = namespaceAntlr+"nullToken";
+-			commonExtraArgs = "";
+-			commonExtraParams = "";
+-			commonLocalVars = "";
+-			lt1Value = "LT(1)";
+-			exceptionThrown = namespaceAntlr+"RecognitionException";
+-			throwNoViable = "throw "+namespaceAntlr+"NoViableAltException(LT(1), getFilename());";
+-		}
+-		else if (g instanceof LexerGrammar) {
+-			labeledElementType = "char ";
+-			labeledElementInit = "'\\0'";
+-			commonExtraArgs = "";
+-			commonExtraParams = "bool _createToken";
+-			commonLocalVars = "int _ttype; "+namespaceAntlr+"RefToken _token; int _begin=text.length();";
+-			lt1Value = "LA(1)";
+-			exceptionThrown = namespaceAntlr+"RecognitionException";
+-			throwNoViable = "throw "+namespaceAntlr+"NoViableAltForCharException(LA(1), getFilename(), getLine(), getColumn());";
+-		}
+-		else if (g instanceof TreeWalkerGrammar) {
+-			labeledElementInit = namespaceAntlr+"nullAST";
+-			labeledElementASTInit = namespaceAntlr+"nullAST";
+-			labeledElementASTType = namespaceAntlr+"RefAST";
+-			labeledElementType = namespaceAntlr+"RefAST";
+-			commonExtraParams = namespaceAntlr+"RefAST _t";
+-			throwNoViable = "throw "+namespaceAntlr+"NoViableAltException(_t);";
+-			lt1Value = "_t";
+-			if ( g.hasOption("ASTLabelType") ) {
+-				Token tsuffix = g.getOption("ASTLabelType");
+-				if ( tsuffix != null ) {
+-					String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-					if ( suffix != null ) {
+-						usingCustomAST = true;
+-						labeledElementASTType = suffix;
+-						labeledElementType = suffix;
+-						labeledElementInit = suffix+"("+namespaceAntlr+"nullAST)";
+-						labeledElementASTInit = labeledElementInit;
+-						commonExtraParams = suffix+" _t";
+-						throwNoViable = "throw "+namespaceAntlr+"NoViableAltException("+namespaceAntlr+"RefAST(_t));";
+-						lt1Value = "_t";
+-					}
+-				}
+-			}
+-			if ( !g.hasOption("ASTLabelType") ) {
+-				g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,namespaceAntlr+"RefAST"));
+-			}
+-			commonExtraArgs = "_t";
+-			commonLocalVars = "";
+-			exceptionThrown = namespaceAntlr+"RecognitionException";
+-		}
+-		else {
+-			antlrTool.panic("Unknown grammar type");
+-		}
+-	}
+-	// Convert a char or string constant to something C++ likes and
+-	// check wether it's in range for the current charvocab size.
+-	private String normalizeStringOrChar(String text) {
+-		// check to see if the text is a single character
+-		if (text.startsWith("'")) {
+-			// assume it also ends with '
+-
+-			return charFormatter.literalChar(ANTLRLexer.tokenTypeForCharLiteral(text));
+-		}
+-		else
+-		{
+-			// must be string literal strip of the quotes so
+-			// they won't get quoted
+-			return "\""+charFormatter.escapeString(StringUtils.stripFrontBack(text,"\"","\""))+"\"";
+-		}
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CSharpBlockFinishingInfo.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpBlockFinishingInfo.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CSharpBlockFinishingInfo.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpBlockFinishingInfo.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,40 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-//
+-// ANTLR C# Code Generator by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com
+-//
+-
+-class CSharpBlockFinishingInfo 
+-{
+-	String postscript;		// what to generate to terminate block
+-	boolean generatedSwitch;// did block finish with "default:" of switch?
+-	boolean generatedAnIf;
+-	
+-	/** When generating an if or switch, end-of-token lookahead sets
+-	 *  will become the else or default clause, don't generate an
+-	 *  error clause in this case.
+-	 */
+-	boolean needAnErrorClause;
+-
+-
+-	public CSharpBlockFinishingInfo() 
+-	{
+-		postscript=null;
+-		generatedSwitch=generatedSwitch = false;
+-		needAnErrorClause = true;
+-	}
+-	
+-	public CSharpBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) 
+-	{
+-		postscript = ps;
+-		generatedSwitch = genS;
+-		this.generatedAnIf = generatedAnIf;
+-		needAnErrorClause = n;
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CSharpCharFormatter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpCharFormatter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CSharpCharFormatter.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpCharFormatter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,99 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-//
+-// ANTLR C# Code Generator by Kunle Odutola : kunle UNDERSCORE odutola AT hotmail DOT com
+-//
+-
+-class CSharpCharFormatter implements CharFormatter {
+-
+-
+-    /** Given a character value, return a string representing the character
+-     * that can be embedded inside a string literal or character literal
+-     * This works for Java/C/C++ code-generation and languages with compatible
+-     * special-character-escapment.
+-     * Code-generators for languages should override this method.
+-     * @param c   The character of interest.
+-     * @param forCharLiteral  true to escape for char literal, false for string literal
+-     */
+-    public String escapeChar(int c, boolean forCharLiteral) {
+-		switch (c) 
+-		{
+-			    //		case GrammarAnalyzer.EPSILON_TYPE : return "<end-of-token>";
+-			case '\n' : return "\\n";
+-			case '\t' : return "\\t";
+-			case '\r' : return "\\r";
+-			case '\\' : return "\\\\";
+-			case '\'' : return forCharLiteral ? "\\'" : "'";
+-			case '"' :  return forCharLiteral ? "\"" : "\\\"";
+-			default :
+-			    if ( c<' '||c>126 ) 
+-				{
+-		        	if ( ( 0x0000 <= c ) && ( c <= 0x000F ) ) 
+-					{
+-						return "\\u000" + Integer.toString(c,16);				
+-					}
+-					else if ( ( 0x0010 <= c ) && ( c <= 0x00FF ) ) 
+-					{
+-						return "\\u00" + Integer.toString(c,16);
+-					}
+-					else if ( ( 0x0100 <= c ) && ( c <= 0x0FFF )) 
+-					{
+-		            	return "\\u0" + Integer.toString(c,16);
+-					}
+-					else 
+-					{
+-		            	return "\\u" + Integer.toString(c,16);
+-					}
+-			    }
+-			    else 
+-				{
+-					return String.valueOf((char)c);
+-			    }
+-		}
+-    }
+-	
+-	
+-    /** Converts a String into a representation that can be use as a literal
+-	 * when surrounded by double-quotes.
+-	 * @param s The String to be changed into a literal
+-	 */
+-    public String escapeString(String s)
+-    {
+-		String retval = new String();
+-		for (int i = 0; i < s.length(); i++)
+-	    {
+-			retval += escapeChar(s.charAt(i), false);
+-	    }
+-		return retval;
+-    }
+-	
+-	
+-    /** Given a character value, return a string representing the character
+-	 * literal that can be recognized by the target language compiler.
+-	 * This works for languages that use single-quotes for character literals.
+-	 * Code-generators for languages should override this method.
+-	 * @param c   The character of interest.
+-	 */
+-    public String literalChar(int c) 
+-	{
+-		return "'"  + escapeChar(c, true) + "'";
+-    }
+-	
+-	
+-    /** Converts a String into a string literal
+-	 * This works for languages that use double-quotes for string literals.
+-	 * Code-generators for languages should override this method.
+-	 * @param s The String to be changed into a literal
+-	 */
+-    public String literalString(String s)
+-    {
+-		//return "\"" + escapeString(s) + "\"";
+-		return "@\"\"\"" + escapeString(s) + "\"\"\"";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CSharpCodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpCodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CSharpCodeGenerator.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpCodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,4023 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-//
+-// ANTLR C# Code Generator by Micheal Jordan
+-//                            Kunle Odutola       : kunle UNDERSCORE odutola AT hotmail DOT com
+-//                            Anthony Oguntimehin
+-//
+-// With many thanks to Eric V. Smith from the ANTLR list.
+-//
+-
+-// HISTORY:
+-//
+-// 17-May-2002 kunle    Fixed bug in OctalToUnicode() - was processing non-Octal escape sequences
+-//                      Also added namespace support based on Cpp version.
+-// 07-Jun-2002 kunle    Added Scott Ellis's _saveIndex creation optimizations
+-// 09-Sep-2002 richardN Richard Ney's bug-fix for literals table construction.
+-//                      [ Hashtable ctor needed instance of hash code provider not it's class name. ]
+-// 17-Sep-2002 kunle &  Added all Token ID definitions as data member of every Lexer/Parser/TreeParser
+-//             AOg      [ A by-product of problem-solving phase of the hetero-AST changes below
+-//                        but, it breaks nothing and restores "normal" ANTLR codegen behaviour. ]
+-// 19-Oct-2002 kunle &  Completed the work required to support heterogenous ASTs (many changes)
+-//             AOg   &
+-//             michealj
+-// 14-Nov-2002 michealj Added "initializeASTFactory()" to support flexible ASTFactory initialization.
+-//						[ Thanks to Ric Klaren - for suggesting it and implementing it for Cpp. ]
+-// 18-Nov-2002 kunle    Added fix to make xx_tokenSet_xx names CLS compliant.
+-// 01-Dec-2002 richardN Patch to reduce "unreachable code" warnings
+-// 01-Dec-2002 richardN Fix to generate correct TreeParser token-type classnames.
+-// 12-Jan-2002 kunle  & Generated Lexers, Parsers and TreeParsers now support ANTLR's tracing option.
+-//             michealj
+-// 12-Jan-2003 kunle    Fixed issue where initializeASTFactory() was generated when "buildAST=false"
+-// 14-Jan-2003 AOg      initializeASTFactory(AST factory) method was modifying the Parser's "astFactory"
+-//                      member rather than it's own "factory" parameter. Fixed.
+-// 18-Jan-2003 kunle  & Fixed reported issues with ASTFactory create() calls for hetero ASTs
+-//             michealj - code generated for LEXER token with hetero-AST option specified does not compile
+-//                      - code generated for imaginary tokens with hetero-AST option specified uses default AST type
+-//                      - code generated for per-TokenRef hetero-AST option specified does not compile
+-// 18-Jan-2003 kunle    initializeASTFactory(AST) method is now a static public member
+-// 18-May-2003 kunle    Changes to address outstanding reported issues::
+-//                      - Fixed reported issues with support for case-sensitive literals
+-//                      - persistence.antlr.SemanticException now imported for all Lexers.
+-//                        [ This exception is thrown on predicate failure. ]
+-// 12-Jan-2004 kunle    Added fix for reported issue with un-compileable generated lexers
+-//
+-//
+-
+-import java.util.Enumeration;
+-import java.util.Hashtable;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-import java.io.PrintWriter; //SAS: changed for proper text file io
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-/** Generates MyParser.cs, MyLexer.cs and MyParserTokenTypes.cs */
+-public class CSharpCodeGenerator extends CodeGenerator {
+-    // non-zero if inside syntactic predicate generation
+-    protected int syntacticPredLevel = 0;
+-
+-	// Are we generating ASTs (for parsers and tree parsers) right now?
+-	protected boolean genAST = false;
+-
+-    // Are we saving the text consumed (for lexers) right now?
+-    protected boolean saveText = false;
+-
+-    // Grammar parameters set up to handle different grammar classes.
+-    // These are used to get instanceof tests out of code generation
+-	boolean usingCustomAST = false;
+-	String labeledElementType;
+-    String labeledElementASTType;
+-	String labeledElementInit;
+-	String commonExtraArgs;
+-	String commonExtraParams;
+-	String commonLocalVars;
+-	String lt1Value;
+-	String exceptionThrown;
+-	String throwNoViable;
+-
+-	// Tracks the rule being generated.  Used for mapTreeId
+-    RuleBlock currentRule;
+-	// Tracks the rule or labeled subrule being generated.  Used for AST generation.
+-    String currentASTResult;
+-
+-    /** Mapping between the ids used in the current alt, and the
+-     * names of variables used to represent their AST values.
+-     */
+-    Hashtable treeVariableMap = new Hashtable();
+-
+-    /** Used to keep track of which AST variables have been defined in a rule
+-     * (except for the #rule_name and #rule_name_in var's
+-     */
+-    Hashtable declaredASTVariables = new Hashtable();
+-
+-    /* Count of unnamed generated variables */
+-    int astVarNumber = 1;
+-
+-    /** Special value used to mark duplicate in treeVariableMap */
+-    protected static final String NONUNIQUE = new String();
+-
+-    public static final int caseSizeThreshold = 127; // ascii is max
+-
+-    private Vector semPreds;
+-	// Used to keep track of which (heterogeneous AST types are used)
+-	// which need to be set in the ASTFactory of the generated parser
+-	private java.util.Vector astTypes;
+-
+-	private static CSharpNameSpace nameSpace = null;
+-
+-	// _saveIndex creation optimization -- don't create it unless we need to use it
+-	boolean bSaveIndexCreated = false;
+-
+-
+-    /** Create a CSharp code-generator using the given Grammar.
+-     * The caller must still call setTool, setBehavior, and setAnalyzer
+-     * before generating code.
+-     */
+-	public CSharpCodeGenerator() {
+-		super();
+-		charFormatter = new CSharpCharFormatter();
+-	}
+-
+-	/** Adds a semantic predicate string to the sem pred vector
+-	    These strings will be used to build an array of sem pred names
+-	    when building a debugging parser.  This method should only be
+-	    called when the debug option is specified
+-	 */
+-	protected int addSemPred(String predicate) {
+-		semPreds.appendElement(predicate);
+-		return semPreds.size()-1;
+-	}
+-
+-	public void exitIfError()
+-	{
+-		if (antlrTool.hasError())
+-		{
+-			antlrTool.fatalError("Exiting due to errors.");
+-		}
+-	}
+-
+-	/**Generate the parser, lexer, treeparser, and token types in CSharp */
+-	public void gen() {
+-		// Do the code generation
+-		try {
+-			// Loop over all grammars
+-			Enumeration grammarIter = behavior.grammars.elements();
+-			while (grammarIter.hasMoreElements()) {
+-				Grammar g = (Grammar)grammarIter.nextElement();
+-				// Connect all the components to each other
+-				g.setGrammarAnalyzer(analyzer);
+-				g.setCodeGenerator(this);
+-				analyzer.setGrammar(g);
+-				// To get right overloading behavior across heterogeneous grammars
+-				setupGrammarParameters(g);
+-				g.generate();
+-				exitIfError();
+-			}
+-
+-			// Loop over all token managers (some of which are lexers)
+-			Enumeration tmIter = behavior.tokenManagers.elements();
+-			while (tmIter.hasMoreElements()) {
+-				TokenManager tm = (TokenManager)tmIter.nextElement();
+-				if (!tm.isReadOnly()) {
+-					// Write the token manager tokens as CSharp
+-					// this must appear before genTokenInterchange so that
+-					// labels are set on string literals
+-					genTokenTypes(tm);
+-					// Write the token manager tokens as plain text
+-					genTokenInterchange(tm);
+-				}
+-				exitIfError();
+-			}
+-		}
+-		catch (IOException e) {
+-			antlrTool.reportException(e, null);
+-		}
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param blk The {...} action to generate
+-	 */
+-	public void gen(ActionElement action) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genAction("+action+")");
+-		if ( action.isSemPred ) {
+-			genSemPred(action.actionText, action.line);
+-		}
+-		else {
+-			if ( grammar.hasSyntacticPredicate ) {
+-				println("if (0==inputState.guessing)");
+-				println("{");
+-				tabs++;
+-			}
+-
+-			ActionTransInfo tInfo = new ActionTransInfo();
+-			String actionStr = processActionForSpecialSymbols(action.actionText,
+-																			 action.getLine(),
+-																			 currentRule, tInfo);
+-
+-			if ( tInfo.refRuleRoot!=null ) {
+-				// Somebody referenced "#rule", make sure translated var is valid
+-				// assignment to #rule is left as a ref also, meaning that assignments
+-				// with no other refs like "#rule = foo();" still forces this code to be
+-				// generated (unnecessarily).
+-				println(tInfo.refRuleRoot + " = ("+labeledElementASTType+")currentAST.root;");
+-			}
+-
+-			// dump the translated action
+-			printAction(actionStr);
+-
+-			if ( tInfo.assignToRoot ) {
+-				// Somebody did a "#rule=", reset internal currentAST.root
+-				println("currentAST.root = "+tInfo.refRuleRoot+";");
+-				// reset the child pointer too to be last sibling in sibling list
+-				println("if ( (null != "+tInfo.refRuleRoot+") && (null != "+tInfo.refRuleRoot+".getFirstChild()) )");
+-				tabs++;
+-				println("currentAST.child = "+tInfo.refRuleRoot+".getFirstChild();");
+-				tabs--;
+-				println("else");
+-				tabs++;
+-				println("currentAST.child = "+tInfo.refRuleRoot+";");
+-				tabs--;
+-				println("currentAST.advanceChildToEnd();");
+-			}
+-
+-			if ( grammar.hasSyntacticPredicate ) {
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param blk The "x|y|z|..." block to generate
+-	 */
+-	public void gen(AlternativeBlock blk) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("gen("+blk+")");
+-		println("{");
+-		tabs++;
+-
+-		genBlockPreamble(blk);
+-		genBlockInitAction(blk);
+-
+-		// Tell AST generation to build subrule result
+-		String saveCurrentASTResult = currentASTResult;
+-		if (blk.getLabel() != null) {
+-			currentASTResult = blk.getLabel();
+-		}
+-
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-		CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
+-		genBlockFinish(howToFinish, throwNoViable);
+-
+-		tabs--;
+-		println("}");
+-
+-		// Restore previous AST generation
+-		currentASTResult = saveCurrentASTResult;
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The block-end element to generate.  Block-end
+-	 * elements are synthesized by the grammar parser to represent
+-	 * the end of a block.
+-	 */
+-	public void gen(BlockEndElement end) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genRuleEnd("+end+")");
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The character literal reference to generate
+-	 */
+-	public void gen(CharLiteralElement atom) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genChar("+atom+")");
+-
+-		if ( atom.getLabel()!=null ) {
+-			println(atom.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		boolean oldsaveText = saveText;
+-		saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
+-		genMatch(atom);
+-		saveText = oldsaveText;
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The character-range reference to generate
+-	 */
+-	public void gen(CharRangeElement r) {
+-		if ( r.getLabel()!=null  && syntacticPredLevel == 0) {
+-			println(r.getLabel() + " = " + lt1Value + ";");
+-		}
+-      boolean flag = ( grammar instanceof LexerGrammar &&
+-            (!saveText || (r.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) );
+-      if (flag)
+-          println("_saveIndex = text.Length;");
+-
+-      println("matchRange("+OctalToUnicode(r.beginText)+","+OctalToUnicode(r.endText)+");");
+-
+-      if (flag)
+-          println("text.Length = _saveIndex;");
+-	}
+-	/** Generate the lexer CSharp file */
+-	public  void gen(LexerGrammar g) throws IOException {
+-		// If debugging, create a new sempred vector for this grammar
+-		if (g.debuggingOutput)
+-			semPreds = new Vector();
+-
+-		setGrammar(g);
+-		if (!(grammar instanceof LexerGrammar)) {
+-			antlrTool.panic("Internal error generating lexer");
+-		}
+-		genBody(g);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The (...)+ block to generate
+-	 */
+-	public void gen(OneOrMoreBlock blk) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("gen+("+blk+")");
+-		String label;
+-		String cnt;
+-		println("{ // ( ... )+");
+-		genBlockPreamble(blk);
+-		if ( blk.getLabel() != null ) {
+-			cnt = "_cnt_"+blk.getLabel();
+-		}
+-		else {
+-			cnt = "_cnt" + blk.ID;
+-		}
+-		println("int "+cnt+"=0;");
+-		if ( blk.getLabel() != null ) {
+-			label = blk.getLabel();
+-		}
+-		else {
+-			label = "_loop" + blk.ID;
+-		}
+-
+-		println("for (;;)");
+-		println("{");
+-		tabs++;
+-      // generate the init action for ()+ ()* inside the loop
+-      // this allows us to do usefull EOF checking...
+-      genBlockInitAction(blk);
+-
+-		// Tell AST generation to build subrule result
+-		String saveCurrentASTResult = currentASTResult;
+-		if (blk.getLabel() != null) {
+-			currentASTResult = blk.getLabel();
+-		}
+-
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-		// generate exit test if greedy set to false
+-		// and an alt is ambiguous with exit branch
+-		// or when lookahead derived purely from end-of-file
+-		// Lookahead analysis stops when end-of-file is hit,
+-		// returning set {epsilon}.  Since {epsilon} is not
+-		// ambig with any real tokens, no error is reported
+-		// by deterministic() routines and we have to check
+-		// for the case where the lookahead depth didn't get
+-		// set to NONDETERMINISTIC (this only happens when the
+-		// FOLLOW contains real atoms + epsilon).
+-		boolean generateNonGreedyExitPath = false;
+-		int nonGreedyExitDepth = grammar.maxk;
+-
+-		if ( !blk.greedy &&
+-			blk.exitLookaheadDepth<=grammar.maxk &&
+-			blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
+-		{
+-			generateNonGreedyExitPath = true;
+-			nonGreedyExitDepth = blk.exitLookaheadDepth;
+-		}
+-		else if ( !blk.greedy &&
+-			blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
+-		{
+-			generateNonGreedyExitPath = true;
+-		}
+-
+-		// generate exit test if greedy set to false
+-		// and an alt is ambiguous with exit branch
+-		if ( generateNonGreedyExitPath ) {
+-			if ( DEBUG_CODE_GENERATOR ) {
+-				System.out.println("nongreedy (...)+ loop; exit depth is "+
+-					blk.exitLookaheadDepth);
+-			}
+-			String predictExit =
+-				getLookaheadTestExpression(blk.exitCache,
+-				nonGreedyExitDepth);
+-			println("// nongreedy exit test");
+-			println("if (("+cnt+" >= 1) && "+predictExit+") goto "+label+"_breakloop;");
+-		}
+-
+-		CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
+-		genBlockFinish(
+-			howToFinish,
+-			"if ("+cnt+" >= 1) { goto "+label+"_breakloop; } else { " + throwNoViable + "; }"
+-			);
+-
+-		println(cnt+"++;");
+-		tabs--;
+-		println("}");
+-		_print(label + "_breakloop:");
+-		println(";");
+-		println("}    // ( ... )+");
+-
+-		// Restore previous AST generation
+-		currentASTResult = saveCurrentASTResult;
+-	}
+-	/** Generate the parser CSharp file */
+-	public void gen(ParserGrammar g) throws IOException {
+-
+-		// if debugging, set up a new vector to keep track of sempred
+-		//   strings for this grammar
+-		if (g.debuggingOutput)
+-			semPreds = new Vector();
+-
+-		setGrammar(g);
+-		if (!(grammar instanceof ParserGrammar)) {
+-			antlrTool.panic("Internal error generating parser");
+-		}
+-		genBody(g);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The rule-reference to generate
+-	 */
+-	public void gen(RuleRefElement rr)
+-	{
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genRR("+rr+")");
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-		if (rs == null || !rs.isDefined())
+-		{
+-			// Is this redundant???
+-			antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			return;
+-		}
+-		if (!(rs instanceof RuleSymbol))
+-		{
+-			// Is this redundant???
+-			antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			return;
+-		}
+-
+-		genErrorTryForElement(rr);
+-
+-		// AST value for labeled rule refs in tree walker.
+-		// This is not AST construction;  it is just the input tree node value.
+-		if ( grammar instanceof TreeWalkerGrammar &&
+-			rr.getLabel() != null &&
+-			syntacticPredLevel == 0 )
+-		{
+-			println(rr.getLabel() + " = _t==ASTNULL ? null : "+lt1Value+";");
+-		}
+-
+-		// if in lexer and ! on rule ref or alt or rule, save buffer index to kill later
+-        if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG))
+-		{
+-			declareSaveIndexVariableIfNeeded();
+-			println("_saveIndex = text.Length;");
+-		}
+-
+-		// Process return value assignment if any
+-		printTabs();
+-		if (rr.idAssign != null)
+-		{
+-			// Warn if the rule has no return type
+-			if (rs.block.returnAction == null)
+-			{
+-				antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-			_print(rr.idAssign + "=");
+-		} else {
+-			// Warn about return value if any, but not inside syntactic predicate
+-			if ( !(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null)
+-			{
+-				antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-		}
+-
+-		// Call the rule
+-		GenRuleInvocation(rr);
+-
+-		// if in lexer and ! on element or alt or rule, save buffer index to kill later
+-		if ( grammar instanceof LexerGrammar && (!saveText||rr.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-			declareSaveIndexVariableIfNeeded();
+-			println("text.Length = _saveIndex;");
+-		}
+-
+-		// if not in a syntactic predicate
+-		if (syntacticPredLevel == 0)
+-		{
+-			boolean doNoGuessTest = (
+-				grammar.hasSyntacticPredicate &&
+-				(
+-				grammar.buildAST && rr.getLabel() != null ||
+-				(genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
+-				)
+-				);
+-			if (doNoGuessTest) 	{
+-				println("if (0 == inputState.guessing)");
+-				println("{");
+-				tabs++;
+-			}
+-
+-			if (grammar.buildAST && rr.getLabel() != null)
+-			{
+-				// always gen variable for rule return on labeled rules
+-				println(rr.getLabel() + "_AST = ("+labeledElementASTType+")returnAST;");
+-			}
+-			if (genAST)
+-			{
+-				switch (rr.getAutoGenType())
+-				{
+-				case GrammarElement.AUTO_GEN_NONE:
+-					if( usingCustomAST )
+-						println("astFactory.addASTChild(currentAST, (AST)returnAST);");
+-					else
+-						println("astFactory.addASTChild(currentAST, returnAST);");
+-					break;
+-				case GrammarElement.AUTO_GEN_CARET:
+-					antlrTool.error("Internal: encountered ^ after rule reference");
+-					break;
+-				default:
+-					break;
+-				}
+-			}
+-
+-			// if a lexer and labeled, Token label defined at rule level, just set it here
+-			if ( grammar instanceof LexerGrammar && rr.getLabel() != null )
+-			{
+-				println(rr.getLabel()+" = returnToken_;");
+-			}
+-
+-			if (doNoGuessTest)
+-			{
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-		genErrorCatchForElement(rr);
+-	}
+-	/** Generate code for the given grammar element.
+-	 * @param blk The string-literal reference to generate
+-	 */
+-	public void gen(StringLiteralElement atom) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genString("+atom+")");
+-
+-		// Variable declarations for labeled elements
+-		if (atom.getLabel()!=null && syntacticPredLevel == 0) {
+-			println(atom.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(atom);
+-
+-		// is there a bang on the literal?
+-		boolean oldsaveText = saveText;
+-		saveText = saveText && atom.getAutoGenType()==GrammarElement.AUTO_GEN_NONE;
+-
+-		// matching
+-		genMatch(atom);
+-
+-			saveText = oldsaveText;
+-
+-		// tack on tree cursor motion if doing a tree walker
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("_t = _t.getNextSibling();");
+-		}
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param blk The token-range reference to generate
+-	 */
+-	public void gen(TokenRangeElement r) {
+-		genErrorTryForElement(r);
+-		if ( r.getLabel()!=null  && syntacticPredLevel == 0) {
+-			println(r.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(r);
+-
+-		// match
+-		println("matchRange("+OctalToUnicode(r.beginText)+","+OctalToUnicode(r.endText)+");");
+-		genErrorCatchForElement(r);
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param blk The token-reference to generate
+-	 */
+-	public void gen(TokenRefElement atom) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genTokenRef("+atom+")");
+-		if ( grammar instanceof LexerGrammar ) {
+-			antlrTool.panic("Token reference found in lexer");
+-		}
+-		genErrorTryForElement(atom);
+-		// Assign Token value to token label variable
+-		if ( atom.getLabel()!=null && syntacticPredLevel == 0) {
+-			println(atom.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(atom);
+-		// matching
+-		genMatch(atom);
+-		genErrorCatchForElement(atom);
+-
+-		// tack on tree cursor motion if doing a tree walker
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("_t = _t.getNextSibling();");
+-		}
+-	}
+-
+-	public void gen(TreeElement t) {
+-		// save AST cursor
+-		println("AST __t" + t.ID + " = _t;");
+-
+-		// If there is a label on the root, then assign that to the variable
+-		if (t.root.getLabel() != null) {
+-			println(t.root.getLabel() + " = (ASTNULL == _t) ? null : ("+labeledElementASTType +")_t;");
+-		}
+-
+-      // check for invalid modifiers ! and ^ on tree element roots
+-      if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
+-          antlrTool.error("Suffixing a root node with '!' is not implemented",
+-                       grammar.getFilename(), t.getLine(), t.getColumn());
+-          t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
+-      }
+-      if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
+-          antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
+-                       grammar.getFilename(), t.getLine(), t.getColumn());
+-          t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
+-      }
+-
+-		// Generate AST variables
+-		genElementAST(t.root);
+-		if (grammar.buildAST) {
+-			// Save the AST construction state
+-			println("ASTPair __currentAST" + t.ID + " = currentAST.copy();");
+-			// Make the next item added a child of the TreeElement root
+-			println("currentAST.root = currentAST.child;");
+-			println("currentAST.child = null;");
+-		}
+-
+-		// match root
+-        if ( t.root instanceof WildcardElement ) {
+-            println("if (null == _t) throw new MismatchedTokenException();");
+-        }
+-        else {
+-				genMatch(t.root);
+-		}
+-		// move to list of children
+-		println("_t = _t.getFirstChild();");
+-
+-		// walk list of children, generating code for each
+-		for (int i=0; i<t.getAlternatives().size(); i++) {
+-			Alternative a = t.getAlternativeAt(i);
+-			AlternativeElement e = a.head;
+-			while ( e != null ) {
+-				e.generate();
+-				e = e.next;
+-			}
+-		}
+-
+-		if (grammar.buildAST) {
+-			// restore the AST construction state to that just after the
+-			// tree root was added
+-			println("currentAST = __currentAST" + t.ID + ";");
+-		}
+-		// restore AST cursor
+-		println("_t = __t" + t.ID + ";");
+-		// move cursor to sibling of tree just parsed
+-		println("_t = _t.getNextSibling();");
+-	}
+-	/** Generate the tree-parser CSharp file */
+-	public void gen(TreeWalkerGrammar g) throws IOException {
+-		// SAS: debugging stuff removed for now...
+-		setGrammar(g);
+-		if (!(grammar instanceof TreeWalkerGrammar)) {
+-			antlrTool.panic("Internal error generating tree-walker");
+-		}
+-		genBody(g);
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param wc The wildcard element to generate
+-	 */
+-	public void gen(WildcardElement wc) {
+-		// Variable assignment for labeled elements
+-		if (wc.getLabel()!=null && syntacticPredLevel == 0) {
+-			println(wc.getLabel() + " = " + lt1Value + ";");
+-		}
+-
+-		// AST
+-		genElementAST(wc);
+-		// Match anything but EOF
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("if (null == _t) throw new MismatchedTokenException();");
+-		}
+-		else if (grammar instanceof LexerGrammar) {
+-			if ( grammar instanceof LexerGrammar &&
+-				(!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-				declareSaveIndexVariableIfNeeded();
+-				println("_saveIndex = text.Length;");
+-			}
+-			println("matchNot(EOF/*_CHAR*/);");
+-			if ( grammar instanceof LexerGrammar &&
+-				(!saveText||wc.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-				declareSaveIndexVariableIfNeeded();
+-				println("text.Length = _saveIndex;"); // kill text atom put in buffer
+-			}
+-		}
+-		else {
+-			println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
+-		}
+-
+-		// tack on tree cursor motion if doing a tree walker
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			println("_t = _t.getNextSibling();");
+-		}
+-	}
+-
+-	/** Generate code for the given grammar element.
+-	 * @param blk The (...)* block to generate
+-	 */
+-	public void gen(ZeroOrMoreBlock blk) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("gen*("+blk+")");
+-		println("{    // ( ... )*");
+-		tabs++;
+-		genBlockPreamble(blk);
+-		String label;
+-		if ( blk.getLabel() != null ) {
+-			label = blk.getLabel();
+-		}
+-		else {
+-			label = "_loop" + blk.ID;
+-		}
+-		println("for (;;)");
+-		println("{");
+-		tabs++;
+-		// generate the init action for ()+ ()* inside the loop
+-        // this allows us to do usefull EOF checking...
+-        genBlockInitAction(blk);
+-
+-		// Tell AST generation to build subrule result
+-		String saveCurrentASTResult = currentASTResult;
+-		if (blk.getLabel() != null) {
+-			currentASTResult = blk.getLabel();
+-		}
+-
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-		// generate exit test if greedy set to false
+-		// and an alt is ambiguous with exit branch
+-		// or when lookahead derived purely from end-of-file
+-		// Lookahead analysis stops when end-of-file is hit,
+-		// returning set {epsilon}.  Since {epsilon} is not
+-		// ambig with any real tokens, no error is reported
+-		// by deterministic() routines and we have to check
+-		// for the case where the lookahead depth didn't get
+-		// set to NONDETERMINISTIC (this only happens when the
+-		// FOLLOW contains real atoms + epsilon).
+-		boolean generateNonGreedyExitPath = false;
+-		int nonGreedyExitDepth = grammar.maxk;
+-
+-		if ( !blk.greedy &&
+-			blk.exitLookaheadDepth<=grammar.maxk &&
+-			blk.exitCache[blk.exitLookaheadDepth].containsEpsilon() )
+-		{
+-			generateNonGreedyExitPath = true;
+-			nonGreedyExitDepth = blk.exitLookaheadDepth;
+-		}
+-		else if ( !blk.greedy &&
+-			blk.exitLookaheadDepth==LLkGrammarAnalyzer.NONDETERMINISTIC )
+-		{
+-			generateNonGreedyExitPath = true;
+-		}
+-		if ( generateNonGreedyExitPath ) {
+-			if ( DEBUG_CODE_GENERATOR ) {
+-				System.out.println("nongreedy (...)* loop; exit depth is "+
+-					blk.exitLookaheadDepth);
+-			}
+-			String predictExit =
+-				getLookaheadTestExpression(blk.exitCache,
+-				nonGreedyExitDepth);
+-			println("// nongreedy exit test");
+-			println("if ("+predictExit+") goto "+label+"_breakloop;");
+-		}
+-
+-		CSharpBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
+-		genBlockFinish(howToFinish, "goto " + label + "_breakloop;");
+-
+-			tabs--;
+-		println("}");
+-		_print(label+"_breakloop:");
+-		println(";");
+-		tabs--;
+-		println("}    // ( ... )*");
+-
+-		// Restore previous AST generation
+-		currentASTResult = saveCurrentASTResult;
+-	}
+-
+-	/** Generate an alternative.
+-	  * @param alt  The alternative to generate
+-	  * @param blk The block to which the alternative belongs
+-	  */
+-	protected void genAlt(Alternative alt, AlternativeBlock blk)
+-	{
+-		// Save the AST generation state, and set it to that of the alt
+-		boolean savegenAST = genAST;
+-		genAST = genAST && alt.getAutoGen();
+-
+-		boolean oldsaveTest = saveText;
+-		saveText = saveText && alt.getAutoGen();
+-
+-		// Reset the variable name map for the alternative
+-		Hashtable saveMap = treeVariableMap;
+-		treeVariableMap = new Hashtable();
+-
+-		// Generate try block around the alt for  error handling
+-		if (alt.exceptionSpec != null) {
+-			println("try        // for error handling");
+-			println("{");
+-			tabs++;
+-		}
+-
+-		AlternativeElement elem = alt.head;
+-		while ( !(elem instanceof BlockEndElement) ) {
+-			elem.generate(); // alt can begin with anything. Ask target to gen.
+-			elem = elem.next;
+-		}
+-
+-		if ( genAST)
+-		{
+-			if (blk instanceof RuleBlock)
+-			{
+-				// Set the AST return value for the rule
+-				RuleBlock rblk = (RuleBlock)blk;
+-				if( usingCustomAST )
+-				{
+-					println(rblk.getRuleName() + "_AST = ("+labeledElementASTType+")currentAST.root;");
+-				}
+-				else
+-				{
+-					println(rblk.getRuleName() + "_AST = currentAST.root;");
+-				}
+-			}
+-			else if (blk.getLabel() != null) {
+-				// ### future: also set AST value for labeled subrules.
+-				// println(blk.getLabel() + "_AST = ("+labeledElementASTType+")currentAST.root;");
+-            	antlrTool.warning("Labeled subrules not yet supported", grammar.getFilename(), blk.getLine(), blk.getColumn());
+-			}
+-		}
+-
+-		if (alt.exceptionSpec != null)
+-		{
+-			// close try block
+-			tabs--;
+-			println("}");
+-			genErrorHandler(alt.exceptionSpec);
+-		}
+-
+-		genAST = savegenAST;
+-		saveText = oldsaveTest;
+-
+-		treeVariableMap = saveMap;
+-	}
+-
+-	/** Generate all the bitsets to be used in the parser or lexer
+-	 * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
+-	 * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
+-	 * Note that most languages do not support object initialization inside a
+-	 * class definition, so other code-generators may have to separate the
+-	 * bitset declarations from the initializations (e.g., put the initializations
+-	 * in the generated constructor instead).
+-	 * @param bitsetList The list of bitsets to generate.
+-	 * @param maxVocabulary Ensure that each generated bitset can contain at least this value.
+-	 */
+-	protected void genBitsets( Vector bitsetList, int maxVocabulary ) {
+-		println("");
+-		for (int i = 0; i < bitsetList.size(); i++)
+-		{
+-			BitSet p = (BitSet)bitsetList.elementAt(i);
+-			// Ensure that generated BitSet is large enough for vocabulary
+-			p.growToInclude(maxVocabulary);
+-            genBitSet(p, i);
+-        }
+-    }
+-
+-    /** Do something simple like:
+-     *  private static final long[] mk_tokenSet_0() {
+-     *    long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L };
+-     *    return data;
+-     *  }
+-     *  public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-     *
+-     *  Or, for large bitsets, optimize init so ranges are collapsed into loops.
+-     *  This is most useful for lexers using unicode.
+-     */
+-    private void genBitSet(BitSet p, int id) {
+-        // initialization data
+-        println("private static long[] mk_" + getBitsetName(id) + "()");
+-        println("{");
+-        tabs++;
+-        int n = p.lengthInLongWords();
+-        if ( n<BITSET_OPTIMIZE_INIT_THRESHOLD ) {
+-            println("long[] data = { " + p.toStringOfWords() + "};");
+-        }
+-        else {
+-            // will init manually, allocate space then set values
+-            println("long[] data = new long["+n+"];");
+-            long[] elems = p.toPackedArray();
+-            for (int i = 0; i < elems.length;) {
+-                if ( (i+1)==elems.length || elems[i]!=elems[i+1] ) {
+-                    // last number or no run of numbers, just dump assignment
+-                    println("data["+i+"]="+elems[i]+"L;");
+-                    i++;
+-                }
+-                else
+-				{
+-                    // scan to find end of run
+-                    int j;
+-                    for (j = i + 1; j < elems.length && elems[j]==elems[i]; j++)
+-                    {
+-						;
+-                    }
+-                    // j-1 is last member of run
+-                    println("for (int i = "+i+"; i<="+(j-1)+"; i++) { data[i]="+
+-                            elems[i]+"L; }");
+-                    i = j;
+-                }
+-            }
+-        }
+-
+-        println("return data;");
+-        tabs--;
+-        println("}");
+-		// BitSet object
+-        println("public static readonly BitSet " + getBitsetName(id) + " = new BitSet(" +
+-            "mk_" + getBitsetName(id) + "()" + ");");
+-	}
+-
+-    /** Given the index of a bitset in the bitset list, generate a unique name.
+-     * Specific code-generators may want to override this
+-     * if the language does not allow '_' or numerals in identifiers.
+-     * @param index  The index of the bitset in the bitset list.
+-     */
+-    protected String getBitsetName(int index) {
+-        return "tokenSet_" + index + "_";
+-    }
+-
+-	/** Generate the finish of a block, using a combination of the info
+-	* returned from genCommonBlock() and the action to perform when
+-	* no alts were taken
+-	* @param howToFinish The return of genCommonBlock()
+-	* @param noViableAction What to generate when no alt is taken
+-	*/
+-	private void genBlockFinish(CSharpBlockFinishingInfo howToFinish, String noViableAction)
+-	{
+-
+-		if (howToFinish.needAnErrorClause &&
+-			(howToFinish.generatedAnIf || howToFinish.generatedSwitch))
+-		{
+-			if ( howToFinish.generatedAnIf ) {
+-				println("else");
+-				println("{");
+-			}
+-			else {
+-				println("{");
+-			}
+-			tabs++;
+-			println(noViableAction);
+-			tabs--;
+-			println("}");
+-		}
+-
+-		if ( howToFinish.postscript!=null ) {
+-			if (howToFinish.needAnErrorClause && howToFinish.generatedSwitch &&
+-				!howToFinish.generatedAnIf && noViableAction != null)
+-			{
+-				// Check to make sure that noViableAction is only a throw statement
+-				if (noViableAction.indexOf("throw") == 0 || noViableAction.indexOf("goto") == 0) {
+-					// Remove the break statement since it isn't reachable with a throw exception
+-					int endOfBreak = howToFinish.postscript.indexOf("break;") + 6;
+-					String newPostScript = howToFinish.postscript.substring(endOfBreak);
+-					println(newPostScript);
+-				}
+-				else {
+-					println(howToFinish.postscript);
+-				}
+-			}
+-			else {
+-				println(howToFinish.postscript);
+-			}
+-		}
+-	}
+-
+-    /** Generate the init action for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.
+-     * @blk The block for which the preamble is to be generated.
+-     */
+-    protected void genBlockInitAction(AlternativeBlock blk)
+-	{
+-        // dump out init action
+-        if (blk.initAction != null) {
+-            printAction(processActionForSpecialSymbols(blk.initAction, blk.getLine(), currentRule, null));
+-        }
+-    }
+-
+-	/** Generate the header for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.  This generates any variable declarations
+-     * and syntactic-predicate-testing variables.
+-	 * @blk The block for which the preamble is to be generated.
+-	 */
+-	protected void genBlockPreamble(AlternativeBlock blk) {
+-		// define labels for rule blocks.
+-		if ( blk instanceof RuleBlock ) {
+-			RuleBlock rblk = (RuleBlock)blk;
+-			if ( rblk.labeledElements!=null ) {
+-				for (int i=0; i<rblk.labeledElements.size(); i++) {
+-
+-					AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
+-					//System.out.println("looking at labeled element: "+a);
+-					//Variables for labeled rule refs and
+-					//subrules are different than variables for
+-					//grammar atoms.  This test is a little tricky
+-					//because we want to get all rule refs and ebnf,
+-					//but not rule blocks or syntactic predicates
+-					if (
+-						a instanceof RuleRefElement ||
+-						a instanceof AlternativeBlock &&
+-						!(a instanceof RuleBlock) &&
+-						!(a instanceof SynPredBlock)
+-						) {
+-
+-						if (
+-							!(a instanceof RuleRefElement) &&
+-							((AlternativeBlock)a).not &&
+-							analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
+-							) {
+-							// Special case for inverted subrules that
+-							// will be inlined.  Treat these like
+-							// token or char literal references
+-							println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-							if (grammar.buildAST) {
+-								genASTDeclaration(a);
+-							}
+-						}
+-						else {
+-							if (grammar.buildAST) {
+-								// Always gen AST variables for
+-								// labeled elements, even if the
+-								// element itself is marked with !
+-								genASTDeclaration(a);
+-							}
+-							if ( grammar instanceof LexerGrammar ) {
+-								println("Token "+a.getLabel()+" = null;");
+-							}
+-							if (grammar instanceof TreeWalkerGrammar) {
+-								// always generate rule-ref variables
+-								// for tree walker
+-								println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-							}
+-						}
+-					}
+-					else {
+-						// It is a token or literal reference.  Generate the
+-						// correct variable type for this grammar
+-						println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-						// In addition, generate *_AST variables if building ASTs
+-						if (grammar.buildAST) {
+-							//println(labeledElementASTType+" " + a.getLabel() + "_AST = null;");
+-							if (a instanceof GrammarAtom &&
+-								((GrammarAtom)a).getASTNodeType()!=null ) {
+-								GrammarAtom ga = (GrammarAtom)a;
+-								genASTDeclaration(a, ga.getASTNodeType());
+-							}
+-							else {
+-								genASTDeclaration(a);
+-							}
+-						}
+-					}
+-				}
+-			}
+-		}
+-	}
+-
+-	public void genBody(LexerGrammar g) throws IOException
+-	{
+-		// SAS: moved output creation to method so a subclass can change
+-		//      how the output is generated (for VAJ interface)
+-		setupOutput(grammar.getClassName());
+-
+-		genAST = false;	// no way to gen trees.
+-		saveText = true;	// save consumed characters.
+-
+-		tabs=0;
+-
+-		// Generate header common to all CSharp output files
+-		genHeader();
+-		// Do not use printAction because we assume tabs==0
+-		println(behavior.getHeaderAction(""));
+-
+-      		// Generate the CSharp namespace declaration (if specified)
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-		tabs++;
+-
+-		// Generate header specific to lexer CSharp file
+-		// println("import java.io.FileInputStream;");
+-		println("// Generate header specific to lexer CSharp file");
+-		println("using System;");
+-		println("using Stream                          = System.IO.Stream;");
+-		println("using TextReader                      = System.IO.TextReader;");
+-		println("using Hashtable                       = System.Collections.Hashtable;");
+-		println("using Comparer                        = System.Collections.Comparer;");
+-		if ( !(g.caseSensitiveLiterals) )
+-		{
+-			println("using CaseInsensitiveHashCodeProvider = System.Collections.CaseInsensitiveHashCodeProvider;");
+-			println("using CaseInsensitiveComparer         = System.Collections.CaseInsensitiveComparer;");
+-		}
+-		println("");
+-		println("using TokenStreamException            = persistence.antlr.TokenStreamException;");
+-		println("using TokenStreamIOException          = persistence.antlr.TokenStreamIOException;");
+-		println("using TokenStreamRecognitionException = persistence.antlr.TokenStreamRecognitionException;");
+-		println("using CharStreamException             = persistence.antlr.CharStreamException;");
+-		println("using CharStreamIOException           = persistence.antlr.CharStreamIOException;");
+-		println("using ANTLRException                  = persistence.antlr.ANTLRException;");
+-		println("using CharScanner                     = persistence.antlr.CharScanner;");
+-		println("using InputBuffer                     = persistence.antlr.InputBuffer;");
+-		println("using ByteBuffer                      = persistence.antlr.ByteBuffer;");
+-		println("using CharBuffer                      = persistence.antlr.CharBuffer;");
+-		println("using Token                           = persistence.antlr.Token;");
+-		println("using CommonToken                     = persistence.antlr.CommonToken;");
+-		println("using SemanticException               = persistence.antlr.SemanticException;");
+-		println("using RecognitionException            = persistence.antlr.RecognitionException;");
+-		println("using NoViableAltForCharException     = persistence.antlr.NoViableAltForCharException;");
+-		println("using MismatchedCharException         = persistence.antlr.MismatchedCharException;");
+-		println("using TokenStream                     = persistence.antlr.TokenStream;");
+-		println("using LexerSharedInputState           = persistence.antlr.LexerSharedInputState;");
+-		println("using BitSet                          = persistence.antlr.collections.impl.BitSet;");
+-
+-		// Generate user-defined lexer file preamble
+-		println(grammar.preambleAction.getText());
+-
+-		// Generate lexer class definition
+-		String sup=null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-		}
+-		else {
+-			sup = "persistence.antlr." + grammar.getSuperClass();
+-		}
+-
+-		// print javadoc comment if any
+-		if ( grammar.comment!=null )
+-		{
+-			_println(grammar.comment);
+-		}
+-
+-        Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
+-		if (tprefix == null) {
+-			print("public ");
+-		}
+-        else {
+-            String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
+-            if (p == null) {
+-				print("public ");
+-			}
+-			else {
+-                print(p+" ");
+-            }
+-        }
+-
+-		print("class " + grammar.getClassName() + " : "+sup);
+-		println(", TokenStream");
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-		if ( tsuffix != null )
+-		{
+-			String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-			if ( suffix != null )
+-			{
+-				print(", "+suffix);	// must be an interface name for CSharp
+-			}
+-		}
+-		println(" {");
+-		tabs++;
+-
+-		// Generate 'const' definitions for Token IDs
+-		genTokenDefinitions(grammar.tokenManager);
+-
+-		// Generate user-defined lexer class members
+-		print(
+-			processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
+-			);
+-
+-		//
+-		// Generate the constructor from InputStream, which in turn
+-		// calls the ByteBuffer constructor
+-		//
+-		println("public " + grammar.getClassName() + "(Stream ins) : this(new ByteBuffer(ins))");
+-		println("{");
+-		println("}");
+-		println("");
+-
+-		//
+-		// Generate the constructor from Reader, which in turn
+-		// calls the CharBuffer constructor
+-		//
+-		println("public " + grammar.getClassName() + "(TextReader r) : this(new CharBuffer(r))");
+-		println("{");
+-		println("}");
+-		println("");
+-
+-		print("public " + grammar.getClassName() + "(InputBuffer ib)");
+-		// if debugging, wrap the input buffer in a debugger
+-		if (grammar.debuggingOutput)
+-			println(" : this(new LexerSharedInputState(new persistence.antlr.debug.DebuggingInputBuffer(ib)))");
+-		else
+-			println(" : this(new LexerSharedInputState(ib))");
+-		println("{");
+-		println("}");
+-		println("");
+-
+-		//
+-		// Generate the constructor from InputBuffer (char or byte)
+-		//
+-		println("public " + grammar.getClassName() + "(LexerSharedInputState state) : base(state)");
+-		println("{");
+-		tabs++;
+-		println("initialize();");
+-		tabs--;
+-		println("}");
+-
+-		// Generate the initialize function
+-		println("private void initialize()");
+-		println("{");
+-		tabs++;
+-
+-		// if debugging, set up array variables and call user-overridable
+-		//   debugging setup method
+-		if ( grammar.debuggingOutput ) {
+-			println("ruleNames  = _ruleNames;");
+-			println("semPredNames = _semPredNames;");
+-			println("setupDebugging();");
+-		}
+-
+-	      // Generate the setting of various generated options.
+-	      // These need to be before the literals since ANTLRHashString depends on
+-	      // the casesensitive stuff.
+-	      println("caseSensitiveLiterals = " + g.caseSensitiveLiterals + ";");
+-	      println("setCaseSensitive(" + g.caseSensitive + ");");
+-
+-		// Generate the initialization of a hashtable
+-		// containing the string literals used in the lexer
+-		// The literals variable itself is in CharScanner
+-		if (g.caseSensitiveLiterals)
+-			println("literals = new Hashtable(null, Comparer.Default);");
+-		else
+-			println("literals = new Hashtable(CaseInsensitiveHashCodeProvider.Default, CaseInsensitiveComparer.Default);");
+-		Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
+-		while ( keys.hasMoreElements() ) {
+-			String key = (String)keys.nextElement();
+-			if ( key.charAt(0) != '"' ) {
+-				continue;
+-			}
+-			TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
+-			if ( sym instanceof StringLiteralSymbol ) {
+-				StringLiteralSymbol s = (StringLiteralSymbol)sym;
+-				println("literals.Add(" + s.getId() + ", " + s.getTokenType() + ");");
+-			}
+-		}
+-
+-		Enumeration ids;
+-		tabs--;
+-		println("}");
+-
+-		// generate the rule name array for debugging
+-		if (grammar.debuggingOutput) {
+-			println("private const string[] _ruleNames = {");
+-
+-			ids = grammar.rules.elements();
+-			int ruleNum=0;
+-			while ( ids.hasMoreElements() ) {
+-				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-				if ( sym instanceof RuleSymbol)
+-					println("  \""+((RuleSymbol)sym).getId()+"\",");
+-			}
+-			println("};");
+-		}
+-
+-		// Generate nextToken() rule.
+-		// nextToken() is a synthetic lexer rule that is the implicit OR of all
+-		// user-defined lexer rules.
+-		genNextToken();
+-
+-		// Generate code for each rule in the lexer
+-		ids = grammar.rules.elements();
+-		int ruleNum=0;
+-		while ( ids.hasMoreElements() ) {
+-			RuleSymbol sym = (RuleSymbol) ids.nextElement();
+-			// Don't generate the synthetic rules
+-			if (!sym.getId().equals("mnextToken")) {
+-				genRule(sym, false, ruleNum++, grammar.tokenManager);
+-			}
+-			exitIfError();
+-		}
+-
+-		// Generate the semantic predicate map for debugging
+-		if (grammar.debuggingOutput)
+-			genSemPredMap();
+-
+-		// Generate the bitsets used throughout the lexer
+-		genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());
+-
+-		println("");
+-		tabs--;
+-		println("}");
+-
+-		tabs--;
+-		// Generate the CSharp namespace closures (if required)
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the lexer output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-
+-	public void genInitFactory( Grammar g ) {
+-		if( g.buildAST )
+-		{
+-			// Generate the method to initialize an ASTFactory when we're
+-			// building AST's
+-			println("static public void initializeASTFactory( ASTFactory factory )");
+-			println("{");
+-			tabs++;
+-
+-			println("factory.setMaxNodeType("+g.tokenManager.maxTokenType()+");");
+-
+-	        // Walk the token vocabulary and generate code to register every TokenID->ASTNodeType
+-	        // mapping specified in the  tokens {...} section with the ASTFactory.
+-			Vector v = g.tokenManager.getVocabulary();
+-			for (int i = 0; i < v.size(); i++) {
+-				String s = (String)v.elementAt(i);
+-				if (s != null) {
+-					TokenSymbol ts = g.tokenManager.getTokenSymbol(s);
+-					if (ts != null && ts.getASTNodeType() != null) {
+-						println("factory.setTokenTypeASTNodeType(" + s + ", \"" + ts.getASTNodeType() + "\");");
+-					}
+-				}
+-			}
+-
+-			tabs--;
+-			println("}");
+-		}
+-	}
+-
+-	public void genBody(ParserGrammar g) throws IOException
+-	{
+-		// Open the output stream for the parser and set the currentOutput
+-		// SAS: moved file setup so subclass could do it (for VAJ interface)
+-		setupOutput(grammar.getClassName());
+-
+-		genAST = grammar.buildAST;
+-
+-		tabs = 0;
+-
+-		// Generate the header common to all output files.
+-		genHeader();
+-		// Do not use printAction because we assume tabs==0
+-		println(behavior.getHeaderAction(""));
+-
+-      		// Generate the CSharp namespace declaration (if specified)
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-		tabs++;
+-
+-		// Generate header for the parser
+-		println("// Generate the header common to all output files.");
+-		println("using System;");
+-		println("");
+-		println("using TokenBuffer              = persistence.antlr.TokenBuffer;");
+-		println("using TokenStreamException     = persistence.antlr.TokenStreamException;");
+-		println("using TokenStreamIOException   = persistence.antlr.TokenStreamIOException;");
+-		println("using ANTLRException           = persistence.antlr.ANTLRException;");
+-		println("using " + grammar.getSuperClass() + " = persistence.antlr." + grammar.getSuperClass() + ";");
+-		println("using Token                    = persistence.antlr.Token;");
+-		println("using TokenStream              = persistence.antlr.TokenStream;");
+-		println("using RecognitionException     = persistence.antlr.RecognitionException;");
+-		println("using NoViableAltException     = persistence.antlr.NoViableAltException;");
+-		println("using MismatchedTokenException = persistence.antlr.MismatchedTokenException;");
+-		println("using SemanticException        = persistence.antlr.SemanticException;");
+-		println("using ParserSharedInputState   = persistence.antlr.ParserSharedInputState;");
+-		println("using BitSet                   = persistence.antlr.collections.impl.BitSet;");
+-		if ( genAST ) {
+-			println("using AST                      = persistence.antlr.collections.AST;");
+-			println("using ASTPair                  = persistence.antlr.ASTPair;");
+-			println("using ASTFactory               = persistence.antlr.ASTFactory;");
+-			println("using ASTArray                 = persistence.antlr.collections.impl.ASTArray;");
+-		}
+-
+-		// Output the user-defined parser preamble
+-		println(grammar.preambleAction.getText());
+-
+-		// Generate parser class definition
+-		String sup=null;
+-		if ( grammar.superClass != null )
+-			sup = grammar.superClass;
+-		else
+-			sup = "persistence.antlr." + grammar.getSuperClass();
+-
+-		// print javadoc comment if any
+-		if ( grammar.comment!=null ) {
+-			_println(grammar.comment);
+-		}
+-
+-        Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
+-		if (tprefix == null) {
+-			print("public ");
+-		}
+-        else {
+-            String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
+-            if (p == null) {
+-				print("public ");
+-			}
+-			else {
+-                print(p+" ");
+-            }
+-        }
+-
+-		println("class " + grammar.getClassName() + " : "+sup);
+-
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-		if ( tsuffix != null ) {
+-			String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-			if ( suffix != null )
+-				print("              , "+suffix);	// must be an interface name for CSharp
+-		}
+-		println("{");
+-		tabs++;
+-
+-		// Generate 'const' definitions for Token IDs
+-		genTokenDefinitions(grammar.tokenManager);
+-
+-		// set up an array of all the rule names so the debugger can
+-		// keep track of them only by number -- less to store in tree...
+-		if (grammar.debuggingOutput) {
+-			println("private const string[] _ruleNames = {");
+-			tabs++;
+-
+-			Enumeration ids = grammar.rules.elements();
+-			int ruleNum=0;
+-			while ( ids.hasMoreElements() ) {
+-				GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-				if ( sym instanceof RuleSymbol)
+-					println("  \""+((RuleSymbol)sym).getId()+"\",");
+-			}
+-			tabs--;
+-			println("};");
+-		}
+-
+-		// Generate user-defined parser class members
+-		print(
+-			processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
+-			);
+-
+-		// Generate parser class constructor from TokenBuffer
+-		println("");
+-		println("protected void initialize()");
+-		println("{");
+-		tabs++;
+-		println("tokenNames = tokenNames_;");
+-
+-		if( grammar.buildAST )
+-			println("initializeFactory();");
+-
+-		// if debugging, set up arrays and call the user-overridable
+-		//   debugging setup method
+-		if ( grammar.debuggingOutput ) {
+-			println("ruleNames  = _ruleNames;");
+-			println("semPredNames = _semPredNames;");
+-			println("setupDebugging(tokenBuf);");
+-		}
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		println("");
+-		println("protected " + grammar.getClassName() + "(TokenBuffer tokenBuf, int k) : base(tokenBuf, k)");
+-		println("{");
+-		tabs++;
+-		println("initialize();");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) : this(tokenBuf," + grammar.maxk + ")");
+-		println("{");
+-		println("}");
+-		println("");
+-
+-		// Generate parser class constructor from TokenStream
+-		println("protected " + grammar.getClassName()+"(TokenStream lexer, int k) : base(lexer,k)");
+-		println("{");
+-		tabs++;
+-		println("initialize();");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		println("public " + grammar.getClassName()+"(TokenStream lexer) : this(lexer," + grammar.maxk + ")");
+-		println("{");
+-		println("}");
+-		println("");
+-
+-		println("public " + grammar.getClassName()+"(ParserSharedInputState state) : base(state," + grammar.maxk + ")");
+-		println("{");
+-		tabs++;
+-		println("initialize();");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		astTypes = new java.util.Vector(100);
+-
+-		// Generate code for each rule in the grammar
+-		Enumeration ids = grammar.rules.elements();
+-		int ruleNum=0;
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-			if ( sym instanceof RuleSymbol) {
+-				RuleSymbol rs = (RuleSymbol)sym;
+-				genRule(rs, rs.references.size()==0, ruleNum++, grammar.tokenManager);
+-			}
+-			exitIfError();
+-		}
+-		if ( usingCustomAST )
+-		{
+-			// when we are using a custom AST, overload Parser.getAST() to return the
+-			// custom AST type
+-			println("public new " + labeledElementASTType + " getAST()");
+-			println("{");
+-			tabs++;
+-			println("return (" + labeledElementASTType + ") returnAST;");
+-			tabs--;
+-			println("}");
+-			println("");
+-		}
+-
+-		// Generate the method that initializes the ASTFactory when we're
+-		// building AST's
+-		println("private void initializeFactory()");
+-		println("{");
+-		tabs++;
+-		if( grammar.buildAST ) {
+-			println("if (astFactory == null)");
+-			println("{");
+-			tabs++;
+-			if( usingCustomAST )
+-			{
+-				println("astFactory = new ASTFactory(\"" + labeledElementASTType + "\");");
+-			}
+-			else
+-				println("astFactory = new ASTFactory();");
+-			tabs--;
+-			println("}");
+-			println("initializeASTFactory( astFactory );");
+-		}
+-		tabs--;
+-		println("}");
+-		genInitFactory( g );
+-
+-		// Generate the token names
+-		genTokenStrings();
+-
+-		// Generate the bitsets used throughout the grammar
+-		genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
+-
+-		// Generate the semantic predicate map for debugging
+-		if (grammar.debuggingOutput)
+-			genSemPredMap();
+-
+-		// Close class definition
+-		println("");
+-		tabs--;
+-		println("}");
+-
+-		tabs--;
+-		// Generate the CSharp namespace closures (if required)
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the parser output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-	public void genBody(TreeWalkerGrammar g) throws IOException
+-	{
+-		// Open the output stream for the parser and set the currentOutput
+-		// SAS: move file open to method so subclass can override it
+-		//      (mainly for VAJ interface)
+-		setupOutput(grammar.getClassName());
+-
+-		genAST = grammar.buildAST;
+-		tabs = 0;
+-
+-		// Generate the header common to all output files.
+-		genHeader();
+-		// Do not use printAction because we assume tabs==0
+-		println(behavior.getHeaderAction(""));
+-
+-      // Generate the CSharp namespace declaration (if specified)
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-		tabs++;
+-
+-		// Generate header specific to the tree-parser CSharp file
+-		println("// Generate header specific to the tree-parser CSharp file");
+-		println("using System;");
+-		println("");
+-		println("using " + grammar.getSuperClass() + " = persistence.antlr." + grammar.getSuperClass() + ";");
+-		println("using Token                    = persistence.antlr.Token;");
+-		println("using AST                      = persistence.antlr.collections.AST;");
+-		println("using RecognitionException     = persistence.antlr.RecognitionException;");
+-		println("using ANTLRException           = persistence.antlr.ANTLRException;");
+-		println("using NoViableAltException     = persistence.antlr.NoViableAltException;");
+-		println("using MismatchedTokenException = persistence.antlr.MismatchedTokenException;");
+-		println("using SemanticException        = persistence.antlr.SemanticException;");
+-		println("using BitSet                   = persistence.antlr.collections.impl.BitSet;");
+-		println("using ASTPair                  = persistence.antlr.ASTPair;");
+-		println("using ASTFactory               = persistence.antlr.ASTFactory;");
+-		println("using ASTArray                 = persistence.antlr.collections.impl.ASTArray;");
+-
+-		// Output the user-defined parser premamble
+-		println(grammar.preambleAction.getText());
+-
+-		// Generate parser class definition
+-		String sup=null;
+-		if ( grammar.superClass!=null ) {
+-			sup = grammar.superClass;
+-		}
+-		else {
+-			sup = "persistence.antlr." + grammar.getSuperClass();
+-		}
+-		println("");
+-
+-		// print javadoc comment if any
+-		if ( grammar.comment!=null ) {
+-			_println(grammar.comment);
+-		}
+-
+-        Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
+-		if (tprefix == null) {
+-			print("public ");
+-		}
+-        else {
+-            String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
+-            if (p == null) {
+-				print("public ");
+-			}
+-			else {
+-                print(p+" ");
+-            }
+-        }
+-
+-		println("class " + grammar.getClassName() + " : "+sup);
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-		if ( tsuffix != null ) {
+-			String suffix = StringUtils.stripFrontBack(tsuffix.getText(),"\"","\"");
+-			if ( suffix != null ) {
+-				print("              , "+suffix);	// must be an interface name for CSharp
+-			}
+-		}
+-		println("{");
+-		tabs++;
+-
+-		// Generate 'const' definitions for Token IDs
+-		genTokenDefinitions(grammar.tokenManager);
+-
+-		// Generate user-defined parser class members
+-		print(
+-			processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
+-			);
+-
+-		// Generate default parser class constructor
+-		println("public " + grammar.getClassName() + "()");
+-		println("{");
+-		tabs++;
+-		println("tokenNames = tokenNames_;");
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		astTypes = new java.util.Vector();
+-		// Generate code for each rule in the grammar
+-		Enumeration ids = grammar.rules.elements();
+-		int ruleNum=0;
+-		String ruleNameInits = "";
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol) ids.nextElement();
+-			if ( sym instanceof RuleSymbol) {
+-				RuleSymbol rs = (RuleSymbol)sym;
+-				genRule(rs, rs.references.size()==0, ruleNum++, grammar.tokenManager);
+-			}
+-			exitIfError();
+-		}
+-
+-		if ( usingCustomAST )
+-		{
+-			// when we are using a custom ast override Parser.getAST to return the
+-			// custom AST type
+-			println("public new " + labeledElementASTType + " getAST()");
+-			println("{");
+-			tabs++;
+-			println("return (" + labeledElementASTType + ") returnAST;");
+-			tabs--;
+-			println("}");
+-			println("");
+-		}
+-
+-		// Generate the ASTFactory initialization function
+-		genInitFactory( grammar );
+-
+-		// Generate the token names
+-		genTokenStrings();
+-
+-		// Generate the bitsets used throughout the grammar
+-		genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
+-
+-		// Close class definition
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		tabs--;
+-		// Generate the CSharp namespace closures (if required)
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the parser output stream
+-		currentOutput.close();
+-		currentOutput = null;
+-	}
+-
+-	/** Generate a series of case statements that implement a BitSet test.
+-	 * @param p The Bitset for which cases are to be generated
+-	 */
+-	protected void genCases(BitSet p) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genCases("+p+")");
+-		int[] elems;
+-
+-		elems = p.toArray();
+-		// Wrap cases four-per-line for lexer, one-per-line for parser
+-		int wrap = (grammar instanceof LexerGrammar) ? 4 : 1;
+-		int j=1;
+-		boolean startOfLine = true;
+-		for (int i = 0; i < elems.length; i++) {
+-			if (j==1) {
+-				print("");
+-			} else {
+-				_print("  ");
+-			}
+-			_print("case " + getValueString(elems[i]) + ":");
+-			if (j==wrap) {
+-				_println("");
+-				startOfLine = true;
+-				j=1;
+-			}
+-			else {
+-				j++;
+-				startOfLine = false;
+-			}
+-		}
+-		if (!startOfLine) {
+-			_println("");
+-		}
+-	}
+-
+-	/**Generate common code for a block of alternatives; return a
+-	* postscript that needs to be generated at the end of the
+-	* block.  Other routines may append else-clauses and such for
+-	* error checking before the postfix is generated.  If the
+-	* grammar is a lexer, then generate alternatives in an order
+-	* where alternatives requiring deeper lookahead are generated
+-	* first, and EOF in the lookahead set reduces the depth of
+-	* the lookahead.  @param blk The block to generate @param
+-	* noTestForSingle If true, then it does not generate a test
+-	* for a single alternative.
+-	*/
+-	public CSharpBlockFinishingInfo genCommonBlock(AlternativeBlock blk,
+-		boolean noTestForSingle)
+-	{
+-		int nIF=0;
+-		boolean createdLL1Switch = false;
+-		int closingBracesOfIFSequence = 0;
+-		CSharpBlockFinishingInfo finishingInfo = new CSharpBlockFinishingInfo();
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genCommonBlock("+blk+")");
+-
+-		// Save the AST generation state, and set it to that of the block
+-		boolean savegenAST = genAST;
+-		genAST = genAST && blk.getAutoGen();
+-
+-			boolean oldsaveTest = saveText;
+-		saveText = saveText && blk.getAutoGen();
+-
+-		// Is this block inverted?  If so, generate special-case code
+-		if ( blk.not &&
+-			analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar) )
+-		{
+-			if ( DEBUG_CODE_GENERATOR ) System.out.println("special case: ~(subrule)");
+-			Lookahead p = analyzer.look(1, blk);
+-			// Variable assignment for labeled elements
+-			if (blk.getLabel() != null && syntacticPredLevel == 0) {
+-				println(blk.getLabel() + " = " + lt1Value + ";");
+-			}
+-
+-			// AST
+-			genElementAST(blk);
+-
+-			String astArgs="";
+-			if (grammar instanceof TreeWalkerGrammar) {
+-				if ( usingCustomAST )
+-					astArgs = "(AST)_t,";
+-				else
+-					astArgs = "_t,";
+-			}
+-
+-			// match the bitset for the alternative
+-			println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
+-
+-			// tack on tree cursor motion if doing a tree walker
+-			if (grammar instanceof TreeWalkerGrammar)
+-			{
+-				println("_t = _t.getNextSibling();");
+-			}
+-			return finishingInfo;
+-		}
+-
+-		// Special handling for single alt
+-		if (blk.getAlternatives().size() == 1)
+-		{
+-			Alternative alt = blk.getAlternativeAt(0);
+-			// Generate a warning if there is a synPred for single alt.
+-			if (alt.synPred != null)
+-			{
+-				antlrTool.warning(
+-					"Syntactic predicate superfluous for single alternative",
+-					grammar.getFilename(),
+-               blk.getAlternativeAt(0).synPred.getLine(),
+-               blk.getAlternativeAt(0).synPred.getColumn()
+-					);
+-			}
+-			if (noTestForSingle)
+-			{
+-				if (alt.semPred != null)
+-				{
+-					// Generate validating predicate
+-					genSemPred(alt.semPred, blk.line);
+-				}
+-				genAlt(alt, blk);
+-				return finishingInfo;
+-			}
+-		}
+-
+-		// count number of simple LL(1) cases; only do switch for
+-		// many LL(1) cases (no preds, no end of token refs)
+-		// We don't care about exit paths for (...)*, (...)+
+-		// because we don't explicitly have a test for them
+-		// as an alt in the loop.
+-		//
+-		// Also, we now count how many unicode lookahead sets
+-		// there are--they must be moved to DEFAULT or ELSE
+-		// clause.
+-		int nLL1 = 0;
+-		for (int i=0; i<blk.getAlternatives().size(); i++)
+-		{
+-			Alternative a = blk.getAlternativeAt(i);
+-			if ( suitableForCaseExpression(a) ) {
+-				nLL1++;
+-			}
+-		}
+-
+-		// do LL(1) cases
+-		if ( nLL1 >= makeSwitchThreshold)
+-		{
+-			// Determine the name of the item to be compared
+-			String testExpr = lookaheadString(1);
+-			createdLL1Switch = true;
+-			// when parsing trees, convert null to valid tree node with NULL lookahead
+-			if ( grammar instanceof TreeWalkerGrammar )
+-			{
+-				println("if (null == _t)");
+-				tabs++;
+-				println("_t = ASTNULL;");
+-				tabs--;
+-			}
+-			println("switch ( " + testExpr+" )");
+-			println("{");
+-			//tabs++;
+-			for (int i=0; i<blk.alternatives.size(); i++)
+-			{
+-				Alternative alt = blk.getAlternativeAt(i);
+-				// ignore any non-LL(1) alts, predicated alts,
+-				// or end-of-token alts for case expressions
+-				bSaveIndexCreated = false;
+-				if ( !suitableForCaseExpression(alt) )
+-				{
+-					continue;
+-				}
+-				Lookahead p = alt.cache[1];
+-				if (p.fset.degree() == 0 && !p.containsEpsilon())
+-				{
+-					antlrTool.warning("Alternate omitted due to empty prediction set",
+-						grammar.getFilename(),
+-						alt.head.getLine(), alt.head.getColumn());
+-				}
+-				else
+-				{
+-					genCases(p.fset);
+-					println("{");
+-					tabs++;
+-					genAlt(alt, blk);
+-					println("break;");
+-					tabs--;
+-					println("}");
+-				}
+-			}
+-			println("default:");
+-			tabs++;
+-		}
+-
+-		// do non-LL(1) and nondeterministic cases This is tricky in
+-		// the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR
+-		// : "*="; Since nextToken is generated without a loop, then
+-		// the STAR will have end-of-token as it's lookahead set for
+-		// LA(2).  So, we must generate the alternatives containing
+-		// trailing end-of-token in their lookahead sets *after* the
+-		// alternatives without end-of-token.  This implements the
+-		// usual lexer convention that longer matches come before
+-		// shorter ones, e.g.  "*=" matches ASSIGN_STAR not STAR
+-		//
+-		// For non-lexer grammars, this does not sort the alternates
+-		// by depth Note that alts whose lookahead is purely
+-		// end-of-token at k=1 end up as default or else clauses.
+-		int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
+-		for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
+-			if ( DEBUG_CODE_GENERATOR ) System.out.println("checking depth "+altDepth);
+-			for (int i=0; i<blk.alternatives.size(); i++) {
+-				Alternative alt = blk.getAlternativeAt(i);
+-				if ( DEBUG_CODE_GENERATOR ) System.out.println("genAlt: "+i);
+-				// if we made a switch above, ignore what we already took care
+-				// of.  Specifically, LL(1) alts with no preds
+-				// that do not have end-of-token in their prediction set
+-				// and that are not giant unicode sets.
+-				if ( createdLL1Switch && suitableForCaseExpression(alt) )
+-				{
+-					if ( DEBUG_CODE_GENERATOR ) System.out.println("ignoring alt because it was in the switch");
+-					continue;
+-				}
+-				String e;
+-
+-				boolean unpredicted = false;
+-
+-				if (grammar instanceof LexerGrammar) {
+-					// Calculate the "effective depth" of the alt,
+-					// which is the max depth at which
+-					// cache[depth]!=end-of-token
+-					int effectiveDepth = alt.lookaheadDepth;
+-					if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC)
+-					{
+-						// use maximum lookahead
+-						effectiveDepth = grammar.maxk;
+-					}
+-					while ( effectiveDepth >= 1 &&
+-						alt.cache[effectiveDepth].containsEpsilon() )
+-					{
+-						effectiveDepth--;
+-					}
+-					// Ignore alts whose effective depth is other than
+-					// the ones we are generating for this iteration.
+-					if (effectiveDepth != altDepth)
+-					{
+-						if ( DEBUG_CODE_GENERATOR )
+-							System.out.println("ignoring alt because effectiveDepth!=altDepth;"+effectiveDepth+"!="+altDepth);
+-						continue;
+-					}
+-					unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
+-					e = getLookaheadTestExpression(alt, effectiveDepth);
+-				}
+-				else
+-				{
+-					unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
+-					e = getLookaheadTestExpression(alt, grammar.maxk);
+-				}
+-
+-				// Was it a big unicode range that forced unsuitability
+-				// for a case expression?
+-            if (alt.cache[1].fset.degree() > caseSizeThreshold &&
+-                suitableForCaseExpression(alt))
+-				{
+-					if ( nIF==0 )
+-					{
+-						println("if " + e);
+-						println("{");
+-					}
+-					else {
+-						println("else if " + e);
+-						println("{");
+-					}
+-				}
+-				else if (unpredicted &&
+-					alt.semPred==null &&
+-					alt.synPred==null)
+-				{
+-					// The alt has empty prediction set and no
+-					// predicate to help out.  if we have not
+-					// generated a previous if, just put {...} around
+-					// the end-of-token clause
+-					if ( nIF==0 ) {
+-						println("{");
+-					}
+-					else {
+-						println("else {");
+-					}
+-					finishingInfo.needAnErrorClause = false;
+-				}
+-				else
+-				{
+-					// check for sem and syn preds
+-					// Add any semantic predicate expression to the lookahead test
+-					if ( alt.semPred != null ) {
+-						// if debugging, wrap the evaluation of the predicate in a method
+-						//
+-						// translate $ and # references
+-						ActionTransInfo tInfo = new ActionTransInfo();
+-						String actionStr = processActionForSpecialSymbols(alt.semPred,
+-							blk.line,
+-							currentRule,
+-							tInfo);
+-						// ignore translation info...we don't need to
+-						// do anything with it.  call that will inform
+-						// SemanticPredicateListeners of the result
+-						if (((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)) &&
+-								grammar.debuggingOutput) {
+-							e = "("+e+"&& fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.PREDICTING,"+ //FIXME
+-								addSemPred(charFormatter.escapeString(actionStr))+","+actionStr+"))";
+-						}
+-						else {
+-							e = "("+e+"&&("+actionStr +"))";
+-						}
+-					}
+-
+-					// Generate any syntactic predicates
+-					if ( nIF>0 ) {
+-						if ( alt.synPred != null ) {
+-							println("else {");
+-							tabs++;
+-							genSynPred( alt.synPred, e );
+-							closingBracesOfIFSequence++;
+-						}
+-						else {
+-							println("else if " + e + " {");
+-						}
+-					}
+-					else {
+-						if ( alt.synPred != null ) {
+-							genSynPred( alt.synPred, e );
+-						}
+-						else {
+-							// when parsing trees, convert null to valid tree node
+-							// with NULL lookahead.
+-							if ( grammar instanceof TreeWalkerGrammar ) {
+-								println("if (_t == null)");
+-								tabs++;
+-								println("_t = ASTNULL;");
+-								tabs--;
+-							}
+-							println("if " + e);
+-							println("{");
+-						}
+-					}
+-
+-				}
+-
+-				nIF++;
+-				tabs++;
+-				genAlt(alt, blk);
+-				tabs--;
+-				println("}");
+-			}
+-		}
+-
+-		String ps = "";
+-		for (int i=1; i<=closingBracesOfIFSequence; i++) {
+-			ps+="}";
+-		}
+-
+-		// Restore the AST generation state
+-		genAST = savegenAST;
+-
+-		// restore save text state
+-		saveText=oldsaveTest;
+-
+-		// Return the finishing info.
+-		if ( createdLL1Switch ) {
+-			tabs--;
+-			finishingInfo.postscript = ps+"break; }";
+-			finishingInfo.generatedSwitch = true;
+-			finishingInfo.generatedAnIf = nIF>0;
+-			//return new CSharpBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
+-
+-		}
+-		else {
+-			finishingInfo.postscript = ps;
+-			finishingInfo.generatedSwitch = false;
+-			finishingInfo.generatedAnIf = nIF>0;
+-			// return new CSharpBlockFinishingInfo(ps, false,nIF>0);
+-		}
+-		return finishingInfo;
+-	}
+-
+-	private static boolean suitableForCaseExpression(Alternative a) {
+-		return a.lookaheadDepth == 1 &&
+-			a.semPred == null &&
+-			!a.cache[1].containsEpsilon() &&
+-			a.cache[1].fset.degree()<=caseSizeThreshold;
+-	}
+-
+-	/** Generate code to link an element reference into the AST */
+-	private void genElementAST(AlternativeElement el) {
+-		// handle case where you're not building trees, but are in tree walker.
+-		// Just need to get labels set up.
+-		if ( grammar instanceof TreeWalkerGrammar && !grammar.buildAST )
+-		{
+-			String elementRef;
+-			String astName;
+-
+-			// Generate names and declarations of the AST variable(s)
+-			if (el.getLabel() == null)
+-			{
+-				elementRef = lt1Value;
+-				// Generate AST variables for unlabeled stuff
+-				astName = "tmp" + astVarNumber + "_AST";
+-				astVarNumber++;
+-				// Map the generated AST variable in the alternate
+-				mapTreeVariable(el, astName);
+-				// Generate an "input" AST variable also
+-				println(labeledElementASTType+" "+astName+"_in = "+elementRef+";");
+-			}
+-			return;
+-		}
+-
+-		if (grammar.buildAST && syntacticPredLevel == 0)
+-		{
+-      		boolean needASTDecl =
+-      			(genAST &&
+-      			(el.getLabel() != null || (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG)));
+-
+-      		// RK: if we have a grammar element always generate the decl
+-      		// since some guy can access it from an action and we can't
+-      		// peek ahead (well not without making a mess).
+-      		// I'd prefer taking this out.
+-      		if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
+-				(el instanceof TokenRefElement))
+-      			needASTDecl = true;
+-
+-      		boolean doNoGuessTest = (grammar.hasSyntacticPredicate && needASTDecl);
+-
+-			String elementRef;
+-			String astNameBase;
+-
+-			// Generate names and declarations of the AST variable(s)
+-			if (el.getLabel() != null)
+-			{
+-				// if the element is labeled use that name...
+-				elementRef = el.getLabel();
+-				astNameBase = el.getLabel();
+-			}
+-			else
+-			{
+-				// else generate a temporary name...
+-				elementRef = lt1Value;
+-				// Generate AST variables for unlabeled stuff
+-				astNameBase = "tmp" + astVarNumber;
+-				astVarNumber++;
+-			}
+-
+-      		// Generate the declaration if required.
+-      		if (needASTDecl)
+-      		{
+-				// Generate the declaration
+-				if ( el instanceof GrammarAtom )
+-				{
+-					GrammarAtom ga = (GrammarAtom)el;
+-					if ( ga.getASTNodeType()!=null )
+-					{
+-						genASTDeclaration(el, astNameBase, ga.getASTNodeType());
+-						//println(ga.getASTNodeType()+" " + astName+" = null;");
+-					}
+-					else
+-					{
+-						genASTDeclaration(el, astNameBase, labeledElementASTType);
+-						//println(labeledElementASTType+" " + astName + " = null;");
+-					}
+-				}
+-				else
+-				{
+-					genASTDeclaration(el, astNameBase, labeledElementASTType);
+-					//println(labeledElementASTType+" " + astName + " = null;");
+-				}
+-			}
+-
+-	      	// for convenience..
+-    		String astName = astNameBase + "_AST";
+-
+-			// Map the generated AST variable in the alternate
+-			mapTreeVariable(el, astName);
+-			if (grammar instanceof TreeWalkerGrammar)
+-			{
+-				// Generate an "input" AST variable also
+-				println(labeledElementASTType+" " + astName + "_in = null;");
+-			}
+-
+-
+-			// Enclose actions with !guessing
+-			if (doNoGuessTest) {
+-				//println("if (0 == inputState.guessing)");
+-				//println("{");
+-				//tabs++;
+-			}
+-
+-			// if something has a label assume it will be used
+-        	// so we must initialize the RefAST
+-			if (el.getLabel() != null)
+-			{
+-				if ( el instanceof GrammarAtom )
+-				{
+-					println(astName + " = "+ getASTCreateString((GrammarAtom)el, elementRef) + ";");
+-				}
+-				else
+-				{
+-					println(astName + " = "+ getASTCreateString(elementRef) + ";");
+-				}
+-			}
+-
+-			// if it has no label but a declaration exists initialize it.
+-        	if (el.getLabel() == null && needASTDecl)
+-			{
+-				elementRef = lt1Value;
+-				if ( el instanceof GrammarAtom )
+-				{
+-					println(astName + " = "+ getASTCreateString((GrammarAtom)el, elementRef) + ";");
+-				}
+-				else
+-				{
+-					println(astName + " = "+ getASTCreateString(elementRef) + ";");
+-				}
+-				// Map the generated AST variable in the alternate
+-				if (grammar instanceof TreeWalkerGrammar)
+-				{
+-					// set "input" AST variable also
+-					println(astName + "_in = " + elementRef + ";");
+-				}
+-			}
+-
+-			if (genAST)
+-			{
+-				switch (el.getAutoGenType())
+-				{
+-				case GrammarElement.AUTO_GEN_NONE:
+-					if ( usingCustomAST ||
+-						 ( (el instanceof GrammarAtom) &&
+-                           (((GrammarAtom)el).getASTNodeType() != null) ) )
+-						println("astFactory.addASTChild(currentAST, (AST)" + astName + ");");
+-					else
+-						println("astFactory.addASTChild(currentAST, " + astName + ");");
+-					break;
+-				case GrammarElement.AUTO_GEN_CARET:
+-					if ( usingCustomAST ||
+-						 ( (el instanceof GrammarAtom) &&
+-                           (((GrammarAtom)el).getASTNodeType() != null) ) )
+-						println("astFactory.makeASTRoot(currentAST, (AST)" + astName + ");");
+-					else
+-						println("astFactory.makeASTRoot(currentAST, " + astName + ");");
+-					break;
+-				default:
+-					break;
+-				}
+-			}
+-			if (doNoGuessTest)
+-			{
+-				//tabs--;
+-				//println("}");
+-			}
+-		}
+-	}
+-
+-
+-	/** Close the try block and generate catch phrases
+-	 * if the element has a labeled handler in the rule
+-	 */
+-	private void genErrorCatchForElement(AlternativeElement el) {
+-		if (el.getLabel() == null) return;
+-		String r = el.enclosingRuleName;
+-		if ( grammar instanceof LexerGrammar ) {
+-			r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
+-		}
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
+-		if (rs == null) {
+-			antlrTool.panic("Enclosing rule not found!");
+-		}
+-		ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
+-		if (ex != null) {
+-			tabs--;
+-			println("}");
+-			genErrorHandler(ex);
+-		}
+-	}
+-
+-	/** Generate the catch phrases for a user-specified error handler */
+-	private void genErrorHandler(ExceptionSpec ex)
+-	{
+-		// Each ExceptionHandler in the ExceptionSpec is a separate catch
+-		for (int i = 0; i < ex.handlers.size(); i++)
+-		{
+-			ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
+-			// Generate catch phrase
+-			println("catch (" + handler.exceptionTypeAndName.getText() + ")");
+-			println("{");
+-			tabs++;
+-			if (grammar.hasSyntacticPredicate) {
+-				println("if (0 == inputState.guessing)");
+-				println("{");
+-				tabs++;
+-			}
+-
+-		// When not guessing, execute user handler action
+-		ActionTransInfo tInfo = new ActionTransInfo();
+-        printAction(processActionForSpecialSymbols(handler.action.getText(),
+-         					handler.action.getLine(), currentRule, tInfo));
+-
+-			if (grammar.hasSyntacticPredicate)
+-			{
+-				tabs--;
+-				println("}");
+-				println("else");
+-				println("{");
+-				tabs++;
+-				// When guessing, rethrow exception
+-				//println("throw " + extractIdOfAction(handler.exceptionTypeAndName) + ";");
+-				println("throw;");
+-				tabs--;
+-				println("}");
+-			}
+-			// Close catch phrase
+-			tabs--;
+-			println("}");
+-		}
+-	}
+-	/** Generate a try { opening if the element has a labeled handler in the rule */
+-	private void genErrorTryForElement(AlternativeElement el) {
+-		if (el.getLabel() == null) return;
+-		String r = el.enclosingRuleName;
+-		if ( grammar instanceof LexerGrammar ) {
+-			r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
+-		}
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
+-		if (rs == null) {
+-			antlrTool.panic("Enclosing rule not found!");
+-		}
+-		ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
+-		if (ex != null) {
+-			println("try   // for error handling");
+-			println("{");
+-			tabs++;
+-		}
+-	}
+-
+-    protected void genASTDeclaration(AlternativeElement el)
+-    {
+-        genASTDeclaration(el, labeledElementASTType);
+-    }
+-
+-    protected void genASTDeclaration(AlternativeElement el, String node_type)
+-    {
+-        genASTDeclaration(el, el.getLabel(), node_type);
+-    }
+-
+-    protected void genASTDeclaration(AlternativeElement el, String var_name, String node_type)
+-    {
+-        // already declared?
+-        if (declaredASTVariables.contains(el))
+-            return;
+-
+-        // emit code
+-        //String s = StringUtils.stripFrontBack(node_type, "\"", "\"");
+-        //println(s + " " + var_name + "_AST = null;");
+-        println(node_type + " " + var_name + "_AST = null;");
+-
+-        // mark as declared
+-        declaredASTVariables.put(el,el);
+-    }
+-
+-	/** Generate a header that is common to all CSharp files */
+-	protected void genHeader()
+-	{
+-		println("// $ANTLR "+Tool.version+": "+
+-			"\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" +
+-			" -> "+
+-			"\""+grammar.getClassName()+".cs\"$");
+-	}
+-
+-	private void genLiteralsTest() {
+-		println("_ttype = testLiteralsTable(_ttype);");
+-	}
+-
+-	private void genLiteralsTestForPartialToken() {
+-		println("_ttype = testLiteralsTable(text.ToString(_begin, text.Length-_begin), _ttype);");
+-	}
+-
+-	protected void genMatch(BitSet b) {
+-	}
+-
+-	protected void genMatch(GrammarAtom atom) {
+-		if ( atom instanceof StringLiteralElement ) {
+-			if ( grammar instanceof LexerGrammar ) {
+-				genMatchUsingAtomText(atom);
+-			}
+-			else {
+-				genMatchUsingAtomTokenType(atom);
+-			}
+-		}
+-		else if ( atom instanceof CharLiteralElement ) {
+-			if ( grammar instanceof LexerGrammar ) {
+-				genMatchUsingAtomText(atom);
+-			}
+-			else {
+-				antlrTool.error("cannot ref character literals in grammar: "+atom);
+-			}
+-		}
+-		else if ( atom instanceof TokenRefElement ) {
+-			genMatchUsingAtomText(atom);
+-		} else if (atom instanceof WildcardElement) {
+-          gen((WildcardElement)atom);
+-      }
+-	}
+-	protected void genMatchUsingAtomText(GrammarAtom atom) {
+-		// match() for trees needs the _t cursor
+-		String astArgs="";
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			if ( usingCustomAST )
+-				astArgs="(AST)_t,";
+-			else
+-				astArgs="_t,";
+-		}
+-
+-		// if in lexer and ! on element, save buffer index to kill later
+-		if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-			declareSaveIndexVariableIfNeeded();
+-			println("_saveIndex = text.Length;");
+-		}
+-
+-		print(atom.not ? "matchNot(" : "match(");
+-		_print(astArgs);
+-
+-		// print out what to match
+-		if (atom.atomText.equals("EOF")) {
+-			// horrible hack to handle EOF case
+-			_print("Token.EOF_TYPE");
+-		}
+-		else {
+-				_print(atom.atomText);
+-		}
+-		_println(");");
+-
+-		if ( grammar instanceof LexerGrammar && (!saveText||atom.getAutoGenType()==GrammarElement.AUTO_GEN_BANG) ) {
+-			declareSaveIndexVariableIfNeeded();
+-			println("text.Length = _saveIndex;");		// kill text atom put in buffer
+-		}
+-	}
+-
+-	protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
+-		// match() for trees needs the _t cursor
+-		String astArgs="";
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			if( usingCustomAST )
+-				astArgs="(AST)_t,";
+-			else
+-				astArgs="_t,";
+-		}
+-
+-		// If the literal can be mangled, generate the symbolic constant instead
+-		String mangledName = null;
+-		String s = astArgs + getValueString(atom.getType());
+-
+-		// matching
+-		println( (atom.not ? "matchNot(" : "match(") + s + ");");
+-	}
+-
+-	/** Generate the nextToken() rule.  nextToken() is a synthetic
+-	* lexer rule that is the implicit OR of all user-defined
+-	* lexer rules.
+-	*/
+-	public void genNextToken() {
+-		// Are there any public rules?  If not, then just generate a
+-		// fake nextToken().
+-		boolean hasPublicRules = false;
+-		for (int i = 0; i < grammar.rules.size(); i++) {
+-			RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
+-			if ( rs.isDefined() && rs.access.equals("public") ) {
+-				hasPublicRules = true;
+-				break;
+-			}
+-		}
+-		if (!hasPublicRules) {
+-			println("");
+-			println("override public Token nextToken()\t\t\t//throws TokenStreamException");
+-			println("{");
+-			tabs++;
+-			println("try");
+-			println("{");
+-			tabs++;
+-			println("uponEOF();");
+-			tabs--;
+-			println("}");
+-			println("catch(CharStreamIOException csioe)");
+-			println("{");
+-			tabs++;
+-			println("throw new TokenStreamIOException(csioe.io);");
+-			tabs--;
+-			println("}");
+-			println("catch(CharStreamException cse)");
+-			println("{");
+-			tabs++;
+-			println("throw new TokenStreamException(cse.Message);");
+-			tabs--;
+-			println("}");
+-			println("return new CommonToken(Token.EOF_TYPE, \"\");");
+-			tabs--;
+-			println("}");
+-			println("");
+-			return;
+-		}
+-
+-		// Create the synthesized nextToken() rule
+-		RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
+-		// Define the nextToken rule symbol
+-		RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
+-		nextTokenRs.setDefined();
+-		nextTokenRs.setBlock(nextTokenBlk);
+-		nextTokenRs.access = "private";
+-		grammar.define(nextTokenRs);
+-		// Analyze the nextToken rule
+-		boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
+-
+-		// Generate the next token rule
+-		String filterRule=null;
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			filterRule = ((LexerGrammar)grammar).filterRule;
+-		}
+-
+-		println("");
+-		println("override public Token nextToken()\t\t\t//throws TokenStreamException");
+-		println("{");
+-		tabs++;
+-		println("Token theRetToken = null;");
+-		_println("tryAgain:");
+-		println("for (;;)");
+-		println("{");
+-		tabs++;
+-		println("Token _token = null;");
+-		println("int _ttype = Token.INVALID_TYPE;");
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			println("setCommitToPath(false);");
+-			if ( filterRule!=null ) {
+-				// Here's a good place to ensure that the filter rule actually exists
+-            if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) {
+-            	grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
+-				}
+-				else {
+-					RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
+-					if ( !rs.isDefined() ) {
+-						grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
+-					}
+-					else if ( rs.access.equals("public") ) {
+-						grammar.antlrTool.error("Filter rule " + filterRule + " must be protected");
+-					}
+-				}
+-				println("int _m;");
+-				println("_m = mark();");
+-			}
+-		}
+-		println("resetText();");
+-
+-		println("try     // for char stream error handling");
+-		println("{");
+-		tabs++;
+-
+-		// Generate try around whole thing to trap scanner errors
+-		println("try     // for lexical error handling");
+-		println("{");
+-		tabs++;
+-
+-		// Test for public lexical rules with empty paths
+-		for (int i=0; i<nextTokenBlk.getAlternatives().size(); i++) {
+-			Alternative a = nextTokenBlk.getAlternativeAt(i);
+-			if ( a.cache[1].containsEpsilon() ) {
+-				//String r = a.head.toString();
+-            RuleRefElement rr = (RuleRefElement)a.head;
+-            String r = CodeGenerator.decodeLexerRuleName(rr.targetRule);
+-            antlrTool.warning("public lexical rule "+r+" is optional (can match \"nothing\")");
+-			}
+-		}
+-
+-		// Generate the block
+-		String newline = System.getProperty("line.separator");
+-		CSharpBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
+-		String errFinish = "if (LA(1)==EOF_CHAR) { uponEOF(); returnToken_ = makeToken(Token.EOF_TYPE); }";
+-		errFinish += newline+"\t\t\t\t";
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			if ( filterRule==null ) {
+-			//kunle: errFinish += "else { consume(); continue tryAgain; }";
+-			errFinish += "\t\t\t\telse";
+-			errFinish += "\t\t\t\t{";
+-			errFinish += "\t\t\t\t\tconsume();";
+-			errFinish += "\t\t\t\t\tgoto tryAgain;";
+-			errFinish += "\t\t\t\t}";
+-			}
+-			else {
+-				errFinish += "\t\t\t\t\telse"+newline+
+-					"\t\t\t\t\t{"+newline+
+-					"\t\t\t\t\tcommit();"+newline+
+-					"\t\t\t\t\ttry {m"+filterRule+"(false);}"+newline+
+-					"\t\t\t\t\tcatch(RecognitionException e)"+newline+
+-					"\t\t\t\t\t{"+newline+
+-					"\t\t\t\t\t	// catastrophic failure"+newline+
+-					"\t\t\t\t\t	reportError(e);"+newline+
+-					"\t\t\t\t\t	consume();"+newline+
+-					"\t\t\t\t\t}"+newline+
+-					"\t\t\t\t\tgoto tryAgain;"+newline+
+-					"\t\t\t\t}";
+-			}
+-		}
+-		else {
+-			errFinish += "else {"+throwNoViable+"}";
+-		}
+-		genBlockFinish(howToFinish, errFinish);
+-
+-		// at this point a valid token has been matched, undo "mark" that was done
+-		if ( ((LexerGrammar)grammar).filterMode && filterRule!=null ) {
+-			println("commit();");
+-		}
+-
+-		// Generate literals test if desired
+-		// make sure _ttype is set first; note returnToken_ must be
+-		// non-null as the rule was required to create it.
+-		println("if ( null==returnToken_ ) goto tryAgain; // found SKIP token");
+-		println("_ttype = returnToken_.Type;");
+-		if ( ((LexerGrammar)grammar).getTestLiterals()) {
+-			genLiteralsTest();
+-		}
+-
+-		// return token created by rule reference in switch
+-		println("returnToken_.Type = _ttype;");
+-		println("return returnToken_;");
+-
+-		// Close try block
+-		tabs--;
+-		println("}");
+-		println("catch (RecognitionException e) {");
+-		tabs++;
+-		if ( ((LexerGrammar)grammar).filterMode ) {
+-			if ( filterRule==null ) {
+-				println("if (!getCommitToPath())");
+-				println("{");
+-				tabs++;
+-				println("consume();");
+-				println("goto tryAgain;");
+-				tabs--;
+-				println("}");
+-			}
+-			else {
+-				println("if (!getCommitToPath())");
+-				println("{");
+-				tabs++;
+-				println("rewind(_m);");
+-				println("resetText();");
+-				println("try {m"+filterRule+"(false);}");
+-				println("catch(RecognitionException ee) {");
+-				println("	// horrendous failure: error in filter rule");
+-				println("	reportError(ee);");
+-				println("	consume();");
+-				println("}");
+-				//println("goto tryAgain;");
+-				tabs--;
+-				println("}");
+-				println("else");
+-			}
+-		}
+-		if ( nextTokenBlk.getDefaultErrorHandler() ) {
+-			println("{");
+-			tabs++;
+-			println("reportError(e);");
+-			println("consume();");
+-			tabs--;
+-			println("}");
+-		}
+-		else {
+-			// pass on to invoking routine
+-			tabs++;
+-			println("throw new TokenStreamRecognitionException(e);");
+-			tabs--;
+-		}
+-		tabs--;
+-		println("}");
+-
+-		// close CharStreamException try
+-		tabs--;
+-		println("}");
+-		println("catch (CharStreamException cse) {");
+-		println("	if ( cse is CharStreamIOException ) {");
+-		println("		throw new TokenStreamIOException(((CharStreamIOException)cse).io);");
+-		println("	}");
+-		println("	else {");
+-		println("		throw new TokenStreamException(cse.Message);");
+-		println("	}");
+-		println("}");
+-
+-		// close for-loop
+-		tabs--;
+-		println("}");
+-
+-		// close method nextToken
+-		tabs--;
+-		println("}");
+-		println("");
+-	}
+-	/** Gen a named rule block.
+-	 * ASTs are generated for each element of an alternative unless
+-	 * the rule or the alternative have a '!' modifier.
+-	 *
+-	 * If an alternative defeats the default tree construction, it
+-	 * must set <rule>_AST to the root of the returned AST.
+-	 *
+-	 * Each alternative that does automatic tree construction, builds
+-	 * up root and child list pointers in an ASTPair structure.
+-	 *
+-	 * A rule finishes by setting the returnAST variable from the
+-	 * ASTPair.
+-	 *
+-	 * @param rule The name of the rule to generate
+-	 * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
+-	*/
+-	public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum, TokenManager tm) {
+-		tabs=1;
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("genRule("+ s.getId() +")");
+-		if ( !s.isDefined() ) {
+-			antlrTool.error("undefined rule: "+ s.getId());
+-			return;
+-		}
+-
+-		// Generate rule return type, name, arguments
+-		RuleBlock rblk = s.getBlock();
+-		currentRule = rblk;
+-		currentASTResult = s.getId();
+-
+-      // clear list of declared ast variables..
+-      declaredASTVariables.clear();
+-
+-		// Save the AST generation state, and set it to that of the rule
+-		boolean savegenAST = genAST;
+-		genAST = genAST && rblk.getAutoGen();
+-
+-		// boolean oldsaveTest = saveText;
+-		saveText = rblk.getAutoGen();
+-
+-		// print javadoc comment if any
+-		if ( s.comment!=null ) {
+-			_println(s.comment);
+-		}
+-
+-		// Gen method access and final qualifier
+-		//print(s.access + " final ");
+-		print(s.access + " ");
+-
+-		// Gen method return type (note lexer return action set at rule creation)
+-		if (rblk.returnAction != null)
+-		{
+-			// Has specified return value
+-			_print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
+-		} else {
+-			// No specified return value
+-			_print("void ");
+-		}
+-
+-		// Gen method name
+-		_print(s.getId() + "(");
+-
+-		// Additional rule parameters common to all rules for this grammar
+-		_print(commonExtraParams);
+-		if (commonExtraParams.length() != 0 && rblk.argAction != null ) {
+-			_print(",");
+-		}
+-
+-		// Gen arguments
+-		if (rblk.argAction != null)
+-		{
+-			// Has specified arguments
+-			_println("");
+-			tabs++;
+-			println(rblk.argAction);
+-			tabs--;
+-			print(")");
+-		}
+-		else {
+-			// No specified arguments
+-			_print(")");
+-		}
+-
+-		// Gen throws clause and open curly
+-		_print(" //throws " + exceptionThrown);
+-		if ( grammar instanceof ParserGrammar ) {
+-			_print(", TokenStreamException");
+-		}
+-		else if ( grammar instanceof LexerGrammar ) {
+-			_print(", CharStreamException, TokenStreamException");
+-		}
+-		// Add user-defined exceptions unless lexer (for now)
+-		if ( rblk.throwsSpec!=null ) {
+-			if ( grammar instanceof LexerGrammar ) {
+-				antlrTool.error("user-defined throws spec not allowed (yet) for lexer rule "+rblk.ruleName);
+-			}
+-			else {
+-				_print(", "+rblk.throwsSpec);
+-			}
+-		}
+-
+-		_println("");
+-		_println("{");
+-		tabs++;
+-
+-		// Convert return action to variable declaration
+-		if (rblk.returnAction != null)
+-			println(rblk.returnAction + ";");
+-
+-		// print out definitions needed by rules for various grammar types
+-		println(commonLocalVars);
+-
+-		if (grammar.traceRules) {
+-			if ( grammar instanceof TreeWalkerGrammar ) {
+-				if ( usingCustomAST )
+-					println("traceIn(\""+ s.getId() +"\",(AST)_t);");
+-				else
+-					println("traceIn(\""+ s.getId() +"\",_t);");
+-			}
+-			else {
+-				println("traceIn(\""+ s.getId() +"\");");
+-			}
+-		}
+-
+-		if ( grammar instanceof LexerGrammar ) {
+-			// lexer rule default return value is the rule's token name
+-			// This is a horrible hack to support the built-in EOF lexer rule.
+-			if (s.getId().equals("mEOF"))
+-				println("_ttype = Token.EOF_TYPE;");
+-			else
+-				println("_ttype = " + s.getId().substring(1)+";");
+-
+-			// delay creation of _saveIndex until we need it OK?
+-			bSaveIndexCreated = false;
+-
+-			/*
+-			      println("boolean old_saveConsumedInput=saveConsumedInput;");
+-			      if ( !rblk.getAutoGen() ) {		// turn off "save input" if ! on rule
+-			      println("saveConsumedInput=false;");
+-			      }
+-			    */
+-		}
+-
+-		// if debugging, write code to mark entry to the rule
+-		if ( grammar.debuggingOutput)
+-			if (grammar instanceof ParserGrammar)
+-				println("fireEnterRule(" + ruleNum + ",0);");
+-			else if (grammar instanceof LexerGrammar)
+-			println("fireEnterRule(" + ruleNum + ",_ttype);");
+-
+-
+-		// Generate trace code if desired
+-		if ( grammar.debuggingOutput || grammar.traceRules) {
+-			println("try { // debugging");
+-			tabs++;
+-		}
+-
+-		// Initialize AST variables
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			// "Input" value for rule
+-			println(labeledElementASTType+" " + s.getId() + "_AST_in = ("+labeledElementASTType+")_t;");
+-		}
+-		if (grammar.buildAST) {
+-			// Parser member used to pass AST returns from rule invocations
+-			println("returnAST = null;");
+-			// Tracks AST construction
+-			// println("ASTPair currentAST = (inputState.guessing==0) ? new ASTPair() : null;");
+-			println("ASTPair currentAST = new ASTPair();");
+-			// User-settable return value for rule.
+-			println(labeledElementASTType+" " + s.getId() + "_AST = null;");
+-		}
+-
+-		genBlockPreamble(rblk);
+-		genBlockInitAction(rblk);
+-		println("");
+-
+-		// Search for an unlabeled exception specification attached to the rule
+-		ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
+-
+-		// Generate try block around the entire rule for  error handling
+-		if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
+-			println("try {      // for error handling");
+-			tabs++;
+-		}
+-
+-		// Generate the alternatives
+-		if ( rblk.alternatives.size()==1 )
+-		{
+-			// One alternative -- use simple form
+-			Alternative alt = rblk.getAlternativeAt(0);
+-			String pred = alt.semPred;
+-			if ( pred!=null )
+-				genSemPred(pred, currentRule.line);
+-			if (alt.synPred != null) {
+-				antlrTool.warning(
+-					"Syntactic predicate ignored for single alternative",
+-					grammar.getFilename(), alt.synPred.getLine(), alt.synPred.getColumn()
+-					);
+-			}
+-			genAlt(alt, rblk);
+-		}
+-		else
+-		{
+-			// Multiple alternatives -- generate complex form
+-			boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
+-
+-			CSharpBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
+-			genBlockFinish(howToFinish, throwNoViable);
+-		}
+-
+-		// Generate catch phrase for error handling
+-		if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler() ) {
+-			// Close the try block
+-			tabs--;
+-			println("}");
+-		}
+-
+-		// Generate user-defined or default catch phrases
+-		if (unlabeledUserSpec != null)
+-		{
+-			genErrorHandler(unlabeledUserSpec);
+-		}
+-		else if (rblk.getDefaultErrorHandler())
+-		{
+-			// Generate default catch phrase
+-			println("catch (" + exceptionThrown + " ex)");
+-			println("{");
+-			tabs++;
+-			// Generate code to handle error if not guessing
+-			if (grammar.hasSyntacticPredicate) {
+-				println("if (0 == inputState.guessing)");
+-				println("{");
+-				tabs++;
+-			}
+-			println("reportError(ex);");
+-			if ( !(grammar instanceof TreeWalkerGrammar) )
+-			{
+-				// Generate code to consume until token in k==1 follow set
+-				Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
+-				String followSetName = getBitsetName(markBitsetForGen(follow.fset));
+-				println("consume();");
+-				println("consumeUntil(" + followSetName + ");");
+-			}
+-			else
+-			{
+-				// Just consume one token
+-			println("if (null != _t)");
+-			println("{");
+-			tabs++;
+-			println("_t = _t.getNextSibling();");
+-			tabs--;
+-			println("}");
+-			}
+-			if (grammar.hasSyntacticPredicate)
+-			{
+-				tabs--;
+-				// When guessing, rethrow exception
+-				println("}");
+-				println("else");
+-				println("{");
+-				tabs++;
+-				//println("throw ex;");
+-				println("throw;");
+-				tabs--;
+-				println("}");
+-			}
+-			// Close catch phrase
+-			tabs--;
+-			println("}");
+-		}
+-
+-		// Squirrel away the AST "return" value
+-		if (grammar.buildAST) {
+-			println("returnAST = " + s.getId() + "_AST;");
+-		}
+-
+-		// Set return tree value for tree walkers
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("retTree_ = _t;");
+-		}
+-
+-		// Generate literals test for lexer rules so marked
+-		if (rblk.getTestLiterals()) {
+-			if ( s.access.equals("protected") ) {
+-				genLiteralsTestForPartialToken();
+-			}
+-			else {
+-				genLiteralsTest();
+-			}
+-		}
+-
+-		// if doing a lexer rule, dump code to create token if necessary
+-		if ( grammar instanceof LexerGrammar ) {
+-			println("if (_createToken && (null == _token) && (_ttype != Token.SKIP))");
+-			println("{");
+-			tabs++;
+-			println("_token = makeToken(_ttype);");
+-			println("_token.setText(text.ToString(_begin, text.Length-_begin));");
+-			tabs--;
+-			println("}");
+-			println("returnToken_ = _token;");
+-		}
+-
+-		// Gen the return statement if there is one (lexer has hard-wired return action)
+-		if (rblk.returnAction != null) {
+-			println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
+-		}
+-
+-		if ( grammar.debuggingOutput || grammar.traceRules) {
+-			tabs--;
+-			println("}");
+-			println("finally");
+-			println("{ // debugging");
+-			tabs++;
+-
+-			// If debugging, generate calls to mark exit of rule
+-			if ( grammar.debuggingOutput)
+-				if (grammar instanceof ParserGrammar)
+-					println("fireExitRule(" + ruleNum + ",0);");
+-				else if (grammar instanceof LexerGrammar)
+-				println("fireExitRule(" + ruleNum + ",_ttype);");
+-
+-			if (grammar.traceRules) {
+-				if ( grammar instanceof TreeWalkerGrammar ) {
+-					println("traceOut(\""+ s.getId() +"\",_t);");
+-				}
+-				else {
+-					println("traceOut(\""+ s.getId() +"\");");
+-				}
+-			}
+-
+-			tabs--;
+-			println("}");
+-		}
+-
+-		tabs--;
+-		println("}");
+-		println("");
+-
+-		// Restore the AST generation state
+-		genAST = savegenAST;
+-
+-		// restore char save state
+-		// saveText = oldsaveTest;
+-	}
+-	private void GenRuleInvocation(RuleRefElement rr) {
+-		// dump rule name
+-		_print(rr.targetRule + "(");
+-
+-		// lexers must tell rule if it should set returnToken_
+-		if ( grammar instanceof LexerGrammar ) {
+-			// if labeled, could access Token, so tell rule to create
+-			if ( rr.getLabel() != null ) {
+-				_print("true");
+-			}
+-			else {
+-				_print("false");
+-			}
+-			if (commonExtraArgs.length() != 0 || rr.args!=null ) {
+-				_print(",");
+-			}
+-		}
+-
+-		// Extra arguments common to all rules for this grammar
+-		_print(commonExtraArgs);
+-		if (commonExtraArgs.length() != 0 && rr.args!=null ) {
+-			_print(",");
+-		}
+-
+-		// Process arguments to method, if any
+-		RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-		if (rr.args != null)
+-		{
+-			// When not guessing, execute user arg action
+-			ActionTransInfo tInfo = new ActionTransInfo();
+-			String args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo);
+-			if ( tInfo.assignToRoot || tInfo.refRuleRoot!=null )
+-			{
+-            antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" +
+-                 currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-			_print(args);
+-
+-			// Warn if the rule accepts no arguments
+-			if (rs.block.argAction == null)
+-			{
+-				antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-		}
+-		else
+-		{
+-			// For C++, no warning if rule has parameters, because there may be default
+-			// values for all of the parameters
+-			if (rs.block.argAction != null)
+-			{
+-				antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn());
+-			}
+-		}
+-		_println(");");
+-
+-		// move down to the first child while parsing
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("_t = retTree_;");
+-		}
+-	}
+-	protected void genSemPred(String pred, int line) {
+-		// translate $ and # references
+-		ActionTransInfo tInfo = new ActionTransInfo();
+-		pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
+-		// ignore translation info...we don't need to do anything with it.
+-		String escapedPred = charFormatter.escapeString(pred);
+-
+-		// if debugging, wrap the semantic predicate evaluation in a method
+-		// that can tell SemanticPredicateListeners the result
+-		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)))
+-			pred = "fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.VALIDATING,"
+-			+ addSemPred(escapedPred) + "," + pred + ")";
+-		println("if (!(" + pred + "))");
+-		println("  throw new SemanticException(\"" + escapedPred + "\");");
+-	}
+-	/** Write an array of Strings which are the semantic predicate
+-	 *  expressions.  The debugger will reference them by number only
+-	 */
+-	protected void genSemPredMap() {
+-		Enumeration e = semPreds.elements();
+-		println("private string[] _semPredNames = {");
+-		tabs++;
+-		while(e.hasMoreElements())
+-			println("\""+e.nextElement()+"\",");
+-		tabs--;
+-		println("};");
+-	}
+-	protected void genSynPred(SynPredBlock blk, String lookaheadExpr) {
+-		if ( DEBUG_CODE_GENERATOR ) System.out.println("gen=>("+blk+")");
+-
+-		// Dump synpred result variable
+-		println("bool synPredMatched" + blk.ID + " = false;");
+-		// Gen normal lookahead test
+-		println("if (" + lookaheadExpr + ")");
+-		println("{");
+-		tabs++;
+-
+-		// Save input state
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("AST __t" + blk.ID + " = _t;");
+-		}
+-		else {
+-			println("int _m" + blk.ID + " = mark();");
+-		}
+-
+-		// Once inside the try, assume synpred works unless exception caught
+-		println("synPredMatched" + blk.ID + " = true;");
+-		println("inputState.guessing++;");
+-
+-		// if debugging, tell listeners that a synpred has started
+-		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-			(grammar instanceof LexerGrammar))) {
+-			println("fireSyntacticPredicateStarted();");
+-		}
+-
+-		syntacticPredLevel++;
+-		println("try {");
+-		tabs++;
+-		gen((AlternativeBlock)blk);		// gen code to test predicate
+-		tabs--;
+-		//println("System.out.println(\"pred "+blk+" succeeded\");");
+-		println("}");
+-		//kunle: lose a few warnings cheaply
+-		//  println("catch (" + exceptionThrown + " pe)");
+-		println("catch (" + exceptionThrown + ")");
+-		println("{");
+-		tabs++;
+-		println("synPredMatched"+blk.ID+" = false;");
+-		//println("System.out.println(\"pred "+blk+" failed\");");
+-		tabs--;
+-		println("}");
+-
+-		// Restore input state
+-		if ( grammar instanceof TreeWalkerGrammar ) {
+-			println("_t = __t"+blk.ID+";");
+-		}
+-		else {
+-			println("rewind(_m"+blk.ID+");");
+-		}
+-
+-		println("inputState.guessing--;");
+-
+-		// if debugging, tell listeners how the synpred turned out
+-		if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-			(grammar instanceof LexerGrammar))) {
+-			println("if (synPredMatched" + blk.ID +")");
+-			println("  fireSyntacticPredicateSucceeded();");
+-			println("else");
+-			println("  fireSyntacticPredicateFailed();");
+-		}
+-
+-		syntacticPredLevel--;
+-		tabs--;
+-
+-		// Close lookahead test
+-		println("}");
+-
+-		// Test synred result
+-		println("if ( synPredMatched"+blk.ID+" )");
+-		println("{");
+-	}
+-	/** Generate a static array containing the names of the tokens,
+-	 * indexed by the token type values.  This static array is used
+-	 * to format error messages so that the token identifers or literal
+-	 * strings are displayed instead of the token numbers.
+-	 *
+-	 * If a lexical rule has a paraphrase, use it rather than the
+-	 * token label.
+-	 */
+-	public void genTokenStrings() {
+-		// Generate a string for each token.  This creates a static
+-		// array of Strings indexed by token type.
+-		println("");
+-		println("public static readonly string[] tokenNames_ = new string[] {");
+-		tabs++;
+-
+-		// Walk the token vocabulary and generate a Vector of strings
+-		// from the tokens.
+-		Vector v = grammar.tokenManager.getVocabulary();
+-		for (int i = 0; i < v.size(); i++)
+-		{
+-			String s = (String)v.elementAt(i);
+-			if (s == null)
+-			{
+-				s = "<"+String.valueOf(i)+">";
+-			}
+-			if ( !s.startsWith("\"") && !s.startsWith("<") ) {
+-				TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
+-				if ( ts!=null && ts.getParaphrase()!=null ) {
+-					s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
+-				}
+-			}
+-			else if (s.startsWith("\"")) {
+-				s = StringUtils.stripFrontBack(s, "\"", "\"");
+-			}
+-			print(charFormatter.literalString(s));
+-			if (i != v.size()-1) {
+-				_print(",");
+-			}
+-			_println("");
+-		}
+-
+-		// Close the string array initailizer
+-		tabs--;
+-		println("};");
+-	}
+-	/** Generate the token types CSharp file */
+-	protected void genTokenTypes(TokenManager tm) throws IOException {
+-		// Open the token output CSharp file and set the currentOutput stream
+-		// SAS: file open was moved to a method so a subclass can override
+-		//      This was mainly for the VAJ interface
+-		setupOutput(tm.getName() + TokenTypesFileSuffix);
+-
+-		tabs = 0;
+-
+-		// Generate the header common to all CSharp files
+-		genHeader();
+-		// Do not use printAction because we assume tabs==0
+-		println(behavior.getHeaderAction(""));
+-
+-	      // Generate the CSharp namespace declaration (if specified)
+-		if (nameSpace != null)
+-			nameSpace.emitDeclarations(currentOutput);
+-		tabs++;
+-
+-		// Encapsulate the definitions in a class.  This has to be done as a class because
+-		// they are all constants and CSharp inteface  types cannot contain constants.
+-		println("public class " + tm.getName() + TokenTypesFileSuffix);
+-		//println("public class " + getTokenTypesClassName());
+-		println("{");
+-		tabs++;
+-
+-		genTokenDefinitions(tm);
+-
+-		// Close the interface
+-		tabs--;
+-		println("}");
+-
+-		tabs--;
+-		// Generate the CSharp namespace closures (if required)
+-		if (nameSpace != null)
+-			nameSpace.emitClosures(currentOutput);
+-
+-		// Close the tokens output file
+-		currentOutput.close();
+-		currentOutput = null;
+-		exitIfError();
+-	}
+-	protected void genTokenDefinitions(TokenManager tm) throws IOException {
+-		// Generate a definition for each token type
+-		Vector v = tm.getVocabulary();
+-
+-		// Do special tokens manually
+-		println("public const int EOF = " + Token.EOF_TYPE + ";");
+-		println("public const int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";");
+-
+-		for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-			String s = (String)v.elementAt(i);
+-			if (s != null) {
+-				if ( s.startsWith("\"") ) {
+-					// a string literal
+-					StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
+-					if ( sl==null ) {
+-						antlrTool.panic("String literal " + s + " not in symbol table");
+-					}
+-					else if ( sl.label != null ) {
+-						println("public const int " + sl.label + " = " + i + ";");
+-					}
+-					else {
+-						String mangledName = mangleLiteral(s);
+-						if (mangledName != null) {
+-							// We were able to create a meaningful mangled token name
+-							println("public const int " + mangledName + " = " + i + ";");
+-							// if no label specified, make the label equal to the mangled name
+-							sl.label = mangledName;
+-						}
+-						else {
+-							println("// " + s + " = " + i);
+-						}
+-					}
+-				}
+-				else if ( !s.startsWith("<") ) {
+-					println("public const int " + s + " = " + i + ";");
+-				}
+-			}
+-		}
+-		println("");
+-	}
+-	/** Process a string for an simple expression for use in xx/action.g
+-	 * it is used to cast simple tokens/references to the right type for
+-	 * the generated language. Basically called for every element in
+-	 * the vector to getASTCreateString(vector V)
+-	 * @param str A String.
+-	 */
+-	public String processStringForASTConstructor( String str )
+-	{
+-		/*
+-		System.out.println("processStringForASTConstructor: str = "+str+
+-		                   ", custom = "+(new Boolean(usingCustomAST)).toString()+
+-		                   ", tree = "+(new Boolean((grammar instanceof TreeWalkerGrammar))).toString()+
+-		                   ", parser = "+(new Boolean((grammar instanceof ParserGrammar))).toString()+
+-		                   ", notDefined = "+(new Boolean((!(grammar.tokenManager.tokenDefined(str))))).toString()
+-		                   );
+-		*/
+-		if( usingCustomAST &&
+-			( (grammar instanceof TreeWalkerGrammar)	||
+-			  (grammar instanceof ParserGrammar) )		&&
+-			!(grammar.tokenManager.tokenDefined(str)) )
+-		{
+-			//System.out.println("processStringForASTConstructor: "+str+" with cast");
+-			return "(AST)"+str;
+-		}
+-		else
+-		{
+-			//System.out.println("processStringForASTConstructor: "+str);
+-			return str;
+-		}
+-	}
+-	/** Get a string for an expression to generate creation of an AST subtree.
+-	  * @param v A Vector of String, where each element is an expression
+-	  *          in the target language yielding an AST node.
+-	  */
+-	public String getASTCreateString(Vector v) {
+-		if (v.size() == 0) {
+-			return "";
+-		}
+-		StringBuffer buf = new StringBuffer();
+-		buf.append("("+labeledElementASTType+
+-			")astFactory.make( (new ASTArray(" + v.size() +
+-			"))");
+-		for (int i = 0; i < v.size(); i++) {
+-			buf.append(".add(" + v.elementAt(i) + ")");
+-		}
+-		buf.append(")");
+-		return buf.toString();
+-	}
+-
+-	/** Get a string for an expression to generate creating of an AST node
+-	 * @param atom The grammar node for which you are creating the node
+-	 * @param str The arguments to the AST constructor
+-	 */
+-	public String getASTCreateString(GrammarAtom atom, String astCtorArgs) {
+-		String astCreateString = "astFactory.create(" + astCtorArgs + ")";
+-
+-		if (atom == null)
+-			return getASTCreateString(astCtorArgs);
+-		else {
+-			if ( atom.getASTNodeType() != null ) {
+-				// this Atom was instantiated from a Token that had an "AST" option - associating
+-				// it with a specific heterogeneous AST type - applied to either:
+-				// 1) it's underlying TokenSymbol (in the "tokens {} section" or,
+-                // 2) a particular token reference in the grammar
+-                //
+-				// For option (1), we simply generate a cast to hetero-AST type
+-				// For option (2), we generate a call to factory.create(Token, ASTNodeType) and cast it too
+-                TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atom.getText());
+-                if ( (ts == null) || (ts.getASTNodeType() != atom.getASTNodeType()) )
+-				    astCreateString = "(" + atom.getASTNodeType() + ") astFactory.create(" + astCtorArgs + ", \"" + atom.getASTNodeType() + "\")";
+-                else if ( (ts != null) && (ts.getASTNodeType() != null) )
+-                    astCreateString = "(" + ts.getASTNodeType() + ") " + astCreateString;
+-			}
+-			else if ( usingCustomAST )
+-				astCreateString = "(" + labeledElementASTType + ") " + astCreateString;
+-		}
+-		return astCreateString;
+-	}
+-
+-    /** Returns a string expression that creates an AST node using the specified
+-     *  AST constructor argument string.
+-	 *  Parses the first (possibly only) argument in the supplied AST ctor argument
+-	 *	string to obtain the token type -- ctorID.
+-	 *
+-	 *  IF the token type is a valid token symbol AND
+-	 *	   it has an associated AST node type     AND
+-	 *	   this is not a #[ID, "T", "ASTType"] constructor
+-	 *	THEN
+-	 *	   generate a call to factory.create(ID, Text, token.ASTNodeType())
+-	 *
+-	 *  #[ID, "T", "ASTType"] constructors are mapped to astFactory.create(ID, "T", "ASTType")
+-	 *
+-	 *  The supported AST constructor forms are:
+-	 *		#[ID]
+-	 *		#[ID, "text"]
+-	 *  	#[ID, "text", ASTclassname]	-- introduced in 2.7.2
+-	 *
+-     * @param astCtorArgs The arguments to the AST constructor
+-     */
+-	public String getASTCreateString(String astCtorArgs) {
+-		// kunle: 19-Aug-2002
+-		// This AST creation string is almost certainly[*1] a manual tree construction request.
+-		// From the manual [I couldn't read ALL of the code ;-)], this can only be one of:
+-		// 1) #[ID]                     -- 'astCtorArgs' contains: 'ID'                     (without quotes)    or,
+-		// 2) #[ID, "T"]                -- 'astCtorArgs' contains: 'ID, "Text"'             (without single quotes) or,
+-		// kunle: 08-Dec-2002 - 2.7.2a6
+-		// 3) #[ID, "T", "ASTTypeName"] -- 'astCtorArgs' contains: 'ID, "T", "ASTTypeName"' (without single quotes)
+-		//
+-		// [*1]  In my tests, 'atom' was '== null' only for manual tree construction requests
+-
+-		if ( astCtorArgs==null ) {
+-			astCtorArgs = "";
+-		}
+-		String astCreateString 	= "astFactory.create(" + astCtorArgs + ")";
+-		String  ctorID   	 	= astCtorArgs;
+-		String	ctorText 	 	= null;
+-		int		commaIndex;
+-		boolean	ctorIncludesCustomType = false;		// Is this a #[ID, "t", "ASTType"] constructor?
+-
+-		commaIndex = astCtorArgs.indexOf(',');
+-		if ( commaIndex != -1 ) {
+-			ctorID   = astCtorArgs.substring(0, commaIndex);					// the 'ID'   portion of #[ID, "Text"]
+-			ctorText = astCtorArgs.substring(commaIndex+1, astCtorArgs.length());	// the 'Text' portion of #[ID, "Text"]
+-			commaIndex = ctorText.indexOf(',');
+-			if (commaIndex != -1 ) {
+-				// This is an AST creation of the form: #[ID, "Text", "ASTTypename"]
+-				// Support for this was introduced with 2.7.2a6
+-				// create default type or (since 2.7.2) 3rd arg is classname
+-				ctorIncludesCustomType = true;
+-			}
+-		}
+-		TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ctorID);
+-		if ( (null != ts) && (null != ts.getASTNodeType()) )
+-			astCreateString = "(" + ts.getASTNodeType() + ") " + astCreateString;
+-		else if ( usingCustomAST )
+-			astCreateString = "(" + labeledElementASTType + ") " + astCreateString;
+-
+-		return astCreateString;
+-	}
+-
+-	protected String getLookaheadTestExpression(Lookahead[] look, int k) {
+-		StringBuffer e = new StringBuffer(100);
+-		boolean first = true;
+-
+-		e.append("(");
+-		for (int i = 1; i <= k; i++) {
+-			BitSet p = look[i].fset;
+-			if (!first) {
+-				e.append(") && (");
+-			}
+-			first = false;
+-
+-			// Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
+-			// There is no way to predict what that token would be.  Just
+-			// allow anything instead.
+-			if (look[i].containsEpsilon()) {
+-				e.append("true");
+-			} else {
+-				e.append(getLookaheadTestTerm(i, p));
+-			}
+-		}
+-		e.append(")");
+-
+-		return e.toString();
+-	}
+-
+-	/**Generate a lookahead test expression for an alternate.  This
+-	 * will be a series of tests joined by '&&' and enclosed by '()',
+-	 * the number of such tests being determined by the depth of the lookahead.
+-	 */
+-	protected String getLookaheadTestExpression(Alternative alt, int maxDepth) {
+-		int depth = alt.lookaheadDepth;
+-		if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
+-			// if the decision is nondeterministic, do the best we can: LL(k)
+-			// any predicates that are around will be generated later.
+-			depth = grammar.maxk;
+-		}
+-
+-		if ( maxDepth==0 ) {
+-			// empty lookahead can result from alt with sem pred
+-			// that can see end of token.  E.g., A : {pred}? ('a')? ;
+-			return "( true )";
+-		}
+-		return "(" + getLookaheadTestExpression(alt.cache,depth) + ")";
+-	}
+-
+-	/**Generate a depth==1 lookahead test expression given the BitSet.
+-	 * This may be one of:
+-	 * 1) a series of 'x==X||' tests
+-	 * 2) a range test using >= && <= where possible,
+-	 * 3) a bitset membership test for complex comparisons
+-	 * @param k The lookahead level
+-	 * @param p The lookahead set for level k
+-	 */
+-	protected String getLookaheadTestTerm(int k, BitSet p) {
+-		// Determine the name of the item to be compared
+-		String ts = lookaheadString(k);
+-
+-		// Generate a range expression if possible
+-		int[] elems = p.toArray();
+-		if (elementsAreRange(elems)) {
+-			return getRangeExpression(k, elems);
+-		}
+-
+-		// Generate a bitset membership test if possible
+-		StringBuffer e;
+-		int degree = p.degree();
+-		if ( degree == 0 ) {
+-			return "true";
+-		}
+-
+-		if (degree >= bitsetTestThreshold) {
+-			int bitsetIdx = markBitsetForGen(p);
+-			return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
+-		}
+-
+-		// Otherwise, generate the long-winded series of "x==X||" tests
+-		e = new StringBuffer();
+-		for (int i = 0; i < elems.length; i++) {
+-			// Get the compared-to item (token or character value)
+-			String cs = getValueString(elems[i]);
+-
+-			// Generate the element comparison
+-			if ( i>0 ) e.append("||");
+-			e.append(ts);
+-			e.append("==");
+-			e.append(cs);
+-		}
+-		return e.toString();
+-	}
+-
+-	/** Return an expression for testing a contiguous renage of elements
+-	 * @param k The lookahead level
+-	 * @param elems The elements representing the set, usually from BitSet.toArray().
+-	 * @return String containing test expression.
+-	 */
+-	public String getRangeExpression(int k, int[] elems) {
+-		if (!elementsAreRange(elems)) {
+-			antlrTool.panic("getRangeExpression called with non-range");
+-		}
+-		int begin = elems[0];
+-		int end = elems[elems.length-1];
+-
+-		return
+-			"(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
+-			lookaheadString(k) + " <= " + getValueString(end) + ")";
+-	}
+-
+-	/** getValueString: get a string representation of a token or char value
+-	 * @param value The token or char value
+-	 */
+-	private String getValueString(int value) {
+-		String cs;
+-		if ( grammar instanceof LexerGrammar ) {
+-			cs = charFormatter.literalChar(value);
+-		}
+-		else
+-		{
+-			TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
+-			if ( ts == null ) {
+-				return ""+value; // return token type as string
+-				// antlrTool.panic("vocabulary for token type " + value + " is null");
+-			}
+-			String tId = ts.getId();
+-			if ( ts instanceof StringLiteralSymbol ) {
+-				// if string literal, use predefined label if any
+-				// if no predefined, try to mangle into LITERAL_xxx.
+-				// if can't mangle, use int value as last resort
+-				StringLiteralSymbol sl = (StringLiteralSymbol)ts;
+-				String label = sl.getLabel();
+-				if ( label!=null ) {
+-					cs = label;
+-				}
+-				else {
+-					cs = mangleLiteral(tId);
+-					if (cs == null) {
+-						cs = String.valueOf(value);
+-					}
+-				}
+-			}
+-			else {
+-				cs = tId;
+-			}
+-		}
+-		return cs;
+-	}
+-
+-	/**Is the lookahead for this alt empty? */
+-	protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
+-		int depth = alt.lookaheadDepth;
+-		if ( depth == GrammarAnalyzer.NONDETERMINISTIC ) {
+-			depth = grammar.maxk;
+-		}
+-		for (int i=1; i<=depth && i<=maxDepth; i++) {
+-			BitSet p = alt.cache[i].fset;
+-			if (p.degree() != 0) {
+-				return false;
+-			}
+-		}
+-		return true;
+-	}
+-
+-	private String lookaheadString(int k) {
+-		if (grammar instanceof TreeWalkerGrammar) {
+-			return "_t.Type";
+-		}
+-		return "LA(" + k + ")";
+-	}
+-
+-	/** Mangle a string literal into a meaningful token name.  This is
+-	  * only possible for literals that are all characters.  The resulting
+-	  * mangled literal name is literalsPrefix with the text of the literal
+-	  * appended.
+-	  * @return A string representing the mangled literal, or null if not possible.
+-	  */
+-	private String mangleLiteral(String s) {
+-		String mangled = antlrTool.literalsPrefix;
+-		for (int i = 1; i < s.length()-1; i++) {
+-			if (!Character.isLetter(s.charAt(i)) &&
+-				s.charAt(i) != '_') {
+-				return null;
+-			}
+-			mangled += s.charAt(i);
+-		}
+-		if ( antlrTool.upperCaseMangledLiterals ) {
+-			mangled = mangled.toUpperCase();
+-		}
+-		return mangled;
+-	}
+-
+-	/** Map an identifier to it's corresponding tree-node variable.
+-	  * This is context-sensitive, depending on the rule and alternative
+-	  * being generated
+-	  * @param idParam The identifier name to map
+-	  * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
+-	  */
+-	public String mapTreeId(String idParam, ActionTransInfo transInfo) {
+-		// if not in an action of a rule, nothing to map.
+-		if ( currentRule==null ) return idParam;
+-
+-		boolean in_var = false;
+-		String id = idParam;
+-		if (grammar instanceof TreeWalkerGrammar)
+-		{
+-			if ( !grammar.buildAST )
+-			{
+-				in_var = true;
+-			}
+-			// If the id ends with "_in", then map it to the input variable
+-			else if (id.length() > 3 && id.lastIndexOf("_in") == id.length()-3)
+-			{
+-				// Strip off the "_in"
+-				id = id.substring(0, id.length()-3);
+-				in_var = true;
+-			}
+-		}
+-
+-		// Check the rule labels.  If id is a label, then the output
+-		// variable is label_AST, and the input variable is plain label.
+-		for (int i = 0; i < currentRule.labeledElements.size(); i++)
+-		{
+-			AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
+-			if (elt.getLabel().equals(id))
+-			{
+-				return in_var ? id : id + "_AST";
+-			}
+-		}
+-
+-		// Failing that, check the id-to-variable map for the alternative.
+-		// If the id is in the map, then output variable is the name in the
+-		// map, and input variable is name_in
+-		String s = (String)treeVariableMap.get(id);
+-		if (s != null)
+-		{
+-			if (s == NONUNIQUE)
+-			{
+-				// There is more than one element with this id
+-				antlrTool.error("Ambiguous reference to AST element "+id+
+-								" in rule "+currentRule.getRuleName());
+-				return null;
+-			}
+-			else if (s.equals(currentRule.getRuleName()))
+-			{
+-				// a recursive call to the enclosing rule is
+-				// ambiguous with the rule itself.
+-//				if( in_var )
+-//					System.out.println("returning null (rulename)");
+-				antlrTool.error("Ambiguous reference to AST element "+id+
+-								" in rule "+currentRule.getRuleName());
+-				return null;
+-			}
+-			else
+-			{
+-				return in_var ? s + "_in" : s;
+-			}
+-		}
+-
+-		// Failing that, check the rule name itself.  Output variable
+-		// is rule_AST; input variable is rule_AST_in (treeparsers).
+-		if( id.equals(currentRule.getRuleName()) )
+-		{
+-			String r = in_var ? id + "_AST_in" : id + "_AST";
+-			if ( transInfo!=null ) {
+-				if ( !in_var ) {
+-					transInfo.refRuleRoot = r;
+-				}
+-			}
+-			return r;
+-		}
+-		else
+-		{
+-			// id does not map to anything -- return itself.
+-			return id;
+-		}
+-	}
+-
+-	/** Given an element and the name of an associated AST variable,
+-	  * create a mapping between the element "name" and the variable name.
+-	  */
+-	private void mapTreeVariable(AlternativeElement e, String name)
+-	{
+-		// For tree elements, defer to the root
+-		if (e instanceof TreeElement) {
+-			mapTreeVariable( ((TreeElement)e).root, name);
+-			return;
+-		}
+-
+-		// Determine the name of the element, if any, for mapping purposes
+-		String elName = null;
+-
+-		// Don't map labeled items
+-		if (e.getLabel() == null) {
+-			if (e instanceof TokenRefElement) {
+-				// use the token id
+-				elName = ((TokenRefElement)e).atomText;
+-			}
+-			else if (e instanceof RuleRefElement) {
+-				// use the rule name
+-				elName = ((RuleRefElement)e).targetRule;
+-			}
+-		}
+-		// Add the element to the tree variable map if it has a name
+-		if (elName != null) {
+-			if (treeVariableMap.get(elName) != null) {
+-				// Name is already in the map -- mark it as duplicate
+-				treeVariableMap.remove(elName);
+-				treeVariableMap.put(elName, NONUNIQUE);
+-			}
+-			else {
+-				treeVariableMap.put(elName, name);
+-			}
+-		}
+-	}
+-
+-    /** Lexically process tree-specifiers in the action.
+-     *  This will replace #id and #(...) with the appropriate
+-     *  function calls and/or variables.
+-     */
+-    protected String processActionForSpecialSymbols(String actionStr,
+-                                                    int line,
+-                                                    RuleBlock currentRule,
+-                                                    ActionTransInfo tInfo)
+-	{
+-		if ( actionStr==null || actionStr.length()==0 )
+-			return null;
+-
+-        // The action trans info tells us (at the moment) whether an
+-        // assignment was done to the rule's tree root.
+-        if (grammar==null)
+-            return actionStr;
+-
+-        // see if we have anything to do...
+-        if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
+-            grammar instanceof TreeWalkerGrammar ||
+-            ((grammar instanceof LexerGrammar ||
+-            grammar instanceof ParserGrammar)
+-			  	&& actionStr.indexOf('$') != -1) )
+-		{
+-            // Create a lexer to read an action and return the translated version
+-            persistence.antlr.actions.csharp.ActionLexer lexer = new persistence.antlr.actions.csharp.ActionLexer(actionStr, currentRule, this, tInfo);
+-
+-            lexer.setLineOffset(line);
+-            lexer.setFilename(grammar.getFilename());
+-            lexer.setTool(antlrTool);
+-
+-            try {
+-                lexer.mACTION(true);
+-                actionStr = lexer.getTokenObject().getText();
+-                // System.out.println("action translated: "+actionStr);
+-                // System.out.println("trans info is "+tInfo);
+-            }
+-            catch (RecognitionException ex) {
+-                lexer.reportError(ex);
+-                return actionStr;
+-            }
+-            catch (TokenStreamException tex) {
+-                antlrTool.panic("Error reading action:"+actionStr);
+-                return actionStr;
+-            }
+-            catch (CharStreamException io) {
+-                antlrTool.panic("Error reading action:"+actionStr);
+-                return actionStr;
+-            }
+-        }
+-        return actionStr;
+-    }
+-
+-	private void setupGrammarParameters(Grammar g) {
+-		if (g instanceof ParserGrammar ||
+-			 g instanceof LexerGrammar  ||
+-			 g instanceof TreeWalkerGrammar
+-			)
+-		{
+-			/* RK: options also have to be added to Grammar.java and for options
+-			 * on the file level entries have to be defined in
+-			 * DefineGrammarSymbols.java and passed around via 'globals' in antlrTool.java
+-			 */
+-			if( antlrTool.nameSpace != null )
+-				nameSpace = new CSharpNameSpace( antlrTool.nameSpace.getName() );
+-			//genHashLines = antlrTool.genHashLines;
+-
+-			/* let grammar level options override filelevel ones...
+-			 */
+-			if( g.hasOption("namespace") ) {
+-				Token t = g.getOption("namespace");
+-				if( t != null ) {
+-					nameSpace = new CSharpNameSpace(t.getText());
+-				}
+-			}
+-			/*
+-			if( g.hasOption("genHashLines") ) {
+-				Token t = g.getOption("genHashLines");
+-				if( t != null )  {
+-					String val = StringUtils.stripFrontBack(t.getText(),"\"","\"");
+-					genHashLines = val.equals("true");
+-				}
+-			}
+-			*/
+-		}
+-
+-		if (g instanceof ParserGrammar) {
+-			labeledElementASTType = "AST";
+-			if ( g.hasOption("ASTLabelType") ) {
+-				Token tsuffix = g.getOption("ASTLabelType");
+-				if ( tsuffix != null ) {
+-					String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-					if ( suffix != null ) {
+-						usingCustomAST = true;
+-						labeledElementASTType = suffix;
+-					}
+-				}
+-			}
+-			labeledElementType = "Token ";
+-			labeledElementInit = "null";
+-			commonExtraArgs = "";
+-			commonExtraParams = "";
+-			commonLocalVars = "";
+-			lt1Value = "LT(1)";
+-			exceptionThrown = "RecognitionException";
+-			throwNoViable = "throw new NoViableAltException(LT(1), getFilename());";
+-		}
+-		else if (g instanceof LexerGrammar) {
+-			labeledElementType = "char ";
+-			labeledElementInit = "'\\0'";
+-			commonExtraArgs = "";
+-			commonExtraParams = "bool _createToken";
+-			commonLocalVars = "int _ttype; Token _token=null; int _begin=text.Length;";
+-			lt1Value = "LA(1)";
+-			exceptionThrown = "RecognitionException";
+-			throwNoViable = "throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());";
+-		}
+-		else if (g instanceof TreeWalkerGrammar) {
+-			labeledElementASTType = "AST";
+-			labeledElementType = "AST";
+-			if ( g.hasOption("ASTLabelType") ) {
+-				Token tsuffix = g.getOption("ASTLabelType");
+-				if ( tsuffix != null ) {
+-					String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-					if ( suffix != null ) {
+-						usingCustomAST = true;
+-						labeledElementASTType = suffix;
+-						labeledElementType = suffix;
+-					}
+-				}
+-			}
+-			if ( !g.hasOption("ASTLabelType") ) {
+-				g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL,"AST"));
+-			}
+-			labeledElementInit = "null";
+-			commonExtraArgs = "_t";
+-			commonExtraParams = "AST _t";
+-			commonLocalVars = "";
+-            if (usingCustomAST)
+-            	lt1Value = "(_t==ASTNULL) ? null : (" + labeledElementASTType + ")_t";
+-            else
+-            	lt1Value = "_t";
+-			exceptionThrown = "RecognitionException";
+-			throwNoViable = "throw new NoViableAltException(_t);";
+-		}
+-		else {
+-			antlrTool.panic("Unknown grammar type");
+-		}
+-	}
+-
+-	/** This method exists so a subclass, namely VAJCodeGenerator,
+-	 *  can open the file in its own evil way.  JavaCodeGenerator
+-	 *  simply opens a text file...
+-	 */
+-	public void setupOutput(String className) throws IOException
+-	{
+-		currentOutput = antlrTool.openOutputFile(className + ".cs");
+-	}
+-
+-	/** Helper method from Eric Smith's version of CSharpCodeGenerator.*/
+-	private static String OctalToUnicode(String str)
+-	{
+-		// only do any conversion if the string looks like "'\003'"
+-		if ( (4 <= str.length()) &&
+- 	        ('\'' == str.charAt(0)) &&
+- 	        ('\\' == str.charAt(1)) &&
+- 	        (('0' <= str.charAt(2)) && ('7' >= str.charAt(2))) &&
+- 	        ('\'' == str.charAt(str.length()-1)) )
+-		{
+-			// convert octal representation to decimal, then to hex
+-			Integer x = Integer.valueOf(str.substring(2, str.length()-1), 8);
+-
+-			return "'\\x" + Integer.toHexString(x.intValue()) + "'";
+-		}
+-		else {
+-			return str;
+-		}
+-	}
+-
+-	/** Helper method that returns the name of the interface/class/enum type for
+-	    token type constants.
+-	 */
+-	public String getTokenTypesClassName()
+-	{
+-		TokenManager tm = grammar.tokenManager;
+-		return new String(tm.getName() + TokenTypesFileSuffix);
+-	}
+-
+-	private void declareSaveIndexVariableIfNeeded()
+-	{
+-		if (!bSaveIndexCreated)
+-		{
+-			println("int _saveIndex = 0;");
+-			bSaveIndexCreated = true;
+-		}
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/CSharpNameSpace.java glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpNameSpace.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/CSharpNameSpace.java	2006-08-31 00:34:04.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/CSharpNameSpace.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,55 +0,0 @@
+-package persistence.antlr;
+-
+-/**
+- * ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- * Container for a C++ namespace specification.  Namespaces can be
+- * nested, so this contains a vector of all the nested names.
+- *
+- * @author David Wagner (JPL/Caltech) 8-12-00
+- *
+- */
+-
+-
+-//
+-// ANTLR C# Code Generator by Micheal Jordan
+-//                            Kunle Odutola       : kunle UNDERSCORE odutola AT hotmail DOT com
+-//                            Anthony Oguntimehin
+-//
+-// With many thanks to Eric V. Smith from the ANTLR list.
+-//
+-
+-// HISTORY:
+-//
+-// 17-May-2002 kunle    Original version
+-//
+-
+-import java.util.Vector;
+-import java.util.Enumeration;
+-import java.io.PrintWriter;
+-import java.util.StringTokenizer;
+-
+-public class CSharpNameSpace extends NameSpace
+-{
+-    public CSharpNameSpace(String name)
+-    {
+-		super(name);
+-    }
+-
+-    /**
+-     * Method to generate the required CSharp namespace declarations
+-     */
+-    void emitDeclarations(PrintWriter out) {
+-    	  out.println("namespace " + getName() );
+-    	  out.println("{");
+-    }
+-
+-    /**
+-     * Method to generate the required CSharp namespace closures
+-     */
+-    void emitClosures(PrintWriter out) {
+-    	  out.println("}");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/DebuggingCharScanner.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/DebuggingCharScanner.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/DebuggingCharScanner.java	2006-02-08 22:31:16.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/DebuggingCharScanner.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,237 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import persistence.antlr.*;
+-import persistence.antlr.collections.*;
+-import persistence.antlr.collections.impl.*;
+-import java.io.*;
+-
+-public abstract class DebuggingCharScanner extends CharScanner implements DebuggingParser {
+-	private ParserEventSupport parserEventSupport = new ParserEventSupport(this);
+-	private boolean _notDebugMode = false;
+-	protected String ruleNames[];
+-	protected String semPredNames[];
+-
+-
+-	public DebuggingCharScanner(InputBuffer cb) {
+-		super(cb);
+-	}
+-	public DebuggingCharScanner(LexerSharedInputState state) {
+-		super(state);
+-	}
+-	public void addMessageListener(MessageListener l) {
+-		parserEventSupport.addMessageListener(l);
+-	}
+-	public void addNewLineListener(NewLineListener l) {
+-		parserEventSupport.addNewLineListener(l);
+-	}
+-	public void addParserListener(ParserListener l) {
+-		parserEventSupport.addParserListener(l);
+-	}
+-	public void addParserMatchListener(ParserMatchListener l) {
+-		parserEventSupport.addParserMatchListener(l);
+-	}
+-	public void addParserTokenListener(ParserTokenListener l) {
+-		parserEventSupport.addParserTokenListener(l);
+-	}
+-	public void addSemanticPredicateListener(SemanticPredicateListener l) {
+-		parserEventSupport.addSemanticPredicateListener(l);
+-	}
+-	public void addSyntacticPredicateListener(SyntacticPredicateListener l) {
+-		parserEventSupport.addSyntacticPredicateListener(l);
+-	}
+-	public void addTraceListener(TraceListener l) {
+-		parserEventSupport.addTraceListener(l);
+-	}
+-	public void consume() throws CharStreamException {
+-		int la_1 = -99;
+-		try {la_1 = LA(1);}	
+-		catch (CharStreamException ignoreAnIOException) {}
+-		super.consume();
+-		parserEventSupport.fireConsume(la_1);		
+-	}
+-	protected void fireEnterRule(int num, int data) {
+-		if (isDebugMode())
+-			parserEventSupport.fireEnterRule(num,inputState.guessing,data);
+-	}
+-	protected void fireExitRule(int num, int ttype) {
+-		if (isDebugMode())
+-			parserEventSupport.fireExitRule(num,inputState.guessing, ttype);
+-	}
+-	protected boolean fireSemanticPredicateEvaluated(int type, int num, boolean condition) {
+-		if (isDebugMode())
+-			return parserEventSupport.fireSemanticPredicateEvaluated(type,num,condition,inputState.guessing);
+-		else
+-			return condition;
+-	}
+-	protected void fireSyntacticPredicateFailed() {
+-		if (isDebugMode())
+-			parserEventSupport.fireSyntacticPredicateFailed(inputState.guessing);
+-	}
+-	protected void fireSyntacticPredicateStarted() {
+-		if (isDebugMode())
+-			parserEventSupport.fireSyntacticPredicateStarted(inputState.guessing);
+-	}
+-	protected void fireSyntacticPredicateSucceeded() {
+-		if (isDebugMode())
+-			parserEventSupport.fireSyntacticPredicateSucceeded(inputState.guessing);
+-	}
+-	public String getRuleName(int num) {
+-		return ruleNames[num];
+-	}
+-	public String getSemPredName(int num) {
+-		return semPredNames[num];
+-	}
+-	public synchronized void goToSleep() {
+-		try {wait();}
+-		catch (InterruptedException e) {	}		
+-	}
+-	public boolean isDebugMode() {
+-		return !_notDebugMode;
+-	}
+-	public char LA(int i) throws CharStreamException {
+-		char la = super.LA(i);
+-		parserEventSupport.fireLA(i, la);
+-		return la;
+-	}
+-	protected Token makeToken(int t) {
+-		// do something with char buffer???
+-//		try {
+-//			Token tok = (Token)tokenObjectClass.newInstance();
+-//			tok.setType(t);
+-//			// tok.setText(getText()); done in generated lexer now
+-//			tok.setLine(line);
+-//			return tok;
+-//		}
+-//		catch (InstantiationException ie) {
+-//			panic("can't instantiate a Token");
+-//		}
+-//		catch (IllegalAccessException iae) {
+-//			panic("Token class is not accessible");
+-//		}
+-		return super.makeToken(t);
+-	}
+-	public void match(char c) throws MismatchedCharException, CharStreamException {
+-		char la_1 = LA(1);
+-		try {
+-			super.match(c);
+-			parserEventSupport.fireMatch(c, inputState.guessing);
+-		}
+-		catch (MismatchedCharException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatch(la_1, c, inputState.guessing);
+-			throw e;
+-		}
+-	}
+-	public void match(BitSet b) throws MismatchedCharException, CharStreamException {
+-		String text = this.text.toString();
+-		char la_1 = LA(1);
+-		try {
+-			super.match(b);
+-			parserEventSupport.fireMatch(la_1, b, text, inputState.guessing);
+-		}
+-		catch (MismatchedCharException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatch(la_1, b, text, inputState.guessing);
+-			throw e;
+-		}
+-	}
+-	public void match(String s) throws MismatchedCharException, CharStreamException {
+-		StringBuffer la_s = new StringBuffer("");
+-		int len = s.length();
+-		// peek at the next len worth of characters
+-		try {
+-			for(int i = 1; i <= len; i++) {
+-				la_s.append(super.LA(i));
+-			}
+-		}
+-		catch(Exception ignoreMe) {}
+-
+-		try {
+-			super.match(s);
+-			parserEventSupport.fireMatch(s, inputState.guessing);
+-		}
+-		catch (MismatchedCharException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatch(la_s.toString(), s, inputState.guessing);
+-			throw e;
+-		}
+-
+-	}
+-	public void matchNot(char c) throws MismatchedCharException, CharStreamException {
+-		char la_1 = LA(1);
+-		try {
+-			super.matchNot(c);
+-			parserEventSupport.fireMatchNot(la_1, c, inputState.guessing);
+-		}
+-		catch (MismatchedCharException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatchNot(la_1, c, inputState.guessing);
+-			throw e;
+-		}
+-
+-	}
+-	public void matchRange(char c1, char c2) throws MismatchedCharException, CharStreamException {
+-		char la_1 = LA(1);
+-		try {
+-			super.matchRange(c1,c2);
+-			parserEventSupport.fireMatch(la_1, ""+c1+c2, inputState.guessing);
+-		}
+-		catch (MismatchedCharException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatch(la_1, ""+c1+c2, inputState.guessing);
+-			throw e;
+-		}
+-
+-	}
+-	public void newline() {
+-		super.newline();
+-		parserEventSupport.fireNewLine(getLine());
+-	}
+-	public void removeMessageListener(MessageListener l) {
+-		parserEventSupport.removeMessageListener(l);
+-	}
+-	public void removeNewLineListener(NewLineListener l) {
+-		parserEventSupport.removeNewLineListener(l);
+-	}
+-	public void removeParserListener(ParserListener l) {
+-		parserEventSupport.removeParserListener(l);
+-	}
+-	public void removeParserMatchListener(ParserMatchListener l) {
+-		parserEventSupport.removeParserMatchListener(l);
+-	}
+-	public void removeParserTokenListener(ParserTokenListener l) {
+-		parserEventSupport.removeParserTokenListener(l);
+-	}
+-	public void removeSemanticPredicateListener(SemanticPredicateListener l) {
+-		parserEventSupport.removeSemanticPredicateListener(l);
+-	}
+-	public void removeSyntacticPredicateListener(SyntacticPredicateListener l) {
+-		parserEventSupport.removeSyntacticPredicateListener(l);
+-	}
+-	public void removeTraceListener(TraceListener l) {	
+-		parserEventSupport.removeTraceListener(l);
+-	}
+-	/** Report exception errors caught in nextToken() */
+-	public void reportError(MismatchedCharException e) {
+-		parserEventSupport.fireReportError(e);
+-		super.reportError(e);
+-	}
+-	/** Parser error-reporting function can be overridden in subclass */
+-	public void reportError(String s) {
+-		parserEventSupport.fireReportError(s);
+-		super.reportError(s);
+-	}
+-	/** Parser warning-reporting function can be overridden in subclass */
+-	public void reportWarning(String s) {
+-		parserEventSupport.fireReportWarning(s);
+-		super.reportWarning(s);
+-	}
+-	public void setDebugMode(boolean value) {
+-		_notDebugMode = !value;
+-	}
+-	public void setupDebugging() {
+-	}
+-	public synchronized void wakeUp() {
+-		notify();
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/DebuggingInputBuffer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/DebuggingInputBuffer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/DebuggingInputBuffer.java	2006-02-08 22:31:16.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/DebuggingInputBuffer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,63 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.CharStreamException;
+-import java.util.Vector;
+-import java.io.IOException;
+-
+-public class DebuggingInputBuffer extends InputBuffer {
+-	private InputBuffer buffer;
+-	private InputBufferEventSupport inputBufferEventSupport;
+-	private boolean debugMode = true;
+-
+-
+-	public DebuggingInputBuffer(InputBuffer buffer) {
+-		this.buffer = buffer;
+-		inputBufferEventSupport = new InputBufferEventSupport(this);
+-	}
+-	public void addInputBufferListener(InputBufferListener l) {
+-	  inputBufferEventSupport.addInputBufferListener(l);
+-	}
+-	public void consume() {
+-		char la = ' ';
+-		try {la = buffer.LA(1);}
+-		catch (CharStreamException e) {} // vaporize it...
+-		buffer.consume();
+-		if (debugMode)
+-			inputBufferEventSupport.fireConsume(la);
+-	}
+-	public void fill(int a) throws CharStreamException {
+-		buffer.fill(a);
+-	}
+-	public Vector getInputBufferListeners() {
+-		return inputBufferEventSupport.getInputBufferListeners();
+-	}
+-	public boolean isDebugMode() {
+-		return debugMode;
+-	}
+-	public boolean isMarked() {
+-		return buffer.isMarked();
+-	}
+-	public char LA(int i) throws CharStreamException {
+-		char la = buffer.LA(i);
+-		if (debugMode)
+-			inputBufferEventSupport.fireLA(la,i);
+-		return la;
+-	}
+-	public int mark() {
+-		int m = buffer.mark();
+-		inputBufferEventSupport.fireMark(m);
+-		return m;
+-	}
+-	public void removeInputBufferListener(InputBufferListener l) {
+-	  if (inputBufferEventSupport != null)
+-	    inputBufferEventSupport.removeInputBufferListener(l);
+-	}
+-	public void rewind(int mark) {
+-		buffer.rewind(mark);
+-		inputBufferEventSupport.fireRewind(mark);
+-	}
+-	public void setDebugMode(boolean value) {
+-		debugMode = value;
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/DebuggingParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/DebuggingParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/DebuggingParser.java	2006-02-08 22:31:17.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/DebuggingParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,11 +0,0 @@
+-package persistence.antlr.debug;
+-
+-/**
+- * This type was created in VisualAge.
+- */
+-public interface DebuggingParser {
+-
+-
+-	public String getRuleName(int n);
+-	public String getSemPredName(int n);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/Event.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/Event.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/Event.java	2006-02-08 22:31:17.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/Event.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,26 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import java.util.EventObject;
+-
+-public abstract class Event extends EventObject {
+-	private int type;
+-
+-
+-	public Event(Object source) {
+-		super(source);
+-	}
+-	public Event(Object source, int type) {
+-		super(source);
+-		setType(type);
+-	}
+-	public int getType() {
+-		return type;
+-	}
+-	void setType(int type) {
+-		this.type = type;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type) {
+-		setType(type);
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/GuessingEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/GuessingEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/GuessingEvent.java	2006-02-08 22:31:17.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/GuessingEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,24 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public abstract class GuessingEvent extends Event {
+-	private int guessing;
+-
+-
+-	public GuessingEvent(Object source) {
+-		super(source);
+-	}
+-	public GuessingEvent(Object source, int type) {
+-		super(source, type);
+-	}
+-	public int getGuessing() {
+-		return guessing;
+-	}
+-	void setGuessing(int guessing) {
+-		this.guessing = guessing;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, int guessing) {
+-		super.setValues(type);
+-		setGuessing(guessing);
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferAdapter.java	2006-02-08 22:31:18.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,24 +0,0 @@
+-package persistence.antlr.debug;
+-
+-/** A dummy implementation of a CharBufferListener -- this class is not
+-  * meant to be used by itself -- it's meant to be subclassed */
+-public abstract class InputBufferAdapter implements InputBufferListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {
+-	}
+-/**
+- * charConsumed method comment.
+- */
+-public void inputBufferConsume(InputBufferEvent e) {
+-}
+-/**
+- * charLA method comment.
+- */
+-public void inputBufferLA(InputBufferEvent e) {
+-}
+-	public void inputBufferMark(InputBufferEvent e) {}
+-	public void inputBufferRewind(InputBufferEvent e) {}
+-	public void refresh() {
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferEvent.java	2006-02-08 22:31:18.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,50 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class InputBufferEvent extends Event {
+-	char c;
+-	int lookaheadAmount; // amount of lookahead
+-	public static final int CONSUME = 0;
+-	public static final int LA = 1;
+-	public static final int MARK = 2;
+-	public static final int REWIND = 3;
+-
+-
+-/**
+- * CharBufferEvent constructor comment.
+- * @param source java.lang.Object
+- */
+-public InputBufferEvent(Object source) {
+-	super(source);
+-}
+-/**
+- * CharBufferEvent constructor comment.
+- * @param source java.lang.Object
+- */
+-public InputBufferEvent(Object source, int type, char c, int lookaheadAmount) {
+-	super(source);
+-	setValues(type, c, lookaheadAmount);
+-}
+-	public char getChar() {
+-		return c;
+-	}
+-	public int getLookaheadAmount() {
+-		return lookaheadAmount;
+-	}
+-	void setChar(char c) {
+-		this.c = c;
+-	}
+-	void setLookaheadAmount(int la) {
+-		this.lookaheadAmount = la;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, char c, int la) {
+-		super.setValues(type);
+-		setChar(c);
+-		setLookaheadAmount(la);
+-	}
+-	public String toString() {
+-		return "CharBufferEvent [" + 
+-			(getType()==CONSUME?"CONSUME, ":"LA, ")+
+-		getChar() + "," + getLookaheadAmount() + "]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferEventSupport.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferEventSupport.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferEventSupport.java	2006-02-08 22:31:18.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferEventSupport.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,85 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import java.util.Vector;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.RecognitionException;
+-
+-public class InputBufferEventSupport {
+-	private Object source;
+-	private Vector inputBufferListeners;
+-	private InputBufferEvent  inputBufferEvent;
+-	protected static final int CONSUME=0;
+-	protected static final int LA=1;
+-	protected static final int MARK=2;
+-	protected static final int REWIND=3;
+-
+-
+-	public InputBufferEventSupport(Object source) {
+-		inputBufferEvent = new InputBufferEvent(source);
+-		this.source = source;
+-	}
+-	public void addInputBufferListener(InputBufferListener l) {
+-		if (inputBufferListeners == null) inputBufferListeners = new Vector();
+-		inputBufferListeners.addElement(l);
+-	}
+-	public void fireConsume(char c) {
+-		inputBufferEvent.setValues(InputBufferEvent.CONSUME, c, 0);
+-		fireEvents(CONSUME, inputBufferListeners);		
+-	}
+-	public void fireEvent(int type, ListenerBase l) {
+-		switch(type) {
+-			case CONSUME: ((InputBufferListener)l).inputBufferConsume(inputBufferEvent); break;
+-			case LA:      ((InputBufferListener)l).inputBufferLA(inputBufferEvent); break;
+-			case MARK:    ((InputBufferListener)l).inputBufferMark(inputBufferEvent); break;
+-			case REWIND:  ((InputBufferListener)l).inputBufferRewind(inputBufferEvent); break;
+-			default:
+-				throw new IllegalArgumentException("bad type "+type+" for fireEvent()");
+-		}	
+-	}
+-	public void fireEvents(int type, Vector listeners) {
+-		Vector targets=null;
+-		ListenerBase l=null;
+-		
+-		synchronized (this) {
+-			if (listeners == null) return;
+-			targets = (Vector)listeners.clone();
+-		}
+-		
+-		if (targets != null)
+-			for (int i = 0; i < targets.size(); i++) {
+-				l = (ListenerBase)targets.elementAt(i);
+-				fireEvent(type, l);
+-			}
+-	}
+-	public void fireLA(char c, int la) {
+-		inputBufferEvent.setValues(InputBufferEvent.LA, c, la);
+-		fireEvents(LA, inputBufferListeners);
+-	}
+-	public void fireMark(int pos) {
+-		inputBufferEvent.setValues(InputBufferEvent.MARK, ' ', pos);
+-		fireEvents(MARK, inputBufferListeners);
+-	}
+-	public void fireRewind(int pos) {
+-		inputBufferEvent.setValues(InputBufferEvent.REWIND, ' ', pos);
+-		fireEvents(REWIND, inputBufferListeners);
+-	}
+-	public Vector getInputBufferListeners() {
+-		return inputBufferListeners;
+-	}
+-	protected void refresh(Vector listeners) {
+-		Vector v;
+-		synchronized (listeners) {
+-			v = (Vector)listeners.clone();
+-		}
+-		if (v != null)
+-			for (int i = 0; i < v.size(); i++)
+-				((ListenerBase)v.elementAt(i)).refresh();
+-	}
+-	public void refreshListeners() {
+-		refresh(inputBufferListeners);
+-	}
+-	public void removeInputBufferListener(InputBufferListener l) {
+-		if (inputBufferListeners != null)
+-			inputBufferListeners.removeElement(l);
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferListener.java	2006-02-08 22:31:19.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,10 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface InputBufferListener extends ListenerBase {
+-
+-
+-	public void inputBufferConsume(InputBufferEvent e);
+-	public void inputBufferLA(InputBufferEvent e);
+-	public void inputBufferMark(InputBufferEvent e);
+-	public void inputBufferRewind(InputBufferEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferReporter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferReporter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/InputBufferReporter.java	2006-02-08 22:31:19.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/InputBufferReporter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,37 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class InputBufferReporter implements InputBufferListener {
+-
+-
+-/**
+- * doneParsing method comment.
+- */
+-public void doneParsing(TraceEvent e) {
+-}
+-	public void inputBufferChanged(InputBufferEvent e) {
+-		System.out.println(e);
+-	}
+-/**
+- * charBufferConsume method comment.
+- */
+-public void inputBufferConsume(InputBufferEvent e) {
+-	System.out.println(e);
+-}
+-/**
+- * charBufferLA method comment.
+- */
+-public void inputBufferLA(InputBufferEvent e) {
+-	System.out.println(e);
+-}
+-	public void inputBufferMark(InputBufferEvent e) {
+-		System.out.println(e);
+-	}
+-	public void inputBufferRewind(InputBufferEvent e) {
+-		System.out.println(e);
+-	}
+-/**
+- * refresh method comment.
+- */
+-public void refresh() {
+-}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ListenerBase.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ListenerBase.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ListenerBase.java	2006-02-08 22:31:20.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ListenerBase.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,10 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import java.util.EventListener;
+-
+-public interface ListenerBase extends EventListener {
+-
+-
+-	public void doneParsing(TraceEvent e);
+-	public void refresh();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/LLkDebuggingParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/LLkDebuggingParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/LLkDebuggingParser.java	2006-02-08 22:31:19.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/LLkDebuggingParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,235 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import persistence.antlr.ParserSharedInputState;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.LLkParser;
+-import persistence.antlr.TokenBuffer;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.MismatchedTokenException;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.collections.impl.BitSet;
+-import java.io.IOException;
+-import persistence.antlr.TokenStreamException;
+-
+-import persistence.antlr.debug.ParserEventSupport;
+-
+-import java.lang.reflect.Constructor;
+-
+-public class LLkDebuggingParser extends LLkParser implements DebuggingParser {
+-	protected ParserEventSupport parserEventSupport = new ParserEventSupport(this);
+-
+-	private boolean _notDebugMode = false;
+-	protected String ruleNames[];
+-	protected String semPredNames[];
+-
+-
+-	public LLkDebuggingParser(int k_) {
+-		super(k_);
+-	}
+-	public LLkDebuggingParser(ParserSharedInputState state, int k_) {
+-		super(state, k_);
+-	}
+-	public LLkDebuggingParser(TokenBuffer tokenBuf, int k_) {
+-		super(tokenBuf, k_);
+-	}
+-	public LLkDebuggingParser(TokenStream lexer, int k_) {
+-		super(lexer, k_);
+-	}
+-	public void addMessageListener(MessageListener l) {
+-		parserEventSupport.addMessageListener(l);
+-	}
+-	public void addParserListener(ParserListener l) {
+-		parserEventSupport.addParserListener(l);
+-	}
+-	public void addParserMatchListener(ParserMatchListener l) {
+-		parserEventSupport.addParserMatchListener(l);
+-	}
+-	public void addParserTokenListener(ParserTokenListener l) {
+-		parserEventSupport.addParserTokenListener(l);
+-	}
+-	public void addSemanticPredicateListener(SemanticPredicateListener l) {
+-		parserEventSupport.addSemanticPredicateListener(l);
+-	}
+-	public void addSyntacticPredicateListener(SyntacticPredicateListener l) {
+-		parserEventSupport.addSyntacticPredicateListener(l);
+-	}
+-	public void addTraceListener(TraceListener l) {
+-		parserEventSupport.addTraceListener(l);
+-	}
+-	/**Get another token object from the token stream */
+-	public void consume() {
+-		int la_1 = -99;
+-		try {la_1 = LA(1);}	
+-		catch (TokenStreamException ignoreAnException) {}
+-		super.consume();
+-		parserEventSupport.fireConsume(la_1);
+-	}
+-	protected void fireEnterRule(int num,int data) {
+-		if (isDebugMode())
+-			parserEventSupport.fireEnterRule(num,inputState.guessing,data);
+-	}
+-	protected void fireExitRule(int num,int data) {
+-		if (isDebugMode())
+-			parserEventSupport.fireExitRule(num,inputState.guessing,data);
+-	}
+-	protected boolean fireSemanticPredicateEvaluated(int type, int num, boolean condition) {
+-		if (isDebugMode())
+-			return parserEventSupport.fireSemanticPredicateEvaluated(type,num,condition,inputState.guessing);
+-		else
+-			return condition;
+-	}
+-	protected void fireSyntacticPredicateFailed() {
+-		if (isDebugMode())
+-			parserEventSupport.fireSyntacticPredicateFailed(inputState.guessing);
+-	}
+-	protected void fireSyntacticPredicateStarted() {
+-		if (isDebugMode())
+-			parserEventSupport.fireSyntacticPredicateStarted(inputState.guessing);
+-	}
+-	protected void fireSyntacticPredicateSucceeded() {
+-		if (isDebugMode())
+-			parserEventSupport.fireSyntacticPredicateSucceeded(inputState.guessing);
+-	}
+-	public String getRuleName(int num) {
+-		return ruleNames[num];
+-	}
+-	public String getSemPredName(int num) {
+-		return semPredNames[num];
+-	}
+-	public synchronized void goToSleep() {
+-		try {wait();}
+-		catch (InterruptedException e) {	}		
+-	}
+-	public boolean isDebugMode() {
+-		return !_notDebugMode;
+-	}
+-	public boolean isGuessing() {
+-		return inputState.guessing > 0;
+-	}
+-	/** Return the token type of the ith token of lookahead where i=1
+-	 * is the current token being examined by the parser (i.e., it
+-	 * has not been matched yet).
+-	 */
+-	public int LA(int i) throws TokenStreamException {
+-		int la = super.LA(i);
+-		parserEventSupport.fireLA(i, la);
+-		return la;
+-	}
+-	/**Make sure current lookahead symbol matches token type <tt>t</tt>.
+-	 * Throw an exception upon mismatch, which is catch by either the
+-	 * error handler or by the syntactic predicate.
+-	 */
+-	public void match(int t) throws MismatchedTokenException, TokenStreamException {
+-		String text = LT(1).getText();
+-		int la_1 = LA(1);
+-		try {
+-			super.match(t);
+-			parserEventSupport.fireMatch(t, text, inputState.guessing);
+-		}
+-		catch (MismatchedTokenException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatch(la_1, t, text, inputState.guessing);
+-			throw e;
+-		}
+-	}
+-	/**Make sure current lookahead symbol matches the given set
+-	 * Throw an exception upon mismatch, which is catch by either the
+-	 * error handler or by the syntactic predicate.
+-	 */
+-	public void match(BitSet b) throws MismatchedTokenException, TokenStreamException {
+-		String text = LT(1).getText();
+-		int la_1 = LA(1);
+-		try {
+-			super.match(b);
+-			parserEventSupport.fireMatch(la_1,b, text, inputState.guessing);
+-		}
+-		catch (MismatchedTokenException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatch(la_1, b, text, inputState.guessing);
+-			throw e;
+-		}
+-	}
+-	public void matchNot(int t) throws MismatchedTokenException, TokenStreamException {
+-		String text = LT(1).getText();
+-		int la_1 = LA(1);
+-		try {
+-			super.matchNot(t);
+-			parserEventSupport.fireMatchNot(la_1, t, text, inputState.guessing);
+-		}
+-		catch (MismatchedTokenException e) {
+-			if (inputState.guessing == 0)
+-				parserEventSupport.fireMismatchNot(la_1, t, text, inputState.guessing);
+-			throw e;
+-		}
+-	}
+-	public void removeMessageListener(MessageListener l) {
+-		parserEventSupport.removeMessageListener(l);
+-	}
+-	public void removeParserListener(ParserListener l) {
+-		parserEventSupport.removeParserListener(l);
+-	}
+-	public void removeParserMatchListener(ParserMatchListener l) {
+-		parserEventSupport.removeParserMatchListener(l);
+-	}
+-	public void removeParserTokenListener(ParserTokenListener l) {
+-		parserEventSupport.removeParserTokenListener(l);
+-	}
+-	public void removeSemanticPredicateListener(SemanticPredicateListener l) {
+-		parserEventSupport.removeSemanticPredicateListener(l);
+-	}
+-	public void removeSyntacticPredicateListener(SyntacticPredicateListener l) {
+-		parserEventSupport.removeSyntacticPredicateListener(l);
+-	}
+-	public void removeTraceListener(TraceListener l) {	
+-		parserEventSupport.removeTraceListener(l);
+-	}
+-	/** Parser error-reporting function can be overridden in subclass */
+-	public void reportError(RecognitionException ex) {
+-		parserEventSupport.fireReportError(ex);
+-		super.reportError(ex);
+-	}
+-	/** Parser error-reporting function can be overridden in subclass */
+-	public void reportError(String s) {
+-		parserEventSupport.fireReportError(s);
+-		super.reportError(s);
+-	}
+-	/** Parser warning-reporting function can be overridden in subclass */
+-	public void reportWarning(String s) {
+-		parserEventSupport.fireReportWarning(s);
+-		super.reportWarning(s);
+-	}
+-	public void setDebugMode(boolean value) {
+-		_notDebugMode = !value;
+-	}
+-	public void setupDebugging(TokenBuffer tokenBuf) {
+-		setupDebugging(null, tokenBuf);
+-	}
+-	public void setupDebugging(TokenStream lexer) {
+-		setupDebugging(lexer, null);
+-	}
+-	/** User can override to do their own debugging */
+-	protected void setupDebugging(TokenStream lexer, TokenBuffer tokenBuf) {
+-		setDebugMode(true);
+-		// default parser debug setup is ParseView
+-		try {
+-			try {
+-				Class.forName("javax.swing.JButton");
+-			}
+-			catch (ClassNotFoundException e) {
+-				System.err.println("Swing is required to use ParseView, but is not present in your CLASSPATH");
+-				System.exit(1);
+-			}
+-			Class c = Class.forName("antlr.parseview.ParseView");
+-			Constructor constructor = c.getConstructor(new Class[] {LLkDebuggingParser.class, TokenStream.class, TokenBuffer.class});
+-			constructor.newInstance(new Object[] {this, lexer, tokenBuf});
+-		}
+-		catch(Exception e) {
+-			System.err.println("Error initializing ParseView: "+e);
+-			System.err.println("Please report this to Scott Stanchfield, thetick at magelang.com");
+-			System.exit(1);
+-		}
+-	}
+-	public synchronized void wakeUp() {
+-		notify();
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/MessageAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/MessageAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/MessageAdapter.java	2006-02-08 22:31:20.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/MessageAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,10 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class MessageAdapter implements MessageListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void refresh() {}
+-	public void reportError(MessageEvent e) {}
+-	public void reportWarning(MessageEvent e) {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/MessageEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/MessageEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/MessageEvent.java	2006-02-08 22:31:20.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/MessageEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,32 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class MessageEvent extends Event {
+-	private String text;
+-	public static int WARNING = 0;
+-	public static int ERROR = 1;
+-
+-
+-	public MessageEvent(Object source) {
+-		super(source);
+-	}
+-	public MessageEvent(Object source, int type, String text) {
+-		super(source);
+-		setValues(type,text);
+-	}
+-	public String getText() {
+-		return text;
+-	}
+-	void setText(String text) {
+-		this.text = text;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, String text) {
+-		super.setValues(type);
+-		setText(text);
+-	}
+-	public String toString() {
+-		return "ParserMessageEvent [" +
+-		       (getType()==WARNING?"warning,":"error,") +
+-		       getText() + "]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/MessageListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/MessageListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/MessageListener.java	2006-02-08 22:31:21.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/MessageListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,8 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface MessageListener extends ListenerBase {
+-
+-
+-	public void reportError(MessageEvent e);
+-	public void reportWarning(MessageEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/misc/ASTFrame.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/misc/ASTFrame.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/misc/ASTFrame.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/misc/ASTFrame.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,67 +0,0 @@
+-package persistence.antlr.debug.misc;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.*;
+-import persistence.antlr.collections.AST;
+-
+-import java.awt.*;
+-import java.awt.event.*;
+-import javax.swing.*;
+-import javax.swing.event.*;
+-import javax.swing.tree.*;
+-
+-public class ASTFrame extends JFrame {
+-    // The initial width and height of the frame
+-    static final int WIDTH = 200;
+-    static final int HEIGHT = 300;
+-
+-    class MyTreeSelectionListener
+-        implements TreeSelectionListener {
+-        public void valueChanged(TreeSelectionEvent event) {
+-            TreePath path = event.getPath();
+-            System.out.println("Selected: " +
+-                               path.getLastPathComponent());
+-            Object elements[] = path.getPath();
+-            for (int i = 0; i < elements.length; i++) {
+-                System.out.print("->" + elements[i]);
+-            }
+-            System.out.println();
+-        }
+-    }
+-
+-    public ASTFrame(String lab, AST r) {
+-        super(lab);
+-
+-        // Create the TreeSelectionListener
+-        TreeSelectionListener listener = new MyTreeSelectionListener();
+-        JTreeASTPanel tp = new JTreeASTPanel(new JTreeASTModel(r), null);
+-        Container content = getContentPane();
+-        content.add(tp, BorderLayout.CENTER);
+-        addWindowListener(new WindowAdapter() {
+-            public void windowClosing(WindowEvent e) {
+-                Frame f = (Frame)e.getSource();
+-                f.setVisible(false);
+-                f.dispose();
+-                // System.exit(0);
+-            }
+-        });
+-        setSize(WIDTH, HEIGHT);
+-    }
+-
+-    public static void main(String args[]) {
+-        // Create the tree nodes
+-        ASTFactory factory = new ASTFactory();
+-        CommonAST r = (CommonAST)factory.create(0, "ROOT");
+-        r.addChild((CommonAST)factory.create(0, "C1"));
+-        r.addChild((CommonAST)factory.create(0, "C2"));
+-        r.addChild((CommonAST)factory.create(0, "C3"));
+-
+-        ASTFrame frame = new ASTFrame("AST JTree Example", r);
+-        frame.setVisible(true);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTModel.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTModel.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTModel.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTModel.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,98 +0,0 @@
+-package persistence.antlr.debug.misc;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-import javax.swing.*;
+-import javax.swing.event.*;
+-import javax.swing.tree.*;
+-
+-public class JTreeASTModel implements TreeModel {
+-
+-    AST root = null;
+-
+-    public JTreeASTModel(AST t) {
+-        if (t == null) {
+-            throw new IllegalArgumentException("root is null");
+-        }
+-        root = t;
+-    }
+-
+-    public void addTreeModelListener(TreeModelListener l) {
+-    }
+-
+-    public Object getChild(Object parent, int index) {
+-        if (parent == null) {
+-            return null;
+-        }
+-        AST p = (AST)parent;
+-        AST c = p.getFirstChild();
+-        if (c == null) {
+-            throw new ArrayIndexOutOfBoundsException("node has no children");
+-        }
+-        int i = 0;
+-        while (c != null && i < index) {
+-            c = c.getNextSibling();
+-            i++;
+-        }
+-        return c;
+-    }
+-
+-    public int getChildCount(Object parent) {
+-        if (parent == null) {
+-            throw new IllegalArgumentException("root is null");
+-        }
+-        AST p = (AST)parent;
+-        AST c = p.getFirstChild();
+-        int i = 0;
+-        while (c != null) {
+-            c = c.getNextSibling();
+-            i++;
+-        }
+-        return i;
+-    }
+-
+-    public int getIndexOfChild(Object parent, Object child) {
+-        if (parent == null || child == null) {
+-            throw new IllegalArgumentException("root or child is null");
+-        }
+-        AST p = (AST)parent;
+-        AST c = p.getFirstChild();
+-        if (c == null) {
+-            throw new ArrayIndexOutOfBoundsException("node has no children");
+-        }
+-        int i = 0;
+-        while (c != null && c != child) {
+-            c = c.getNextSibling();
+-            i++;
+-        }
+-        if (c == child) {
+-            return i;
+-        }
+-        throw new java.util.NoSuchElementException("node is not a child");
+-    }
+-
+-    public Object getRoot() {
+-        return root;
+-    }
+-
+-    public boolean isLeaf(Object node) {
+-        if (node == null) {
+-            throw new IllegalArgumentException("node is null");
+-        }
+-        AST t = (AST)node;
+-        return t.getFirstChild() == null;
+-    }
+-
+-    public void removeTreeModelListener(TreeModelListener l) {
+-    }
+-
+-    public void valueForPathChanged(TreePath path, Object newValue) {
+-        System.out.println("heh, who is calling this mystery method?");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTPanel.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTPanel.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTPanel.java	2006-08-31 00:34:15.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/misc/JTreeASTPanel.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,37 +0,0 @@
+-package persistence.antlr.debug.misc;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.awt.*;
+-import javax.swing.*;
+-import javax.swing.tree.*;
+-import javax.swing.event.*;
+-
+-public class JTreeASTPanel extends JPanel {
+-    JTree tree;
+-
+-    public JTreeASTPanel(TreeModel tm, TreeSelectionListener listener) {
+-        // use a layout that will stretch tree to panel size
+-        setLayout(new BorderLayout());
+-
+-        // Create tree
+-        tree = new JTree(tm);
+-
+-        // Change line style
+-        tree.putClientProperty("JTree.lineStyle", "Angled");
+-
+-        // Add TreeSelectionListener
+-        if (listener != null)
+-            tree.addTreeSelectionListener(listener);
+-
+-        // Put tree in a scrollable pane's viewport
+-        JScrollPane sp = new JScrollPane();
+-        sp.getViewport().add(tree);
+-
+-        add(sp, BorderLayout.CENTER);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/NewLineEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/NewLineEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/NewLineEvent.java	2006-02-08 22:31:21.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/NewLineEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,27 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class NewLineEvent extends Event {
+-	private int line;
+-
+-
+-	public NewLineEvent(Object source) {
+-		super(source);
+-	}
+-	public NewLineEvent(Object source, int line) {
+-		super(source);
+-		setValues(line);
+-	}
+-	public int getLine() {
+-		return line;
+-	}
+-	void setLine(int line) {
+-		this.line = line;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int line) {
+-		setLine(line);
+-	}
+-	public String toString() {
+-		return "NewLineEvent [" + line + "]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/NewLineListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/NewLineListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/NewLineListener.java	2006-02-08 22:31:21.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/NewLineListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,7 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface NewLineListener extends ListenerBase {
+-
+-
+-	public void hitNewLine(NewLineEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserAdapter.java	2006-02-08 22:31:22.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,22 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class ParserAdapter implements ParserListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void enterRule(TraceEvent e) {}
+-	public void exitRule(TraceEvent e) {}
+-	public void parserConsume(ParserTokenEvent e) {}
+-	public void parserLA(ParserTokenEvent e) {}
+-	public void parserMatch(ParserMatchEvent e) {}
+-	public void parserMatchNot(ParserMatchEvent e) {}
+-	public void parserMismatch(ParserMatchEvent e) {}
+-	public void parserMismatchNot(ParserMatchEvent e) {}
+-	public void refresh() {}
+-	public void reportError(MessageEvent e) {}
+-	public void reportWarning(MessageEvent e) {}
+-	public void semanticPredicateEvaluated(SemanticPredicateEvent e) {}
+-	public void syntacticPredicateFailed(SyntacticPredicateEvent e) {}
+-	public void syntacticPredicateStarted(SyntacticPredicateEvent e) {}
+-	public void syntacticPredicateSucceeded(SyntacticPredicateEvent e) {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserController.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserController.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserController.java	2006-02-08 22:31:22.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserController.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,8 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface ParserController extends ParserListener {
+-
+-
+-	public void checkBreak();
+-	public void setParserEventSupport(ParserEventSupport p);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserEventSupport.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserEventSupport.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserEventSupport.java	2006-02-08 22:31:22.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserEventSupport.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,372 +0,0 @@
+-package persistence.antlr.debug;
+-
+-import java.util.Vector;
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.RecognitionException;
+-
+-
+-/** A class to assist in firing parser events
+- *  NOTE: I intentionally _did_not_ synchronize the event firing and
+- *        add/remove listener methods.  This is because the add/remove should
+- *        _only_ be called by the parser at its start/end, and the _same_thread_
+- *        should be performing the parsing.  This should help performance a tad...
+- */
+-public class ParserEventSupport {
+-	private Object source;
+-	private Hashtable doneListeners;
+-	private Vector matchListeners;
+-	private Vector messageListeners;
+-	private Vector tokenListeners;
+-	private Vector traceListeners;
+-	private Vector semPredListeners;
+-	private Vector synPredListeners;
+-	private Vector newLineListeners;
+-	private ParserMatchEvent        matchEvent;
+-	private MessageEvent            messageEvent;
+-	private ParserTokenEvent        tokenEvent;
+-	private SemanticPredicateEvent  semPredEvent;
+-	private SyntacticPredicateEvent synPredEvent;
+-	private TraceEvent              traceEvent;
+-	private NewLineEvent            newLineEvent;
+-	private ParserController        controller;
+-	protected static final int CONSUME=0;
+-	protected static final int ENTER_RULE=1;
+-	protected static final int EXIT_RULE=2;
+-	protected static final int LA=3;
+-	protected static final int MATCH=4;
+-	protected static final int MATCH_NOT=5;
+-	protected static final int MISMATCH=6;
+-	protected static final int MISMATCH_NOT=7;
+-	protected static final int REPORT_ERROR=8;
+-	protected static final int REPORT_WARNING=9;
+-	protected static final int SEMPRED=10;
+-	protected static final int SYNPRED_FAILED=11;
+-	protected static final int SYNPRED_STARTED=12;
+-	protected static final int SYNPRED_SUCCEEDED=13;
+-	protected static final int NEW_LINE=14;
+-	protected static final int DONE_PARSING=15;
+-	private int ruleDepth = 0;
+-
+-
+-	public ParserEventSupport(Object source) {
+-		matchEvent   = new ParserMatchEvent(source);
+-		messageEvent = new MessageEvent(source);
+-		tokenEvent   = new ParserTokenEvent(source);
+-		traceEvent   = new TraceEvent(source);
+-		semPredEvent = new SemanticPredicateEvent(source);
+-		synPredEvent = new SyntacticPredicateEvent(source);
+-		newLineEvent = new NewLineEvent(source);
+-		this.source = source;
+-	}
+-	public void addDoneListener(ListenerBase l) {
+-		if (doneListeners == null) doneListeners = new Hashtable();
+-		Integer i = (Integer)doneListeners.get(l);
+-		int val;
+-		if (i != null)
+-			val = i.intValue() + 1;
+-		else
+-			val = 1;
+-		doneListeners.put(l, new Integer(val));
+-	}
+-	public void addMessageListener(MessageListener l) {
+-		if (messageListeners == null) messageListeners = new Vector();
+-		messageListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void addNewLineListener(NewLineListener l) {
+-		if (newLineListeners == null) newLineListeners = new Vector();
+-		newLineListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void addParserListener(ParserListener l) {
+-		if (l instanceof ParserController) {
+-			((ParserController)l).setParserEventSupport(this);
+-			controller = (ParserController)l;
+-		}	
+-		addParserMatchListener(l);
+-		addParserTokenListener(l);
+-
+-		addMessageListener(l);
+-		addTraceListener(l);
+-		addSemanticPredicateListener(l);
+-		addSyntacticPredicateListener(l);
+-	}
+-	public void addParserMatchListener(ParserMatchListener l) {
+-		if (matchListeners == null) matchListeners = new Vector();
+-		matchListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void addParserTokenListener(ParserTokenListener l) {
+-		if (tokenListeners == null) tokenListeners = new Vector();
+-		tokenListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void addSemanticPredicateListener(SemanticPredicateListener l) {
+-		if (semPredListeners == null) semPredListeners = new Vector();
+-		semPredListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void addSyntacticPredicateListener(SyntacticPredicateListener l) {
+-		if (synPredListeners == null) synPredListeners = new Vector();
+-		synPredListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void addTraceListener(TraceListener l) {
+-		if (traceListeners == null) traceListeners = new Vector();
+-		traceListeners.addElement(l);
+-		addDoneListener(l);
+-	}
+-	public void fireConsume(int value) {
+-		tokenEvent.setValues(ParserTokenEvent.CONSUME, 1, value);
+-		fireEvents(CONSUME, tokenListeners);		
+-	}
+-	public void fireDoneParsing() {
+-		traceEvent.setValues(TraceEvent.DONE_PARSING, 0,0,0);
+-
+-		Hashtable targets=null;
+-//		Hashtable targets=doneListeners;
+-		ListenerBase l=null;
+-		
+-		synchronized (this) {
+-			if (doneListeners == null) return;
+-			targets = (Hashtable)doneListeners.clone();
+-		}
+-		
+-		if (targets != null) {
+-			Enumeration e = targets.keys();
+-			while(e.hasMoreElements()) {
+-				l = (ListenerBase)e.nextElement();
+-				fireEvent(DONE_PARSING, l);
+-			}
+-		}	
+-		if (controller != null)
+-			controller.checkBreak();
+-	}
+-	public void fireEnterRule(int ruleNum, int guessing, int data) {
+-		ruleDepth++;
+-		traceEvent.setValues(TraceEvent.ENTER, ruleNum, guessing, data);
+-		fireEvents(ENTER_RULE, traceListeners);
+-	}
+-	public void fireEvent(int type, ListenerBase l) {
+-		switch(type) {
+-			case CONSUME:    ((ParserTokenListener)l).parserConsume(tokenEvent); break;
+-			case LA:         ((ParserTokenListener)l).parserLA(tokenEvent);      break;
+-
+-			case ENTER_RULE: ((TraceListener)l).enterRule(traceEvent);           break;
+-			case EXIT_RULE:  ((TraceListener)l).exitRule(traceEvent);            break;
+-
+-			case MATCH:        ((ParserMatchListener)l).parserMatch(matchEvent);       break;
+-			case MATCH_NOT:    ((ParserMatchListener)l).parserMatchNot(matchEvent);    break;
+-			case MISMATCH:     ((ParserMatchListener)l).parserMismatch(matchEvent);    break;
+-			case MISMATCH_NOT: ((ParserMatchListener)l).parserMismatchNot(matchEvent); break;
+-
+-			case SEMPRED:      ((SemanticPredicateListener)l).semanticPredicateEvaluated(semPredEvent); break;
+-
+-			case SYNPRED_STARTED:   ((SyntacticPredicateListener)l).syntacticPredicateStarted(synPredEvent);   break;
+-			case SYNPRED_FAILED:    ((SyntacticPredicateListener)l).syntacticPredicateFailed(synPredEvent);    break;
+-			case SYNPRED_SUCCEEDED: ((SyntacticPredicateListener)l).syntacticPredicateSucceeded(synPredEvent); break;
+-
+-			case REPORT_ERROR:   ((MessageListener)l).reportError(messageEvent);   break;
+-			case REPORT_WARNING: ((MessageListener)l).reportWarning(messageEvent); break;
+-
+-			case DONE_PARSING: l.doneParsing(traceEvent); break;
+-			case NEW_LINE:     ((NewLineListener)l).hitNewLine(newLineEvent); break;
+-			
+-			default:
+-				throw new IllegalArgumentException("bad type "+type+" for fireEvent()");
+-		}	
+-	}
+-	public void fireEvents(int type, Vector listeners) {
+-		ListenerBase l=null;
+-		
+-		if (listeners != null)
+-			for (int i = 0; i < listeners.size(); i++) {
+-				l = (ListenerBase)listeners.elementAt(i);
+-				fireEvent(type, l);
+-			}
+-		if (controller != null)
+-			controller.checkBreak();
+-	}
+-	public void fireExitRule(int ruleNum, int guessing, int data) {
+-		traceEvent.setValues(TraceEvent.EXIT, ruleNum, guessing, data);
+-		fireEvents(EXIT_RULE, traceListeners);
+-		ruleDepth--;
+-		if (ruleDepth == 0)
+-			fireDoneParsing();
+-	}
+-	public void fireLA(int k, int la) {
+-		tokenEvent.setValues(ParserTokenEvent.LA, k, la);
+-		fireEvents(LA, tokenListeners);
+-	}
+-	public void fireMatch(char c, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR, c, new Character(c), null, guessing, false, true);
+-		fireEvents(MATCH, matchListeners);
+-	}
+-	public void fireMatch(char value, BitSet b, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR_BITSET, value, b, null, guessing, false, true);
+-		fireEvents(MATCH, matchListeners);
+-	}
+-	public void fireMatch(char value, String target, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR_RANGE, value, target, null, guessing, false, true);
+-		fireEvents(MATCH, matchListeners);
+-	}
+-	public void fireMatch(int value, BitSet b, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.BITSET, value, b, text, guessing, false, true);
+-		fireEvents(MATCH, matchListeners);
+-	}
+-	public void fireMatch(int n, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.TOKEN, n, new Integer(n), text, guessing, false, true);
+-		fireEvents(MATCH, matchListeners);
+-	}
+-	public void fireMatch(String s, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.STRING, 0, s, null, guessing, false, true);
+-		fireEvents(MATCH, matchListeners);
+-	}
+-	public void fireMatchNot(char value, char n, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR, value, new Character(n), null, guessing, true, true);
+-		fireEvents(MATCH_NOT, matchListeners);
+-	}
+-	public void fireMatchNot(int value, int n, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.TOKEN, value, new Integer(n), text, guessing, true, true);
+-		fireEvents(MATCH_NOT, matchListeners);
+-	}
+-	public void fireMismatch(char value, char n, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR, value, new Character(n), null, guessing, false, false);
+-		fireEvents(MISMATCH, matchListeners);
+-	}
+-	public void fireMismatch(char value, BitSet b, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR_BITSET, value, b, null, guessing, false, true);
+-		fireEvents(MISMATCH, matchListeners);
+-	}
+-	public void fireMismatch(char value, String target, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR_RANGE, value, target, null, guessing, false, true);
+-		fireEvents(MISMATCH, matchListeners);
+-	}
+-	public void fireMismatch(int value, int n, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.TOKEN, value, new Integer(n), text, guessing, false, false);
+-		fireEvents(MISMATCH, matchListeners);
+-	}
+-	public void fireMismatch(int value, BitSet b, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.BITSET, value, b, text, guessing, false, true);
+-		fireEvents(MISMATCH, matchListeners);
+-	}
+-	public void fireMismatch(String value, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.STRING, 0, text, value, guessing, false, true);
+-		fireEvents(MISMATCH, matchListeners);
+-	}
+-	public void fireMismatchNot(char value, char c, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.CHAR, value, new Character(c), null, guessing, true, true);
+-		fireEvents(MISMATCH_NOT, matchListeners);
+-	}
+-	public void fireMismatchNot(int value, int n, String text, int guessing) {
+-		matchEvent.setValues(ParserMatchEvent.TOKEN, value, new Integer(n), text, guessing, true, true);
+-		fireEvents(MISMATCH_NOT, matchListeners);
+-	}
+-	public void fireNewLine(int line) {
+-		newLineEvent.setValues(line);
+-		fireEvents(NEW_LINE, newLineListeners);
+-	}
+-	public void fireReportError(Exception e) {
+-		messageEvent.setValues(MessageEvent.ERROR, e.toString());
+-		fireEvents(REPORT_ERROR, messageListeners);
+-	}
+-	public void fireReportError(String s) {
+-		messageEvent.setValues(MessageEvent.ERROR, s);
+-		fireEvents(REPORT_ERROR, messageListeners);
+-	}
+-	public void fireReportWarning(String s) {
+-		messageEvent.setValues(MessageEvent.WARNING, s);
+-		fireEvents(REPORT_WARNING, messageListeners);
+-	}
+-	public boolean fireSemanticPredicateEvaluated(int type, int condition, boolean result, int guessing) {
+-		semPredEvent.setValues(type, condition, result, guessing);
+-		fireEvents(SEMPRED, semPredListeners);
+-		return result;
+-	}
+-	public void fireSyntacticPredicateFailed(int guessing) {
+-		synPredEvent.setValues(0, guessing);
+-		fireEvents(SYNPRED_FAILED, synPredListeners);
+-	}
+-	public void fireSyntacticPredicateStarted(int guessing) {
+-		synPredEvent.setValues(0, guessing);
+-		fireEvents(SYNPRED_STARTED, synPredListeners);
+-	}
+-	public void fireSyntacticPredicateSucceeded(int guessing) {
+-		synPredEvent.setValues(0, guessing);
+-		fireEvents(SYNPRED_SUCCEEDED, synPredListeners);
+-	}
+-	protected void refresh(Vector listeners) {
+-		Vector v;
+-		synchronized (listeners) {
+-			v = (Vector)listeners.clone();
+-		}
+-		if (v != null)
+-			for (int i = 0; i < v.size(); i++)
+-				((ListenerBase)v.elementAt(i)).refresh();
+-	}
+-	public void refreshListeners() {
+-		refresh(matchListeners);
+-		refresh(messageListeners);
+-		refresh(tokenListeners);
+-		refresh(traceListeners);
+-		refresh(semPredListeners);
+-		refresh(synPredListeners);
+-	}
+-	public void removeDoneListener(ListenerBase l) {
+-		if (doneListeners == null) return;
+-		Integer i = (Integer)doneListeners.get(l);
+-		int val=0;
+-		if (i != null)
+-			val = i.intValue() - 1;
+-
+-		if (val == 0) 
+-			doneListeners.remove(l);
+-		else
+-			doneListeners.put(l, new Integer(val));
+-	}
+-	public void removeMessageListener(MessageListener l) {
+-		if (messageListeners != null)
+-			messageListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-	public void removeNewLineListener(NewLineListener l) {
+-		if (newLineListeners != null)
+-			newLineListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-	public void removeParserListener(ParserListener l) {
+-		removeParserMatchListener(l);
+-		removeMessageListener(l);
+-		removeParserTokenListener(l);
+-		removeTraceListener(l);
+-		removeSemanticPredicateListener(l);
+-		removeSyntacticPredicateListener(l);
+-	}
+-	public void removeParserMatchListener(ParserMatchListener l) {
+-		if (matchListeners != null)
+-			matchListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-	public void removeParserTokenListener(ParserTokenListener l) {
+-		if (tokenListeners != null)
+-			tokenListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-	public void removeSemanticPredicateListener(SemanticPredicateListener l) {
+-		if (semPredListeners != null)
+-			semPredListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-	public void removeSyntacticPredicateListener(SyntacticPredicateListener l) {
+-		if (synPredListeners != null)
+-			synPredListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-	public void removeTraceListener(TraceListener l) {
+-		if (traceListeners != null)
+-			traceListeners.removeElement(l);
+-		removeDoneListener(l);
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserListener.java	2006-02-08 22:31:23.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,9 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface ParserListener extends SemanticPredicateListener, 
+-										ParserMatchListener, 
+-										MessageListener, 
+-										ParserTokenListener, 
+-										TraceListener, 
+-										SyntacticPredicateListener {
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserMatchAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserMatchAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserMatchAdapter.java	2006-02-08 22:31:23.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserMatchAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,12 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class ParserMatchAdapter implements ParserMatchListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void parserMatch(ParserMatchEvent e) {}
+-	public void parserMatchNot(ParserMatchEvent e) {}
+-	public void parserMismatch(ParserMatchEvent e) {}
+-	public void parserMismatchNot(ParserMatchEvent e) {}
+-	public void refresh() {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserMatchEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserMatchEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserMatchEvent.java	2006-02-08 22:31:23.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserMatchEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,74 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class ParserMatchEvent extends GuessingEvent {
+-	// NOTE: for a mismatch on type STRING, the "text" is used as the lookahead
+-	//       value.  Normally "value" is this
+-	public static int TOKEN=0;
+-	public static int BITSET=1;
+-	public static int CHAR=2;
+-	public static int CHAR_BITSET=3;
+-	public static int STRING=4;
+-	public static int CHAR_RANGE=5;
+-	private boolean inverse;
+-	private boolean matched;
+-	private Object target;
+-	private int value;
+-	private String text;
+-
+-
+-	public ParserMatchEvent(Object source) {
+-		super(source);
+-	}
+-	public ParserMatchEvent(Object source, int type,
+-	                        int value, Object target, String text, int guessing,
+-	                        boolean inverse, boolean matched) {
+-		super(source);
+-		setValues(type,value,target,text,guessing,inverse,matched);
+-	}
+-	public Object getTarget() {
+-		return target;
+-	}
+-	public String getText() {
+-		return text;
+-	}
+-	public int getValue() {
+-		return value;
+-	}
+-	public boolean isInverse() {
+-		return inverse;
+-	}
+-	public boolean isMatched() {
+-		return matched;
+-	}
+-	void setInverse(boolean inverse) {
+-		this.inverse = inverse;
+-	}
+-	void setMatched(boolean matched) {
+-		this.matched = matched;
+-	}
+-	void setTarget(Object target) {
+-		this.target = target;
+-	}
+-	void setText(String text) {
+-		this.text = text;
+-	}
+-	void setValue(int value) {
+-		this.value = value;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, int value, Object target, String text, int guessing, boolean inverse, boolean matched) {
+-		super.setValues(type, guessing);
+-		setValue(value);
+-		setTarget(target);
+-		setInverse(inverse);
+-		setMatched(matched);
+-		setText(text);
+-	}
+-	public String toString() {
+-		return "ParserMatchEvent [" + 
+-		       (isMatched()?"ok,":"bad,") +
+-		       (isInverse()?"NOT ":"") +
+-		       (getType()==TOKEN?"token,":"bitset,") +
+-		       getValue() + "," + getTarget() + "," + getGuessing() + "]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserMatchListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserMatchListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserMatchListener.java	2006-02-08 22:31:24.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserMatchListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,10 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface ParserMatchListener extends ListenerBase {
+-
+-
+-	public void parserMatch(ParserMatchEvent e);
+-	public void parserMatchNot(ParserMatchEvent e);
+-	public void parserMismatch(ParserMatchEvent e);
+-	public void parserMismatchNot(ParserMatchEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserReporter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserReporter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserReporter.java	2006-02-08 22:31:29.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserReporter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,42 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class ParserReporter extends Tracer implements ParserListener {
+-
+-
+-	public void parserConsume(ParserTokenEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void parserLA(ParserTokenEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void parserMatch(ParserMatchEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void parserMatchNot(ParserMatchEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void parserMismatch(ParserMatchEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void parserMismatchNot(ParserMatchEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void reportError(MessageEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void reportWarning(MessageEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void semanticPredicateEvaluated(SemanticPredicateEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void syntacticPredicateFailed(SyntacticPredicateEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void syntacticPredicateStarted(SyntacticPredicateEvent e) {
+-		System.out.println(indent+e);
+-	}
+-	public void syntacticPredicateSucceeded(SyntacticPredicateEvent e) {
+-		System.out.println(indent+e);
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserTokenAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserTokenAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserTokenAdapter.java	2006-02-08 22:31:29.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserTokenAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,10 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class ParserTokenAdapter implements ParserTokenListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void parserConsume(ParserTokenEvent e) {}
+-	public void parserLA(ParserTokenEvent e) {}
+-	public void refresh() {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserTokenEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserTokenEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserTokenEvent.java	2006-02-08 22:31:29.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserTokenEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,44 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class ParserTokenEvent extends Event {
+-	private int value;
+-	private int amount;
+-	public static int LA=0;
+-	public static int CONSUME=1;
+-
+-
+-	public ParserTokenEvent(Object source) {
+-		super(source);
+-	}
+-	public ParserTokenEvent(Object source, int type,
+-	                        int amount, int value) {
+-		super(source);
+-		setValues(type,amount,value);
+-	}
+-	public int getAmount() {
+-		return amount;
+-	}
+-	public int getValue() {
+-		return value;
+-	}
+-	void setAmount(int amount) {
+-		this.amount = amount;
+-	}
+-	void setValue(int value) {
+-		this.value = value;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, int amount, int value) {
+-		super.setValues(type);
+-		setAmount(amount);
+-		setValue(value);
+-	}
+-	public String toString() {
+-		if (getType()==LA)
+-			return "ParserTokenEvent [LA," + getAmount() + "," +
+-			       getValue() + "]"; 
+-		else
+-			return "ParserTokenEvent [consume,1," +
+-			       getValue() + "]"; 
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserTokenListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserTokenListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParserTokenListener.java	2006-02-08 22:31:30.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParserTokenListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,8 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface ParserTokenListener extends ListenerBase {
+-
+-
+-	public void parserConsume(ParserTokenEvent e);
+-	public void parserLA(ParserTokenEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/ParseTreeDebugParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParseTreeDebugParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/ParseTreeDebugParser.java	2006-02-08 22:31:22.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/ParseTreeDebugParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,116 +0,0 @@
+-package persistence.antlr.debug;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- */
+-
+-import persistence.antlr.*;
+-import persistence.antlr.collections.impl.BitSet;
+-
+-import java.util.Stack;
+-
+-/** Override the standard matching and rule entry/exit routines
+- *  to build parse trees.  This class is useful for 2.7.3 where
+- *  you can specify a superclass like
+- *
+- *   class TinyCParser extends Parser(ParseTreeDebugParser);
+- */
+-public class ParseTreeDebugParser extends LLkParser {
+-	/** Each new rule invocation must have it's own subtree.  Tokens
+-	 *  are added to the current root so we must have a stack of subtree roots.
+-	 */
+-	protected Stack currentParseTreeRoot = new Stack();
+-
+-	/** Track most recently created parse subtree so that when parsing
+-	 *  is finished, we can get to the root.
+-	 */
+-	protected ParseTreeRule mostRecentParseTreeRoot = null;
+-
+-	/** For every rule replacement with a production, we bump up count. */
+-	protected int numberOfDerivationSteps = 1; // n replacements plus step 0
+-
+-	public ParseTreeDebugParser(int k_) {
+-		super(k_);
+-	}
+-
+-	public ParseTreeDebugParser(ParserSharedInputState state, int k_) {
+-		super(state,k_);
+-	}
+-
+-	public ParseTreeDebugParser(TokenBuffer tokenBuf, int k_) {
+-		super(tokenBuf, k_);
+-	}
+-
+-	public ParseTreeDebugParser(TokenStream lexer, int k_) {
+-		super(lexer,k_);
+-	}
+-
+-	public ParseTree getParseTree() {
+-		return mostRecentParseTreeRoot;
+-	}
+-
+-	public int getNumberOfDerivationSteps() {
+-		return numberOfDerivationSteps;
+-	}
+-
+-	public void match(int i) throws MismatchedTokenException, TokenStreamException {
+-		addCurrentTokenToParseTree();
+-		super.match(i);
+-	}
+-
+-	public void match(BitSet bitSet) throws MismatchedTokenException, TokenStreamException {
+-		addCurrentTokenToParseTree();
+-		super.match(bitSet);
+-	}
+-
+-	public void matchNot(int i) throws MismatchedTokenException, TokenStreamException {
+-		addCurrentTokenToParseTree();
+-		super.matchNot(i);
+-	}
+-
+-	/** This adds LT(1) to the current parse subtree.  Note that the match()
+-	 *  routines add the node before checking for correct match.  This means
+-	 *  that, upon mismatched token, there will a token node in the tree
+-	 *  corresponding to where that token was expected.  For no viable
+-	 *  alternative errors, no node will be in the tree as nothing was
+-	 *  matched() (the lookahead failed to predict an alternative).
+-	 */
+-	protected void addCurrentTokenToParseTree() throws TokenStreamException {
+-		if (inputState.guessing>0) {
+-			return;
+-		}
+-		ParseTreeRule root = (ParseTreeRule)currentParseTreeRoot.peek();
+-		ParseTreeToken tokenNode = null;
+-		if ( LA(1)==Token.EOF_TYPE ) {
+-			tokenNode = new ParseTreeToken(new persistence.antlr.CommonToken("EOF"));
+-		}
+-		else {
+-			tokenNode = new ParseTreeToken(LT(1));
+-		}
+-		root.addChild(tokenNode);
+-	}
+-
+-	/** Create a rule node, add to current tree, and make it current root */
+-	public void traceIn(String s) throws TokenStreamException {
+-		if (inputState.guessing>0) {
+-			return;
+-		}
+-		ParseTreeRule subRoot = new ParseTreeRule(s);
+-		if ( currentParseTreeRoot.size()>0 ) {
+-			ParseTreeRule oldRoot = (ParseTreeRule)currentParseTreeRoot.peek();
+-			oldRoot.addChild(subRoot);
+-		}
+-		currentParseTreeRoot.push(subRoot);
+-		numberOfDerivationSteps++;
+-	}
+-
+-	/** Pop current root; back to adding to old root */
+-	public void traceOut(String s) throws TokenStreamException {
+-		if (inputState.guessing>0) {
+-			return;
+-		}
+-		mostRecentParseTreeRoot = (ParseTreeRule)currentParseTreeRoot.pop();
+-	}
+-
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateAdapter.java	2006-02-08 22:31:30.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,9 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class SemanticPredicateAdapter implements SemanticPredicateListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void refresh() {}
+-	public void semanticPredicateEvaluated(SemanticPredicateEvent e) {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateEvent.java	2006-02-08 22:31:30.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,38 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class SemanticPredicateEvent extends GuessingEvent {
+-	public static final int VALIDATING=0;
+-	public static final int PREDICTING=1;
+-	private int condition;
+-	private boolean result;
+-
+-
+-	public SemanticPredicateEvent(Object source) {
+-		super(source);
+-	}
+-	public SemanticPredicateEvent(Object source, int type) {
+-		super(source, type);
+-	}
+-	public int getCondition() {
+-		return condition;
+-	}
+-	public boolean getResult() {
+-		return result;
+-	}
+-	void setCondition(int condition) {
+-		this.condition = condition;
+-	}
+-	void setResult(boolean result) {
+-		this.result = result;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, int condition, boolean result, int guessing) {
+-		super.setValues(type, guessing);
+-		setCondition(condition);
+-		setResult(result);
+-	}
+-	public String toString() {
+-		return "SemanticPredicateEvent [" + 
+-		       getCondition() + "," + getResult() + "," + getGuessing() + "]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateListener.java	2006-02-08 22:31:30.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SemanticPredicateListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,7 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface SemanticPredicateListener extends ListenerBase {
+-
+-
+-	public void semanticPredicateEvaluated(SemanticPredicateEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateAdapter.java	2006-02-08 22:31:31.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,11 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class SyntacticPredicateAdapter implements SyntacticPredicateListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void refresh() {}
+-	public void syntacticPredicateFailed(SyntacticPredicateEvent e) {}
+-	public void syntacticPredicateStarted(SyntacticPredicateEvent e) {}
+-	public void syntacticPredicateSucceeded(SyntacticPredicateEvent e) {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateEvent.java	2006-02-08 22:31:31.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,19 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class SyntacticPredicateEvent extends GuessingEvent {
+-
+-
+-	public SyntacticPredicateEvent(Object source) {
+-		super(source);
+-	}
+-	public SyntacticPredicateEvent(Object source, int type) {
+-		super(source, type);
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	void setValues(int type, int guessing) {
+-		super.setValues(type, guessing);
+-	}
+-	public String toString() {
+-		return "SyntacticPredicateEvent [" + getGuessing() + "]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateListener.java	2006-02-08 22:31:31.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/SyntacticPredicateListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,9 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface SyntacticPredicateListener extends ListenerBase {
+-
+-
+-	public void syntacticPredicateFailed(SyntacticPredicateEvent e);
+-	public void syntacticPredicateStarted(SyntacticPredicateEvent e);
+-	public void syntacticPredicateSucceeded(SyntacticPredicateEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/TraceAdapter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/TraceAdapter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/TraceAdapter.java	2006-02-08 22:31:32.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/TraceAdapter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,10 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class TraceAdapter implements TraceListener {
+-
+-
+-	public void doneParsing(TraceEvent e) {}
+-	public void enterRule(TraceEvent e) {}
+-	public void exitRule(TraceEvent e) {}
+-	public void refresh() {}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/TraceEvent.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/TraceEvent.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/TraceEvent.java	2006-02-08 22:31:32.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/TraceEvent.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,41 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class TraceEvent extends GuessingEvent {
+-	private int ruleNum;
+-	private int data;
+-	public static int ENTER=0;
+-	public static int EXIT=1;
+-	public static int DONE_PARSING=2;
+-
+-
+-	public TraceEvent(Object source) {
+-		super(source);
+-	}
+-	public TraceEvent(Object source, int type, int ruleNum, int guessing, int data) {
+-		super(source);
+-		setValues(type, ruleNum, guessing, data);
+-	}
+-	public int getData() {
+-		return data;
+-	}
+-	public int getRuleNum() {
+-		return ruleNum;
+-	}
+-	void setData(int data) {
+-		this.data = data;
+-	}
+-	void setRuleNum(int ruleNum) {
+-		this.ruleNum = ruleNum;
+-	}
+-	/** This should NOT be called from anyone other than ParserEventSupport! */
+-	 void setValues(int type, int ruleNum, int guessing, int data) {
+-		super.setValues(type, guessing);
+-		setRuleNum(ruleNum);
+-		setData(data);
+-	}
+-	public String toString() {
+-		return "ParserTraceEvent [" + 
+-		       (getType()==ENTER?"enter,":"exit,") +
+-		       getRuleNum() + "," + getGuessing() +"]";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/TraceListener.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/TraceListener.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/TraceListener.java	2006-02-08 22:31:32.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/TraceListener.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,8 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public interface TraceListener extends ListenerBase {
+-
+-
+-	public void enterRule(TraceEvent e);
+-	public void exitRule(TraceEvent e);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/debug/Tracer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/Tracer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/debug/Tracer.java	2006-02-08 22:31:32.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/debug/Tracer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,24 +0,0 @@
+-package persistence.antlr.debug;
+-
+-public class Tracer extends TraceAdapter implements TraceListener {
+-	String indent=""; // TBD: should be StringBuffer
+-
+-
+-	protected void dedent() {
+-		if (indent.length() < 2)
+-			indent = "";
+-		else
+-			indent = indent.substring(2);
+-	}
+-	public void enterRule(TraceEvent e) {
+-		System.out.println(indent+e);
+-		indent();
+-	}
+-	public void exitRule(TraceEvent e) {
+-		dedent();
+-		System.out.println(indent+e);
+-	}
+-	protected void indent() {
+-		indent += "  ";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/DefaultFileLineFormatter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/DefaultFileLineFormatter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/DefaultFileLineFormatter.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/DefaultFileLineFormatter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,32 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class DefaultFileLineFormatter extends FileLineFormatter {
+-    public String getFormatString(String fileName, int line, int column) {
+-        StringBuffer buf = new StringBuffer();
+-
+-        if (fileName != null)
+-            buf.append(fileName + ":");
+-
+-        if (line != -1) {
+-            if (fileName == null)
+-                buf.append("line ");
+-
+-            buf.append(line);
+-
+-            if (column != -1)
+-                buf.append(":" + column);
+-
+-            buf.append(":");
+-        }
+-
+-        buf.append(" ");
+-
+-        return buf.toString();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/DefaultToolErrorHandler.java glassfish-gil/entity-persistence/src/java/persistence/antlr/DefaultToolErrorHandler.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/DefaultToolErrorHandler.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/DefaultToolErrorHandler.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,118 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-class DefaultToolErrorHandler implements ToolErrorHandler {
+-	DefaultToolErrorHandler(persistence.antlr.Tool tool) {
+-		antlrTool = tool;
+-	}
+-	private final persistence.antlr.Tool antlrTool;
+-
+-	CharFormatter javaCharFormatter = new JavaCharFormatter();
+-
+-	/** Dump token/character sets to a string array suitable for
+-	 * {@link persistence.antlr.Tool.warning(String[], String, int, int)
+-	 * @param output The array that will contain the token/character set dump,
+-	 *               one element per k (lookahead) value
+-	 * @param outputStartIndex The index into <code>output</code> that the
+-	 *                         dump should start at.
+-	 * @param lexicalAnalysis  true for lexical rule
+-	 * @param depth  The depth of the ambiguity
+-	 * @param sets  An array of bitsets containing the ambiguities
+-	 */
+-	private void dumpSets(String[] output,
+-						  int outputStartIndex,
+-						  Grammar grammar,
+-						  boolean lexicalAnalysis,
+-						  int depth,
+-						  Lookahead[] sets) {
+-		StringBuffer line = new StringBuffer(100);
+-		for (int i = 1; i <= depth; i++) {
+-			line.append("k==").append(i).append(':');
+-			if (lexicalAnalysis) {
+-				String bits = sets[i].fset.toStringWithRanges(",", javaCharFormatter);
+-				if (sets[i].containsEpsilon()) {
+-					line.append("<end-of-token>");
+-					if (bits.length() > 0) {
+-						line.append(',');
+-					}
+-				}
+-				line.append(bits);
+-			} else {
+-				line.append(sets[i].fset.toString(",", grammar.tokenManager.getVocabulary()));
+-			}
+-			output[outputStartIndex++] = line.toString();
+-			line.setLength(0);
+-		}
+-	}
+-
+-	/** Issue a warning about ambiguity between a alternates
+-	 * @param blk  The block being analyzed
+-	 * @param lexicalAnalysis  true for lexical rule
+-	 * @param depth  The depth of the ambiguity
+-	 * @param sets  An array of bitsets containing the ambiguities
+-	 * @param altIdx1  The zero-based index of the first ambiguous alternative
+-	 * @param altIdx2  The zero-based index of the second ambiguous alternative
+-	 */
+-	public void warnAltAmbiguity(Grammar grammar,
+-								 AlternativeBlock blk,
+-								 boolean lexicalAnalysis,
+-								 int depth,
+-								 Lookahead[] sets,
+-								 int altIdx1,
+-								 int altIdx2)
+-	{
+-		final StringBuffer line = new StringBuffer(100);
+-		if (blk instanceof RuleBlock && ((RuleBlock)blk).isLexerAutoGenRule()) {
+-			Alternative ai = blk.getAlternativeAt(altIdx1);
+-			Alternative aj = blk.getAlternativeAt(altIdx2);
+-			RuleRefElement rri = (RuleRefElement)ai.head;
+-			RuleRefElement rrj = (RuleRefElement)aj.head;
+-			String ri = CodeGenerator.reverseLexerRuleName(rri.targetRule);
+-			String rj = CodeGenerator.reverseLexerRuleName(rrj.targetRule);
+-			line.append("lexical nondeterminism between rules ");
+-			line.append(ri).append(" and ").append(rj).append(" upon");
+-		}
+-		else {
+-			if (lexicalAnalysis) {
+-				line.append("lexical ");
+-			}
+-			line.append("nondeterminism between alts ");
+-			line.append(altIdx1 + 1).append(" and ");
+-			line.append(altIdx2 + 1).append(" of block upon");
+-		}
+-		final String [] output = new String [depth + 1];;
+-		output[0] = line.toString();
+-		dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets);
+-		antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn());
+-
+-	}
+-
+-	/** Issue a warning about ambiguity between an alternate and exit path.
+-	 * @param blk  The block being analyzed
+-	 * @param lexicalAnalysis  true for lexical rule
+-	 * @param depth  The depth of the ambiguity
+-	 * @param sets  An array of bitsets containing the ambiguities
+-	 * @param altIdx  The zero-based index of the ambiguous alternative
+-	 */
+-	public void warnAltExitAmbiguity(Grammar grammar,
+-									 BlockWithImpliedExitPath blk,
+-									 boolean lexicalAnalysis,
+-									 int depth,
+-									 Lookahead[] sets,
+-									 int altIdx
+-									 )
+-	{
+-		String [] output = new String[depth + 2];
+-		output[0] = (lexicalAnalysis ? "lexical " : "") + "nondeterminism upon";
+-		dumpSets(output, 1, grammar, lexicalAnalysis, depth, sets);
+-		output[depth + 1] = "between alt " + (altIdx + 1) + " and exit branch of block";
+-		antlrTool.warning(output, grammar.getFilename(), blk.getLine(), blk.getColumn());
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/DefineGrammarSymbols.java glassfish-gil/entity-persistence/src/java/persistence/antlr/DefineGrammarSymbols.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/DefineGrammarSymbols.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/DefineGrammarSymbols.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,792 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-/**DefineGrammarSymbols is a behavior for the ANTLRParser that adds all
+- * the token and rule symbols to the grammar symbol table.
+- *
+- * Token types are assigned to token symbols in this class also.
+- * The token type for a token is done in the order seen (lexically).
+- */
+-public class DefineGrammarSymbols implements ANTLRGrammarParseBehavior {
+-    // Contains all of the defined parser and lexer Grammar's indexed by name
+-    protected Hashtable grammars = new Hashtable();
+-    // Contains all the TokenManagers indexed by name
+-    protected Hashtable tokenManagers = new Hashtable();
+-    // Current grammar (parser or lexer)
+-    protected Grammar grammar;
+-    // The tool under which this is invoked
+-    protected Tool tool;
+-    // The grammar analyzer object
+-    LLkAnalyzer analyzer;
+-    // The command-line arguments passed to the tool.
+-    // This allows each grammar to parse the arguments as it is created
+-    String[] args;
+-    // Name for default token manager does not match any valid name
+-    static final String DEFAULT_TOKENMANAGER_NAME = "*default";
+-    // Header actions apply to all parsers unless redefined
+-    // Contains all of the header actions indexed by name
+-    protected Hashtable headerActions = new Hashtable();
+-    // Place where preamble is stored until a grammar is defined
+-    Token thePreambleAction = new CommonToken(Token.INVALID_TYPE, ""); // init to empty token
+-    // The target language
+-    String language = "Java";
+-
+-    protected int numLexers = 0;
+-    protected int numParsers = 0;
+-    protected int numTreeParsers = 0;
+-
+-    public DefineGrammarSymbols(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
+-        tool = tool_;
+-        args = args_;
+-        analyzer = analyzer_;
+-    }
+-
+-    public void _refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
+-        if (!(grammar instanceof LexerGrammar)) {
+-            // String literals are treated like tokens except by the lexer
+-            String str = lit.getText();
+-            if (grammar.tokenManager.getTokenSymbol(str) != null) {
+-                // string symbol is already defined
+-                return;
+-            }
+-            StringLiteralSymbol sl = new StringLiteralSymbol(str);
+-            int tt = grammar.tokenManager.nextTokenType();
+-            sl.setTokenType(tt);
+-            grammar.tokenManager.define(sl);
+-        }
+-    }
+-
+-    /** Reference a token */
+-    public void _refToken(Token assignId,
+-                          Token t,
+-                          Token label,
+-                          Token args,
+-                          boolean inverted,
+-                          int autoGenType,
+-                          boolean lastInRule) {
+-        String id = t.getText();
+-        if (!grammar.tokenManager.tokenDefined(id)) {
+-            /*
+-            // RK: dish out a warning if the token was not defined before.
+-            tool.warning("Token '" + id + "' defined outside tokens section",
+-                         tool.grammarFile, t.getLine(), t.getColumn());
+-            */
+-            int tt = grammar.tokenManager.nextTokenType();
+-            TokenSymbol ts = new TokenSymbol(id);
+-            ts.setTokenType(tt);
+-            grammar.tokenManager.define(ts);
+-        }
+-    }
+-
+-    /** Abort the processing of a grammar due to syntax errors */
+-    public void abortGrammar() {
+-        if (grammar != null && grammar.getClassName() != null) {
+-            grammars.remove(grammar.getClassName());
+-        }
+-        grammar = null;
+-    }
+-
+-    public void beginAlt(boolean doAST_) {
+-    }
+-
+-    public void beginChildList() {
+-    }
+-
+-    // Exception handling
+-    public void beginExceptionGroup() {
+-    }
+-
+-    public void beginExceptionSpec(Token label) {
+-    }
+-
+-    public void beginSubRule(Token label, Token start, boolean not) {
+-    }
+-
+-    public void beginTree(Token tok) throws SemanticException {
+-    }
+-
+-    /** Define a lexer or parser rule */
+-    public void defineRuleName(Token r,
+-                               String access,
+-                               boolean ruleAutoGen,
+-                               String docComment)
+-        throws SemanticException {
+-        String id = r.getText();
+-
+-        //		if ( Character.isUpperCase(id.charAt(0)) ) {
+-        if (r.type == ANTLRTokenTypes.TOKEN_REF) {
+-            // lexer rule
+-            id = CodeGenerator.encodeLexerRuleName(id);
+-            // make sure we define it as token identifier also
+-            if (!grammar.tokenManager.tokenDefined(r.getText())) {
+-                int tt = grammar.tokenManager.nextTokenType();
+-                TokenSymbol ts = new TokenSymbol(r.getText());
+-                ts.setTokenType(tt);
+-                grammar.tokenManager.define(ts);
+-            }
+-        }
+-
+-        RuleSymbol rs;
+-        if (grammar.isDefined(id)) {
+-            // symbol seen before?
+-            rs = (RuleSymbol)grammar.getSymbol(id);
+-            // rule just referenced or has it been defined yet?
+-            if (rs.isDefined()) {
+-                tool.error("redefinition of rule " + id, grammar.getFilename(), r.getLine(), r.getColumn());
+-            }
+-        }
+-        else {
+-            rs = new RuleSymbol(id);
+-            grammar.define(rs);
+-        }
+-        rs.setDefined();
+-        rs.access = access;
+-        rs.comment = docComment;
+-    }
+-
+-    /** Define a token from tokens {...}.
+-     *  Must be label and literal or just label or just a literal.
+-     */
+-    public void defineToken(Token tokname, Token tokliteral) {
+-        String name = null;
+-        String literal = null;
+-        if (tokname != null) {
+-            name = tokname.getText();
+-        }
+-        if (tokliteral != null) {
+-            literal = tokliteral.getText();
+-        }
+-        // System.out.println("defining " + name + " with literal " + literal);
+-        //
+-        if (literal != null) {
+-            StringLiteralSymbol sl = (StringLiteralSymbol)grammar.tokenManager.getTokenSymbol(literal);
+-            if (sl != null) {
+-                // This literal is known already.
+-                // If the literal has no label already, but we can provide
+-                // one here, then no problem, just map the label to the literal
+-                // and don't change anything else.
+-                // Otherwise, labels conflict: error.
+-                if (name == null || sl.getLabel() != null) {
+-                    tool.warning("Redefinition of literal in tokens {...}: " + literal, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn());
+-                    return;
+-                }
+-                else if (name != null) {
+-                    // The literal had no label, but new def does.  Set it.
+-                    sl.setLabel(name);
+-                    // Also, map the label to the literal.
+-                    grammar.tokenManager.mapToTokenSymbol(name, sl);
+-                }
+-            }
+-            // if they provide a name/label and that name/label already
+-            // exists, just hook this literal onto old token.
+-            if (name != null) {
+-                TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(name);
+-                if (ts != null) {
+-                    // watch out that the label is not more than just a token.
+-                    // If it already has a literal attached, then: conflict.
+-                    if (ts instanceof StringLiteralSymbol) {
+-                        tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokliteral.getLine(), tokliteral.getColumn());
+-                        return;
+-                    }
+-                    // a simple token symbol such as DECL is defined
+-                    // must convert it to a StringLiteralSymbol with a
+-                    // label by co-opting token type and killing old
+-                    // TokenSymbol.  Kill mapping and entry in vector
+-                    // of token manager.
+-                    // First, claim token type.
+-                    int ttype = ts.getTokenType();
+-                    // now, create string literal with label
+-                    sl = new StringLiteralSymbol(literal);
+-                    sl.setTokenType(ttype);
+-                    sl.setLabel(name);
+-                    // redefine this critter as a string literal
+-                    grammar.tokenManager.define(sl);
+-                    // make sure the label can be used also.
+-                    grammar.tokenManager.mapToTokenSymbol(name, sl);
+-                    return;
+-                }
+-                // here, literal was labeled but not by a known token symbol.
+-            }
+-            sl = new StringLiteralSymbol(literal);
+-            int tt = grammar.tokenManager.nextTokenType();
+-            sl.setTokenType(tt);
+-            sl.setLabel(name);
+-            grammar.tokenManager.define(sl);
+-            if (name != null) {
+-                // make the label point at token symbol too
+-                grammar.tokenManager.mapToTokenSymbol(name, sl);
+-            }
+-        }
+-
+-        // create a token in the token manager not a literal
+-        else {
+-            if (grammar.tokenManager.tokenDefined(name)) {
+-                tool.warning("Redefinition of token in tokens {...}: " + name, grammar.getFilename(), tokname.getLine(), tokname.getColumn());
+-                return;
+-            }
+-            int tt = grammar.tokenManager.nextTokenType();
+-            TokenSymbol ts = new TokenSymbol(name);
+-            ts.setTokenType(tt);
+-            grammar.tokenManager.define(ts);
+-        }
+-    }
+-
+-    public void endAlt() {
+-    }
+-
+-    public void endChildList() {
+-    }
+-
+-    public void endExceptionGroup() {
+-    }
+-
+-    public void endExceptionSpec() {
+-    }
+-
+-    public void endGrammar() {
+-    }
+-
+-    /** Called after the optional options section, to compensate for
+-     * options that may not have been set.
+-     * This method is bigger than it needs to be, but is much more
+-     * clear if I delineate all the cases.
+-     */
+-    public void endOptions() {
+-        // NO VOCAB OPTIONS
+-        if (grammar.exportVocab == null && grammar.importVocab == null) {
+-            grammar.exportVocab = grammar.getClassName();
+-            // Can we get initial vocab from default shared vocab?
+-            if (tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
+-                // Use the already-defined token manager
+-                grammar.exportVocab = DEFAULT_TOKENMANAGER_NAME;
+-                TokenManager tm = (TokenManager)tokenManagers.get(DEFAULT_TOKENMANAGER_NAME);
+-                // System.out.println("No tokenVocabulary for '" + grammar.getClassName() + "', using default '" + tm.getName() + "'");
+-                grammar.setTokenManager(tm);
+-                return;
+-            }
+-            // no shared vocab for file, make new one
+-            // System.out.println("No exportVocab for '" + grammar.getClassName() + "', creating default '" + grammar.exportVocab + "'");
+-            TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool);
+-            grammar.setTokenManager(tm);
+-            // Add the token manager to the list of token managers
+-            tokenManagers.put(grammar.exportVocab, tm);
+-            // no default vocab, so make this the default vocab
+-            tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
+-            return;
+-        }
+-
+-        // NO OUTPUT, BUT HAS INPUT VOCAB
+-        if (grammar.exportVocab == null && grammar.importVocab != null) {
+-            grammar.exportVocab = grammar.getClassName();
+-            // first make sure input!=output
+-            if (grammar.importVocab.equals(grammar.exportVocab)) {
+-                tool.warning("Grammar " + grammar.getClassName() +
+-                             " cannot have importVocab same as default output vocab (grammar name); ignored.");
+-                // kill importVocab option and try again: use default vocab
+-                grammar.importVocab = null;
+-                endOptions();
+-                return;
+-            }
+-            // check to see if the vocab is already in memory
+-            // (defined by another grammar in the file).  Not normal situation.
+-            if (tokenManagers.containsKey(grammar.importVocab)) {
+-                // make a copy since we'll be generating a new output vocab
+-                // and we don't want to affect this one.  Set the name to
+-                // the default output vocab==classname.
+-                TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab);
+-                // System.out.println("Duping importVocab of " + grammar.importVocab);
+-                TokenManager dup = (TokenManager)tm.clone();
+-                dup.setName(grammar.exportVocab);
+-                // System.out.println("Setting name to " + grammar.exportVocab);
+-                dup.setReadOnly(false);
+-                grammar.setTokenManager(dup);
+-                tokenManagers.put(grammar.exportVocab, dup);
+-                return;
+-            }
+-            // System.out.println("reading in vocab "+grammar.importVocab);
+-            // Must be a file, go get it.
+-            ImportVocabTokenManager tm =
+-                new ImportVocabTokenManager(grammar,
+-                                            grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt,
+-                                            grammar.exportVocab,
+-                                            tool);
+-            tm.setReadOnly(false); // since renamed, can write out
+-            // Add this token manager to the list so its tokens will be generated
+-            tokenManagers.put(grammar.exportVocab, tm);
+-            // System.out.println("vocab renamed to default output vocab of "+tm.getName());
+-            // Assign the token manager to this grammar.
+-            grammar.setTokenManager(tm);
+-
+-            // set default vocab if none
+-            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
+-                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
+-            }
+-
+-            return;
+-        }
+-
+-        // OUTPUT VOCAB, BUT NO INPUT VOCAB
+-        if (grammar.exportVocab != null && grammar.importVocab == null) {
+-            // share with previous vocab if it exists
+-            if (tokenManagers.containsKey(grammar.exportVocab)) {
+-                // Use the already-defined token manager
+-                TokenManager tm = (TokenManager)tokenManagers.get(grammar.exportVocab);
+-                // System.out.println("Sharing exportVocab of " + grammar.exportVocab);
+-                grammar.setTokenManager(tm);
+-                return;
+-            }
+-            // create new output vocab
+-            // System.out.println("Creating exportVocab " + grammar.exportVocab);
+-            TokenManager tm = new SimpleTokenManager(grammar.exportVocab, tool);
+-            grammar.setTokenManager(tm);
+-            // Add the token manager to the list of token managers
+-            tokenManagers.put(grammar.exportVocab, tm);
+-            // set default vocab if none
+-            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
+-                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
+-            }
+-            return;
+-        }
+-
+-        // BOTH INPUT AND OUTPUT VOCAB
+-        if (grammar.exportVocab != null && grammar.importVocab != null) {
+-            // don't want input==output
+-            if (grammar.importVocab.equals(grammar.exportVocab)) {
+-                tool.error("exportVocab of " + grammar.exportVocab + " same as importVocab; probably not what you want");
+-            }
+-            // does the input vocab already exist in memory?
+-            if (tokenManagers.containsKey(grammar.importVocab)) {
+-                // make a copy since we'll be generating a new output vocab
+-                // and we don't want to affect this one.
+-                TokenManager tm = (TokenManager)tokenManagers.get(grammar.importVocab);
+-                // System.out.println("Duping importVocab of " + grammar.importVocab);
+-                TokenManager dup = (TokenManager)tm.clone();
+-                dup.setName(grammar.exportVocab);
+-                // System.out.println("Setting name to " + grammar.exportVocab);
+-                dup.setReadOnly(false);
+-                grammar.setTokenManager(dup);
+-                tokenManagers.put(grammar.exportVocab, dup);
+-                return;
+-            }
+-            // Must be a file, go get it.
+-            ImportVocabTokenManager tm =
+-                new ImportVocabTokenManager(grammar,
+-                                            grammar.importVocab + CodeGenerator.TokenTypesFileSuffix + CodeGenerator.TokenTypesFileExt,
+-                                            grammar.exportVocab,
+-                                            tool);
+-            tm.setReadOnly(false); // write it out as we've changed name
+-            // Add this token manager to the list so its tokens will be generated
+-            tokenManagers.put(grammar.exportVocab, tm);
+-            // Assign the token manager to this grammar.
+-            grammar.setTokenManager(tm);
+-
+-            // set default vocab if none
+-            if (!tokenManagers.containsKey(DEFAULT_TOKENMANAGER_NAME)) {
+-                tokenManagers.put(DEFAULT_TOKENMANAGER_NAME, tm);
+-            }
+-
+-            return;
+-        }
+-    }
+-
+-    public void endRule(String r) {
+-    }
+-
+-    public void endSubRule() {
+-    }
+-
+-    public void endTree() {
+-    }
+-
+-    public void hasError() {
+-    }
+-
+-    public void noASTSubRule() {
+-    }
+-
+-    public void oneOrMoreSubRule() {
+-    }
+-
+-    public void optionalSubRule() {
+-    }
+-
+-    public void setUserExceptions(String thr) {
+-    }
+-
+-    public void refAction(Token action) {
+-    }
+-
+-    public void refArgAction(Token action) {
+-    }
+-
+-    public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
+-    }
+-
+-    public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
+-    }
+-
+-    public void refElementOption(Token option, Token value) {
+-    }
+-
+-    public void refTokensSpecElementOption(Token tok, Token option, Token value) {
+-    }
+-
+-    public void refExceptionHandler(Token exTypeAndName, Token action) {
+-    }
+-
+-    // Header action applies to all parsers and lexers.
+-    public void refHeaderAction(Token name, Token act) {
+-        String key;
+-
+-        if (name == null)
+-            key = "";
+-        else
+-            key = StringUtils.stripFrontBack(name.getText(), "\"", "\"");
+-
+-        // FIXME: depending on the mode the inserted header actions should
+-        // be checked for sanity.
+-        if (headerActions.containsKey(key)) {
+-            if (key.equals(""))
+-                tool.error(act.getLine() + ": header action already defined");
+-            else
+-                tool.error(act.getLine() + ": header action '" + key + "' already defined");
+-        }
+-        headerActions.put(key, act);
+-    }
+-
+-    public String getHeaderAction(String name) {
+-        Token t = (Token)headerActions.get(name);
+-        if (t == null) {
+-            return "";
+-        }
+-        return t.getText();
+-    }
+-
+-    public void refInitAction(Token action) {
+-    }
+-
+-    public void refMemberAction(Token act) {
+-    }
+-
+-    public void refPreambleAction(Token act) {
+-        thePreambleAction = act;
+-    }
+-
+-    public void refReturnAction(Token returnAction) {
+-    }
+-
+-    public void refRule(Token idAssign,
+-                        Token r,
+-                        Token label,
+-                        Token args,
+-                        int autoGenType) {
+-        String id = r.getText();
+-        //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
+-        if (r.type == ANTLRTokenTypes.TOKEN_REF) {
+-            // lexer rule?
+-            id = CodeGenerator.encodeLexerRuleName(id);
+-        }
+-        if (!grammar.isDefined(id)) {
+-            grammar.define(new RuleSymbol(id));
+-        }
+-    }
+-
+-    public void refSemPred(Token pred) {
+-    }
+-
+-    public void refStringLiteral(Token lit,
+-                                 Token label,
+-                                 int autoGenType,
+-                                 boolean lastInRule) {
+-        _refStringLiteral(lit, label, autoGenType, lastInRule);
+-    }
+-
+-    /** Reference a token */
+-    public void refToken(Token assignId, Token t, Token label, Token args,
+-                         boolean inverted, int autoGenType, boolean lastInRule) {
+-        _refToken(assignId, t, label, args, inverted, autoGenType, lastInRule);
+-    }
+-
+-    public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
+-        // ensure that the DefineGrammarSymbols methods are called; otherwise a range addes more
+-        // token refs to the alternative by calling MakeGrammar.refToken etc...
+-        if (t1.getText().charAt(0) == '"') {
+-            refStringLiteral(t1, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
+-        }
+-        else {
+-            _refToken(null, t1, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
+-        }
+-        if (t2.getText().charAt(0) == '"') {
+-            _refStringLiteral(t2, null, GrammarElement.AUTO_GEN_NONE, lastInRule);
+-        }
+-        else {
+-            _refToken(null, t2, null, null, false, GrammarElement.AUTO_GEN_NONE, lastInRule);
+-        }
+-    }
+-
+-    public void refTreeSpecifier(Token treeSpec) {
+-    }
+-
+-    public void refWildcard(Token t, Token label, int autoGenType) {
+-    }
+-
+-    /** Get ready to process a new grammar */
+-    public void reset() {
+-        grammar = null;
+-    }
+-
+-    public void setArgOfRuleRef(Token argaction) {
+-    }
+-
+-    /** Set the character vocabulary for a lexer */
+-    public void setCharVocabulary(BitSet b) {
+-        // grammar should enforce that this is only called for lexer
+-        ((LexerGrammar)grammar).setCharVocabulary(b);
+-    }
+-
+-    /** setFileOption: Associate an option value with a key.
+-     * This applies to options for an entire grammar file.
+-     * @param key The token containing the option name
+-     * @param value The token containing the option value.
+-     */
+-    public void setFileOption(Token key, Token value, String filename) {
+-        if (key.getText().equals("language")) {
+-            if (value.getType() == ANTLRParser.STRING_LITERAL) {
+-                language = StringUtils.stripBack(StringUtils.stripFront(value.getText(), '"'), '"');
+-            }
+-            else if (value.getType() == ANTLRParser.TOKEN_REF || value.getType() == ANTLRParser.RULE_REF) {
+-                language = value.getText();
+-            }
+-            else {
+-                tool.error("language option must be string or identifier", filename, value.getLine(), value.getColumn());
+-            }
+-        }
+-        else if (key.getText().equals("mangleLiteralPrefix")) {
+-            if (value.getType() == ANTLRParser.STRING_LITERAL) {
+-                tool.literalsPrefix = StringUtils.stripFrontBack(value.getText(), "\"", "\"");
+-            }
+-            else {
+-                tool.error("mangleLiteralPrefix option must be string", filename, value.getLine(), value.getColumn());
+-            }
+-        }
+-        else if (key.getText().equals("upperCaseMangledLiterals")) {
+-            if (value.getText().equals("true")) {
+-                tool.upperCaseMangledLiterals = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                tool.upperCaseMangledLiterals = false;
+-            }
+-            else {
+-                grammar.antlrTool.error("Value for upperCaseMangledLiterals must be true or false", filename, key.getLine(), key.getColumn());
+-            }
+-        }
+-        else if (	key.getText().equals("namespaceStd")   ||
+-            	   key.getText().equals("namespaceAntlr") ||
+-            	   key.getText().equals("genHashLines")
+-            	  ) {
+-            if (!language.equals("Cpp")) {
+-                tool.error(key.getText() + " option only valid for C++", filename, key.getLine(), key.getColumn());
+-            }
+-            else {
+-                if (key.getText().equals("noConstructors")) {
+-                    if (!(value.getText().equals("true") || value.getText().equals("false")))
+-                        tool.error("noConstructors option must be true or false", filename, value.getLine(), value.getColumn());
+-                    tool.noConstructors = value.getText().equals("true");
+-                } else if (key.getText().equals("genHashLines")) {
+-                    if (!(value.getText().equals("true") || value.getText().equals("false")))
+-                        tool.error("genHashLines option must be true or false", filename, value.getLine(), value.getColumn());
+-                    tool.genHashLines = value.getText().equals("true");
+-                }
+-                else {
+-                    if (value.getType() != ANTLRParser.STRING_LITERAL) {
+-                        tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn());
+-                    }
+-                    else {
+-                        if (key.getText().equals("namespaceStd"))
+-                            tool.namespaceStd = value.getText();
+-                        else if (key.getText().equals("namespaceAntlr"))
+-                            tool.namespaceAntlr = value.getText();
+-                    }
+-                }
+-            }
+-        }
+-        else if ( key.getText().equals("namespace") ) {
+-            if ( !language.equals("Cpp") && !language.equals("CSharp") )
+-            {
+-                tool.error(key.getText() + " option only valid for C++ and C# (a.k.a CSharp)", filename, key.getLine(), key.getColumn());
+-            }
+-            else
+-            {
+-                 if (value.getType() != ANTLRParser.STRING_LITERAL)
+-                 {
+-                 		tool.error(key.getText() + " option must be a string", filename, value.getLine(), value.getColumn());
+-                 }
+-                 else {
+-                     if (key.getText().equals("namespace"))
+-                         tool.setNameSpace(value.getText());
+-                 }
+-            }
+-        }
+-        else {
+-            tool.error("Invalid file-level option: " + key.getText(), filename, key.getLine(), value.getColumn());
+-        }
+-    }
+-
+-    /** setGrammarOption: Associate an option value with a key.
+-     * This function forwards to Grammar.setOption for some options.
+-     * @param key The token containing the option name
+-     * @param value The token containing the option value.
+-     */
+-    public void setGrammarOption(Token key, Token value) {
+-        if (key.getText().equals("tokdef") || key.getText().equals("tokenVocabulary")) {
+-            tool.error("tokdef/tokenVocabulary options are invalid >= ANTLR 2.6.0.\n" +
+-                       "  Use importVocab/exportVocab instead.  Please see the documentation.\n" +
+-                       "  The previous options were so heinous that Terence changed the whole\n" +
+-                       "  vocabulary mechanism; it was better to change the names rather than\n" +
+-                       "  subtly change the functionality of the known options.  Sorry!", grammar.getFilename(), value.getLine(), value.getColumn());
+-        }
+-        else if (key.getText().equals("literal") &&
+-            grammar instanceof LexerGrammar) {
+-            tool.error("the literal option is invalid >= ANTLR 2.6.0.\n" +
+-                       "  Use the \"tokens {...}\" mechanism instead.",
+-                       grammar.getFilename(), value.getLine(), value.getColumn());
+-        }
+-        else if (key.getText().equals("exportVocab")) {
+-            // Set the token manager associated with the parser
+-            if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
+-                grammar.exportVocab = value.getText();
+-            }
+-            else {
+-                tool.error("exportVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn());
+-            }
+-        }
+-        else if (key.getText().equals("importVocab")) {
+-            if (value.getType() == ANTLRParser.RULE_REF || value.getType() == ANTLRParser.TOKEN_REF) {
+-                grammar.importVocab = value.getText();
+-            }
+-            else {
+-                tool.error("importVocab must be an identifier", grammar.getFilename(), value.getLine(), value.getColumn());
+-            }
+-        }
+-        else {
+-            // Forward all unrecognized options to the grammar
+-            grammar.setOption(key.getText(), value);
+-        }
+-    }
+-
+-    public void setRuleOption(Token key, Token value) {
+-    }
+-
+-    public void setSubruleOption(Token key, Token value) {
+-    }
+-
+-    /** Start a new lexer */
+-    public void startLexer(String file, Token name, String superClass, String doc) {
+-        if (numLexers > 0) {
+-            tool.panic("You may only have one lexer per grammar file: class " + name.getText());
+-        }
+-        numLexers++;
+-        reset();
+-        //System.out.println("Processing lexer '" + name.getText() + "'");
+-        // Does the lexer already exist?
+-        Grammar g = (Grammar)grammars.get(name);
+-        if (g != null) {
+-            if (!(g instanceof LexerGrammar)) {
+-                tool.panic("'" + name.getText() + "' is already defined as a non-lexer");
+-            }
+-            else {
+-                tool.panic("Lexer '" + name.getText() + "' is already defined");
+-            }
+-        }
+-        else {
+-            // Create a new lexer grammar
+-            LexerGrammar lg = new LexerGrammar(name.getText(), tool, superClass);
+-            lg.comment = doc;
+-            lg.processArguments(args);
+-            lg.setFilename(file);
+-            grammars.put(lg.getClassName(), lg);
+-            // Use any preamble action
+-            lg.preambleAction = thePreambleAction;
+-            thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
+-            // This is now the current grammar
+-            grammar = lg;
+-        }
+-    }
+-
+-    /** Start a new parser */
+-    public void startParser(String file, Token name, String superClass, String doc) {
+-        if (numParsers > 0) {
+-            tool.panic("You may only have one parser per grammar file: class " + name.getText());
+-        }
+-        numParsers++;
+-        reset();
+-        //System.out.println("Processing parser '" + name.getText() + "'");
+-        // Is this grammar already defined?
+-        Grammar g = (Grammar)grammars.get(name);
+-        if (g != null) {
+-            if (!(g instanceof ParserGrammar)) {
+-                tool.panic("'" + name.getText() + "' is already defined as a non-parser");
+-            }
+-            else {
+-                tool.panic("Parser '" + name.getText() + "' is already defined");
+-            }
+-        }
+-        else {
+-            // Create a new grammar
+-            grammar = new ParserGrammar(name.getText(), tool, superClass);
+-            grammar.comment = doc;
+-            grammar.processArguments(args);
+-            grammar.setFilename(file);
+-            grammars.put(grammar.getClassName(), grammar);
+-            // Use any preamble action
+-            grammar.preambleAction = thePreambleAction;
+-            thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
+-        }
+-    }
+-
+-    /** Start a new tree-walker */
+-    public void startTreeWalker(String file, Token name, String superClass, String doc) {
+-        if (numTreeParsers > 0) {
+-            tool.panic("You may only have one tree parser per grammar file: class " + name.getText());
+-        }
+-        numTreeParsers++;
+-        reset();
+-        //System.out.println("Processing tree-walker '" + name.getText() + "'");
+-        // Is this grammar already defined?
+-        Grammar g = (Grammar)grammars.get(name);
+-        if (g != null) {
+-            if (!(g instanceof TreeWalkerGrammar)) {
+-                tool.panic("'" + name.getText() + "' is already defined as a non-tree-walker");
+-            }
+-            else {
+-                tool.panic("Tree-walker '" + name.getText() + "' is already defined");
+-            }
+-        }
+-        else {
+-            // Create a new grammar
+-            grammar = new TreeWalkerGrammar(name.getText(), tool, superClass);
+-            grammar.comment = doc;
+-            grammar.processArguments(args);
+-            grammar.setFilename(file);
+-            grammars.put(grammar.getClassName(), grammar);
+-            // Use any preamble action
+-            grammar.preambleAction = thePreambleAction;
+-            thePreambleAction = new CommonToken(Token.INVALID_TYPE, "");
+-        }
+-    }
+-
+-    public void synPred() {
+-    }
+-
+-    public void zeroOrMoreSubRule() {
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/DiagnosticCodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/DiagnosticCodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/DiagnosticCodeGenerator.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/DiagnosticCodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,920 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-import java.io.PrintWriter; //SAS: changed for proper text file io
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-/**Generate MyParser.txt, MyLexer.txt and MyParserTokenTypes.txt */
+-public class DiagnosticCodeGenerator extends CodeGenerator {
+-    /** non-zero if inside syntactic predicate generation */
+-    protected int syntacticPredLevel = 0;
+-
+-    /** true during lexer generation, false during parser generation */
+-    protected boolean doingLexRules = false;
+-
+-    /** Create a Diagnostic code-generator using the given Grammar
+-     * The caller must still call setTool, setBehavior, and setAnalyzer
+-     * before generating code.
+-     */
+-    public DiagnosticCodeGenerator() {
+-        super();
+-        charFormatter = new JavaCharFormatter();
+-    }
+-
+-    /**Generate the parser, lexer, and token types documentation */
+-    public void gen() {
+-
+-        // Do the code generation
+-        try {
+-            // Loop over all grammars
+-            Enumeration grammarIter = behavior.grammars.elements();
+-            while (grammarIter.hasMoreElements()) {
+-                Grammar g = (Grammar)grammarIter.nextElement();
+-
+-                // Connect all the components to each other
+-                g.setGrammarAnalyzer(analyzer);
+-                g.setCodeGenerator(this);
+-                analyzer.setGrammar(g);
+-
+-                // To get right overloading behavior across hetrogeneous grammars
+-                g.generate();
+-
+-                if (antlrTool.hasError()) {
+-                    antlrTool.panic("Exiting due to errors.");
+-                }
+-
+-            }
+-
+-            // Loop over all token managers (some of which are lexers)
+-            Enumeration tmIter = behavior.tokenManagers.elements();
+-            while (tmIter.hasMoreElements()) {
+-                TokenManager tm = (TokenManager)tmIter.nextElement();
+-                if (!tm.isReadOnly()) {
+-                    // Write the token manager tokens as Java
+-                    genTokenTypes(tm);
+-                }
+-            }
+-        }
+-        catch (IOException e) {
+-            antlrTool.reportException(e, null);
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The {...} action to generate
+-     */
+-    public void gen(ActionElement action) {
+-        if (action.isSemPred) {
+-            // handled elsewhere
+-        }
+-        else {
+-            print("ACTION: ");
+-            _printAction(action.actionText);
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The "x|y|z|..." block to generate
+-     */
+-    public void gen(AlternativeBlock blk) {
+-        println("Start of alternative block.");
+-        tabs++;
+-        genBlockPreamble(blk);
+-
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-        if (!ok) {
+-            println("Warning: This alternative block is non-deterministic");
+-        }
+-        genCommonBlock(blk);
+-        tabs--;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The block-end element to generate.  Block-end
+-     * elements are synthesized by the grammar parser to represent
+-     * the end of a block.
+-     */
+-    public void gen(BlockEndElement end) {
+-        // no-op
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character literal reference to generate
+-     */
+-    public void gen(CharLiteralElement atom) {
+-        print("Match character ");
+-        if (atom.not) {
+-            _print("NOT ");
+-        }
+-        _print(atom.atomText);
+-        if (atom.label != null) {
+-            _print(", label=" + atom.label);
+-        }
+-        _println("");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character-range reference to generate
+-     */
+-    public void gen(CharRangeElement r) {
+-        print("Match character range: " + r.beginText + ".." + r.endText);
+-        if (r.label != null) {
+-            _print(", label = " + r.label);
+-        }
+-        _println("");
+-    }
+-
+-    /** Generate the lexer TXT file */
+-    public void gen(LexerGrammar g) throws IOException {
+-        setGrammar(g);
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-        doingLexRules = true;
+-
+-        // Generate header common to all TXT output files
+-        genHeader();
+-
+-        // Output the user-defined lexer premamble
+-        println("");
+-        println("*** Lexer Preamble Action.");
+-        println("This action will appear before the declaration of your lexer class:");
+-        tabs++;
+-        println(grammar.preambleAction.getText());
+-        tabs--;
+-        println("*** End of Lexer Preamble Action");
+-
+-        // Generate lexer class definition
+-        println("");
+-        println("*** Your lexer class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");
+-
+-        // Generate user-defined parser class members
+-        println("");
+-        println("*** User-defined lexer  class members:");
+-        println("These are the member declarations that you defined for your class:");
+-        tabs++;
+-        printAction(grammar.classMemberAction.getText());
+-        tabs--;
+-        println("*** End of user-defined lexer class members");
+-
+-        // Generate string literals
+-        println("");
+-        println("*** String literals used in the parser");
+-        println("The following string literals were used in the parser.");
+-        println("An actual code generator would arrange to place these literals");
+-        println("into a table in the generated lexer, so that actions in the");
+-        println("generated lexer could match token text against the literals.");
+-        println("String literals used in the lexer are not listed here, as they");
+-        println("are incorporated into the mainstream lexer processing.");
+-        tabs++;
+-        // Enumerate all of the symbols and look for string literal symbols
+-        Enumeration ids = grammar.getSymbols();
+-        while (ids.hasMoreElements()) {
+-            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-            // Only processing string literals -- reject other symbol entries
+-            if (sym instanceof StringLiteralSymbol) {
+-                StringLiteralSymbol s = (StringLiteralSymbol)sym;
+-                println(s.getId() + " = " + s.getTokenType());
+-            }
+-        }
+-        tabs--;
+-        println("*** End of string literals used by the parser");
+-
+-        // Generate nextToken() rule.
+-        // nextToken() is a synthetic lexer rule that is the implicit OR of all
+-        // user-defined lexer rules.
+-        genNextToken();
+-
+-        // Generate code for each rule in the lexer
+-        println("");
+-        println("*** User-defined Lexer rules:");
+-        tabs++;
+-
+-        ids = grammar.rules.elements();
+-        while (ids.hasMoreElements()) {
+-            RuleSymbol rs = (RuleSymbol)ids.nextElement();
+-            if (!rs.id.equals("mnextToken")) {
+-                genRule(rs);
+-            }
+-        }
+-
+-        tabs--;
+-        println("");
+-        println("*** End User-defined Lexer rules:");
+-
+-        // Close the lexer output file
+-        currentOutput.close();
+-        currentOutput = null;
+-        doingLexRules = false;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)+ block to generate
+-     */
+-    public void gen(OneOrMoreBlock blk) {
+-        println("Start ONE-OR-MORE (...)+ block:");
+-        tabs++;
+-        genBlockPreamble(blk);
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-        if (!ok) {
+-            println("Warning: This one-or-more block is non-deterministic");
+-        }
+-        genCommonBlock(blk);
+-        tabs--;
+-        println("End ONE-OR-MORE block.");
+-    }
+-
+-    /** Generate the parser TXT file */
+-    public void gen(ParserGrammar g) throws IOException {
+-        setGrammar(g);
+-        // Open the output stream for the parser and set the currentOutput
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-
+-        // Output the user-defined parser premamble
+-        println("");
+-        println("*** Parser Preamble Action.");
+-        println("This action will appear before the declaration of your parser class:");
+-        tabs++;
+-        println(grammar.preambleAction.getText());
+-        tabs--;
+-        println("*** End of Parser Preamble Action");
+-
+-        // Generate parser class definition
+-        println("");
+-        println("*** Your parser class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");
+-
+-        // Generate user-defined parser class members
+-        println("");
+-        println("*** User-defined parser class members:");
+-        println("These are the member declarations that you defined for your class:");
+-        tabs++;
+-        printAction(grammar.classMemberAction.getText());
+-        tabs--;
+-        println("*** End of user-defined parser class members");
+-
+-        // Generate code for each rule in the grammar
+-        println("");
+-        println("*** Parser rules:");
+-        tabs++;
+-
+-        // Enumerate the parser rules
+-        Enumeration rules = grammar.rules.elements();
+-        while (rules.hasMoreElements()) {
+-            println("");
+-            // Get the rules from the list and downcast it to proper type
+-            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
+-            // Only process parser rules
+-            if (sym instanceof RuleSymbol) {
+-                genRule((RuleSymbol)sym);
+-            }
+-        }
+-        tabs--;
+-        println("");
+-        println("*** End of parser rules");
+-
+-        println("");
+-        println("*** End of parser");
+-
+-        // Close the parser output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The rule-reference to generate
+-     */
+-    public void gen(RuleRefElement rr) {
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-
+-        // Generate the actual rule description
+-        print("Rule Reference: " + rr.targetRule);
+-        if (rr.idAssign != null) {
+-            _print(", assigned to '" + rr.idAssign + "'");
+-        }
+-        if (rr.args != null) {
+-            _print(", arguments = " + rr.args);
+-        }
+-        _println("");
+-
+-        // Perform diagnostics
+-        if (rs == null || !rs.isDefined()) {
+-            println("Rule '" + rr.targetRule + "' is referenced, but that rule is not defined.");
+-            println("\tPerhaps the rule is misspelled, or you forgot to define it.");
+-            return;
+-        }
+-        if (!(rs instanceof RuleSymbol)) {
+-            // Should this ever happen??
+-            println("Rule '" + rr.targetRule + "' is referenced, but that is not a grammar rule.");
+-            return;
+-        }
+-        if (rr.idAssign != null) {
+-            // Warn if the rule has no return type
+-            if (rs.block.returnAction == null) {
+-                println("Error: You assigned from Rule '" + rr.targetRule + "', but that rule has no return type.");
+-            }
+-        }
+-        else {
+-            // Warn about return value if any, but not inside syntactic predicate
+-            if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) {
+-                println("Warning: Rule '" + rr.targetRule + "' returns a value");
+-            }
+-        }
+-        if (rr.args != null && rs.block.argAction == null) {
+-            println("Error: Rule '" + rr.targetRule + "' accepts no arguments.");
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The string-literal reference to generate
+-     */
+-    public void gen(StringLiteralElement atom) {
+-        print("Match string literal ");
+-        _print(atom.atomText);
+-        if (atom.label != null) {
+-            _print(", label=" + atom.label);
+-        }
+-        _println("");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-range reference to generate
+-     */
+-    public void gen(TokenRangeElement r) {
+-        print("Match token range: " + r.beginText + ".." + r.endText);
+-        if (r.label != null) {
+-            _print(", label = " + r.label);
+-        }
+-        _println("");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-reference to generate
+-     */
+-    public void gen(TokenRefElement atom) {
+-        print("Match token ");
+-        if (atom.not) {
+-            _print("NOT ");
+-        }
+-        _print(atom.atomText);
+-        if (atom.label != null) {
+-            _print(", label=" + atom.label);
+-        }
+-        _println("");
+-    }
+-
+-    public void gen(TreeElement t) {
+-        print("Tree reference: " + t);
+-    }
+-
+-    /** Generate the tree-walker TXT file */
+-    public void gen(TreeWalkerGrammar g) throws IOException {
+-        setGrammar(g);
+-        // Open the output stream for the parser and set the currentOutput
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-
+-        // Output the user-defined parser premamble
+-        println("");
+-        println("*** Tree-walker Preamble Action.");
+-        println("This action will appear before the declaration of your tree-walker class:");
+-        tabs++;
+-        println(grammar.preambleAction.getText());
+-        tabs--;
+-        println("*** End of tree-walker Preamble Action");
+-
+-        // Generate tree-walker class definition
+-        println("");
+-        println("*** Your tree-walker class is called '" + grammar.getClassName() + "' and is a subclass of '" + grammar.getSuperClass() + "'.");
+-
+-        // Generate user-defined tree-walker class members
+-        println("");
+-        println("*** User-defined tree-walker class members:");
+-        println("These are the member declarations that you defined for your class:");
+-        tabs++;
+-        printAction(grammar.classMemberAction.getText());
+-        tabs--;
+-        println("*** End of user-defined tree-walker class members");
+-
+-        // Generate code for each rule in the grammar
+-        println("");
+-        println("*** tree-walker rules:");
+-        tabs++;
+-
+-        // Enumerate the tree-walker rules
+-        Enumeration rules = grammar.rules.elements();
+-        while (rules.hasMoreElements()) {
+-            println("");
+-            // Get the rules from the list and downcast it to proper type
+-            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
+-            // Only process tree-walker rules
+-            if (sym instanceof RuleSymbol) {
+-                genRule((RuleSymbol)sym);
+-            }
+-        }
+-        tabs--;
+-        println("");
+-        println("*** End of tree-walker rules");
+-
+-        println("");
+-        println("*** End of tree-walker");
+-
+-        // Close the tree-walker output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate a wildcard element */
+-    public void gen(WildcardElement wc) {
+-        print("Match wildcard");
+-        if (wc.getLabel() != null) {
+-            _print(", label = " + wc.getLabel());
+-        }
+-        _println("");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)* block to generate
+-     */
+-    public void gen(ZeroOrMoreBlock blk) {
+-        println("Start ZERO-OR-MORE (...)+ block:");
+-        tabs++;
+-        genBlockPreamble(blk);
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-        if (!ok) {
+-            println("Warning: This zero-or-more block is non-deterministic");
+-        }
+-        genCommonBlock(blk);
+-        tabs--;
+-        println("End ZERO-OR-MORE block.");
+-    }
+-
+-    protected void genAlt(Alternative alt) {
+-        for (
+-            AlternativeElement elem = alt.head;
+-            !(elem instanceof BlockEndElement);
+-            elem = elem.next
+-            ) {
+-            elem.generate();
+-        }
+-        if (alt.getTreeSpecifier() != null) {
+-            println("AST will be built as: " + alt.getTreeSpecifier().getText());
+-        }
+-    }
+-
+-    /** Generate the header for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.  This generates any variable declarations,
+-     * init-actions, and syntactic-predicate-testing variables.
+-     * @blk The block for which the preamble is to be generated.
+-     */
+-    protected void genBlockPreamble(AlternativeBlock blk) {
+-        // dump out init action
+-        if (blk.initAction != null) {
+-            printAction("Init action: " + blk.initAction);
+-        }
+-    }
+-
+-    /**Generate common code for a block of alternatives; return a postscript
+-     * that needs to be generated at the end of the block.  Other routines
+-     * may append else-clauses and such for error checking before the postfix
+-     * is generated.
+-     */
+-    public void genCommonBlock(AlternativeBlock blk) {
+-        boolean singleAlt = (blk.alternatives.size() == 1);
+-
+-        println("Start of an alternative block.");
+-        tabs++;
+-        println("The lookahead set for this block is:");
+-        tabs++;
+-        genLookaheadSetForBlock(blk);
+-        tabs--;
+-
+-        if (singleAlt) {
+-            println("This block has a single alternative");
+-            if (blk.getAlternativeAt(0).synPred != null) {
+-                // Generate a warning if there is one alt and it has a synPred
+-                println("Warning: you specified a syntactic predicate for this alternative,");
+-                println("and it is the only alternative of a block and will be ignored.");
+-            }
+-        }
+-        else {
+-            println("This block has multiple alternatives:");
+-            tabs++;
+-        }
+-
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            AlternativeElement elem = alt.head;
+-
+-            // Print lookahead set for alternate
+-            println("");
+-            if (i != 0) {
+-                print("Otherwise, ");
+-            }
+-            else {
+-                print("");
+-            }
+-            _println("Alternate(" + (i + 1) + ") will be taken IF:");
+-            println("The lookahead set: ");
+-            tabs++;
+-            genLookaheadSetForAlt(alt);
+-            tabs--;
+-            if (alt.semPred != null || alt.synPred != null) {
+-                print("is matched, AND ");
+-            }
+-            else {
+-                println("is matched.");
+-            }
+-
+-            // Dump semantic predicates
+-            if (alt.semPred != null) {
+-                _println("the semantic predicate:");
+-                tabs++;
+-                println(alt.semPred);
+-                if (alt.synPred != null) {
+-                    print("is true, AND ");
+-                }
+-                else {
+-                    println("is true.");
+-                }
+-            }
+-
+-            // Dump syntactic predicate
+-            if (alt.synPred != null) {
+-                _println("the syntactic predicate:");
+-                tabs++;
+-                genSynPred(alt.synPred);
+-                tabs--;
+-                println("is matched.");
+-            }
+-
+-            // Dump the alternative
+-            genAlt(alt);
+-        }
+-        println("");
+-        println("OTHERWISE, a NoViableAlt exception will be thrown");
+-        println("");
+-
+-        if (!singleAlt) {
+-            tabs--;
+-            println("End of alternatives");
+-        }
+-        tabs--;
+-        println("End of alternative block.");
+-    }
+-
+-    /** Generate a textual representation of the follow set
+-     * for a block.
+-     * @param blk  The rule block of interest
+-     */
+-    public void genFollowSetForRuleBlock(RuleBlock blk) {
+-        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
+-        printSet(grammar.maxk, 1, follow);
+-    }
+-
+-    /** Generate a header that is common to all TXT files */
+-    protected void genHeader() {
+-        println("ANTLR-generated file resulting from grammar " + antlrTool.grammarFile);
+-        println("Diagnostic output");
+-        println("");
+-        println("Terence Parr, MageLang Institute");
+-        println("with John Lilley, Empathy Software");
+-        println("ANTLR Version " + antlrTool.version + "; 1996,1997");
+-        println("");
+-        println("*** Header Action.");
+-        println("This action will appear at the top of all generated files.");
+-        tabs++;
+-        printAction(behavior.getHeaderAction(""));
+-        tabs--;
+-        println("*** End of Header Action");
+-        println("");
+-    }
+-
+-    /**Generate the lookahead set for an alternate. */
+-    protected void genLookaheadSetForAlt(Alternative alt) {
+-        if (doingLexRules && alt.cache[1].containsEpsilon()) {
+-            println("MATCHES ALL");
+-            return;
+-        }
+-        int depth = alt.lookaheadDepth;
+-        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
+-            // if the decision is nondeterministic, do the best we can: LL(k)
+-            // any predicates that are around will be generated later.
+-            depth = grammar.maxk;
+-        }
+-        for (int i = 1; i <= depth; i++) {
+-            Lookahead lookahead = alt.cache[i];
+-            printSet(depth, i, lookahead);
+-        }
+-    }
+-
+-    /** Generate a textual representation of the lookahead set
+-     * for a block.
+-     * @param blk  The block of interest
+-     */
+-    public void genLookaheadSetForBlock(AlternativeBlock blk) {
+-        // Find the maximal lookahead depth over all alternatives
+-        int depth = 0;
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
+-                depth = grammar.maxk;
+-                break;
+-            }
+-            else if (depth < alt.lookaheadDepth) {
+-                depth = alt.lookaheadDepth;
+-            }
+-        }
+-
+-        for (int i = 1; i <= depth; i++) {
+-            Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
+-            printSet(depth, i, lookahead);
+-        }
+-    }
+-
+-    /** Generate the nextToken rule.
+-     * nextToken is a synthetic lexer rule that is the implicit OR of all
+-     * user-defined lexer rules.
+-     */
+-    public void genNextToken() {
+-        println("");
+-        println("*** Lexer nextToken rule:");
+-        println("The lexer nextToken rule is synthesized from all of the user-defined");
+-        println("lexer rules.  It logically consists of one big alternative block with");
+-        println("each user-defined rule being an alternative.");
+-        println("");
+-
+-        // Create the synthesized rule block for nextToken consisting
+-        // of an alternate block containing all the user-defined lexer rules.
+-        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
+-
+-        // Define the nextToken rule symbol
+-        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
+-        nextTokenRs.setDefined();
+-        nextTokenRs.setBlock(blk);
+-        nextTokenRs.access = "private";
+-        grammar.define(nextTokenRs);
+-
+-        // Analyze the synthesized block
+-        if (!grammar.theLLkAnalyzer.deterministic(blk)) {
+-            println("The grammar analyzer has determined that the synthesized");
+-            println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
+-            println("This means that there is some overlap of the character");
+-            println("lookahead for two or more of your lexer rules.");
+-        }
+-
+-        genCommonBlock(blk);
+-
+-        println("*** End of nextToken lexer rule.");
+-    }
+-
+-    /** Generate code for a named rule block
+-     * @param s The RuleSymbol describing the rule to generate
+-     */
+-    public void genRule(RuleSymbol s) {
+-        println("");
+-        String ruleType = (doingLexRules ? "Lexer" : "Parser");
+-        println("*** " + ruleType + " Rule: " + s.getId());
+-        if (!s.isDefined()) {
+-            println("This rule is undefined.");
+-            println("This means that the rule was referenced somewhere in the grammar,");
+-            println("but a definition for the rule was not encountered.");
+-            println("It is also possible that syntax errors during the parse of");
+-            println("your grammar file prevented correct processing of the rule.");
+-            println("*** End " + ruleType + " Rule: " + s.getId());
+-            return;
+-        }
+-        tabs++;
+-
+-        if (s.access.length() != 0) {
+-            println("Access: " + s.access);
+-        }
+-
+-        // Get rule return type and arguments
+-        RuleBlock rblk = s.getBlock();
+-
+-        // Gen method return value(s)
+-        if (rblk.returnAction != null) {
+-            println("Return value(s): " + rblk.returnAction);
+-            if (doingLexRules) {
+-                println("Error: you specified return value(s) for a lexical rule.");
+-                println("\tLexical rules have an implicit return type of 'int'.");
+-            }
+-        }
+-        else {
+-            if (doingLexRules) {
+-                println("Return value: lexical rule returns an implicit token type");
+-            }
+-            else {
+-                println("Return value: none");
+-            }
+-        }
+-
+-        // Gen arguments
+-        if (rblk.argAction != null) {
+-            println("Arguments: " + rblk.argAction);
+-        }
+-
+-        // Dump any init-action
+-        genBlockPreamble(rblk);
+-
+-        // Analyze the rule
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
+-        if (!ok) {
+-            println("Error: This rule is non-deterministic");
+-        }
+-
+-        // Dump the alternates of the rule
+-        genCommonBlock(rblk);
+-
+-        // Search for an unlabeled exception specification attached to the rule
+-        ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
+-
+-        // Generate user-defined or default catch phrases
+-        if (unlabeledUserSpec != null) {
+-            println("You specified error-handler(s) for this rule:");
+-            tabs++;
+-            for (int i = 0; i < unlabeledUserSpec.handlers.size(); i++) {
+-                if (i != 0) {
+-                    println("");
+-                }
+-
+-                ExceptionHandler handler = (ExceptionHandler)unlabeledUserSpec.handlers.elementAt(i);
+-                println("Error-handler(" + (i + 1) + ") catches [" + handler.exceptionTypeAndName.getText() + "] and executes:");
+-                printAction(handler.action.getText());
+-            }
+-            tabs--;
+-            println("End error-handlers.");
+-        }
+-        else if (!doingLexRules) {
+-            println("Default error-handling will be generated, which catches all");
+-            println("parser exceptions and consumes tokens until the follow-set is seen.");
+-        }
+-
+-        // Dump the follow set
+-        // Doesn't seem to work for lexical rules...
+-        if (!doingLexRules) {
+-            println("The follow set for this rule is:");
+-            tabs++;
+-            genFollowSetForRuleBlock(rblk);
+-            tabs--;
+-        }
+-
+-        tabs--;
+-        println("*** End " + ruleType + " Rule: " + s.getId());
+-    }
+-
+-    /** Generate the syntactic predicate.  This basically generates
+-     * the alternative block, buts tracks if we are inside a synPred
+-     * @param blk  The syntactic predicate block
+-     */
+-    protected void genSynPred(SynPredBlock blk) {
+-        syntacticPredLevel++;
+-        gen((AlternativeBlock)blk);
+-        syntacticPredLevel--;
+-    }
+-
+-    /** Generate the token types TXT file */
+-    protected void genTokenTypes(TokenManager tm) throws IOException {
+-        // Open the token output TXT file and set the currentOutput stream
+-        antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-        tabs = 0;
+-
+-        // Generate the header common to all diagnostic files
+-        genHeader();
+-
+-        // Generate a string for each token.  This creates a static
+-        // array of Strings indexed by token type.
+-        println("");
+-        println("*** Tokens used by the parser");
+-        println("This is a list of the token numeric values and the corresponding");
+-        println("token identifiers.  Some tokens are literals, and because of that");
+-        println("they have no identifiers.  Literals are double-quoted.");
+-        tabs++;
+-
+-        // Enumerate all the valid token types
+-        Vector v = tm.getVocabulary();
+-        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-            String s = (String)v.elementAt(i);
+-            if (s != null) {
+-                println(s + " = " + i);
+-            }
+-        }
+-
+-        // Close the interface
+-        tabs--;
+-        println("*** End of tokens used by the parser");
+-
+-        // Close the tokens output file
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Get a string for an expression to generate creation of an AST subtree.
+-     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
+-     */
+-    public String getASTCreateString(Vector v) {
+-        return "***Create an AST from a vector here***" + System.getProperty("line.separator");
+-    }
+-
+-    /** Get a string for an expression to generate creating of an AST node
+-     * @param str The arguments to the AST constructor
+-     */
+-    public String getASTCreateString(GrammarAtom atom, String str) {
+-        return "[" + str + "]";
+-    }
+-
+-    /// unused.
+-    protected String processActionForSpecialSymbols(String actionStr,
+-                                                    int line,
+-                                                    RuleBlock currentRule,
+-                                                    ActionTransInfo tInfo) {
+-        return actionStr;
+-    }
+-
+-    /** Map an identifier to it's corresponding tree-node variable.
+-     * This is context-sensitive, depending on the rule and alternative
+-     * being generated
+-     * @param id The identifier name to map
+-     * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned.
+-     */
+-    public String mapTreeId(String id, ActionTransInfo tInfo) {
+-        return id;
+-    }
+-
+-    /** Format a lookahead or follow set.
+-     * @param depth The depth of the entire lookahead/follow
+-     * @param k The lookahead level to print
+-     * @param lookahead  The lookahead/follow set to print
+-     */
+-    public void printSet(int depth, int k, Lookahead lookahead) {
+-        int numCols = 5;
+-
+-        int[] elems = lookahead.fset.toArray();
+-
+-        if (depth != 1) {
+-            print("k==" + k + ": {");
+-        }
+-        else {
+-            print("{ ");
+-        }
+-        if (elems.length > numCols) {
+-            _println("");
+-            tabs++;
+-            print("");
+-        }
+-
+-        int column = 0;
+-        for (int i = 0; i < elems.length; i++) {
+-            column++;
+-            if (column > numCols) {
+-                _println("");
+-                print("");
+-                column = 0;
+-            }
+-            if (doingLexRules) {
+-                _print(charFormatter.literalChar(elems[i]));
+-            }
+-            else {
+-                _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
+-            }
+-            if (i != elems.length - 1) {
+-                _print(", ");
+-            }
+-        }
+-
+-        if (elems.length > numCols) {
+-            _println("");
+-            tabs--;
+-            print("");
+-        }
+-        _println(" }");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/DocBookCodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/DocBookCodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/DocBookCodeGenerator.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/DocBookCodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,822 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** TODO: strip comments from javadoc entries
+- */
+-
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-import java.io.PrintWriter; //SAS: changed for proper text file io
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-/**Generate P.sgml, a cross-linked representation of P with or without actions */
+-public class DocBookCodeGenerator extends CodeGenerator {
+-    /** non-zero if inside syntactic predicate generation */
+-    protected int syntacticPredLevel = 0;
+-
+-    /** true during lexer generation, false during parser generation */
+-    protected boolean doingLexRules = false;
+-
+-    protected boolean firstElementInAlt;
+-
+-    protected AlternativeElement prevAltElem = null;	// what was generated last?
+-
+-    /** Create a Diagnostic code-generator using the given Grammar
+-     * The caller must still call setTool, setBehavior, and setAnalyzer
+-     * before generating code.
+-     */
+-    public DocBookCodeGenerator() {
+-        super();
+-        charFormatter = new JavaCharFormatter();
+-    }
+-
+-    /** Encode a string for printing in a HTML document..
+-     * e.g. encode '<' '>' and similar stuff
+-     * @param s the string to encode
+-     */
+-    static String HTMLEncode(String s) {
+-        StringBuffer buf = new StringBuffer();
+-
+-        for (int i = 0, len = s.length(); i < len; i++) {
+-            char c = s.charAt(i);
+-            if (c == '&')
+-                buf.append("&amp;");
+-            else if (c == '\"')
+-                buf.append("&quot;");
+-            else if (c == '\'')
+-                buf.append("&#039;");
+-            else if (c == '<')
+-                buf.append("&lt;");
+-            else if (c == '>')
+-                buf.append("&gt;");
+-            else
+-                buf.append(c);
+-        }
+-        return buf.toString();
+-    }
+-
+-    /** Encode a string for printing in a HTML document..
+-     * e.g. encode '<' '>' and similar stuff
+-     * @param s the string to encode
+-     */
+-    static String QuoteForId(String s) {
+-        StringBuffer buf = new StringBuffer();
+-
+-        for (int i = 0, len = s.length(); i < len; i++) {
+-            char c = s.charAt(i);
+-            if (c == '_')
+-                buf.append(".");
+-            else
+-                buf.append(c);
+-        }
+-        return buf.toString();
+-    }
+-
+-    public void gen() {
+-        // Do the code generation
+-        try {
+-            // Loop over all grammars
+-            Enumeration grammarIter = behavior.grammars.elements();
+-            while (grammarIter.hasMoreElements()) {
+-                Grammar g = (Grammar)grammarIter.nextElement();
+-
+-                // Connect all the components to each other
+-                /*
+-				g.setGrammarAnalyzer(analyzer);
+-				analyzer.setGrammar(g);
+-				*/
+-                g.setCodeGenerator(this);
+-
+-                // To get right overloading behavior across hetrogeneous grammars
+-                g.generate();
+-
+-                if (antlrTool.hasError()) {
+-                    antlrTool.fatalError("Exiting due to errors.");
+-                }
+-
+-            }
+-
+-        }
+-        catch (IOException e) {
+-            antlrTool.reportException(e, null);
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The {...} action to generate
+-     */
+-    public void gen(ActionElement action) {
+-        // no-op
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The "x|y|z|..." block to generate
+-     */
+-    public void gen(AlternativeBlock blk) {
+-        genGenericBlock(blk, "");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The block-end element to generate.  Block-end
+-     * elements are synthesized by the grammar parser to represent
+-     * the end of a block.
+-     */
+-    public void gen(BlockEndElement end) {
+-        // no-op
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character literal reference to generate
+-     */
+-    public void gen(CharLiteralElement atom) {
+-        if (atom.not) {
+-            _print("~");
+-        }
+-        _print(HTMLEncode(atom.atomText) + " ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character-range reference to generate
+-     */
+-    public void gen(CharRangeElement r) {
+-        print(r.beginText + ".." + r.endText + " ");
+-    }
+-
+-    /** Generate the lexer HTML file */
+-    public void gen(LexerGrammar g) throws IOException {
+-        setGrammar(g);
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-        doingLexRules = true;
+-
+-        // Generate header common to all TXT output files
+-        genHeader();
+-
+-        // Output the user-defined lexer premamble
+-        // RK: guess not..
+-        // println(grammar.preambleAction.getText());
+-
+-        // Generate lexer class definition
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(HTMLEncode(grammar.comment));
+-        }
+-
+-        println("<para>Definition of lexer " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".</para>");
+-
+-        // Generate user-defined parser class members
+-        // printAction(grammar.classMemberAction.getText());
+-
+-        /*
+-		// Generate string literals
+-		println("");
+-		println("*** String literals used in the parser");
+-		println("The following string literals were used in the parser.");
+-		println("An actual code generator would arrange to place these literals");
+-		println("into a table in the generated lexer, so that actions in the");
+-		println("generated lexer could match token text against the literals.");
+-		println("String literals used in the lexer are not listed here, as they");
+-		println("are incorporated into the mainstream lexer processing.");
+-		tabs++;
+-		// Enumerate all of the symbols and look for string literal symbols
+-		Enumeration ids = grammar.getSymbols();
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-			// Only processing string literals -- reject other symbol entries
+-			if ( sym instanceof StringLiteralSymbol ) {
+-				StringLiteralSymbol s = (StringLiteralSymbol)sym;
+-				println(s.getId() + " = " + s.getTokenType());
+-			}
+-		}
+-		tabs--;
+-		println("*** End of string literals used by the parser");
+-		*/
+-
+-        // Generate nextToken() rule.
+-        // nextToken() is a synthetic lexer rule that is the implicit OR of all
+-        // user-defined lexer rules.
+-        genNextToken();
+-
+-        // Generate code for each rule in the lexer
+-
+-        Enumeration ids = grammar.rules.elements();
+-        while (ids.hasMoreElements()) {
+-            RuleSymbol rs = (RuleSymbol)ids.nextElement();
+-            if (!rs.id.equals("mnextToken")) {
+-                genRule(rs);
+-            }
+-        }
+-
+-        // Close the lexer output file
+-        currentOutput.close();
+-        currentOutput = null;
+-        doingLexRules = false;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)+ block to generate
+-     */
+-    public void gen(OneOrMoreBlock blk) {
+-        genGenericBlock(blk, "+");
+-    }
+-
+-    /** Generate the parser HTML file */
+-    public void gen(ParserGrammar g) throws IOException {
+-        setGrammar(g);
+-        // Open the output stream for the parser and set the currentOutput
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml");
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml");
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-
+-        // Generate parser class definition
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(HTMLEncode(grammar.comment));
+-        }
+-
+-        println("<para>Definition of parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".</para>");
+-
+-        // Enumerate the parser rules
+-        Enumeration rules = grammar.rules.elements();
+-        while (rules.hasMoreElements()) {
+-            println("");
+-            // Get the rules from the list and downcast it to proper type
+-            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
+-            // Only process parser rules
+-            if (sym instanceof RuleSymbol) {
+-                genRule((RuleSymbol)sym);
+-            }
+-        }
+-        tabs--;
+-        println("");
+-
+-        genTail();
+-
+-        // Close the parser output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The rule-reference to generate
+-     */
+-    public void gen(RuleRefElement rr) {
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-
+-        // Generate the actual rule description
+-        _print("<link linkend=\"" + QuoteForId(rr.targetRule) + "\">");
+-        _print(rr.targetRule);
+-        _print("</link>");
+-        // RK: Leave out args..
+-        //	if (rr.args != null) {
+-        //		_print("["+rr.args+"]");
+-        //	}
+-        _print(" ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The string-literal reference to generate
+-     */
+-    public void gen(StringLiteralElement atom) {
+-        if (atom.not) {
+-            _print("~");
+-        }
+-        _print(HTMLEncode(atom.atomText));
+-        _print(" ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-range reference to generate
+-     */
+-    public void gen(TokenRangeElement r) {
+-        print(r.beginText + ".." + r.endText + " ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-reference to generate
+-     */
+-    public void gen(TokenRefElement atom) {
+-        if (atom.not) {
+-            _print("~");
+-        }
+-        _print(atom.atomText);
+-        _print(" ");
+-    }
+-
+-    public void gen(TreeElement t) {
+-        print(t + " ");
+-    }
+-
+-    /** Generate the tree-walker TXT file */
+-    public void gen(TreeWalkerGrammar g) throws IOException {
+-        setGrammar(g);
+-        // Open the output stream for the parser and set the currentOutput
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + ".sgml");
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".sgml");
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-
+-        // Output the user-defined parser premamble
+-        println("");
+-//		println("*** Tree-walker Preamble Action.");
+-//		println("This action will appear before the declaration of your tree-walker class:");
+-//		tabs++;
+-//		println(grammar.preambleAction.getText());
+-//		tabs--;
+-//		println("*** End of tree-walker Preamble Action");
+-
+-        // Generate tree-walker class definition
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(HTMLEncode(grammar.comment));
+-        }
+-
+-        println("<para>Definition of tree parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".</para>");
+-
+-        // Generate user-defined tree-walker class members
+-//		println("");
+-//		println("*** User-defined tree-walker class members:");
+-//		println("These are the member declarations that you defined for your class:");
+-//		tabs++;
+-//		printAction(grammar.classMemberAction.getText());
+-//		tabs--;
+-//		println("*** End of user-defined tree-walker class members");
+-
+-        // Generate code for each rule in the grammar
+-        println("");
+-//		println("*** tree-walker rules:");
+-        tabs++;
+-
+-        // Enumerate the tree-walker rules
+-        Enumeration rules = grammar.rules.elements();
+-        while (rules.hasMoreElements()) {
+-            println("");
+-            // Get the rules from the list and downcast it to proper type
+-            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
+-            // Only process tree-walker rules
+-            if (sym instanceof RuleSymbol) {
+-                genRule((RuleSymbol)sym);
+-            }
+-        }
+-        tabs--;
+-        println("");
+-//		println("*** End of tree-walker rules");
+-
+-//		println("");
+-//		println("*** End of tree-walker");
+-
+-        // Close the tree-walker output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate a wildcard element */
+-    public void gen(WildcardElement wc) {
+-        /*
+-		if ( wc.getLabel()!=null ) {
+-			_print(wc.getLabel()+"=");
+-		}
+-		*/
+-        _print(". ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)* block to generate
+-     */
+-    public void gen(ZeroOrMoreBlock blk) {
+-        genGenericBlock(blk, "*");
+-    }
+-
+-    protected void genAlt(Alternative alt) {
+-        if (alt.getTreeSpecifier() != null) {
+-            _print(alt.getTreeSpecifier().getText());
+-        }
+-        prevAltElem = null;
+-        for (AlternativeElement elem = alt.head;
+-             !(elem instanceof BlockEndElement);
+-             elem = elem.next) {
+-            elem.generate();
+-            firstElementInAlt = false;
+-            prevAltElem = elem;
+-        }
+-    }
+-    /** Generate the header for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.  This generates any variable declarations,
+-     * init-actions, and syntactic-predicate-testing variables.
+-     * @blk The block for which the preamble is to be generated.
+-     */
+-//	protected void genBlockPreamble(AlternativeBlock blk) {
+-    // RK: don't dump out init actions
+-    // dump out init action
+-//		if ( blk.initAction!=null ) {
+-//			printAction("{" + blk.initAction + "}");
+-//		}
+-//	}
+-    /** Generate common code for a block of alternatives; return a postscript
+-     * that needs to be generated at the end of the block.  Other routines
+-     * may append else-clauses and such for error checking before the postfix
+-     * is generated.
+-     */
+-    public void genCommonBlock(AlternativeBlock blk) {
+-        if (blk.alternatives.size() > 1)
+-            println("<itemizedlist mark=\"none\">");
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            AlternativeElement elem = alt.head;
+-
+-            if (blk.alternatives.size() > 1)
+-                print("<listitem><para>");
+-
+-            // dump alt operator |
+-            if (i > 0 && blk.alternatives.size() > 1) {
+-                _print("| ");
+-            }
+-
+-            // Dump the alternative, starting with predicates
+-            //
+-            boolean save = firstElementInAlt;
+-            firstElementInAlt = true;
+-            tabs++;	// in case we do a newline in alt, increase the tab indent
+-
+-            genAlt(alt);
+-            tabs--;
+-            firstElementInAlt = save;
+-            if (blk.alternatives.size() > 1)
+-                _println("</para></listitem>");
+-        }
+-        if (blk.alternatives.size() > 1)
+-            println("</itemizedlist>");
+-    }
+-
+-    /** Generate a textual representation of the follow set
+-     * for a block.
+-     * @param blk  The rule block of interest
+-     */
+-    public void genFollowSetForRuleBlock(RuleBlock blk) {
+-        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
+-        printSet(grammar.maxk, 1, follow);
+-    }
+-
+-    protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
+-        if (blk.alternatives.size() > 1) {
+-            // make sure we start on a new line
+-            _println("");
+-            if (!firstElementInAlt) {
+-                // only do newline if the last element wasn't a multi-line block
+-                //if ( prevAltElem==null ||
+-                //	 !(prevAltElem instanceof AlternativeBlock) ||
+-                //	 ((AlternativeBlock)prevAltElem).alternatives.size()==1 )
+-                //{
+-                _println("(");
+-                //}
+-                //else
+-                //{
+-                //	_print("(");
+-                //}
+-                // _println("");
+-                // print("(\t");
+-            }
+-            else {
+-                _print("(");
+-            }
+-        }
+-        else {
+-            _print("( ");
+-        }
+-        // RK: don't dump init actions
+-        //	genBlockPreamble(blk);
+-        genCommonBlock(blk);
+-        if (blk.alternatives.size() > 1) {
+-            _println("");
+-            print(")" + blkOp + " ");
+-            // if not last element of alt, need newline & to indent
+-            if (!(blk.next instanceof BlockEndElement)) {
+-                _println("");
+-                print("");
+-            }
+-        }
+-        else {
+-            _print(")" + blkOp + " ");
+-        }
+-    }
+-
+-    /** Generate a header that is common to all TXT files */
+-    protected void genHeader() {
+-        println("<?xml version=\"1.0\" standalone=\"no\"?>");
+-        println("<!DOCTYPE book PUBLIC \"-//OASIS//DTD DocBook V3.1//EN\">");
+-        println("<book lang=\"en\">");
+-        println("<bookinfo>");
+-        println("<title>Grammar " + grammar.getClassName() + "</title>");
+-        println("  <author>");
+-        println("    <firstname></firstname>");
+-        println("    <othername></othername>");
+-        println("    <surname></surname>");
+-        println("    <affiliation>");
+-        println("     <address>");
+-        println("     <email></email>");
+-        println("     </address>");
+-        println("    </affiliation>");
+-        println("  </author>");
+-        println("  <othercredit>");
+-        println("    <contrib>");
+-        println("    Generated by <ulink url=\"http://www.ANTLR.org/\">ANTLR</ulink>" + antlrTool.version);
+-        println("    from " + antlrTool.grammarFile);
+-        println("    </contrib>");
+-        println("  </othercredit>");
+-        println("  <pubdate></pubdate>");
+-        println("  <abstract>");
+-        println("  <para>");
+-        println("  </para>");
+-        println("  </abstract>");
+-        println("</bookinfo>");
+-        println("<chapter>");
+-        println("<title></title>");
+-    }
+-
+-    /**Generate the lookahead set for an alternate. */
+-    protected void genLookaheadSetForAlt(Alternative alt) {
+-        if (doingLexRules && alt.cache[1].containsEpsilon()) {
+-            println("MATCHES ALL");
+-            return;
+-        }
+-        int depth = alt.lookaheadDepth;
+-        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
+-            // if the decision is nondeterministic, do the best we can: LL(k)
+-            // any predicates that are around will be generated later.
+-            depth = grammar.maxk;
+-        }
+-        for (int i = 1; i <= depth; i++) {
+-            Lookahead lookahead = alt.cache[i];
+-            printSet(depth, i, lookahead);
+-        }
+-    }
+-
+-    /** Generate a textual representation of the lookahead set
+-     * for a block.
+-     * @param blk  The block of interest
+-     */
+-    public void genLookaheadSetForBlock(AlternativeBlock blk) {
+-        // Find the maximal lookahead depth over all alternatives
+-        int depth = 0;
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
+-                depth = grammar.maxk;
+-                break;
+-            }
+-            else if (depth < alt.lookaheadDepth) {
+-                depth = alt.lookaheadDepth;
+-            }
+-        }
+-
+-        for (int i = 1; i <= depth; i++) {
+-            Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
+-            printSet(depth, i, lookahead);
+-        }
+-    }
+-
+-    /** Generate the nextToken rule.
+-     * nextToken is a synthetic lexer rule that is the implicit OR of all
+-     * user-defined lexer rules.
+-     */
+-    public void genNextToken() {
+-        println("");
+-        println("/** Lexer nextToken rule:");
+-        println(" *  The lexer nextToken rule is synthesized from all of the user-defined");
+-        println(" *  lexer rules.  It logically consists of one big alternative block with");
+-        println(" *  each user-defined rule being an alternative.");
+-        println(" */");
+-
+-        // Create the synthesized rule block for nextToken consisting
+-        // of an alternate block containing all the user-defined lexer rules.
+-        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
+-
+-        // Define the nextToken rule symbol
+-        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
+-        nextTokenRs.setDefined();
+-        nextTokenRs.setBlock(blk);
+-        nextTokenRs.access = "private";
+-        grammar.define(nextTokenRs);
+-
+-        /*
+-		// Analyze the synthesized block
+-		if (!grammar.theLLkAnalyzer.deterministic(blk))
+-		{
+-			println("The grammar analyzer has determined that the synthesized");
+-			println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
+-			println("This means that there is some overlap of the character");
+-			println("lookahead for two or more of your lexer rules.");
+-		}
+-		*/
+-
+-        genCommonBlock(blk);
+-    }
+-
+-    /** Generate code for a named rule block
+-     * @param s The RuleSymbol describing the rule to generate
+-     */
+-    public void genRule(RuleSymbol s) {
+-        if (s == null || !s.isDefined()) return;	// undefined rule
+-        println("");
+-
+-        if (s.access.length() != 0) {
+-            if (!s.access.equals("public")) {
+-                _print("<para>" + s.access + " </para>");
+-            }
+-        }
+-
+-        println("<section id=\"" + QuoteForId(s.getId()) + "\">");
+-        println("<title>" + s.getId() + "</title>");
+-        if (s.comment != null) {
+-            _println("<para>" + HTMLEncode(s.comment) + "</para>");
+-        }
+-        println("<para>");
+-
+-        // Get rule return type and arguments
+-        RuleBlock rblk = s.getBlock();
+-
+-        // RK: for HTML output not of much value...
+-        // Gen method return value(s)
+-//		if (rblk.returnAction != null) {
+-//			_print("["+rblk.returnAction+"]");
+-//		}
+-        // Gen arguments
+-//		if (rblk.argAction != null)
+-//		{
+-//				_print(" returns [" + rblk.argAction+"]");
+-//		}
+-        _println("");
+-        print(s.getId() + ":\t");
+-        tabs++;
+-
+-        // Dump any init-action
+-        // genBlockPreamble(rblk);
+-
+-        // Dump the alternates of the rule
+-        genCommonBlock(rblk);
+-
+-        _println("");
+-//		println(";");
+-        tabs--;
+-        _println("</para>");
+-        _println("</section><!-- section \"" + s.getId() + "\" -->");
+-    }
+-
+-    /** Generate the syntactic predicate.  This basically generates
+-     * the alternative block, buts tracks if we are inside a synPred
+-     * @param blk  The syntactic predicate block
+-     */
+-    protected void genSynPred(SynPredBlock blk) {
+-        // no op
+-    }
+-
+-    public void genTail() {
+-        println("</chapter>");
+-        println("</book>");
+-    }
+-
+-    /** Generate the token types TXT file */
+-    protected void genTokenTypes(TokenManager tm) throws IOException {
+-        // Open the token output TXT file and set the currentOutput stream
+-        antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-        tabs = 0;
+-
+-        // Generate the header common to all diagnostic files
+-        genHeader();
+-
+-        // Generate a string for each token.  This creates a static
+-        // array of Strings indexed by token type.
+-        println("");
+-        println("*** Tokens used by the parser");
+-        println("This is a list of the token numeric values and the corresponding");
+-        println("token identifiers.  Some tokens are literals, and because of that");
+-        println("they have no identifiers.  Literals are double-quoted.");
+-        tabs++;
+-
+-        // Enumerate all the valid token types
+-        Vector v = tm.getVocabulary();
+-        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-            String s = (String)v.elementAt(i);
+-            if (s != null) {
+-                println(s + " = " + i);
+-            }
+-        }
+-
+-        // Close the interface
+-        tabs--;
+-        println("*** End of tokens used by the parser");
+-
+-        // Close the tokens output file
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /// unused.
+-    protected String processActionForSpecialSymbols(String actionStr,
+-                                                    int line,
+-                                                    RuleBlock currentRule,
+-                                                    ActionTransInfo tInfo) {
+-        return actionStr;
+-    }
+-
+-    /** Get a string for an expression to generate creation of an AST subtree.
+-     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
+-     */
+-    public String getASTCreateString(Vector v) {
+-        return null;
+-    }
+-
+-    /** Get a string for an expression to generate creating of an AST node
+-     * @param str The arguments to the AST constructor
+-     */
+-    public String getASTCreateString(GrammarAtom atom, String str) {
+-        return null;
+-    }
+-
+-    /** Map an identifier to it's corresponding tree-node variable.
+-     * This is context-sensitive, depending on the rule and alternative
+-     * being generated
+-     * @param id The identifier name to map
+-     * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned.
+-     */
+-    public String mapTreeId(String id, ActionTransInfo tInfo) {
+-        return id;
+-    }
+-
+-    /** Format a lookahead or follow set.
+-     * @param depth The depth of the entire lookahead/follow
+-     * @param k The lookahead level to print
+-     * @param lookahead  The lookahead/follow set to print
+-     */
+-    public void printSet(int depth, int k, Lookahead lookahead) {
+-        int numCols = 5;
+-
+-        int[] elems = lookahead.fset.toArray();
+-
+-        if (depth != 1) {
+-            print("k==" + k + ": {");
+-        }
+-        else {
+-            print("{ ");
+-        }
+-        if (elems.length > numCols) {
+-            _println("");
+-            tabs++;
+-            print("");
+-        }
+-
+-        int column = 0;
+-        for (int i = 0; i < elems.length; i++) {
+-            column++;
+-            if (column > numCols) {
+-                _println("");
+-                print("");
+-                column = 0;
+-            }
+-            if (doingLexRules) {
+-                _print(charFormatter.literalChar(elems[i]));
+-            }
+-            else {
+-                _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
+-            }
+-            if (i != elems.length - 1) {
+-                _print(", ");
+-            }
+-        }
+-
+-        if (elems.length > numCols) {
+-            _println("");
+-            tabs--;
+-            print("");
+-        }
+-        _println(" }");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/DumpASTVisitor.java glassfish-gil/entity-persistence/src/java/persistence/antlr/DumpASTVisitor.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/DumpASTVisitor.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/DumpASTVisitor.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,66 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.*;
+-
+-import persistence.antlr.collections.AST;
+-
+-/** Simple class to dump the contents of an AST to the output */
+-public class DumpASTVisitor implements ASTVisitor {
+-    protected int level = 0;
+-
+-
+-    private void tabs() {
+-        for (int i = 0; i < level; i++) {
+-            System.out.print("   ");
+-        }
+-    }
+-
+-    public void visit(AST node) {
+-        // Flatten this level of the tree if it has no children
+-        boolean flatten = /*true*/ false;
+-        AST node2;
+-        for (node2 = node; node2 != null; node2 = node2.getNextSibling()) {
+-            if (node2.getFirstChild() != null) {
+-                flatten = false;
+-                break;
+-            }
+-        }
+-
+-        for (node2 = node; node2 != null; node2 = node2.getNextSibling()) {
+-            if (!flatten || node2 == node) {
+-                tabs();
+-            }
+-            if (node2.getText() == null) {
+-                System.out.print("nil");
+-            }
+-            else {
+-                System.out.print(node2.getText());
+-            }
+-
+-            System.out.print(" [" + node2.getType() + "] ");
+-
+-            if (flatten) {
+-                System.out.print(" ");
+-            }
+-            else {
+-                System.out.println("");
+-            }
+-
+-            if (node2.getFirstChild() != null) {
+-                level++;
+-                visit(node2.getFirstChild());
+-                level--;
+-            }
+-        }
+-
+-        if (flatten) {
+-            System.out.println("");
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ExceptionHandler.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ExceptionHandler.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ExceptionHandler.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ExceptionHandler.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,21 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class ExceptionHandler {
+-    // Type of the ANTLR exception class to catch and the variable decl
+-    protected Token exceptionTypeAndName;
+-    // The action to be executed when the exception is caught
+-    protected Token action;
+-
+-
+-    public ExceptionHandler(Token exceptionTypeAndName_,
+-                            Token action_) {
+-        exceptionTypeAndName = exceptionTypeAndName_;
+-        action = action_;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ExceptionSpec.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ExceptionSpec.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ExceptionSpec.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ExceptionSpec.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,28 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-class ExceptionSpec {
+-    // Non-null if this refers to a labeled rule
+-    // Use a token instead of a string to get the line information
+-    protected Token label;
+-
+-    // List of ExceptionHandler (catch phrases)
+-    protected Vector handlers;
+-
+-
+-    public ExceptionSpec(Token label_) {
+-        label = label_;
+-        handlers = new Vector();
+-    }
+-
+-    public void addHandler(ExceptionHandler handler) {
+-        handlers.appendElement(handler);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/FileCopyException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/FileCopyException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/FileCopyException.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/FileCopyException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,13 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class FileCopyException extends java.io.IOException {
+-    public FileCopyException(String msg) {
+-        super(msg);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/FileLineFormatter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/FileLineFormatter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/FileLineFormatter.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/FileLineFormatter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,26 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public abstract class FileLineFormatter {
+-
+-    private static FileLineFormatter formatter = new DefaultFileLineFormatter();
+-
+-    public static FileLineFormatter getFormatter() {
+-        return formatter;
+-    }
+-
+-    public static void setFormatter(FileLineFormatter f) {
+-        formatter = f;
+-    }
+-
+-    /** @param fileName the file that should appear in the prefix. (or null)
+-     * @param line the line (or -1)
+-     * @param column the column (or -1)
+-     */
+-    public abstract String getFormatString(String fileName, int line, int column);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/GrammarAnalyzer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarAnalyzer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/GrammarAnalyzer.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarAnalyzer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,35 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A GrammarAnalyzer computes lookahead from Grammar (which contains
+- * a grammar symbol table) and can then answer questions about the
+- * grammar.
+- *
+- * To access the RuleBlock for a rule name, the grammar symbol table
+- * is consulted.
+- *
+- * There should be no distinction between static & dynamic analysis.
+- * In other words, some of the easy analysis can be done statically
+- * and then the part that is hard statically can be deferred to
+- * parse-time.  Interestingly, computing LL(k) for k>1 lookahead
+- * statically is O(|T|^k) where T is the grammar vocabulary, but,
+- * is O(k) at run-time (ignoring the large constant associated with
+- * the size of the grammar).  In English, the difference can be
+- * described as "find the set of all possible k-sequences of input"
+- * versus "does this specific k-sequence match?".
+- */
+-public interface GrammarAnalyzer {
+-    /**The epsilon token type is an imaginary type used
+-     * during analysis.  It indicates an incomplete look() computation.
+-     * Must be kept consistent with Token constants to be between
+-     * MIN_USER_TYPE and INVALID_TYPE.
+-     */
+-    // public static final int EPSILON_TYPE = 2;
+-    public static final int NONDETERMINISTIC = Integer.MAX_VALUE; // lookahead depth
+-    public static final int LOOKAHEAD_DEPTH_INIT = -1;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/GrammarAtom.java glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarAtom.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/GrammarAtom.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarAtom.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,67 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** A GrammarAtom is either a token ref, a character ref, or string.
+- * The analysis doesn't care.
+- */
+-abstract class GrammarAtom extends AlternativeElement {
+-    protected String label;
+-    protected String atomText;
+-    protected int tokenType = Token.INVALID_TYPE;
+-    protected boolean not = false;	// ~T or ~'c' or ~"foo"
+-    /** Set to type of AST node to create during parse.  Defaults to what is
+-     *  set in the TokenSymbol.
+-     */
+-    protected String ASTNodeType = null;
+-
+-    public GrammarAtom(Grammar g, Token t, int autoGenType) {
+-        super(g, t, autoGenType);
+-        atomText = t.getText();
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public String getText() {
+-        return atomText;
+-    }
+-
+-    public int getType() {
+-        return tokenType;
+-    }
+-
+-    public void setLabel(String label_) {
+-        label = label_;
+-    }
+-
+-    public String getASTNodeType() {
+-        return ASTNodeType;
+-    }
+-
+-    public void setASTNodeType(String type) {
+-        ASTNodeType = type;
+-    }
+-
+-    public void setOption(Token option, Token value) {
+-        if (option.getText().equals("AST")) {
+-            setASTNodeType(value.getText());
+-        }
+-        else {
+-            grammar.antlrTool.error("Invalid element option:" + option.getText(),
+-                               grammar.getFilename(), option.getLine(), option.getColumn());
+-        }
+-    }
+-
+-    public String toString() {
+-        String s = " ";
+-        if (label != null) s += label + ":";
+-        if (not) s += "~";
+-        return s + atomText;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/GrammarElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/GrammarElement.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,61 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A GrammarElement is a generic node in our
+- * data structure that holds a grammar in memory.
+- * This data structure can be used for static
+- * analysis or for dynamic analysis (during parsing).
+- * Every node must know which grammar owns it, how
+- * to generate code, and how to do analysis.
+- */
+-abstract class GrammarElement {
+-    public static final int AUTO_GEN_NONE = 1;
+-    public static final int AUTO_GEN_CARET = 2;
+-    public static final int AUTO_GEN_BANG = 3;
+-
+-    /*
+-	 * Note that Java does static argument type matching to
+-	 * determine which function to execute on the receiver.
+-	 * Here, that implies that we cannot simply say
+-	 * grammar.generator.gen(this) in GrammarElement or
+-	 * only CodeGenerator.gen(GrammarElement ge) would
+-	 * ever be called.
+-	 */
+-    protected Grammar grammar;
+-    protected int line;
+-    protected int column;
+-
+-    public GrammarElement(Grammar g) {
+-        grammar = g;
+-        line = -1;
+-        column = -1;
+-    }
+-
+-    public GrammarElement(Grammar g, Token start) {
+-        grammar = g;
+-        line = start.getLine();
+-        column = start.getColumn();
+-    }
+-
+-    public void generate() {
+-    }
+-
+-    public int getLine() {
+-        return line;
+-    }
+-
+-    public int getColumn() {
+-        return column;
+-    }
+-
+-    public Lookahead look(int k) {
+-        return null;
+-    }
+-
+-    public abstract String toString();
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Grammar.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Grammar.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Grammar.java	2006-08-31 00:34:06.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Grammar.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,287 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import java.io.IOException;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-/**A Grammar holds a set of rules (which are stored
+- * in a symbol table).  Most of the time a grammar
+- * needs a code generator and an LLkAnalyzer too.
+- */
+-public abstract class Grammar {
+-    protected Tool antlrTool;
+-    protected CodeGenerator generator;
+-    protected LLkGrammarAnalyzer theLLkAnalyzer;
+-    protected Hashtable symbols;
+-    protected boolean buildAST = false;
+-    protected boolean analyzerDebug = false;
+-    protected boolean interactive = false;
+-    protected String superClass = null;
+-
+-    /** The token manager associated with the grammar, if any.
+-     // The token manager is responsible for maintaining the set of valid tokens, and
+-     // is conceptually shared between the lexer and parser.  This may be either a
+-     // LexerGrammar or a ImportVocabTokenManager.
+-     */
+-    protected TokenManager tokenManager;
+-
+-    /** The name of the export vocabulary...used to generate the output
+-     *  token types interchange file.
+-     */
+-    protected String exportVocab = null;
+-
+-    /** The name of the import vocabulary.  "Initial conditions"
+-     */
+-    protected String importVocab = null;
+-
+-    // Mapping from String keys to Token option values
+-    protected Hashtable options;
+-    // Vector of RuleSymbol entries
+-    protected Vector rules;
+-
+-    protected Token preambleAction = new CommonToken(Token.INVALID_TYPE, "");
+-    protected String className = null;
+-    protected String fileName = null;
+-    protected Token classMemberAction = new CommonToken(Token.INVALID_TYPE, "");
+-    protected boolean hasSyntacticPredicate = false;
+-    protected boolean hasUserErrorHandling = false;
+-
+-    // max lookahead that can be attempted for this parser.
+-    protected int maxk = 1;
+-
+-    // options
+-    protected boolean traceRules = false;
+-    protected boolean debuggingOutput = false;
+-    protected boolean defaultErrorHandler = true;
+-
+-    protected String comment = null; // javadoc comment
+-
+-    public Grammar(String className_, Tool tool_, String superClass) {
+-        className = className_;
+-        antlrTool = tool_;
+-        symbols = new Hashtable();
+-        options = new Hashtable();
+-        rules = new Vector(100);
+-        this.superClass = superClass;
+-    }
+-
+-    /** Define a rule */
+-    public void define(RuleSymbol rs) {
+-        rules.appendElement(rs);
+-        // add the symbol to the rules hash table
+-        symbols.put(rs.getId(), rs);
+-    }
+-
+-    /** Top-level call to generate the code for this grammar */
+-    public abstract void generate() throws IOException;
+-
+-    protected String getClassName() {
+-        return className;
+-    }
+-
+-    /* Does this grammar have a default error handler? */
+-    public boolean getDefaultErrorHandler() {
+-        return defaultErrorHandler;
+-    }
+-
+-    public String getFilename() {
+-        return fileName;
+-    }
+-
+-    /** Get an integer option.  Given the name of the option find its
+-     * associated integer value.  If the associated value is not an integer or
+-     * is not in the table, then throw an exception of type NumberFormatException.
+-     * @param key The name of the option
+-     * @return The value associated with the key.
+-     */
+-    public int getIntegerOption(String key) throws NumberFormatException {
+-        Token t = (Token)options.get(key);
+-        if (t == null || t.getType() != ANTLRTokenTypes.INT) {
+-            throw new NumberFormatException();
+-        }
+-        else {
+-            return Integer.parseInt(t.getText());
+-        }
+-    }
+-
+-    /** Get an option.  Given the name of the option find its associated value.
+-     * @param key The name of the option
+-     * @return The value associated with the key, or null if the key has not been set.
+-     */
+-    public Token getOption(String key) {
+-        return (Token)options.get(key);
+-    }
+-
+-    // Get name of class from which generated parser/lexer inherits
+-    protected abstract String getSuperClass();
+-
+-    public GrammarSymbol getSymbol(String s) {
+-        return (GrammarSymbol)symbols.get(s);
+-    }
+-
+-    public Enumeration getSymbols() {
+-        return symbols.elements();
+-    }
+-
+-    /** Check the existence of an option in the table
+-     * @param key The name of the option
+-     * @return true if the option is in the table
+-     */
+-    public boolean hasOption(String key) {
+-        return options.containsKey(key);
+-    }
+-
+-    /** Is a rule symbol defined? (not used for tokens) */
+-    public boolean isDefined(String s) {
+-        return symbols.containsKey(s);
+-    }
+-
+-    /**Process command line arguments.  Implemented in subclasses */
+-    public abstract void processArguments(String[] args);
+-
+-    public void setCodeGenerator(CodeGenerator gen) {
+-        generator = gen;
+-    }
+-
+-    public void setFilename(String s) {
+-        fileName = s;
+-    }
+-
+-    public void setGrammarAnalyzer(LLkGrammarAnalyzer a) {
+-        theLLkAnalyzer = a;
+-    }
+-
+-    /** Set a generic option.
+-     * This associates a generic option key with a Token value.
+-     * No validation is performed by this method, although users of the value
+-     * (code generation and/or analysis) may require certain formats.
+-     * The value is stored as a token so that the location of an error
+-     * can be reported.
+-     * @param key The name of the option.
+-     * @param value The value to associate with the key.
+-     * @return true if the option was a valid generic grammar option, false o/w
+-     */
+-    public boolean setOption(String key, Token value) {
+-        options.put(key, value);
+-        String s = value.getText();
+-        int i;
+-        if (key.equals("k")) {
+-            try {
+-                maxk = getIntegerOption("k");
+-				if ( maxk<=0 ) {
+-					antlrTool.error("option 'k' must be greater than 0 (was " +
+-									value.getText() + ")",
+-									getFilename(),
+-									value.getLine(),
+-									value.getColumn());
+-					maxk = 1;
+-				}
+-            }
+-            catch (NumberFormatException e) {
+-                antlrTool.error("option 'k' must be an integer (was " + value.getText() + ")", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("codeGenMakeSwitchThreshold")) {
+-            try {
+-                i = getIntegerOption("codeGenMakeSwitchThreshold");
+-            }
+-            catch (NumberFormatException e) {
+-                antlrTool.error("option 'codeGenMakeSwitchThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("codeGenBitsetTestThreshold")) {
+-            try {
+-                i = getIntegerOption("codeGenBitsetTestThreshold");
+-            }
+-            catch (NumberFormatException e) {
+-                antlrTool.error("option 'codeGenBitsetTestThreshold' must be an integer", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("defaultErrorHandler")) {
+-            if (s.equals("true")) {
+-                defaultErrorHandler = true;
+-            }
+-            else if (s.equals("false")) {
+-                defaultErrorHandler = false;
+-            }
+-            else {
+-                antlrTool.error("Value for defaultErrorHandler must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("analyzerDebug")) {
+-            if (s.equals("true")) {
+-                analyzerDebug = true;
+-            }
+-            else if (s.equals("false")) {
+-                analyzerDebug = false;
+-            }
+-            else {
+-                antlrTool.error("option 'analyzerDebug' must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("codeGenDebug")) {
+-            if (s.equals("true")) {
+-                analyzerDebug = true;
+-            }
+-            else if (s.equals("false")) {
+-                analyzerDebug = false;
+-            }
+-            else {
+-                antlrTool.error("option 'codeGenDebug' must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("classHeaderSuffix")) {
+-            return true;
+-        }
+-        if (key.equals("classHeaderPrefix")) {
+-            return true;
+-        }
+-        if (key.equals("namespaceAntlr")) {
+-            return true;
+-        }
+-        if (key.equals("namespaceStd")) {
+-            return true;
+-        }
+-        if (key.equals("genHashLines")) {
+-            return true;
+-        }
+-        if (key.equals("noConstructors")) {
+-            return true;
+-        }
+-        return false;
+-    }
+-
+-    public void setTokenManager(TokenManager tokenManager_) {
+-        tokenManager = tokenManager_;
+-    }
+-
+-    /** Print out the grammar without actions */
+-    public String toString() {
+-        StringBuffer buf = new StringBuffer(20000);
+-        Enumeration ids = rules.elements();
+-        while (ids.hasMoreElements()) {
+-            RuleSymbol rs = (RuleSymbol)ids.nextElement();
+-            if (!rs.id.equals("mnextToken")) {
+-                buf.append(rs.getBlock().toString());
+-                buf.append("\n\n");
+-            }
+-        }
+-        return buf.toString();
+-    }
+-
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/GrammarSymbol.java glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarSymbol.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/GrammarSymbol.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/GrammarSymbol.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,29 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A GrammarSymbol is a generic symbol that can be
+- * added to the symbol table for a grammar.
+- */
+-abstract class GrammarSymbol {
+-    protected String id;
+-
+-    public GrammarSymbol() {
+-    }
+-
+-    public GrammarSymbol(String s) {
+-        id = s;
+-    }
+-
+-    public String getId() {
+-        return id;
+-    }
+-
+-    public void setId(String s) {
+-        id = s;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/HTMLCodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/HTMLCodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/HTMLCodeGenerator.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/HTMLCodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,796 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-import java.io.PrintWriter; //SAS: changed for proper text file io
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-/**Generate P.html, a cross-linked representation of P with or without actions */
+-public class HTMLCodeGenerator extends CodeGenerator {
+-    /** non-zero if inside syntactic predicate generation */
+-    protected int syntacticPredLevel = 0;
+-
+-    /** true during lexer generation, false during parser generation */
+-    protected boolean doingLexRules = false;
+-
+-    protected boolean firstElementInAlt;
+-
+-    protected AlternativeElement prevAltElem = null;	// what was generated last?
+-
+-    /** Create a Diagnostic code-generator using the given Grammar
+-     * The caller must still call setTool, setBehavior, and setAnalyzer
+-     * before generating code.
+-     */
+-    public HTMLCodeGenerator() {
+-        super();
+-        charFormatter = new JavaCharFormatter();
+-    }
+-
+-    /** Encode a string for printing in a HTML document..
+-     * e.g. encode '<' '>' and similar stuff
+-     * @param s the string to encode
+-     */
+-    static String HTMLEncode(String s) {
+-        StringBuffer buf = new StringBuffer();
+-
+-        for (int i = 0, len = s.length(); i < len; i++) {
+-            char c = s.charAt(i);
+-            if (c == '&')
+-                buf.append("&amp;");
+-            else if (c == '\"')
+-                buf.append("&quot;");
+-            else if (c == '\'')
+-                buf.append("&#039;");
+-            else if (c == '<')
+-                buf.append("&lt;");
+-            else if (c == '>')
+-                buf.append("&gt;");
+-            else
+-                buf.append(c);
+-        }
+-        return buf.toString();
+-    }
+-
+-    public void gen() {
+-        // Do the code generation
+-        try {
+-            // Loop over all grammars
+-            Enumeration grammarIter = behavior.grammars.elements();
+-            while (grammarIter.hasMoreElements()) {
+-                Grammar g = (Grammar)grammarIter.nextElement();
+-
+-                // Connect all the components to each other
+-                /*
+-				g.setGrammarAnalyzer(analyzer);
+-				analyzer.setGrammar(g);
+-				*/
+-                g.setCodeGenerator(this);
+-
+-                // To get right overloading behavior across hetrogeneous grammars
+-                g.generate();
+-
+-                if (antlrTool.hasError()) {
+-                    antlrTool.fatalError("Exiting due to errors.");
+-                }
+-
+-            }
+-
+-        }
+-        catch (IOException e) {
+-            antlrTool.reportException(e, null);
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The {...} action to generate
+-     */
+-    public void gen(ActionElement action) {
+-        // no-op
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The "x|y|z|..." block to generate
+-     */
+-    public void gen(AlternativeBlock blk) {
+-        genGenericBlock(blk, "");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The block-end element to generate.  Block-end
+-     * elements are synthesized by the grammar parser to represent
+-     * the end of a block.
+-     */
+-    public void gen(BlockEndElement end) {
+-        // no-op
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character literal reference to generate
+-     */
+-    public void gen(CharLiteralElement atom) {
+-        if (atom.not) {
+-            _print("~");
+-        }
+-        _print(HTMLEncode(atom.atomText) + " ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character-range reference to generate
+-     */
+-    public void gen(CharRangeElement r) {
+-        print(r.beginText + ".." + r.endText + " ");
+-    }
+-
+-    /** Generate the lexer HTML file */
+-    public void gen(LexerGrammar g) throws IOException {
+-        setGrammar(g);
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-        doingLexRules = true;
+-
+-        // Generate header common to all TXT output files
+-        genHeader();
+-
+-        // Output the user-defined lexer premamble
+-        // RK: guess not..
+-        // println(grammar.preambleAction.getText());
+-
+-        // Generate lexer class definition
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(HTMLEncode(grammar.comment));
+-        }
+-
+-        println("Definition of lexer " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".");
+-
+-        // Generate user-defined parser class members
+-        // printAction(grammar.classMemberAction.getText());
+-
+-        /*
+-		// Generate string literals
+-		println("");
+-		println("*** String literals used in the parser");
+-		println("The following string literals were used in the parser.");
+-		println("An actual code generator would arrange to place these literals");
+-		println("into a table in the generated lexer, so that actions in the");
+-		println("generated lexer could match token text against the literals.");
+-		println("String literals used in the lexer are not listed here, as they");
+-		println("are incorporated into the mainstream lexer processing.");
+-		tabs++;
+-		// Enumerate all of the symbols and look for string literal symbols
+-		Enumeration ids = grammar.getSymbols();
+-		while ( ids.hasMoreElements() ) {
+-			GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-			// Only processing string literals -- reject other symbol entries
+-			if ( sym instanceof StringLiteralSymbol ) {
+-				StringLiteralSymbol s = (StringLiteralSymbol)sym;
+-				println(s.getId() + " = " + s.getTokenType());
+-			}
+-		}
+-		tabs--;
+-		println("*** End of string literals used by the parser");
+-		*/
+-
+-        // Generate nextToken() rule.
+-        // nextToken() is a synthetic lexer rule that is the implicit OR of all
+-        // user-defined lexer rules.
+-        genNextToken();
+-
+-        // Generate code for each rule in the lexer
+-
+-        Enumeration ids = grammar.rules.elements();
+-        while (ids.hasMoreElements()) {
+-            RuleSymbol rs = (RuleSymbol)ids.nextElement();
+-            if (!rs.id.equals("mnextToken")) {
+-                genRule(rs);
+-            }
+-        }
+-
+-        // Close the lexer output file
+-        currentOutput.close();
+-        currentOutput = null;
+-        doingLexRules = false;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)+ block to generate
+-     */
+-    public void gen(OneOrMoreBlock blk) {
+-        genGenericBlock(blk, "+");
+-    }
+-
+-    /** Generate the parser HTML file */
+-    public void gen(ParserGrammar g) throws IOException {
+-        setGrammar(g);
+-        // Open the output stream for the parser and set the currentOutput
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + ".html");
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".html");
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-
+-        // Generate parser class definition
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(HTMLEncode(grammar.comment));
+-        }
+-
+-        println("Definition of parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".");
+-
+-        // Enumerate the parser rules
+-        Enumeration rules = grammar.rules.elements();
+-        while (rules.hasMoreElements()) {
+-            println("");
+-            // Get the rules from the list and downcast it to proper type
+-            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
+-            // Only process parser rules
+-            if (sym instanceof RuleSymbol) {
+-                genRule((RuleSymbol)sym);
+-            }
+-        }
+-        tabs--;
+-        println("");
+-
+-        genTail();
+-
+-        // Close the parser output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The rule-reference to generate
+-     */
+-    public void gen(RuleRefElement rr) {
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-
+-        // Generate the actual rule description
+-        _print("<a href=\"" + grammar.getClassName() + ".html#" + rr.targetRule + "\">");
+-        _print(rr.targetRule);
+-        _print("</a>");
+-        // RK: Leave out args..
+-        //	if (rr.args != null) {
+-        //		_print("["+rr.args+"]");
+-        //	}
+-        _print(" ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The string-literal reference to generate
+-     */
+-    public void gen(StringLiteralElement atom) {
+-        if (atom.not) {
+-            _print("~");
+-        }
+-        _print(HTMLEncode(atom.atomText));
+-        _print(" ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-range reference to generate
+-     */
+-    public void gen(TokenRangeElement r) {
+-        print(r.beginText + ".." + r.endText + " ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-reference to generate
+-     */
+-    public void gen(TokenRefElement atom) {
+-        if (atom.not) {
+-            _print("~");
+-        }
+-        _print(atom.atomText);
+-        _print(" ");
+-    }
+-
+-    public void gen(TreeElement t) {
+-        print(t + " ");
+-    }
+-
+-    /** Generate the tree-walker TXT file */
+-    public void gen(TreeWalkerGrammar g) throws IOException {
+-        setGrammar(g);
+-        // Open the output stream for the parser and set the currentOutput
+-        antlrTool.reportProgress("Generating " + grammar.getClassName() + ".html");
+-        currentOutput = antlrTool.openOutputFile(grammar.getClassName() + ".html");
+-        //SAS: changed for proper text file io
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-
+-        // Output the user-defined parser premamble
+-        println("");
+-//		println("*** Tree-walker Preamble Action.");
+-//		println("This action will appear before the declaration of your tree-walker class:");
+-//		tabs++;
+-//		println(grammar.preambleAction.getText());
+-//		tabs--;
+-//		println("*** End of tree-walker Preamble Action");
+-
+-        // Generate tree-walker class definition
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(HTMLEncode(grammar.comment));
+-        }
+-
+-        println("Definition of tree parser " + grammar.getClassName() + ", which is a subclass of " + grammar.getSuperClass() + ".");
+-
+-        // Generate user-defined tree-walker class members
+-//		println("");
+-//		println("*** User-defined tree-walker class members:");
+-//		println("These are the member declarations that you defined for your class:");
+-//		tabs++;
+-//		printAction(grammar.classMemberAction.getText());
+-//		tabs--;
+-//		println("*** End of user-defined tree-walker class members");
+-
+-        // Generate code for each rule in the grammar
+-        println("");
+-//		println("*** tree-walker rules:");
+-        tabs++;
+-
+-        // Enumerate the tree-walker rules
+-        Enumeration rules = grammar.rules.elements();
+-        while (rules.hasMoreElements()) {
+-            println("");
+-            // Get the rules from the list and downcast it to proper type
+-            GrammarSymbol sym = (GrammarSymbol)rules.nextElement();
+-            // Only process tree-walker rules
+-            if (sym instanceof RuleSymbol) {
+-                genRule((RuleSymbol)sym);
+-            }
+-        }
+-        tabs--;
+-        println("");
+-//		println("*** End of tree-walker rules");
+-
+-//		println("");
+-//		println("*** End of tree-walker");
+-
+-        // Close the tree-walker output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate a wildcard element */
+-    public void gen(WildcardElement wc) {
+-        /*
+-		if ( wc.getLabel()!=null ) {
+-			_print(wc.getLabel()+"=");
+-		}
+-		*/
+-        _print(". ");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)* block to generate
+-     */
+-    public void gen(ZeroOrMoreBlock blk) {
+-        genGenericBlock(blk, "*");
+-    }
+-
+-    protected void genAlt(Alternative alt) {
+-        if (alt.getTreeSpecifier() != null) {
+-            _print(alt.getTreeSpecifier().getText());
+-        }
+-        prevAltElem = null;
+-        for (AlternativeElement elem = alt.head;
+-             !(elem instanceof BlockEndElement);
+-             elem = elem.next) {
+-            elem.generate();
+-            firstElementInAlt = false;
+-            prevAltElem = elem;
+-        }
+-    }
+-    /** Generate the header for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.  This generates any variable declarations,
+-     * init-actions, and syntactic-predicate-testing variables.
+-     * @blk The block for which the preamble is to be generated.
+-     */
+-//	protected void genBlockPreamble(AlternativeBlock blk) {
+-    // RK: don't dump out init actions
+-    // dump out init action
+-//		if ( blk.initAction!=null ) {
+-//			printAction("{" + blk.initAction + "}");
+-//		}
+-//	}
+-    /**Generate common code for a block of alternatives; return a postscript
+-     * that needs to be generated at the end of the block.  Other routines
+-     * may append else-clauses and such for error checking before the postfix
+-     * is generated.
+-     */
+-    public void genCommonBlock(AlternativeBlock blk) {
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            AlternativeElement elem = alt.head;
+-
+-            // dump alt operator |
+-            if (i > 0 && blk.alternatives.size() > 1) {
+-                _println("");
+-                print("|\t");
+-            }
+-
+-            // Dump the alternative, starting with predicates
+-            //
+-            boolean save = firstElementInAlt;
+-            firstElementInAlt = true;
+-            tabs++;	// in case we do a newline in alt, increase the tab indent
+-
+-            // RK: don't dump semantic/syntactic predicates
+-            // only obscures grammar.
+-            //
+-            // Dump semantic predicates
+-            //
+-            //	if (alt.semPred != null) {
+-            //		println("{" + alt.semPred + "}?");
+-            //	}
+-            // Dump syntactic predicate
+-            //	if (alt.synPred != null) {
+-            //		genSynPred(alt.synPred);
+-            //	}
+-            genAlt(alt);
+-            tabs--;
+-            firstElementInAlt = save;
+-        }
+-    }
+-
+-    /** Generate a textual representation of the follow set
+-     * for a block.
+-     * @param blk  The rule block of interest
+-     */
+-    public void genFollowSetForRuleBlock(RuleBlock blk) {
+-        Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, blk.endNode);
+-        printSet(grammar.maxk, 1, follow);
+-    }
+-
+-    protected void genGenericBlock(AlternativeBlock blk, String blkOp) {
+-        if (blk.alternatives.size() > 1) {
+-            // make sure we start on a new line
+-            if (!firstElementInAlt) {
+-                // only do newline if the last element wasn't a multi-line block
+-                if (prevAltElem == null ||
+-                    !(prevAltElem instanceof AlternativeBlock) ||
+-                    ((AlternativeBlock)prevAltElem).alternatives.size() == 1) {
+-                    _println("");
+-                    print("(\t");
+-                }
+-                else {
+-                    _print("(\t");
+-                }
+-                // _println("");
+-                // print("(\t");
+-            }
+-            else {
+-                _print("(\t");
+-            }
+-        }
+-        else {
+-            _print("( ");
+-        }
+-        // RK: don't dump init actions
+-        //	genBlockPreamble(blk);
+-        genCommonBlock(blk);
+-        if (blk.alternatives.size() > 1) {
+-            _println("");
+-            print(")" + blkOp + " ");
+-            // if not last element of alt, need newline & to indent
+-            if (!(blk.next instanceof BlockEndElement)) {
+-                _println("");
+-                print("");
+-            }
+-        }
+-        else {
+-            _print(")" + blkOp + " ");
+-        }
+-    }
+-
+-    /** Generate a header that is common to all TXT files */
+-    protected void genHeader() {
+-        println("<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">");
+-        println("<HTML>");
+-        println("<HEAD>");
+-        println("<TITLE>Grammar " + antlrTool.grammarFile + "</TITLE>");
+-        println("</HEAD>");
+-        println("<BODY>");
+-        println("<table summary=\"\" border=\"1\" cellpadding=\"5\">");
+-        println("<tr>");
+-        println("<td>");
+-        println("<font size=\"+2\">Grammar " + grammar.getClassName() + "</font><br>");
+-        println("<a href=\"http://www.ANTLR.org\">ANTLR</a>-generated HTML file from " + antlrTool.grammarFile);
+-        println("<p>");
+-        println("Terence Parr, <a href=\"http://www.magelang.com\">MageLang Institute</a>");
+-        println("<br>ANTLR Version " + antlrTool.version + "; 1989-1999");
+-        println("</td>");
+-        println("</tr>");
+-        println("</table>");
+-        println("<PRE>");
+-        // RK: see no reason for printing include files and stuff...
+-//		tabs++;
+-//		printAction(behavior.getHeaderAction(""));
+-//		tabs--;
+-    }
+-
+-    /**Generate the lookahead set for an alternate. */
+-    protected void genLookaheadSetForAlt(Alternative alt) {
+-        if (doingLexRules && alt.cache[1].containsEpsilon()) {
+-            println("MATCHES ALL");
+-            return;
+-        }
+-        int depth = alt.lookaheadDepth;
+-        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
+-            // if the decision is nondeterministic, do the best we can: LL(k)
+-            // any predicates that are around will be generated later.
+-            depth = grammar.maxk;
+-        }
+-        for (int i = 1; i <= depth; i++) {
+-            Lookahead lookahead = alt.cache[i];
+-            printSet(depth, i, lookahead);
+-        }
+-    }
+-
+-    /** Generate a textual representation of the lookahead set
+-     * for a block.
+-     * @param blk  The block of interest
+-     */
+-    public void genLookaheadSetForBlock(AlternativeBlock blk) {
+-        // Find the maximal lookahead depth over all alternatives
+-        int depth = 0;
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            if (alt.lookaheadDepth == GrammarAnalyzer.NONDETERMINISTIC) {
+-                depth = grammar.maxk;
+-                break;
+-            }
+-            else if (depth < alt.lookaheadDepth) {
+-                depth = alt.lookaheadDepth;
+-            }
+-        }
+-
+-        for (int i = 1; i <= depth; i++) {
+-            Lookahead lookahead = grammar.theLLkAnalyzer.look(i, blk);
+-            printSet(depth, i, lookahead);
+-        }
+-    }
+-
+-    /** Generate the nextToken rule.
+-     * nextToken is a synthetic lexer rule that is the implicit OR of all
+-     * user-defined lexer rules.
+-     */
+-    public void genNextToken() {
+-        println("");
+-        println("/** Lexer nextToken rule:");
+-        println(" *  The lexer nextToken rule is synthesized from all of the user-defined");
+-        println(" *  lexer rules.  It logically consists of one big alternative block with");
+-        println(" *  each user-defined rule being an alternative.");
+-        println(" */");
+-
+-        // Create the synthesized rule block for nextToken consisting
+-        // of an alternate block containing all the user-defined lexer rules.
+-        RuleBlock blk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
+-
+-        // Define the nextToken rule symbol
+-        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
+-        nextTokenRs.setDefined();
+-        nextTokenRs.setBlock(blk);
+-        nextTokenRs.access = "private";
+-        grammar.define(nextTokenRs);
+-
+-        /*
+-		// Analyze the synthesized block
+-		if (!grammar.theLLkAnalyzer.deterministic(blk))
+-		{
+-			println("The grammar analyzer has determined that the synthesized");
+-			println("nextToken rule is non-deterministic (i.e., it has ambiguities)");
+-			println("This means that there is some overlap of the character");
+-			println("lookahead for two or more of your lexer rules.");
+-		}
+-		*/
+-
+-        genCommonBlock(blk);
+-    }
+-
+-    /** Generate code for a named rule block
+-     * @param s The RuleSymbol describing the rule to generate
+-     */
+-    public void genRule(RuleSymbol s) {
+-        if (s == null || !s.isDefined()) return;	// undefined rule
+-        println("");
+-        if (s.comment != null) {
+-            _println(HTMLEncode(s.comment));
+-        }
+-        if (s.access.length() != 0) {
+-            if (!s.access.equals("public")) {
+-                _print(s.access + " ");
+-            }
+-        }
+-        _print("<a name=\"" + s.getId() + "\">");
+-        _print(s.getId());
+-        _print("</a>");
+-
+-        // Get rule return type and arguments
+-        RuleBlock rblk = s.getBlock();
+-
+-        // RK: for HTML output not of much value...
+-        // Gen method return value(s)
+-//		if (rblk.returnAction != null) {
+-//			_print("["+rblk.returnAction+"]");
+-//		}
+-        // Gen arguments
+-//		if (rblk.argAction != null)
+-//		{
+-//				_print(" returns [" + rblk.argAction+"]");
+-//		}
+-        _println("");
+-        tabs++;
+-        print(":\t");
+-
+-        // Dump any init-action
+-        // genBlockPreamble(rblk);
+-
+-        // Dump the alternates of the rule
+-        genCommonBlock(rblk);
+-
+-        _println("");
+-        println(";");
+-        tabs--;
+-    }
+-
+-    /** Generate the syntactic predicate.  This basically generates
+-     * the alternative block, buts tracks if we are inside a synPred
+-     * @param blk  The syntactic predicate block
+-     */
+-    protected void genSynPred(SynPredBlock blk) {
+-        syntacticPredLevel++;
+-        genGenericBlock(blk, " =>");
+-        syntacticPredLevel--;
+-    }
+-
+-    public void genTail() {
+-        println("</PRE>");
+-        println("</BODY>");
+-        println("</HTML>");
+-    }
+-
+-    /** Generate the token types TXT file */
+-    protected void genTokenTypes(TokenManager tm) throws IOException {
+-        // Open the token output TXT file and set the currentOutput stream
+-        antlrTool.reportProgress("Generating " + tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
+-        currentOutput = antlrTool.openOutputFile(tm.getName() + TokenTypesFileSuffix + TokenTypesFileExt);
+-        //SAS: changed for proper text file io
+-        tabs = 0;
+-
+-        // Generate the header common to all diagnostic files
+-        genHeader();
+-
+-        // Generate a string for each token.  This creates a static
+-        // array of Strings indexed by token type.
+-        println("");
+-        println("*** Tokens used by the parser");
+-        println("This is a list of the token numeric values and the corresponding");
+-        println("token identifiers.  Some tokens are literals, and because of that");
+-        println("they have no identifiers.  Literals are double-quoted.");
+-        tabs++;
+-
+-        // Enumerate all the valid token types
+-        Vector v = tm.getVocabulary();
+-        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-            String s = (String)v.elementAt(i);
+-            if (s != null) {
+-                println(s + " = " + i);
+-            }
+-        }
+-
+-        // Close the interface
+-        tabs--;
+-        println("*** End of tokens used by the parser");
+-
+-        // Close the tokens output file
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Get a string for an expression to generate creation of an AST subtree.
+-     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
+-     */
+-    public String getASTCreateString(Vector v) {
+-        return null;
+-    }
+-
+-    /** Get a string for an expression to generate creating of an AST node
+-     * @param str The arguments to the AST constructor
+-     */
+-    public String getASTCreateString(GrammarAtom atom, String str) {
+-        return null;
+-    }
+-
+-    /** Map an identifier to it's corresponding tree-node variable.
+-     * This is context-sensitive, depending on the rule and alternative
+-     * being generated
+-     * @param id The identifier name to map
+-     * @param forInput true if the input tree node variable is to be returned, otherwise the output variable is returned.
+-     */
+-    public String mapTreeId(String id, ActionTransInfo tInfo) {
+-        return id;
+-    }
+-
+-    /// unused.
+-    protected String processActionForSpecialSymbols(String actionStr,
+-                                                    int line,
+-                                                    RuleBlock currentRule,
+-                                                    ActionTransInfo tInfo) {
+-        return actionStr;
+-    }
+-
+-    /** Format a lookahead or follow set.
+-     * @param depth The depth of the entire lookahead/follow
+-     * @param k The lookahead level to print
+-     * @param lookahead  The lookahead/follow set to print
+-     */
+-    public void printSet(int depth, int k, Lookahead lookahead) {
+-        int numCols = 5;
+-
+-        int[] elems = lookahead.fset.toArray();
+-
+-        if (depth != 1) {
+-            print("k==" + k + ": {");
+-        }
+-        else {
+-            print("{ ");
+-        }
+-        if (elems.length > numCols) {
+-            _println("");
+-            tabs++;
+-            print("");
+-        }
+-
+-        int column = 0;
+-        for (int i = 0; i < elems.length; i++) {
+-            column++;
+-            if (column > numCols) {
+-                _println("");
+-                print("");
+-                column = 0;
+-            }
+-            if (doingLexRules) {
+-                _print(charFormatter.literalChar(elems[i]));
+-            }
+-            else {
+-                _print((String)grammar.tokenManager.getVocabulary().elementAt(elems[i]));
+-            }
+-            if (i != elems.length - 1) {
+-                _print(", ");
+-            }
+-        }
+-
+-        if (elems.length > numCols) {
+-            _println("");
+-            tabs--;
+-            print("");
+-        }
+-        _println(" }");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ImportVocabTokenManager.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ImportVocabTokenManager.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ImportVocabTokenManager.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ImportVocabTokenManager.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,100 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.*;
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-/** Static implementation of the TokenManager, used for importVocab option  */
+-class ImportVocabTokenManager extends SimpleTokenManager implements Cloneable {
+-    private String filename;
+-    protected Grammar grammar;
+-
+-    // FIXME: it would be nice if the path to the original grammar file was
+-    // also searched.
+-    ImportVocabTokenManager(Grammar grammar, String filename_, String name_, Tool tool_) {
+-        // initialize
+-        super(name_, tool_);
+-
+-        this.grammar = grammar;
+-        this.filename = filename_;
+-
+-        // Figure out exactly where the file lives.  Check $PWD first,
+-        // and then search in -o <output_dir>.
+-        //
+-        File grammarFile = new File(filename);
+-
+-        if (!grammarFile.exists()) {
+-            grammarFile = new File(antlrTool.getOutputDirectory(), filename);
+-
+-            if (!grammarFile.exists()) {
+-                antlrTool.panic("Cannot find importVocab file '" + filename + "'");
+-            }
+-        }
+-
+-        setReadOnly(true);
+-
+-        // Read a file with lines of the form ID=number
+-        try {
+-            Reader fileIn = new BufferedReader(new FileReader(grammarFile));
+-            ANTLRTokdefLexer tokdefLexer = new ANTLRTokdefLexer(fileIn);
+-            ANTLRTokdefParser tokdefParser = new ANTLRTokdefParser(tokdefLexer);
+-            tokdefParser.setTool(antlrTool);
+-            tokdefParser.setFilename(filename);
+-            tokdefParser.file(this);
+-        }
+-        catch (FileNotFoundException fnf) {
+-            antlrTool.panic("Cannot find importVocab file '" + filename + "'");
+-        }
+-        catch (RecognitionException ex) {
+-            antlrTool.panic("Error parsing importVocab file '" + filename + "': " + ex.toString());
+-        }
+-        catch (TokenStreamException ex) {
+-            antlrTool.panic("Error reading importVocab file '" + filename + "'");
+-        }
+-    }
+-
+-    public Object clone() {
+-        ImportVocabTokenManager tm;
+-        tm = (ImportVocabTokenManager)super.clone();
+-        tm.filename = this.filename;
+-        tm.grammar = this.grammar;
+-        return tm;
+-    }
+-
+-    /** define a token. */
+-    public void define(TokenSymbol ts) {
+-        super.define(ts);
+-    }
+-
+-    /** define a token.  Intended for use only when reading the importVocab file. */
+-    public void define(String s, int ttype) {
+-        TokenSymbol ts = null;
+-        if (s.startsWith("\"")) {
+-            ts = new StringLiteralSymbol(s);
+-        }
+-        else {
+-            ts = new TokenSymbol(s);
+-        }
+-        ts.setTokenType(ttype);
+-        super.define(ts);
+-        maxToken = (ttype + 1) > maxToken ? (ttype + 1) : maxToken;	// record maximum token type
+-    }
+-
+-    /** importVocab token manager is read-only if output would be same as input */
+-    public boolean isReadOnly() {
+-        return readOnly;
+-    }
+-
+-    /** Get the next unused token type. */
+-    public int nextTokenType() {
+-        return super.nextTokenType();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/InputBuffer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/InputBuffer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/InputBuffer.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/InputBuffer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,130 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-// SAS: Added this class to genericise the input buffers for scanners
+-//      This allows a scanner to use a binary (FileInputStream) or
+-//      text (FileReader) stream of data; the generated scanner
+-//      subclass will define the input stream
+-//      There are two subclasses to this: CharBuffer and ByteBuffer
+-
+-import java.io.IOException;
+-
+-/**A Stream of characters fed to the lexer from a InputStream that can
+- * be rewound via mark()/rewind() methods.
+- * <p>
+- * A dynamic array is used to buffer up all the input characters.  Normally,
+- * "k" characters are stored in the buffer.  More characters may be stored during
+- * guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
+- * Consumption of characters is deferred.  In other words, reading the next
+- * character is not done by conume(), but deferred until needed by LA or LT.
+- * <p>
+- *
+- * @see persistence.antlr.CharQueue
+- */
+-public abstract class InputBuffer {
+-    // Number of active markers
+-    protected int nMarkers = 0;
+-
+-    // Additional offset used when markers are active
+-    protected int markerOffset = 0;
+-
+-    // Number of calls to consume() since last LA() or LT() call
+-    protected int numToConsume = 0;
+-
+-    // Circular queue
+-    protected CharQueue queue;
+-
+-    /** Create an input buffer */
+-    public InputBuffer() {
+-        queue = new CharQueue(1);
+-    }
+-
+-    /** This method updates the state of the input buffer so that
+-     *  the text matched since the most recent mark() is no longer
+-     *  held by the buffer.  So, you either do a mark/rewind for
+-     *  failed predicate or mark/commit to keep on parsing without
+-     *  rewinding the input.
+-     */
+-    public void commit() {
+-        nMarkers--;
+-    }
+-
+-    /** Mark another character for deferred consumption */
+-    public void consume() {
+-        numToConsume++;
+-    }
+-
+-    /** Ensure that the input buffer is sufficiently full */
+-    public abstract void fill(int amount) throws CharStreamException;
+-
+-    public String getLAChars() {
+-        StringBuffer la = new StringBuffer();
+-        for (int i = markerOffset; i < queue.nbrEntries; i++)
+-            la.append(queue.elementAt(i));
+-        return la.toString();
+-    }
+-
+-    public String getMarkedChars() {
+-        StringBuffer marked = new StringBuffer();
+-        for (int i = 0; i < markerOffset; i++)
+-            marked.append(queue.elementAt(i));
+-        return marked.toString();
+-    }
+-
+-    public boolean isMarked() {
+-        return (nMarkers != 0);
+-    }
+-
+-    /** Get a lookahead character */
+-    public char LA(int i) throws CharStreamException {
+-        fill(i);
+-        return queue.elementAt(markerOffset + i - 1);
+-    }
+-
+-    /**Return an integer marker that can be used to rewind the buffer to
+-     * its current state.
+-     */
+-    public int mark() {
+-        syncConsume();
+-        nMarkers++;
+-        return markerOffset;
+-    }
+-
+-    /**Rewind the character buffer to a marker.
+-     * @param mark Marker returned previously from mark()
+-     */
+-    public void rewind(int mark) {
+-        syncConsume();
+-        markerOffset = mark;
+-        nMarkers--;
+-    }
+-
+-    /** Reset the input buffer
+-     */
+-    public void reset() {
+-        nMarkers = 0;
+-        markerOffset = 0;
+-        numToConsume = 0;
+-        queue.reset();
+-    }
+-
+-    /** Sync up deferred consumption */
+-    protected void syncConsume() {
+-        while (numToConsume > 0) {
+-            if (nMarkers > 0) {
+-                // guess mode -- leave leading characters and bump offset.
+-                markerOffset++;
+-            }
+-            else {
+-                // normal mode -- remove first character
+-                queue.removeFirst();
+-            }
+-            numToConsume--;
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/JavaBlockFinishingInfo.java glassfish-gil/entity-persistence/src/java/persistence/antlr/JavaBlockFinishingInfo.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/JavaBlockFinishingInfo.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/JavaBlockFinishingInfo.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,33 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class JavaBlockFinishingInfo {
+-    String postscript;		// what to generate to terminate block
+-    boolean generatedSwitch;// did block finish with "default:" of switch?
+-    boolean generatedAnIf;
+-
+-    /** When generating an if or switch, end-of-token lookahead sets
+-     *  will become the else or default clause, don't generate an
+-     *  error clause in this case.
+-     */
+-    boolean needAnErrorClause;
+-
+-
+-    public JavaBlockFinishingInfo() {
+-        postscript = null;
+-        generatedSwitch = generatedSwitch = false;
+-        needAnErrorClause = true;
+-    }
+-
+-    public JavaBlockFinishingInfo(String ps, boolean genS, boolean generatedAnIf, boolean n) {
+-        postscript = ps;
+-        generatedSwitch = genS;
+-        this.generatedAnIf = generatedAnIf;
+-        needAnErrorClause = n;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/JavaCharFormatter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/JavaCharFormatter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/JavaCharFormatter.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/JavaCharFormatter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,86 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class JavaCharFormatter implements CharFormatter {
+-
+-
+-    /** Given a character value, return a string representing the character
+-     * that can be embedded inside a string literal or character literal
+-     * This works for Java/C/C++ code-generation and languages with compatible
+-     * special-character-escapment.
+-     * Code-generators for languages should override this method.
+-     * @param c   The character of interest.
+-     * @param forCharLiteral  true to escape for char literal, false for string literal
+-     */
+-    public String escapeChar(int c, boolean forCharLiteral) {
+-        switch (c) {
+-            //		case GrammarAnalyzer.EPSILON_TYPE : return "<end-of-token>";
+-            case '\n':
+-                return "\\n";
+-            case '\t':
+-                return "\\t";
+-            case '\r':
+-                return "\\r";
+-            case '\\':
+-                return "\\\\";
+-            case '\'':
+-                return forCharLiteral ? "\\'" : "'";
+-            case '"':
+-                return forCharLiteral ? "\"" : "\\\"";
+-            default :
+-                if (c < ' ' || c > 126) {
+-                    if ((0x0000 <= c) && (c <= 0x000F)) {
+-                        return "\\u000" + Integer.toString(c, 16);
+-                    }
+-                    else if ((0x0010 <= c) && (c <= 0x00FF)) {
+-                        return "\\u00" + Integer.toString(c, 16);
+-                    }
+-                    else if ((0x0100 <= c) && (c <= 0x0FFF)) {
+-                        return "\\u0" + Integer.toString(c, 16);
+-                    }
+-                    else {
+-                        return "\\u" + Integer.toString(c, 16);
+-                    }
+-                }
+-                else {
+-                    return String.valueOf((char)c);
+-                }
+-        }
+-    }
+-
+-    /** Converts a String into a representation that can be use as a literal
+-     * when surrounded by double-quotes.
+-     * @param s The String to be changed into a literal
+-     */
+-    public String escapeString(String s) {
+-        String retval = new String();
+-        for (int i = 0; i < s.length(); i++) {
+-            retval += escapeChar(s.charAt(i), false);
+-        }
+-        return retval;
+-    }
+-
+-    /** Given a character value, return a string representing the character
+-     * literal that can be recognized by the target language compiler.
+-     * This works for languages that use single-quotes for character literals.
+-     * Code-generators for languages should override this method.
+-     * @param c   The character of interest.
+-     */
+-    public String literalChar(int c) {
+-        return "'" + escapeChar(c, true) + "'";
+-    }
+-
+-    /** Converts a String into a string literal
+-     * This works for languages that use double-quotes for string literals.
+-     * Code-generators for languages should override this method.
+-     * @param s The String to be changed into a literal
+-     */
+-    public String literalString(String s) {
+-        return "\"" + escapeString(s) + "\"";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/JavaCodeGenerator.java glassfish-gil/entity-persistence/src/java/persistence/antlr/JavaCodeGenerator.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/JavaCodeGenerator.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/JavaCodeGenerator.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,3484 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Enumeration;
+-import java.util.Hashtable;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-import java.io.PrintWriter; //SAS: changed for proper text file io
+-import java.io.IOException;
+-import java.io.FileWriter;
+-
+-/**Generate MyParser.java, MyLexer.java and MyParserTokenTypes.java */
+-public class JavaCodeGenerator extends CodeGenerator {
+-    // non-zero if inside syntactic predicate generation
+-    protected int syntacticPredLevel = 0;
+-
+-    // Are we generating ASTs (for parsers and tree parsers) right now?
+-    protected boolean genAST = false;
+-
+-    // Are we saving the text consumed (for lexers) right now?
+-    protected boolean saveText = false;
+-
+-    // Grammar parameters set up to handle different grammar classes.
+-    // These are used to get instanceof tests out of code generation
+-    String labeledElementType;
+-    String labeledElementASTType;
+-    String labeledElementInit;
+-    String commonExtraArgs;
+-    String commonExtraParams;
+-    String commonLocalVars;
+-    String lt1Value;
+-    String exceptionThrown;
+-    String throwNoViable;
+-
+-    /** Tracks the rule being generated.  Used for mapTreeId */
+-    RuleBlock currentRule;
+-
+-    /** Tracks the rule or labeled subrule being generated.  Used for
+-     AST generation. */
+-    String currentASTResult;
+-
+-    /** Mapping between the ids used in the current alt, and the
+-     * names of variables used to represent their AST values.
+-     */
+-    Hashtable treeVariableMap = new Hashtable();
+-
+-    /** Used to keep track of which AST variables have been defined in a rule
+-     * (except for the #rule_name and #rule_name_in var's
+-     */
+-    Hashtable declaredASTVariables = new Hashtable();
+-
+-    /* Count of unnamed generated variables */
+-    int astVarNumber = 1;
+-
+-    /** Special value used to mark duplicate in treeVariableMap */
+-    protected static final String NONUNIQUE = new String();
+-
+-    public static final int caseSizeThreshold = 127; // ascii is max
+-
+-    private Vector semPreds;
+-
+-    /** Create a Java code-generator using the given Grammar.
+-     * The caller must still call setTool, setBehavior, and setAnalyzer
+-     * before generating code.
+-     */
+-    public JavaCodeGenerator() {
+-        super();
+-        charFormatter = new JavaCharFormatter();
+-    }
+-
+-    /** Adds a semantic predicate string to the sem pred vector
+-     These strings will be used to build an array of sem pred names
+-     when building a debugging parser.  This method should only be
+-     called when the debug option is specified
+-     */
+-    protected int addSemPred(String predicate) {
+-        semPreds.appendElement(predicate);
+-        return semPreds.size() - 1;
+-    }
+-
+-    public void exitIfError() {
+-        if (antlrTool.hasError()) {
+-            antlrTool.fatalError("Exiting due to errors.");
+-        }
+-    }
+-
+-    /**Generate the parser, lexer, treeparser, and token types in Java */
+-    public void gen() {
+-        // Do the code generation
+-        try {
+-            // Loop over all grammars
+-            Enumeration grammarIter = behavior.grammars.elements();
+-            while (grammarIter.hasMoreElements()) {
+-                Grammar g = (Grammar)grammarIter.nextElement();
+-                // Connect all the components to each other
+-                g.setGrammarAnalyzer(analyzer);
+-                g.setCodeGenerator(this);
+-                analyzer.setGrammar(g);
+-                // To get right overloading behavior across hetrogeneous grammars
+-                setupGrammarParameters(g);
+-                g.generate();
+-                // print out the grammar with lookahead sets (and FOLLOWs)
+-                // System.out.print(g.toString());
+-                exitIfError();
+-            }
+-
+-            // Loop over all token managers (some of which are lexers)
+-            Enumeration tmIter = behavior.tokenManagers.elements();
+-            while (tmIter.hasMoreElements()) {
+-                TokenManager tm = (TokenManager)tmIter.nextElement();
+-                if (!tm.isReadOnly()) {
+-                    // Write the token manager tokens as Java
+-                    // this must appear before genTokenInterchange so that
+-                    // labels are set on string literals
+-                    genTokenTypes(tm);
+-                    // Write the token manager tokens as plain text
+-                    genTokenInterchange(tm);
+-                }
+-                exitIfError();
+-            }
+-        }
+-        catch (IOException e) {
+-            antlrTool.reportException(e, null);
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The {...} action to generate
+-     */
+-    public void gen(ActionElement action) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genAction(" + action + ")");
+-        if (action.isSemPred) {
+-            genSemPred(action.actionText, action.line);
+-        }
+-        else {
+-            if (grammar.hasSyntacticPredicate) {
+-                println("if ( inputState.guessing==0 ) {");
+-                tabs++;
+-            }
+-
+-			// get the name of the followSet for the current rule so that we
+-            // can replace $FOLLOW in the .g file.
+-            ActionTransInfo tInfo = new ActionTransInfo();
+-            String actionStr = processActionForSpecialSymbols(action.actionText,
+-															  action.getLine(),
+-															  currentRule,
+-															  tInfo);
+-
+-            if (tInfo.refRuleRoot != null) {
+-                // Somebody referenced "#rule", make sure translated var is valid
+-                // assignment to #rule is left as a ref also, meaning that assignments
+-                // with no other refs like "#rule = foo();" still forces this code to be
+-                // generated (unnecessarily).
+-                println(tInfo.refRuleRoot + " = (" + labeledElementASTType + ")currentAST.root;");
+-            }
+-
+-            // dump the translated action
+-            printAction(actionStr);
+-
+-            if (tInfo.assignToRoot) {
+-                // Somebody did a "#rule=", reset internal currentAST.root
+-                println("currentAST.root = " + tInfo.refRuleRoot + ";");
+-                // reset the child pointer too to be last sibling in sibling list
+-                println("currentAST.child = " + tInfo.refRuleRoot + "!=null &&" + tInfo.refRuleRoot + ".getFirstChild()!=null ?");
+-                tabs++;
+-                println(tInfo.refRuleRoot + ".getFirstChild() : " + tInfo.refRuleRoot + ";");
+-                tabs--;
+-                println("currentAST.advanceChildToEnd();");
+-            }
+-
+-            if (grammar.hasSyntacticPredicate) {
+-                tabs--;
+-                println("}");
+-            }
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The "x|y|z|..." block to generate
+-     */
+-    public void gen(AlternativeBlock blk) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("gen(" + blk + ")");
+-        println("{");
+-        genBlockPreamble(blk);
+-        genBlockInitAction(blk);
+-
+-        // Tell AST generation to build subrule result
+-        String saveCurrentASTResult = currentASTResult;
+-        if (blk.getLabel() != null) {
+-            currentASTResult = blk.getLabel();
+-        }
+-
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-        JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, true);
+-        genBlockFinish(howToFinish, throwNoViable);
+-
+-        println("}");
+-
+-        // Restore previous AST generation
+-        currentASTResult = saveCurrentASTResult;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The block-end element to generate.  Block-end
+-     * elements are synthesized by the grammar parser to represent
+-     * the end of a block.
+-     */
+-    public void gen(BlockEndElement end) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genRuleEnd(" + end + ")");
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character literal reference to generate
+-     */
+-    public void gen(CharLiteralElement atom) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genChar(" + atom + ")");
+-
+-        if (atom.getLabel() != null) {
+-            println(atom.getLabel() + " = " + lt1Value + ";");
+-        }
+-
+-        boolean oldsaveText = saveText;
+-        saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;
+-        genMatch(atom);
+-        saveText = oldsaveText;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The character-range reference to generate
+-     */
+-    public void gen(CharRangeElement r) {
+-        if (r.getLabel() != null && syntacticPredLevel == 0) {
+-            println(r.getLabel() + " = " + lt1Value + ";");
+-        }
+-        boolean flag = ( grammar instanceof LexerGrammar &&
+-            ( !saveText ||
+-            r.getAutoGenType() ==
+-            GrammarElement.AUTO_GEN_BANG ) );
+-        if (flag) {
+-            println("_saveIndex=text.length();");
+-        }
+-
+-        println("matchRange(" + r.beginText + "," + r.endText + ");");
+-
+-        if (flag) {
+-            println("text.setLength(_saveIndex);");
+-        }
+-    }
+-
+-    /** Generate the lexer Java file */
+-    public void gen(LexerGrammar g) throws IOException {
+-        // If debugging, create a new sempred vector for this grammar
+-        if (g.debuggingOutput)
+-            semPreds = new Vector();
+-
+-        setGrammar(g);
+-        if (!(grammar instanceof LexerGrammar)) {
+-            antlrTool.panic("Internal error generating lexer");
+-        }
+-
+-        // SAS: moved output creation to method so a subclass can change
+-        //      how the output is generated (for VAJ interface)
+-        setupOutput(grammar.getClassName());
+-
+-        genAST = false;	// no way to gen trees.
+-        saveText = true;	// save consumed characters.
+-
+-        tabs = 0;
+-
+-        // Generate header common to all Java output files
+-        genHeader();
+-        // Do not use printAction because we assume tabs==0
+-        println(behavior.getHeaderAction(""));
+-
+-        // Generate header specific to lexer Java file
+-        // println("import java.io.FileInputStream;");
+-        println("import java.io.InputStream;");
+-        println("import persistence.antlr.TokenStreamException;");
+-        println("import persistence.antlr.TokenStreamIOException;");
+-        println("import persistence.antlr.TokenStreamRecognitionException;");
+-        println("import persistence.antlr.CharStreamException;");
+-        println("import persistence.antlr.CharStreamIOException;");
+-        println("import persistence.antlr.ANTLRException;");
+-        println("import java.io.Reader;");
+-        println("import java.util.Hashtable;");
+-        println("import persistence.antlr." + grammar.getSuperClass() + ";");
+-        println("import persistence.antlr.InputBuffer;");
+-        println("import persistence.antlr.ByteBuffer;");
+-        println("import persistence.antlr.CharBuffer;");
+-        println("import persistence.antlr.Token;");
+-        println("import persistence.antlr.CommonToken;");
+-        println("import persistence.antlr.RecognitionException;");
+-        println("import persistence.antlr.NoViableAltForCharException;");
+-        println("import persistence.antlr.MismatchedCharException;");
+-        println("import persistence.antlr.TokenStream;");
+-        println("import persistence.antlr.ANTLRHashString;");
+-        println("import persistence.antlr.LexerSharedInputState;");
+-        println("import persistence.antlr.collections.impl.BitSet;");
+-        println("import persistence.antlr.SemanticException;");
+-
+-        // Generate user-defined lexer file preamble
+-        println(grammar.preambleAction.getText());
+-
+-        // Generate lexer class definition
+-        String sup = null;
+-        if (grammar.superClass != null) {
+-            sup = grammar.superClass;
+-        }
+-        else {
+-            sup = "persistence.antlr." + grammar.getSuperClass();
+-        }
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(grammar.comment);
+-        }
+-
+-		// get prefix (replaces "public" and lets user specify)
+-		String prefix = "public";
+-		Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
+-		if (tprefix != null) {
+-			String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
+-			if (p != null) {
+-				prefix = p;
+-			}
+-		}
+-
+-		print(prefix+" ");
+-		print("class " + grammar.getClassName() + " extends " + sup);
+-		println(" implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix + ", TokenStream");
+-		Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-        if (tsuffix != null) {
+-            String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-            if (suffix != null) {
+-                print(", " + suffix);	// must be an interface name for Java
+-            }
+-        }
+-        println(" {");
+-
+-        // Generate user-defined lexer class members
+-        print(
+-            processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
+-        );
+-
+-        //
+-        // Generate the constructor from InputStream, which in turn
+-        // calls the ByteBuffer constructor
+-        //
+-        println("public " + grammar.getClassName() + "(InputStream in) {");
+-        tabs++;
+-        println("this(new ByteBuffer(in));");
+-        tabs--;
+-        println("}");
+-
+-        //
+-        // Generate the constructor from Reader, which in turn
+-        // calls the CharBuffer constructor
+-        //
+-        println("public " + grammar.getClassName() + "(Reader in) {");
+-        tabs++;
+-        println("this(new CharBuffer(in));");
+-        tabs--;
+-        println("}");
+-
+-        println("public " + grammar.getClassName() + "(InputBuffer ib) {");
+-        tabs++;
+-        // if debugging, wrap the input buffer in a debugger
+-        if (grammar.debuggingOutput)
+-            println("this(new LexerSharedInputState(new persistence.antlr.debug.DebuggingInputBuffer(ib)));");
+-        else
+-            println("this(new LexerSharedInputState(ib));");
+-        tabs--;
+-        println("}");
+-
+-        //
+-        // Generate the constructor from InputBuffer (char or byte)
+-        //
+-        println("public " + grammar.getClassName() + "(LexerSharedInputState state) {");
+-        tabs++;
+-
+-        println("super(state);");
+-        // if debugging, set up array variables and call user-overridable
+-        //   debugging setup method
+-        if (grammar.debuggingOutput) {
+-            println("  ruleNames  = _ruleNames;");
+-            println("  semPredNames = _semPredNames;");
+-            println("  setupDebugging();");
+-        }
+-
+-        // Generate the setting of various generated options.
+-        // These need to be before the literals since ANTLRHashString depends on
+-        // the casesensitive stuff.
+-        println("caseSensitiveLiterals = " + g.caseSensitiveLiterals + ";");
+-        println("setCaseSensitive(" + g.caseSensitive + ");");
+-
+-        // Generate the initialization of a hashtable
+-        // containing the string literals used in the lexer
+-        // The literals variable itself is in CharScanner
+-        println("literals = new Hashtable();");
+-        Enumeration keys = grammar.tokenManager.getTokenSymbolKeys();
+-        while (keys.hasMoreElements()) {
+-            String key = (String)keys.nextElement();
+-            if (key.charAt(0) != '"') {
+-                continue;
+-            }
+-            TokenSymbol sym = grammar.tokenManager.getTokenSymbol(key);
+-            if (sym instanceof StringLiteralSymbol) {
+-                StringLiteralSymbol s = (StringLiteralSymbol)sym;
+-                println("literals.put(new ANTLRHashString(" + s.getId() + ", this), new Integer(" + s.getTokenType() + "));");
+-            }
+-        }
+-        tabs--;
+-
+-        Enumeration ids;
+-        println("}");
+-
+-        // generate the rule name array for debugging
+-        if (grammar.debuggingOutput) {
+-            println("private static final String _ruleNames[] = {");
+-
+-            ids = grammar.rules.elements();
+-            int ruleNum = 0;
+-            while (ids.hasMoreElements()) {
+-                GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-                if (sym instanceof RuleSymbol)
+-                    println("  \"" + ((RuleSymbol)sym).getId() + "\",");
+-            }
+-            println("};");
+-        }
+-
+-        // Generate nextToken() rule.
+-        // nextToken() is a synthetic lexer rule that is the implicit OR of all
+-        // user-defined lexer rules.
+-        genNextToken();
+-
+-        // Generate code for each rule in the lexer
+-        ids = grammar.rules.elements();
+-        int ruleNum = 0;
+-        while (ids.hasMoreElements()) {
+-            RuleSymbol sym = (RuleSymbol)ids.nextElement();
+-            // Don't generate the synthetic rules
+-            if (!sym.getId().equals("mnextToken")) {
+-                genRule(sym, false, ruleNum++);
+-            }
+-            exitIfError();
+-        }
+-
+-        // Generate the semantic predicate map for debugging
+-        if (grammar.debuggingOutput)
+-            genSemPredMap();
+-
+-        // Generate the bitsets used throughout the lexer
+-        genBitsets(bitsetsUsed, ((LexerGrammar)grammar).charVocabulary.size());
+-
+-        println("");
+-        println("}");
+-
+-        // Close the lexer output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)+ block to generate
+-     */
+-    public void gen(OneOrMoreBlock blk) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("gen+(" + blk + ")");
+-        String label;
+-        String cnt;
+-        println("{");
+-        genBlockPreamble(blk);
+-        if (blk.getLabel() != null) {
+-            cnt = "_cnt_" + blk.getLabel();
+-        }
+-        else {
+-            cnt = "_cnt" + blk.ID;
+-        }
+-        println("int " + cnt + "=0;");
+-        if (blk.getLabel() != null) {
+-            label = blk.getLabel();
+-        }
+-        else {
+-            label = "_loop" + blk.ID;
+-        }
+-        println(label + ":");
+-        println("do {");
+-        tabs++;
+-        // generate the init action for ()+ ()* inside the loop
+-        // this allows us to do usefull EOF checking...
+-        genBlockInitAction(blk);
+-
+-        // Tell AST generation to build subrule result
+-        String saveCurrentASTResult = currentASTResult;
+-        if (blk.getLabel() != null) {
+-            currentASTResult = blk.getLabel();
+-        }
+-
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-        // generate exit test if greedy set to false
+-        // and an alt is ambiguous with exit branch
+-        // or when lookahead derived purely from end-of-file
+-        // Lookahead analysis stops when end-of-file is hit,
+-        // returning set {epsilon}.  Since {epsilon} is not
+-        // ambig with any real tokens, no error is reported
+-        // by deterministic() routines and we have to check
+-        // for the case where the lookahead depth didn't get
+-        // set to NONDETERMINISTIC (this only happens when the
+-        // FOLLOW contains real atoms + epsilon).
+-        boolean generateNonGreedyExitPath = false;
+-        int nonGreedyExitDepth = grammar.maxk;
+-
+-        if (!blk.greedy &&
+-            blk.exitLookaheadDepth <= grammar.maxk &&
+-            blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) {
+-            generateNonGreedyExitPath = true;
+-            nonGreedyExitDepth = blk.exitLookaheadDepth;
+-        }
+-        else if (!blk.greedy &&
+-            blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) {
+-            generateNonGreedyExitPath = true;
+-        }
+-
+-        // generate exit test if greedy set to false
+-        // and an alt is ambiguous with exit branch
+-        if (generateNonGreedyExitPath) {
+-            if (DEBUG_CODE_GENERATOR) {
+-                System.out.println("nongreedy (...)+ loop; exit depth is " +
+-                                   blk.exitLookaheadDepth);
+-            }
+-            String predictExit =
+-                getLookaheadTestExpression(blk.exitCache,
+-                                           nonGreedyExitDepth);
+-            println("// nongreedy exit test");
+-            println("if ( " + cnt + ">=1 && " + predictExit + ") break " + label + ";");
+-        }
+-
+-        JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
+-        genBlockFinish(
+-            howToFinish,
+-            "if ( " + cnt + ">=1 ) { break " + label + "; } else {" + throwNoViable + "}"
+-        );
+-
+-        println(cnt + "++;");
+-        tabs--;
+-        println("} while (true);");
+-        println("}");
+-
+-        // Restore previous AST generation
+-        currentASTResult = saveCurrentASTResult;
+-    }
+-
+-    /** Generate the parser Java file */
+-    public void gen(ParserGrammar g) throws IOException {
+-
+-        // if debugging, set up a new vector to keep track of sempred
+-        //   strings for this grammar
+-        if (g.debuggingOutput)
+-            semPreds = new Vector();
+-
+-        setGrammar(g);
+-        if (!(grammar instanceof ParserGrammar)) {
+-            antlrTool.panic("Internal error generating parser");
+-        }
+-
+-        // Open the output stream for the parser and set the currentOutput
+-        // SAS: moved file setup so subclass could do it (for VAJ interface)
+-        setupOutput(grammar.getClassName());
+-
+-        genAST = grammar.buildAST;
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-        // Do not use printAction because we assume tabs==0
+-        println(behavior.getHeaderAction(""));
+-
+-        // Generate header for the parser
+-        println("import persistence.antlr.TokenBuffer;");
+-        println("import persistence.antlr.TokenStreamException;");
+-        println("import persistence.antlr.TokenStreamIOException;");
+-        println("import persistence.antlr.ANTLRException;");
+-        println("import persistence.antlr." + grammar.getSuperClass() + ";");
+-        println("import persistence.antlr.Token;");
+-        println("import persistence.antlr.TokenStream;");
+-        println("import persistence.antlr.RecognitionException;");
+-        println("import persistence.antlr.NoViableAltException;");
+-        println("import persistence.antlr.MismatchedTokenException;");
+-        println("import persistence.antlr.SemanticException;");
+-        println("import persistence.antlr.ParserSharedInputState;");
+-        println("import persistence.antlr.collections.impl.BitSet;");
+-        if ( genAST ) {
+-			println("import persistence.antlr.collections.AST;");
+-			println("import java.util.Hashtable;");
+-			println("import persistence.antlr.ASTFactory;");
+-            println("import persistence.antlr.ASTPair;");
+-            println("import persistence.antlr.collections.impl.ASTArray;");
+-        }
+-
+-        // Output the user-defined parser preamble
+-        println(grammar.preambleAction.getText());
+-
+-        // Generate parser class definition
+-        String sup = null;
+-        if (grammar.superClass != null)
+-            sup = grammar.superClass;
+-        else
+-            sup = "persistence.antlr." + grammar.getSuperClass();
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(grammar.comment);
+-        }
+-
+-		// get prefix (replaces "public" and lets user specify)
+-		String prefix = "public";
+-		Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
+-		if (tprefix != null) {
+-			String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
+-			if (p != null) {
+-				prefix = p;
+-			}
+-		}
+-
+-		print(prefix+" ");
+-		print("class " + grammar.getClassName() + " extends " + sup);
+-        println("       implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
+-
+-        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-        if (tsuffix != null) {
+-            String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-            if (suffix != null)
+-                print(", " + suffix);	// must be an interface name for Java
+-        }
+-        println(" {");
+-
+-        // set up an array of all the rule names so the debugger can
+-        // keep track of them only by number -- less to store in tree...
+-        if (grammar.debuggingOutput) {
+-            println("private static final String _ruleNames[] = {");
+-
+-            Enumeration ids = grammar.rules.elements();
+-            int ruleNum = 0;
+-            while (ids.hasMoreElements()) {
+-                GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-                if (sym instanceof RuleSymbol)
+-                    println("  \"" + ((RuleSymbol)sym).getId() + "\",");
+-            }
+-            println("};");
+-        }
+-
+-        // Generate user-defined parser class members
+-        print(
+-            processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
+-        );
+-
+-        // Generate parser class constructor from TokenBuffer
+-        println("");
+-        println("protected " + grammar.getClassName() + "(TokenBuffer tokenBuf, int k) {");
+-        println("  super(tokenBuf,k);");
+-        println("  tokenNames = _tokenNames;");
+-        // if debugging, set up arrays and call the user-overridable
+-        //   debugging setup method
+-        if (grammar.debuggingOutput) {
+-            println("  ruleNames  = _ruleNames;");
+-            println("  semPredNames = _semPredNames;");
+-            println("  setupDebugging(tokenBuf);");
+-        }
+-		if ( grammar.buildAST ) {
+-			println("  buildTokenTypeASTClassMap();");
+-			println("  astFactory = new ASTFactory(getTokenTypeToASTClassMap());");
+-		}
+-        println("}");
+-        println("");
+-
+-        println("public " + grammar.getClassName() + "(TokenBuffer tokenBuf) {");
+-        println("  this(tokenBuf," + grammar.maxk + ");");
+-        println("}");
+-        println("");
+-
+-        // Generate parser class constructor from TokenStream
+-        println("protected " + grammar.getClassName() + "(TokenStream lexer, int k) {");
+-        println("  super(lexer,k);");
+-        println("  tokenNames = _tokenNames;");
+-
+-        // if debugging, set up arrays and call the user-overridable
+-        //   debugging setup method
+-        if (grammar.debuggingOutput) {
+-            println("  ruleNames  = _ruleNames;");
+-            println("  semPredNames = _semPredNames;");
+-            println("  setupDebugging(lexer);");
+-        }
+-		if ( grammar.buildAST ) {
+-			println("  buildTokenTypeASTClassMap();");
+-			println("  astFactory = new ASTFactory(getTokenTypeToASTClassMap());");
+-		}
+-        println("}");
+-        println("");
+-
+-        println("public " + grammar.getClassName() + "(TokenStream lexer) {");
+-        println("  this(lexer," + grammar.maxk + ");");
+-        println("}");
+-        println("");
+-
+-        println("public " + grammar.getClassName() + "(ParserSharedInputState state) {");
+-        println("  super(state," + grammar.maxk + ");");
+-        println("  tokenNames = _tokenNames;");
+-		if ( grammar.buildAST ) {
+-			println("  buildTokenTypeASTClassMap();");
+-			println("  astFactory = new ASTFactory(getTokenTypeToASTClassMap());");
+-		}
+-        println("}");
+-        println("");
+-
+-        // Generate code for each rule in the grammar
+-        Enumeration ids = grammar.rules.elements();
+-        int ruleNum = 0;
+-        while (ids.hasMoreElements()) {
+-            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-            if (sym instanceof RuleSymbol) {
+-                RuleSymbol rs = (RuleSymbol)sym;
+-                genRule(rs, rs.references.size() == 0, ruleNum++);
+-            }
+-            exitIfError();
+-        }
+-
+-        // Generate the token names
+-        genTokenStrings();
+-
+-		if ( grammar.buildAST ) {
+-			genTokenASTNodeMap();
+-		}
+-
+-        // Generate the bitsets used throughout the grammar
+-        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
+-
+-        // Generate the semantic predicate map for debugging
+-        if (grammar.debuggingOutput)
+-            genSemPredMap();
+-
+-        // Close class definition
+-        println("");
+-        println("}");
+-
+-        // Close the parser output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The rule-reference to generate
+-     */
+-    public void gen(RuleRefElement rr) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genRR(" + rr + ")");
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-        if (rs == null || !rs.isDefined()) {
+-            // Is this redundant???
+-            antlrTool.error("Rule '" + rr.targetRule + "' is not defined", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            return;
+-        }
+-        if (!(rs instanceof RuleSymbol)) {
+-            // Is this redundant???
+-            antlrTool.error("'" + rr.targetRule + "' does not name a grammar rule", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            return;
+-        }
+-
+-        genErrorTryForElement(rr);
+-
+-        // AST value for labeled rule refs in tree walker.
+-        // This is not AST construction;  it is just the input tree node value.
+-        if (grammar instanceof TreeWalkerGrammar &&
+-            rr.getLabel() != null &&
+-            syntacticPredLevel == 0) {
+-            println(rr.getLabel() + " = _t==ASTNULL ? null : " + lt1Value + ";");
+-        }
+-
+-        // if in lexer and ! on rule ref or alt or rule, save buffer index to kill later
+-        if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
+-            println("_saveIndex=text.length();");
+-        }
+-
+-        // Process return value assignment if any
+-        printTabs();
+-        if (rr.idAssign != null) {
+-            // Warn if the rule has no return type
+-            if (rs.block.returnAction == null) {
+-                antlrTool.warning("Rule '" + rr.targetRule + "' has no return type", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            }
+-            _print(rr.idAssign + "=");
+-        }
+-        else {
+-            // Warn about return value if any, but not inside syntactic predicate
+-            if (!(grammar instanceof LexerGrammar) && syntacticPredLevel == 0 && rs.block.returnAction != null) {
+-                antlrTool.warning("Rule '" + rr.targetRule + "' returns a value", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            }
+-        }
+-
+-        // Call the rule
+-        GenRuleInvocation(rr);
+-
+-        // if in lexer and ! on element or alt or rule, save buffer index to kill later
+-        if (grammar instanceof LexerGrammar && (!saveText || rr.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
+-            println("text.setLength(_saveIndex);");
+-        }
+-
+-        // if not in a syntactic predicate
+-        if (syntacticPredLevel == 0) {
+-            boolean doNoGuessTest = (
+-                grammar.hasSyntacticPredicate &&
+-                (
+-                grammar.buildAST && rr.getLabel() != null ||
+-                (genAST && rr.getAutoGenType() == GrammarElement.AUTO_GEN_NONE)
+-                )
+-                );
+-            if (doNoGuessTest) {
+-                // println("if (inputState.guessing==0) {");
+-                // tabs++;
+-            }
+-
+-            if (grammar.buildAST && rr.getLabel() != null) {
+-                // always gen variable for rule return on labeled rules
+-                println(rr.getLabel() + "_AST = (" + labeledElementASTType + ")returnAST;");
+-            }
+-            if (genAST) {
+-                switch (rr.getAutoGenType()) {
+-                    case GrammarElement.AUTO_GEN_NONE:
+-                        // println("theASTFactory.addASTChild(currentAST, returnAST);");
+-                        println("astFactory.addASTChild(currentAST, returnAST);");
+-                        break;
+-                    case GrammarElement.AUTO_GEN_CARET:
+-                        antlrTool.error("Internal: encountered ^ after rule reference");
+-                        break;
+-                    default:
+-                        break;
+-                }
+-            }
+-
+-            // if a lexer and labeled, Token label defined at rule level, just set it here
+-            if (grammar instanceof LexerGrammar && rr.getLabel() != null) {
+-                println(rr.getLabel() + "=_returnToken;");
+-            }
+-
+-            if (doNoGuessTest) {
+-                // tabs--;
+-                // println("}");
+-            }
+-        }
+-        genErrorCatchForElement(rr);
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The string-literal reference to generate
+-     */
+-    public void gen(StringLiteralElement atom) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genString(" + atom + ")");
+-
+-        // Variable declarations for labeled elements
+-        if (atom.getLabel() != null && syntacticPredLevel == 0) {
+-            println(atom.getLabel() + " = " + lt1Value + ";");
+-        }
+-
+-        // AST
+-        genElementAST(atom);
+-
+-        // is there a bang on the literal?
+-        boolean oldsaveText = saveText;
+-        saveText = saveText && atom.getAutoGenType() == GrammarElement.AUTO_GEN_NONE;
+-
+-        // matching
+-        genMatch(atom);
+-
+-        saveText = oldsaveText;
+-
+-        // tack on tree cursor motion if doing a tree walker
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("_t = _t.getNextSibling();");
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-range reference to generate
+-     */
+-    public void gen(TokenRangeElement r) {
+-        genErrorTryForElement(r);
+-        if (r.getLabel() != null && syntacticPredLevel == 0) {
+-            println(r.getLabel() + " = " + lt1Value + ";");
+-        }
+-
+-        // AST
+-        genElementAST(r);
+-
+-        // match
+-        println("matchRange(" + r.beginText + "," + r.endText + ");");
+-        genErrorCatchForElement(r);
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The token-reference to generate
+-     */
+-    public void gen(TokenRefElement atom) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genTokenRef(" + atom + ")");
+-        if (grammar instanceof LexerGrammar) {
+-            antlrTool.panic("Token reference found in lexer");
+-        }
+-        genErrorTryForElement(atom);
+-        // Assign Token value to token label variable
+-        if (atom.getLabel() != null && syntacticPredLevel == 0) {
+-            println(atom.getLabel() + " = " + lt1Value + ";");
+-        }
+-
+-        // AST
+-        genElementAST(atom);
+-        // matching
+-        genMatch(atom);
+-        genErrorCatchForElement(atom);
+-
+-        // tack on tree cursor motion if doing a tree walker
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("_t = _t.getNextSibling();");
+-        }
+-    }
+-
+-    public void gen(TreeElement t) {
+-        // save AST cursor
+-        println("AST __t" + t.ID + " = _t;");
+-
+-        // If there is a label on the root, then assign that to the variable
+-        if (t.root.getLabel() != null) {
+-            println(t.root.getLabel() + " = _t==ASTNULL ? null :(" + labeledElementASTType + ")_t;");
+-        }
+-
+-        // check for invalid modifiers ! and ^ on tree element roots
+-        if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_BANG ) {
+-            antlrTool.error("Suffixing a root node with '!' is not implemented",
+-                         grammar.getFilename(), t.getLine(), t.getColumn());
+-            t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
+-        }
+-        if ( t.root.getAutoGenType() == GrammarElement.AUTO_GEN_CARET ) {
+-            antlrTool.warning("Suffixing a root node with '^' is redundant; already a root",
+-                         grammar.getFilename(), t.getLine(), t.getColumn());
+-            t.root.setAutoGenType(GrammarElement.AUTO_GEN_NONE);
+-        }
+-
+-        // Generate AST variables
+-        genElementAST(t.root);
+-        if (grammar.buildAST) {
+-            // Save the AST construction state
+-            println("ASTPair __currentAST" + t.ID + " = currentAST.copy();");
+-            // Make the next item added a child of the TreeElement root
+-            println("currentAST.root = currentAST.child;");
+-            println("currentAST.child = null;");
+-        }
+-
+-        // match root
+-        if ( t.root instanceof WildcardElement ) {
+-            println("if ( _t==null ) throw new MismatchedTokenException();");
+-        }
+-        else {
+-            genMatch(t.root);
+-        }
+-        // move to list of children
+-        println("_t = _t.getFirstChild();");
+-
+-        // walk list of children, generating code for each
+-        for (int i = 0; i < t.getAlternatives().size(); i++) {
+-            Alternative a = t.getAlternativeAt(i);
+-            AlternativeElement e = a.head;
+-            while (e != null) {
+-                e.generate();
+-                e = e.next;
+-            }
+-        }
+-
+-        if (grammar.buildAST) {
+-            // restore the AST construction state to that just after the
+-            // tree root was added
+-            println("currentAST = __currentAST" + t.ID + ";");
+-        }
+-        // restore AST cursor
+-        println("_t = __t" + t.ID + ";");
+-        // move cursor to sibling of tree just parsed
+-        println("_t = _t.getNextSibling();");
+-    }
+-
+-    /** Generate the tree-parser Java file */
+-    public void gen(TreeWalkerGrammar g) throws IOException {
+-        // SAS: debugging stuff removed for now...
+-        setGrammar(g);
+-        if (!(grammar instanceof TreeWalkerGrammar)) {
+-            antlrTool.panic("Internal error generating tree-walker");
+-        }
+-        // Open the output stream for the parser and set the currentOutput
+-        // SAS: move file open to method so subclass can override it
+-        //      (mainly for VAJ interface)
+-        setupOutput(grammar.getClassName());
+-
+-        genAST = grammar.buildAST;
+-        tabs = 0;
+-
+-        // Generate the header common to all output files.
+-        genHeader();
+-        // Do not use printAction because we assume tabs==0
+-        println(behavior.getHeaderAction(""));
+-
+-        // Generate header for the parser
+-        println("import persistence.antlr." + grammar.getSuperClass() + ";");
+-        println("import persistence.antlr.Token;");
+-        println("import persistence.antlr.collections.AST;");
+-        println("import persistence.antlr.RecognitionException;");
+-        println("import persistence.antlr.ANTLRException;");
+-        println("import persistence.antlr.NoViableAltException;");
+-        println("import persistence.antlr.MismatchedTokenException;");
+-        println("import persistence.antlr.SemanticException;");
+-        println("import persistence.antlr.collections.impl.BitSet;");
+-        println("import persistence.antlr.ASTPair;");
+-        println("import persistence.antlr.collections.impl.ASTArray;");
+-
+-        // Output the user-defined parser premamble
+-        println(grammar.preambleAction.getText());
+-
+-        // Generate parser class definition
+-        String sup = null;
+-        if (grammar.superClass != null) {
+-            sup = grammar.superClass;
+-        }
+-        else {
+-            sup = "persistence.antlr." + grammar.getSuperClass();
+-        }
+-        println("");
+-
+-        // print javadoc comment if any
+-        if (grammar.comment != null) {
+-            _println(grammar.comment);
+-        }
+-
+-		// get prefix (replaces "public" and lets user specify)
+-		String prefix = "public";
+-		Token tprefix = (Token)grammar.options.get("classHeaderPrefix");
+-		if (tprefix != null) {
+-			String p = StringUtils.stripFrontBack(tprefix.getText(), "\"", "\"");
+-			if (p != null) {
+-				prefix = p;
+-			}
+-		}
+-
+-		print(prefix+" ");
+-		print("class " + grammar.getClassName() + " extends " + sup);
+-        println("       implements " + grammar.tokenManager.getName() + TokenTypesFileSuffix);
+-        Token tsuffix = (Token)grammar.options.get("classHeaderSuffix");
+-        if (tsuffix != null) {
+-            String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-            if (suffix != null) {
+-                print(", " + suffix);	// must be an interface name for Java
+-            }
+-        }
+-        println(" {");
+-
+-        // Generate user-defined parser class members
+-        print(
+-            processActionForSpecialSymbols(grammar.classMemberAction.getText(), grammar.classMemberAction.getLine(), currentRule, null)
+-        );
+-
+-        // Generate default parser class constructor
+-        println("public " + grammar.getClassName() + "() {");
+-        tabs++;
+-        println("tokenNames = _tokenNames;");
+-        tabs--;
+-        println("}");
+-        println("");
+-
+-        // Generate code for each rule in the grammar
+-        Enumeration ids = grammar.rules.elements();
+-        int ruleNum = 0;
+-        String ruleNameInits = "";
+-        while (ids.hasMoreElements()) {
+-            GrammarSymbol sym = (GrammarSymbol)ids.nextElement();
+-            if (sym instanceof RuleSymbol) {
+-                RuleSymbol rs = (RuleSymbol)sym;
+-                genRule(rs, rs.references.size() == 0, ruleNum++);
+-            }
+-            exitIfError();
+-        }
+-
+-        // Generate the token names
+-        genTokenStrings();
+-
+-        // Generate the bitsets used throughout the grammar
+-        genBitsets(bitsetsUsed, grammar.tokenManager.maxTokenType());
+-
+-        // Close class definition
+-        println("}");
+-        println("");
+-
+-        // Close the parser output stream
+-        currentOutput.close();
+-        currentOutput = null;
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param wc The wildcard element to generate
+-     */
+-    public void gen(WildcardElement wc) {
+-        // Variable assignment for labeled elements
+-        if (wc.getLabel() != null && syntacticPredLevel == 0) {
+-            println(wc.getLabel() + " = " + lt1Value + ";");
+-        }
+-
+-        // AST
+-        genElementAST(wc);
+-        // Match anything but EOF
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("if ( _t==null ) throw new MismatchedTokenException();");
+-        }
+-        else if (grammar instanceof LexerGrammar) {
+-            if (grammar instanceof LexerGrammar &&
+-                (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
+-                println("_saveIndex=text.length();");
+-            }
+-            println("matchNot(EOF_CHAR);");
+-            if (grammar instanceof LexerGrammar &&
+-                (!saveText || wc.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
+-                println("text.setLength(_saveIndex);"); // kill text atom put in buffer
+-            }
+-        }
+-        else {
+-            println("matchNot(" + getValueString(Token.EOF_TYPE) + ");");
+-        }
+-
+-        // tack on tree cursor motion if doing a tree walker
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("_t = _t.getNextSibling();");
+-        }
+-    }
+-
+-    /** Generate code for the given grammar element.
+-     * @param blk The (...)* block to generate
+-     */
+-    public void gen(ZeroOrMoreBlock blk) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("gen*(" + blk + ")");
+-        println("{");
+-        genBlockPreamble(blk);
+-        String label;
+-        if (blk.getLabel() != null) {
+-            label = blk.getLabel();
+-        }
+-        else {
+-            label = "_loop" + blk.ID;
+-        }
+-        println(label + ":");
+-        println("do {");
+-        tabs++;
+-        // generate the init action for ()* inside the loop
+-        // this allows us to do usefull EOF checking...
+-        genBlockInitAction(blk);
+-
+-        // Tell AST generation to build subrule result
+-        String saveCurrentASTResult = currentASTResult;
+-        if (blk.getLabel() != null) {
+-            currentASTResult = blk.getLabel();
+-        }
+-
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(blk);
+-
+-        // generate exit test if greedy set to false
+-        // and an alt is ambiguous with exit branch
+-        // or when lookahead derived purely from end-of-file
+-        // Lookahead analysis stops when end-of-file is hit,
+-        // returning set {epsilon}.  Since {epsilon} is not
+-        // ambig with any real tokens, no error is reported
+-        // by deterministic() routines and we have to check
+-        // for the case where the lookahead depth didn't get
+-        // set to NONDETERMINISTIC (this only happens when the
+-        // FOLLOW contains real atoms + epsilon).
+-        boolean generateNonGreedyExitPath = false;
+-        int nonGreedyExitDepth = grammar.maxk;
+-
+-        if (!blk.greedy &&
+-            blk.exitLookaheadDepth <= grammar.maxk &&
+-            blk.exitCache[blk.exitLookaheadDepth].containsEpsilon()) {
+-            generateNonGreedyExitPath = true;
+-            nonGreedyExitDepth = blk.exitLookaheadDepth;
+-        }
+-        else if (!blk.greedy &&
+-            blk.exitLookaheadDepth == LLkGrammarAnalyzer.NONDETERMINISTIC) {
+-            generateNonGreedyExitPath = true;
+-        }
+-        if (generateNonGreedyExitPath) {
+-            if (DEBUG_CODE_GENERATOR) {
+-                System.out.println("nongreedy (...)* loop; exit depth is " +
+-                                   blk.exitLookaheadDepth);
+-            }
+-            String predictExit =
+-                getLookaheadTestExpression(blk.exitCache,
+-                                           nonGreedyExitDepth);
+-            println("// nongreedy exit test");
+-            println("if (" + predictExit + ") break " + label + ";");
+-        }
+-
+-        JavaBlockFinishingInfo howToFinish = genCommonBlock(blk, false);
+-        genBlockFinish(howToFinish, "break " + label + ";");
+-
+-        tabs--;
+-        println("} while (true);");
+-        println("}");
+-
+-        // Restore previous AST generation
+-        currentASTResult = saveCurrentASTResult;
+-    }
+-
+-    /** Generate an alternative.
+-     * @param alt  The alternative to generate
+-     * @param blk The block to which the alternative belongs
+-     */
+-    protected void genAlt(Alternative alt, AlternativeBlock blk) {
+-        // Save the AST generation state, and set it to that of the alt
+-        boolean savegenAST = genAST;
+-        genAST = genAST && alt.getAutoGen();
+-
+-        boolean oldsaveTest = saveText;
+-        saveText = saveText && alt.getAutoGen();
+-
+-        // Reset the variable name map for the alternative
+-        Hashtable saveMap = treeVariableMap;
+-        treeVariableMap = new Hashtable();
+-
+-        // Generate try block around the alt for  error handling
+-        if (alt.exceptionSpec != null) {
+-            println("try {      // for error handling");
+-            tabs++;
+-        }
+-
+-        AlternativeElement elem = alt.head;
+-        while (!(elem instanceof BlockEndElement)) {
+-            elem.generate(); // alt can begin with anything. Ask target to gen.
+-            elem = elem.next;
+-        }
+-
+-        if (genAST) {
+-            if (blk instanceof RuleBlock) {
+-                // Set the AST return value for the rule
+-                RuleBlock rblk = (RuleBlock)blk;
+-                if (grammar.hasSyntacticPredicate) {
+-                    // println("if ( inputState.guessing==0 ) {");
+-                    // tabs++;
+-                }
+-                println(rblk.getRuleName() + "_AST = (" + labeledElementASTType + ")currentAST.root;");
+-                if (grammar.hasSyntacticPredicate) {
+-                    // --tabs;
+-                    // println("}");
+-                }
+-            }
+-            else if (blk.getLabel() != null) {
+-                // ### future: also set AST value for labeled subrules.
+-                // println(blk.getLabel() + "_AST = ("+labeledElementASTType+")currentAST.root;");
+-                antlrTool.warning("Labeled subrules not yet supported", grammar.getFilename(), blk.getLine(), blk.getColumn());
+-            }
+-        }
+-
+-        if (alt.exceptionSpec != null) {
+-            // close try block
+-            tabs--;
+-            println("}");
+-            genErrorHandler(alt.exceptionSpec);
+-        }
+-
+-        genAST = savegenAST;
+-        saveText = oldsaveTest;
+-
+-        treeVariableMap = saveMap;
+-    }
+-
+-    /** Generate all the bitsets to be used in the parser or lexer
+-     * Generate the raw bitset data like "long _tokenSet1_data[] = {...};"
+-     * and the BitSet object declarations like "BitSet _tokenSet1 = new BitSet(_tokenSet1_data);"
+-     * Note that most languages do not support object initialization inside a
+-     * class definition, so other code-generators may have to separate the
+-     * bitset declarations from the initializations (e.g., put the initializations
+-     * in the generated constructor instead).
+-     * @param bitsetList The list of bitsets to generate.
+-     * @param maxVocabulary Ensure that each generated bitset can contain at least this value.
+-     */
+-    protected void genBitsets(Vector bitsetList,
+-                              int maxVocabulary
+-                              ) {
+-        println("");
+-        for (int i = 0; i < bitsetList.size(); i++) {
+-            BitSet p = (BitSet)bitsetList.elementAt(i);
+-            // Ensure that generated BitSet is large enough for vocabulary
+-            p.growToInclude(maxVocabulary);
+-            genBitSet(p, i);
+-        }
+-    }
+-
+-    /** Do something simple like:
+-     *  private static final long[] mk_tokenSet_0() {
+-     *    long[] data = { -2305839160922996736L, 63L, 16777216L, 0L, 0L, 0L };
+-     *    return data;
+-     *  }
+-     *  public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-     *
+-     *  Or, for large bitsets, optimize init so ranges are collapsed into loops.
+-     *  This is most useful for lexers using unicode.
+-     */
+-    private void genBitSet(BitSet p, int id) {
+-        // initialization data
+-        println(
+-            "private static final long[] mk" + getBitsetName(id) + "() {"
+-        );
+-        int n = p.lengthInLongWords();
+-        if ( n<BITSET_OPTIMIZE_INIT_THRESHOLD ) {
+-            println("\tlong[] data = { " + p.toStringOfWords() + "};");
+-        }
+-        else {
+-            // will init manually, allocate space then set values
+-            println("\tlong[] data = new long["+n+"];");
+-            long[] elems = p.toPackedArray();
+-            for (int i = 0; i < elems.length;) {
+-				if ( elems[i]==0 ) {
+-					// done automatically by Java, don't waste time/code
+-					i++;
+-					continue;
+-				}
+-                if ( (i+1)==elems.length || elems[i]!=elems[i+1] ) {
+-                    // last number or no run of numbers, just dump assignment
+-                    println("\tdata["+i+"]="+elems[i]+"L;");
+-                    i++;
+-                }
+-                else {
+-                    // scan to find end of run
+-                    int j;
+-                    for (j = i + 1;
+-                         j < elems.length && elems[j]==elems[i];
+-                         j++)
+-                    {
+-                    }
+-                    // j-1 is last member of run
+-                    println("\tfor (int i = "+i+"; i<="+(j-1)+"; i++) { data[i]="+
+-                            elems[i]+"L; }");
+-                    i = j;
+-                }
+-            }
+-        }
+-
+-        println("\treturn data;");
+-        println("}");
+-        // BitSet object
+-        println(
+-            "public static final BitSet " + getBitsetName(id) + " = new BitSet(" +
+-            "mk" + getBitsetName(id) + "()" +
+-            ");"
+-        );
+-    }
+-
+-    /** Generate the finish of a block, using a combination of the info
+-     * returned from genCommonBlock() and the action to perform when
+-     * no alts were taken
+-     * @param howToFinish The return of genCommonBlock()
+-     * @param noViableAction What to generate when no alt is taken
+-     */
+-    private void genBlockFinish(JavaBlockFinishingInfo howToFinish, String noViableAction) {
+-        if (howToFinish.needAnErrorClause &&
+-            (howToFinish.generatedAnIf || howToFinish.generatedSwitch)) {
+-            if (howToFinish.generatedAnIf) {
+-                println("else {");
+-            }
+-            else {
+-                println("{");
+-            }
+-            tabs++;
+-            println(noViableAction);
+-            tabs--;
+-            println("}");
+-        }
+-
+-        if (howToFinish.postscript != null) {
+-            println(howToFinish.postscript);
+-        }
+-    }
+-
+-    /** Generate the init action for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.
+-     * @blk The block for which the preamble is to be generated.
+-     */
+-    protected void genBlockInitAction(AlternativeBlock blk) {
+-        // dump out init action
+-        if (blk.initAction != null) {
+-            printAction(processActionForSpecialSymbols(blk.initAction, blk.getLine(), currentRule, null));
+-        }
+-    }
+-
+-    /** Generate the header for a block, which may be a RuleBlock or a
+-     * plain AlternativeBLock.  This generates any variable declarations
+-     * and syntactic-predicate-testing variables.
+-     * @blk The block for which the preamble is to be generated.
+-     */
+-    protected void genBlockPreamble(AlternativeBlock blk) {
+-        // define labels for rule blocks.
+-        if (blk instanceof RuleBlock) {
+-            RuleBlock rblk = (RuleBlock)blk;
+-            if (rblk.labeledElements != null) {
+-                for (int i = 0; i < rblk.labeledElements.size(); i++) {
+-                    AlternativeElement a = (AlternativeElement)rblk.labeledElements.elementAt(i);
+-                    // System.out.println("looking at labeled element: "+a);
+-                    // Variables for labeled rule refs and
+-                    // subrules are different than variables for
+-                    // grammar atoms.  This test is a little tricky
+-                    // because we want to get all rule refs and ebnf,
+-                    // but not rule blocks or syntactic predicates
+-                    if (
+-                        a instanceof RuleRefElement ||
+-                        a instanceof AlternativeBlock &&
+-                        !(a instanceof RuleBlock) &&
+-                        !(a instanceof SynPredBlock)
+-                    ) {
+-
+-                        if (
+-                            !(a instanceof RuleRefElement) &&
+-                            ((AlternativeBlock)a).not &&
+-                            analyzer.subruleCanBeInverted(((AlternativeBlock)a), grammar instanceof LexerGrammar)
+-                        ) {
+-                            // Special case for inverted subrules that
+-                            // will be inlined.  Treat these like
+-                            // token or char literal references
+-                            println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-                            if (grammar.buildAST) {
+-                                genASTDeclaration(a);
+-                            }
+-                        }
+-                        else {
+-                            if (grammar.buildAST) {
+-                                // Always gen AST variables for
+-                                // labeled elements, even if the
+-                                // element itself is marked with !
+-                                genASTDeclaration(a);
+-                            }
+-                            if (grammar instanceof LexerGrammar) {
+-                                println("Token " + a.getLabel() + "=null;");
+-                            }
+-                            if (grammar instanceof TreeWalkerGrammar) {
+-                                // always generate rule-ref variables
+-                                // for tree walker
+-                                println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-                            }
+-                        }
+-                    }
+-                    else {
+-                        // It is a token or literal reference.  Generate the
+-                        // correct variable type for this grammar
+-                        println(labeledElementType + " " + a.getLabel() + " = " + labeledElementInit + ";");
+-
+-                        // In addition, generate *_AST variables if
+-                        // building ASTs
+-                        if (grammar.buildAST) {
+-                            if (a instanceof GrammarAtom &&
+-                                ((GrammarAtom)a).getASTNodeType() != null) {
+-                                GrammarAtom ga = (GrammarAtom)a;
+-                                genASTDeclaration(a, ga.getASTNodeType());
+-                            }
+-                            else {
+-                                genASTDeclaration(a);
+-                            }
+-                        }
+-                    }
+-                }
+-            }
+-        }
+-    }
+-
+-    /** Generate a series of case statements that implement a BitSet test.
+-     * @param p The Bitset for which cases are to be generated
+-     */
+-    protected void genCases(BitSet p) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genCases(" + p + ")");
+-        int[] elems;
+-
+-        elems = p.toArray();
+-        // Wrap cases four-per-line for lexer, one-per-line for parser
+-        int wrap = (grammar instanceof LexerGrammar) ? 4 : 1;
+-        int j = 1;
+-        boolean startOfLine = true;
+-        for (int i = 0; i < elems.length; i++) {
+-            if (j == 1) {
+-                print("");
+-            }
+-            else {
+-                _print("  ");
+-            }
+-            _print("case " + getValueString(elems[i]) + ":");
+-
+-            if (j == wrap) {
+-                _println("");
+-                startOfLine = true;
+-                j = 1;
+-            }
+-            else {
+-                j++;
+-                startOfLine = false;
+-            }
+-        }
+-        if (!startOfLine) {
+-            _println("");
+-        }
+-    }
+-
+-    /**Generate common code for a block of alternatives; return a
+-     * postscript that needs to be generated at the end of the
+-     * block.  Other routines may append else-clauses and such for
+-     * error checking before the postfix is generated.  If the
+-     * grammar is a lexer, then generate alternatives in an order
+-     * where alternatives requiring deeper lookahead are generated
+-     * first, and EOF in the lookahead set reduces the depth of
+-     * the lookahead.  @param blk The block to generate @param
+-     * noTestForSingle If true, then it does not generate a test
+-     * for a single alternative.
+-     */
+-    public JavaBlockFinishingInfo genCommonBlock(AlternativeBlock blk,
+-                                                 boolean noTestForSingle) {
+-        int nIF = 0;
+-        boolean createdLL1Switch = false;
+-        int closingBracesOfIFSequence = 0;
+-        JavaBlockFinishingInfo finishingInfo = new JavaBlockFinishingInfo();
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genCommonBlock(" + blk + ")");
+-
+-        // Save the AST generation state, and set it to that of the block
+-        boolean savegenAST = genAST;
+-        genAST = genAST && blk.getAutoGen();
+-
+-        boolean oldsaveTest = saveText;
+-        saveText = saveText && blk.getAutoGen();
+-
+-        // Is this block inverted?  If so, generate special-case code
+-        if (
+-            blk.not &&
+-            analyzer.subruleCanBeInverted(blk, grammar instanceof LexerGrammar)
+-        ) {
+-            if (DEBUG_CODE_GENERATOR) System.out.println("special case: ~(subrule)");
+-            Lookahead p = analyzer.look(1, blk);
+-            // Variable assignment for labeled elements
+-            if (blk.getLabel() != null && syntacticPredLevel == 0) {
+-                println(blk.getLabel() + " = " + lt1Value + ";");
+-            }
+-
+-            // AST
+-            genElementAST(blk);
+-
+-            String astArgs = "";
+-            if (grammar instanceof TreeWalkerGrammar) {
+-                astArgs = "_t,";
+-            }
+-
+-            // match the bitset for the alternative
+-            println("match(" + astArgs + getBitsetName(markBitsetForGen(p.fset)) + ");");
+-
+-            // tack on tree cursor motion if doing a tree walker
+-            if (grammar instanceof TreeWalkerGrammar) {
+-                println("_t = _t.getNextSibling();");
+-            }
+-            return finishingInfo;
+-        }
+-
+-        // Special handling for single alt
+-        if (blk.getAlternatives().size() == 1) {
+-            Alternative alt = blk.getAlternativeAt(0);
+-            // Generate a warning if there is a synPred for single alt.
+-            if (alt.synPred != null) {
+-                antlrTool.warning(
+-                    "Syntactic predicate superfluous for single alternative",
+-                    grammar.getFilename(),
+-                    blk.getAlternativeAt(0).synPred.getLine(),
+-                    blk.getAlternativeAt(0).synPred.getColumn()
+-                );
+-            }
+-            if (noTestForSingle) {
+-                if (alt.semPred != null) {
+-                    // Generate validating predicate
+-                    genSemPred(alt.semPred, blk.line);
+-                }
+-                genAlt(alt, blk);
+-                return finishingInfo;
+-            }
+-        }
+-
+-        // count number of simple LL(1) cases; only do switch for
+-        // many LL(1) cases (no preds, no end of token refs)
+-        // We don't care about exit paths for (...)*, (...)+
+-        // because we don't explicitly have a test for them
+-        // as an alt in the loop.
+-        //
+-        // Also, we now count how many unicode lookahead sets
+-        // there are--they must be moved to DEFAULT or ELSE
+-        // clause.
+-        int nLL1 = 0;
+-        for (int i = 0; i < blk.getAlternatives().size(); i++) {
+-            Alternative a = blk.getAlternativeAt(i);
+-            if (suitableForCaseExpression(a)) {
+-                nLL1++;
+-            }
+-        }
+-
+-        // do LL(1) cases
+-        if (nLL1 >= makeSwitchThreshold) {
+-            // Determine the name of the item to be compared
+-            String testExpr = lookaheadString(1);
+-            createdLL1Switch = true;
+-            // when parsing trees, convert null to valid tree node with NULL lookahead
+-            if (grammar instanceof TreeWalkerGrammar) {
+-                println("if (_t==null) _t=ASTNULL;");
+-            }
+-            println("switch ( " + testExpr + ") {");
+-            for (int i = 0; i < blk.alternatives.size(); i++) {
+-                Alternative alt = blk.getAlternativeAt(i);
+-                // ignore any non-LL(1) alts, predicated alts,
+-                // or end-of-token alts for case expressions
+-                if (!suitableForCaseExpression(alt)) {
+-                    continue;
+-                }
+-                Lookahead p = alt.cache[1];
+-                if (p.fset.degree() == 0 && !p.containsEpsilon()) {
+-                    antlrTool.warning("Alternate omitted due to empty prediction set",
+-                                 grammar.getFilename(),
+-                                 alt.head.getLine(), alt.head.getColumn());
+-                }
+-                else {
+-                    genCases(p.fset);
+-                    println("{");
+-                    tabs++;
+-                    genAlt(alt, blk);
+-                    println("break;");
+-                    tabs--;
+-                    println("}");
+-                }
+-            }
+-            println("default:");
+-            tabs++;
+-        }
+-
+-        // do non-LL(1) and nondeterministic cases This is tricky in
+-        // the lexer, because of cases like: STAR : '*' ; ASSIGN_STAR
+-        // : "*="; Since nextToken is generated without a loop, then
+-        // the STAR will have end-of-token as it's lookahead set for
+-        // LA(2).  So, we must generate the alternatives containing
+-        // trailing end-of-token in their lookahead sets *after* the
+-        // alternatives without end-of-token.  This implements the
+-        // usual lexer convention that longer matches come before
+-        // shorter ones, e.g.  "*=" matches ASSIGN_STAR not STAR
+-        //
+-        // For non-lexer grammars, this does not sort the alternates
+-        // by depth Note that alts whose lookahead is purely
+-        // end-of-token at k=1 end up as default or else clauses.
+-        int startDepth = (grammar instanceof LexerGrammar) ? grammar.maxk : 0;
+-        for (int altDepth = startDepth; altDepth >= 0; altDepth--) {
+-            if (DEBUG_CODE_GENERATOR) System.out.println("checking depth " + altDepth);
+-            for (int i = 0; i < blk.alternatives.size(); i++) {
+-                Alternative alt = blk.getAlternativeAt(i);
+-                if (DEBUG_CODE_GENERATOR) System.out.println("genAlt: " + i);
+-                // if we made a switch above, ignore what we already took care
+-                // of.  Specifically, LL(1) alts with no preds
+-                // that do not have end-of-token in their prediction set
+-                // and that are not giant unicode sets.
+-                if (createdLL1Switch && suitableForCaseExpression(alt)) {
+-                    if (DEBUG_CODE_GENERATOR) System.out.println("ignoring alt because it was in the switch");
+-                    continue;
+-                }
+-                String e;
+-
+-                boolean unpredicted = false;
+-
+-                if (grammar instanceof LexerGrammar) {
+-                    // Calculate the "effective depth" of the alt,
+-                    // which is the max depth at which
+-                    // cache[depth]!=end-of-token
+-                    int effectiveDepth = alt.lookaheadDepth;
+-                    if (effectiveDepth == GrammarAnalyzer.NONDETERMINISTIC) {
+-                        // use maximum lookahead
+-                        effectiveDepth = grammar.maxk;
+-                    }
+-                    while (effectiveDepth >= 1 &&
+-                        alt.cache[effectiveDepth].containsEpsilon()) {
+-                        effectiveDepth--;
+-                    }
+-                    // Ignore alts whose effective depth is other than
+-                    // the ones we are generating for this iteration.
+-                    if (effectiveDepth != altDepth) {
+-                        if (DEBUG_CODE_GENERATOR)
+-                            System.out.println("ignoring alt because effectiveDepth!=altDepth;" + effectiveDepth + "!=" + altDepth);
+-                        continue;
+-                    }
+-                    unpredicted = lookaheadIsEmpty(alt, effectiveDepth);
+-                    e = getLookaheadTestExpression(alt, effectiveDepth);
+-                }
+-                else {
+-                    unpredicted = lookaheadIsEmpty(alt, grammar.maxk);
+-                    e = getLookaheadTestExpression(alt, grammar.maxk);
+-                }
+-
+-                // Was it a big unicode range that forced unsuitability
+-                // for a case expression?
+-                if (alt.cache[1].fset.degree() > caseSizeThreshold &&
+-                    suitableForCaseExpression(alt)) {
+-                    if (nIF == 0) {
+-                        println("if " + e + " {");
+-                    }
+-                    else {
+-                        println("else if " + e + " {");
+-                    }
+-                }
+-                else if (unpredicted &&
+-                    alt.semPred == null &&
+-                    alt.synPred == null) {
+-                    // The alt has empty prediction set and no
+-                    // predicate to help out.  if we have not
+-                    // generated a previous if, just put {...} around
+-                    // the end-of-token clause
+-                    if (nIF == 0) {
+-                        println("{");
+-                    }
+-                    else {
+-                        println("else {");
+-                    }
+-                    finishingInfo.needAnErrorClause = false;
+-                }
+-                else { // check for sem and syn preds
+-
+-                    // Add any semantic predicate expression to the
+-                    // lookahead test
+-                    if (alt.semPred != null) {
+-                        // if debugging, wrap the evaluation of the
+-                        // predicate in a method translate $ and #
+-                        // references
+-                        ActionTransInfo tInfo = new ActionTransInfo();
+-                        String actionStr =
+-                            processActionForSpecialSymbols(alt.semPred,
+-                                                           blk.line,
+-                                                           currentRule,
+-                                                           tInfo);
+-                        // ignore translation info...we don't need to
+-                        // do anything with it.  call that will inform
+-                        // SemanticPredicateListeners of the result
+-                        if (((grammar instanceof ParserGrammar) ||
+-                            (grammar instanceof LexerGrammar)) &&
+-                            grammar.debuggingOutput) {
+-                            e = "(" + e + "&& fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.PREDICTING," +
+-                                addSemPred(charFormatter.escapeString(actionStr)) + "," + actionStr + "))";
+-                        }
+-                        else {
+-                            e = "(" + e + "&&(" + actionStr + "))";
+-                        }
+-                    }
+-
+-                    // Generate any syntactic predicates
+-                    if (nIF > 0) {
+-                        if (alt.synPred != null) {
+-                            println("else {");
+-                            tabs++;
+-                            genSynPred(alt.synPred, e);
+-                            closingBracesOfIFSequence++;
+-                        }
+-                        else {
+-                            println("else if " + e + " {");
+-                        }
+-                    }
+-                    else {
+-                        if (alt.synPred != null) {
+-                            genSynPred(alt.synPred, e);
+-                        }
+-                        else {
+-                            // when parsing trees, convert null to
+-                            // valid tree node with NULL lookahead.
+-                            if (grammar instanceof TreeWalkerGrammar) {
+-                                println("if (_t==null) _t=ASTNULL;");
+-                            }
+-                            println("if " + e + " {");
+-                        }
+-                    }
+-
+-                }
+-
+-                nIF++;
+-                tabs++;
+-                genAlt(alt, blk);
+-                tabs--;
+-                println("}");
+-            }
+-        }
+-        String ps = "";
+-        for (int i = 1; i <= closingBracesOfIFSequence; i++) {
+-            ps += "}";
+-        }
+-
+-        // Restore the AST generation state
+-        genAST = savegenAST;
+-
+-        // restore save text state
+-        saveText = oldsaveTest;
+-
+-        // Return the finishing info.
+-        if (createdLL1Switch) {
+-            tabs--;
+-            finishingInfo.postscript = ps + "}";
+-            finishingInfo.generatedSwitch = true;
+-            finishingInfo.generatedAnIf = nIF > 0;
+-            //return new JavaBlockFinishingInfo(ps+"}",true,nIF>0); // close up switch statement
+-
+-        }
+-        else {
+-            finishingInfo.postscript = ps;
+-            finishingInfo.generatedSwitch = false;
+-            finishingInfo.generatedAnIf = nIF > 0;
+-            // return new JavaBlockFinishingInfo(ps, false,nIF>0);
+-        }
+-        return finishingInfo;
+-    }
+-
+-    private static boolean suitableForCaseExpression(Alternative a) {
+-        return
+-            a.lookaheadDepth == 1 &&
+-            a.semPred == null &&
+-            !a.cache[1].containsEpsilon() &&
+-            a.cache[1].fset.degree() <= caseSizeThreshold;
+-    }
+-
+-    /** Generate code to link an element reference into the AST */
+-    private void genElementAST(AlternativeElement el) {
+-        // handle case where you're not building trees, but are in tree walker.
+-        // Just need to get labels set up.
+-        if (grammar instanceof TreeWalkerGrammar && !grammar.buildAST) {
+-            String elementRef;
+-            String astName;
+-
+-            // Generate names and declarations of the AST variable(s)
+-            if (el.getLabel() == null) {
+-                elementRef = lt1Value;
+-                // Generate AST variables for unlabeled stuff
+-                astName = "tmp" + astVarNumber + "_AST";
+-                astVarNumber++;
+-                // Map the generated AST variable in the alternate
+-                mapTreeVariable(el, astName);
+-                // Generate an "input" AST variable also
+-                println(labeledElementASTType + " " + astName + "_in = " + elementRef + ";");
+-            }
+-            return;
+-        }
+-
+-        if (grammar.buildAST && syntacticPredLevel == 0) {
+-            boolean needASTDecl =
+-                (genAST &&
+-                (el.getLabel() != null ||
+-                el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG
+-                )
+-                );
+-
+-            // RK: if we have a grammar element always generate the decl
+-            // since some guy can access it from an action and we can't
+-            // peek ahead (well not without making a mess).
+-            // I'd prefer taking this out.
+-            if (el.getAutoGenType() != GrammarElement.AUTO_GEN_BANG &&
+-                (el instanceof TokenRefElement))
+-            {
+-                needASTDecl = true;
+-            }
+-
+-            boolean doNoGuessTest =
+-                (grammar.hasSyntacticPredicate && needASTDecl);
+-
+-            String elementRef;
+-            String astNameBase;
+-
+-            // Generate names and declarations of the AST variable(s)
+-            if (el.getLabel() != null) {
+-                elementRef = el.getLabel();
+-                astNameBase = el.getLabel();
+-            }
+-            else {
+-                elementRef = lt1Value;
+-                // Generate AST variables for unlabeled stuff
+-                astNameBase = "tmp" + astVarNumber;
+-                ;
+-                astVarNumber++;
+-            }
+-
+-            // Generate the declaration if required.
+-            if (needASTDecl) {
+-                // Generate the declaration
+-                if (el instanceof GrammarAtom) {
+-                    GrammarAtom ga = (GrammarAtom)el;
+-                    if (ga.getASTNodeType() != null) {
+-                        genASTDeclaration(el, astNameBase, ga.getASTNodeType());
+-//						println(ga.getASTNodeType()+" " + astName+" = null;");
+-                    }
+-                    else {
+-                        genASTDeclaration(el, astNameBase, labeledElementASTType);
+-//						println(labeledElementASTType+" " + astName + " = null;");
+-                    }
+-                }
+-                else {
+-                    genASTDeclaration(el, astNameBase, labeledElementASTType);
+-//					println(labeledElementASTType+" " + astName + " = null;");
+-                }
+-            }
+-
+-            // for convenience..
+-            String astName = astNameBase + "_AST";
+-
+-            // Map the generated AST variable in the alternate
+-            mapTreeVariable(el, astName);
+-            if (grammar instanceof TreeWalkerGrammar) {
+-                // Generate an "input" AST variable also
+-                println(labeledElementASTType + " " + astName + "_in = null;");
+-            }
+-
+-            // Enclose actions with !guessing
+-            if (doNoGuessTest) {
+-                // println("if (inputState.guessing==0) {");
+-                // tabs++;
+-            }
+-
+-            // if something has a label assume it will be used
+-            // so we must initialize the RefAST
+-            if (el.getLabel() != null) {
+-                if (el instanceof GrammarAtom) {
+-                    println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ";");
+-                }
+-                else {
+-                    println(astName + " = " + getASTCreateString(elementRef) + ";");
+-                }
+-            }
+-
+-            // if it has no label but a declaration exists initialize it.
+-            if (el.getLabel() == null && needASTDecl) {
+-                elementRef = lt1Value;
+-                if (el instanceof GrammarAtom) {
+-                    println(astName + " = " + getASTCreateString((GrammarAtom)el, elementRef) + ";");
+-                }
+-                else {
+-                    println(astName + " = " + getASTCreateString(elementRef) + ";");
+-                }
+-                // Map the generated AST variable in the alternate
+-                if (grammar instanceof TreeWalkerGrammar) {
+-                    // set "input" AST variable also
+-                    println(astName + "_in = " + elementRef + ";");
+-                }
+-            }
+-
+-            if (genAST) {
+-                switch (el.getAutoGenType()) {
+-                    case GrammarElement.AUTO_GEN_NONE:
+-                        println("astFactory.addASTChild(currentAST, " + astName + ");");
+-                        break;
+-                    case GrammarElement.AUTO_GEN_CARET:
+-                        println("astFactory.makeASTRoot(currentAST, " + astName + ");");
+-                        break;
+-                    default:
+-                        break;
+-                }
+-            }
+-            if (doNoGuessTest) {
+-                // tabs--;
+-                // println("}");
+-            }
+-        }
+-    }
+-
+-    /** Close the try block and generate catch phrases
+-     * if the element has a labeled handler in the rule
+-     */
+-    private void genErrorCatchForElement(AlternativeElement el) {
+-        if (el.getLabel() == null) return;
+-        String r = el.enclosingRuleName;
+-        if (grammar instanceof LexerGrammar) {
+-            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
+-        }
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
+-        if (rs == null) {
+-            antlrTool.panic("Enclosing rule not found!");
+-        }
+-        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
+-        if (ex != null) {
+-            tabs--;
+-            println("}");
+-            genErrorHandler(ex);
+-        }
+-    }
+-
+-    /** Generate the catch phrases for a user-specified error handler */
+-    private void genErrorHandler(ExceptionSpec ex) {
+-        // Each ExceptionHandler in the ExceptionSpec is a separate catch
+-        for (int i = 0; i < ex.handlers.size(); i++) {
+-            ExceptionHandler handler = (ExceptionHandler)ex.handlers.elementAt(i);
+-            // Generate catch phrase
+-            println("catch (" + handler.exceptionTypeAndName.getText() + ") {");
+-            tabs++;
+-            if (grammar.hasSyntacticPredicate) {
+-                println("if (inputState.guessing==0) {");
+-                tabs++;
+-            }
+-
+-            // When not guessing, execute user handler action
+-			ActionTransInfo tInfo = new ActionTransInfo();
+-            printAction(
+-                processActionForSpecialSymbols(handler.action.getText(),
+-                                               handler.action.getLine(),
+-                                               currentRule, tInfo)
+-            );
+-
+-            if (grammar.hasSyntacticPredicate) {
+-                tabs--;
+-                println("} else {");
+-                tabs++;
+-                // When guessing, rethrow exception
+-                println(
+-                    "throw " +
+-                    extractIdOfAction(handler.exceptionTypeAndName) +
+-                    ";"
+-                );
+-                tabs--;
+-                println("}");
+-            }
+-            // Close catch phrase
+-            tabs--;
+-            println("}");
+-        }
+-    }
+-
+-    /** Generate a try { opening if the element has a labeled handler in the rule */
+-    private void genErrorTryForElement(AlternativeElement el) {
+-        if (el.getLabel() == null) return;
+-        String r = el.enclosingRuleName;
+-        if (grammar instanceof LexerGrammar) {
+-            r = CodeGenerator.encodeLexerRuleName(el.enclosingRuleName);
+-        }
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(r);
+-        if (rs == null) {
+-            antlrTool.panic("Enclosing rule not found!");
+-        }
+-        ExceptionSpec ex = rs.block.findExceptionSpec(el.getLabel());
+-        if (ex != null) {
+-            println("try { // for error handling");
+-            tabs++;
+-        }
+-    }
+-
+-    protected void genASTDeclaration(AlternativeElement el) {
+-        genASTDeclaration(el, labeledElementASTType);
+-    }
+-
+-    protected void genASTDeclaration(AlternativeElement el, String node_type) {
+-        genASTDeclaration(el, el.getLabel(), node_type);
+-    }
+-
+-    protected void genASTDeclaration(AlternativeElement el, String var_name, String node_type) {
+-        // already declared?
+-        if (declaredASTVariables.contains(el))
+-            return;
+-
+-        // emit code
+-        println(node_type + " " + var_name + "_AST = null;");
+-
+-        // mark as declared
+-        declaredASTVariables.put(el,el);
+-    }
+-
+-    /** Generate a header that is common to all Java files */
+-    protected void genHeader() {
+-        println("// $ANTLR " + Tool.version + ": " +
+-                "\"" + antlrTool.fileMinusPath(antlrTool.grammarFile) + "\"" +
+-                " -> " +
+-                "\"" + grammar.getClassName() + ".java\"$");
+-    }
+-
+-    private void genLiteralsTest() {
+-        println("_ttype = testLiteralsTable(_ttype);");
+-    }
+-
+-    private void genLiteralsTestForPartialToken() {
+-        println("_ttype = testLiteralsTable(new String(text.getBuffer(),_begin,text.length()-_begin),_ttype);");
+-    }
+-
+-    protected void genMatch(BitSet b) {
+-    }
+-
+-    protected void genMatch(GrammarAtom atom) {
+-        if (atom instanceof StringLiteralElement) {
+-            if (grammar instanceof LexerGrammar) {
+-                genMatchUsingAtomText(atom);
+-            }
+-            else {
+-                genMatchUsingAtomTokenType(atom);
+-            }
+-        }
+-        else if (atom instanceof CharLiteralElement) {
+-            if (grammar instanceof LexerGrammar) {
+-                genMatchUsingAtomText(atom);
+-            }
+-            else {
+-                antlrTool.error("cannot ref character literals in grammar: " + atom);
+-            }
+-        }
+-        else if (atom instanceof TokenRefElement) {
+-            genMatchUsingAtomText(atom);
+-        }
+-        else if (atom instanceof WildcardElement) {
+-            gen((WildcardElement)atom);
+-        }
+-    }
+-
+-    protected void genMatchUsingAtomText(GrammarAtom atom) {
+-        // match() for trees needs the _t cursor
+-        String astArgs = "";
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            astArgs = "_t,";
+-        }
+-
+-        // if in lexer and ! on element, save buffer index to kill later
+-        if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
+-            println("_saveIndex=text.length();");
+-        }
+-
+-        print(atom.not ? "matchNot(" : "match(");
+-        _print(astArgs);
+-
+-        // print out what to match
+-        if (atom.atomText.equals("EOF")) {
+-            // horrible hack to handle EOF case
+-            _print("Token.EOF_TYPE");
+-        }
+-        else {
+-            _print(atom.atomText);
+-        }
+-        _println(");");
+-
+-        if (grammar instanceof LexerGrammar && (!saveText || atom.getAutoGenType() == GrammarElement.AUTO_GEN_BANG)) {
+-            println("text.setLength(_saveIndex);");		// kill text atom put in buffer
+-        }
+-    }
+-
+-    protected void genMatchUsingAtomTokenType(GrammarAtom atom) {
+-        // match() for trees needs the _t cursor
+-        String astArgs = "";
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            astArgs = "_t,";
+-        }
+-
+-        // If the literal can be mangled, generate the symbolic constant instead
+-        String mangledName = null;
+-        String s = astArgs + getValueString(atom.getType());
+-
+-        // matching
+-        println((atom.not ? "matchNot(" : "match(") + s + ");");
+-    }
+-
+-    /** Generate the nextToken() rule.  nextToken() is a synthetic
+-     * lexer rule that is the implicit OR of all user-defined
+-     * lexer rules.
+-     */
+-    public void genNextToken() {
+-        // Are there any public rules?  If not, then just generate a
+-        // fake nextToken().
+-        boolean hasPublicRules = false;
+-        for (int i = 0; i < grammar.rules.size(); i++) {
+-            RuleSymbol rs = (RuleSymbol)grammar.rules.elementAt(i);
+-            if (rs.isDefined() && rs.access.equals("public")) {
+-                hasPublicRules = true;
+-                break;
+-            }
+-        }
+-        if (!hasPublicRules) {
+-            println("");
+-            println("public Token nextToken() throws TokenStreamException {");
+-            println("\ttry {uponEOF();}");
+-            println("\tcatch(CharStreamIOException csioe) {");
+-            println("\t\tthrow new TokenStreamIOException(csioe.io);");
+-            println("\t}");
+-            println("\tcatch(CharStreamException cse) {");
+-            println("\t\tthrow new TokenStreamException(cse.getMessage());");
+-            println("\t}");
+-            println("\treturn new CommonToken(Token.EOF_TYPE, \"\");");
+-            println("}");
+-            println("");
+-            return;
+-        }
+-
+-        // Create the synthesized nextToken() rule
+-        RuleBlock nextTokenBlk = MakeGrammar.createNextTokenRule(grammar, grammar.rules, "nextToken");
+-        // Define the nextToken rule symbol
+-        RuleSymbol nextTokenRs = new RuleSymbol("mnextToken");
+-        nextTokenRs.setDefined();
+-        nextTokenRs.setBlock(nextTokenBlk);
+-        nextTokenRs.access = "private";
+-        grammar.define(nextTokenRs);
+-        // Analyze the nextToken rule
+-        boolean ok = grammar.theLLkAnalyzer.deterministic(nextTokenBlk);
+-
+-        // Generate the next token rule
+-        String filterRule = null;
+-        if (((LexerGrammar)grammar).filterMode) {
+-            filterRule = ((LexerGrammar)grammar).filterRule;
+-        }
+-
+-        println("");
+-        println("public Token nextToken() throws TokenStreamException {");
+-        tabs++;
+-        println("Token theRetToken=null;");
+-        _println("tryAgain:");
+-        println("for (;;) {");
+-        tabs++;
+-        println("Token _token = null;");
+-        println("int _ttype = Token.INVALID_TYPE;");
+-        if (((LexerGrammar)grammar).filterMode) {
+-            println("setCommitToPath(false);");
+-            if (filterRule != null) {
+-                // Here's a good place to ensure that the filter rule actually exists
+-                if (!grammar.isDefined(CodeGenerator.encodeLexerRuleName(filterRule))) {
+-                    grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
+-                }
+-                else {
+-                    RuleSymbol rs = (RuleSymbol)grammar.getSymbol(CodeGenerator.encodeLexerRuleName(filterRule));
+-                    if (!rs.isDefined()) {
+-                        grammar.antlrTool.error("Filter rule " + filterRule + " does not exist in this lexer");
+-                    }
+-                    else if (rs.access.equals("public")) {
+-                        grammar.antlrTool.error("Filter rule " + filterRule + " must be protected");
+-                    }
+-                }
+-                println("int _m;");
+-                println("_m = mark();");
+-            }
+-        }
+-        println("resetText();");
+-
+-        println("try {   // for char stream error handling");
+-        tabs++;
+-
+-        // Generate try around whole thing to trap scanner errors
+-        println("try {   // for lexical error handling");
+-        tabs++;
+-
+-        // Test for public lexical rules with empty paths
+-        for (int i = 0; i < nextTokenBlk.getAlternatives().size(); i++) {
+-            Alternative a = nextTokenBlk.getAlternativeAt(i);
+-            if (a.cache[1].containsEpsilon()) {
+-                //String r = a.head.toString();
+-                RuleRefElement rr = (RuleRefElement)a.head;
+-                String r = CodeGenerator.decodeLexerRuleName(rr.targetRule);
+-                antlrTool.warning("public lexical rule "+r+" is optional (can match \"nothing\")");
+-            }
+-        }
+-
+-        // Generate the block
+-        String newline = System.getProperty("line.separator");
+-        JavaBlockFinishingInfo howToFinish = genCommonBlock(nextTokenBlk, false);
+-        String errFinish = "if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}";
+-        errFinish += newline + "\t\t\t\t";
+-        if (((LexerGrammar)grammar).filterMode) {
+-            if (filterRule == null) {
+-                errFinish += "else {consume(); continue tryAgain;}";
+-            }
+-            else {
+-                errFinish += "else {" + newline +
+-                    "\t\t\t\t\tcommit();" + newline +
+-                    "\t\t\t\t\ttry {m" + filterRule + "(false);}" + newline +
+-                    "\t\t\t\t\tcatch(RecognitionException e) {" + newline +
+-                    "\t\t\t\t\t	// catastrophic failure" + newline +
+-                    "\t\t\t\t\t	reportError(e);" + newline +
+-                    "\t\t\t\t\t	consume();" + newline +
+-                    "\t\t\t\t\t}" + newline +
+-                    "\t\t\t\t\tcontinue tryAgain;" + newline +
+-                    "\t\t\t\t}";
+-            }
+-        }
+-        else {
+-            errFinish += "else {" + throwNoViable + "}";
+-        }
+-        genBlockFinish(howToFinish, errFinish);
+-
+-        // at this point a valid token has been matched, undo "mark" that was done
+-        if (((LexerGrammar)grammar).filterMode && filterRule != null) {
+-            println("commit();");
+-        }
+-
+-        // Generate literals test if desired
+-        // make sure _ttype is set first; note _returnToken must be
+-        // non-null as the rule was required to create it.
+-        println("if ( _returnToken==null ) continue tryAgain; // found SKIP token");
+-        println("_ttype = _returnToken.getType();");
+-        if (((LexerGrammar)grammar).getTestLiterals()) {
+-            genLiteralsTest();
+-        }
+-
+-        // return token created by rule reference in switch
+-        println("_returnToken.setType(_ttype);");
+-        println("return _returnToken;");
+-
+-        // Close try block
+-        tabs--;
+-        println("}");
+-        println("catch (RecognitionException e) {");
+-        tabs++;
+-        if (((LexerGrammar)grammar).filterMode) {
+-            if (filterRule == null) {
+-                println("if ( !getCommitToPath() ) {consume(); continue tryAgain;}");
+-            }
+-            else {
+-                println("if ( !getCommitToPath() ) {");
+-                tabs++;
+-                println("rewind(_m);");
+-                println("resetText();");
+-                println("try {m" + filterRule + "(false);}");
+-                println("catch(RecognitionException ee) {");
+-                println("	// horrendous failure: error in filter rule");
+-                println("	reportError(ee);");
+-                println("	consume();");
+-                println("}");
+-                println("continue tryAgain;");
+-                tabs--;
+-                println("}");
+-            }
+-        }
+-        if (nextTokenBlk.getDefaultErrorHandler()) {
+-            println("reportError(e);");
+-            println("consume();");
+-        }
+-        else {
+-            // pass on to invoking routine
+-            println("throw new TokenStreamRecognitionException(e);");
+-        }
+-        tabs--;
+-        println("}");
+-
+-        // close CharStreamException try
+-        tabs--;
+-        println("}");
+-        println("catch (CharStreamException cse) {");
+-        println("	if ( cse instanceof CharStreamIOException ) {");
+-        println("		throw new TokenStreamIOException(((CharStreamIOException)cse).io);");
+-        println("	}");
+-        println("	else {");
+-        println("		throw new TokenStreamException(cse.getMessage());");
+-        println("	}");
+-        println("}");
+-
+-        // close for-loop
+-        tabs--;
+-        println("}");
+-
+-        // close method nextToken
+-        tabs--;
+-        println("}");
+-        println("");
+-    }
+-
+-    /** Gen a named rule block.
+-     * ASTs are generated for each element of an alternative unless
+-     * the rule or the alternative have a '!' modifier.
+-     *
+-     * If an alternative defeats the default tree construction, it
+-     * must set <rule>_AST to the root of the returned AST.
+-     *
+-     * Each alternative that does automatic tree construction, builds
+-     * up root and child list pointers in an ASTPair structure.
+-     *
+-     * A rule finishes by setting the returnAST variable from the
+-     * ASTPair.
+-     *
+-     * @param rule The name of the rule to generate
+-     * @param startSymbol true if the rule is a start symbol (i.e., not referenced elsewhere)
+-     */
+-    public void genRule(RuleSymbol s, boolean startSymbol, int ruleNum) {
+-        tabs = 1;
+-
+-        if (DEBUG_CODE_GENERATOR) System.out.println("genRule(" + s.getId() + ")");
+-        if (!s.isDefined()) {
+-            antlrTool.error("undefined rule: " + s.getId());
+-            return;
+-        }
+-
+-        // Generate rule return type, name, arguments
+-        RuleBlock rblk = s.getBlock();
+-
+-        currentRule = rblk;
+-        currentASTResult = s.getId();
+-
+-        // clear list of declared ast variables..
+-        declaredASTVariables.clear();
+-
+-        // Save the AST generation state, and set it to that of the rule
+-        boolean savegenAST = genAST;
+-        genAST = genAST && rblk.getAutoGen();
+-
+-        // boolean oldsaveTest = saveText;
+-        saveText = rblk.getAutoGen();
+-
+-        // print javadoc comment if any
+-        if (s.comment != null) {
+-            _println(s.comment);
+-        }
+-
+-        // Gen method access and final qualifier
+-        print(s.access + " final ");
+-
+-        // Gen method return type (note lexer return action set at rule creation)
+-        if (rblk.returnAction != null) {
+-            // Has specified return value
+-            _print(extractTypeOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + " ");
+-        }
+-        else {
+-            // No specified return value
+-            _print("void ");
+-        }
+-
+-        // Gen method name
+-        _print(s.getId() + "(");
+-
+-        // Additional rule parameters common to all rules for this grammar
+-        _print(commonExtraParams);
+-        if (commonExtraParams.length() != 0 && rblk.argAction != null) {
+-            _print(",");
+-        }
+-
+-        // Gen arguments
+-        if (rblk.argAction != null) {
+-            // Has specified arguments
+-            _println("");
+-            tabs++;
+-            println(rblk.argAction);
+-            tabs--;
+-            print(")");
+-        }
+-        else {
+-            // No specified arguments
+-            _print(")");
+-        }
+-
+-        // Gen throws clause and open curly
+-        _print(" throws " + exceptionThrown);
+-        if (grammar instanceof ParserGrammar) {
+-            _print(", TokenStreamException");
+-        }
+-        else if (grammar instanceof LexerGrammar) {
+-            _print(", CharStreamException, TokenStreamException");
+-        }
+-        // Add user-defined exceptions unless lexer (for now)
+-        if (rblk.throwsSpec != null) {
+-            if (grammar instanceof LexerGrammar) {
+-                antlrTool.error("user-defined throws spec not allowed (yet) for lexer rule " + rblk.ruleName);
+-            }
+-            else {
+-                _print(", " + rblk.throwsSpec);
+-            }
+-        }
+-
+-        _println(" {");
+-        tabs++;
+-
+-        // Convert return action to variable declaration
+-        if (rblk.returnAction != null)
+-            println(rblk.returnAction + ";");
+-
+-        // print out definitions needed by rules for various grammar types
+-        println(commonLocalVars);
+-
+-        if (grammar.traceRules) {
+-            if (grammar instanceof TreeWalkerGrammar) {
+-                println("traceIn(\"" + s.getId() + "\",_t);");
+-            }
+-            else {
+-                println("traceIn(\"" + s.getId() + "\");");
+-            }
+-        }
+-
+-        if (grammar instanceof LexerGrammar) {
+-            // lexer rule default return value is the rule's token name
+-            // This is a horrible hack to support the built-in EOF lexer rule.
+-            if (s.getId().equals("mEOF"))
+-                println("_ttype = Token.EOF_TYPE;");
+-            else
+-                println("_ttype = " + s.getId().substring(1) + ";");
+-            println("int _saveIndex;");		// used for element! (so we can kill text matched for element)
+-            /*
+-			 println("boolean old_saveConsumedInput=saveConsumedInput;");
+-			 if ( !rblk.getAutoGen() ) {		// turn off "save input" if ! on rule
+-			 println("saveConsumedInput=false;");
+-			 }
+-			 */
+-        }
+-
+-        // if debugging, write code to mark entry to the rule
+-        if (grammar.debuggingOutput)
+-            if (grammar instanceof ParserGrammar)
+-                println("fireEnterRule(" + ruleNum + ",0);");
+-            else if (grammar instanceof LexerGrammar)
+-                println("fireEnterRule(" + ruleNum + ",_ttype);");
+-
+-        // Generate trace code if desired
+-        if (grammar.debuggingOutput || grammar.traceRules) {
+-            println("try { // debugging");
+-            tabs++;
+-        }
+-
+-        // Initialize AST variables
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            // "Input" value for rule
+-	    println(labeledElementASTType + " " + s.getId() + "_AST_in = (_t == ASTNULL) ? null : (" + labeledElementASTType + ")_t;");
+-        }
+-        if (grammar.buildAST) {
+-            // Parser member used to pass AST returns from rule invocations
+-            println("returnAST = null;");
+-            // Tracks AST construction
+-            // println("ASTPair currentAST = (inputState.guessing==0) ? new ASTPair() : null;");
+-            println("ASTPair currentAST = new ASTPair();");
+-            // User-settable return value for rule.
+-            println(labeledElementASTType + " " + s.getId() + "_AST = null;");
+-        }
+-
+-        genBlockPreamble(rblk);
+-        genBlockInitAction(rblk);
+-        println("");
+-
+-        // Search for an unlabeled exception specification attached to the rule
+-        ExceptionSpec unlabeledUserSpec = rblk.findExceptionSpec("");
+-
+-        // Generate try block around the entire rule for  error handling
+-        if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) {
+-            println("try {      // for error handling");
+-            tabs++;
+-        }
+-
+-        // Generate the alternatives
+-        if (rblk.alternatives.size() == 1) {
+-            // One alternative -- use simple form
+-            Alternative alt = rblk.getAlternativeAt(0);
+-            String pred = alt.semPred;
+-            if (pred != null)
+-                genSemPred(pred, currentRule.line);
+-            if (alt.synPred != null) {
+-                antlrTool.warning(
+-                    "Syntactic predicate ignored for single alternative",
+-                    grammar.getFilename(),
+-                    alt.synPred.getLine(),
+-                    alt.synPred.getColumn()
+-                );
+-            }
+-            genAlt(alt, rblk);
+-        }
+-        else {
+-            // Multiple alternatives -- generate complex form
+-            boolean ok = grammar.theLLkAnalyzer.deterministic(rblk);
+-
+-            JavaBlockFinishingInfo howToFinish = genCommonBlock(rblk, false);
+-            genBlockFinish(howToFinish, throwNoViable);
+-        }
+-
+-        // Generate catch phrase for error handling
+-        if (unlabeledUserSpec != null || rblk.getDefaultErrorHandler()) {
+-            // Close the try block
+-            tabs--;
+-            println("}");
+-        }
+-
+-        // Generate user-defined or default catch phrases
+-        if (unlabeledUserSpec != null) {
+-            genErrorHandler(unlabeledUserSpec);
+-        }
+-        else if (rblk.getDefaultErrorHandler()) {
+-            // Generate default catch phrase
+-            println("catch (" + exceptionThrown + " ex) {");
+-            tabs++;
+-            // Generate code to handle error if not guessing
+-            if (grammar.hasSyntacticPredicate) {
+-                println("if (inputState.guessing==0) {");
+-                tabs++;
+-            }
+-            println("reportError(ex);");
+-            if (!(grammar instanceof TreeWalkerGrammar)) {
+-                // Generate code to consume until token in k==1 follow set
+-                Lookahead follow = grammar.theLLkAnalyzer.FOLLOW(1, rblk.endNode);
+-                String followSetName = getBitsetName(markBitsetForGen(follow.fset));
+-                println("consume();");
+-                println("consumeUntil(" + followSetName + ");");
+-            }
+-            else {
+-                // Just consume one token
+-                println("if (_t!=null) {_t = _t.getNextSibling();}");
+-            }
+-            if (grammar.hasSyntacticPredicate) {
+-                tabs--;
+-                // When guessing, rethrow exception
+-                println("} else {");
+-                println("  throw ex;");
+-                println("}");
+-            }
+-            // Close catch phrase
+-            tabs--;
+-            println("}");
+-        }
+-
+-        // Squirrel away the AST "return" value
+-        if (grammar.buildAST) {
+-            println("returnAST = " + s.getId() + "_AST;");
+-        }
+-
+-        // Set return tree value for tree walkers
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("_retTree = _t;");
+-        }
+-
+-        // Generate literals test for lexer rules so marked
+-        if (rblk.getTestLiterals()) {
+-            if (s.access.equals("protected")) {
+-                genLiteralsTestForPartialToken();
+-            }
+-            else {
+-                genLiteralsTest();
+-            }
+-        }
+-
+-        // if doing a lexer rule, dump code to create token if necessary
+-        if (grammar instanceof LexerGrammar) {
+-            println("if ( _createToken && _token==null && _ttype!=Token.SKIP ) {");
+-            println("	_token = makeToken(_ttype);");
+-            println("	_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));");
+-            println("}");
+-            println("_returnToken = _token;");
+-        }
+-
+-        // Gen the return statement if there is one (lexer has hard-wired return action)
+-        if (rblk.returnAction != null) {
+-            println("return " + extractIdOfAction(rblk.returnAction, rblk.getLine(), rblk.getColumn()) + ";");
+-        }
+-
+-        if (grammar.debuggingOutput || grammar.traceRules) {
+-            tabs--;
+-            println("} finally { // debugging");
+-            tabs++;
+-
+-            // If debugging, generate calls to mark exit of rule
+-            if (grammar.debuggingOutput)
+-                if (grammar instanceof ParserGrammar)
+-                    println("fireExitRule(" + ruleNum + ",0);");
+-                else if (grammar instanceof LexerGrammar)
+-                    println("fireExitRule(" + ruleNum + ",_ttype);");
+-
+-            if (grammar.traceRules) {
+-                if (grammar instanceof TreeWalkerGrammar) {
+-                    println("traceOut(\"" + s.getId() + "\",_t);");
+-                }
+-                else {
+-                    println("traceOut(\"" + s.getId() + "\");");
+-                }
+-            }
+-
+-            tabs--;
+-            println("}");
+-        }
+-
+-        tabs--;
+-        println("}");
+-        println("");
+-
+-        // Restore the AST generation state
+-        genAST = savegenAST;
+-
+-        // restore char save state
+-        // saveText = oldsaveTest;
+-    }
+-
+-    private void GenRuleInvocation(RuleRefElement rr) {
+-        // dump rule name
+-        _print(rr.targetRule + "(");
+-
+-        // lexers must tell rule if it should set _returnToken
+-        if (grammar instanceof LexerGrammar) {
+-            // if labeled, could access Token, so tell rule to create
+-            if (rr.getLabel() != null) {
+-                _print("true");
+-            }
+-            else {
+-                _print("false");
+-            }
+-            if (commonExtraArgs.length() != 0 || rr.args != null) {
+-                _print(",");
+-            }
+-        }
+-
+-        // Extra arguments common to all rules for this grammar
+-        _print(commonExtraArgs);
+-        if (commonExtraArgs.length() != 0 && rr.args != null) {
+-            _print(",");
+-        }
+-
+-        // Process arguments to method, if any
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-        if (rr.args != null) {
+-            // When not guessing, execute user arg action
+-            ActionTransInfo tInfo = new ActionTransInfo();
+-            String args = processActionForSpecialSymbols(rr.args, 0, currentRule, tInfo);
+-            if (tInfo.assignToRoot || tInfo.refRuleRoot != null) {
+-                antlrTool.error("Arguments of rule reference '" + rr.targetRule + "' cannot set or ref #" +
+-                           currentRule.getRuleName(), grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            }
+-            _print(args);
+-
+-            // Warn if the rule accepts no arguments
+-            if (rs.block.argAction == null) {
+-                antlrTool.warning("Rule '" + rr.targetRule + "' accepts no arguments", grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            }
+-        }
+-        else {
+-            // For C++, no warning if rule has parameters, because there may be default
+-            // values for all of the parameters
+-            if (rs.block.argAction != null) {
+-                antlrTool.warning("Missing parameters on reference to rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            }
+-        }
+-        _println(");");
+-
+-        // move down to the first child while parsing
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("_t = _retTree;");
+-        }
+-    }
+-
+-    protected void genSemPred(String pred, int line) {
+-        // translate $ and # references
+-        ActionTransInfo tInfo = new ActionTransInfo();
+-        pred = processActionForSpecialSymbols(pred, line, currentRule, tInfo);
+-        // ignore translation info...we don't need to do anything with it.
+-        String escapedPred = charFormatter.escapeString(pred);
+-
+-        // if debugging, wrap the semantic predicate evaluation in a method
+-        // that can tell SemanticPredicateListeners the result
+-        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) || (grammar instanceof LexerGrammar)))
+-            pred = "fireSemanticPredicateEvaluated(persistence.antlr.debug.SemanticPredicateEvent.VALIDATING,"
+-                + addSemPred(escapedPred) + "," + pred + ")";
+-        println("if (!(" + pred + "))");
+-        println("  throw new SemanticException(\"" + escapedPred + "\");");
+-    }
+-
+-    /** Write an array of Strings which are the semantic predicate
+-     *  expressions.  The debugger will reference them by number only
+-     */
+-    protected void genSemPredMap() {
+-        Enumeration e = semPreds.elements();
+-        println("private String _semPredNames[] = {");
+-        while (e.hasMoreElements())
+-            println("\"" + e.nextElement() + "\",");
+-        println("};");
+-    }
+-
+-    protected void genSynPred(SynPredBlock blk, String lookaheadExpr) {
+-        if (DEBUG_CODE_GENERATOR) System.out.println("gen=>(" + blk + ")");
+-
+-        // Dump synpred result variable
+-        println("boolean synPredMatched" + blk.ID + " = false;");
+-        // Gen normal lookahead test
+-        println("if (" + lookaheadExpr + ") {");
+-        tabs++;
+-
+-        // Save input state
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("AST __t" + blk.ID + " = _t;");
+-        }
+-        else {
+-            println("int _m" + blk.ID + " = mark();");
+-        }
+-
+-        // Once inside the try, assume synpred works unless exception caught
+-        println("synPredMatched" + blk.ID + " = true;");
+-        println("inputState.guessing++;");
+-
+-        // if debugging, tell listeners that a synpred has started
+-        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-            (grammar instanceof LexerGrammar))) {
+-            println("fireSyntacticPredicateStarted();");
+-        }
+-
+-        syntacticPredLevel++;
+-        println("try {");
+-        tabs++;
+-        gen((AlternativeBlock)blk);		// gen code to test predicate
+-        tabs--;
+-        //println("System.out.println(\"pred "+blk+" succeeded\");");
+-        println("}");
+-        println("catch (" + exceptionThrown + " pe) {");
+-        tabs++;
+-        println("synPredMatched" + blk.ID + " = false;");
+-        //println("System.out.println(\"pred "+blk+" failed\");");
+-        tabs--;
+-        println("}");
+-
+-        // Restore input state
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            println("_t = __t" + blk.ID + ";");
+-        }
+-        else {
+-            println("rewind(_m" + blk.ID + ");");
+-        }
+-
+-        println("inputState.guessing--;");
+-
+-        // if debugging, tell listeners how the synpred turned out
+-        if (grammar.debuggingOutput && ((grammar instanceof ParserGrammar) ||
+-            (grammar instanceof LexerGrammar))) {
+-            println("if (synPredMatched" + blk.ID + ")");
+-            println("  fireSyntacticPredicateSucceeded();");
+-            println("else");
+-            println("  fireSyntacticPredicateFailed();");
+-        }
+-
+-        syntacticPredLevel--;
+-        tabs--;
+-
+-        // Close lookahead test
+-        println("}");
+-
+-        // Test synred result
+-        println("if ( synPredMatched" + blk.ID + " ) {");
+-    }
+-
+-    /** Generate a static array containing the names of the tokens,
+-     * indexed by the token type values.  This static array is used
+-     * to format error messages so that the token identifers or literal
+-     * strings are displayed instead of the token numbers.
+-     *
+-     * If a lexical rule has a paraphrase, use it rather than the
+-     * token label.
+-     */
+-    public void genTokenStrings() {
+-        // Generate a string for each token.  This creates a static
+-        // array of Strings indexed by token type.
+-        println("");
+-        println("public static final String[] _tokenNames = {");
+-        tabs++;
+-
+-        // Walk the token vocabulary and generate a Vector of strings
+-        // from the tokens.
+-        Vector v = grammar.tokenManager.getVocabulary();
+-        for (int i = 0; i < v.size(); i++) {
+-            String s = (String)v.elementAt(i);
+-            if (s == null) {
+-                s = "<" + String.valueOf(i) + ">";
+-            }
+-            if (!s.startsWith("\"") && !s.startsWith("<")) {
+-                TokenSymbol ts = (TokenSymbol)grammar.tokenManager.getTokenSymbol(s);
+-                if (ts != null && ts.getParaphrase() != null) {
+-                    s = StringUtils.stripFrontBack(ts.getParaphrase(), "\"", "\"");
+-                }
+-            }
+-            print(charFormatter.literalString(s));
+-            if (i != v.size() - 1) {
+-                _print(",");
+-            }
+-            _println("");
+-        }
+-
+-        // Close the string array initailizer
+-        tabs--;
+-        println("};");
+-    }
+-
+-	/** Create and set Integer token type objects that map
+-	 *  to Java Class objects (which AST node to create).
+-     */
+-    protected void genTokenASTNodeMap() {
+-		println("");
+-		println("protected void buildTokenTypeASTClassMap() {");
+-        // Generate a map.put("T","TNode") for each token
+-		// if heterogeneous node known for that token T.
+-        tabs++;
+-		boolean generatedNewHashtable = false;
+-		int n = 0;
+-        // Walk the token vocabulary and generate puts.
+-		Vector v = grammar.tokenManager.getVocabulary();
+-		for (int i = 0; i < v.size(); i++) {
+-			String s = (String)v.elementAt(i);
+-			if (s != null) {
+-				TokenSymbol ts = grammar.tokenManager.getTokenSymbol(s);
+-				if (ts != null && ts.getASTNodeType() != null) {
+-					n++;
+-					if ( !generatedNewHashtable ) {
+-						// only generate if we are going to add a mapping
+-						println("tokenTypeToASTClassMap = new Hashtable();");
+-						generatedNewHashtable = true;
+-					}
+-					println("tokenTypeToASTClassMap.put(new Integer("+ts.getTokenType()+"), "+
+-							ts.getASTNodeType()+".class);");
+-				}
+-			}
+-		}
+-
+-        if ( n==0 ) {
+-			println("tokenTypeToASTClassMap=null;");
+-		}
+-        tabs--;
+-        println("};");
+-    }
+-
+-    /** Generate the token types Java file */
+-    protected void genTokenTypes(TokenManager tm) throws IOException {
+-        // Open the token output Java file and set the currentOutput stream
+-        // SAS: file open was moved to a method so a subclass can override
+-        //      This was mainly for the VAJ interface
+-        setupOutput(tm.getName() + TokenTypesFileSuffix);
+-
+-        tabs = 0;
+-
+-        // Generate the header common to all Java files
+-        genHeader();
+-        // Do not use printAction because we assume tabs==0
+-        println(behavior.getHeaderAction(""));
+-
+-        // Encapsulate the definitions in an interface.  This can be done
+-        // because they are all constants.
+-        println("public interface " + tm.getName() + TokenTypesFileSuffix + " {");
+-        tabs++;
+-
+-        // Generate a definition for each token type
+-        Vector v = tm.getVocabulary();
+-
+-        // Do special tokens manually
+-        println("int EOF = " + Token.EOF_TYPE + ";");
+-        println("int NULL_TREE_LOOKAHEAD = " + Token.NULL_TREE_LOOKAHEAD + ";");
+-
+-        for (int i = Token.MIN_USER_TYPE; i < v.size(); i++) {
+-            String s = (String)v.elementAt(i);
+-            if (s != null) {
+-                if (s.startsWith("\"")) {
+-                    // a string literal
+-                    StringLiteralSymbol sl = (StringLiteralSymbol)tm.getTokenSymbol(s);
+-                    if (sl == null) {
+-                        antlrTool.panic("String literal " + s + " not in symbol table");
+-                    }
+-                    else if (sl.label != null) {
+-                        println("int " + sl.label + " = " + i + ";");
+-                    }
+-                    else {
+-                        String mangledName = mangleLiteral(s);
+-                        if (mangledName != null) {
+-                            // We were able to create a meaningful mangled token name
+-                            println("int " + mangledName + " = " + i + ";");
+-                            // if no label specified, make the label equal to the mangled name
+-                            sl.label = mangledName;
+-                        }
+-                        else {
+-                            println("// " + s + " = " + i);
+-                        }
+-                    }
+-                }
+-                else if (!s.startsWith("<")) {
+-                    println("int " + s + " = " + i + ";");
+-                }
+-            }
+-        }
+-
+-        // Close the interface
+-        tabs--;
+-        println("}");
+-
+-        // Close the tokens output file
+-        currentOutput.close();
+-        currentOutput = null;
+-        exitIfError();
+-    }
+-
+-    /** Get a string for an expression to generate creation of an AST subtree.
+-     * @param v A Vector of String, where each element is an expression in the target language yielding an AST node.
+-     */
+-    public String getASTCreateString(Vector v) {
+-        if (v.size() == 0) {
+-            return "";
+-        }
+-        StringBuffer buf = new StringBuffer();
+-        buf.append("(" + labeledElementASTType +
+-                   ")astFactory.make( (new ASTArray(" + v.size() +
+-                   "))");
+-        for (int i = 0; i < v.size(); i++) {
+-            buf.append(".add(" + v.elementAt(i) + ")");
+-        }
+-        buf.append(")");
+-        return buf.toString();
+-    }
+-
+-    /** Get a string for an expression to generate creating of an AST node
+-     * @param atom The grammar node for which you are creating the node
+-     * @param str The arguments to the AST constructor
+-     */
+-    public String getASTCreateString(GrammarAtom atom, String astCtorArgs) {
+-		//System.out.println("getASTCreateString("+atom+","+astCtorArgs+")");
+-		if (atom != null && atom.getASTNodeType() != null) {
+-			// they specified a type either on the reference or in tokens{} section
+-			return "("+atom.getASTNodeType()+")"+
+-					"astFactory.create("+astCtorArgs+",\""+atom.getASTNodeType()+"\")";
+-        }
+-        else {
+-			// must be an action or something since not referencing an atom
+-            return getASTCreateString(astCtorArgs);
+-        }
+-    }
+-
+-    /** Get a string for an expression to generate creating of an AST node.
+-	 *  Parse the first (possibly only) argument looking for the token type.
+-	 *  If the token type is a valid token symbol, ask for it's AST node type
+-	 *  and add to the end if only 2 arguments.  The forms are #[T], #[T,"t"],
+-	 *  and as of 2.7.2 #[T,"t",ASTclassname].
+-	 *
+-     * @param str The arguments to the AST constructor
+-     */
+-    public String getASTCreateString(String astCtorArgs) {
+-        //System.out.println("AST CTOR: "+astCtorArgs);
+-		if ( astCtorArgs==null ) {
+-			astCtorArgs = "";
+-		}
+-		int nCommas = 0;
+-		for (int i=0; i<astCtorArgs.length(); i++) {
+-			if ( astCtorArgs.charAt(i)==',' ) {
+-				nCommas++;
+-			}
+-		}
+-		//System.out.println("num commas="+nCommas);
+-		if ( nCommas<2 ) { // if 1 or 2 args
+-			int firstComma = astCtorArgs.indexOf(',');
+-			int lastComma = astCtorArgs.lastIndexOf(',');
+-			String tokenName = astCtorArgs;
+-			if ( nCommas>0 ) {
+-				tokenName = astCtorArgs.substring(0,firstComma);
+-			}
+-			//System.out.println("Checking for ast node type of "+tokenName);
+-			TokenSymbol ts = grammar.tokenManager.getTokenSymbol(tokenName);
+-			if ( ts!=null ) {
+-				String astNodeType = ts.getASTNodeType();
+-				//System.out.println("node type of "+tokenName+" is "+astNodeType);
+-				String emptyText = "";
+-				if ( nCommas==0 ) {
+-					// need to add 2nd arg of blank text for token text
+-					emptyText = ",\"\"";
+-				}
+-				if ( astNodeType!=null ) {
+-					return "("+astNodeType+")"+
+-							"astFactory.create("+astCtorArgs+emptyText+",\""+astNodeType+"\")";
+-				}
+-                // fall through and just do a regular create with cast on front
+-                // if necessary (it differs from default "AST").
+-			}
+-			if ( labeledElementASTType.equals("AST") ) {
+-				return "astFactory.create("+astCtorArgs+")";
+-			}
+-			return "("+labeledElementASTType+")"+
+-					"astFactory.create("+astCtorArgs+")";
+-		}
+-		// create default type or (since 2.7.2) 3rd arg is classname
+-		return "(" + labeledElementASTType + ")astFactory.create(" + astCtorArgs + ")";
+-    }
+-
+-    protected String getLookaheadTestExpression(Lookahead[] look, int k) {
+-        StringBuffer e = new StringBuffer(100);
+-        boolean first = true;
+-
+-        e.append("(");
+-        for (int i = 1; i <= k; i++) {
+-            BitSet p = look[i].fset;
+-            if (!first) {
+-                e.append(") && (");
+-            }
+-            first = false;
+-
+-            // Syn preds can yield <end-of-syn-pred> (epsilon) lookahead.
+-            // There is no way to predict what that token would be.  Just
+-            // allow anything instead.
+-            if (look[i].containsEpsilon()) {
+-                e.append("true");
+-            }
+-            else {
+-                e.append(getLookaheadTestTerm(i, p));
+-            }
+-        }
+-        e.append(")");
+-
+-        return e.toString();
+-    }
+-
+-    /**Generate a lookahead test expression for an alternate.  This
+-     * will be a series of tests joined by '&&' and enclosed by '()',
+-     * the number of such tests being determined by the depth of the lookahead.
+-     */
+-    protected String getLookaheadTestExpression(Alternative alt, int maxDepth) {
+-        int depth = alt.lookaheadDepth;
+-        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
+-            // if the decision is nondeterministic, do the best we can: LL(k)
+-            // any predicates that are around will be generated later.
+-            depth = grammar.maxk;
+-        }
+-
+-        if (maxDepth == 0) {
+-            // empty lookahead can result from alt with sem pred
+-            // that can see end of token.  E.g., A : {pred}? ('a')? ;
+-            return "( true )";
+-        }
+-
+-        return "(" + getLookaheadTestExpression(alt.cache, depth) + ")";
+-    }
+-
+-    /**Generate a depth==1 lookahead test expression given the BitSet.
+-     * This may be one of:
+-     * 1) a series of 'x==X||' tests
+-     * 2) a range test using >= && <= where possible,
+-     * 3) a bitset membership test for complex comparisons
+-     * @param k The lookahead level
+-     * @param p The lookahead set for level k
+-     */
+-    protected String getLookaheadTestTerm(int k, BitSet p) {
+-        // Determine the name of the item to be compared
+-        String ts = lookaheadString(k);
+-
+-        // Generate a range expression if possible
+-        int[] elems = p.toArray();
+-        if (elementsAreRange(elems)) {
+-            return getRangeExpression(k, elems);
+-        }
+-
+-        // Generate a bitset membership test if possible
+-        StringBuffer e;
+-        int degree = p.degree();
+-        if (degree == 0) {
+-            return "true";
+-        }
+-
+-        if (degree >= bitsetTestThreshold) {
+-            int bitsetIdx = markBitsetForGen(p);
+-            return getBitsetName(bitsetIdx) + ".member(" + ts + ")";
+-        }
+-
+-        // Otherwise, generate the long-winded series of "x==X||" tests
+-        e = new StringBuffer();
+-        for (int i = 0; i < elems.length; i++) {
+-            // Get the compared-to item (token or character value)
+-            String cs = getValueString(elems[i]);
+-
+-            // Generate the element comparison
+-            if (i > 0) e.append("||");
+-            e.append(ts);
+-            e.append("==");
+-            e.append(cs);
+-        }
+-        return e.toString();
+-    }
+-
+-    /** Return an expression for testing a contiguous renage of elements
+-     * @param k The lookahead level
+-     * @param elems The elements representing the set, usually from BitSet.toArray().
+-     * @return String containing test expression.
+-     */
+-    public String getRangeExpression(int k, int[] elems) {
+-        if (!elementsAreRange(elems)) {
+-            antlrTool.panic("getRangeExpression called with non-range");
+-        }
+-        int begin = elems[0];
+-        int end = elems[elems.length - 1];
+-        return
+-            "(" + lookaheadString(k) + " >= " + getValueString(begin) + " && " +
+-            lookaheadString(k) + " <= " + getValueString(end) + ")";
+-    }
+-
+-    /** getValueString: get a string representation of a token or char value
+-     * @param value The token or char value
+-     */
+-    private String getValueString(int value) {
+-        String cs;
+-        if (grammar instanceof LexerGrammar) {
+-            cs = charFormatter.literalChar(value);
+-        }
+-        else {
+-            TokenSymbol ts = grammar.tokenManager.getTokenSymbolAt(value);
+-            if (ts == null) {
+-                return "" + value; // return token type as string
+-                // tool.panic("vocabulary for token type " + value + " is null");
+-            }
+-            String tId = ts.getId();
+-            if (ts instanceof StringLiteralSymbol) {
+-                // if string literal, use predefined label if any
+-                // if no predefined, try to mangle into LITERAL_xxx.
+-                // if can't mangle, use int value as last resort
+-                StringLiteralSymbol sl = (StringLiteralSymbol)ts;
+-                String label = sl.getLabel();
+-                if (label != null) {
+-                    cs = label;
+-                }
+-                else {
+-                    cs = mangleLiteral(tId);
+-                    if (cs == null) {
+-                        cs = String.valueOf(value);
+-                    }
+-                }
+-            }
+-            else {
+-                cs = tId;
+-            }
+-        }
+-        return cs;
+-    }
+-
+-    /**Is the lookahead for this alt empty? */
+-    protected boolean lookaheadIsEmpty(Alternative alt, int maxDepth) {
+-        int depth = alt.lookaheadDepth;
+-        if (depth == GrammarAnalyzer.NONDETERMINISTIC) {
+-            depth = grammar.maxk;
+-        }
+-        for (int i = 1; i <= depth && i <= maxDepth; i++) {
+-            BitSet p = alt.cache[i].fset;
+-            if (p.degree() != 0) {
+-                return false;
+-            }
+-        }
+-        return true;
+-    }
+-
+-    private String lookaheadString(int k) {
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            return "_t.getType()";
+-        }
+-        return "LA(" + k + ")";
+-    }
+-
+-    /** Mangle a string literal into a meaningful token name.  This is
+-     * only possible for literals that are all characters.  The resulting
+-     * mangled literal name is literalsPrefix with the text of the literal
+-     * appended.
+-     * @return A string representing the mangled literal, or null if not possible.
+-     */
+-    private String mangleLiteral(String s) {
+-        String mangled = antlrTool.literalsPrefix;
+-        for (int i = 1; i < s.length() - 1; i++) {
+-            if (!Character.isLetter(s.charAt(i)) &&
+-                s.charAt(i) != '_') {
+-                return null;
+-            }
+-            mangled += s.charAt(i);
+-        }
+-        if (antlrTool.upperCaseMangledLiterals) {
+-            mangled = mangled.toUpperCase();
+-        }
+-        return mangled;
+-    }
+-
+-    /** Map an identifier to it's corresponding tree-node variable.
+-     * This is context-sensitive, depending on the rule and alternative
+-     * being generated
+-     * @param idParam The identifier name to map
+-     * @return The mapped id (which may be the same as the input), or null if the mapping is invalid due to duplicates
+-     */
+-    public String mapTreeId(String idParam, ActionTransInfo transInfo) {
+-        // if not in an action of a rule, nothing to map.
+-        if (currentRule == null) return idParam;
+-
+-        boolean in_var = false;
+-        String id = idParam;
+-        if (grammar instanceof TreeWalkerGrammar) {
+-            if (!grammar.buildAST) {
+-                in_var = true;
+-            }
+-            // If the id ends with "_in", then map it to the input variable
+-            else if (id.length() > 3 && id.lastIndexOf("_in") == id.length() - 3) {
+-                // Strip off the "_in"
+-                id = id.substring(0, id.length() - 3);
+-                in_var = true;
+-            }
+-        }
+-
+-        // Check the rule labels.  If id is a label, then the output
+-        // variable is label_AST, and the input variable is plain label.
+-        for (int i = 0; i < currentRule.labeledElements.size(); i++) {
+-            AlternativeElement elt = (AlternativeElement)currentRule.labeledElements.elementAt(i);
+-            if (elt.getLabel().equals(id)) {
+-                return in_var ? id : id + "_AST";
+-            }
+-        }
+-
+-        // Failing that, check the id-to-variable map for the alternative.
+-        // If the id is in the map, then output variable is the name in the
+-        // map, and input variable is name_in
+-        String s = (String)treeVariableMap.get(id);
+-        if (s != null) {
+-            if (s == NONUNIQUE) {
+-                // There is more than one element with this id
+-				antlrTool.error("Ambiguous reference to AST element "+id+
+-								" in rule "+currentRule.getRuleName());
+-
+-                return null;
+-            }
+-            else if (s.equals(currentRule.getRuleName())) {
+-                // a recursive call to the enclosing rule is
+-                // ambiguous with the rule itself.
+-				antlrTool.error("Ambiguous reference to AST element "+id+
+-								" in rule "+currentRule.getRuleName());
+-                return null;
+-            }
+-            else {
+-                return in_var ? s + "_in" : s;
+-            }
+-        }
+-
+-        // Failing that, check the rule name itself.  Output variable
+-        // is rule_AST; input variable is rule_AST_in (treeparsers).
+-        if (id.equals(currentRule.getRuleName())) {
+-            String r = in_var ? id + "_AST_in" : id + "_AST";
+-            if (transInfo != null) {
+-                if (!in_var) {
+-                    transInfo.refRuleRoot = r;
+-                }
+-            }
+-            return r;
+-        }
+-        else {
+-            // id does not map to anything -- return itself.
+-            return id;
+-        }
+-    }
+-
+-    /** Given an element and the name of an associated AST variable,
+-     * create a mapping between the element "name" and the variable name.
+-     */
+-    private void mapTreeVariable(AlternativeElement e, String name) {
+-        // For tree elements, defer to the root
+-        if (e instanceof TreeElement) {
+-            mapTreeVariable(((TreeElement)e).root, name);
+-            return;
+-        }
+-
+-        // Determine the name of the element, if any, for mapping purposes
+-        String elName = null;
+-
+-        // Don't map labeled items
+-        if (e.getLabel() == null) {
+-            if (e instanceof TokenRefElement) {
+-                // use the token id
+-                elName = ((TokenRefElement)e).atomText;
+-            }
+-            else if (e instanceof RuleRefElement) {
+-                // use the rule name
+-                elName = ((RuleRefElement)e).targetRule;
+-            }
+-        }
+-        // Add the element to the tree variable map if it has a name
+-        if (elName != null) {
+-            if (treeVariableMap.get(elName) != null) {
+-                // Name is already in the map -- mark it as duplicate
+-                treeVariableMap.remove(elName);
+-                treeVariableMap.put(elName, NONUNIQUE);
+-            }
+-            else {
+-                treeVariableMap.put(elName, name);
+-            }
+-        }
+-    }
+-
+-    /** Lexically process $var and tree-specifiers in the action.
+-     *  This will replace #id and #(...) with the appropriate
+-     *  function calls and/or variables etc...
+-     */
+-    protected String processActionForSpecialSymbols(String actionStr,
+-                                                    int line,
+-                                                    RuleBlock currentRule,
+-                                                    ActionTransInfo tInfo) {
+-        if (actionStr == null || actionStr.length() == 0) return null;
+-
+-        // The action trans info tells us (at the moment) whether an
+-        // assignment was done to the rule's tree root.
+-        if (grammar == null)
+-            return actionStr;
+-
+-        // see if we have anything to do...
+-        if ((grammar.buildAST && actionStr.indexOf('#') != -1) ||
+-            grammar instanceof TreeWalkerGrammar ||
+-            ((grammar instanceof LexerGrammar ||
+-            grammar instanceof ParserGrammar)
+-            && actionStr.indexOf('$') != -1)) {
+-            // Create a lexer to read an action and return the translated version
+-            persistence.antlr.actions.java.ActionLexer lexer =
+-					new persistence.antlr.actions.java.ActionLexer(actionStr,
+-													   currentRule,
+-													   this,
+-													   tInfo);
+-
+-            lexer.setLineOffset(line);
+-            lexer.setFilename(grammar.getFilename());
+-            lexer.setTool(antlrTool);
+-
+-            try {
+-                lexer.mACTION(true);
+-                actionStr = lexer.getTokenObject().getText();
+-                // System.out.println("action translated: "+actionStr);
+-					// System.out.println("trans info is "+tInfo);
+-            }
+-            catch (RecognitionException ex) {
+-                lexer.reportError(ex);
+-                return actionStr;
+-            }
+-            catch (TokenStreamException tex) {
+-                antlrTool.panic("Error reading action:" + actionStr);
+-                return actionStr;
+-            }
+-            catch (CharStreamException io) {
+-                antlrTool.panic("Error reading action:" + actionStr);
+-                return actionStr;
+-            }
+-        }
+-        return actionStr;
+-    }
+-
+-    private void setupGrammarParameters(Grammar g) {
+-        if (g instanceof ParserGrammar) {
+-            labeledElementASTType = "AST";
+-            if (g.hasOption("ASTLabelType")) {
+-                Token tsuffix = g.getOption("ASTLabelType");
+-                if (tsuffix != null) {
+-                    String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-                    if (suffix != null) {
+-                        labeledElementASTType = suffix;
+-                    }
+-                }
+-            }
+-            labeledElementType = "Token ";
+-            labeledElementInit = "null";
+-            commonExtraArgs = "";
+-            commonExtraParams = "";
+-            commonLocalVars = "";
+-            lt1Value = "LT(1)";
+-            exceptionThrown = "RecognitionException";
+-            throwNoViable = "throw new NoViableAltException(LT(1), getFilename());";
+-        }
+-        else if (g instanceof LexerGrammar) {
+-            labeledElementType = "char ";
+-            labeledElementInit = "'\\0'";
+-            commonExtraArgs = "";
+-            commonExtraParams = "boolean _createToken";
+-            commonLocalVars = "int _ttype; Token _token=null; int _begin=text.length();";
+-            lt1Value = "LA(1)";
+-            exceptionThrown = "RecognitionException";
+-            throwNoViable = "throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());";
+-        }
+-        else if (g instanceof TreeWalkerGrammar) {
+-            labeledElementASTType = "AST";
+-            labeledElementType = "AST";
+-            if (g.hasOption("ASTLabelType")) {
+-                Token tsuffix = g.getOption("ASTLabelType");
+-                if (tsuffix != null) {
+-                    String suffix = StringUtils.stripFrontBack(tsuffix.getText(), "\"", "\"");
+-                    if (suffix != null) {
+-                        labeledElementASTType = suffix;
+-                        labeledElementType = suffix;
+-                    }
+-                }
+-            }
+-            if (!g.hasOption("ASTLabelType")) {
+-                g.setOption("ASTLabelType", new Token(ANTLRTokenTypes.STRING_LITERAL, "AST"));
+-            }
+-            labeledElementInit = "null";
+-            commonExtraArgs = "_t";
+-            commonExtraParams = "AST _t";
+-            commonLocalVars = "";
+-            lt1Value = "(" + labeledElementASTType + ")_t";
+-            exceptionThrown = "RecognitionException";
+-            throwNoViable = "throw new NoViableAltException(_t);";
+-        }
+-        else {
+-            antlrTool.panic("Unknown grammar type");
+-        }
+-    }
+-
+-    /** This method exists so a subclass, namely VAJCodeGenerator,
+-     *  can open the file in its own evil way.  JavaCodeGenerator
+-     *  simply opens a text file...
+-     */
+-    public void setupOutput(String className) throws IOException {
+-        currentOutput = antlrTool.openOutputFile(className + ".java");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/LexerGrammar.java glassfish-gil/entity-persistence/src/java/persistence/antlr/LexerGrammar.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/LexerGrammar.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/LexerGrammar.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,168 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import java.io.IOException;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-/** Lexer-specific grammar subclass */
+-class LexerGrammar extends Grammar {
+-    // character set used by lexer
+-    protected BitSet charVocabulary;
+-    // true if the lexer generates literal testing code for nextToken
+-    protected boolean testLiterals = true;
+-    // true if the lexer generates case-sensitive LA(k) testing
+-    protected boolean caseSensitiveLiterals = true;
+-    /** true if the lexer generates case-sensitive literals testing */
+-    protected boolean caseSensitive = true;
+-    /** true if lexer is to ignore all unrecognized tokens */
+-    protected boolean filterMode = false;
+-
+-    /** if filterMode is true, then filterRule can indicate an optional
+-     *  rule to use as the scarf language.  If null, programmer used
+-     *  plain "filter=true" not "filter=rule".
+-     */
+-    protected String filterRule = null;
+-
+-    LexerGrammar(String className_, Tool tool_, String superClass) {
+-        super(className_, tool_, superClass);
+-        charVocabulary = new BitSet();
+-
+-        // Lexer usually has no default error handling
+-        defaultErrorHandler = false;
+-    }
+-
+-    /** Top-level call to generate the code	 */
+-    public void generate() throws IOException {
+-        generator.gen(this);
+-    }
+-
+-    public String getSuperClass() {
+-        // If debugging, use debugger version of scanner
+-        if (debuggingOutput)
+-            return "debug.DebuggingCharScanner";
+-        return "CharScanner";
+-    }
+-
+-    // Get the testLiterals option value
+-    public boolean getTestLiterals() {
+-        return testLiterals;
+-    }
+-
+-    /**Process command line arguments.
+-     * -trace			have all rules call traceIn/traceOut
+-     * -traceLexer		have lexical rules call traceIn/traceOut
+-     * -debug			generate debugging output for parser debugger
+-     */
+-    public void processArguments(String[] args) {
+-        for (int i = 0; i < args.length; i++) {
+-            if (args[i].equals("-trace")) {
+-                traceRules = true;
+-                antlrTool.setArgOK(i);
+-            }
+-            else if (args[i].equals("-traceLexer")) {
+-                traceRules = true;
+-                antlrTool.setArgOK(i);
+-            }
+-            else if (args[i].equals("-debug")) {
+-                debuggingOutput = true;
+-                antlrTool.setArgOK(i);
+-            }
+-        }
+-    }
+-
+-    /** Set the character vocabulary used by the lexer */
+-    public void setCharVocabulary(BitSet b) {
+-        charVocabulary = b;
+-    }
+-
+-    /** Set lexer options */
+-    public boolean setOption(String key, Token value) {
+-        String s = value.getText();
+-        if (key.equals("buildAST")) {
+-            antlrTool.warning("buildAST option is not valid for lexer", getFilename(), value.getLine(), value.getColumn());
+-            return true;
+-        }
+-        if (key.equals("testLiterals")) {
+-            if (s.equals("true")) {
+-                testLiterals = true;
+-            }
+-            else if (s.equals("false")) {
+-                testLiterals = false;
+-            }
+-            else {
+-                antlrTool.warning("testLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("interactive")) {
+-            if (s.equals("true")) {
+-                interactive = true;
+-            }
+-            else if (s.equals("false")) {
+-                interactive = false;
+-            }
+-            else {
+-                antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("caseSensitive")) {
+-            if (s.equals("true")) {
+-                caseSensitive = true;
+-            }
+-            else if (s.equals("false")) {
+-                caseSensitive = false;
+-            }
+-            else {
+-                antlrTool.warning("caseSensitive option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("caseSensitiveLiterals")) {
+-            if (s.equals("true")) {
+-                caseSensitiveLiterals = true;
+-            }
+-            else if (s.equals("false")) {
+-                caseSensitiveLiterals = false;
+-            }
+-            else {
+-                antlrTool.warning("caseSensitiveLiterals option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("filter")) {
+-            if (s.equals("true")) {
+-                filterMode = true;
+-            }
+-            else if (s.equals("false")) {
+-                filterMode = false;
+-            }
+-            else if (value.getType() == ANTLRTokenTypes.TOKEN_REF) {
+-                filterMode = true;
+-                filterRule = s;
+-            }
+-            else {
+-                antlrTool.warning("filter option must be true, false, or a lexer rule name", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("longestPossible")) {
+-            antlrTool.warning("longestPossible option has been deprecated; ignoring it...", getFilename(), value.getLine(), value.getColumn());
+-            return true;
+-        }
+-        if (super.setOption(key, value)) {
+-            return true;
+-        }
+-        antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn());
+-        return false;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/LexerSharedInputState.java glassfish-gil/entity-persistence/src/java/persistence/antlr/LexerSharedInputState.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/LexerSharedInputState.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/LexerSharedInputState.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,50 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.Reader;
+-import java.io.InputStream;
+-
+-/** This object contains the data associated with an
+- *  input stream of characters.  Multiple lexers
+- *  share a single LexerSharedInputState to lex
+- *  the same input stream.
+- */
+-public class LexerSharedInputState {
+-    protected int column = 1;
+-    protected int line = 1;
+-    protected int tokenStartColumn = 1;
+-    protected int tokenStartLine = 1;
+-    protected InputBuffer input;
+-
+-    /** What file (if known) caused the problem? */
+-    protected String filename;
+-
+-    public int guessing = 0;
+-
+-    public LexerSharedInputState(InputBuffer inbuf) {
+-        input = inbuf;
+-    }
+-
+-    public LexerSharedInputState(InputStream in) {
+-        this(new ByteBuffer(in));
+-    }
+-
+-    public LexerSharedInputState(Reader in) {
+-        this(new CharBuffer(in));
+-    }
+-
+-    public void reset() {
+-        column = 1;
+-        line = 1;
+-        tokenStartColumn = 1;
+-        tokenStartLine = 1;
+-        guessing = 0;
+-        filename = null;
+-        input.reset();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/LLkAnalyzer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/LLkAnalyzer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/LLkAnalyzer.java	2006-08-31 00:34:07.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/LLkAnalyzer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,1094 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-/**A linear-approximate LL(k) grammar analzyer.
+- *
+- * All lookahead elements are sets of token types.
+- *
+- * @author  Terence Parr, John Lilley
+- * @see     persistence.antlr.Grammar
+- * @see     persistence.antlr.Lookahead
+- */
+-public class LLkAnalyzer implements LLkGrammarAnalyzer {
+-    // Set "analyzerDebug" to true
+-    public boolean DEBUG_ANALYZER = false;
+-    private AlternativeBlock currentBlock;
+-    protected Tool tool = null;
+-    protected Grammar grammar = null;
+-    // True if analyzing a lexical grammar
+-    protected boolean lexicalAnalysis = false;
+-    // Used for formatting bit sets in default (Java) format
+-    CharFormatter charFormatter = new JavaCharFormatter();
+-
+-    /** Create an LLk analyzer */
+-    public LLkAnalyzer(Tool tool_) {
+-        tool = tool_;
+-    }
+-
+-    /** Return true if someone used the '.' wildcard default idiom.
+-     *  Either #(. children) or '.' as an alt by itself.
+-     */
+-    protected boolean altUsesWildcardDefault(Alternative alt) {
+-        AlternativeElement head = alt.head;
+-        // if element is #(. blah) then check to see if el is root
+-        if (head instanceof TreeElement &&
+-            ((TreeElement)head).root instanceof WildcardElement) {
+-            return true;
+-        }
+-        if (head instanceof WildcardElement && head.next instanceof BlockEndElement) {
+-            return true;
+-        }
+-        return false;
+-    }
+-
+-    /**Is this block of alternatives LL(k)?  Fill in alternative cache for this block.
+-     * @return true if the block is deterministic
+-     */
+-    public boolean deterministic(AlternativeBlock blk) {
+-        /** The lookahead depth for this decision */
+-        int k = 1;	// start at k=1
+-        if (DEBUG_ANALYZER) System.out.println("deterministic(" + blk + ")");
+-        boolean det = true;
+-        int nalts = blk.alternatives.size();
+-        AlternativeBlock saveCurrentBlock = currentBlock;
+-        Alternative wildcardAlt = null;
+-        currentBlock = blk;
+-
+-        /* don't allow nongreedy (...) blocks */
+-        if (blk.greedy == false && !(blk instanceof OneOrMoreBlock) && !(blk instanceof ZeroOrMoreBlock)) {
+-            tool.warning("Being nongreedy only makes sense for (...)+ and (...)*", grammar.getFilename(), blk.getLine(), blk.getColumn());
+-        }
+-
+-        // SPECIAL CASE: only one alternative.  We don't need to check the
+-        // determinism, but other code expects the lookahead cache to be
+-        // set for the single alt.
+-        if (nalts == 1) {
+-            AlternativeElement e = blk.getAlternativeAt(0).head;
+-            currentBlock.alti = 0;
+-            blk.getAlternativeAt(0).cache[1] = e.look(1);
+-            blk.getAlternativeAt(0).lookaheadDepth = 1;	// set lookahead to LL(1)
+-            currentBlock = saveCurrentBlock;
+-            return true;	// always deterministic for one alt
+-        }
+-
+-        outer:
+-            for (int i = 0; i < nalts - 1; i++) {
+-                currentBlock.alti = i;
+-                currentBlock.analysisAlt = i;	// which alt are we analyzing?
+-                currentBlock.altj = i + 1;		// reset this alt.  Haven't computed yet,
+-                // but we need the alt number.
+-                inner:
+-                    // compare against other alternatives with lookahead depth k
+-                    for (int j = i + 1; j < nalts; j++) {
+-                        currentBlock.altj = j;
+-                        if (DEBUG_ANALYZER) System.out.println("comparing " + i + " against alt " + j);
+-                        currentBlock.analysisAlt = j;	// which alt are we analyzing?
+-                        k = 1;	// always attempt minimum lookahead possible.
+-
+-                        // check to see if there is a lookahead depth that distinguishes
+-                        // between alternatives i and j.
+-                        Lookahead[] r = new Lookahead[grammar.maxk + 1];
+-                        boolean haveAmbiguity;
+-                        do {
+-                            haveAmbiguity = false;
+-                            if (DEBUG_ANALYZER) System.out.println("checking depth " + k + "<=" + grammar.maxk);
+-                            Lookahead p,q;
+-                            p = getAltLookahead(blk, i, k);
+-                            q = getAltLookahead(blk, j, k);
+-
+-                            // compare LOOK(alt i) with LOOK(alt j).  Is there an intersection?
+-                            // Lookahead must be disjoint.
+-                            if (DEBUG_ANALYZER) System.out.println("p is " + p.toString(",", charFormatter, grammar));
+-                            if (DEBUG_ANALYZER) System.out.println("q is " + q.toString(",", charFormatter, grammar));
+-                            // r[i] = p.fset.and(q.fset);
+-                            r[k] = p.intersection(q);
+-                            if (DEBUG_ANALYZER) System.out.println("intersection at depth " + k + " is " + r[k].toString());
+-                            if (!r[k].nil()) {
+-                                haveAmbiguity = true;
+-                                k++;
+-                            }
+-                            // go until no more lookahead to use or no intersection
+-                        } while (haveAmbiguity && k <= grammar.maxk);
+-
+-                        Alternative ai = blk.getAlternativeAt(i);
+-                        Alternative aj = blk.getAlternativeAt(j);
+-                        if (haveAmbiguity) {
+-                            det = false;
+-                            ai.lookaheadDepth = NONDETERMINISTIC;
+-                            aj.lookaheadDepth = NONDETERMINISTIC;
+-
+-                            /* if ith alt starts with a syntactic predicate, computing the
+-                             * lookahead is still done for code generation, but messages
+-                             * should not be generated when comparing against alt j.
+-                             * Alternatives with syn preds that are unnecessary do
+-                             * not result in syn pred try-blocks.
+-                             */
+-                            if (ai.synPred != null) {
+-                                if (DEBUG_ANALYZER) {
+-                                    System.out.println("alt " + i + " has a syn pred");
+-                                }
+-                                // The alt with the (...)=> block is nondeterministic for sure.
+-                                // If the (...)=> conflicts with alt j, j is nondeterministic.
+-                                // This prevents alt j from being in any switch statements.
+-                                // move on to next alternative=>no possible ambiguity!
+-                                //						continue inner;
+-                            }
+-
+-                            /* if ith alt starts with a semantic predicate, computing the
+-                             * lookahead is still done for code generation, but messages
+-                             * should not be generated when comparing against alt j.
+-                             */
+-                            else if (ai.semPred != null) {
+-                                if (DEBUG_ANALYZER) {
+-                                    System.out.println("alt " + i + " has a sem pred");
+-                                }
+-                            }
+-
+-                            /* if jth alt is exactly the wildcard or wildcard root of tree,
+-                             * then remove elements from alt i lookahead from alt j's lookahead.
+-                             * Don't do an ambiguity warning.
+-                             */
+-                            else if (altUsesWildcardDefault(aj)) {
+-                                // System.out.println("removing pred sets");
+-                                // removeCompetingPredictionSetsFromWildcard(aj.cache, aj.head, grammar.maxk);
+-                                wildcardAlt = aj;
+-                            }
+-
+-                            /* If the user specified warnWhenFollowAmbig=false, then we
+-                             * can turn off this warning IFF one of the alts is empty;
+-                             * that is, it points immediately at the end block.
+-                             */
+-                            else if (!blk.warnWhenFollowAmbig &&
+-                                (ai.head instanceof BlockEndElement ||
+-                                aj.head instanceof BlockEndElement)) {
+-                                // System.out.println("ai.head pts to "+ai.head.getClass());
+-                                // System.out.println("aj.head pts to "+aj.head.getClass());
+-                            }
+-
+-                            /* If they have the generateAmbigWarnings option off for the block
+-                             * then don't generate a warning.
+-                             */
+-                            else if (!blk.generateAmbigWarnings) {
+-                            }
+-
+-                            /* If greedy=true and *one* empty alt shut off warning. */
+-                            else if (blk.greedySet && blk.greedy &&
+-                                ((ai.head instanceof BlockEndElement &&
+-                                !(aj.head instanceof BlockEndElement)) ||
+-                                (aj.head instanceof BlockEndElement &&
+-                                !(ai.head instanceof BlockEndElement)))) {
+-                                // System.out.println("greedy set to true; one alt empty");
+-                            }
+-
+-
+-                            /* We have no choice, but to report a nondetermism */
+-                            else {
+-                                tool.errorHandler.warnAltAmbiguity(
+-                                    grammar,
+-                                    blk, // the block
+-                                    lexicalAnalysis, // true if lexical
+-                                    grammar.maxk, // depth of ambiguity
+-                                    r, // set of linear ambiguities
+-                                    i, // first ambiguous alternative
+-                                    j				// second ambiguous alternative
+-                                );
+-                            }
+-                        }
+-                        else {
+-                            // a lookahead depth, k, was found where i and j do not conflict
+-                            ai.lookaheadDepth = Math.max(ai.lookaheadDepth, k);
+-                            aj.lookaheadDepth = Math.max(aj.lookaheadDepth, k);
+-                        }
+-                    }
+-            }
+-
+-        // finished with block.
+-
+-        // If had wildcard default clause idiom, remove competing lookahead
+-        /*
+-		  if ( wildcardAlt!=null ) {
+-		  removeCompetingPredictionSetsFromWildcard(wildcardAlt.cache, wildcardAlt.head, grammar.maxk);
+-		  }
+-		*/
+-
+-        currentBlock = saveCurrentBlock;
+-        return det;
+-    }
+-
+-    /**Is (...)+ block LL(1)?  Fill in alternative cache for this block.
+-     * @return true if the block is deterministic
+-     */
+-    public boolean deterministic(OneOrMoreBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("deterministic(...)+(" + blk + ")");
+-        AlternativeBlock saveCurrentBlock = currentBlock;
+-        currentBlock = blk;
+-        boolean blkOk = deterministic((AlternativeBlock)blk);
+-        // block has been checked, now check that what follows does not conflict
+-        // with the lookahead of the (...)+ block.
+-        boolean det = deterministicImpliedPath(blk);
+-        currentBlock = saveCurrentBlock;
+-        return det && blkOk;
+-    }
+-
+-    /**Is (...)* block LL(1)?  Fill in alternative cache for this block.
+-     * @return true if the block is deterministic
+-     */
+-    public boolean deterministic(ZeroOrMoreBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("deterministic(...)*(" + blk + ")");
+-        AlternativeBlock saveCurrentBlock = currentBlock;
+-        currentBlock = blk;
+-        boolean blkOk = deterministic((AlternativeBlock)blk);
+-        // block has been checked, now check that what follows does not conflict
+-        // with the lookahead of the (...)* block.
+-        boolean det = deterministicImpliedPath(blk);
+-        currentBlock = saveCurrentBlock;
+-        return det && blkOk;
+-    }
+-
+-    /**Is this (...)* or (...)+ block LL(k)?
+-     * @return true if the block is deterministic
+-     */
+-    public boolean deterministicImpliedPath(BlockWithImpliedExitPath blk) {
+-        /** The lookahead depth for this decision considering implied exit path */
+-        int k;
+-        boolean det = true;
+-        Vector alts = blk.getAlternatives();
+-        int nalts = alts.size();
+-        currentBlock.altj = -1;	// comparing against implicit optional/exit alt
+-
+-        if (DEBUG_ANALYZER) System.out.println("deterministicImpliedPath");
+-        for (int i = 0; i < nalts; i++) {		// check follow against all alts
+-            Alternative alt = blk.getAlternativeAt(i);
+-
+-            if (alt.head instanceof BlockEndElement) {
+-                tool.warning("empty alternative makes no sense in (...)* or (...)+", grammar.getFilename(), blk.getLine(), blk.getColumn());
+-            }
+-
+-            k = 1;							// assume eac alt is LL(1) with exit branch
+-            // check to see if there is a lookahead depth that distinguishes
+-            // between alternative i and the exit branch.
+-            Lookahead[] r = new Lookahead[grammar.maxk + 1];
+-            boolean haveAmbiguity;
+-            do {
+-                haveAmbiguity = false;
+-                if (DEBUG_ANALYZER) System.out.println("checking depth " + k + "<=" + grammar.maxk);
+-                Lookahead p;
+-                Lookahead follow = blk.next.look(k);
+-                blk.exitCache[k] = follow;
+-                currentBlock.alti = i;
+-                p = getAltLookahead(blk, i, k);
+-
+-                if (DEBUG_ANALYZER) System.out.println("follow is " + follow.toString(",", charFormatter, grammar));
+-                if (DEBUG_ANALYZER) System.out.println("p is " + p.toString(",", charFormatter, grammar));
+-                //r[k] = follow.fset.and(p.fset);
+-                r[k] = follow.intersection(p);
+-                if (DEBUG_ANALYZER) System.out.println("intersection at depth " + k + " is " + r[k]);
+-                if (!r[k].nil()) {
+-                    haveAmbiguity = true;
+-                    k++;
+-                }
+-                // go until no more lookahead to use or no intersection
+-            } while (haveAmbiguity && k <= grammar.maxk);
+-
+-            if (haveAmbiguity) {
+-                det = false;
+-                alt.lookaheadDepth = NONDETERMINISTIC;
+-                blk.exitLookaheadDepth = NONDETERMINISTIC;
+-                Alternative ambigAlt = blk.getAlternativeAt(currentBlock.alti);
+-
+-                /* If the user specified warnWhenFollowAmbig=false, then we
+-                 * can turn off this warning.
+-                 */
+-                if (!blk.warnWhenFollowAmbig) {
+-                }
+-
+-                /* If they have the generateAmbigWarnings option off for the block
+-                 * then don't generate a warning.
+-                 */
+-                else if (!blk.generateAmbigWarnings) {
+-                }
+-
+-                /* If greedy=true and alt not empty, shut off warning */
+-                else if (blk.greedy == true && blk.greedySet &&
+-                    !(ambigAlt.head instanceof BlockEndElement)) {
+-                    if (DEBUG_ANALYZER) System.out.println("greedy loop");
+-                }
+-
+-                /* If greedy=false then shut off warning...will have
+-                 * to add "if FOLLOW break"
+-                 * block during code gen to compensate for removal of warning.
+-                 */
+-                else if (blk.greedy == false &&
+-                    !(ambigAlt.head instanceof BlockEndElement)) {
+-                    if (DEBUG_ANALYZER) System.out.println("nongreedy loop");
+-                    // if FOLLOW not single k-string (|set[k]| can
+-                    // be > 1 actually) then must warn them that
+-                    // loop may terminate incorrectly.
+-                    // For example, ('a'..'d')+ ("ad"|"cb")
+-                    if (!lookaheadEquivForApproxAndFullAnalysis(blk.exitCache, grammar.maxk)) {
+-                        tool.warning(new String[]{
+-                            "nongreedy block may exit incorrectly due",
+-                            "\tto limitations of linear approximate lookahead (first k-1 sets",
+-                            "\tin lookahead not singleton)."},
+-                                     grammar.getFilename(), blk.getLine(), blk.getColumn());
+-                    }
+-                }
+-
+-                // no choice but to generate a warning
+-                else {
+-                    tool.errorHandler.warnAltExitAmbiguity(
+-                        grammar,
+-                        blk, // the block
+-                        lexicalAnalysis, // true if lexical
+-                        grammar.maxk, // depth of ambiguity
+-                        r, // set of linear ambiguities
+-                        i		// ambiguous alternative
+-                    );
+-                }
+-            }
+-            else {
+-                alt.lookaheadDepth = Math.max(alt.lookaheadDepth, k);
+-                blk.exitLookaheadDepth = Math.max(blk.exitLookaheadDepth, k);
+-            }
+-        }
+-        return det;
+-    }
+-
+-    /**Compute the lookahead set of whatever follows references to
+-     * the rule associated witht the FOLLOW block.
+-     */
+-    public Lookahead FOLLOW(int k, RuleEndElement end) {
+-        // what rule are we trying to compute FOLLOW of?
+-        RuleBlock rb = (RuleBlock)end.block;
+-        // rule name is different in lexer
+-        String rule;
+-        if (lexicalAnalysis) {
+-            rule = CodeGenerator.encodeLexerRuleName(rb.getRuleName());
+-        }
+-        else {
+-            rule = rb.getRuleName();
+-        }
+-
+-        if (DEBUG_ANALYZER) System.out.println("FOLLOW(" + k + "," + rule + ")");
+-
+-        // are we in the midst of computing this FOLLOW already?
+-        if (end.lock[k]) {
+-            if (DEBUG_ANALYZER) System.out.println("FOLLOW cycle to " + rule);
+-            return new Lookahead(rule);
+-        }
+-
+-        // Check to see if there is cached value
+-        if (end.cache[k] != null) {
+-            if (DEBUG_ANALYZER) {
+-                System.out.println("cache entry FOLLOW(" + k + ") for " + rule + ": " + end.cache[k].toString(",", charFormatter, grammar));
+-            }
+-            // if the cache is a complete computation then simply return entry
+-            if (end.cache[k].cycle == null) {
+-                return (Lookahead)end.cache[k].clone();
+-            }
+-            // A cache entry exists, but it is a reference to a cyclic computation.
+-            RuleSymbol rs = (RuleSymbol)grammar.getSymbol(end.cache[k].cycle);
+-            RuleEndElement re = rs.getBlock().endNode;
+-            // The other entry may not exist because it is still being
+-            // computed when this cycle cache entry was found here.
+-            if (re.cache[k] == null) {
+-                // return the cycle...that's all we can do at the moment.
+-                return (Lookahead)end.cache[k].clone();
+-            }
+-            else {
+-                if (DEBUG_ANALYZER) {
+-                    System.out.println("combining FOLLOW(" + k + ") for " + rule + ": from "+end.cache[k].toString(",", charFormatter, grammar) + " with FOLLOW for "+((RuleBlock)re.block).getRuleName()+": "+re.cache[k].toString(",", charFormatter, grammar));
+-                }
+-                // combine results from other rule's FOLLOW
+-                if ( re.cache[k].cycle==null ) {
+-                    // current rule depends on another rule's FOLLOW and
+-                    // it is complete with no cycle; just kill our cycle and
+-                    // combine full result from other rule's FOLLOW
+-                    end.cache[k].combineWith(re.cache[k]);
+-                    end.cache[k].cycle = null; // kill cycle as we're complete
+-                }
+-                else {
+-                    // the FOLLOW cache for other rule has a cycle also.
+-                    // Here is where we bubble up a cycle.  We better recursively
+-                    // wipe out cycles (partial computations).  I'm a little nervous
+-                    // that we might leave a cycle here, however.
+-                    Lookahead refFOLLOW = FOLLOW(k, re);
+-                    end.cache[k].combineWith( refFOLLOW );
+-                    // all cycles should be gone, but if not, record ref to cycle
+-                    end.cache[k].cycle = refFOLLOW.cycle;
+-                }
+-                if (DEBUG_ANALYZER) {
+-                    System.out.println("saving FOLLOW(" + k + ") for " + rule + ": from "+end.cache[k].toString(",", charFormatter, grammar));
+-                }
+-                // Return the updated cache entry associated
+-                // with the cycle reference.
+-                return (Lookahead)end.cache[k].clone();
+-            }
+-        }
+-
+-        end.lock[k] = true;	// prevent FOLLOW computation cycles
+-
+-        Lookahead p = new Lookahead();
+-
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rule);
+-
+-        // Walk list of references to this rule to compute FOLLOW
+-        for (int i = 0; i < rs.numReferences(); i++) {
+-            RuleRefElement rr = rs.getReference(i);
+-            if (DEBUG_ANALYZER) System.out.println("next[" + rule + "] is " + rr.next.toString());
+-            Lookahead q = rr.next.look(k);
+-            if (DEBUG_ANALYZER) System.out.println("FIRST of next[" + rule + "] ptr is " + q.toString());
+-            /* If there is a cycle then if the cycle is to the rule for
+-			 * this end block, you have a cycle to yourself.  Remove the
+-			 * cycle indication--the lookahead is complete.
+-			 */
+-            if (q.cycle != null && q.cycle.equals(rule)) {
+-                q.cycle = null;	// don't want cycle to yourself!
+-            }
+-            // add the lookahead into the current FOLLOW computation set
+-            p.combineWith(q);
+-            if (DEBUG_ANALYZER) System.out.println("combined FOLLOW[" + rule + "] is " + p.toString());
+-        }
+-
+-        end.lock[k] = false; // we're not doing FOLLOW anymore
+-
+-        // if no rules follow this, it can be a start symbol or called by a start sym.
+-        // set the follow to be end of file.
+-        if (p.fset.nil() && p.cycle == null) {
+-            if (grammar instanceof TreeWalkerGrammar) {
+-                // Tree grammars don't see EOF, they see end of sibling list or
+-                // "NULL TREE LOOKAHEAD".
+-                p.fset.add(Token.NULL_TREE_LOOKAHEAD);
+-            }
+-            else if (grammar instanceof LexerGrammar) {
+-                // Lexical grammars use Epsilon to indicate that the end of rule has been hit
+-                // EOF would be misleading; any character can follow a token rule not just EOF
+-                // as in a grammar (where a start symbol is followed by EOF).  There is no
+-                // sequence info in a lexer between tokens to indicate what is the last token
+-                // to be seen.
+-                // p.fset.add(EPSILON_TYPE);
+-                p.setEpsilon();
+-            }
+-            else {
+-                p.fset.add(Token.EOF_TYPE);
+-            }
+-        }
+-
+-        // Cache the result of the FOLLOW computation
+-        if (DEBUG_ANALYZER) {
+-            System.out.println("saving FOLLOW(" + k + ") for " + rule + ": " + p.toString(",", charFormatter, grammar));
+-        }
+-        end.cache[k] = (Lookahead)p.clone();
+-
+-        return p;
+-    }
+-
+-    private Lookahead getAltLookahead(AlternativeBlock blk, int alt, int k) {
+-        Lookahead p;
+-        Alternative a = blk.getAlternativeAt(alt);
+-        AlternativeElement e = a.head;
+-        //System.out.println("getAltLookahead("+k+","+e+"), cache size is "+a.cache.length);
+-        if (a.cache[k] == null) {
+-            p = e.look(k);
+-            a.cache[k] = p;
+-        }
+-        else {
+-            p = a.cache[k];
+-        }
+-        return p;
+-    }
+-
+-    /**Actions are ignored */
+-    public Lookahead look(int k, ActionElement action) {
+-        if (DEBUG_ANALYZER) System.out.println("lookAction(" + k + "," + action + ")");
+-        return action.next.look(k);
+-    }
+-
+-    /**Combine the lookahead computed for each alternative */
+-    public Lookahead look(int k, AlternativeBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("lookAltBlk(" + k + "," + blk + ")");
+-        AlternativeBlock saveCurrentBlock = currentBlock;
+-        currentBlock = blk;
+-        Lookahead p = new Lookahead();
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            if (DEBUG_ANALYZER) System.out.println("alt " + i + " of " + blk);
+-            // must set analysis alt
+-            currentBlock.analysisAlt = i;
+-            Alternative alt = blk.getAlternativeAt(i);
+-            AlternativeElement elem = alt.head;
+-            if (DEBUG_ANALYZER) {
+-                if (alt.head == alt.tail) {
+-                    System.out.println("alt " + i + " is empty");
+-                }
+-            }
+-            Lookahead q = elem.look(k);
+-            p.combineWith(q);
+-        }
+-        if (k == 1 && blk.not && subruleCanBeInverted(blk, lexicalAnalysis)) {
+-            // Invert the lookahead set
+-            if (lexicalAnalysis) {
+-                BitSet b = (BitSet)((LexerGrammar)grammar).charVocabulary.clone();
+-                int[] elems = p.fset.toArray();
+-                for (int j = 0; j < elems.length; j++) {
+-                    b.remove(elems[j]);
+-                }
+-                p.fset = b;
+-            }
+-            else {
+-                p.fset.notInPlace(Token.MIN_USER_TYPE, grammar.tokenManager.maxTokenType());
+-            }
+-        }
+-        currentBlock = saveCurrentBlock;
+-        return p;
+-    }
+-
+-    /**Compute what follows this place-holder node and possibly
+-     * what begins the associated loop unless the
+-     * node is locked.
+-     * <p>
+-     * if we hit the end of a loop, we have to include
+-     * what tokens can begin the loop as well.  If the start
+-     * node is locked, then we simply found an empty path
+-     * through this subrule while analyzing it.  If the
+-     * start node is not locked, then this node was hit
+-     * during a FOLLOW operation and the FIRST of this
+-     * block must be included in that lookahead computation.
+-     */
+-    public Lookahead look(int k, BlockEndElement end) {
+-        if (DEBUG_ANALYZER) System.out.println("lookBlockEnd(" + k + ", " + end.block + "); lock is " + end.lock[k]);
+-        if (end.lock[k]) {
+-            // computation in progress => the tokens we would have
+-            // computed (had we not been locked) will be included
+-            // in the set by that computation with the lock on this
+-            // node.
+-            return new Lookahead();
+-        }
+-
+-        Lookahead p;
+-
+-        /* Hitting the end of a loop means you can see what begins the loop */
+-        if (end.block instanceof ZeroOrMoreBlock ||
+-            end.block instanceof OneOrMoreBlock) {
+-            // compute what can start the block,
+-            // but lock end node so we don't do it twice in same
+-            // computation.
+-            end.lock[k] = true;
+-            p = look(k, end.block);
+-            end.lock[k] = false;
+-        }
+-        else {
+-            p = new Lookahead();
+-        }
+-
+-        /* Tree blocks do not have any follow because they are children
+-		 * of what surrounds them.  For example, A #(B C) D results in
+-		 * a look() for the TreeElement end of NULL_TREE_LOOKAHEAD, which
+-		 * indicates that nothing can follow the last node of tree #(B C)
+-		 */
+-        if (end.block instanceof TreeElement) {
+-            p.combineWith(Lookahead.of(Token.NULL_TREE_LOOKAHEAD));
+-        }
+-
+-        /* Syntactic predicates such as ( (A)? )=> have no follow per se.
+-		 * We cannot accurately say what would be matched following a
+-		 * syntactic predicate (you MIGHT be ok if you said it was whatever
+-		 * followed the alternative predicted by the predicate).  Hence,
+-		 * (like end-of-token) we return Epsilon to indicate "unknown
+-		 * lookahead."
+-		 */
+-        else if (end.block instanceof SynPredBlock) {
+-            p.setEpsilon();
+-        }
+-
+-        // compute what can follow the block
+-        else {
+-            Lookahead q = end.block.next.look(k);
+-            p.combineWith(q);
+-        }
+-
+-        return p;
+-    }
+-
+-    /**Return this char as the lookahead if k=1.
+-     * <p>### Doesn't work for ( 'a' 'b' | 'a' ~'b' ) yet!!!
+-     * <p>
+-     * If the atom has the <tt>not</tt> flag on, then
+-     * create the set complement of the tokenType
+-     * which is the set of all characters referenced
+-     * in the grammar with this char turned off.
+-     * Also remove characters from the set that
+-     * are currently allocated for predicting
+-     * previous alternatives.  This avoids ambiguity
+-     * messages and is more properly what is meant.
+-     * ( 'a' | ~'a' ) implies that the ~'a' is the
+-     * "else" clause.
+-     * <p>
+-     * NOTE: we do <b>NOT</b> include exit path in
+-     * the exclusion set. E.g.,
+-     * ( 'a' | ~'a' )* 'b'
+-     * should exit upon seeing a 'b' during the loop.
+-     */
+-    public Lookahead look(int k, CharLiteralElement atom) {
+-        if (DEBUG_ANALYZER) System.out.println("lookCharLiteral(" + k + "," + atom + ")");
+-        // Skip until analysis hits k==1
+-        if (k > 1) {
+-            return atom.next.look(k - 1);
+-        }
+-        if (lexicalAnalysis) {
+-            if (atom.not) {
+-                BitSet b = (BitSet)((LexerGrammar)grammar).charVocabulary.clone();
+-                if (DEBUG_ANALYZER) System.out.println("charVocab is " + b.toString());
+-                // remove stuff predicted by preceding alts and follow of block
+-                removeCompetingPredictionSets(b, atom);
+-                if (DEBUG_ANALYZER) System.out.println("charVocab after removal of prior alt lookahead " + b.toString());
+-                // now remove element that is stated not to be in the set
+-                b.clear(atom.getType());
+-                return new Lookahead(b);
+-            }
+-            else {
+-                return Lookahead.of(atom.getType());
+-            }
+-        }
+-        else {
+-            // Should have been avoided by MakeGrammar
+-            tool.panic("Character literal reference found in parser");
+-            // ... so we make the compiler happy
+-            return Lookahead.of(atom.getType());
+-        }
+-    }
+-
+-    public Lookahead look(int k, CharRangeElement r) {
+-        if (DEBUG_ANALYZER) System.out.println("lookCharRange(" + k + "," + r + ")");
+-        // Skip until analysis hits k==1
+-        if (k > 1) {
+-            return r.next.look(k - 1);
+-        }
+-        BitSet p = BitSet.of(r.begin);
+-        for (int i = r.begin + 1; i <= r.end; i++) {
+-            p.add(i);
+-        }
+-        return new Lookahead(p);
+-    }
+-
+-    public Lookahead look(int k, GrammarAtom atom) {
+-        if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + atom + "[" + atom.getType() + "])");
+-
+-        if (lexicalAnalysis) {
+-            // MakeGrammar should have created a rule reference instead
+-            tool.panic("token reference found in lexer");
+-        }
+-        // Skip until analysis hits k==1
+-        if (k > 1) {
+-            return atom.next.look(k - 1);
+-        }
+-        Lookahead l = Lookahead.of(atom.getType());
+-        if (atom.not) {
+-            // Invert the lookahead set against the token vocabulary
+-            int maxToken = grammar.tokenManager.maxTokenType();
+-            l.fset.notInPlace(Token.MIN_USER_TYPE, maxToken);
+-            // remove stuff predicted by preceding alts and follow of block
+-            removeCompetingPredictionSets(l.fset, atom);
+-        }
+-        return l;
+-    }
+-
+-    /**The lookahead of a (...)+ block is the combined lookahead of
+-     * all alternatives and, if an empty path is found, the lookahead
+-     * of what follows the block.
+-     */
+-    public Lookahead look(int k, OneOrMoreBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("look+" + k + "," + blk + ")");
+-        Lookahead p = look(k, (AlternativeBlock)blk);
+-        return p;
+-    }
+-
+-    /**Combine the lookahead computed for each alternative.
+-     * Lock the node so that no other computation may come back
+-     * on itself--infinite loop.  This also implies infinite left-recursion
+-     * in the grammar (or an error in this algorithm ;)).
+-     */
+-    public Lookahead look(int k, RuleBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("lookRuleBlk(" + k + "," + blk + ")");
+-        Lookahead p = look(k, (AlternativeBlock)blk);
+-        return p;
+-    }
+-
+-    /**If not locked or noFOLLOW set, compute FOLLOW of a rule.
+-     * <p>
+-     * TJP says 8/12/99: not true anymore:
+-     * Lexical rules never compute follow.  They set epsilon and
+-     * the code generator gens code to check for any character.
+-     * The code generator must remove the tokens used to predict
+-     * any previous alts in the same block.
+-     * <p>
+-     * When the last node of a rule is reached and noFOLLOW,
+-     * it implies that a "local" FOLLOW will be computed
+-     * after this call.  I.e.,
+-     * <pre>
+-     *		a : b A;
+-     *		b : B | ;
+-     *		c : b C;
+-     * </pre>
+-     * Here, when computing the look of rule b from rule a,
+-     * we want only {B,EPSILON_TYPE} so that look(b A) will
+-     * be {B,A} not {B,A,C}.
+-     * <p>
+-     * if the end block is not locked and the FOLLOW is
+-     * wanted, the algorithm must compute the lookahead
+-     * of what follows references to this rule.  If
+-     * end block is locked, FOLLOW will return an empty set
+-     * with a cycle to the rule associated with this end block.
+-     */
+-    public Lookahead look(int k, RuleEndElement end) {
+-        if (DEBUG_ANALYZER)
+-            System.out.println("lookRuleBlockEnd(" + k + "); noFOLLOW=" +
+-                               end.noFOLLOW + "; lock is " + end.lock[k]);
+-        if (/*lexicalAnalysis ||*/ end.noFOLLOW) {
+-            Lookahead p = new Lookahead();
+-            p.setEpsilon();
+-            p.epsilonDepth = BitSet.of(k);
+-            return p;
+-        }
+-        Lookahead p = FOLLOW(k, end);
+-        return p;
+-    }
+-
+-    /**Compute the lookahead contributed by a rule reference.
+-     *
+-     * <p>
+-     * When computing ruleref lookahead, we don't want the FOLLOW
+-     * computation done if an empty path exists for the rule.
+-     * The FOLLOW is too loose of a set...we want only to
+-     * include the "local" FOLLOW or what can follow this
+-     * particular ref to the node.  In other words, we use
+-     * context information to reduce the complexity of the
+-     * analysis and strengthen the parser.
+-     *
+-     * The noFOLLOW flag is used as a means of restricting
+-     * the FOLLOW to a "local" FOLLOW.  This variable is
+-     * orthogonal to the <tt>lock</tt> variable that prevents
+-     * infinite recursion.  noFOLLOW does not care about what k is.
+-     */
+-    public Lookahead look(int k, RuleRefElement rr) {
+-        if (DEBUG_ANALYZER) System.out.println("lookRuleRef(" + k + "," + rr + ")");
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rr.targetRule);
+-        if (rs == null || !rs.defined) {
+-            tool.error("no definition of rule " + rr.targetRule, grammar.getFilename(), rr.getLine(), rr.getColumn());
+-            return new Lookahead();
+-        }
+-        RuleBlock rb = rs.getBlock();
+-        RuleEndElement end = rb.endNode;
+-        boolean saveEnd = end.noFOLLOW;
+-        end.noFOLLOW = true;
+-        // go off to the rule and get the lookahead (w/o FOLLOW)
+-        Lookahead p = look(k, rr.targetRule);
+-        if (DEBUG_ANALYZER) System.out.println("back from rule ref to " + rr.targetRule);
+-        // restore state of end block
+-        end.noFOLLOW = saveEnd;
+-
+-        // check for infinite recursion.  If a cycle is returned: trouble!
+-        if (p.cycle != null) {
+-            tool.error("infinite recursion to rule " + p.cycle + " from rule " +
+-                       rr.enclosingRuleName, grammar.getFilename(), rr.getLine(), rr.getColumn());
+-        }
+-
+-        // is the local FOLLOW required?
+-        if (p.containsEpsilon()) {
+-            if (DEBUG_ANALYZER)
+-                System.out.println("rule ref to " +
+-                                   rr.targetRule + " has eps, depth: " + p.epsilonDepth);
+-
+-            // remove epsilon
+-            p.resetEpsilon();
+-            // fset.clear(EPSILON_TYPE);
+-
+-            // for each lookahead depth that saw epsilon
+-            int[] depths = p.epsilonDepth.toArray();
+-            p.epsilonDepth = null;		// clear all epsilon stuff
+-            for (int i = 0; i < depths.length; i++) {
+-                int rk = k - (k - depths[i]);
+-                Lookahead q = rr.next.look(rk);	// see comments in Lookahead
+-                p.combineWith(q);
+-            }
+-            // note: any of these look() computations for local follow can
+-            // set EPSILON in the set again if the end of this rule is found.
+-        }
+-
+-        return p;
+-    }
+-
+-    public Lookahead look(int k, StringLiteralElement atom) {
+-        if (DEBUG_ANALYZER) System.out.println("lookStringLiteral(" + k + "," + atom + ")");
+-        if (lexicalAnalysis) {
+-            // need more lookahead than string can provide?
+-            if (k > atom.processedAtomText.length()) {
+-                return atom.next.look(k - atom.processedAtomText.length());
+-            }
+-            else {
+-                // get char at lookahead depth k, from the processed literal text
+-                return Lookahead.of(atom.processedAtomText.charAt(k - 1));
+-            }
+-        }
+-        else {
+-            // Skip until analysis hits k==1
+-            if (k > 1) {
+-                return atom.next.look(k - 1);
+-            }
+-            Lookahead l = Lookahead.of(atom.getType());
+-            if (atom.not) {
+-                // Invert the lookahead set against the token vocabulary
+-                int maxToken = grammar.tokenManager.maxTokenType();
+-                l.fset.notInPlace(Token.MIN_USER_TYPE, maxToken);
+-            }
+-            return l;
+-        }
+-    }
+-
+-    /**The lookahead of a (...)=> block is the lookahead of
+-     * what follows the block.  By definition, the syntactic
+-     * predicate block defies static analysis (you want to try it
+-     * out at run-time).  The LOOK of (a)=>A B is A for LL(1)
+-     * ### is this even called?
+-     */
+-    public Lookahead look(int k, SynPredBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("look=>(" + k + "," + blk + ")");
+-        return blk.next.look(k);
+-    }
+-
+-    public Lookahead look(int k, TokenRangeElement r) {
+-        if (DEBUG_ANALYZER) System.out.println("lookTokenRange(" + k + "," + r + ")");
+-        // Skip until analysis hits k==1
+-        if (k > 1) {
+-            return r.next.look(k - 1);
+-        }
+-        BitSet p = BitSet.of(r.begin);
+-        for (int i = r.begin + 1; i <= r.end; i++) {
+-            p.add(i);
+-        }
+-        return new Lookahead(p);
+-    }
+-
+-    public Lookahead look(int k, TreeElement t) {
+-        if (DEBUG_ANALYZER)
+-            System.out.println("look(" + k + "," + t.root + "[" + t.root.getType() + "])");
+-        if (k > 1) {
+-            return t.next.look(k - 1);
+-        }
+-        Lookahead l = null;
+-        if (t.root instanceof WildcardElement) {
+-            l = t.root.look(1); // compute FIRST set minus previous rows
+-        }
+-        else {
+-            l = Lookahead.of(t.root.getType());
+-            if (t.root.not) {
+-                // Invert the lookahead set against the token vocabulary
+-                int maxToken = grammar.tokenManager.maxTokenType();
+-                l.fset.notInPlace(Token.MIN_USER_TYPE, maxToken);
+-            }
+-        }
+-        return l;
+-    }
+-
+-    public Lookahead look(int k, WildcardElement wc) {
+-        if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + wc + ")");
+-
+-        // Skip until analysis hits k==1
+-        if (k > 1) {
+-            return wc.next.look(k - 1);
+-        }
+-
+-        BitSet b;
+-        if (lexicalAnalysis) {
+-            // Copy the character vocabulary
+-            b = (BitSet)((LexerGrammar)grammar).charVocabulary.clone();
+-        }
+-        else {
+-            b = new BitSet(1);
+-            // Invert the lookahead set against the token vocabulary
+-            int maxToken = grammar.tokenManager.maxTokenType();
+-            b.notInPlace(Token.MIN_USER_TYPE, maxToken);
+-            if (DEBUG_ANALYZER) System.out.println("look(" + k + "," + wc + ") after not: " + b);
+-        }
+-
+-        // Remove prediction sets from competing alternatives
+-        // removeCompetingPredictionSets(b, wc);
+-
+-        return new Lookahead(b);
+-    }
+-
+-    /** The (...)* element is the combined lookahead of the alternatives and what can
+-     *  follow the loop.
+-     */
+-    public Lookahead look(int k, ZeroOrMoreBlock blk) {
+-        if (DEBUG_ANALYZER) System.out.println("look*(" + k + "," + blk + ")");
+-        Lookahead p = look(k, (AlternativeBlock)blk);
+-        Lookahead q = blk.next.look(k);
+-        p.combineWith(q);
+-        return p;
+-    }
+-
+-    /**Compute the combined lookahead for all productions of a rule.
+-     * If the lookahead returns with epsilon, at least one epsilon
+-     * path exists (one that consumes no tokens).  The noFOLLOW
+-     * flag being set for this endruleblk, indicates that the
+-     * a rule ref invoked this rule.
+-     *
+-     * Currently only look(RuleRef) calls this.  There is no need
+-     * for the code generator to call this.
+-     */
+-    public Lookahead look(int k, String rule) {
+-        if (DEBUG_ANALYZER) System.out.println("lookRuleName(" + k + "," + rule + ")");
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(rule);
+-        RuleBlock rb = rs.getBlock();
+-
+-        if (rb.lock[k]) {
+-            if (DEBUG_ANALYZER)
+-                System.out.println("infinite recursion to rule " + rb.getRuleName());
+-            return new Lookahead(rule);
+-        }
+-
+-        // have we computed it before?
+-        if (rb.cache[k] != null) {
+-            if (DEBUG_ANALYZER) {
+-                System.out.println("found depth " + k + " result in FIRST " + rule + " cache: " +
+-                                   rb.cache[k].toString(",", charFormatter, grammar));
+-            }
+-            return (Lookahead)rb.cache[k].clone();
+-        }
+-
+-        rb.lock[k] = true;
+-        Lookahead p = look(k, (RuleBlock)rb);
+-        rb.lock[k] = false;
+-
+-        // cache results
+-        rb.cache[k] = (Lookahead)p.clone();
+-        if (DEBUG_ANALYZER) {
+-            System.out.println("saving depth " + k + " result in FIRST " + rule + " cache: " +
+-                               rb.cache[k].toString(",", charFormatter, grammar));
+-        }
+-        return p;
+-    }
+-
+-    /** If the first k-1 sets are singleton sets, the appoximate
+-     *  lookahead analysis is equivalent to full lookahead analysis.
+-     */
+-    public static boolean lookaheadEquivForApproxAndFullAnalysis(Lookahead[] bset, int k) {
+-        // first k-1 sets degree 1?
+-        for (int i = 1; i <= k - 1; i++) {
+-            BitSet look = bset[i].fset;
+-            if (look.degree() > 1) {
+-                return false;
+-            }
+-        }
+-        return true;
+-    }
+-
+-    /** Remove the prediction sets from preceding alternatives
+-     * and follow set, but *only* if this element is the first element
+-     * of the alternative.  The class members currenBlock and
+-     * currentBlock.analysisAlt must be set correctly.
+-     * @param b The prediction bitset to be modified
+-     * @el The element of interest
+-     */
+-    private void removeCompetingPredictionSets(BitSet b, AlternativeElement el) {
+-        // Only do this if the element is the first element of the alt,
+-        // because we are making an implicit assumption that k==1.
+-        GrammarElement head = currentBlock.getAlternativeAt(currentBlock.analysisAlt).head;
+-        // if element is #(. blah) then check to see if el is root
+-        if (head instanceof TreeElement) {
+-            if (((TreeElement)head).root != el) {
+-                return;
+-            }
+-        }
+-        else if (el != head) {
+-            return;
+-        }
+-        for (int i = 0; i < currentBlock.analysisAlt; i++) {
+-            AlternativeElement e = currentBlock.getAlternativeAt(i).head;
+-            b.subtractInPlace(e.look(1).fset);
+-        }
+-    }
+-
+-    /** Remove the prediction sets from preceding alternatives
+-     * The class members currenBlock must be set correctly.
+-     * Remove prediction sets from 1..k.
+-     * @param look The prediction lookahead to be modified
+-     * @el The element of interest
+-     * @k  How deep into lookahead to modify
+-     */
+-    private void removeCompetingPredictionSetsFromWildcard(Lookahead[] look, AlternativeElement el, int k) {
+-        for (int d = 1; d <= k; d++) {
+-            for (int i = 0; i < currentBlock.analysisAlt; i++) {
+-                AlternativeElement e = currentBlock.getAlternativeAt(i).head;
+-                look[d].fset.subtractInPlace(e.look(d).fset);
+-            }
+-        }
+-    }
+-
+-    /** reset the analyzer so it looks like a new one */
+-    private void reset() {
+-        grammar = null;
+-        DEBUG_ANALYZER = false;
+-        currentBlock = null;
+-        lexicalAnalysis = false;
+-    }
+-
+-    /** Set the grammar for the analyzer */
+-    public void setGrammar(Grammar g) {
+-        if (grammar != null) {
+-            reset();
+-        }
+-        grammar = g;
+-
+-        // Is this lexical?
+-        lexicalAnalysis = (grammar instanceof LexerGrammar);
+-        DEBUG_ANALYZER = grammar.analyzerDebug;
+-    }
+-
+-    public boolean subruleCanBeInverted(AlternativeBlock blk, boolean forLexer) {
+-        if (
+-            blk instanceof ZeroOrMoreBlock ||
+-            blk instanceof OneOrMoreBlock ||
+-            blk instanceof SynPredBlock
+-        ) {
+-            return false;
+-        }
+-        // Cannot invert an empty subrule
+-        if (blk.alternatives.size() == 0) {
+-            return false;
+-        }
+-        // The block must only contain alternatives with a single element,
+-        // where each element is a char, token, char range, or token range.
+-        for (int i = 0; i < blk.alternatives.size(); i++) {
+-            Alternative alt = blk.getAlternativeAt(i);
+-            // Cannot have anything interesting in the alternative ...
+-            if (alt.synPred != null || alt.semPred != null || alt.exceptionSpec != null) {
+-                return false;
+-            }
+-            // ... and there must be one simple element
+-            AlternativeElement elt = alt.head;
+-            if (
+-                !(
+-                elt instanceof CharLiteralElement ||
+-                elt instanceof TokenRefElement ||
+-                elt instanceof CharRangeElement ||
+-                elt instanceof TokenRangeElement ||
+-                (elt instanceof StringLiteralElement && !forLexer)
+-                ) ||
+-                !(elt.next instanceof BlockEndElement) ||
+-                elt.getAutoGenType() != GrammarElement.AUTO_GEN_NONE
+-            ) {
+-                return false;
+-            }
+-        }
+-        return true;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/LLkGrammarAnalyzer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/LLkGrammarAnalyzer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/LLkGrammarAnalyzer.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/LLkGrammarAnalyzer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,57 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public interface LLkGrammarAnalyzer extends GrammarAnalyzer {
+-
+-
+-    public boolean deterministic(AlternativeBlock blk);
+-
+-    public boolean deterministic(OneOrMoreBlock blk);
+-
+-    public boolean deterministic(ZeroOrMoreBlock blk);
+-
+-    public Lookahead FOLLOW(int k, RuleEndElement end);
+-
+-    public Lookahead look(int k, ActionElement action);
+-
+-    public Lookahead look(int k, AlternativeBlock blk);
+-
+-    public Lookahead look(int k, BlockEndElement end);
+-
+-    public Lookahead look(int k, CharLiteralElement atom);
+-
+-    public Lookahead look(int k, CharRangeElement end);
+-
+-    public Lookahead look(int k, GrammarAtom atom);
+-
+-    public Lookahead look(int k, OneOrMoreBlock blk);
+-
+-    public Lookahead look(int k, RuleBlock blk);
+-
+-    public Lookahead look(int k, RuleEndElement end);
+-
+-    public Lookahead look(int k, RuleRefElement rr);
+-
+-    public Lookahead look(int k, StringLiteralElement atom);
+-
+-    public Lookahead look(int k, SynPredBlock blk);
+-
+-    public Lookahead look(int k, TokenRangeElement end);
+-
+-    public Lookahead look(int k, TreeElement end);
+-
+-    public Lookahead look(int k, WildcardElement wc);
+-
+-    public Lookahead look(int k, ZeroOrMoreBlock blk);
+-
+-    public Lookahead look(int k, String rule);
+-
+-    public void setGrammar(Grammar g);
+-
+-    public boolean subruleCanBeInverted(AlternativeBlock blk, boolean forLexer);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/LLkParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/LLkParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/LLkParser.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/LLkParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,84 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.IOException;
+-
+-/**An LL(k) parser.
+- *
+- * @see persistence.antlr.Token
+- * @see persistence.antlr.TokenBuffer
+- */
+-public class LLkParser extends Parser {
+-    int k;
+-
+-    public LLkParser(int k_) {
+-        k = k_;
+-    }
+-
+-    public LLkParser(ParserSharedInputState state, int k_) {
+-        super(state);
+-		k = k_;
+-    }
+-
+-    public LLkParser(TokenBuffer tokenBuf, int k_) {
+-        k = k_;
+-        setTokenBuffer(tokenBuf);
+-    }
+-
+-    public LLkParser(TokenStream lexer, int k_) {
+-        k = k_;
+-        TokenBuffer tokenBuf = new TokenBuffer(lexer);
+-        setTokenBuffer(tokenBuf);
+-    }
+-
+-    /**Consume another token from the input stream.  Can only write sequentially!
+-     * If you need 3 tokens ahead, you must consume() 3 times.
+-     * <p>
+-     * Note that it is possible to overwrite tokens that have not been matched.
+-     * For example, calling consume() 3 times when k=2, means that the first token
+-     * consumed will be overwritten with the 3rd.
+-     */
+-    public void consume() {
+-        inputState.input.consume();
+-    }
+-
+-    public int LA(int i) throws TokenStreamException {
+-        return inputState.input.LA(i);
+-    }
+-
+-    public Token LT(int i) throws TokenStreamException {
+-        return inputState.input.LT(i);
+-    }
+-
+-    private void trace(String ee, String rname) throws TokenStreamException {
+-        traceIndent();
+-        System.out.print(ee + rname + ((inputState.guessing > 0)?"; [guessing]":"; "));
+-        for (int i = 1; i <= k; i++) {
+-            if (i != 1) {
+-                System.out.print(", ");
+-            }
+-            if ( LT(i)!=null ) {
+-                System.out.print("LA(" + i + ")==" + LT(i).getText());
+-            }
+-            else {
+-                System.out.print("LA(" + i + ")==null");
+-            }
+-        }
+-        System.out.println("");
+-    }
+-
+-    public void traceIn(String rname) throws TokenStreamException {
+-        traceDepth += 1;
+-        trace("> ", rname);
+-    }
+-
+-    public void traceOut(String rname) throws TokenStreamException {
+-        trace("< ", rname);
+-        traceDepth -= 1;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Lookahead.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Lookahead.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Lookahead.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Lookahead.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,217 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-/**This object holds all information needed to represent
+- * the lookahead for any particular lookahead computation
+- * for a <b>single</b> lookahead depth.  Final lookahead
+- * information is a simple bit set, but intermediate
+- * stages need computation cycle and FOLLOW information.
+- *
+- * <p>
+- * Concerning the <tt>cycle</tt> variable.
+- * If lookahead is computed for a RuleEnd node, then
+- * computation is part of a FOLLOW cycle for this rule.
+- * If lookahead is computed for a RuleBlock node, the
+- * computation is part of a FIRST cycle to this rule.
+- *
+- * <p>
+- * Concerning the <tt>epsilonDepth</tt> variable.
+- * This is not the depth relative to the rule reference
+- * that epsilon was encountered.  That value is
+- * <pre>
+- * 		initial_k - epsilonDepth + 1
+- * </pre>
+- * Also, lookahead depths past rule ref for local follow are:
+- * <pre>
+- * 		initial_k - (initial_k - epsilonDepth)
+- * </pre>
+- * Used for rule references.  If we try
+- * to compute look(k, ruleref) and there are fewer
+- * than k lookahead terminals before the end of the
+- * the rule, epsilon will be returned (don't want to
+- * pass the end of the rule).  We must track when the
+- * the lookahead got stuck.  For example,
+- * <pre>
+- * 		a : b A B E F G;
+- * 		b : C ;
+- * </pre>
+- * LOOK(5, ref-to(b)) is {<EPSILON>} with depth = 4, which
+- * indicates that at 2 (5-4+1) tokens ahead, end of rule was reached.
+- * Therefore, the token at 4=5-(5-4) past rule ref b must be
+- * included in the set == F.
+- * The situation is complicated by the fact that a computation
+- * may hit the end of a rule at many different depths.  For example,
+- * <pre>
+- * 		a : b A B C ;
+- * 		b : E F		// epsilon depth of 1 relative to initial k=3
+- * 		  | G		// epsilon depth of 2
+- * 		  ;
+- * </pre>
+- * Here, LOOK(3,ref-to(b)) returns epsilon, but the depths are
+- * {1, 2}; i.e., 3-(3-1) and 3-(3-2).  Those are the lookahead depths
+- * past the rule ref needed for the local follow.
+- *
+- * <p>
+- * This is null unless an epsilon is created.
+- *
+- * @see persistence.antlr.Lookahead#combineWith(Lookahead)
+- */
+-public class Lookahead implements Cloneable {
+-    /** actual bitset of the lookahead */
+-    BitSet fset;
+-    /** is this computation part of a computation cycle? */
+-    String cycle;
+-    /** What k values were being computed when end of rule hit? */
+-    BitSet epsilonDepth;
+-    /** Does this lookahead depth include Epsilon token type? This
+-     *  is used to avoid having a bit in the set for Epsilon as it
+-     *  conflicts with parsing binary files.
+-     */
+-    boolean hasEpsilon = false;
+-
+-    public Lookahead() {
+-        fset = new BitSet();
+-    }
+-
+-    /** create a new lookahead set with the LL(1) set to the parameter */
+-    public Lookahead(BitSet p) {
+-        fset = p;
+-    }
+-
+-    /** create an empty lookahead set, but with cycle */
+-    public Lookahead(String c) {
+-        this();
+-        cycle = c;
+-    }
+-
+-    /** Make a deep copy of everything in this object */
+-    public Object clone() {
+-        Lookahead p = null;
+-        try {
+-            p = (Lookahead)super.clone();
+-            p.fset = (BitSet)fset.clone();
+-            p.cycle = cycle; // strings are immutable
+-            if (epsilonDepth != null) {
+-                p.epsilonDepth = (BitSet)epsilonDepth.clone();
+-            }
+-        }
+-        catch (CloneNotSupportedException e) {
+-            throw new InternalError();
+-        }
+-        return p;
+-    }
+-
+-    public void combineWith(Lookahead q) {
+-        if (cycle == null) {	// track at least one cycle
+-            cycle = q.cycle;
+-        }
+-
+-        if (q.containsEpsilon()) {
+-            hasEpsilon = true;
+-        }
+-
+-        // combine epsilon depths
+-        if (epsilonDepth != null) {
+-            if (q.epsilonDepth != null) {
+-                epsilonDepth.orInPlace(q.epsilonDepth);
+-            }
+-        }
+-        else if (q.epsilonDepth != null) {
+-            epsilonDepth = (BitSet)q.epsilonDepth.clone();
+-        }
+-        fset.orInPlace(q.fset);
+-    }
+-
+-    public boolean containsEpsilon() {
+-        return hasEpsilon;
+-    }
+-
+-    /** What is the intersection of two lookahead depths?
+-     *  Only the Epsilon "bit" and bitset are considered.
+-     */
+-    public Lookahead intersection(Lookahead q) {
+-        Lookahead p = new Lookahead(fset.and(q.fset));
+-        if (this.hasEpsilon && q.hasEpsilon) {
+-            p.setEpsilon();
+-        }
+-        return p;
+-    }
+-
+-    public boolean nil() {
+-        return fset.nil() && !hasEpsilon;
+-    }
+-
+-    public static Lookahead of(int el) {
+-        Lookahead look = new Lookahead();
+-        look.fset.add(el);
+-        return look;
+-    }
+-
+-    public void resetEpsilon() {
+-        hasEpsilon = false;
+-    }
+-
+-    public void setEpsilon() {
+-        hasEpsilon = true;
+-    }
+-
+-    public String toString() {
+-        String e = "",b,f = "",d = "";
+-        b = fset.toString(",");
+-        if (containsEpsilon()) {
+-            e = "+<epsilon>";
+-        }
+-        if (cycle != null) {
+-            f = "; FOLLOW(" + cycle + ")";
+-        }
+-        if (epsilonDepth != null) {
+-            d = "; depths=" + epsilonDepth.toString(",");
+-        }
+-        return b + e + f + d;
+-
+-    }
+-
+-    public String toString(String separator, CharFormatter formatter) {
+-        String e = "",b,f = "",d = "";
+-        b = fset.toString(separator, formatter);
+-        if (containsEpsilon()) {
+-            e = "+<epsilon>";
+-        }
+-        if (cycle != null) {
+-            f = "; FOLLOW(" + cycle + ")";
+-        }
+-        if (epsilonDepth != null) {
+-            d = "; depths=" + epsilonDepth.toString(",");
+-        }
+-        return b + e + f + d;
+-    }
+-
+-    public String toString(String separator, CharFormatter formatter, Grammar g) {
+-        if (g instanceof LexerGrammar) {
+-            return toString(separator, formatter);
+-        }
+-        else {
+-            return toString(separator, g.tokenManager.getVocabulary());
+-        }
+-    }
+-
+-    public String toString(String separator, Vector vocab) {
+-        String b,f = "",d = "";
+-        b = fset.toString(separator, vocab);
+-        if (cycle != null) {
+-            f = "; FOLLOW(" + cycle + ")";
+-        }
+-        if (epsilonDepth != null) {
+-            d = "; depths=" + epsilonDepth.toString(",");
+-        }
+-        return b + f + d;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/MakeGrammar.java glassfish-gil/entity-persistence/src/java/persistence/antlr/MakeGrammar.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/MakeGrammar.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/MakeGrammar.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,790 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.Stack;
+-import persistence.antlr.collections.impl.LList;
+-import persistence.antlr.collections.impl.Vector;
+-
+-public class MakeGrammar extends DefineGrammarSymbols {
+-
+-    protected Stack blocks = new LList(); // track subrules--Stack<BlockContext>
+-    protected RuleRefElement lastRuleRef;
+-
+-    protected RuleEndElement ruleEnd;   // used if not nested
+-    protected RuleBlock ruleBlock;		// points to block of current rule.
+-    protected int nested = 0;			// nesting inside a subrule
+-    protected boolean grammarError = false;
+-
+-    ExceptionSpec currentExceptionSpec = null;
+-
+-    public MakeGrammar(Tool tool_, String[] args_, LLkAnalyzer analyzer_) {
+-        super(tool_, args_, analyzer_);
+-    }
+-
+-    /** Abort the processing of a grammar (due to syntax errors) */
+-    public void abortGrammar() {
+-        String s = "unknown grammar";
+-        if (grammar != null) {
+-            s = grammar.getClassName();
+-        }
+-        tool.error("aborting grammar '" + s + "' due to errors");
+-        super.abortGrammar();
+-    }
+-
+-    protected void addElementToCurrentAlt(AlternativeElement e) {
+-        e.enclosingRuleName = ruleBlock.ruleName;
+-        context().addAlternativeElement(e);
+-    }
+-
+-    public void beginAlt(boolean doAutoGen_) {
+-        super.beginAlt(doAutoGen_);
+-        Alternative alt = new Alternative();
+-        alt.setAutoGen(doAutoGen_);
+-        context().block.addAlternative(alt);
+-    }
+-
+-    public void beginChildList() {
+-        super.beginChildList();
+-        context().block.addAlternative(new Alternative());
+-    }
+-
+-    /** Add an exception group to a rule (currently a no-op) */
+-    public void beginExceptionGroup() {
+-        super.beginExceptionGroup();
+-        if (!(context().block instanceof RuleBlock)) {
+-            tool.panic("beginExceptionGroup called outside of rule block");
+-        }
+-    }
+-
+-    /** Add an exception spec to an exception group or rule block */
+-    public void beginExceptionSpec(Token label) {
+-        // Hack the label string a bit to remove leading/trailing space.
+-        if (label != null) {
+-            label.setText(StringUtils.stripFront(StringUtils.stripBack(label.getText(), " \n\r\t"), " \n\r\t"));
+-        }
+-        super.beginExceptionSpec(label);
+-        // Don't check for currentExceptionSpec!=null because syntax errors
+-        // may leave it set to something.
+-        currentExceptionSpec = new ExceptionSpec(label);
+-    }
+-
+-    public void beginSubRule(Token label, Token start, boolean not) {
+-        super.beginSubRule(label, start, not);
+-        // we don't know what kind of subrule it is yet.
+-        // push a dummy one that will allow us to collect the
+-        // alternatives.  Later, we'll switch to real object.
+-        blocks.push(new BlockContext());
+-        context().block = new AlternativeBlock(grammar, start, not);
+-        context().altNum = 0; // reset alternative number
+-        nested++;
+-        // create a final node to which the last elememt of each
+-        // alternative will point.
+-        context().blockEnd = new BlockEndElement(grammar);
+-        // make sure end node points to start of block
+-        context().blockEnd.block = context().block;
+-        labelElement(context().block, label);
+-    }
+-
+-    public void beginTree(Token tok) throws SemanticException {
+-        if (!(grammar instanceof TreeWalkerGrammar)) {
+-            tool.error("Trees only allowed in TreeParser", grammar.getFilename(), tok.getLine(), tok.getColumn());
+-            throw new SemanticException("Trees only allowed in TreeParser");
+-        }
+-        super.beginTree(tok);
+-        blocks.push(new TreeBlockContext());
+-        context().block = new TreeElement(grammar, tok);
+-        context().altNum = 0; // reset alternative number
+-    }
+-
+-    public BlockContext context() {
+-        if (blocks.height() == 0) {
+-            return null;
+-        }
+-        else {
+-            return (BlockContext)blocks.top();
+-        }
+-    }
+-
+-    /**Used to build nextToken() for the lexer.
+-     * This builds a rule which has every "public" rule in the given Vector of
+-     * rules as it's alternate.  Each rule ref generates a Token object.
+-     * @param g  The Grammar that is being processed
+-     * @param lexRules A vector of lexer rules that will be used to create an alternate block.
+-     * @param rname The name of the resulting rule.
+-     */
+-    public static RuleBlock createNextTokenRule(Grammar g, Vector lexRules, String rname) {
+-        // create actual rule data structure
+-        RuleBlock rb = new RuleBlock(g, rname);
+-        rb.setDefaultErrorHandler(g.getDefaultErrorHandler());
+-        RuleEndElement ruleEnd = new RuleEndElement(g);
+-        rb.setEndElement(ruleEnd);
+-        ruleEnd.block = rb;
+-        // Add an alternative for each element of the rules vector.
+-        for (int i = 0; i < lexRules.size(); i++) {
+-            RuleSymbol r = (RuleSymbol)lexRules.elementAt(i);
+-            if (!r.isDefined()) {
+-                g.antlrTool.error("Lexer rule " + r.id.substring(1) + " is not defined");
+-            }
+-            else {
+-                if (r.access.equals("public")) {
+-					Alternative alt = new Alternative(); // create alt we'll add to ref rule
+-					RuleBlock targetRuleBlock = r.getBlock();
+-					Vector targetRuleAlts = targetRuleBlock.getAlternatives();
+-					// collect a sem pred if only one alt and it's at the start;
+-					// simple, but faster to implement until real hoisting
+-					if ( targetRuleAlts!=null && targetRuleAlts.size()==1 ) {
+-						Alternative onlyAlt = (Alternative)targetRuleAlts.elementAt(0);
+-						if ( onlyAlt.semPred!=null ) {
+-							// ok, has sem pred, make this rule ref alt have a pred
+-							alt.semPred = onlyAlt.semPred;
+-							// REMOVE predicate from target rule???  NOPE, another
+-							// rule other than nextToken() might invoke it.
+-						}
+-					}
+-
+-                    // create a rule ref to lexer rule
+-                    // the Token is a RULE_REF not a TOKEN_REF since the
+-                    // conversion to mRulename has already taken place
+-                    RuleRefElement rr =
+-                        new RuleRefElement(g,
+-                                           new CommonToken(ANTLRTokenTypes.RULE_REF, r.getId()),
+-                                           GrammarElement.AUTO_GEN_NONE);
+-                    rr.setLabel("theRetToken");
+-                    rr.enclosingRuleName = "nextToken";
+-                    rr.next = ruleEnd;
+-					alt.addElement(rr);  		// add rule ref to alt
+-                    alt.setAutoGen(true);		// keep text of elements
+-                    rb.addAlternative(alt);		// add alt to rule block
+-                    r.addReference(rr);			// track ref to this rule in rule blk
+-                }
+-            }
+-        }
+-
+-        rb.setAutoGen(true);		// keep text of elements
+-        rb.prepareForAnalysis();
+-        //System.out.println(rb);
+-        return rb;
+-    }
+-
+-    /** Return block as if they had typed: "( rule )?" */
+-    private AlternativeBlock createOptionalRuleRef(String rule, Token start) {
+-        // Make the subrule
+-        AlternativeBlock blk = new AlternativeBlock(grammar, start, false);
+-
+-        // Make sure rule is defined
+-        String mrule = CodeGenerator.encodeLexerRuleName(rule); // can only be a lexer rule!
+-        if (!grammar.isDefined(mrule)) {
+-            grammar.define(new RuleSymbol(mrule));
+-        }
+-
+-        // Make the rule ref element
+-        // RK: fixme probably easier to abuse start token..
+-        Token t = new CommonToken(ANTLRTokenTypes.TOKEN_REF, rule);
+-        t.setLine(start.getLine());
+-        t.setLine(start.getColumn());
+-        RuleRefElement rref =
+-            new RuleRefElement(grammar, t, GrammarElement.AUTO_GEN_NONE);
+-
+-        rref.enclosingRuleName = ruleBlock.ruleName;
+-
+-        // Make the end of block element
+-        BlockEndElement end = new BlockEndElement(grammar);
+-        end.block = blk;		// end block points back to start of blk
+-
+-        // Make an alternative, putting the rule ref into it
+-        Alternative alt = new Alternative(rref);
+-        alt.addElement(end); // last element in alt points to end of block
+-
+-        // Add the alternative to this block
+-        blk.addAlternative(alt);
+-
+-        // create an empty (optional) alt and add to blk
+-        Alternative optAlt = new Alternative();
+-        optAlt.addElement(end);	// points immediately to end of block
+-
+-        blk.addAlternative(optAlt);
+-
+-        blk.prepareForAnalysis();
+-        return blk;
+-    }
+-
+-    public void defineRuleName(Token r,
+-                               String access,
+-                               boolean ruleAutoGen,
+-                               String docComment)
+-        throws SemanticException {
+-        //		if ( Character.isUpperCase(r.getText().charAt(0)) ) {
+-        if (r.type == ANTLRTokenTypes.TOKEN_REF) {
+-            if (!(grammar instanceof LexerGrammar)) {
+-                tool.error("Lexical rule " + r.getText() +
+-                           " defined outside of lexer",
+-                           grammar.getFilename(), r.getLine(), r.getColumn());
+-                r.setText(r.getText().toLowerCase());
+-            }
+-        }
+-        else {
+-            if (grammar instanceof LexerGrammar) {
+-                tool.error("Lexical rule names must be upper case, '" + r.getText() +
+-                           "' is not",
+-                           grammar.getFilename(), r.getLine(), r.getColumn());
+-                r.setText(r.getText().toUpperCase());
+-            }
+-        }
+-
+-        super.defineRuleName(r, access, ruleAutoGen, docComment);
+-        String id = r.getText();
+-        //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
+-        if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
+-            id = CodeGenerator.encodeLexerRuleName(id);
+-        }
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
+-        RuleBlock rb = new RuleBlock(grammar, r.getText(), r.getLine(), ruleAutoGen);
+-
+-        // Lexer rules do not generate default error handling
+-        rb.setDefaultErrorHandler(grammar.getDefaultErrorHandler());
+-
+-        ruleBlock = rb;
+-        blocks.push(new BlockContext()); // enter new context
+-        context().block = rb;
+-        rs.setBlock(rb);
+-        ruleEnd = new RuleEndElement(grammar);
+-        rb.setEndElement(ruleEnd);
+-        nested = 0;
+-    }
+-
+-    public void endAlt() {
+-        super.endAlt();
+-        if (nested == 0) {	// all rule-level alts link to ruleEnd node
+-            addElementToCurrentAlt(ruleEnd);
+-        }
+-        else {
+-            addElementToCurrentAlt(context().blockEnd);
+-        }
+-        context().altNum++;
+-    }
+-
+-    public void endChildList() {
+-        super.endChildList();
+-        // create a final node to which the last elememt of the single
+-        // alternative will point.  Done for compatibility with analyzer.
+-        // Does NOT point to any block like alternative blocks because the
+-        // TreeElement is not a block.  This is used only as a placeholder.
+-        BlockEndElement be = new BlockEndElement(grammar);
+-        be.block = context().block;
+-        addElementToCurrentAlt(be);
+-    }
+-
+-    public void endExceptionGroup() {
+-        super.endExceptionGroup();
+-    }
+-
+-    public void endExceptionSpec() {
+-        super.endExceptionSpec();
+-        if (currentExceptionSpec == null) {
+-            tool.panic("exception processing internal error -- no active exception spec");
+-        }
+-        if (context().block instanceof RuleBlock) {
+-            // Named rule
+-            ((RuleBlock)context().block).addExceptionSpec(currentExceptionSpec);
+-        }
+-        else {
+-            // It must be a plain-old alternative block
+-            if (context().currentAlt().exceptionSpec != null) {
+-                tool.error("Alternative already has an exception specification", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
+-            }
+-            else {
+-                context().currentAlt().exceptionSpec = currentExceptionSpec;
+-            }
+-        }
+-        currentExceptionSpec = null;
+-    }
+-
+-    /** Called at the end of processing a grammar */
+-    public void endGrammar() {
+-        if (grammarError) {
+-            abortGrammar();
+-        }
+-        else {
+-            super.endGrammar();
+-        }
+-    }
+-
+-    public void endRule(String rule) {
+-        super.endRule(rule);
+-        BlockContext ctx = (BlockContext)blocks.pop();	// remove scope
+-        // record the start of this block in the ending node
+-        ruleEnd.block = ctx.block;
+-        ruleEnd.block.prepareForAnalysis();
+-        //System.out.println(ctx.block);
+-    }
+-
+-    public void endSubRule() {
+-        super.endSubRule();
+-        nested--;
+-        // remove subrule context from scope stack
+-        BlockContext ctx = (BlockContext)blocks.pop();
+-        AlternativeBlock block = ctx.block;
+-
+-        // If the subrule is marked with ~, check that it is
+-        // a valid candidate for analysis
+-        if (
+-            block.not &&
+-            !(block instanceof SynPredBlock) &&
+-            !(block instanceof ZeroOrMoreBlock) &&
+-            !(block instanceof OneOrMoreBlock)
+-        ) {
+-            if (!analyzer.subruleCanBeInverted(block, grammar instanceof LexerGrammar)) {
+-                String newline = System.getProperty("line.separator");
+-                tool.error(
+-                    "This subrule cannot be inverted.  Only subrules of the form:" + newline +
+-                    "    (T1|T2|T3...) or" + newline +
+-                    "    ('c1'|'c2'|'c3'...)" + newline +
+-                    "may be inverted (ranges are also allowed).",
+-                    grammar.getFilename(),
+-                    block.getLine(), block.getColumn()
+-                );
+-            }
+-        }
+-
+-        // add the subrule as element if not a syn pred
+-        if (block instanceof SynPredBlock) {
+-            // record a reference to the recently-recognized syn pred in the
+-            // enclosing block.
+-            SynPredBlock synpred = (SynPredBlock)block;
+-            context().block.hasASynPred = true;
+-            context().currentAlt().synPred = synpred;
+-            grammar.hasSyntacticPredicate = true;
+-            synpred.removeTrackingOfRuleRefs(grammar);
+-        }
+-        else {
+-            addElementToCurrentAlt(block);
+-        }
+-        ctx.blockEnd.block.prepareForAnalysis();
+-    }
+-
+-    public void endTree() {
+-        super.endTree();
+-        BlockContext ctx = (BlockContext)blocks.pop();
+-        addElementToCurrentAlt(ctx.block);		// add new TreeElement to enclosing alt.
+-    }
+-
+-    /** Remember that a major error occured in the grammar */
+-    public void hasError() {
+-        grammarError = true;
+-    }
+-
+-    private void labelElement(AlternativeElement el, Token label) {
+-        if (label != null) {
+-            // Does this label already exist?
+-            for (int i = 0; i < ruleBlock.labeledElements.size(); i++) {
+-                AlternativeElement altEl = (AlternativeElement)ruleBlock.labeledElements.elementAt(i);
+-                String l = altEl.getLabel();
+-                if (l != null && l.equals(label.getText())) {
+-                    tool.error("Label '" + label.getText() + "' has already been defined", grammar.getFilename(), label.getLine(), label.getColumn());
+-                    return;
+-                }
+-            }
+-            // add this node to the list of labeled elements
+-            el.setLabel(label.getText());
+-            ruleBlock.labeledElements.appendElement(el);
+-        }
+-    }
+-
+-    public void noAutoGenSubRule() {
+-        context().block.setAutoGen(false);
+-    }
+-
+-    public void oneOrMoreSubRule() {
+-        if (context().block.not) {
+-            tool.error("'~' cannot be applied to (...)* subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
+-        }
+-        // create the right kind of object now that we know what that is
+-        // and switch the list of alternatives.  Adjust the stack of blocks.
+-        // copy any init action also.
+-        OneOrMoreBlock b = new OneOrMoreBlock(grammar);
+-        setBlock(b, context().block);
+-        BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
+-        blocks.push(new BlockContext());
+-        context().block = b;
+-        context().blockEnd = old.blockEnd;
+-        context().blockEnd.block = b;
+-    }
+-
+-    public void optionalSubRule() {
+-        if (context().block.not) {
+-            tool.error("'~' cannot be applied to (...)? subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
+-        }
+-        // convert (X)? -> (X|) so that we can ignore optional blocks altogether!
+-        // It already thinks that we have a simple subrule, just add option block.
+-        beginAlt(false);
+-        endAlt();
+-    }
+-
+-    public void refAction(Token action) {
+-        super.refAction(action);
+-        context().block.hasAnAction = true;
+-        addElementToCurrentAlt(new ActionElement(grammar, action));
+-    }
+-
+-    public void setUserExceptions(String thr) {
+-        ((RuleBlock)context().block).throwsSpec = thr;
+-    }
+-
+-    // Only called for rule blocks
+-    public void refArgAction(Token action) {
+-        ((RuleBlock)context().block).argAction = action.getText();
+-    }
+-
+-    public void refCharLiteral(Token lit, Token label, boolean inverted, int autoGenType, boolean lastInRule) {
+-        if (!(grammar instanceof LexerGrammar)) {
+-            tool.error("Character literal only valid in lexer", grammar.getFilename(), lit.getLine(), lit.getColumn());
+-            return;
+-        }
+-        super.refCharLiteral(lit, label, inverted, autoGenType, lastInRule);
+-        CharLiteralElement cl = new CharLiteralElement((LexerGrammar)grammar, lit, inverted, autoGenType);
+-
+-        // Generate a warning for non-lowercase ASCII when case-insensitive
+-        if (
+-            !((LexerGrammar)grammar).caseSensitive && cl.getType() < 128 &&
+-            Character.toLowerCase((char)cl.getType()) != (char)cl.getType()
+-        ) {
+-            tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
+-        }
+-
+-        addElementToCurrentAlt(cl);
+-        labelElement(cl, label);
+-
+-        // if ignore option is set, must add an optional call to the specified rule.
+-        String ignore = ruleBlock.getIgnoreRule();
+-        if (!lastInRule && ignore != null) {
+-            addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
+-        }
+-    }
+-
+-    public void refCharRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
+-        if (!(grammar instanceof LexerGrammar)) {
+-            tool.error("Character range only valid in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
+-            return;
+-        }
+-        int rangeMin = ANTLRLexer.tokenTypeForCharLiteral(t1.getText());
+-        int rangeMax = ANTLRLexer.tokenTypeForCharLiteral(t2.getText());
+-        if (rangeMax < rangeMin) {
+-            tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
+-            return;
+-        }
+-
+-        // Generate a warning for non-lowercase ASCII when case-insensitive
+-        if (!((LexerGrammar)grammar).caseSensitive) {
+-            if (rangeMin < 128 && Character.toLowerCase((char)rangeMin) != (char)rangeMin) {
+-                tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t1.getLine(), t1.getColumn());
+-            }
+-            if (rangeMax < 128 && Character.toLowerCase((char)rangeMax) != (char)rangeMax) {
+-                tool.warning("Character literal must be lowercase when caseSensitive=false", grammar.getFilename(), t2.getLine(), t2.getColumn());
+-            }
+-        }
+-
+-        super.refCharRange(t1, t2, label, autoGenType, lastInRule);
+-        CharRangeElement cr = new CharRangeElement((LexerGrammar)grammar, t1, t2, autoGenType);
+-        addElementToCurrentAlt(cr);
+-        labelElement(cr, label);
+-
+-        // if ignore option is set, must add an optional call to the specified rule.
+-        String ignore = ruleBlock.getIgnoreRule();
+-        if (!lastInRule && ignore != null) {
+-            addElementToCurrentAlt(createOptionalRuleRef(ignore, t1));
+-        }
+-    }
+-
+-    public void refTokensSpecElementOption(Token tok,
+-                                           Token option,
+-                                           Token value) {
+-        /*
+-		System.out.println("setting tokens spec option for "+tok.getText());
+-		System.out.println(option.getText()+","+value.getText());
+-		*/
+-        TokenSymbol ts = (TokenSymbol)
+-            grammar.tokenManager.getTokenSymbol(tok.getText());
+-        if (ts == null) {
+-            tool.panic("cannot find " + tok.getText() + "in tokens {...}");
+-        }
+-        if (option.getText().equals("AST")) {
+-            ts.setASTNodeType(value.getText());
+-        }
+-        else {
+-            grammar.antlrTool.error("invalid tokens {...} element option:" +
+-                               option.getText(),
+-                               grammar.getFilename(),
+-                               option.getLine(), option.getColumn());
+-        }
+-    }
+-
+-    public void refElementOption(Token option, Token value) {
+-        /*
+-		System.out.println("setting option for "+context().currentElement());
+-		System.out.println(option.getText()+","+value.getText());
+-		*/
+-        AlternativeElement e = context().currentElement();
+-        if (e instanceof StringLiteralElement ||
+-            e instanceof TokenRefElement ||
+-            e instanceof WildcardElement) {
+-            ((GrammarAtom)e).setOption(option, value);
+-        }
+-        else {
+-            tool.error("cannot use element option (" + option.getText() +
+-                       ") for this kind of element",
+-                       grammar.getFilename(), option.getLine(), option.getColumn());
+-        }
+-    }
+-
+-    /** Add an exception handler to an exception spec */
+-    public void refExceptionHandler(Token exTypeAndName, Token action) {
+-        super.refExceptionHandler(exTypeAndName, action);
+-        if (currentExceptionSpec == null) {
+-            tool.panic("exception handler processing internal error");
+-        }
+-        currentExceptionSpec.addHandler(new ExceptionHandler(exTypeAndName, action));
+-    }
+-
+-    public void refInitAction(Token action) {
+-        super.refAction(action);
+-        context().block.setInitAction(action.getText());
+-    }
+-
+-    public void refMemberAction(Token act) {
+-        grammar.classMemberAction = act;
+-    }
+-
+-    public void refPreambleAction(Token act) {
+-        super.refPreambleAction(act);
+-    }
+-
+-    // Only called for rule blocks
+-    public void refReturnAction(Token returnAction) {
+-        if (grammar instanceof LexerGrammar) {
+-            String name = CodeGenerator.encodeLexerRuleName(((RuleBlock)context().block).getRuleName());
+-            RuleSymbol rs = (RuleSymbol)grammar.getSymbol(name);
+-            if (rs.access.equals("public")) {
+-                tool.warning("public Lexical rules cannot specify return type", grammar.getFilename(), returnAction.getLine(), returnAction.getColumn());
+-                return;
+-            }
+-        }
+-        ((RuleBlock)context().block).returnAction = returnAction.getText();
+-    }
+-
+-    public void refRule(Token idAssign,
+-                        Token r,
+-                        Token label,
+-                        Token args,
+-                        int autoGenType) {
+-        // Disallow parser rule references in the lexer
+-        if (grammar instanceof LexerGrammar) {
+-            //			if (!Character.isUpperCase(r.getText().charAt(0))) {
+-            if (r.type != ANTLRTokenTypes.TOKEN_REF) {
+-                tool.error("Parser rule " + r.getText() + " referenced in lexer");
+-                return;
+-            }
+-            if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
+-                tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), r.getLine(), r.getColumn());
+-            }
+-        }
+-
+-        super.refRule(idAssign, r, label, args, autoGenType);
+-        lastRuleRef = new RuleRefElement(grammar, r, autoGenType);
+-        if (args != null) {
+-            lastRuleRef.setArgs(args.getText());
+-        }
+-        if (idAssign != null) {
+-            lastRuleRef.setIdAssign(idAssign.getText());
+-        }
+-        addElementToCurrentAlt(lastRuleRef);
+-
+-        String id = r.getText();
+-        //		if ( Character.isUpperCase(id.charAt(0)) ) { // lexer rule?
+-        if (r.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
+-            id = CodeGenerator.encodeLexerRuleName(id);
+-        }
+-        // update symbol table so it knows what nodes reference the rule.
+-        RuleSymbol rs = (RuleSymbol)grammar.getSymbol(id);
+-        rs.addReference(lastRuleRef);
+-        labelElement(lastRuleRef, label);
+-    }
+-
+-    public void refSemPred(Token pred) {
+-        //System.out.println("refSemPred "+pred.getText());
+-        super.refSemPred(pred);
+-        //System.out.println("context().block: "+context().block);
+-        if (context().currentAlt().atStart()) {
+-            context().currentAlt().semPred = pred.getText();
+-        }
+-        else {
+-            ActionElement a = new ActionElement(grammar, pred);
+-            a.isSemPred = true;
+-            addElementToCurrentAlt(a);
+-        }
+-        //System.out.println("DONE refSemPred "+pred.getText());
+-    }
+-
+-    public void refStringLiteral(Token lit, Token label, int autoGenType, boolean lastInRule) {
+-        super.refStringLiteral(lit, label, autoGenType, lastInRule);
+-        if (grammar instanceof TreeWalkerGrammar && autoGenType == GrammarElement.AUTO_GEN_CARET) {
+-            tool.error("^ not allowed in here for tree-walker", grammar.getFilename(), lit.getLine(), lit.getColumn());
+-        }
+-        StringLiteralElement sl = new StringLiteralElement(grammar, lit, autoGenType);
+-
+-        // If case-insensitive, then check each char of the stirng literal
+-        if (grammar instanceof LexerGrammar && !((LexerGrammar)grammar).caseSensitive) {
+-            for (int i = 1; i < lit.getText().length() - 1; i++) {
+-                char c = lit.getText().charAt(i);
+-                if (c < 128 && Character.toLowerCase(c) != c) {
+-                    tool.warning("Characters of string literal must be lowercase when caseSensitive=false", grammar.getFilename(), lit.getLine(), lit.getColumn());
+-                    break;
+-                }
+-            }
+-        }
+-
+-        addElementToCurrentAlt(sl);
+-        labelElement(sl, label);
+-
+-        // if ignore option is set, must add an optional call to the specified rule.
+-        String ignore = ruleBlock.getIgnoreRule();
+-        if (!lastInRule && ignore != null) {
+-            addElementToCurrentAlt(createOptionalRuleRef(ignore, lit));
+-        }
+-    }
+-
+-    public void refToken(Token idAssign, Token t, Token label, Token args,
+-                         boolean inverted, int autoGenType, boolean lastInRule) {
+-        if (grammar instanceof LexerGrammar) {
+-            // In lexer, token references are really rule references
+-            if (autoGenType == GrammarElement.AUTO_GEN_CARET) {
+-                tool.error("AST specification ^ not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
+-            }
+-            if (inverted) {
+-                tool.error("~TOKEN is not allowed in lexer", grammar.getFilename(), t.getLine(), t.getColumn());
+-            }
+-            refRule(idAssign, t, label, args, autoGenType);
+-
+-            // if ignore option is set, must add an optional call to the specified token rule.
+-            String ignore = ruleBlock.getIgnoreRule();
+-            if (!lastInRule && ignore != null) {
+-                addElementToCurrentAlt(createOptionalRuleRef(ignore, t));
+-            }
+-        }
+-        else {
+-            // Cannot have token ref args or assignment outside of lexer
+-            if (idAssign != null) {
+-                tool.error("Assignment from token reference only allowed in lexer", grammar.getFilename(), idAssign.getLine(), idAssign.getColumn());
+-            }
+-            if (args != null) {
+-                tool.error("Token reference arguments only allowed in lexer", grammar.getFilename(), args.getLine(), args.getColumn());
+-            }
+-            super.refToken(idAssign, t, label, args, inverted, autoGenType, lastInRule);
+-            TokenRefElement te = new TokenRefElement(grammar, t, inverted, autoGenType);
+-            addElementToCurrentAlt(te);
+-            labelElement(te, label);
+-        }
+-    }
+-
+-    public void refTokenRange(Token t1, Token t2, Token label, int autoGenType, boolean lastInRule) {
+-        if (grammar instanceof LexerGrammar) {
+-            tool.error("Token range not allowed in lexer", grammar.getFilename(), t1.getLine(), t1.getColumn());
+-            return;
+-        }
+-        super.refTokenRange(t1, t2, label, autoGenType, lastInRule);
+-        TokenRangeElement tr = new TokenRangeElement(grammar, t1, t2, autoGenType);
+-        if (tr.end < tr.begin) {
+-            tool.error("Malformed range.", grammar.getFilename(), t1.getLine(), t1.getColumn());
+-            return;
+-        }
+-        addElementToCurrentAlt(tr);
+-        labelElement(tr, label);
+-    }
+-
+-    public void refTreeSpecifier(Token treeSpec) {
+-        context().currentAlt().treeSpecifier = treeSpec;
+-    }
+-
+-    public void refWildcard(Token t, Token label, int autoGenType) {
+-        super.refWildcard(t, label, autoGenType);
+-        WildcardElement wc = new WildcardElement(grammar, t, autoGenType);
+-        addElementToCurrentAlt(wc);
+-        labelElement(wc, label);
+-    }
+-
+-    /** Get ready to process a new grammar */
+-    public void reset() {
+-        super.reset();
+-        blocks = new LList();
+-        lastRuleRef = null;
+-        ruleEnd = null;
+-        ruleBlock = null;
+-        nested = 0;
+-        currentExceptionSpec = null;
+-        grammarError = false;
+-    }
+-
+-    public void setArgOfRuleRef(Token argAction) {
+-        super.setArgOfRuleRef(argAction);
+-        lastRuleRef.setArgs(argAction.getText());
+-    }
+-
+-    public static void setBlock(AlternativeBlock b, AlternativeBlock src) {
+-        b.setAlternatives(src.getAlternatives());
+-        b.initAction = src.initAction;
+-        //b.lookaheadDepth = src.lookaheadDepth;
+-        b.label = src.label;
+-        b.hasASynPred = src.hasASynPred;
+-        b.hasAnAction = src.hasAnAction;
+-        b.warnWhenFollowAmbig = src.warnWhenFollowAmbig;
+-        b.generateAmbigWarnings = src.generateAmbigWarnings;
+-        b.line = src.line;
+-        b.greedy = src.greedy;
+-        b.greedySet = src.greedySet;
+-    }
+-
+-    public void setRuleOption(Token key, Token value) {
+-        //((RuleBlock)context().block).setOption(key, value);
+-        ruleBlock.setOption(key, value);
+-    }
+-
+-    public void setSubruleOption(Token key, Token value) {
+-        ((AlternativeBlock)context().block).setOption(key, value);
+-    }
+-
+-    public void synPred() {
+-        if (context().block.not) {
+-            tool.error("'~' cannot be applied to syntactic predicate", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
+-        }
+-        // create the right kind of object now that we know what that is
+-        // and switch the list of alternatives.  Adjust the stack of blocks.
+-        // copy any init action also.
+-        SynPredBlock b = new SynPredBlock(grammar);
+-        setBlock(b, context().block);
+-        BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
+-        blocks.push(new BlockContext());
+-        context().block = b;
+-        context().blockEnd = old.blockEnd;
+-        context().blockEnd.block = b;
+-    }
+-
+-    public void zeroOrMoreSubRule() {
+-        if (context().block.not) {
+-            tool.error("'~' cannot be applied to (...)+ subrule", grammar.getFilename(), context().block.getLine(), context().block.getColumn());
+-        }
+-        // create the right kind of object now that we know what that is
+-        // and switch the list of alternatives.  Adjust the stack of blocks.
+-        // copy any init action also.
+-        ZeroOrMoreBlock b = new ZeroOrMoreBlock(grammar);
+-        setBlock(b, context().block);
+-        BlockContext old = (BlockContext)blocks.pop(); // remove old scope; we want new type of subrule
+-        blocks.push(new BlockContext());
+-        context().block = b;
+-        context().blockEnd = old.blockEnd;
+-        context().blockEnd.block = b;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/MismatchedCharException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/MismatchedCharException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/MismatchedCharException.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/MismatchedCharException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,145 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-public class MismatchedCharException extends RecognitionException {
+-    // Types of chars
+-    public static final int CHAR = 1;
+-    public static final int NOT_CHAR = 2;
+-    public static final int RANGE = 3;
+-    public static final int NOT_RANGE = 4;
+-    public static final int SET = 5;
+-    public static final int NOT_SET = 6;
+-
+-    // One of the above
+-    public int mismatchType;
+-
+-    // what was found on the input stream
+-    public int foundChar;
+-
+-    // For CHAR/NOT_CHAR and RANGE/NOT_RANGE
+-    public int expecting;
+-
+-    // For RANGE/NOT_RANGE (expecting is lower bound of range)
+-    public int upper;
+-
+-    // For SET/NOT_SET
+-    public BitSet set;
+-
+-    // who knows...they may want to ask scanner questions
+-    public CharScanner scanner;
+-
+-    /**
+-     * MismatchedCharException constructor comment.
+-     */
+-    public MismatchedCharException() {
+-        super("Mismatched char");
+-    }
+-
+-    // Expected range / not range
+-    public MismatchedCharException(char c, char lower, char upper_, boolean matchNot, CharScanner scanner_) {
+-        super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
+-        mismatchType = matchNot ? NOT_RANGE : RANGE;
+-        foundChar = c;
+-        expecting = lower;
+-        upper = upper_;
+-        scanner = scanner_;
+-    }
+-
+-    // Expected token / not token
+-    public MismatchedCharException(char c, char expecting_, boolean matchNot, CharScanner scanner_) {
+-        super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
+-        mismatchType = matchNot ? NOT_CHAR : CHAR;
+-        foundChar = c;
+-        expecting = expecting_;
+-        scanner = scanner_;
+-    }
+-
+-    // Expected BitSet / not BitSet
+-    public MismatchedCharException(char c, BitSet set_, boolean matchNot, CharScanner scanner_) {
+-        super("Mismatched char", scanner_.getFilename(), scanner_.getLine(), scanner_.getColumn());
+-        mismatchType = matchNot ? NOT_SET : SET;
+-        foundChar = c;
+-        set = set_;
+-        scanner = scanner_;
+-    }
+-
+-    /**
+-     * Returns a clean error message (no line number/column information)
+-     */
+-    public String getMessage() {
+-        StringBuffer sb = new StringBuffer();
+-
+-        switch (mismatchType) {
+-            case CHAR:
+-                sb.append("expecting ");   appendCharName(sb, expecting);
+-                sb.append(", found ");     appendCharName(sb, foundChar);
+-                break;
+-            case NOT_CHAR:
+-                sb.append("expecting anything but '");
+-                appendCharName(sb, expecting);
+-                sb.append("'; got it anyway");
+-                break;
+-            case RANGE:
+-            case NOT_RANGE:
+-                sb.append("expecting token ");
+-                if (mismatchType == NOT_RANGE)
+-                    sb.append("NOT ");
+-                sb.append("in range: ");
+-                appendCharName(sb, expecting);
+-                sb.append("..");
+-                appendCharName(sb, upper);
+-                sb.append(", found ");
+-                appendCharName(sb, foundChar);
+-                break;
+-            case SET:
+-            case NOT_SET:
+-                sb.append("expecting " + (mismatchType == NOT_SET ? "NOT " : "") + "one of (");
+-                int[] elems = set.toArray();
+-                for (int i = 0; i < elems.length; i++) {
+-                    appendCharName(sb, elems[i]);
+-                }
+-                sb.append("), found ");
+-                appendCharName(sb, foundChar);
+-                break;
+-            default :
+-                sb.append(super.getMessage());
+-                break;
+-        }
+-
+-        return sb.toString();
+-    }
+-
+-    /** Append a char to the msg buffer.  If special,
+-	 *  then show escaped version
+-	 */
+-	private void appendCharName(StringBuffer sb, int c) {
+-        switch (c) {
+-		case 65535 :
+-			// 65535 = (char) -1 = EOF
+-            sb.append("'<EOF>'");
+-			break;
+-		case '\n' :
+-			sb.append("'\\n'");
+-			break;
+-		case '\r' :
+-			sb.append("'\\r'");
+-			break;
+-		case '\t' :
+-			sb.append("'\\t'");
+-			break;
+-		default :
+-            sb.append('\'');
+-            sb.append((char) c);
+-            sb.append('\'');
+-			break;
+-        }
+-    }
+-}
+-
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/MismatchedTokenException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/MismatchedTokenException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/MismatchedTokenException.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/MismatchedTokenException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,171 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.AST;
+-
+-public class MismatchedTokenException extends RecognitionException {
+-    // Token names array for formatting
+-    String[] tokenNames;
+-    // The token that was encountered
+-    public Token token;
+-    // The offending AST node if tree walking
+-    public AST node;
+-
+-    String tokenText = null; // taken from node or token object
+-
+-    // Types of tokens
+-    public static final int TOKEN = 1;
+-    public static final int NOT_TOKEN = 2;
+-    public static final int RANGE = 3;
+-    public static final int NOT_RANGE = 4;
+-    public static final int SET = 5;
+-    public static final int NOT_SET = 6;
+-    // One of the above
+-    public int mismatchType;
+-
+-    // For TOKEN/NOT_TOKEN and RANGE/NOT_RANGE
+-    public int expecting;
+-
+-    // For RANGE/NOT_RANGE (expecting is lower bound of range)
+-    public int upper;
+-
+-    // For SET/NOT_SET
+-    public BitSet set;
+-
+-    /** Looking for AST wildcard, didn't find it */
+-    public MismatchedTokenException() {
+-        super("Mismatched Token: expecting any AST node", "<AST>", -1, -1);
+-    }
+-
+-    // Expected range / not range
+-    public MismatchedTokenException(String[] tokenNames_, AST node_, int lower, int upper_, boolean matchNot) {
+-        super("Mismatched Token", "<AST>", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn());
+-        tokenNames = tokenNames_;
+-        node = node_;
+-        if (node_ == null) {
+-            tokenText = "<empty tree>";
+-        }
+-        else {
+-            tokenText = node_.toString();
+-        }
+-        mismatchType = matchNot ? NOT_RANGE : RANGE;
+-        expecting = lower;
+-        upper = upper_;
+-    }
+-
+-    // Expected token / not token
+-    public MismatchedTokenException(String[] tokenNames_, AST node_, int expecting_, boolean matchNot) {
+-		super("Mismatched Token", "<AST>", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn());
+-        tokenNames = tokenNames_;
+-        node = node_;
+-        if (node_ == null) {
+-            tokenText = "<empty tree>";
+-        }
+-        else {
+-            tokenText = node_.toString();
+-        }
+-        mismatchType = matchNot ? NOT_TOKEN : TOKEN;
+-        expecting = expecting_;
+-    }
+-
+-    // Expected BitSet / not BitSet
+-    public MismatchedTokenException(String[] tokenNames_, AST node_, BitSet set_, boolean matchNot) {
+-		super("Mismatched Token", "<AST>", node_==null? -1:node_.getLine(), node_==null? -1:node_.getColumn());
+-        tokenNames = tokenNames_;
+-        node = node_;
+-        if (node_ == null) {
+-            tokenText = "<empty tree>";
+-        }
+-        else {
+-            tokenText = node_.toString();
+-        }
+-        mismatchType = matchNot ? NOT_SET : SET;
+-        set = set_;
+-    }
+-
+-    // Expected range / not range
+-    public MismatchedTokenException(String[] tokenNames_, Token token_, int lower, int upper_, boolean matchNot, String fileName_) {
+-        super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn());
+-        tokenNames = tokenNames_;
+-        token = token_;
+-        tokenText = token_.getText();
+-        mismatchType = matchNot ? NOT_RANGE : RANGE;
+-        expecting = lower;
+-        upper = upper_;
+-    }
+-
+-    // Expected token / not token
+-    public MismatchedTokenException(String[] tokenNames_, Token token_, int expecting_, boolean matchNot, String fileName_) {
+-        super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn());
+-        tokenNames = tokenNames_;
+-        token = token_;
+-        tokenText = token_.getText();
+-        mismatchType = matchNot ? NOT_TOKEN : TOKEN;
+-        expecting = expecting_;
+-    }
+-
+-    // Expected BitSet / not BitSet
+-    public MismatchedTokenException(String[] tokenNames_, Token token_, BitSet set_, boolean matchNot, String fileName_) {
+-        super("Mismatched Token", fileName_, token_.getLine(), token_.getColumn());
+-        tokenNames = tokenNames_;
+-        token = token_;
+-        tokenText = token_.getText();
+-        mismatchType = matchNot ? NOT_SET : SET;
+-        set = set_;
+-    }
+-
+-    /**
+-     * Returns a clean error message (no line number/column information)
+-     */
+-    public String getMessage() {
+-        StringBuffer sb = new StringBuffer();
+-
+-        switch (mismatchType) {
+-            case TOKEN:
+-                sb.append("expecting " + tokenName(expecting) + ", found '" + tokenText + "'");
+-                break;
+-            case NOT_TOKEN:
+-                sb.append("expecting anything but " + tokenName(expecting) + "; got it anyway");
+-                break;
+-            case RANGE:
+-                sb.append("expecting token in range: " + tokenName(expecting) + ".." + tokenName(upper) + ", found '" + tokenText + "'");
+-                break;
+-            case NOT_RANGE:
+-                sb.append("expecting token NOT in range: " + tokenName(expecting) + ".." + tokenName(upper) + ", found '" + tokenText + "'");
+-                break;
+-            case SET:
+-            case NOT_SET:
+-                sb.append("expecting " + (mismatchType == NOT_SET ? "NOT " : "") + "one of (");
+-                int[] elems = set.toArray();
+-                for (int i = 0; i < elems.length; i++) {
+-                    sb.append(" ");
+-                    sb.append(tokenName(elems[i]));
+-                }
+-                sb.append("), found '" + tokenText + "'");
+-                break;
+-            default :
+-                sb.append(super.getMessage());
+-                break;
+-        }
+-
+-        return sb.toString();
+-    }
+-
+-    private String tokenName(int tokenType) {
+-        if (tokenType == Token.INVALID_TYPE) {
+-            return "<Set of tokens>";
+-        }
+-        else if (tokenType < 0 || tokenType >= tokenNames.length) {
+-            return "<" + String.valueOf(tokenType) + ">";
+-        }
+-        else {
+-            return tokenNames[tokenType];
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/NameSpace.java glassfish-gil/entity-persistence/src/java/persistence/antlr/NameSpace.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/NameSpace.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/NameSpace.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,63 +0,0 @@
+-package persistence.antlr;
+-
+-/**
+- * ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- * Container for a C++ namespace specification.  Namespaces can be
+- * nested, so this contains a vector of all the nested names.
+- *
+- * @author David Wagner (JPL/Caltech) 8-12-00
+- *
+- */
+-
+-import java.util.Vector;
+-import java.util.Enumeration;
+-import java.io.PrintWriter;
+-import java.util.StringTokenizer;
+-
+-public class NameSpace {
+-    private Vector names = new Vector();
+-    private String _name;
+-
+-    public NameSpace(String name) {
+-    	  _name = new String(name);
+-        parse(name);
+-    }
+-
+-	 public String getName()
+-	 {
+-	 	return _name;
+-	 }
+-	
+-    /**
+-     * Parse a C++ namespace declaration into seperate names
+-     * splitting on ::  We could easily parameterize this to make
+-     * the delimiter a language-specific parameter, or use subclasses
+-     * to support C++ namespaces versus java packages. -DAW
+-     */
+-    protected void parse(String name) {
+-        StringTokenizer tok = new StringTokenizer(name, "::");
+-        while (tok.hasMoreTokens())
+-            names.addElement(tok.nextToken());
+-    }
+-
+-    /**
+-     * Method to generate the required C++ namespace declarations
+-     */
+-    void emitDeclarations(PrintWriter out) {
+-        for (Enumeration n = names.elements(); n.hasMoreElements();) {
+-            String s = (String)n.nextElement();
+-            out.println("ANTLR_BEGIN_NAMESPACE(" + s + ")");
+-        }
+-    }
+-
+-    /**
+-     * Method to generate the required C++ namespace closures
+-     */
+-    void emitClosures(PrintWriter out) {
+-        for (int i = 0; i < names.size(); ++i)
+-            out.println("ANTLR_END_NAMESPACE");
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/NoViableAltException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/NoViableAltException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/NoViableAltException.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/NoViableAltException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,39 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.AST;
+-
+-public class NoViableAltException extends RecognitionException {
+-    public Token token;
+-    public AST node;	// handles parsing and treeparsing
+-
+-    public NoViableAltException(AST t) {
+-        super("NoViableAlt", "<AST>", t.getLine(), t.getColumn());
+-        node = t;
+-    }
+-
+-    public NoViableAltException(Token t, String fileName_) {
+-        super("NoViableAlt", fileName_, t.getLine(), t.getColumn());
+-        token = t;
+-    }
+-
+-    /**
+-     * Returns a clean error message (no line number/column information)
+-     */
+-    public String getMessage() {
+-        if (token != null) {
+-            return "unexpected token: " + token.getText();
+-        }
+-
+-        // must a tree parser error if token==null
+-        if (node == TreeParser.ASTNULL) {
+-            return "unexpected end of subtree";
+-        }
+-        return "unexpected AST node: " + node.toString();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/NoViableAltForCharException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/NoViableAltForCharException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/NoViableAltForCharException.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/NoViableAltForCharException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,62 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class NoViableAltForCharException extends RecognitionException {
+-    public char foundChar;
+-
+-    public NoViableAltForCharException(char c, CharScanner scanner) {
+-        super("NoViableAlt", scanner.getFilename(),
+-              scanner.getLine(), scanner.getColumn());
+-        foundChar = c;
+-    }
+-
+-    /** @deprecated As of ANTLR 2.7.2 use {@see #NoViableAltForCharException(char, String, int, int) } */
+-    public NoViableAltForCharException(char c, String fileName, int line) {
+-        this(c, fileName, line, -1);
+-    }
+-    
+-    public NoViableAltForCharException(char c, String fileName, int line, int column) {
+-        super("NoViableAlt", fileName, line, column);
+-        foundChar = c;
+-    }
+-
+-    /**
+-     * Returns a clean error message (no line number/column information)
+-     */
+-    public String getMessage() {
+-        String mesg = "unexpected char: ";
+-
+-        // I'm trying to mirror a change in the C++ stuff.
+-        // But java seems to lack something isprint-ish..
+-        // so we do it manually. This is probably to restrictive.
+-
+-        if ((foundChar >= ' ') && (foundChar <= '~')) {
+-            mesg += '\'';
+-            mesg += foundChar;
+-            mesg += '\'';
+-        }
+-        else {
+-            mesg += "0x";
+-
+-            int t = (int)foundChar >> 4;
+-
+-            if (t < 10)
+-                mesg += (char)(t | 0x30);
+-            else
+-                mesg += (char)(t + 0x37);
+-
+-            t = (int)foundChar & 0xF;
+-
+-            if (t < 10)
+-                mesg += (char)(t | 0x30);
+-            else
+-                mesg += (char)(t + 0x37);
+-        }
+-        return mesg;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/OneOrMoreBlock.java glassfish-gil/entity-persistence/src/java/persistence/antlr/OneOrMoreBlock.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/OneOrMoreBlock.java	2006-08-31 00:34:08.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/OneOrMoreBlock.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,30 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class OneOrMoreBlock extends BlockWithImpliedExitPath {
+-
+-    public OneOrMoreBlock(Grammar g) {
+-        super(g);
+-    }
+-
+-    public OneOrMoreBlock(Grammar g, Token start) {
+-        super(g, start);
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        return super.toString() + "+";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ParserGrammar.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ParserGrammar.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ParserGrammar.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ParserGrammar.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,98 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import java.io.IOException;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-
+-/** Parser-specific grammar subclass */
+-class ParserGrammar extends Grammar {
+-
+-
+-    ParserGrammar(String className_, Tool tool_, String superClass) {
+-        super(className_, tool_, superClass);
+-    }
+-
+-    /** Top-level call to generate the code for this grammar */
+-    public void generate() throws IOException {
+-        generator.gen(this);
+-    }
+-
+-    // Get name of class from which generated parser/lexer inherits
+-    protected String getSuperClass() {
+-        // if debugging, choose the debugging version of the parser
+-        if (debuggingOutput)
+-            return "debug.LLkDebuggingParser";
+-        return "LLkParser";
+-    }
+-
+-    /**Process command line arguments.
+-     * -trace			have all rules call traceIn/traceOut
+-     * -traceParser		have parser rules call traceIn/traceOut
+-     * -debug			generate debugging output for parser debugger
+-     */
+-    public void processArguments(String[] args) {
+-        for (int i = 0; i < args.length; i++) {
+-            if (args[i].equals("-trace")) {
+-                traceRules = true;
+-                antlrTool.setArgOK(i);
+-            }
+-            else if (args[i].equals("-traceParser")) {
+-                traceRules = true;
+-                antlrTool.setArgOK(i);
+-            }
+-            else if (args[i].equals("-debug")) {
+-                debuggingOutput = true;
+-                antlrTool.setArgOK(i);
+-            }
+-        }
+-    }
+-
+-    /** Set parser options -- performs action on the following options:
+-     */
+-    public boolean setOption(String key, Token value) {
+-        String s = value.getText();
+-        if (key.equals("buildAST")) {
+-            if (s.equals("true")) {
+-                buildAST = true;
+-            }
+-            else if (s.equals("false")) {
+-                buildAST = false;
+-            }
+-            else {
+-                antlrTool.error("buildAST option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("interactive")) {
+-            if (s.equals("true")) {
+-                interactive = true;
+-            }
+-            else if (s.equals("false")) {
+-                interactive = false;
+-            }
+-            else {
+-                antlrTool.error("interactive option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("ASTLabelType")) {
+-            super.setOption(key, value);
+-            return true;
+-        }
+-        if (super.setOption(key, value)) {
+-            return true;
+-        }
+-        antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn());
+-        return false;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Parser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Parser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Parser.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Parser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,370 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.AST;
+-import persistence.antlr.collections.impl.ASTArray;
+-
+-/**A generic ANTLR parser (LL(k) for k>=1) containing a bunch of
+- * utility routines useful at any lookahead depth.  We distinguish between
+- * the LL(1) and LL(k) parsers because of efficiency.  This may not be
+- * necessary in the near future.
+- *
+- * Each parser object contains the state of the parse including a lookahead
+- * cache (the form of which is determined by the subclass), whether or
+- * not the parser is in guess mode, where tokens come from, etc...
+- *
+- * <p>
+- * During <b>guess</b> mode, the current lookahead token(s) and token type(s)
+- * cache must be saved because the token stream may not have been informed
+- * to save the token (via <tt>mark</tt>) before the <tt>try</tt> block.
+- * Guessing is started by:
+- * <ol>
+- * <li>saving the lookahead cache.
+- * <li>marking the current position in the TokenBuffer.
+- * <li>increasing the guessing level.
+- * </ol>
+- *
+- * After guessing, the parser state is restored by:
+- * <ol>
+- * <li>restoring the lookahead cache.
+- * <li>rewinding the TokenBuffer.
+- * <li>decreasing the guessing level.
+- * </ol>
+- *
+- * @see persistence.antlr.Token
+- * @see persistence.antlr.TokenBuffer
+- * @see persistence.antlr.Tokenizer
+- * @see persistence.antlr.LL1Parser
+- * @see persistence.antlr.LLkParser
+- */
+-
+-import java.io.IOException;
+-import java.util.Hashtable;
+-
+-import persistence.antlr.debug.MessageListener;
+-import persistence.antlr.debug.ParserListener;
+-import persistence.antlr.debug.ParserMatchListener;
+-import persistence.antlr.debug.ParserTokenListener;
+-import persistence.antlr.debug.SemanticPredicateListener;
+-import persistence.antlr.debug.SyntacticPredicateListener;
+-import persistence.antlr.debug.TraceListener;
+-
+-public abstract class Parser {
+-    protected ParserSharedInputState inputState;
+-
+-    /** Nesting level of registered handlers */
+-    // protected int exceptionLevel = 0;
+-
+-    /** Table of token type to token names */
+-    protected String[] tokenNames;
+-
+-    /** AST return value for a rule is squirreled away here */
+-    protected AST returnAST;
+-
+-    /** AST support code; parser delegates to this object.
+-	 *  This is set during parser construction by default
+-	 *  to either "new ASTFactory()" or a ctor that
+-	 *  has a token type to class map for hetero nodes.
+-	 */
+-    protected ASTFactory astFactory = null;
+-
+-	/** Constructed if any AST types specified in tokens{..}.
+-	 *  Maps an Integer->Class object.
+-	 */
+-	protected Hashtable tokenTypeToASTClassMap = null;
+-
+-    private boolean ignoreInvalidDebugCalls = false;
+-
+-    /** Used to keep track of indentdepth for traceIn/Out */
+-    protected int traceDepth = 0;
+-
+-    public Parser() {
+-        this(new ParserSharedInputState());
+-    }
+-
+-    public Parser(ParserSharedInputState state) {
+-        inputState = state;
+-    }
+-
+-	/** If the user specifies a tokens{} section with heterogeneous
+-	 *  AST node types, then ANTLR generates code to fill
+-	 *  this mapping.
+-	 */
+-	public Hashtable getTokenTypeToASTClassMap() {
+-		return tokenTypeToASTClassMap;
+-	}
+-
+-    public void addMessageListener(MessageListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addMessageListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void addParserListener(ParserListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addParserListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void addParserMatchListener(ParserMatchListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addParserMatchListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void addParserTokenListener(ParserTokenListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addParserTokenListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void addSemanticPredicateListener(SemanticPredicateListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addSemanticPredicateListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void addSyntacticPredicateListener(SyntacticPredicateListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addSyntacticPredicateListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void addTraceListener(TraceListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("addTraceListener() is only valid if parser built for debugging");
+-    }
+-
+-    /**Get another token object from the token stream */
+-    public abstract void consume() throws TokenStreamException;
+-
+-    /** Consume tokens until one matches the given token */
+-    public void consumeUntil(int tokenType) throws TokenStreamException {
+-        while (LA(1) != Token.EOF_TYPE && LA(1) != tokenType) {
+-            consume();
+-        }
+-    }
+-
+-    /** Consume tokens until one matches the given token set */
+-    public void consumeUntil(BitSet set) throws TokenStreamException {
+-        while (LA(1) != Token.EOF_TYPE && !set.member(LA(1))) {
+-            consume();
+-        }
+-    }
+-
+-    protected void defaultDebuggingSetup(TokenStream lexer, TokenBuffer tokBuf) {
+-        // by default, do nothing -- we're not debugging
+-    }
+-
+-    /** Get the AST return value squirreled away in the parser */
+-    public AST getAST() {
+-        return returnAST;
+-    }
+-
+-    public ASTFactory getASTFactory() {
+-        return astFactory;
+-    }
+-
+-    public String getFilename() {
+-        return inputState.filename;
+-    }
+-
+-    public ParserSharedInputState getInputState() {
+-        return inputState;
+-    }
+-
+-    public void setInputState(ParserSharedInputState state) {
+-        inputState = state;
+-    }
+-
+-    public String getTokenName(int num) {
+-        return tokenNames[num];
+-    }
+-
+-    public String[] getTokenNames() {
+-        return tokenNames;
+-    }
+-
+-    public boolean isDebugMode() {
+-        return false;
+-    }
+-
+-    /** Return the token type of the ith token of lookahead where i=1
+-     * is the current token being examined by the parser (i.e., it
+-     * has not been matched yet).
+-     */
+-    public abstract int LA(int i) throws TokenStreamException;
+-
+-    /**Return the ith token of lookahead */
+-    public abstract Token LT(int i) throws TokenStreamException;
+-
+-    // Forwarded to TokenBuffer
+-    public int mark() {
+-        return inputState.input.mark();
+-    }
+-
+-    /**Make sure current lookahead symbol matches token type <tt>t</tt>.
+-     * Throw an exception upon mismatch, which is catch by either the
+-     * error handler or by the syntactic predicate.
+-     */
+-    public void match(int t) throws MismatchedTokenException, TokenStreamException {
+-        if (LA(1) != t)
+-            throw new MismatchedTokenException(tokenNames, LT(1), t, false, getFilename());
+-        else
+-        // mark token as consumed -- fetch next token deferred until LA/LT
+-            consume();
+-    }
+-
+-    /**Make sure current lookahead symbol matches the given set
+-     * Throw an exception upon mismatch, which is catch by either the
+-     * error handler or by the syntactic predicate.
+-     */
+-    public void match(BitSet b) throws MismatchedTokenException, TokenStreamException {
+-        if (!b.member(LA(1)))
+-            throw new MismatchedTokenException(tokenNames, LT(1), b, false, getFilename());
+-        else
+-        // mark token as consumed -- fetch next token deferred until LA/LT
+-            consume();
+-    }
+-
+-    public void matchNot(int t) throws MismatchedTokenException, TokenStreamException {
+-        if (LA(1) == t)
+-        // Throws inverted-sense exception
+-            throw new MismatchedTokenException(tokenNames, LT(1), t, true, getFilename());
+-        else
+-        // mark token as consumed -- fetch next token deferred until LA/LT
+-            consume();
+-    }
+-
+-    /** @deprecated as of 2.7.2. This method calls System.exit() and writes
+-     *  directly to stderr, which is usually not appropriate when
+-     *  a parser is embedded into a larger application. Since the method is
+-     *  <code>static</code>, it cannot be overridden to avoid these problems.
+-     *  ANTLR no longer uses this method internally or in generated code.
+-     */
+-    public static void panic() {
+-        System.err.println("Parser: panic");
+-        System.exit(1);
+-    }
+-
+-    public void removeMessageListener(MessageListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new RuntimeException("removeMessageListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void removeParserListener(ParserListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new RuntimeException("removeParserListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void removeParserMatchListener(ParserMatchListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new RuntimeException("removeParserMatchListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void removeParserTokenListener(ParserTokenListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new RuntimeException("removeParserTokenListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void removeSemanticPredicateListener(SemanticPredicateListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("removeSemanticPredicateListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void removeSyntacticPredicateListener(SyntacticPredicateListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new IllegalArgumentException("removeSyntacticPredicateListener() is only valid if parser built for debugging");
+-    }
+-
+-    public void removeTraceListener(TraceListener l) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new RuntimeException("removeTraceListener() is only valid if parser built for debugging");
+-    }
+-
+-    /** Parser error-reporting function can be overridden in subclass */
+-    public void reportError(RecognitionException ex) {
+-        System.err.println(ex);
+-    }
+-
+-    /** Parser error-reporting function can be overridden in subclass */
+-    public void reportError(String s) {
+-        if (getFilename() == null) {
+-            System.err.println("error: " + s);
+-        }
+-        else {
+-            System.err.println(getFilename() + ": error: " + s);
+-        }
+-    }
+-
+-    /** Parser warning-reporting function can be overridden in subclass */
+-    public void reportWarning(String s) {
+-        if (getFilename() == null) {
+-            System.err.println("warning: " + s);
+-        }
+-        else {
+-            System.err.println(getFilename() + ": warning: " + s);
+-        }
+-    }
+-
+-    public void rewind(int pos) {
+-        inputState.input.rewind(pos);
+-    }
+-
+-    /** Specify an object with support code (shared by
+-     *  Parser and TreeParser.  Normally, the programmer
+-     *  does not play with this, using setASTNodeType instead.
+-     */
+-    public void setASTFactory(ASTFactory f) {
+-        astFactory = f;
+-    }
+-
+-    public void setASTNodeClass(String cl) {
+-        astFactory.setASTNodeType(cl);
+-    }
+-
+-    /** Specify the type of node to create during tree building; use setASTNodeClass now
+-     *  to be consistent with Token Object Type accessor.
+-	 *  @deprecated since 2.7.1
+-     */
+-    public void setASTNodeType(String nodeType) {
+-        setASTNodeClass(nodeType);
+-    }
+-
+-    public void setDebugMode(boolean debugMode) {
+-        if (!ignoreInvalidDebugCalls)
+-            throw new RuntimeException("setDebugMode() only valid if parser built for debugging");
+-    }
+-
+-    public void setFilename(String f) {
+-        inputState.filename = f;
+-    }
+-
+-    public void setIgnoreInvalidDebugCalls(boolean value) {
+-        ignoreInvalidDebugCalls = value;
+-    }
+-
+-    /** Set or change the input token buffer */
+-    public void setTokenBuffer(TokenBuffer t) {
+-        inputState.input = t;
+-    }
+-
+-    public void traceIndent() {
+-        for (int i = 0; i < traceDepth; i++)
+-            System.out.print(" ");
+-    }
+-
+-    public void traceIn(String rname) throws TokenStreamException {
+-        traceDepth += 1;
+-        traceIndent();
+-        System.out.println("> " + rname + "; LA(1)==" + LT(1).getText() +
+-                           ((inputState.guessing > 0)?" [guessing]":""));
+-    }
+-
+-    public void traceOut(String rname) throws TokenStreamException {
+-        traceIndent();
+-        System.out.println("< " + rname + "; LA(1)==" + LT(1).getText() +
+-                           ((inputState.guessing > 0)?" [guessing]":""));
+-        traceDepth -= 1;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ParserSharedInputState.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ParserSharedInputState.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ParserSharedInputState.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ParserSharedInputState.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,29 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** This object contains the data associated with an
+- *  input stream of tokens.  Multiple parsers
+- *  share a single ParserSharedInputState to parse
+- *  the same stream of tokens.
+- */
+-public class ParserSharedInputState {
+-    /** Where to get token objects */
+-    protected TokenBuffer input;
+-
+-    /** Are we guessing (guessing>0)? */
+-    public int guessing = 0;
+-
+-    /** What file (if known) caused the problem? */
+-    protected String filename;
+-
+-    public void reset() {
+-         guessing = 0;
+-         filename = null;
+-         input.reset();
+-     }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ParseTree.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ParseTree.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ParseTree.java	2006-02-08 22:30:55.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ParseTree.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,51 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- */
+-
+-import persistence.antlr.*;
+-import persistence.antlr.collections.AST;
+-
+-public abstract class ParseTree extends BaseAST {
+-
+-	/** Walk parse tree and return requested number of derivation steps.
+-	 *  If steps <= 0, return node text.  If steps == 1, return derivation
+-	 *  string at step.
+-	 */
+-	public String getLeftmostDerivationStep(int step) {
+-        if ( step<=0 ) {
+-			return toString();
+-		}
+-		StringBuffer buf = new StringBuffer(2000);
+-        getLeftmostDerivation(buf, step);
+-		return buf.toString();
+-	}
+-
+-	public String getLeftmostDerivation(int maxSteps) {
+-		StringBuffer buf = new StringBuffer(2000);
+-		buf.append("    "+this.toString());
+-		buf.append("\n");
+-		for (int d=1; d<maxSteps; d++) {
+-			buf.append(" =>");
+-			buf.append(getLeftmostDerivationStep(d));
+-			buf.append("\n");
+-		}
+-		return buf.toString();
+-	}
+-
+-	/** Get derivation and return how many you did (less than requested for
+-	 *  subtree roots.
+-	 */
+-	protected abstract int getLeftmostDerivation(StringBuffer buf, int step);
+-
+-	// just satisfy BaseAST interface; unused as we manually create nodes
+-
+-	public void initialize(int i, String s) {
+-	}
+-	public void initialize(AST ast) {
+-	}
+-	public void initialize(Token token) {
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ParseTreeRule.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ParseTreeRule.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ParseTreeRule.java	2006-02-08 22:30:55.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ParseTreeRule.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,69 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- */
+-
+-import persistence.antlr.Token;
+-import persistence.antlr.collections.AST;
+-
+-public class ParseTreeRule extends ParseTree {
+-	public static final int INVALID_ALT = -1;
+-
+-	protected String ruleName;
+-	protected int altNumber;  // unused until I modify antlr to record this
+-
+-	public ParseTreeRule(String ruleName) {
+-		this(ruleName,INVALID_ALT);
+-	}
+-
+-	public ParseTreeRule(String ruleName, int altNumber) {
+-		this.ruleName = ruleName;
+-		this.altNumber = altNumber;
+-	}
+-
+-	public String getRuleName() {
+-		return ruleName;
+-	}
+-
+-	/** Do a step-first walk, building up a buffer of tokens until
+-	 *  you've reached a particular step and print out any rule subroots
+-	 *  insteads of descending.
+-	 */
+-	protected int getLeftmostDerivation(StringBuffer buf, int step) {
+-		int numReplacements = 0;
+-		if ( step<=0 ) {
+-			buf.append(' ');
+-			buf.append(toString());
+-			return numReplacements;
+-		}
+-		AST child = getFirstChild();
+-		numReplacements = 1;
+-		// walk child printing them out, descending into at most one
+-		while ( child!=null ) {
+-			if ( numReplacements>=step || child instanceof ParseTreeToken ) {
+-				buf.append(' ');
+-				buf.append(child.toString());
+-			}
+-			else {
+-				// descend for at least one more derivation; update count
+-				int remainingReplacements = step-numReplacements;
+-				int n = ((ParseTree)child).getLeftmostDerivation(buf,
+-																 remainingReplacements);
+-				numReplacements += n;
+-			}
+-			child = child.getNextSibling();
+-		}
+-		return numReplacements;
+-	}
+-
+-	public String toString() {
+-		if ( altNumber==INVALID_ALT ) {
+-			return '<'+ruleName+'>';
+-		}
+-		else {
+-			return '<'+ruleName+"["+altNumber+"]>";
+-		}
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ParseTreeToken.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ParseTreeToken.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ParseTreeToken.java	2006-02-08 22:30:55.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ParseTreeToken.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,30 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- */
+-
+-import persistence.antlr.Token;
+-import persistence.antlr.collections.AST;
+-
+-public class ParseTreeToken extends ParseTree {
+-	protected Token token;
+-
+-	public ParseTreeToken(Token token) {
+-		this.token = token;
+-	}
+-
+-	protected int getLeftmostDerivation(StringBuffer buf, int step) {
+-		buf.append(' ');
+-		buf.append(toString());
+-		return step; // did on replacements
+-	}
+-
+-	public String toString() {
+-		if ( token!=null ) {
+-			return token.getText();
+-		}
+-		return "<missing token>";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/GrammarFile.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/GrammarFile.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/GrammarFile.java	2006-08-31 00:34:16.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/GrammarFile.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,88 +0,0 @@
+-package persistence.antlr.preprocessor;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.IndexedVector;
+-
+-import java.util.Enumeration;
+-import java.io.*;
+-
+-/** Stores header action, grammar preamble, file options, and
+- *  list of grammars in the file
+- */
+-public class GrammarFile {
+-    protected String fileName;
+-    protected String headerAction = "";
+-    protected IndexedVector options;
+-    protected IndexedVector grammars;
+-    protected boolean expanded = false;	// any grammars expanded within?
+-	protected persistence.antlr.Tool tool;
+-
+-    public GrammarFile(persistence.antlr.Tool tool, String f) {
+-        fileName = f;
+-        grammars = new IndexedVector();
+-        this.tool = tool;
+-    }
+-
+-    public void addGrammar(Grammar g) {
+-        grammars.appendElement(g.getName(), g);
+-    }
+-
+-    public void generateExpandedFile() throws IOException {
+-        if (!expanded) {
+-            return;	// don't generate if nothing got expanded
+-        }
+-        String expandedFileName = nameForExpandedGrammarFile(this.getName());
+-
+-        // create the new grammar file with expanded grammars
+-        PrintWriter expF = tool.openOutputFile(expandedFileName);
+-        expF.println(toString());
+-        expF.close();
+-    }
+-
+-    public IndexedVector getGrammars() {
+-        return grammars;
+-    }
+-
+-    public String getName() {
+-        return fileName;
+-    }
+-
+-    public String nameForExpandedGrammarFile(String f) {
+-        if (expanded) {
+-            // strip path to original input, make expanded file in current dir
+-            return "expanded" + tool.fileMinusPath(f);
+-        }
+-        else {
+-            return f;
+-        }
+-    }
+-
+-    public void setExpanded(boolean exp) {
+-        expanded = exp;
+-    }
+-
+-    public void addHeaderAction(String a) {
+-        headerAction += a + System.getProperty("line.separator");
+-    }
+-
+-    public void setOptions(IndexedVector o) {
+-        options = o;
+-    }
+-
+-    public String toString() {
+-        String h = headerAction == null ? "" : headerAction;
+-        String o = options == null ? "" : Hierarchy.optionsToString(options);
+-
+-        StringBuffer s = new StringBuffer(10000); s.append(h); s.append(o);
+-        for (Enumeration e = grammars.elements(); e.hasMoreElements();) {
+-            Grammar g = (Grammar)e.nextElement();
+-            s.append(g.toString());
+-        }
+-        return s.toString();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Grammar.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Grammar.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Grammar.java	2006-08-31 00:34:16.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Grammar.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,280 +0,0 @@
+-package persistence.antlr.preprocessor;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.IndexedVector;
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import java.io.IOException;
+-
+-class Grammar {
+-    protected String name;
+-    protected String fileName;		// where does it come from?
+-    protected String superGrammar;	// null if no super class
+-    protected String type;				// lexer? parser? tree parser?
+-    protected IndexedVector rules;	// text of rules as they were read in
+-    protected IndexedVector options;// rule options
+-    protected String tokenSection;	// the tokens{} stuff
+-    protected String preambleAction;// action right before grammar
+-    protected String memberAction;	// action inside grammar
+-    protected Hierarchy hier;			// hierarchy of grammars
+-    protected boolean predefined = false;	// one of the predefined grammars?
+-    protected boolean alreadyExpanded = false;
+-    protected boolean specifiedVocabulary = false;	// found importVocab option?
+-
+-	/** if not derived from another grammar, might still specify a non-ANTLR
+-	 *  class to derive from like this "class T extends Parser(MyParserClass);"
+-	 */
+-	protected String superClass = null;
+-
+-    protected String importVocab = null;
+-    protected String exportVocab = null;
+-    protected persistence.antlr.Tool antlrTool;
+-
+-    public Grammar(persistence.antlr.Tool tool, String name, String superGrammar, IndexedVector rules) {
+-        this.name = name;
+-        this.superGrammar = superGrammar;
+-        this.rules = rules;
+-        this.antlrTool = tool;
+-    }
+-
+-    public void addOption(Option o) {
+-        if (options == null) {	// if not already there, create it
+-            options = new IndexedVector();
+-        }
+-        options.appendElement(o.getName(), o);
+-    }
+-
+-    public void addRule(Rule r) {
+-        rules.appendElement(r.getName(), r);
+-    }
+-
+-    /** Copy all nonoverridden rules, vocabulary, and options into this grammar from
+-     *  supergrammar chain.  The change is made in place; e.g., this grammar's vector
+-     *  of rules gets bigger.  This has side-effects: all grammars on path to
+-     *  root of hierarchy are expanded also.
+-     */
+-    public void expandInPlace() {
+-        // if this grammar already expanded, just return
+-        if (alreadyExpanded) {
+-            return;
+-        }
+-
+-        // Expand super grammar first (unless it's a predefined or subgrammar of predefined)
+-        Grammar superG = getSuperGrammar();
+-        if (superG == null)
+-            return; // error (didn't provide superclass)
+-        if (exportVocab == null) {
+-            // if no exportVocab for this grammar, make it same as grammar name
+-            exportVocab = getName();
+-        }
+-        if (superG.isPredefined())
+-            return; // can't expand Lexer, Parser, ...
+-        superG.expandInPlace();
+-
+-        // expand current grammar now.
+-        alreadyExpanded = true;
+-        // track whether a grammar file needed to have a grammar expanded
+-        GrammarFile gf = hier.getFile(getFileName());
+-        gf.setExpanded(true);
+-
+-        // Copy rules from supergrammar into this grammar
+-        IndexedVector inhRules = superG.getRules();
+-        for (Enumeration e = inhRules.elements(); e.hasMoreElements();) {
+-            Rule r = (Rule)e.nextElement();
+-            inherit(r, superG);
+-        }
+-
+-        // Copy options from supergrammar into this grammar
+-        // Modify tokdef options so that they point to dir of enclosing grammar
+-        IndexedVector inhOptions = superG.getOptions();
+-        if (inhOptions != null) {
+-            for (Enumeration e = inhOptions.elements(); e.hasMoreElements();) {
+-                Option o = (Option)e.nextElement();
+-                inherit(o, superG);
+-            }
+-        }
+-
+-        // add an option to load the superGrammar's output vocab
+-        if ((options != null && options.getElement("importVocab") == null) || options == null) {
+-            // no importVocab found, add one that grabs superG's output vocab
+-            Option inputV = new Option("importVocab", superG.exportVocab + ";", this);
+-            addOption(inputV);
+-            // copy output vocab file to current dir
+-            String originatingGrFileName = superG.getFileName();
+-            String path = antlrTool.pathToFile(originatingGrFileName);
+-            String superExportVocabFileName = path + superG.exportVocab +
+-                persistence.antlr.CodeGenerator.TokenTypesFileSuffix +
+-                persistence.antlr.CodeGenerator.TokenTypesFileExt;
+-            String newImportVocabFileName = antlrTool.fileMinusPath(superExportVocabFileName);
+-            if (path.equals("." + System.getProperty("file.separator"))) {
+-                // don't copy tokdef file onto itself (must be current directory)
+-                // System.out.println("importVocab file same dir; leaving as " + superExportVocabFileName);
+-            }
+-            else {
+-                try {
+-                    antlrTool.copyFile(superExportVocabFileName, newImportVocabFileName);
+-                }
+-                catch (IOException io) {
+-                    antlrTool.toolError("cannot find/copy importVocab file " + superExportVocabFileName);
+-                    return;
+-                }
+-            }
+-        }
+-
+-        // copy member action from supergrammar into this grammar
+-        inherit(superG.memberAction, superG);
+-    }
+-
+-    public String getFileName() {
+-        return fileName;
+-    }
+-
+-    public String getName() {
+-        return name;
+-    }
+-
+-    public IndexedVector getOptions() {
+-        return options;
+-    }
+-
+-    public IndexedVector getRules() {
+-        return rules;
+-    }
+-
+-    public Grammar getSuperGrammar() {
+-        if (superGrammar == null) return null;
+-        Grammar g = (Grammar)hier.getGrammar(superGrammar);
+-        return g;
+-    }
+-
+-    public String getSuperGrammarName() {
+-        return superGrammar;
+-    }
+-
+-    public String getType() {
+-        return type;
+-    }
+-
+-    public void inherit(Option o, Grammar superG) {
+-        // do NOT inherit importVocab/exportVocab options under any circumstances
+-        if (o.getName().equals("importVocab") ||
+-            o.getName().equals("exportVocab")) {
+-            return;
+-        }
+-
+-        Option overriddenOption = null;
+-        if (options != null) {	// do we even have options?
+-            overriddenOption = (Option)options.getElement(o.getName());
+-        }
+-        // if overridden, do not add to this grammar
+-        if (overriddenOption == null) { // not overridden
+-            addOption(o);	// copy option into this grammar--not overridden
+-        }
+-    }
+-
+-    public void inherit(Rule r, Grammar superG) {
+-        // if overridden, do not add to this grammar
+-        Rule overriddenRule = (Rule)rules.getElement(r.getName());
+-        if (overriddenRule != null) {
+-            // rule is overridden in this grammar.
+-            if (!overriddenRule.sameSignature(r)) {
+-                // warn if different sig
+-                antlrTool.warning("rule " + getName() + "." + overriddenRule.getName() +
+-                                   " has different signature than " +
+-                                   superG.getName() + "." + overriddenRule.getName());
+-            }
+-        }
+-        else {  // not overridden, copy rule into this
+-            addRule(r);
+-        }
+-    }
+-
+-    public void inherit(String memberAction, Grammar superG) {
+-        if (this.memberAction != null) return;	// do nothing if already have member action
+-        if (memberAction != null) { // don't have one here, use supergrammar's action
+-            this.memberAction = memberAction;
+-        }
+-    }
+-
+-    public boolean isPredefined() {
+-        return predefined;
+-    }
+-
+-    public void setFileName(String f) {
+-        fileName = f;
+-    }
+-
+-    public void setHierarchy(Hierarchy hier) {
+-        this.hier = hier;
+-    }
+-
+-    public void setMemberAction(String a) {
+-        memberAction = a;
+-    }
+-
+-    public void setOptions(IndexedVector options) {
+-        this.options = options;
+-    }
+-
+-    public void setPreambleAction(String a) {
+-        preambleAction = a;
+-    }
+-
+-    public void setPredefined(boolean b) {
+-        predefined = b;
+-    }
+-
+-    public void setTokenSection(String tk) {
+-        tokenSection = tk;
+-    }
+-
+-    public void setType(String t) {
+-        type = t;
+-    }
+-
+-    public String toString() {
+-        StringBuffer s = new StringBuffer(10000);
+-        if (preambleAction != null) {
+-            s.append(preambleAction);
+-        }
+-        if (superGrammar == null) {
+-			return "class " + name + ";";
+-        }
+-		if ( superClass!=null ) {
+-			// replace with specified superclass not actual grammar
+-			// user must make sure that the superclass derives from super grammar class
+-			s.append("class " + name + " extends " + superClass + ";");
+-		}
+-		else {
+-			s.append("class " + name + " extends " + type + ";");
+-		}
+-		s.append(
+-			System.getProperty("line.separator") +
+-            System.getProperty("line.separator"));
+-        if (options != null) {
+-            s.append(Hierarchy.optionsToString(options));
+-        }
+-        if (tokenSection != null) {
+-            s.append(tokenSection + "\n");
+-        }
+-        if (memberAction != null) {
+-            s.append(memberAction + System.getProperty("line.separator"));
+-        }
+-        for (int i = 0; i < rules.size(); i++) {
+-            Rule r = (Rule)rules.elementAt(i);
+-            if (!getName().equals(r.enclosingGrammar.getName())) {
+-                s.append("// inherited from grammar " + r.enclosingGrammar.getName() + System.getProperty("line.separator"));
+-            }
+-            s.append(r +
+-                System.getProperty("line.separator") +
+-                System.getProperty("line.separator"));
+-        }
+-        return s.toString();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Hierarchy.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Hierarchy.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Hierarchy.java	2006-08-31 00:34:16.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Hierarchy.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,156 +0,0 @@
+-package persistence.antlr.preprocessor;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.IndexedVector;
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import java.io.*;
+-
+-import persistence.antlr.*;
+-import persistence.antlr.preprocessor.Grammar;
+-
+-public class Hierarchy {
+-    protected Grammar LexerRoot = null;
+-    protected Grammar ParserRoot = null;
+-    protected Grammar TreeParserRoot = null;
+-    protected Hashtable symbols;	// table of grammars
+-    protected Hashtable files;	// table of grammar files read in
+-    protected persistence.antlr.Tool antlrTool;
+-
+-    public Hierarchy(persistence.antlr.Tool tool) {
+-        this.antlrTool = tool;
+-        LexerRoot = new Grammar(tool, "Lexer", null, null);
+-        ParserRoot = new Grammar(tool, "Parser", null, null);
+-        TreeParserRoot = new Grammar(tool, "TreeParser", null, null);
+-        symbols = new Hashtable(10);
+-        files = new Hashtable(10);
+-
+-        LexerRoot.setPredefined(true);
+-        ParserRoot.setPredefined(true);
+-        TreeParserRoot.setPredefined(true);
+-
+-        symbols.put(LexerRoot.getName(), LexerRoot);
+-        symbols.put(ParserRoot.getName(), ParserRoot);
+-        symbols.put(TreeParserRoot.getName(), TreeParserRoot);
+-    }
+-
+-    public void addGrammar(Grammar gr) {
+-        gr.setHierarchy(this);
+-        // add grammar to hierarchy
+-        symbols.put(gr.getName(), gr);
+-        // add grammar to file.
+-        GrammarFile f = getFile(gr.getFileName());
+-        f.addGrammar(gr);
+-    }
+-
+-    public void addGrammarFile(GrammarFile gf) {
+-        files.put(gf.getName(), gf);
+-    }
+-
+-    public void expandGrammarsInFile(String fileName) {
+-        GrammarFile f = getFile(fileName);
+-        for (Enumeration e = f.getGrammars().elements(); e.hasMoreElements();) {
+-            Grammar g = (Grammar)e.nextElement();
+-            g.expandInPlace();
+-        }
+-    }
+-
+-    public Grammar findRoot(Grammar g) {
+-        if (g.getSuperGrammarName() == null) {		// at root
+-            return g;
+-        }
+-        // return root of super.
+-        Grammar sg = g.getSuperGrammar();
+-        if (sg == null) return g;		// return this grammar if super missing
+-        return findRoot(sg);
+-    }
+-
+-    public GrammarFile getFile(String fileName) {
+-        return (GrammarFile)files.get(fileName);
+-    }
+-
+-    public Grammar getGrammar(String gr) {
+-        return (Grammar)symbols.get(gr);
+-    }
+-
+-    public static String optionsToString(IndexedVector options) {
+-        String s = "options {" + System.getProperty("line.separator");
+-        for (Enumeration e = options.elements(); e.hasMoreElements();) {
+-            s += (Option)e.nextElement() + System.getProperty("line.separator");
+-        }
+-        s += "}" +
+-            System.getProperty("line.separator") +
+-            System.getProperty("line.separator");
+-        return s;
+-    }
+-
+-    public void readGrammarFile(String file) throws FileNotFoundException {
+-        Reader grStream = new BufferedReader(new FileReader(file));
+-        addGrammarFile(new GrammarFile(antlrTool, file));
+-
+-        // Create the simplified grammar lexer/parser
+-        PreprocessorLexer ppLexer = new PreprocessorLexer(grStream);
+-        ppLexer.setFilename(file);
+-        Preprocessor pp = new Preprocessor(ppLexer);
+-		pp.setTool(antlrTool);
+-        pp.setFilename(file);
+-
+-        // populate the hierarchy with class(es) read in
+-        try {
+-            pp.grammarFile(this, file);
+-        }
+-        catch (TokenStreamException io) {
+-            antlrTool.toolError("Token stream error reading grammar(s):\n" + io);
+-        }
+-        catch (ANTLRException se) {
+-            antlrTool.toolError("error reading grammar(s):\n" + se);
+-        }
+-    }
+-
+-    /** Return true if hierarchy is complete, false if not */
+-    public boolean verifyThatHierarchyIsComplete() {
+-        boolean complete = true;
+-        // Make a pass to ensure all grammars are defined
+-        for (Enumeration e = symbols.elements(); e.hasMoreElements();) {
+-            Grammar c = (Grammar)e.nextElement();
+-            if (c.getSuperGrammarName() == null) {
+-                continue;		// at root: ignore predefined roots
+-            }
+-            Grammar superG = c.getSuperGrammar();
+-            if (superG == null) {
+-                antlrTool.toolError("grammar " + c.getSuperGrammarName() + " not defined");
+-                complete = false;
+-                symbols.remove(c.getName()); // super not defined, kill sub
+-            }
+-        }
+-
+-        if (!complete) return false;
+-
+-        // Make another pass to set the 'type' field of each grammar
+-        // This makes it easy later to ask a grammar what its type
+-        // is w/o having to search hierarchy.
+-        for (Enumeration e = symbols.elements(); e.hasMoreElements();) {
+-            Grammar c = (Grammar)e.nextElement();
+-            if (c.getSuperGrammarName() == null) {
+-                continue;		// ignore predefined roots
+-            }
+-            c.setType(findRoot(c).getName());
+-        }
+-
+-        return true;
+-    }
+-
+-    public persistence.antlr.Tool getTool() {
+-        return antlrTool;
+-    }
+-
+-    public void setTool(persistence.antlr.Tool antlrTool) {
+-        this.antlrTool = antlrTool;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Option.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Option.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Option.java	2006-08-31 00:34:16.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Option.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,49 +0,0 @@
+-package persistence.antlr.preprocessor;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-class Option {
+-    protected String name;
+-    protected String rhs;
+-    protected Grammar enclosingGrammar;
+-
+-    public Option(String n, String rhs, Grammar gr) {
+-        name = n;
+-        this.rhs = rhs;
+-        setEnclosingGrammar(gr);
+-    }
+-
+-    public Grammar getEnclosingGrammar() {
+-        return enclosingGrammar;
+-    }
+-
+-    public String getName() {
+-        return name;
+-    }
+-
+-    public String getRHS() {
+-        return rhs;
+-    }
+-
+-    public void setEnclosingGrammar(Grammar g) {
+-        enclosingGrammar = g;
+-    }
+-
+-    public void setName(String n) {
+-        name = n;
+-    }
+-
+-    public void setRHS(String rhs) {
+-        this.rhs = rhs;
+-    }
+-
+-    public String toString() {
+-        return "\t" + name + "=" + rhs;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Preprocessor.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Preprocessor.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Preprocessor.java	2006-02-08 22:31:35.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Preprocessor.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,867 +0,0 @@
+-// $ANTLR : "preproc.g" -> "Preprocessor.java"$
+-
+-package persistence.antlr.preprocessor;
+-
+-import persistence.antlr.TokenBuffer;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import persistence.antlr.LLkParser;
+-import persistence.antlr.Token;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltException;
+-import persistence.antlr.MismatchedTokenException;
+-import persistence.antlr.SemanticException;
+-import persistence.antlr.ParserSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-
+-import persistence.antlr.collections.impl.IndexedVector;
+-import java.util.Hashtable;
+-import persistence.antlr.preprocessor.Grammar;
+-
+-public class Preprocessor extends persistence.antlr.LLkParser       implements PreprocessorTokenTypes
+- {
+-
+-	// This chunk of error reporting code provided by Brian Smith
+-
+-    private persistence.antlr.Tool antlrTool;
+-
+-    /** In order to make it so existing subclasses don't break, we won't require
+-     * that the persistence.antlr.Tool instance be passed as a constructor element. Instead,
+-     * the persistence.antlr.Tool instance should register itself via {@link #initTool(persistence.antlr.Tool)}
+-     * @throws IllegalStateException if a tool has already been registered
+-     * @since 2.7.2
+-     */
+-    public void setTool(persistence.antlr.Tool tool) {
+-        if (antlrTool == null) {
+-            antlrTool = tool;
+-		}
+-        else {
+-            throw new IllegalStateException("persistence.antlr.Tool already registered");
+-		}
+-    }
+-
+-    /** @since 2.7.2 */
+-    protected persistence.antlr.Tool getTool() {
+-        return antlrTool;
+-    }
+-
+-    /** Delegates the error message to the tool if any was registered via
+-     *  {@link #initTool(persistence.antlr.Tool)}
+-     *  @since 2.7.2
+-     */
+-    public void reportError(String s) {
+-        if (getTool() != null) {
+-            getTool().error(s, getFilename(), -1, -1);
+-		}
+-        else {
+-            super.reportError(s);
+-		}
+-    }
+-
+-    /** Delegates the error message to the tool if any was registered via
+-     *  {@link #initTool(persistence.antlr.Tool)}
+-     *  @since 2.7.2
+-     */
+-    public void reportError(RecognitionException e) {
+-        if (getTool() != null) {
+-            getTool().error(e.getErrorMessage(), e.getFilename(), e.getLine(), e.getColumn());
+-		}
+-        else {
+-            super.reportError(e);
+-		}
+-    }
+-
+-    /** Delegates the warning message to the tool if any was registered via
+-     *  {@link #initTool(persistence.antlr.Tool)}
+-     *  @since 2.7.2
+-     */
+-    public void reportWarning(String s) {
+-        if (getTool() != null) {
+-            getTool().warning(s, getFilename(), -1, -1);
+-		}
+-        else {
+-            super.reportWarning(s);
+-		}
+-    }
+-
+-protected Preprocessor(TokenBuffer tokenBuf, int k) {
+-  super(tokenBuf,k);
+-  tokenNames = _tokenNames;
+-}
+-
+-public Preprocessor(TokenBuffer tokenBuf) {
+-  this(tokenBuf,1);
+-}
+-
+-protected Preprocessor(TokenStream lexer, int k) {
+-  super(lexer,k);
+-  tokenNames = _tokenNames;
+-}
+-
+-public Preprocessor(TokenStream lexer) {
+-  this(lexer,1);
+-}
+-
+-public Preprocessor(ParserSharedInputState state) {
+-  super(state,1);
+-  tokenNames = _tokenNames;
+-}
+-
+-	public final void grammarFile(
+-		Hierarchy hier, String file
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  hdr = null;
+-		
+-			Grammar gr;
+-			IndexedVector opt=null;
+-		
+-		
+-		try {      // for error handling
+-			{
+-			_loop265:
+-			do {
+-				if ((LA(1)==HEADER_ACTION)) {
+-					hdr = LT(1);
+-					match(HEADER_ACTION);
+-					hier.getFile(file).addHeaderAction(hdr.getText());
+-				}
+-				else {
+-					break _loop265;
+-				}
+-				
+-			} while (true);
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case OPTIONS_START:
+-			{
+-				opt=optionSpec(null);
+-				break;
+-			}
+-			case EOF:
+-			case ACTION:
+-			case LITERAL_class:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			_loop268:
+-			do {
+-				if ((LA(1)==ACTION||LA(1)==LITERAL_class)) {
+-					gr=class_def(file, hier);
+-					
+-								if ( gr!=null && opt!=null ) {
+-									hier.getFile(file).setOptions(opt);
+-								}
+-								if ( gr!=null ) {
+-									gr.setFileName(file);
+-									hier.addGrammar(gr);
+-								}
+-								
+-				}
+-				else {
+-					break _loop268;
+-				}
+-				
+-			} while (true);
+-			}
+-			match(Token.EOF_TYPE);
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_0);
+-		}
+-	}
+-	
+-	public final IndexedVector  optionSpec(
+-		Grammar gr
+-	) throws RecognitionException, TokenStreamException {
+-		IndexedVector options;
+-		
+-		Token  op = null;
+-		Token  rhs = null;
+-		
+-			options = new IndexedVector();
+-		
+-		
+-		try {      // for error handling
+-			match(OPTIONS_START);
+-			{
+-			_loop280:
+-			do {
+-				if ((LA(1)==ID)) {
+-					op = LT(1);
+-					match(ID);
+-					rhs = LT(1);
+-					match(ASSIGN_RHS);
+-					
+-									Option newOp = new Option(op.getText(),rhs.getText(),gr);
+-									options.appendElement(newOp.getName(),newOp);
+-									if ( gr!=null && op.getText().equals("importVocab") ) {
+-										gr.specifiedVocabulary = true;
+-										gr.importVocab = rhs.getText();
+-									}
+-									else if ( gr!=null && op.getText().equals("exportVocab") ) {
+-										// don't want ';' included in outputVocab.
+-										// This is heinously inconsistent!  Ugh.
+-										gr.exportVocab = rhs.getText().substring(0,rhs.getText().length()-1);
+-										gr.exportVocab = gr.exportVocab.trim();
+-									}
+-									
+-				}
+-				else {
+-					break _loop280;
+-				}
+-				
+-			} while (true);
+-			}
+-			match(RCURLY);
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_1);
+-		}
+-		return options;
+-	}
+-	
+-	public final Grammar  class_def(
+-		String file, Hierarchy hier
+-	) throws RecognitionException, TokenStreamException {
+-		Grammar gr;
+-		
+-		Token  preamble = null;
+-		Token  sub = null;
+-		Token  sup = null;
+-		Token  tk = null;
+-		Token  memberA = null;
+-		
+-			gr=null;
+-			IndexedVector rules = new IndexedVector(100);
+-			IndexedVector classOptions = null;
+-			String sc = null;
+-		
+-		
+-		try {      // for error handling
+-			{
+-			switch ( LA(1)) {
+-			case ACTION:
+-			{
+-				preamble = LT(1);
+-				match(ACTION);
+-				break;
+-			}
+-			case LITERAL_class:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			match(LITERAL_class);
+-			sub = LT(1);
+-			match(ID);
+-			match(LITERAL_extends);
+-			sup = LT(1);
+-			match(ID);
+-			{
+-			switch ( LA(1)) {
+-			case SUBRULE_BLOCK:
+-			{
+-				sc=superClass();
+-				break;
+-			}
+-			case SEMI:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			match(SEMI);
+-			
+-						gr = (Grammar)hier.getGrammar(sub.getText());
+-						if ( gr!=null ) {
+-			//				antlr.Tool.toolError("redefinition of grammar "+gr.getName()+" ignored");
+-							gr=null;
+-							throw new SemanticException("redefinition of grammar "+sub.getText(), file, sub.getLine(), sub.getColumn());
+-						}
+-						else {
+-							gr = new Grammar(hier.getTool(), sub.getText(), sup.getText(), rules);
+-							gr.superClass=sc;
+-							if ( preamble!=null ) {
+-								gr.setPreambleAction(preamble.getText());
+-							}
+-						}
+-					
+-			{
+-			switch ( LA(1)) {
+-			case OPTIONS_START:
+-			{
+-				classOptions=optionSpec(gr);
+-				break;
+-			}
+-			case ACTION:
+-			case ID:
+-			case TOKENS_SPEC:
+-			case LITERAL_protected:
+-			case LITERAL_private:
+-			case LITERAL_public:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			
+-					if ( gr!=null ) {
+-						gr.setOptions(classOptions);
+-					}
+-					
+-			{
+-			switch ( LA(1)) {
+-			case TOKENS_SPEC:
+-			{
+-				tk = LT(1);
+-				match(TOKENS_SPEC);
+-				gr.setTokenSection(tk.getText());
+-				break;
+-			}
+-			case ACTION:
+-			case ID:
+-			case LITERAL_protected:
+-			case LITERAL_private:
+-			case LITERAL_public:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case ACTION:
+-			{
+-				memberA = LT(1);
+-				match(ACTION);
+-				gr.setMemberAction(memberA.getText());
+-				break;
+-			}
+-			case ID:
+-			case LITERAL_protected:
+-			case LITERAL_private:
+-			case LITERAL_public:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			int _cnt277=0;
+-			_loop277:
+-			do {
+-				if ((_tokenSet_2.member(LA(1)))) {
+-					rule(gr);
+-				}
+-				else {
+-					if ( _cnt277>=1 ) { break _loop277; } else {throw new NoViableAltException(LT(1), getFilename());}
+-				}
+-				
+-				_cnt277++;
+-			} while (true);
+-			}
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_3);
+-		}
+-		return gr;
+-	}
+-	
+-	public final String  superClass() throws RecognitionException, TokenStreamException {
+-		String sup;
+-		
+-		sup=LT(1).getText();
+-		
+-		try {      // for error handling
+-			match(SUBRULE_BLOCK);
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_4);
+-		}
+-		return sup;
+-	}
+-	
+-	public final void rule(
+-		Grammar gr
+-	) throws RecognitionException, TokenStreamException {
+-		
+-		Token  r = null;
+-		Token  arg = null;
+-		Token  ret = null;
+-		Token  init = null;
+-		Token  blk = null;
+-		
+-			IndexedVector o = null;	// options for rule
+-			String vis = null;
+-			boolean bang=false;
+-			String eg=null, thr="";
+-		
+-		
+-		try {      // for error handling
+-			{
+-			switch ( LA(1)) {
+-			case LITERAL_protected:
+-			{
+-				match(LITERAL_protected);
+-				vis="protected";
+-				break;
+-			}
+-			case LITERAL_private:
+-			{
+-				match(LITERAL_private);
+-				vis="private";
+-				break;
+-			}
+-			case LITERAL_public:
+-			{
+-				match(LITERAL_public);
+-				vis="public";
+-				break;
+-			}
+-			case ID:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			r = LT(1);
+-			match(ID);
+-			{
+-			switch ( LA(1)) {
+-			case BANG:
+-			{
+-				match(BANG);
+-				bang=true;
+-				break;
+-			}
+-			case ACTION:
+-			case OPTIONS_START:
+-			case ARG_ACTION:
+-			case LITERAL_returns:
+-			case RULE_BLOCK:
+-			case LITERAL_throws:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case ARG_ACTION:
+-			{
+-				arg = LT(1);
+-				match(ARG_ACTION);
+-				break;
+-			}
+-			case ACTION:
+-			case OPTIONS_START:
+-			case LITERAL_returns:
+-			case RULE_BLOCK:
+-			case LITERAL_throws:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case LITERAL_returns:
+-			{
+-				match(LITERAL_returns);
+-				ret = LT(1);
+-				match(ARG_ACTION);
+-				break;
+-			}
+-			case ACTION:
+-			case OPTIONS_START:
+-			case RULE_BLOCK:
+-			case LITERAL_throws:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case LITERAL_throws:
+-			{
+-				thr=throwsSpec();
+-				break;
+-			}
+-			case ACTION:
+-			case OPTIONS_START:
+-			case RULE_BLOCK:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case OPTIONS_START:
+-			{
+-				o=optionSpec(null);
+-				break;
+-			}
+-			case ACTION:
+-			case RULE_BLOCK:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case ACTION:
+-			{
+-				init = LT(1);
+-				match(ACTION);
+-				break;
+-			}
+-			case RULE_BLOCK:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			blk = LT(1);
+-			match(RULE_BLOCK);
+-			eg=exceptionGroup();
+-			
+-					String rtext = blk.getText()+eg;
+-					Rule ppr = new Rule(r.getText(),rtext,o,gr);
+-					ppr.setThrowsSpec(thr);
+-					if ( arg!=null ) {
+-						ppr.setArgs(arg.getText());
+-					}
+-					if ( ret!=null ) {
+-						ppr.setReturnValue(ret.getText());
+-					}
+-					if ( init!=null ) {
+-						ppr.setInitAction(init.getText());
+-					}
+-					if ( bang ) {
+-						ppr.setBang();
+-					}
+-					ppr.setVisibility(vis);
+-					if ( gr!=null ) {
+-						gr.addRule(ppr);
+-					}
+-					
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_5);
+-		}
+-	}
+-	
+-	public final String  throwsSpec() throws RecognitionException, TokenStreamException {
+-		String t;
+-		
+-		Token  a = null;
+-		Token  b = null;
+-		t="throws ";
+-		
+-		try {      // for error handling
+-			match(LITERAL_throws);
+-			a = LT(1);
+-			match(ID);
+-			t+=a.getText();
+-			{
+-			_loop291:
+-			do {
+-				if ((LA(1)==COMMA)) {
+-					match(COMMA);
+-					b = LT(1);
+-					match(ID);
+-					t+=","+b.getText();
+-				}
+-				else {
+-					break _loop291;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_6);
+-		}
+-		return t;
+-	}
+-	
+-	public final String  exceptionGroup() throws RecognitionException, TokenStreamException {
+-		String g;
+-		
+-		String e=null; g="";
+-		
+-		try {      // for error handling
+-			{
+-			_loop294:
+-			do {
+-				if ((LA(1)==LITERAL_exception)) {
+-					e=exceptionSpec();
+-					g += e;
+-				}
+-				else {
+-					break _loop294;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_5);
+-		}
+-		return g;
+-	}
+-	
+-	public final String  exceptionSpec() throws RecognitionException, TokenStreamException {
+-		String es;
+-		
+-		Token  aa = null;
+-		String h=null;
+-		es = System.getProperty("line.separator")+"exception ";
+-		
+-		
+-		try {      // for error handling
+-			match(LITERAL_exception);
+-			{
+-			switch ( LA(1)) {
+-			case ARG_ACTION:
+-			{
+-				aa = LT(1);
+-				match(ARG_ACTION);
+-				es += aa.getText();
+-				break;
+-			}
+-			case EOF:
+-			case ACTION:
+-			case LITERAL_class:
+-			case ID:
+-			case LITERAL_protected:
+-			case LITERAL_private:
+-			case LITERAL_public:
+-			case LITERAL_exception:
+-			case LITERAL_catch:
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltException(LT(1), getFilename());
+-			}
+-			}
+-			}
+-			{
+-			_loop298:
+-			do {
+-				if ((LA(1)==LITERAL_catch)) {
+-					h=exceptionHandler();
+-					es += h;
+-				}
+-				else {
+-					break _loop298;
+-				}
+-				
+-			} while (true);
+-			}
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_7);
+-		}
+-		return es;
+-	}
+-	
+-	public final String  exceptionHandler() throws RecognitionException, TokenStreamException {
+-		String h;
+-		
+-		Token  a1 = null;
+-		Token  a2 = null;
+-		h=null;
+-		
+-		try {      // for error handling
+-			match(LITERAL_catch);
+-			a1 = LT(1);
+-			match(ARG_ACTION);
+-			a2 = LT(1);
+-			match(ACTION);
+-			h = System.getProperty("line.separator")+
+-						 "catch "+a1.getText()+" "+a2.getText();
+-		}
+-		catch (RecognitionException ex) {
+-			reportError(ex);
+-			consume();
+-			consumeUntil(_tokenSet_8);
+-		}
+-		return h;
+-	}
+-	
+-	
+-	public static final String[] _tokenNames = {
+-		"<0>",
+-		"EOF",
+-		"<2>",
+-		"NULL_TREE_LOOKAHEAD",
+-		"\"tokens\"",
+-		"HEADER_ACTION",
+-		"SUBRULE_BLOCK",
+-		"ACTION",
+-		"\"class\"",
+-		"ID",
+-		"\"extends\"",
+-		"SEMI",
+-		"TOKENS_SPEC",
+-		"OPTIONS_START",
+-		"ASSIGN_RHS",
+-		"RCURLY",
+-		"\"protected\"",
+-		"\"private\"",
+-		"\"public\"",
+-		"BANG",
+-		"ARG_ACTION",
+-		"\"returns\"",
+-		"RULE_BLOCK",
+-		"\"throws\"",
+-		"COMMA",
+-		"\"exception\"",
+-		"\"catch\"",
+-		"ALT",
+-		"ELEMENT",
+-		"LPAREN",
+-		"RPAREN",
+-		"ID_OR_KEYWORD",
+-		"CURLY_BLOCK_SCARF",
+-		"WS",
+-		"NEWLINE",
+-		"COMMENT",
+-		"SL_COMMENT",
+-		"ML_COMMENT",
+-		"CHAR_LITERAL",
+-		"STRING_LITERAL",
+-		"ESC",
+-		"DIGIT",
+-		"XDIGIT"
+-	};
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = { 2L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = { 4658050L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = { 459264L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = { 386L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = { 2048L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = { 459650L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	private static final long[] mk_tokenSet_6() {
+-		long[] data = { 4202624L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+-	private static final long[] mk_tokenSet_7() {
+-		long[] data = { 34014082L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+-	private static final long[] mk_tokenSet_8() {
+-		long[] data = { 101122946L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorLexer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorLexer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorLexer.java	2006-02-08 22:31:35.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorLexer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,1388 +0,0 @@
+-// $ANTLR : "preproc.g" -> "PreprocessorLexer.java"$
+-
+-package persistence.antlr.preprocessor;
+-
+-import java.io.InputStream;
+-import persistence.antlr.TokenStreamException;
+-import persistence.antlr.TokenStreamIOException;
+-import persistence.antlr.TokenStreamRecognitionException;
+-import persistence.antlr.CharStreamException;
+-import persistence.antlr.CharStreamIOException;
+-import persistence.antlr.ANTLRException;
+-import java.io.Reader;
+-import java.util.Hashtable;
+-import persistence.antlr.CharScanner;
+-import persistence.antlr.InputBuffer;
+-import persistence.antlr.ByteBuffer;
+-import persistence.antlr.CharBuffer;
+-import persistence.antlr.Token;
+-import persistence.antlr.CommonToken;
+-import persistence.antlr.RecognitionException;
+-import persistence.antlr.NoViableAltForCharException;
+-import persistence.antlr.MismatchedCharException;
+-import persistence.antlr.TokenStream;
+-import persistence.antlr.ANTLRHashString;
+-import persistence.antlr.LexerSharedInputState;
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.SemanticException;
+-
+-public class PreprocessorLexer extends persistence.antlr.CharScanner implements PreprocessorTokenTypes, TokenStream
+- {
+-public PreprocessorLexer(InputStream in) {
+-	this(new ByteBuffer(in));
+-}
+-public PreprocessorLexer(Reader in) {
+-	this(new CharBuffer(in));
+-}
+-public PreprocessorLexer(InputBuffer ib) {
+-	this(new LexerSharedInputState(ib));
+-}
+-public PreprocessorLexer(LexerSharedInputState state) {
+-	super(state);
+-	caseSensitiveLiterals = true;
+-	setCaseSensitive(true);
+-	literals = new Hashtable();
+-	literals.put(new ANTLRHashString("public", this), new Integer(18));
+-	literals.put(new ANTLRHashString("class", this), new Integer(8));
+-	literals.put(new ANTLRHashString("throws", this), new Integer(23));
+-	literals.put(new ANTLRHashString("catch", this), new Integer(26));
+-	literals.put(new ANTLRHashString("private", this), new Integer(17));
+-	literals.put(new ANTLRHashString("extends", this), new Integer(10));
+-	literals.put(new ANTLRHashString("protected", this), new Integer(16));
+-	literals.put(new ANTLRHashString("returns", this), new Integer(21));
+-	literals.put(new ANTLRHashString("tokens", this), new Integer(4));
+-	literals.put(new ANTLRHashString("exception", this), new Integer(25));
+-}
+-
+-public Token nextToken() throws TokenStreamException {
+-	Token theRetToken=null;
+-tryAgain:
+-	for (;;) {
+-		Token _token = null;
+-		int _ttype = Token.INVALID_TYPE;
+-		resetText();
+-		try {   // for char stream error handling
+-			try {   // for lexical error handling
+-				switch ( LA(1)) {
+-				case ':':
+-				{
+-					mRULE_BLOCK(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '/':
+-				{
+-					mCOMMENT(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '{':
+-				{
+-					mACTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '"':
+-				{
+-					mSTRING_LITERAL(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '\'':
+-				{
+-					mCHAR_LITERAL(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '!':
+-				{
+-					mBANG(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ';':
+-				{
+-					mSEMI(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ',':
+-				{
+-					mCOMMA(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '}':
+-				{
+-					mRCURLY(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case ')':
+-				{
+-					mRPAREN(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case 'A':  case 'B':  case 'C':  case 'D':
+-				case 'E':  case 'F':  case 'G':  case 'H':
+-				case 'I':  case 'J':  case 'K':  case 'L':
+-				case 'M':  case 'N':  case 'O':  case 'P':
+-				case 'Q':  case 'R':  case 'S':  case 'T':
+-				case 'U':  case 'V':  case 'W':  case 'X':
+-				case 'Y':  case 'Z':  case '_':  case 'a':
+-				case 'b':  case 'c':  case 'd':  case 'e':
+-				case 'f':  case 'g':  case 'h':  case 'i':
+-				case 'j':  case 'k':  case 'l':  case 'm':
+-				case 'n':  case 'o':  case 'p':  case 'q':
+-				case 'r':  case 's':  case 't':  case 'u':
+-				case 'v':  case 'w':  case 'x':  case 'y':
+-				case 'z':
+-				{
+-					mID_OR_KEYWORD(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '=':
+-				{
+-					mASSIGN_RHS(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				case '[':
+-				{
+-					mARG_ACTION(true);
+-					theRetToken=_returnToken;
+-					break;
+-				}
+-				default:
+-					if ((LA(1)=='(') && (_tokenSet_0.member(LA(2)))) {
+-						mSUBRULE_BLOCK(true);
+-						theRetToken=_returnToken;
+-					}
+-					else if ((LA(1)=='(') && (true)) {
+-						mLPAREN(true);
+-						theRetToken=_returnToken;
+-					}
+-				else {
+-					if (LA(1)==EOF_CHAR) {uponEOF(); _returnToken = makeToken(Token.EOF_TYPE);}
+-				else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-				}
+-				}
+-				if ( _returnToken==null ) continue tryAgain; // found SKIP token
+-				_ttype = _returnToken.getType();
+-				_ttype = testLiteralsTable(_ttype);
+-				_returnToken.setType(_ttype);
+-				return _returnToken;
+-			}
+-			catch (RecognitionException e) {
+-				throw new TokenStreamRecognitionException(e);
+-			}
+-		}
+-		catch (CharStreamException cse) {
+-			if ( cse instanceof CharStreamIOException ) {
+-				throw new TokenStreamIOException(((CharStreamIOException)cse).io);
+-			}
+-			else {
+-				throw new TokenStreamException(cse.getMessage());
+-			}
+-		}
+-	}
+-}
+-
+-	public final void mRULE_BLOCK(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RULE_BLOCK;
+-		int _saveIndex;
+-		
+-		match(':');
+-		{
+-		if ((_tokenSet_1.member(LA(1))) && (_tokenSet_2.member(LA(2)))) {
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-		}
+-		else if ((_tokenSet_2.member(LA(1))) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		mALT(false);
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			_saveIndex=text.length();
+-			mWS(false);
+-			text.setLength(_saveIndex);
+-			break;
+-		}
+-		case ';':  case '|':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop306:
+-		do {
+-			if ((LA(1)=='|')) {
+-				match('|');
+-				{
+-				if ((_tokenSet_1.member(LA(1))) && (_tokenSet_2.member(LA(2)))) {
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-				}
+-				else if ((_tokenSet_2.member(LA(1))) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-				mALT(false);
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					_saveIndex=text.length();
+-					mWS(false);
+-					text.setLength(_saveIndex);
+-					break;
+-				}
+-				case ';':  case '|':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-			}
+-			else {
+-				break _loop306;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(';');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mWS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = WS;
+-		int _saveIndex;
+-		
+-		{
+-		int _cnt348=0;
+-		_loop348:
+-		do {
+-			if ((LA(1)==' ') && (true)) {
+-				match(' ');
+-			}
+-			else if ((LA(1)=='\t') && (true)) {
+-				match('\t');
+-			}
+-			else if ((LA(1)=='\n'||LA(1)=='\r') && (true)) {
+-				mNEWLINE(false);
+-			}
+-			else {
+-				if ( _cnt348>=1 ) { break _loop348; } else {throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());}
+-			}
+-			
+-			_cnt348++;
+-		} while (true);
+-		}
+-		_ttype = Token.SKIP;
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mALT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ALT;
+-		int _saveIndex;
+-		
+-		{
+-		_loop317:
+-		do {
+-			if ((_tokenSet_3.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mELEMENT(false);
+-			}
+-			else {
+-				break _loop317;
+-			}
+-			
+-		} while (true);
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSUBRULE_BLOCK(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SUBRULE_BLOCK;
+-		int _saveIndex;
+-		
+-		match('(');
+-		{
+-		if ((_tokenSet_1.member(LA(1))) && (_tokenSet_0.member(LA(2)))) {
+-			mWS(false);
+-		}
+-		else if ((_tokenSet_0.member(LA(1))) && (true)) {
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		mALT(false);
+-		{
+-		_loop312:
+-		do {
+-			if ((_tokenSet_4.member(LA(1))) && (_tokenSet_0.member(LA(2)))) {
+-				{
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '|':
+-				{
+-					break;
+-				}
+-				default:
+-				{
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				}
+-				}
+-				match('|');
+-				{
+-				if ((_tokenSet_1.member(LA(1))) && (_tokenSet_0.member(LA(2)))) {
+-					mWS(false);
+-				}
+-				else if ((_tokenSet_0.member(LA(1))) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-				mALT(false);
+-			}
+-			else {
+-				break _loop312;
+-			}
+-			
+-		} while (true);
+-		}
+-		{
+-		switch ( LA(1)) {
+-		case '\t':  case '\n':  case '\r':  case ' ':
+-		{
+-			mWS(false);
+-			break;
+-		}
+-		case ')':
+-		{
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		match(')');
+-		{
+-		if ((LA(1)=='=') && (LA(2)=='>')) {
+-			match("=>");
+-		}
+-		else if ((LA(1)=='*') && (true)) {
+-			match('*');
+-		}
+-		else if ((LA(1)=='+') && (true)) {
+-			match('+');
+-		}
+-		else if ((LA(1)=='?') && (true)) {
+-			match('?');
+-		}
+-		else {
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mELEMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ELEMENT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '/':
+-		{
+-			mCOMMENT(false);
+-			break;
+-		}
+-		case '{':
+-		{
+-			mACTION(false);
+-			break;
+-		}
+-		case '"':
+-		{
+-			mSTRING_LITERAL(false);
+-			break;
+-		}
+-		case '\'':
+-		{
+-			mCHAR_LITERAL(false);
+-			break;
+-		}
+-		case '(':
+-		{
+-			mSUBRULE_BLOCK(false);
+-			break;
+-		}
+-		case '\n':  case '\r':
+-		{
+-			mNEWLINE(false);
+-			break;
+-		}
+-		default:
+-			if ((_tokenSet_5.member(LA(1)))) {
+-				{
+-				match(_tokenSet_5);
+-				}
+-			}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCOMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMENT;
+-		int _saveIndex;
+-		
+-		{
+-		if ((LA(1)=='/') && (LA(2)=='/')) {
+-			mSL_COMMENT(false);
+-		}
+-		else if ((LA(1)=='/') && (LA(2)=='*')) {
+-			mML_COMMENT(false);
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		_ttype = Token.SKIP;
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ACTION;
+-		int _saveIndex;
+-		
+-		match('{');
+-		{
+-		_loop378:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='}') && (true)) break _loop378;
+-			if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mNEWLINE(false);
+-			}
+-			else if ((LA(1)=='{') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mACTION(false);
+-			}
+-			else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) {
+-				mCHAR_LITERAL(false);
+-			}
+-			else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+-				mCOMMENT(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mSTRING_LITERAL(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop378;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('}');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSTRING_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = STRING_LITERAL;
+-		int _saveIndex;
+-		
+-		match('"');
+-		{
+-		_loop363:
+-		do {
+-			if ((LA(1)=='\\')) {
+-				mESC(false);
+-			}
+-			else if ((_tokenSet_7.member(LA(1)))) {
+-				matchNot('"');
+-			}
+-			else {
+-				break _loop363;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('"');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCHAR_LITERAL(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CHAR_LITERAL;
+-		int _saveIndex;
+-		
+-		match('\'');
+-		{
+-		if ((LA(1)=='\\')) {
+-			mESC(false);
+-		}
+-		else if ((_tokenSet_8.member(LA(1)))) {
+-			matchNot('\'');
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		match('\'');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mNEWLINE(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = NEWLINE;
+-		int _saveIndex;
+-		
+-		{
+-		if ((LA(1)=='\r') && (LA(2)=='\n')) {
+-			match('\r');
+-			match('\n');
+-			newline();
+-		}
+-		else if ((LA(1)=='\r') && (true)) {
+-			match('\r');
+-			newline();
+-		}
+-		else if ((LA(1)=='\n')) {
+-			match('\n');
+-			newline();
+-		}
+-		else {
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mBANG(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = BANG;
+-		int _saveIndex;
+-		
+-		match('!');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mSEMI(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SEMI;
+-		int _saveIndex;
+-		
+-		match(';');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mCOMMA(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = COMMA;
+-		int _saveIndex;
+-		
+-		match(',');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRCURLY(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RCURLY;
+-		int _saveIndex;
+-		
+-		match('}');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mLPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = LPAREN;
+-		int _saveIndex;
+-		
+-		match('(');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mRPAREN(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = RPAREN;
+-		int _saveIndex;
+-		
+-		match(')');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-/** This rule picks off keywords in the lexer that need to be
+- *  handled specially.  For example, "header" is the start
+- *  of the header action (used to distinguish between options
+- *  block and an action).  We do not want "header" to go back
+- *  to the parser as a simple keyword...it must pick off
+- *  the action afterwards.
+- */
+-	public final void mID_OR_KEYWORD(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID_OR_KEYWORD;
+-		int _saveIndex;
+-		Token id=null;
+-		
+-		mID(true);
+-		id=_returnToken;
+-		_ttype = id.getType();
+-		{
+-		if (((_tokenSet_9.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&(id.getText().equals("header"))) {
+-			{
+-			if ((_tokenSet_1.member(LA(1))) && (_tokenSet_9.member(LA(2)))) {
+-				mWS(false);
+-			}
+-			else if ((_tokenSet_9.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			{
+-			switch ( LA(1)) {
+-			case '"':
+-			{
+-				mSTRING_LITERAL(false);
+-				break;
+-			}
+-			case '\t':  case '\n':  case '\r':  case ' ':
+-			case '/':  case '{':
+-			{
+-				break;
+-			}
+-			default:
+-			{
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			}
+-			}
+-			{
+-			_loop331:
+-			do {
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '/':
+-				{
+-					mCOMMENT(false);
+-					break;
+-				}
+-				default:
+-				{
+-					break _loop331;
+-				}
+-				}
+-			} while (true);
+-			}
+-			mACTION(false);
+-			_ttype = HEADER_ACTION;
+-		}
+-		else if (((_tokenSet_10.member(LA(1))) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff')))&&(id.getText().equals("tokens"))) {
+-			{
+-			_loop333:
+-			do {
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '/':
+-				{
+-					mCOMMENT(false);
+-					break;
+-				}
+-				default:
+-				{
+-					break _loop333;
+-				}
+-				}
+-			} while (true);
+-			}
+-			mCURLY_BLOCK_SCARF(false);
+-			_ttype = TOKENS_SPEC;
+-		}
+-		else if (((_tokenSet_10.member(LA(1))) && (true))&&(id.getText().equals("options"))) {
+-			{
+-			_loop335:
+-			do {
+-				switch ( LA(1)) {
+-				case '\t':  case '\n':  case '\r':  case ' ':
+-				{
+-					mWS(false);
+-					break;
+-				}
+-				case '/':
+-				{
+-					mCOMMENT(false);
+-					break;
+-				}
+-				default:
+-				{
+-					break _loop335;
+-				}
+-				}
+-			} while (true);
+-			}
+-			match('{');
+-			_ttype = OPTIONS_START;
+-		}
+-		else {
+-		}
+-		
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mID(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ID;
+-		int _saveIndex;
+-		
+-		{
+-		switch ( LA(1)) {
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':  case 'g':  case 'h':
+-		case 'i':  case 'j':  case 'k':  case 'l':
+-		case 'm':  case 'n':  case 'o':  case 'p':
+-		case 'q':  case 'r':  case 's':  case 't':
+-		case 'u':  case 'v':  case 'w':  case 'x':
+-		case 'y':  case 'z':
+-		{
+-			matchRange('a','z');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':  case 'G':  case 'H':
+-		case 'I':  case 'J':  case 'K':  case 'L':
+-		case 'M':  case 'N':  case 'O':  case 'P':
+-		case 'Q':  case 'R':  case 'S':  case 'T':
+-		case 'U':  case 'V':  case 'W':  case 'X':
+-		case 'Y':  case 'Z':
+-		{
+-			matchRange('A','Z');
+-			break;
+-		}
+-		case '_':
+-		{
+-			match('_');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		{
+-		_loop342:
+-		do {
+-			switch ( LA(1)) {
+-			case 'a':  case 'b':  case 'c':  case 'd':
+-			case 'e':  case 'f':  case 'g':  case 'h':
+-			case 'i':  case 'j':  case 'k':  case 'l':
+-			case 'm':  case 'n':  case 'o':  case 'p':
+-			case 'q':  case 'r':  case 's':  case 't':
+-			case 'u':  case 'v':  case 'w':  case 'x':
+-			case 'y':  case 'z':
+-			{
+-				matchRange('a','z');
+-				break;
+-			}
+-			case 'A':  case 'B':  case 'C':  case 'D':
+-			case 'E':  case 'F':  case 'G':  case 'H':
+-			case 'I':  case 'J':  case 'K':  case 'L':
+-			case 'M':  case 'N':  case 'O':  case 'P':
+-			case 'Q':  case 'R':  case 'S':  case 'T':
+-			case 'U':  case 'V':  case 'W':  case 'X':
+-			case 'Y':  case 'Z':
+-			{
+-				matchRange('A','Z');
+-				break;
+-			}
+-			case '_':
+-			{
+-				match('_');
+-				break;
+-			}
+-			case '0':  case '1':  case '2':  case '3':
+-			case '4':  case '5':  case '6':  case '7':
+-			case '8':  case '9':
+-			{
+-				matchRange('0','9');
+-				break;
+-			}
+-			default:
+-			{
+-				break _loop342;
+-			}
+-			}
+-		} while (true);
+-		}
+-		_ttype = testLiteralsTable(new String(text.getBuffer(),_begin,text.length()-_begin),_ttype);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mCURLY_BLOCK_SCARF(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = CURLY_BLOCK_SCARF;
+-		int _saveIndex;
+-		
+-		match('{');
+-		{
+-		_loop338:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='}') && (true)) break _loop338;
+-			if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mNEWLINE(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mSTRING_LITERAL(false);
+-			}
+-			else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) {
+-				mCHAR_LITERAL(false);
+-			}
+-			else if ((LA(1)=='/') && (LA(2)=='*'||LA(2)=='/')) {
+-				mCOMMENT(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop338;
+-			}
+-			
+-		} while (true);
+-		}
+-		match('}');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mASSIGN_RHS(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ASSIGN_RHS;
+-		int _saveIndex;
+-		
+-		_saveIndex=text.length();
+-		match('=');
+-		text.setLength(_saveIndex);
+-		{
+-		_loop345:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)==';') && (true)) break _loop345;
+-			if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mSTRING_LITERAL(false);
+-			}
+-			else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) {
+-				mCHAR_LITERAL(false);
+-			}
+-			else if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mNEWLINE(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop345;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(';');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mSL_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = SL_COMMENT;
+-		int _saveIndex;
+-		
+-		match("//");
+-		{
+-		_loop355:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='\n'||LA(1)=='\r') && (true)) break _loop355;
+-			if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop355;
+-			}
+-			
+-		} while (true);
+-		}
+-		mNEWLINE(false);
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mML_COMMENT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ML_COMMENT;
+-		int _saveIndex;
+-		
+-		match("/*");
+-		{
+-		_loop358:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)=='*') && (LA(2)=='/')) break _loop358;
+-			if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mNEWLINE(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop358;
+-			}
+-			
+-		} while (true);
+-		}
+-		match("*/");
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mESC(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ESC;
+-		int _saveIndex;
+-		
+-		match('\\');
+-		{
+-		switch ( LA(1)) {
+-		case 'n':
+-		{
+-			match('n');
+-			break;
+-		}
+-		case 'r':
+-		{
+-			match('r');
+-			break;
+-		}
+-		case 't':
+-		{
+-			match('t');
+-			break;
+-		}
+-		case 'b':
+-		{
+-			match('b');
+-			break;
+-		}
+-		case 'f':
+-		{
+-			match('f');
+-			break;
+-		}
+-		case 'w':
+-		{
+-			match('w');
+-			break;
+-		}
+-		case 'a':
+-		{
+-			match('a');
+-			break;
+-		}
+-		case '"':
+-		{
+-			match('"');
+-			break;
+-		}
+-		case '\'':
+-		{
+-			match('\'');
+-			break;
+-		}
+-		case '\\':
+-		{
+-			match('\\');
+-			break;
+-		}
+-		case '0':  case '1':  case '2':  case '3':
+-		{
+-			{
+-			matchRange('0','3');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mDIGIT(false);
+-				{
+-				if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-					mDIGIT(false);
+-				}
+-				else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-				}
+-				else {
+-					throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-				}
+-				
+-				}
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case '4':  case '5':  case '6':  case '7':
+-		{
+-			{
+-			matchRange('4','7');
+-			}
+-			{
+-			if (((LA(1) >= '0' && LA(1) <= '9')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mDIGIT(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && (true)) {
+-			}
+-			else {
+-				throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-			}
+-			
+-			}
+-			break;
+-		}
+-		case 'u':
+-		{
+-			match('u');
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			mXDIGIT(false);
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = DIGIT;
+-		int _saveIndex;
+-		
+-		matchRange('0','9');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	protected final void mXDIGIT(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = XDIGIT;
+-		int _saveIndex;
+-		
+-		switch ( LA(1)) {
+-		case '0':  case '1':  case '2':  case '3':
+-		case '4':  case '5':  case '6':  case '7':
+-		case '8':  case '9':
+-		{
+-			matchRange('0','9');
+-			break;
+-		}
+-		case 'a':  case 'b':  case 'c':  case 'd':
+-		case 'e':  case 'f':
+-		{
+-			matchRange('a','f');
+-			break;
+-		}
+-		case 'A':  case 'B':  case 'C':  case 'D':
+-		case 'E':  case 'F':
+-		{
+-			matchRange('A','F');
+-			break;
+-		}
+-		default:
+-		{
+-			throw new NoViableAltForCharException((char)LA(1), getFilename(), getLine(), getColumn());
+-		}
+-		}
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	public final void mARG_ACTION(boolean _createToken) throws RecognitionException, CharStreamException, TokenStreamException {
+-		int _ttype; Token _token=null; int _begin=text.length();
+-		_ttype = ARG_ACTION;
+-		int _saveIndex;
+-		
+-		match('[');
+-		{
+-		_loop375:
+-		do {
+-			// nongreedy exit test
+-			if ((LA(1)==']') && (true)) break _loop375;
+-			if ((LA(1)=='[') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mARG_ACTION(false);
+-			}
+-			else if ((LA(1)=='\n'||LA(1)=='\r') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mNEWLINE(false);
+-			}
+-			else if ((LA(1)=='\'') && (_tokenSet_6.member(LA(2)))) {
+-				mCHAR_LITERAL(false);
+-			}
+-			else if ((LA(1)=='"') && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				mSTRING_LITERAL(false);
+-			}
+-			else if (((LA(1) >= '\u0003' && LA(1) <= '\u00ff')) && ((LA(2) >= '\u0003' && LA(2) <= '\u00ff'))) {
+-				matchNot(EOF_CHAR);
+-			}
+-			else {
+-				break _loop375;
+-			}
+-			
+-		} while (true);
+-		}
+-		match(']');
+-		if ( _createToken && _token==null && _ttype!=Token.SKIP ) {
+-			_token = makeToken(_ttype);
+-			_token.setText(new String(text.getBuffer(), _begin, text.length()-_begin));
+-		}
+-		_returnToken = _token;
+-	}
+-	
+-	
+-	private static final long[] mk_tokenSet_0() {
+-		long[] data = new long[8];
+-		data[0]=-576460752303423496L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_0 = new BitSet(mk_tokenSet_0());
+-	private static final long[] mk_tokenSet_1() {
+-		long[] data = { 4294977024L, 0L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_1 = new BitSet(mk_tokenSet_1());
+-	private static final long[] mk_tokenSet_2() {
+-		long[] data = new long[8];
+-		data[0]=-2199023255560L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_2 = new BitSet(mk_tokenSet_2());
+-	private static final long[] mk_tokenSet_3() {
+-		long[] data = new long[8];
+-		data[0]=-576462951326679048L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_3 = new BitSet(mk_tokenSet_3());
+-	private static final long[] mk_tokenSet_4() {
+-		long[] data = { 4294977024L, 1152921504606846976L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_4 = new BitSet(mk_tokenSet_4());
+-	private static final long[] mk_tokenSet_5() {
+-		long[] data = new long[8];
+-		data[0]=-576605355262354440L;
+-		data[1]=-576460752303423489L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_5 = new BitSet(mk_tokenSet_5());
+-	private static final long[] mk_tokenSet_6() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		for (int i = 1; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_6 = new BitSet(mk_tokenSet_6());
+-	private static final long[] mk_tokenSet_7() {
+-		long[] data = new long[8];
+-		data[0]=-17179869192L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_7 = new BitSet(mk_tokenSet_7());
+-	private static final long[] mk_tokenSet_8() {
+-		long[] data = new long[8];
+-		data[0]=-549755813896L;
+-		data[1]=-268435457L;
+-		for (int i = 2; i<=3; i++) { data[i]=-1L; }
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_8 = new BitSet(mk_tokenSet_8());
+-	private static final long[] mk_tokenSet_9() {
+-		long[] data = { 140758963201536L, 576460752303423488L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_9 = new BitSet(mk_tokenSet_9());
+-	private static final long[] mk_tokenSet_10() {
+-		long[] data = { 140741783332352L, 576460752303423488L, 0L, 0L, 0L};
+-		return data;
+-	}
+-	public static final BitSet _tokenSet_10 = new BitSet(mk_tokenSet_10());
+-	
+-	}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorTokenTypes.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorTokenTypes.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorTokenTypes.java	2006-02-08 22:31:36.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/PreprocessorTokenTypes.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,47 +0,0 @@
+-// $ANTLR : "preproc.g" -> "Preprocessor.java"$
+-
+-package persistence.antlr.preprocessor;
+-
+-public interface PreprocessorTokenTypes {
+-	int EOF = 1;
+-	int NULL_TREE_LOOKAHEAD = 3;
+-	int LITERAL_tokens = 4;
+-	int HEADER_ACTION = 5;
+-	int SUBRULE_BLOCK = 6;
+-	int ACTION = 7;
+-	int LITERAL_class = 8;
+-	int ID = 9;
+-	int LITERAL_extends = 10;
+-	int SEMI = 11;
+-	int TOKENS_SPEC = 12;
+-	int OPTIONS_START = 13;
+-	int ASSIGN_RHS = 14;
+-	int RCURLY = 15;
+-	int LITERAL_protected = 16;
+-	int LITERAL_private = 17;
+-	int LITERAL_public = 18;
+-	int BANG = 19;
+-	int ARG_ACTION = 20;
+-	int LITERAL_returns = 21;
+-	int RULE_BLOCK = 22;
+-	int LITERAL_throws = 23;
+-	int COMMA = 24;
+-	int LITERAL_exception = 25;
+-	int LITERAL_catch = 26;
+-	int ALT = 27;
+-	int ELEMENT = 28;
+-	int LPAREN = 29;
+-	int RPAREN = 30;
+-	int ID_OR_KEYWORD = 31;
+-	int CURLY_BLOCK_SCARF = 32;
+-	int WS = 33;
+-	int NEWLINE = 34;
+-	int COMMENT = 35;
+-	int SL_COMMENT = 36;
+-	int ML_COMMENT = 37;
+-	int CHAR_LITERAL = 38;
+-	int STRING_LITERAL = 39;
+-	int ESC = 40;
+-	int DIGIT = 41;
+-	int XDIGIT = 42;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Rule.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Rule.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Rule.java	2006-08-31 00:34:16.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Rule.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,155 +0,0 @@
+-package persistence.antlr.preprocessor;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.IndexedVector;
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-
+-class Rule {
+-    protected String name;
+-    protected String block;
+-    protected String args;
+-    protected String returnValue;
+-    protected String throwsSpec;
+-    protected String initAction;
+-    protected IndexedVector options;
+-    protected String visibility;
+-    protected Grammar enclosingGrammar;
+-    protected boolean bang = false;
+-
+-    public Rule(String n, String b, IndexedVector options, Grammar gr) {
+-        name = n;
+-        block = b;
+-        this.options = options;
+-        setEnclosingGrammar(gr);
+-    }
+-
+-    public String getArgs() {
+-        return args;
+-    }
+-
+-    public boolean getBang() {
+-        return bang;
+-    }
+-
+-    public String getName() {
+-        return name;
+-    }
+-
+-    public String getReturnValue() {
+-        return returnValue;
+-    }
+-
+-    public String getVisibility() {
+-        return visibility;
+-    }
+-
+-    /** If 'rule' narrows the visible of 'this', return true;
+-     *  For example, 'this' is public and 'rule' is private,
+-     *  true is returned.  You cannot narrow the vis. of
+-     *  a rule.
+-     */
+-    public boolean narrowerVisibility(Rule rule) {
+-        if (visibility.equals("public")) {
+-            if (!rule.equals("public")) {
+-                return true;	// everything narrower than public
+-            }
+-            return false;
+-        }
+-        else if (visibility.equals("protected")) {
+-            if (rule.equals("private")) {
+-                return true;	// private narrower than protected
+-            }
+-            return false;
+-        }
+-        else if (visibility.equals("private")) {
+-            return false;	// nothing is narrower than private
+-        }
+-        return false;
+-    }
+-
+-    /** Two rules have the same signature if they have:
+-     *  	same name
+-     *		same return value
+-     *		same args
+-     *	I do a simple string compare now, but later
+-     *	the type could be pulled out so it is insensitive
+-     *	to names of args etc...
+-     */
+-    public boolean sameSignature(Rule rule) {
+-        boolean nSame = true;
+-        boolean aSame = true;
+-        boolean rSame = true;
+-
+-        nSame = name.equals(rule.getName());
+-        if (args != null) {
+-            aSame = args.equals(rule.getArgs());
+-        }
+-        if (returnValue != null) {
+-            rSame = returnValue.equals(rule.getReturnValue());
+-        }
+-        return nSame && aSame && rSame;
+-    }
+-
+-    public void setArgs(String a) {
+-        args = a;
+-    }
+-
+-    public void setBang() {
+-        bang = true;
+-    }
+-
+-    public void setEnclosingGrammar(Grammar g) {
+-        enclosingGrammar = g;
+-    }
+-
+-    public void setInitAction(String a) {
+-        initAction = a;
+-    }
+-
+-    public void setOptions(IndexedVector options) {
+-        this.options = options;
+-    }
+-
+-    public void setReturnValue(String ret) {
+-        returnValue = ret;
+-    }
+-
+-    public void setThrowsSpec(String t) {
+-        throwsSpec = t;
+-    }
+-
+-    public void setVisibility(String v) {
+-        visibility = v;
+-    }
+-
+-    public String toString() {
+-        String s = "";
+-        String retString = returnValue == null ? "" : "returns " + returnValue;
+-        String argString = args == null ? "" : args;
+-        String bang = getBang() ? "!" : "";
+-
+-        s += visibility == null ? "" : visibility + " ";
+-        s += name + bang + argString + " " + retString + throwsSpec;
+-        if (options != null) {
+-            s += System.getProperty("line.separator") +
+-                "options {" +
+-                System.getProperty("line.separator");
+-            for (Enumeration e = options.elements(); e.hasMoreElements();) {
+-                s += (Option)e.nextElement() + System.getProperty("line.separator");
+-            }
+-            s += "}" + System.getProperty("line.separator");
+-        }
+-        if (initAction != null) {
+-            s += initAction + System.getProperty("line.separator");
+-        }
+-        s += block;
+-        return s;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Tool.java glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Tool.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/preprocessor/Tool.java	2006-08-31 00:34:16.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/preprocessor/Tool.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,144 +0,0 @@
+-package persistence.antlr.preprocessor;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.*;
+-import persistence.antlr.collections.impl.Vector;
+-import java.util.Enumeration;
+-
+-/** Tester for the preprocessor */
+-public class Tool {
+-    protected Hierarchy theHierarchy;
+-    protected String grammarFileName;
+-    protected String[] args;
+-    protected int nargs;		// how many args in new args list
+-    protected Vector grammars;
+-    protected persistence.antlr.Tool antlrTool;
+-
+-    public Tool(persistence.antlr.Tool t, String[] args) {
+-        antlrTool = t;
+-        processArguments(args);
+-    }
+-
+-    public static void main(String[] args) {
+-        persistence.antlr.Tool antlrTool = new persistence.antlr.Tool();
+-        Tool theTool = new Tool(antlrTool, args);
+-        theTool.preprocess();
+-        String[] a = theTool.preprocessedArgList();
+-        for (int i = 0; i < a.length; i++) {
+-            System.out.print(" " + a[i]);
+-        }
+-        System.out.println();
+-    }
+-
+-    public boolean preprocess() {
+-        if (grammarFileName == null) {
+-            antlrTool.toolError("no grammar file specified");
+-            return false;
+-        }
+-        if (grammars != null) {
+-            theHierarchy = new Hierarchy(antlrTool);
+-            for (Enumeration e = grammars.elements(); e.hasMoreElements();) {
+-                String f = (String)e.nextElement();
+-                try {
+-                    theHierarchy.readGrammarFile(f);
+-                }
+-                catch (FileNotFoundException fe) {
+-                    antlrTool.toolError("file " + f + " not found");
+-                    return false;
+-                }
+-            }
+-        }
+-
+-        // do the actual inheritance stuff
+-        boolean complete = theHierarchy.verifyThatHierarchyIsComplete();
+-        if (!complete)
+-            return false;
+-        theHierarchy.expandGrammarsInFile(grammarFileName);
+-        GrammarFile gf = theHierarchy.getFile(grammarFileName);
+-        String expandedFileName = gf.nameForExpandedGrammarFile(grammarFileName);
+-
+-        // generate the output file if necessary
+-        if (expandedFileName.equals(grammarFileName)) {
+-            args[nargs++] = grammarFileName;			// add to argument list
+-        }
+-        else {
+-            try {
+-                gf.generateExpandedFile(); 				// generate file to feed ANTLR
+-                args[nargs++] = antlrTool.getOutputDirectory() +
+-                    System.getProperty("file.separator") +
+-                    expandedFileName;		// add to argument list
+-            }
+-            catch (IOException io) {
+-                antlrTool.toolError("cannot write expanded grammar file " + expandedFileName);
+-                return false;
+-            }
+-        }
+-        return true;
+-    }
+-
+-    /** create new arg list with correct length to pass to ANTLR */
+-    public String[] preprocessedArgList() {
+-        String[] a = new String[nargs];
+-        System.arraycopy(args, 0, a, 0, nargs);
+-        args = a;
+-        return args;
+-    }
+-
+-    /** Process -glib options and grammar file.  Create a new args list
+-     *  that does not contain the -glib option.  The grammar file name
+-     *  might be modified and, hence, is not added yet to args list.
+-     */
+-    private void processArguments(String[] incomingArgs) {
+-		 this.nargs = 0;
+-		 this.args = new String[incomingArgs.length];
+-		 for (int i = 0; i < incomingArgs.length; i++) {
+-			 if ( incomingArgs[i].length() == 0 )
+-			 {
+-				 antlrTool.warning("Zero length argument ignoring...");
+-				 continue;
+-			 }
+-			 if (incomingArgs[i].equals("-glib")) {
+-				 // if on a pc and they use a '/', warn them
+-				 if (File.separator.equals("\\") &&
+-					  incomingArgs[i].indexOf('/') != -1) {
+-					 antlrTool.warning("-glib cannot deal with '/' on a PC: use '\\'; ignoring...");
+-				 }
+-				 else {
+-					 grammars = antlrTool.parseSeparatedList(incomingArgs[i + 1], ';');
+-					 i++;
+-				 }
+-			 }
+-			 else if (incomingArgs[i].equals("-o")) {
+-				 args[this.nargs++] = incomingArgs[i];
+-				 if (i + 1 >= incomingArgs.length) {
+-					 antlrTool.error("missing output directory with -o option; ignoring");
+-				 }
+-				 else {
+-					 i++;
+-					 args[this.nargs++] = incomingArgs[i];
+-					 antlrTool.setOutputDirectory(incomingArgs[i]);
+-				 }
+-			 }
+-			 else if (incomingArgs[i].charAt(0) == '-') {
+-				 args[this.nargs++] = incomingArgs[i];
+-			 }
+-			 else {
+-				 // Must be the grammar file
+-				 grammarFileName = incomingArgs[i];
+-				 if (grammars == null) {
+-					 grammars = new Vector(10);
+-				 }
+-				 grammars.appendElement(grammarFileName);	// process it too
+-				 if ((i + 1) < incomingArgs.length) {
+-					 antlrTool.warning("grammar file must be last; ignoring other arguments...");
+-					 break;
+-				 }
+-			 }
+-		 }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/PreservingFileWriter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/PreservingFileWriter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/PreservingFileWriter.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/PreservingFileWriter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,137 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- * @author Ric Klaren <klaren at cs.utwente.nl>
+- */
+-
+-import java.io.*;
+-
+-/** PreservingFileWriter only overwrites target if the new file is different.
+- Mainly added in order to prevent big and unnecessary recompiles in C++
+- projects.
+- I/O is buffered.
+-*/
+-public class PreservingFileWriter extends FileWriter {
+-	protected File target_file;	/// the file we intend to write to
+-	protected File tmp_file;		/// the tmp file we create at first
+-
+-	public PreservingFileWriter(String file) throws IOException
+-	{
+-		super(file+".antlr.tmp");
+-
+-		// set up File thingy for target..
+-		target_file = new File(file);
+-
+-		String parentdirname = target_file.getParent();
+-		if( parentdirname != null )
+-	    {
+-			File parentdir = new File(parentdirname);
+-
+-			if (!parentdir.exists())
+-				throw new IOException("destination directory of '"+file+"' doesn't exist");
+-			if (!parentdir.canWrite())
+-				throw new IOException("destination directory of '"+file+"' isn't writeable");
+-		}
+-		if( target_file.exists() && ! target_file.canWrite() )
+-			throw new IOException("cannot write to '"+file+"'");
+-
+-		// and for the temp file
+-		tmp_file = new File(file+".antlr.tmp");
+-		// have it nuked at exit
+-		// RK: this is broken on java 1.4 and
+-		// is not compatible with java 1.1 (which is a big problem I'm told :) )
+-		// sigh. Any real language would do this in a destructor ;) ;)
+-		// tmp_file.deleteOnExit();
+-	}
+-
+-	/** Close the file and see if the actual target is different
+-	 * if so the target file is overwritten by the copy. If not we do nothing
+-	 */
+-	public void close() throws IOException
+-	{
+-		Reader source = null;
+-		Writer target = null;
+-
+-		try {
+-			// close the tmp file so we can access it safely...
+-			super.close();
+-
+-			char[] buffer = new char[1024];
+-			int cnt;
+-
+-			// target_file != tmp_file so we have to compare and move it..
+-			if( target_file.length() == tmp_file.length() )
+-			{
+-				// Do expensive read'n'compare
+-				Reader tmp;
+-				char[] buf2 = new char[1024];
+-
+-				source = new BufferedReader(new FileReader(tmp_file));
+-				tmp = new BufferedReader(new FileReader(target_file));
+-				int cnt1, cnt2;
+-				boolean equal = true;
+-
+-				while( equal )
+-				{
+-					cnt1 = source.read(buffer,0,1024);
+-					cnt2 = tmp.read(buf2,0,1024);
+-					if( cnt1 != cnt2 )
+-					{
+-						equal = false;
+-						break;
+-					}
+-					if( cnt1 == -1 )		// EOF
+-						break;
+-					for( int i = 0; i < cnt1; i++ )
+-					{
+-						if( buffer[i] != buf2[i] )
+-						{
+-							equal = false;
+-							break;
+-						}
+-					}
+-				}
+-				// clean up...
+-				source.close();
+-				tmp.close();
+-
+-				source = tmp = null;
+-
+-				if( equal )
+-					return;
+-			}
+-
+-			source = new BufferedReader(new FileReader(tmp_file));
+-			target = new BufferedWriter(new FileWriter(target_file));
+-
+-			while(true)
+-			{
+-				cnt = source.read(buffer,0,1024);
+-				if( cnt == -1 )
+-					break;
+-				target.write(buffer, 0, cnt );
+-			}
+-		}
+-		finally {
+-			if( source != null )
+-			{
+-				try { source.close(); }
+-				catch( IOException e ) { ; }
+-			}
+-			if( target != null )
+-			{
+-				try { target.close(); }
+-				catch( IOException e ) { ;	}
+-			}
+-			// RK: Now if I'm correct this should be called anytime.
+-			if( tmp_file != null && tmp_file.exists() )
+-			{
+-				tmp_file.delete();
+-				tmp_file = null;
+-			}
+-		}
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/RecognitionException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/RecognitionException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/RecognitionException.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/RecognitionException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,69 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class RecognitionException extends ANTLRException {
+-    public String fileName;		// not used by treeparsers
+-    public int line;
+-    public int column;
+-
+-    public RecognitionException() {
+-        super("parsing error");
+-        fileName = null;
+-        line = -1;
+-        column = -1;
+-    }
+-
+-    /**
+-     * RecognitionException constructor comment.
+-     * @param s java.lang.String
+-     */
+-    public RecognitionException(String s) {
+-        super(s);
+-        fileName = null;
+-        line = -1;
+-        column = -1;
+-    }
+-
+-    /** @deprecated As of ANTLR 2.7.2 use {@see #RecognitionException(char, String, int, int) } */
+-    public RecognitionException(String s, String fileName_, int line_) {
+-        this(s, fileName_, line_, -1);
+-    }
+-    
+-    /**
+-     * RecognitionException constructor comment.
+-     * @param s java.lang.String
+-     */
+-    public RecognitionException(String s, String fileName_, int line_, int column_) {
+-        super(s);
+-        fileName = fileName_;
+-        line = line_;
+-        column = column_;
+-    }
+-
+-    public String getFilename() {
+-        return fileName;
+-    }
+-
+-    public int getLine() {
+-        return line;
+-    }
+-
+-    public int getColumn() {
+-        return column;
+-    }
+-
+-    /** @deprecated As of ANTLR 2.7.0 */
+-    public String getErrorMessage() {
+-        return getMessage();
+-    }
+-
+-    public String toString() {
+-        return FileLineFormatter.getFormatter().
+-            getFormatString(fileName, line, column) + getMessage();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/RuleBlock.java glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleBlock.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/RuleBlock.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleBlock.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,210 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-import java.util.Hashtable;
+-
+-/**A list of alternatives and info contained in
+- * the rule definition.
+- */
+-public class RuleBlock extends AlternativeBlock {
+-    protected String ruleName;
+-    protected String argAction = null;	// string for rule arguments [...]
+-    protected String throwsSpec = null;
+-    protected String returnAction = null;// string for rule return type(s) <...>
+-    protected RuleEndElement endNode;	// which node ends this rule?
+-
+-    // Generate literal-testing code for lexer rule?
+-    protected boolean testLiterals = false;
+-
+-    Vector labeledElements;	// List of labeled elements found in this rule
+-    // This is a list of AlternativeElement (or subclass)
+-
+-    protected boolean[] lock;	// for analysis; used to avoid infinite loops
+-    // 1..k
+-    protected Lookahead cache[];// Each rule can cache it's lookahead computation.
+-
+-    // This cache contains an epsilon
+-    // imaginary token if the FOLLOW is required.  No
+-    // FOLLOW information is cached here.
+-    // The FIRST(rule) is stored in this cache; 1..k
+-    // This set includes FIRST of all alts.
+-
+-    Hashtable exceptionSpecs;		// table of String-to-ExceptionSpec.
+-
+-    // grammar-settable options
+-    protected boolean defaultErrorHandler = true;
+-    protected String ignoreRule = null;
+-
+-    /** Construct a named rule. */
+-    public RuleBlock(Grammar g, String r) {
+-        super(g);
+-        ruleName = r;
+-        labeledElements = new Vector();
+-        cache = new Lookahead[g.maxk + 1];
+-        exceptionSpecs = new Hashtable();
+-        setAutoGen(g instanceof ParserGrammar);
+-    }
+-
+-    /** Construct a named rule with line number information */
+-    public RuleBlock(Grammar g, String r, int line, boolean doAutoGen_) {
+-        this(g, r);
+-        this.line = line;
+-        setAutoGen(doAutoGen_);
+-    }
+-
+-    public void addExceptionSpec(ExceptionSpec ex) {
+-        if (findExceptionSpec(ex.label) != null) {
+-            if (ex.label != null) {
+-                grammar.antlrTool.error("Rule '" + ruleName + "' already has an exception handler for label: " + ex.label);
+-            }
+-            else {
+-                grammar.antlrTool.error("Rule '" + ruleName + "' already has an exception handler");
+-            }
+-        }
+-        else {
+-            exceptionSpecs.put((ex.label == null ? "" : ex.label.getText()), ex);
+-        }
+-    }
+-
+-    public ExceptionSpec findExceptionSpec(Token label) {
+-        return (ExceptionSpec)exceptionSpecs.get(label == null ? "" : label.getText());
+-    }
+-
+-    public ExceptionSpec findExceptionSpec(String label) {
+-        return (ExceptionSpec)exceptionSpecs.get(label == null ? "" : label);
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public boolean getDefaultErrorHandler() {
+-        return defaultErrorHandler;
+-    }
+-
+-    public RuleEndElement getEndElement() {
+-        return endNode;
+-    }
+-
+-    public String getIgnoreRule() {
+-        return ignoreRule;
+-    }
+-
+-    public String getRuleName() {
+-        return ruleName;
+-    }
+-
+-    public boolean getTestLiterals() {
+-        return testLiterals;
+-    }
+-
+-    public boolean isLexerAutoGenRule() {
+-        return ruleName.equals("nextToken");
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public void prepareForAnalysis() {
+-        super.prepareForAnalysis();
+-        lock = new boolean[grammar.maxk + 1];
+-    }
+-
+-    // rule option values
+-    public void setDefaultErrorHandler(boolean value) {
+-        defaultErrorHandler = value;
+-    }
+-
+-    public void setEndElement(RuleEndElement re) {
+-        endNode = re;
+-    }
+-
+-    public void setOption(Token key, Token value) {
+-        if (key.getText().equals("defaultErrorHandler")) {
+-            if (value.getText().equals("true")) {
+-                defaultErrorHandler = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                defaultErrorHandler = false;
+-            }
+-            else {
+-                grammar.antlrTool.error("Value for defaultErrorHandler must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-        }
+-        else if (key.getText().equals("testLiterals")) {
+-            if (!(grammar instanceof LexerGrammar)) {
+-                grammar.antlrTool.error("testLiterals option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-            else {
+-                if (value.getText().equals("true")) {
+-                    testLiterals = true;
+-                }
+-                else if (value.getText().equals("false")) {
+-                    testLiterals = false;
+-                }
+-                else {
+-                    grammar.antlrTool.error("Value for testLiterals must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
+-                }
+-            }
+-        }
+-        else if (key.getText().equals("ignore")) {
+-            if (!(grammar instanceof LexerGrammar)) {
+-                grammar.antlrTool.error("ignore option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-            else {
+-                ignoreRule = value.getText();
+-            }
+-        }
+-        else if (key.getText().equals("paraphrase")) {
+-            if (!(grammar instanceof LexerGrammar)) {
+-                grammar.antlrTool.error("paraphrase option only valid for lexer rules", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-            else {
+-                // find token def associated with this rule
+-                TokenSymbol ts = grammar.tokenManager.getTokenSymbol(ruleName);
+-                if (ts == null) {
+-                    grammar.antlrTool.panic("cannot find token associated with rule " + ruleName);
+-                }
+-                ts.setParaphrase(value.getText());
+-            }
+-        }
+-        else if (key.getText().equals("generateAmbigWarnings")) {
+-            if (value.getText().equals("true")) {
+-                generateAmbigWarnings = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                generateAmbigWarnings = false;
+-            }
+-            else {
+-                grammar.antlrTool.error("Value for generateAmbigWarnings must be true or false", grammar.getFilename(), key.getLine(), key.getColumn());
+-            }
+-        }
+-        else {
+-            grammar.antlrTool.error("Invalid rule option: " + key.getText(), grammar.getFilename(), key.getLine(), key.getColumn());
+-        }
+-    }
+-
+-    public String toString() {
+-        String s = " FOLLOW={";
+-        Lookahead cache[] = endNode.cache;
+-        int k = grammar.maxk;
+-        boolean allNull = true;
+-        for (int j = 1; j <= k; j++) {
+-            if (cache[j] == null) continue;
+-            s += cache[j].toString(",", grammar.tokenManager.getVocabulary());
+-            allNull = false;
+-            if (j < k && cache[j + 1] != null) s += ";";
+-        }
+-        s += "}";
+-        if (allNull) s = "";
+-        return ruleName + ": " + super.toString() + " ;" + s;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/RuleEndElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleEndElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/RuleEndElement.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleEndElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,32 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**Contains a list of all places that reference
+- * this enclosing rule.  Useful for FOLLOW computations.
+- */
+-class RuleEndElement extends BlockEndElement {
+-    protected Lookahead[] cache;	// Each rule can cache it's lookahead computation.
+-    // The FOLLOW(rule) is stored in this cache.
+-    // 1..k
+-    protected boolean noFOLLOW;
+-
+-
+-    public RuleEndElement(Grammar g) {
+-        super(g);
+-        cache = new Lookahead[g.maxk + 1];
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        //return " [RuleEnd]";
+-        return "";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/RuleRefElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleRefElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/RuleRefElement.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleRefElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,72 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class RuleRefElement extends AlternativeElement {
+-    protected String targetRule; // which rule is being called?
+-    protected String args = null;		 // were any args passed to rule?
+-    protected String idAssign = null;	 // is the return type assigned to a variable?
+-    protected String label;
+-
+-
+-    public RuleRefElement(Grammar g, Token t, int autoGenType_) {
+-        super(g, t, autoGenType_);
+-        targetRule = t.getText();
+-        //		if ( Character.isUpperCase(targetRule.charAt(0)) ) { // lexer rule?
+-        if (t.type == ANTLRTokenTypes.TOKEN_REF) { // lexer rule?
+-            targetRule = CodeGenerator.encodeLexerRuleName(targetRule);
+-        }
+-    }
+-
+-//	public RuleRefElement(Grammar g, String t, int line, int autoGenType_) {
+-//		super(g, autoGenType_);
+-//		targetRule = t;
+-//		if ( Character.isUpperCase(targetRule.charAt(0)) ) { // lexer rule?
+-//			targetRule = CodeGenerator.lexerRuleName(targetRule);
+-//		}
+-//		this.line = line;
+-//	}
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public String getArgs() {
+-        return args;
+-    }
+-
+-    public String getIdAssign() {
+-        return idAssign;
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public void setArgs(String a) {
+-        args = a;
+-    }
+-
+-    public void setIdAssign(String id) {
+-        idAssign = id;
+-    }
+-
+-    public void setLabel(String label_) {
+-        label = label_;
+-    }
+-
+-    public String toString() {
+-        if (args != null)
+-            return " " + targetRule + args;
+-        else
+-            return " " + targetRule;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/RuleSymbol.java glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleSymbol.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/RuleSymbol.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/RuleSymbol.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,52 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-class RuleSymbol extends GrammarSymbol {
+-    RuleBlock block;	// list of alternatives
+-    boolean defined;	// has the rule been defined yet?
+-    Vector references;	// list of all nodes referencing this rule
+-    // not strictly needed by generic symbol table
+-    // but we will almost always analyze/gen code
+-    String access;	// access specifier for this rule
+-    String comment;	// A javadoc comment if any.
+-
+-    public RuleSymbol(String r) {
+-        super(r);
+-        references = new Vector();
+-    }
+-
+-    public void addReference(RuleRefElement e) {
+-        references.appendElement(e);
+-    }
+-
+-    public RuleBlock getBlock() {
+-        return block;
+-    }
+-
+-    public RuleRefElement getReference(int i) {
+-        return (RuleRefElement)references.elementAt(i);
+-    }
+-
+-    public boolean isDefined() {
+-        return defined;
+-    }
+-
+-    public int numReferences() {
+-        return references.size();
+-    }
+-
+-    public void setBlock(RuleBlock rb) {
+-        block = rb;
+-    }
+-
+-    public void setDefined() {
+-        defined = true;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/SemanticException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/SemanticException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/SemanticException.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/SemanticException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,22 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public class SemanticException extends RecognitionException {
+-    public SemanticException(String s) {
+-        super(s);
+-    }
+-
+-    /** @deprecated As of ANTLR 2.7.2 use {@see #SemanticException(char, String, int, int) } */
+-	public SemanticException(String s, String fileName, int line) {
+-        this(s, fileName, line, -1);
+-    }
+-
+-	public SemanticException(String s, String fileName, int line, int column) {
+-        super(s, fileName, line, column);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/SimpleTokenManager.java glassfish-gil/entity-persistence/src/java/persistence/antlr/SimpleTokenManager.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/SimpleTokenManager.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/SimpleTokenManager.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,141 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.*;
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-class SimpleTokenManager implements TokenManager, Cloneable {
+-    protected int maxToken = Token.MIN_USER_TYPE;
+-    // Token vocabulary is Vector of String's
+-    protected Vector vocabulary;
+-    // Hash table is a mapping from Strings to TokenSymbol
+-    private Hashtable table;
+-    // the ANTLR tool
+-    protected Tool antlrTool;
+-    // Name of the token manager
+-    protected String name;
+-
+-    protected boolean readOnly = false;
+-
+-    SimpleTokenManager(String name_, Tool tool_) {
+-        antlrTool = tool_;
+-        name = name_;
+-        // Don't make a bigger vector than we need, because it will show up in output sets.
+-        vocabulary = new Vector(1);
+-        table = new Hashtable();
+-
+-        // define EOF symbol
+-        TokenSymbol ts = new TokenSymbol("EOF");
+-        ts.setTokenType(Token.EOF_TYPE);
+-        define(ts);
+-
+-        // define <null-tree-lookahead> but only in the vocabulary vector
+-        vocabulary.ensureCapacity(Token.NULL_TREE_LOOKAHEAD);
+-        vocabulary.setElementAt("NULL_TREE_LOOKAHEAD", Token.NULL_TREE_LOOKAHEAD);
+-    }
+-
+-    public Object clone() {
+-        SimpleTokenManager tm;
+-        try {
+-            tm = (SimpleTokenManager)super.clone();
+-            tm.vocabulary = (Vector)this.vocabulary.clone();
+-            tm.table = (Hashtable)this.table.clone();
+-            tm.maxToken = this.maxToken;
+-            tm.antlrTool = this.antlrTool;
+-            tm.name = this.name;
+-        }
+-        catch (CloneNotSupportedException e) {
+-            antlrTool.panic("cannot clone token manager");
+-            return null;
+-        }
+-        return tm;
+-    }
+-
+-    /** define a token */
+-    public void define(TokenSymbol ts) {
+-        // Add the symbol to the vocabulary vector
+-        vocabulary.ensureCapacity(ts.getTokenType());
+-        vocabulary.setElementAt(ts.getId(), ts.getTokenType());
+-        // add the symbol to the hash table
+-        mapToTokenSymbol(ts.getId(), ts);
+-    }
+-
+-    /** Simple token manager doesn't have a name -- must be set externally */
+-    public String getName() {
+-        return name;
+-    }
+-
+-    /** Get a token symbol by index */
+-    public String getTokenStringAt(int idx) {
+-        return (String)vocabulary.elementAt(idx);
+-    }
+-
+-    /** Get the TokenSymbol for a string */
+-    public TokenSymbol getTokenSymbol(String sym) {
+-        return (TokenSymbol)table.get(sym);
+-    }
+-
+-    /** Get a token symbol by index */
+-    public TokenSymbol getTokenSymbolAt(int idx) {
+-        return getTokenSymbol(getTokenStringAt(idx));
+-    }
+-
+-    /** Get an enumerator over the symbol table */
+-    public Enumeration getTokenSymbolElements() {
+-        return table.elements();
+-    }
+-
+-    public Enumeration getTokenSymbolKeys() {
+-        return table.keys();
+-    }
+-
+-    /** Get the token vocabulary (read-only).
+-     * @return A Vector of TokenSymbol
+-     */
+-    public Vector getVocabulary() {
+-        return vocabulary;
+-    }
+-
+-    /** Simple token manager is not read-only */
+-    public boolean isReadOnly() {
+-        return false;
+-    }
+-
+-    /** Map a label or string to an existing token symbol */
+-    public void mapToTokenSymbol(String name, TokenSymbol sym) {
+-        // System.out.println("mapToTokenSymbol("+name+","+sym+")");
+-        table.put(name, sym);
+-    }
+-
+-    /** Get the highest token type in use */
+-    public int maxTokenType() {
+-        return maxToken - 1;
+-    }
+-
+-    /** Get the next unused token type */
+-    public int nextTokenType() {
+-        return maxToken++;
+-    }
+-
+-    /** Set the name of the token manager */
+-    public void setName(String name_) {
+-        name = name_;
+-    }
+-
+-    public void setReadOnly(boolean ro) {
+-        readOnly = ro;
+-    }
+-
+-    /** Is a token symbol defined? */
+-    public boolean tokenDefined(String symbol) {
+-        return table.containsKey(symbol);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/StringLiteralElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/StringLiteralElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/StringLiteralElement.java	2006-08-31 00:34:09.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/StringLiteralElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,64 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class StringLiteralElement extends GrammarAtom {
+-    // atomText with quotes stripped and escape codes processed
+-    protected String processedAtomText;
+-
+-
+-    public StringLiteralElement(Grammar g, Token t, int autoGenType) {
+-        super(g, t, autoGenType);
+-        if (!(g instanceof LexerGrammar)) {
+-            // lexer does not have token types for string literals
+-            TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atomText);
+-            if (ts == null) {
+-                g.antlrTool.error("Undefined literal: " + atomText, grammar.getFilename(), t.getLine(), t.getColumn());
+-            }
+-            else {
+-                tokenType = ts.getTokenType();
+-            }
+-        }
+-        line = t.getLine();
+-
+-        // process the string literal text by removing quotes and escaping chars
+-        // If a lexical grammar, add the characters to the char vocabulary
+-        processedAtomText = new String();
+-        for (int i = 1; i < atomText.length() - 1; i++) {
+-            char c = atomText.charAt(i);
+-            if (c == '\\') {
+-                if (i + 1 < atomText.length() - 1) {
+-                    i++;
+-                    c = atomText.charAt(i);
+-                    switch (c) {
+-                        case 'n':
+-                            c = '\n';
+-                            break;
+-                        case 'r':
+-                            c = '\r';
+-                            break;
+-                        case 't':
+-                            c = '\t';
+-                            break;
+-                    }
+-                }
+-            }
+-            if (g instanceof LexerGrammar) {
+-                ((LexerGrammar)g).charVocabulary.add(c);
+-            }
+-            processedAtomText += c;
+-        }
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/StringLiteralSymbol.java glassfish-gil/entity-persistence/src/java/persistence/antlr/StringLiteralSymbol.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/StringLiteralSymbol.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/StringLiteralSymbol.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,24 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class StringLiteralSymbol extends TokenSymbol {
+-    protected String label;	// was this string literal labeled?
+-
+-
+-    public StringLiteralSymbol(String r) {
+-        super(r);
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public void setLabel(String label) {
+-        this.label = label;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/StringUtils.java glassfish-gil/entity-persistence/src/java/persistence/antlr/StringUtils.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/StringUtils.java	2006-02-08 22:30:59.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/StringUtils.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,85 +0,0 @@
+-package persistence.antlr;
+-
+-public class StringUtils {
+-    /** General-purpose utility function for removing
+-     * characters from back of string
+-     * @param s The string to process
+-     * @param c The character to remove
+-     * @return The resulting string
+-     */
+-    static public String stripBack(String s, char c) {
+-        while (s.length() > 0 && s.charAt(s.length() - 1) == c) {
+-            s = s.substring(0, s.length() - 1);
+-        }
+-        return s;
+-    }
+-
+-    /** General-purpose utility function for removing
+-     * characters from back of string
+-     * @param s The string to process
+-     * @param remove A string containing the set of characters to remove
+-     * @return The resulting string
+-     */
+-    static public String stripBack(String s, String remove) {
+-        boolean changed;
+-        do {
+-            changed = false;
+-            for (int i = 0; i < remove.length(); i++) {
+-                char c = remove.charAt(i);
+-                while (s.length() > 0 && s.charAt(s.length() - 1) == c) {
+-                    changed = true;
+-                    s = s.substring(0, s.length() - 1);
+-                }
+-            }
+-        } while (changed);
+-        return s;
+-    }
+-
+-    /** General-purpose utility function for removing
+-     * characters from front of string
+-     * @param s The string to process
+-     * @param c The character to remove
+-     * @return The resulting string
+-     */
+-    static public String stripFront(String s, char c) {
+-        while (s.length() > 0 && s.charAt(0) == c) {
+-            s = s.substring(1);
+-        }
+-        return s;
+-    }
+-
+-    /** General-purpose utility function for removing
+-     * characters from front of string
+-     * @param s The string to process
+-     * @param remove A string containing the set of characters to remove
+-     * @return The resulting string
+-     */
+-    static public String stripFront(String s, String remove) {
+-        boolean changed;
+-        do {
+-            changed = false;
+-            for (int i = 0; i < remove.length(); i++) {
+-                char c = remove.charAt(i);
+-                while (s.length() > 0 && s.charAt(0) == c) {
+-                    changed = true;
+-                    s = s.substring(1);
+-                }
+-            }
+-        } while (changed);
+-        return s;
+-    }
+-
+-    /** General-purpose utility function for removing
+-     * characters from the front and back of string
+-     * @param s The string to process
+-     * @param head exact string to strip from head
+-     * @param tail exact string to strip from tail
+-     * @return The resulting string
+-     */
+-    public static String stripFrontBack(String src, String head, String tail) {
+-        int h = src.indexOf(head);
+-        int t = src.lastIndexOf(tail);
+-        if (h == -1 || t == -1) return src;
+-        return src.substring(h + 1, t);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/SynPredBlock.java glassfish-gil/entity-persistence/src/java/persistence/antlr/SynPredBlock.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/SynPredBlock.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/SynPredBlock.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,30 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class SynPredBlock extends AlternativeBlock {
+-
+-    public SynPredBlock(Grammar g) {
+-        super(g);
+-    }
+-
+-    public SynPredBlock(Grammar g, Token start) {
+-        super(g, start, false);
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        return super.toString() + "=>";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenBuffer.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenBuffer.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenBuffer.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenBuffer.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,125 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**A Stream of Token objects fed to the parser from a Tokenizer that can
+- * be rewound via mark()/rewind() methods.
+- * <p>
+- * A dynamic array is used to buffer up all the input tokens.  Normally,
+- * "k" tokens are stored in the buffer.  More tokens may be stored during
+- * guess mode (testing syntactic predicate), or when LT(i>k) is referenced.
+- * Consumption of tokens is deferred.  In other words, reading the next
+- * token is not done by conume(), but deferred until needed by LA or LT.
+- * <p>
+- *
+- * @see persistence.antlr.Token
+- * @see persistence.antlr.Tokenizer
+- * @see persistence.antlr.TokenQueue
+- */
+-
+-import java.io.IOException;
+-
+-public class TokenBuffer {
+-
+-    // Token source
+-    protected TokenStream input;
+-
+-    // Number of active markers
+-    int nMarkers = 0;
+-
+-    // Additional offset used when markers are active
+-    int markerOffset = 0;
+-
+-    // Number of calls to consume() since last LA() or LT() call
+-    int numToConsume = 0;
+-
+-    // Circular queue
+-    TokenQueue queue;
+-
+-    /** Create a token buffer */
+-    public TokenBuffer(TokenStream input_) {
+-        input = input_;
+-        queue = new TokenQueue(1);
+-    }
+-
+-    /** Reset the input buffer to empty state */
+-    public final void reset() {
+-        nMarkers = 0;
+-        markerOffset = 0;
+-        numToConsume = 0;
+-        queue.reset();
+-    }
+-
+-    /** Mark another token for deferred consumption */
+-    public final void consume() {
+-        numToConsume++;
+-    }
+-
+-    /** Ensure that the token buffer is sufficiently full */
+-    private final void fill(int amount) throws TokenStreamException {
+-        syncConsume();
+-        // Fill the buffer sufficiently to hold needed tokens
+-        while (queue.nbrEntries < amount + markerOffset) {
+-            // Append the next token
+-            queue.append(input.nextToken());
+-        }
+-    }
+-
+-    /** return the Tokenizer (needed by ParseView) */
+-    public TokenStream getInput() {
+-        return input;
+-    }
+-
+-    /** Get a lookahead token value */
+-    public final int LA(int i) throws TokenStreamException {
+-        fill(i);
+-        return queue.elementAt(markerOffset + i - 1).type;
+-    }
+-
+-    /** Get a lookahead token */
+-    public final Token LT(int i) throws TokenStreamException {
+-        fill(i);
+-        return queue.elementAt(markerOffset + i - 1);
+-    }
+-
+-    /**Return an integer marker that can be used to rewind the buffer to
+-     * its current state.
+-     */
+-    public final int mark() {
+-        syncConsume();
+-//System.out.println("Marking at " + markerOffset);
+-//try { for (int i = 1; i <= 2; i++) { System.out.println("LA("+i+")=="+LT(i).getText()); } } catch (ScannerException e) {}
+-        nMarkers++;
+-        return markerOffset;
+-    }
+-
+-    /**Rewind the token buffer to a marker.
+-     * @param mark Marker returned previously from mark()
+-     */
+-    public final void rewind(int mark) {
+-        syncConsume();
+-        markerOffset = mark;
+-        nMarkers--;
+-//System.out.println("Rewinding to " + mark);
+-//try { for (int i = 1; i <= 2; i++) { System.out.println("LA("+i+")=="+LT(i).getText()); } } catch (ScannerException e) {}
+-    }
+-
+-    /** Sync up deferred consumption */
+-    private final void syncConsume() {
+-        while (numToConsume > 0) {
+-            if (nMarkers > 0) {
+-                // guess mode -- leave leading tokens and bump offset.
+-                markerOffset++;
+-            }
+-            else {
+-                // normal mode -- remove first token
+-                queue.removeFirst();
+-            }
+-            numToConsume--;
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Token.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Token.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Token.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Token.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,77 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** A token is minimally a token type.  Subclasses can add the text matched
+- *  for the token and line info.
+- */
+-public class Token implements Cloneable {
+-    // constants
+-    public static final int MIN_USER_TYPE = 4;
+-    public static final int NULL_TREE_LOOKAHEAD = 3;
+-    public static final int INVALID_TYPE = 0;
+-    public static final int EOF_TYPE = 1;
+-    public static final int SKIP = -1;
+-
+-    // each Token has at least a token type
+-    int type = INVALID_TYPE;
+-
+-    // the illegal token object
+-    public static Token badToken = new Token(INVALID_TYPE, "<no text>");
+-
+-    public Token() {
+-    }
+-
+-    public Token(int t) {
+-        type = t;
+-    }
+-
+-    public Token(int t, String txt) {
+-        type = t;
+-        setText(txt);
+-    }
+-
+-    public int getColumn() {
+-        return 0;
+-    }
+-
+-    public int getLine() {
+-        return 0;
+-    }
+-
+-	public String getFilename() {
+-		return null;
+-	}
+-
+-	public void setFilename(String name) {
+-	}
+-
+-    public String getText() {
+-        return "<no text>";
+-    }
+-
+-	public void setText(String t) {
+-    }
+-
+-    public void setColumn(int c) {
+-    }
+-
+-    public void setLine(int l) {
+-    }
+-
+-	public int getType() {
+-        return type;
+-    }
+-
+-    public void setType(int t) {
+-        type = t;
+-    }
+-
+-    public String toString() {
+-        return "[\"" + getText() + "\",<" + type + ">]";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenManager.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenManager.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenManager.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenManager.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,58 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-
+-import persistence.antlr.collections.impl.Vector;
+-
+-/** Interface that describes the set of defined tokens */
+-interface TokenManager {
+-    public Object clone();
+-
+-    /** define a token symbol */
+-    public void define(TokenSymbol ts);
+-
+-    /** Get the name of the token manager */
+-    public String getName();
+-
+-    /** Get a token string by index */
+-    public String getTokenStringAt(int idx);
+-
+-    /** Get the TokenSymbol for a string */
+-    public TokenSymbol getTokenSymbol(String sym);
+-
+-    public TokenSymbol getTokenSymbolAt(int idx);
+-
+-    /** Get an enumerator over the symbol table */
+-    public Enumeration getTokenSymbolElements();
+-
+-    public Enumeration getTokenSymbolKeys();
+-
+-    /** Get the token vocabulary (read-only).
+-     * @return A Vector of Strings indexed by token type */
+-    public Vector getVocabulary();
+-
+-    /** Is this token manager read-only? */
+-    public boolean isReadOnly();
+-
+-    public void mapToTokenSymbol(String name, TokenSymbol sym);
+-
+-    /** Get the highest token type in use */
+-    public int maxTokenType();
+-
+-    /** Get the next unused token type */
+-    public int nextTokenType();
+-
+-    public void setName(String n);
+-
+-    public void setReadOnly(boolean ro);
+-
+-    /** Is a token symbol defined? */
+-    public boolean tokenDefined(String symbol);
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenQueue.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenQueue.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenQueue.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenQueue.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,95 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** A private circular buffer object used by the token buffer */
+-class TokenQueue {
+-    /** Physical circular buffer of tokens */
+-    private Token[] buffer;
+-    /** buffer.length-1 for quick modulos */
+-    private int sizeLessOne;
+-    /** physical index of front token */
+-    private int offset;
+-    /** number of tokens in the queue */
+-    protected int nbrEntries;
+-
+-    public TokenQueue(int minSize) {
+-        // Find first power of 2 >= to requested size
+-        int size;
+-        if ( minSize<0 ) {
+-            init(16); // pick some value for them
+-            return;
+-        }
+-        // check for overflow
+-        if ( minSize>=(Integer.MAX_VALUE/2) ) {
+-            init(Integer.MAX_VALUE); // wow that's big.
+-            return;
+-        }
+-        for (size = 2; size < minSize; size *= 2) {
+-            ;
+-        }
+-        init(size);
+-    }
+-
+-    /** Add token to end of the queue
+-     * @param tok The token to add
+-     */
+-    public final void append(Token tok) {
+-        if (nbrEntries == buffer.length) {
+-            expand();
+-        }
+-        buffer[(offset + nbrEntries) & sizeLessOne] = tok;
+-        nbrEntries++;
+-    }
+-
+-    /** Fetch a token from the queue by index
+-     * @param idx The index of the token to fetch, where zero is the token at the front of the queue
+-     */
+-    public final Token elementAt(int idx) {
+-        return buffer[(offset + idx) & sizeLessOne];
+-    }
+-
+-    /** Expand the token buffer by doubling its capacity */
+-    private final void expand() {
+-        Token[] newBuffer = new Token[buffer.length * 2];
+-        // Copy the contents to the new buffer
+-        // Note that this will store the first logical item in the
+-        // first physical array element.
+-        for (int i = 0; i < buffer.length; i++) {
+-            newBuffer[i] = elementAt(i);
+-        }
+-        // Re-initialize with new contents, keep old nbrEntries
+-        buffer = newBuffer;
+-        sizeLessOne = buffer.length - 1;
+-        offset = 0;
+-    }
+-
+-    /** Initialize the queue.
+-     * @param size The initial size of the queue
+-     */
+-    private final void init(int size) {
+-        // Allocate buffer
+-        buffer = new Token[size];
+-        // Other initialization
+-        sizeLessOne = size - 1;
+-        offset = 0;
+-        nbrEntries = 0;
+-    }
+-
+-    /** Clear the queue. Leaving the previous buffer alone.
+-     */
+-    public final void reset() {
+-        offset = 0;
+-        nbrEntries = 0;
+-    }
+-
+-    /** Remove token from front of queue */
+-    public final void removeFirst() {
+-        offset = (offset + 1) & sizeLessOne;
+-        nbrEntries--;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenRangeElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenRangeElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenRangeElement.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenRangeElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,49 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class TokenRangeElement extends AlternativeElement {
+-    String label;
+-    protected int begin = Token.INVALID_TYPE;
+-    protected int end = Token.INVALID_TYPE;
+-    protected String beginText;
+-    protected String endText;
+-
+-    public TokenRangeElement(Grammar g, Token t1, Token t2, int autoGenType) {
+-        super(g, t1, autoGenType);
+-        begin = grammar.tokenManager.getTokenSymbol(t1.getText()).getTokenType();
+-        beginText = t1.getText();
+-        end = grammar.tokenManager.getTokenSymbol(t2.getText()).getTokenType();
+-        endText = t2.getText();
+-        line = t1.getLine();
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public void setLabel(String label_) {
+-        label = label_;
+-    }
+-
+-    public String toString() {
+-        if (label != null) {
+-            return " " + label + ":" + beginText + ".." + endText;
+-        }
+-        else {
+-            return " " + beginText + ".." + endText;
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenRefElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenRefElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenRefElement.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenRefElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,39 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class TokenRefElement extends GrammarAtom {
+-
+-    public TokenRefElement(Grammar g,
+-                           Token t,
+-                           boolean inverted,
+-                           int autoGenType) {
+-        super(g, t, autoGenType);
+-        not = inverted;
+-        TokenSymbol ts = grammar.tokenManager.getTokenSymbol(atomText);
+-        if (ts == null) {
+-            g.antlrTool.error("Undefined token symbol: " +
+-                         atomText, grammar.getFilename(), t.getLine(), t.getColumn());
+-        }
+-        else {
+-            tokenType = ts.getTokenType();
+-            // set the AST node type to whatever was set in tokens {...}
+-            // section (if anything);
+-            // Lafter, after this is created, the element option can set this.
+-            setASTNodeType(ts.getASTNodeType());
+-        }
+-        line = t.getLine();
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamBasicFilter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamBasicFilter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamBasicFilter.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamBasicFilter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,42 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-/** This object is a TokenStream that passes through all
+- *  tokens except for those that you tell it to discard.
+- *  There is no buffering of the tokens.
+- */
+-public class TokenStreamBasicFilter implements TokenStream {
+-    /** The set of token types to discard */
+-    protected BitSet discardMask;
+-
+-    /** The input stream */
+-    protected TokenStream input;
+-
+-    public TokenStreamBasicFilter(TokenStream input) {
+-        this.input = input;
+-        discardMask = new BitSet();
+-    }
+-
+-    public void discard(int ttype) {
+-        discardMask.add(ttype);
+-    }
+-
+-    public void discard(BitSet mask) {
+-        discardMask = mask;
+-    }
+-
+-    public Token nextToken() throws TokenStreamException {
+-        Token tok = input.nextToken();
+-        while (tok != null && discardMask.member(tok.getType())) {
+-            tok = input.nextToken();
+-        }
+-        return tok;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamException.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,19 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**
+- * Anything that goes wrong while generating a stream of tokens.
+- */
+-public class TokenStreamException extends ANTLRException {
+-    public TokenStreamException() {
+-    }
+-
+-    public TokenStreamException(String s) {
+-        super(s);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamHiddenTokenFilter.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamHiddenTokenFilter.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamHiddenTokenFilter.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamHiddenTokenFilter.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,151 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-/**This object filters a token stream coming from a lexer
+- * or another TokenStream so that only certain token channels
+- * get transmitted to the parser.
+- *
+- * Any of the channels can be filtered off as "hidden" channels whose
+- * tokens can be accessed from the parser.
+- */
+-public class TokenStreamHiddenTokenFilter extends TokenStreamBasicFilter implements TokenStream {
+-    // protected BitSet discardMask;
+-    protected BitSet hideMask;
+-
+-    protected CommonHiddenStreamToken nextMonitoredToken;
+-
+-    /** track tail of hidden list emanating from previous
+-     *  monitored token
+-     */
+-    protected CommonHiddenStreamToken lastHiddenToken;
+-
+-    protected CommonHiddenStreamToken firstHidden = null;
+-
+-    public TokenStreamHiddenTokenFilter(TokenStream input) {
+-        super(input);
+-        hideMask = new BitSet();
+-    }
+-
+-    protected void consume() throws TokenStreamException {
+-        nextMonitoredToken = (CommonHiddenStreamToken)input.nextToken();
+-    }
+-
+-    private void consumeFirst() throws TokenStreamException {
+-        consume(); // get first token of input stream
+-
+-        // Handle situation where hidden or discarded tokens
+-        // appear first in input stream
+-        CommonHiddenStreamToken p = null;
+-        // while hidden or discarded scarf tokens
+-        while (hideMask.member(LA(1).getType()) || discardMask.member(LA(1).getType())) {
+-            if (hideMask.member(LA(1).getType())) {
+-                if (p == null) {
+-                    p = LA(1);
+-                }
+-                else {
+-                    p.setHiddenAfter(LA(1));
+-                    LA(1).setHiddenBefore(p); // double-link
+-                    p = LA(1);
+-                }
+-                lastHiddenToken = p;
+-                if (firstHidden == null) {
+-                    firstHidden = p; // record hidden token if first
+-                }
+-            }
+-            consume();
+-        }
+-    }
+-
+-    public BitSet getDiscardMask() {
+-        return discardMask;
+-    }
+-
+-    /** Return a ptr to the hidden token appearing immediately after
+-     *  token t in the input stream.
+-     */
+-    public CommonHiddenStreamToken getHiddenAfter(CommonHiddenStreamToken t) {
+-        return t.getHiddenAfter();
+-    }
+-
+-    /** Return a ptr to the hidden token appearing immediately before
+-     *  token t in the input stream.
+-     */
+-    public CommonHiddenStreamToken getHiddenBefore(CommonHiddenStreamToken t) {
+-        return t.getHiddenBefore();
+-    }
+-
+-    public BitSet getHideMask() {
+-        return hideMask;
+-    }
+-
+-    /** Return the first hidden token if one appears
+-     *  before any monitored token.
+-     */
+-    public CommonHiddenStreamToken getInitialHiddenToken() {
+-        return firstHidden;
+-    }
+-
+-    public void hide(int m) {
+-        hideMask.add(m);
+-    }
+-
+-    public void hide(BitSet mask) {
+-        hideMask = mask;
+-    }
+-
+-    protected CommonHiddenStreamToken LA(int i) {
+-        return nextMonitoredToken;
+-    }
+-
+-    /** Return the next monitored token.
+-     *  Test the token following the monitored token.
+-     *  If following is another monitored token, save it
+-     *  for the next invocation of nextToken (like a single
+-     *  lookahead token) and return it then.
+-     *  If following is unmonitored, nondiscarded (hidden)
+-     *  channel token, add it to the monitored token.
+-     *
+-     *  Note: EOF must be a monitored Token.
+-     */
+-    public Token nextToken() throws TokenStreamException {
+-        // handle an initial condition; don't want to get lookahead
+-        // token of this splitter until first call to nextToken
+-        if (LA(1) == null) {
+-            consumeFirst();
+-        }
+-
+-        // we always consume hidden tokens after monitored, thus,
+-        // upon entry LA(1) is a monitored token.
+-        CommonHiddenStreamToken monitored = LA(1);
+-        // point to hidden tokens found during last invocation
+-        monitored.setHiddenBefore(lastHiddenToken);
+-        lastHiddenToken = null;
+-
+-        // Look for hidden tokens, hook them into list emanating
+-        // from the monitored tokens.
+-        consume();
+-        CommonHiddenStreamToken p = monitored;
+-        // while hidden or discarded scarf tokens
+-        while (hideMask.member(LA(1).getType()) || discardMask.member(LA(1).getType())) {
+-            if (hideMask.member(LA(1).getType())) {
+-                // attach the hidden token to the monitored in a chain
+-                // link forwards
+-                p.setHiddenAfter(LA(1));
+-                // link backwards
+-                if (p != monitored) { //hidden cannot point to monitored tokens
+-                    LA(1).setHiddenBefore(p);
+-                }
+-                p = lastHiddenToken = LA(1);
+-            }
+-            consume();
+-        }
+-        return monitored;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamIOException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamIOException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamIOException.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamIOException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,25 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.IOException;
+-
+-/**
+- * Wraps an IOException in a TokenStreamException
+- */
+-public class TokenStreamIOException extends TokenStreamException {
+-    public IOException io;
+-
+-    /**
+-     * TokenStreamIOException constructor comment.
+-     * @param s java.lang.String
+-     */
+-    public TokenStreamIOException(IOException io) {
+-        super(io.getMessage());
+-        this.io = io;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStream.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStream.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStream.java	2006-08-31 00:34:10.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStream.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,11 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-public interface TokenStream {
+-    public Token nextToken() throws TokenStreamException;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamRecognitionException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamRecognitionException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamRecognitionException.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamRecognitionException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,24 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**
+- * Wraps a RecognitionException in a TokenStreamException so you
+- * can pass it along.
+- */
+-public class TokenStreamRecognitionException extends TokenStreamException {
+-    public RecognitionException recog;
+-
+-    public TokenStreamRecognitionException(RecognitionException re) {
+-        super(re.getMessage());
+-        this.recog = re;
+-    }
+-
+-    public String toString() {
+-        return recog.toString();
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamRetryException.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamRetryException.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamRetryException.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamRetryException.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,17 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**
+- * Aborted recognition of current token. Try to get one again.
+- * Used by TokenStreamSelector.retry() to force nextToken()
+- * of stream to re-enter and retry.
+- */
+-public class TokenStreamRetryException extends TokenStreamException {
+-    public TokenStreamRetryException() {
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamRewriteEngine.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamRewriteEngine.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamRewriteEngine.java	2006-02-08 22:31:04.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamRewriteEngine.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,422 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import java.util.*;
+-
+-/** This token stream tracks the *entire* token stream coming from
+- *  a lexer, but does not pass on the whitespace (or whatever else
+- *  you want to discard) to the parser.
+- *
+- *  This class can then be asked for the ith token in the input stream.
+- *  Useful for dumping out the input stream exactly after doing some
+- *  augmentation or other manipulations.  Tokens are index from 0..n-1
+- *
+- *  You can insert stuff, replace, and delete chunks.  Note that the
+- *  operations are done lazily--only if you convert the buffer to a
+- *  String.  This is very efficient because you are not moving data around
+- *  all the time.  As the buffer of tokens is converted to strings, the
+- *  toString() method(s) check to see if there is an operation at the
+- *  current index.  If so, the operation is done and then normal String
+- *  rendering continues on the buffer.  This is like having multiple Turing
+- *  machine instruction streams (programs) operating on a single input tape. :)
+- *
+- *  Since the operations are done lazily at toString-time, operations do not
+- *  screw up the token index values.  That is, an insert operation at token
+- *  index i does not change the index values for tokens i+1..n-1.
+- *
+- *  Because operations never actually alter the buffer, you may always get
+- *  the original token stream back without undoing anything.  Since
+- *  the instructions are queued up, you can easily simulate transactions and
+- *  roll back any changes if there is an error just by removing instructions.
+- *  For example,
+- *
+- * 		TokenStreamRewriteEngine rewriteEngine =
+- * 			new TokenStreamRewriteEngine(lexer);
+- *      JavaRecognizer parser = new JavaRecognizer(rewriteEngine);
+- *      ...
+- *      rewriteEngine.insertAfter("pass1", t, "foobar");}
+- * 		rewriteEngine.insertAfter("pass2", u, "start");}
+- * 		System.out.println(rewriteEngine.toString("pass1"));
+- * 		System.out.println(rewriteEngine.toString("pass2"));
+- *
+- *  You can also have multiple "instruction streams" and get multiple
+- *  rewrites from a single pass over the input.  Just name the instruction
+- *  streams and use that name again when printing the buffer.  This could be
+- *  useful for generating a C file and also its header file--all from the
+- *  same buffer.
+- *
+- *  If you don't use named rewrite streams, a "default" stream is used.
+- *
+- *  Terence Parr, parrt at cs.usfca.edu
+- *  University of San Francisco
+- *  February 2004
+- */
+-public class TokenStreamRewriteEngine implements TokenStream {
+-	public static final int MIN_TOKEN_INDEX = 0;
+-
+-	static class RewriteOperation {
+-		protected int index;
+-		protected String text;
+-		protected RewriteOperation(int index, String text) {
+-			this.index = index;
+-			this.text = text;
+-		}
+-		/** Execute the rewrite operation by possibly adding to the buffer.
+-		 *  Return the index of the next token to operate on.
+-		 */
+-		public int execute(StringBuffer buf) {
+-			return index;
+-		}
+-	}
+-
+-	static class InsertBeforeOp extends RewriteOperation {
+-		public InsertBeforeOp(int index, String text) {
+-			super(index,text);
+-		}
+-		public int execute(StringBuffer buf) {
+-			buf.append(text);
+-			return index;
+-		}
+-	}
+-
+-	static class ReplaceOp extends RewriteOperation {
+-		protected int lastIndex;
+-		public ReplaceOp(int from, int to, String text) {
+-			super(from,text);
+-			lastIndex = to;
+-		}
+-		public int execute(StringBuffer buf) {
+-			if ( text!=null ) {
+-				buf.append(text);
+-			}
+-			return lastIndex+1;
+-		}
+-	}
+-
+-	static class DeleteOp extends ReplaceOp {
+-		public DeleteOp(int from, int to) {
+-			super(from, to, null);
+-		}
+-	}
+-
+-	public static final String DEFAULT_PROGRAM_NAME = "default";
+-    public static final int PROGRAM_INIT_SIZE = 100;
+-
+-	/** Track the incoming list of tokens */
+-	protected List tokens;
+-
+-	/** You may have multiple, named streams of rewrite operations.
+-	 *  I'm calling these things "programs."
+-	 *  Maps String (name) -> rewrite (List)
+-	 */
+-	protected Map programs = null;
+-
+-	/** Map String (program name) -> Integer index */
+-	protected Map lastRewriteTokenIndexes = null;
+-
+-	/** track index of tokens */
+-	protected int index = MIN_TOKEN_INDEX;
+-
+-	/** Who do we suck tokens from? */
+-	protected TokenStream stream;
+-
+-	/** Which (whitespace) token(s) to throw out */
+-	protected BitSet discardMask = new BitSet();
+-
+-	public TokenStreamRewriteEngine(TokenStream upstream) {
+-		this(upstream,1000);
+-	}
+-
+-	public TokenStreamRewriteEngine(TokenStream upstream, int initialSize) {
+-		stream = upstream;
+-		tokens = new ArrayList(initialSize);
+-		programs = new HashMap();
+-		programs.put(DEFAULT_PROGRAM_NAME,
+-							   new ArrayList(PROGRAM_INIT_SIZE));
+-		lastRewriteTokenIndexes = new HashMap();
+-	}
+-
+-	public Token nextToken() throws TokenStreamException {
+-		TokenWithIndex t;
+-		// suck tokens until end of stream or we find a non-discarded token
+-		do {
+-           	t = (TokenWithIndex)stream.nextToken();
+-			if ( t!=null ) {
+-				t.setIndex(index);  // what is t's index in list?
+-				if ( t.getType()!=Token.EOF_TYPE ) {
+-					tokens.add(t);  // track all tokens except EOF
+-				}
+-				index++;			// move to next position
+-			}
+-		} while ( t!=null && discardMask.member(t.getType()) );
+-		return t;
+-	}
+-
+-	public void rollback(int instructionIndex) {
+-		rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
+-	}
+-
+-	/** Rollback the instruction stream for a program so that
+-	 *  the indicated instruction (via instructionIndex) is no
+-	 *  longer in the stream.  UNTESTED!
+-	 */
+-	public void rollback(String programName, int instructionIndex) {
+-		List is = (List)programs.get(programName);
+-		if ( is!=null ) {
+-			programs.put(programName, is.subList(MIN_TOKEN_INDEX,instructionIndex));
+-		}
+-	}
+-
+-	public void deleteProgram() {
+-		deleteProgram(DEFAULT_PROGRAM_NAME);
+-	}
+-
+-	/** Reset the program so that no instructions exist */
+-	public void deleteProgram(String programName) {
+-		rollback(programName, MIN_TOKEN_INDEX);
+-	}
+-
+-	/** If op.index > lastRewriteTokenIndexes, just add to the end.
+-	 *  Otherwise, do linear */
+-	protected void addToSortedRewriteList(RewriteOperation op) {
+-		addToSortedRewriteList(DEFAULT_PROGRAM_NAME, op);
+-	}
+-
+-	protected void addToSortedRewriteList(String programName, RewriteOperation op) {
+-		List rewrites = getProgram(programName);
+-		// if at or beyond last op's index, just append
+-		if ( op.index>=getLastRewriteTokenIndex(programName) ) {
+-			rewrites.add(op); // append to list of operations
+-			// record the index of this operation for next time through
+-			setLastRewriteTokenIndex(programName, op.index);
+-			return;
+-		}
+-		// not after the last one, so must insert to ordered list
+-		Comparator comparator = new Comparator() {
+-			public int compare(Object o, Object o1) {
+-				RewriteOperation a = (RewriteOperation)o;
+-				RewriteOperation b = (RewriteOperation)o1;
+-				if ( a.index<b.index ) return -1;
+-				if ( a.index>b.index ) return 1;
+-				return 0;
+-			}
+-		};
+-        int pos = Collections.binarySearch(rewrites, op, comparator);
+-		if ( pos<0 ) {
+-			rewrites.add(-pos-1, op);
+-		}
+-	}
+-
+-	public void insertAfter(Token t, String text) {
+-		insertAfter(DEFAULT_PROGRAM_NAME, t, text);
+-	}
+-
+-	public void insertAfter(int index, String text) {
+-		insertAfter(DEFAULT_PROGRAM_NAME, index, text);
+-	}
+-
+-	public void insertAfter(String programName, Token t, String text) {
+-		insertAfter(programName,((TokenWithIndex)t).getIndex(), text); 
+-	}
+-
+-	public void insertAfter(String programName, int index, String text) {
+-		// to insert after, just insert before next index (even if past end)
+-		insertBefore(programName,index+1, text); 
+-	}
+-
+-	public void insertBefore(Token t, String text) {
+-		insertBefore(DEFAULT_PROGRAM_NAME, t, text);
+-	}
+-
+-	public void insertBefore(int index, String text) {
+-		insertBefore(DEFAULT_PROGRAM_NAME, index, text);
+-	}
+-
+-	public void insertBefore(String programName, Token t, String text) {
+-		insertBefore(programName, ((TokenWithIndex)t).getIndex(), text);
+-	}
+-
+-	public void insertBefore(String programName, int index, String text) {
+-		addToSortedRewriteList(programName, new InsertBeforeOp(index,text));
+-	}
+-
+-	public void replace(int index, String text) {
+-		replace(DEFAULT_PROGRAM_NAME, index, index, text);
+-	}
+-
+-	public void replace(int from, int to, String text) {
+-		replace(DEFAULT_PROGRAM_NAME, from, to, text);
+-	}
+-
+-	public void replace(Token indexT, String text) {
+-		replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
+-	}
+-
+-	public void replace(Token from, Token to, String text) {
+-		replace(DEFAULT_PROGRAM_NAME, from, to, text);
+-	}
+-
+-	public void replace(String programName, int from, int to, String text) {
+-		addToSortedRewriteList(new ReplaceOp(from, to, text));
+-	}
+-
+-	public void replace(String programName, Token from, Token to, String text) {
+-		replace(programName,
+-				((TokenWithIndex)from).getIndex(),
+-				((TokenWithIndex)to).getIndex(),
+-				text);
+-	}
+-
+-	public void delete(int index) {
+-		delete(DEFAULT_PROGRAM_NAME, index, index);
+-	}
+-
+-	public void delete(int from, int to) {
+-		delete(DEFAULT_PROGRAM_NAME, from, to);
+-	}
+-
+-	public void delete(Token indexT) {
+-		delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
+-	}
+-
+-	public void delete(Token from, Token to) {
+-		delete(DEFAULT_PROGRAM_NAME, from, to);
+-	}
+-
+-	public void delete(String programName, int from, int to) {
+-		replace(programName,from,to,null);
+-	}
+-
+-	public void delete(String programName, Token from, Token to) {
+-		replace(programName,from,to,null);
+-	}
+-
+-	public void discard(int ttype) {
+-		discardMask.add(ttype);
+-	}
+-
+-	public TokenWithIndex getToken(int i) {
+-		return (TokenWithIndex)tokens.get(i);
+-	}
+-
+-	public int getTokenStreamSize() {
+-		return tokens.size();
+-	}
+-
+-	public String toOriginalString() {
+-		return toOriginalString(MIN_TOKEN_INDEX, getTokenStreamSize()-1);
+-	}
+-
+-	public String toOriginalString(int start, int end) {
+-		StringBuffer buf = new StringBuffer();
+-		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
+-			buf.append(getToken(i).getText());
+-		}
+-		return buf.toString();
+-	}
+-
+-	public String toString() {
+-		return toString(MIN_TOKEN_INDEX, getTokenStreamSize());
+-	}
+-
+-	public String toString(String programName) {
+-		return toString(programName, MIN_TOKEN_INDEX, getTokenStreamSize());
+-	}
+-
+-	public String toString(int start, int end) {
+-		return toString(DEFAULT_PROGRAM_NAME, start, end);
+-	}
+-
+-	public String toString(String programName, int start, int end) {
+-		List rewrites = (List)programs.get(programName);
+-		if ( rewrites==null ) {
+-			return null; // invalid program
+-		}
+-		StringBuffer buf = new StringBuffer();
+-
+-		/** Index of first rewrite we have not done */
+-		int rewriteOpIndex = 0;
+-
+-		int tokenCursor=start;
+-		while ( tokenCursor>=MIN_TOKEN_INDEX &&
+-				tokenCursor<=end &&
+-				tokenCursor<tokens.size() )
+-		{
+-			if ( rewriteOpIndex<rewrites.size() ) {
+-				RewriteOperation op =
+-						(RewriteOperation)rewrites.get(rewriteOpIndex);
+-				while ( tokenCursor==op.index && rewriteOpIndex<rewrites.size() ) {
+-					/*
+-					System.out.println("execute op "+rewriteOpIndex+
+-									   " (type "+op.getClass().getName()+")"
+-									   +" at index "+op.index);
+-					*/
+-					tokenCursor = op.execute(buf);
+-					rewriteOpIndex++;
+-					if ( rewriteOpIndex<rewrites.size() ) {
+-						op = (RewriteOperation)rewrites.get(rewriteOpIndex);
+-					}
+-				}
+-			}
+-			if ( tokenCursor<end ) {
+-				buf.append(getToken(tokenCursor).getText());
+-				tokenCursor++;
+-			}
+-		}
+-		// now see if there are operations (append) beyond last token index
+-		for (int opi=rewriteOpIndex; opi<rewrites.size(); opi++) {
+-			RewriteOperation op =
+-					(RewriteOperation)rewrites.get(opi);
+-			op.execute(buf); // must be insertions if after last token
+-		}
+-
+-		return buf.toString();
+-	}
+-
+-	public String toDebugString() {
+-		return toDebugString(MIN_TOKEN_INDEX, getTokenStreamSize());
+-	}
+-
+-	public String toDebugString(int start, int end) {
+-		StringBuffer buf = new StringBuffer();
+-		for (int i=start; i>=MIN_TOKEN_INDEX && i<=end && i<tokens.size(); i++) {
+-			buf.append(getToken(i));
+-		}
+-		return buf.toString();
+-	}
+-
+-	public int getLastRewriteTokenIndex() {
+-		return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
+-	}
+-
+-	protected int getLastRewriteTokenIndex(String programName) {
+-		Integer I = (Integer)lastRewriteTokenIndexes.get(programName);
+-		if ( I==null ) {
+-			return -1;
+-		}
+-		return I.intValue();
+-	}
+-
+-	protected void setLastRewriteTokenIndex(String programName, int i) {
+-		lastRewriteTokenIndexes.put(programName, new Integer(i));
+-	}
+-
+-	protected List getProgram(String name) {
+-		List is = (List)programs.get(name);
+-		if ( is==null ) {
+-			is = initializeProgram(name);
+-		}
+-		return is;
+-	}
+-
+-	private List initializeProgram(String name) {
+-		List is = new ArrayList(PROGRAM_INIT_SIZE);
+-		programs.put(name, is);
+-		return is;
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamSelector.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamSelector.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenStreamSelector.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenStreamSelector.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,106 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-
+-import persistence.antlr.collections.impl.LList;
+-import persistence.antlr.collections.Stack;
+-
+-import java.io.IOException;
+-
+-/** A token stream MUX (multiplexor) knows about n token streams
+- *  and can multiplex them onto the same channel for use by token
+- *  stream consumer like a parser.  This is a way to have multiple
+- *  lexers break up the same input stream for a single parser.
+- *	Or, you can have multiple instances of the same lexer handle
+- *  multiple input streams; this works great for includes.
+- */
+-public class TokenStreamSelector implements TokenStream {
+-    /** The set of inputs to the MUX */
+-    protected Hashtable inputStreamNames;
+-
+-    /** The currently-selected token stream input */
+-    protected TokenStream input;
+-
+-    /** Used to track stack of input streams */
+-    protected Stack streamStack = new LList();
+-
+-    public TokenStreamSelector() {
+-        super();
+-        inputStreamNames = new Hashtable();
+-    }
+-
+-    public void addInputStream(TokenStream stream, String key) {
+-        inputStreamNames.put(key, stream);
+-    }
+-
+-    /** Return the stream from tokens are being pulled at
+-     *  the moment.
+-     */
+-    public TokenStream getCurrentStream() {
+-        return input;
+-    }
+-
+-    public TokenStream getStream(String sname) {
+-        TokenStream stream = (TokenStream)inputStreamNames.get(sname);
+-        if (stream == null) {
+-            throw new IllegalArgumentException("TokenStream " + sname + " not found");
+-        }
+-        return stream;
+-    }
+-
+-    public Token nextToken() throws TokenStreamException {
+-        // return input.nextToken();
+-        // keep looking for a token until you don't
+-        // get a retry exception.
+-        for (; ;) {
+-            try {
+-                return input.nextToken();
+-            }
+-            catch (TokenStreamRetryException r) {
+-                // just retry "forever"
+-            }
+-        }
+-    }
+-
+-    public TokenStream pop() {
+-        TokenStream stream = (TokenStream)streamStack.pop();
+-        select(stream);
+-        return stream;
+-    }
+-
+-    public void push(TokenStream stream) {
+-        streamStack.push(input); // save current stream
+-        select(stream);
+-    }
+-
+-    public void push(String sname) {
+-        streamStack.push(input);
+-        select(sname);
+-    }
+-
+-    /** Abort recognition of current Token and try again.
+-     *  A stream can push a new stream (for include files
+-     *  for example, and then retry(), which will cause
+-     *  the current stream to abort back to this.nextToken().
+-     *  this.nextToken() then asks for a token from the
+-     *  current stream, which is the new "substream."
+-     */
+-    public void retry() throws TokenStreamRetryException {
+-        throw new TokenStreamRetryException();
+-    }
+-
+-    /** Set the stream without pushing old stream */
+-    public void select(TokenStream stream) {
+-        input = stream;
+-    }
+-
+-    public void select(String sname) throws IllegalArgumentException {
+-        input = getStream(sname);
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenSymbol.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenSymbol.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenSymbol.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenSymbol.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,45 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class TokenSymbol extends GrammarSymbol {
+-    protected int ttype;
+-    /** describes what token matches in "human terms" */
+-    protected String paraphrase = null;
+-
+-    /** Set to a value in the tokens {...} section */
+-    protected String ASTNodeType;
+-
+-    public TokenSymbol(String r) {
+-        super(r);
+-        ttype = Token.INVALID_TYPE;
+-    }
+-
+-    public String getASTNodeType() {
+-        return ASTNodeType;
+-    }
+-
+-    public void setASTNodeType(String type) {
+-        ASTNodeType = type;
+-    }
+-
+-    public String getParaphrase() {
+-        return paraphrase;
+-    }
+-
+-    public int getTokenType() {
+-        return ttype;
+-    }
+-
+-    public void setParaphrase(String p) {
+-        paraphrase = p;
+-    }
+-
+-    public void setTokenType(int t) {
+-        ttype = t;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TokenWithIndex.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenWithIndex.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TokenWithIndex.java	2006-02-08 22:31:05.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TokenWithIndex.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,35 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- */
+-
+-/** This token knows what index 0..n-1 it is from beginning of stream.
+- *  Designed to work with TokenStreamRewriteEngine.java
+- */
+-public class TokenWithIndex extends CommonToken {
+-    /** Index into token array indicating position in input stream */
+-    int index;
+-
+-    public TokenWithIndex() {
+-	super();
+-    }
+-
+-    public TokenWithIndex(int i, String t) {
+-	super(i,t);
+-    }
+-
+-	public void setIndex(int i) {
+-		index = i;
+-	}
+-
+-	public int getIndex() {
+-		return index;
+-	}
+-
+-	public String toString() {
+-		return "["+index+":\"" + getText() + "\",<" + getType() + ">,line=" + line + ",col=" +
+-col + "]\n";
+-	}
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ToolErrorHandler.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ToolErrorHandler.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ToolErrorHandler.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ToolErrorHandler.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,47 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import persistence.antlr.collections.impl.BitSet;
+-
+-interface ToolErrorHandler {
+-
+-
+-    /** Issue a warning about ambiguity between a alternates
+-     * @param blk  The block being analyzed
+-     * @param lexicalAnalysis  true for lexical rule
+-     * @param depth  The depth of the ambiguity
+-     * @param sets  An array of bitsets containing the ambiguities
+-     * @param altIdx1  The zero-based index of the first ambiguous alternative
+-     * @param altIdx2  The zero-based index of the second ambiguous alternative
+-     */
+-    public void warnAltAmbiguity(
+-        Grammar grammar,
+-        AlternativeBlock blk,
+-        boolean lexicalAnalysis,
+-        int depth,
+-        Lookahead[] sets,
+-        int altIdx1,
+-        int altIdx2
+-        );
+-
+-    /** Issue a warning about ambiguity between an alternate and exit path.
+-     * @param blk  The block being analyzed
+-     * @param lexicalAnalysis  true for lexical rule
+-     * @param depth  The depth of the ambiguity
+-     * @param sets  An array of bitsets containing the ambiguities
+-     * @param altIdx  The zero-based index of the ambiguous alternative
+-     */
+-    public void warnAltExitAmbiguity(
+-        Grammar grammar,
+-        BlockWithImpliedExitPath blk,
+-        boolean lexicalAnalysis,
+-        int depth,
+-        Lookahead[] sets,
+-        int altIdx
+-        );
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Tool.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Tool.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Tool.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Tool.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,634 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.io.*;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-import persistence.antlr.PreservingFileWriter;
+-import persistence.antlr.Version;
+-
+-public class Tool {
+-    public static String version = "";
+-
+-    /** Object that handles analysis errors */
+-    ToolErrorHandler errorHandler;
+-
+-    /** Was there an error during parsing or analysis? */
+-    protected boolean hasError = false;
+-
+-    /** Generate diagnostics? (vs code) */
+-    boolean genDiagnostics = false;
+-
+-    /** Generate DocBook vs code? */
+-    boolean genDocBook = false;
+-
+-    /** Generate HTML vs code? */
+-    boolean genHTML = false;
+-
+-    /** Current output directory for generated files */
+-    protected static String outputDir = ".";
+-
+-    // Grammar input
+-    protected String grammarFile;
+-    transient Reader f = new InputStreamReader(System.in);
+-    // SAS: changed for proper text io
+-    //  transient DataInputStream in = null;
+-
+-    protected static String literalsPrefix = "LITERAL_";
+-    protected static boolean upperCaseMangledLiterals = false;
+-
+-    /** C++ file level options */
+-    protected NameSpace nameSpace = null;
+-    protected String namespaceAntlr = null;
+-    protected String namespaceStd = null;
+-    protected boolean genHashLines = true;
+-    protected boolean noConstructors = false;
+-
+-    private BitSet cmdLineArgValid = new BitSet();
+-
+-    /** Construct a new Tool. */
+-    public Tool() {
+-        errorHandler = new DefaultToolErrorHandler(this);
+-    }
+-
+-    public String getGrammarFile() {
+-        return grammarFile;
+-    }
+-
+-    public boolean hasError() {
+-        return hasError;
+-    }
+-
+-    public NameSpace getNameSpace() {
+-        return nameSpace;
+-    }
+-
+-    public String getNamespaceStd() {
+-        return namespaceStd;
+-    }
+-
+-    public String getNamespaceAntlr() {
+-        return namespaceAntlr;
+-    }
+-
+-    public boolean getGenHashLines() {
+-        return genHashLines;
+-    }
+-
+-    public String getLiteralsPrefix() {
+-        return literalsPrefix;
+-    }
+-
+-    public boolean getUpperCaseMangledLiterals() {
+-        return upperCaseMangledLiterals;
+-    }
+-
+-    public void setFileLineFormatter(FileLineFormatter formatter) {
+-        FileLineFormatter.setFormatter(formatter);
+-    }
+-
+-    protected void checkForInvalidArguments(String[] args, BitSet cmdLineArgValid) {
+-        // check for invalid command line args
+-        for (int a = 0; a < args.length; a++) {
+-            if (!cmdLineArgValid.member(a)) {
+-                warning("invalid command-line argument: " + args[a] + "; ignored");
+-            }
+-        }
+-    }
+-
+-    /** This example is from the book _Java in a Nutshell_ by David
+-     * Flanagan.  Written by David Flanagan.  Copyright (c) 1996
+-     * O'Reilly & Associates.  You may study, use, modify, and
+-     * distribute this example for any purpose.  This example is
+-     * provided WITHOUT WARRANTY either expressed or implied.  */
+-    public void copyFile(String source_name, String dest_name)
+-        throws IOException {
+-        File source_file = new File(source_name);
+-        File destination_file = new File(dest_name);
+-        Reader source = null;
+-        Writer destination = null;
+-        char[] buffer;
+-        int bytes_read;
+-
+-        try {
+-            // First make sure the specified source file
+-            // exists, is a file, and is readable.
+-            if (!source_file.exists() || !source_file.isFile())
+-                throw new FileCopyException("FileCopy: no such source file: " +
+-                                            source_name);
+-            if (!source_file.canRead())
+-                throw new FileCopyException("FileCopy: source file " +
+-                                            "is unreadable: " + source_name);
+-
+-            // If the destination exists, make sure it is a writeable file
+-            // and ask before overwriting it.  If the destination doesn't
+-            // exist, make sure the directory exists and is writeable.
+-            if (destination_file.exists()) {
+-                if (destination_file.isFile()) {
+-                    DataInputStream in = new DataInputStream(System.in);
+-                    String response;
+-
+-                    if (!destination_file.canWrite())
+-                        throw new FileCopyException("FileCopy: destination " +
+-                                                    "file is unwriteable: " + dest_name);
+-                    /*
+-                      System.out.print("File " + dest_name +
+-                      " already exists.  Overwrite? (Y/N): ");
+-                      System.out.flush();
+-                      response = in.readLine();
+-                      if (!response.equals("Y") && !response.equals("y"))
+-                      throw new FileCopyException("FileCopy: copy cancelled.");
+-                    */
+-                }
+-                else {
+-                    throw new FileCopyException("FileCopy: destination "
+-                                                + "is not a file: " + dest_name);
+-                }
+-            }
+-            else {
+-                File parentdir = parent(destination_file);
+-                if (!parentdir.exists())
+-                    throw new FileCopyException("FileCopy: destination "
+-                                                + "directory doesn't exist: " + dest_name);
+-                if (!parentdir.canWrite())
+-                    throw new FileCopyException("FileCopy: destination "
+-                                                + "directory is unwriteable: " + dest_name);
+-            }
+-
+-            // If we've gotten this far, then everything is okay; we can
+-            // copy the file.
+-            source = new BufferedReader(new FileReader(source_file));
+-            destination = new BufferedWriter(new FileWriter(destination_file));
+-
+-            buffer = new char[1024];
+-            while (true) {
+-                bytes_read = source.read(buffer, 0, 1024);
+-                if (bytes_read == -1) break;
+-                destination.write(buffer, 0, bytes_read);
+-            }
+-        }
+-            // No matter what happens, always close any streams we've opened.
+-        finally {
+-            if (source != null) {
+-                try {
+-                    source.close();
+-                }
+-                catch (IOException e) {
+-                    ;
+-                }
+-            }
+-            if (destination != null) {
+-                try {
+-                    destination.close();
+-                }
+-                catch (IOException e) {
+-                    ;
+-                }
+-            }
+-        }
+-    }
+-
+-    /** Perform processing on the grammar file.  Can only be called
+-     * from main() @param args The command-line arguments passed to
+-     * main().  This wrapper does the System.exit for use with command-line.
+-     */
+-    public void doEverythingWrapper(String[] args) {
+-        int exitCode = doEverything(args);
+-        System.exit(exitCode);
+-    }
+-
+-    /** Process args and have ANTLR do it's stuff without calling System.exit.
+-     *  Just return the result code.  Makes it easy for ANT build tool.
+-     */
+-    public int doEverything(String[] args) {
+-        // run the preprocessor to handle inheritance first.
+-
+-        // Start preprocessor. This strips generates an argument list
+-        // without -glib options (inside preTool)
+-        persistence.antlr.preprocessor.Tool preTool = new persistence.antlr.preprocessor.Tool(this, args);
+-
+-        boolean preprocess_ok = preTool.preprocess();
+-        String[] modifiedArgs = preTool.preprocessedArgList();
+-
+-        // process arguments for the Tool
+-        processArguments(modifiedArgs);
+-        if (!preprocess_ok) {
+-            return 1;
+-        }
+-
+-        f = getGrammarReader();
+-
+-        ANTLRLexer lexer = new ANTLRLexer(f);
+-        TokenBuffer tokenBuf = new TokenBuffer(lexer);
+-        LLkAnalyzer analyzer = new LLkAnalyzer(this);
+-        MakeGrammar behavior = new MakeGrammar(this, args, analyzer);
+-
+-        try {
+-            ANTLRParser p = new ANTLRParser(tokenBuf, behavior, this);
+-            p.setFilename(grammarFile);
+-            p.grammar();
+-            if (hasError()) {
+-                fatalError("Exiting due to errors.");
+-            }
+-            checkForInvalidArguments(modifiedArgs, cmdLineArgValid);
+-
+-            // Create the right code generator according to the "language" option
+-            CodeGenerator codeGen;
+-
+-            // SAS: created getLanguage() method so subclass can override
+-            //      (necessary for VAJ interface)
+-            String codeGenClassName = "persistence.antlr." + getLanguage(behavior) + "CodeGenerator";
+-            try {
+-                Class codeGenClass = Class.forName(codeGenClassName);
+-                codeGen = (CodeGenerator)codeGenClass.newInstance();
+-                codeGen.setBehavior(behavior);
+-                codeGen.setAnalyzer(analyzer);
+-                codeGen.setTool(this);
+-                codeGen.gen();
+-            }
+-            catch (ClassNotFoundException cnfe) {
+-                panic("Cannot instantiate code-generator: " + codeGenClassName);
+-            }
+-            catch (InstantiationException ie) {
+-                panic("Cannot instantiate code-generator: " + codeGenClassName);
+-            }
+-            catch (IllegalArgumentException ie) {
+-                panic("Cannot instantiate code-generator: " + codeGenClassName);
+-            }
+-            catch (IllegalAccessException iae) {
+-                panic("code-generator class '" + codeGenClassName + "' is not accessible");
+-            }
+-        }
+-        catch (RecognitionException pe) {
+-            fatalError("Unhandled parser error: " + pe.getMessage());
+-        }
+-        catch (TokenStreamException io) {
+-            fatalError("TokenStreamException: " + io.getMessage());
+-        }
+-        return 0;
+-    }
+-
+-    /** Issue an error
+-     * @param s The message
+-     */
+-    public void error(String s) {
+-        hasError = true;
+-        System.err.println("error: " + s);
+-    }
+-
+-    /** Issue an error with line number information
+-     * @param s The message
+-     * @param file The file that has the error (or null)
+-     * @param line The grammar file line number on which the error occured (or -1)
+-     * @param column The grammar file column number on which the error occured (or -1)
+-     */
+-    public void error(String s, String file, int line, int column) {
+-        hasError = true;
+-        System.err.println(FileLineFormatter.getFormatter().
+-                           getFormatString(file, line, column) + s);
+-    }
+-
+-    /** When we are 1.1 compatible...
+-public static Object factory2 (String p, Object[] initargs) {
+-     Class c;
+-     Object o = null;
+-     try {
+-     int argslen = initargs.length;
+-     Class cl[] = new Class[argslen];
+-     for (int i=0;i&lt;argslen;i++) {
+-     cl[i] = Class.forName(initargs[i].getClass().getName());
+-     }
+-     c = Class.forName (p);
+-     Constructor con = c.getConstructor (cl);
+-     o = con.newInstance (initargs);
+-     } catch (Exception e) {
+-     System.err.println ("Can't make a " + p);
+-     }
+-     return o;
+-     }
+-     */
+-    public Object factory(String p) {
+-        Class c;
+-        Object o = null;
+-        try {
+-            c = Class.forName(p);// get class def
+-            o = c.newInstance(); // make a new one
+-        }
+-        catch (Exception e) {
+-            // either class not found,
+-            // class is interface/abstract, or
+-            // class or initializer is not accessible.
+-            warning("Can't create an object of type " + p);
+-            return null;
+-        }
+-        return o;
+-    }
+-
+-    public String fileMinusPath(String f) {
+-        String separator = System.getProperty("file.separator");
+-        int endOfPath = f.lastIndexOf(separator);
+-        if (endOfPath == -1) {
+-            return f;   // no path found
+-        }
+-        return f.substring(endOfPath + 1);
+-    }
+-
+-    /** Determine the language used for this run of ANTLR
+-     *  This was made a method so the subclass can override it
+-     */
+-    public String getLanguage(MakeGrammar behavior) {
+-        if (genDiagnostics) {
+-            return "Diagnostic";
+-        }
+-        if (genHTML) {
+-            return "HTML";
+-        }
+-        if (genDocBook) {
+-            return "DocBook";
+-        }
+-        return behavior.language;
+-    }
+-
+-    public String getOutputDirectory() {
+-        return outputDir;
+-    }
+-
+-    private static void help() {
+-        System.err.println("usage: java persistence.antlr.Tool [args] file.g");
+-        System.err.println("  -o outputDir       specify output directory where all output generated.");
+-        System.err.println("  -glib superGrammar specify location of supergrammar file.");
+-        System.err.println("  -debug             launch the ParseView debugger upon parser invocation.");
+-        System.err.println("  -html              generate a html file from your grammar.");
+-        System.err.println("  -docbook           generate a docbook sgml file from your grammar.");
+-        System.err.println("  -diagnostic        generate a textfile with diagnostics.");
+-        System.err.println("  -trace             have all rules call traceIn/traceOut.");
+-        System.err.println("  -traceLexer        have lexer rules call traceIn/traceOut.");
+-        System.err.println("  -traceParser       have parser rules call traceIn/traceOut.");
+-        System.err.println("  -traceTreeParser   have tree parser rules call traceIn/traceOut.");
+-        System.err.println("  -h|-help|--help    this message");
+-    }
+-
+-    public static void main(String[] args) {
+-        System.err.println("ANTLR Parser Generator   Version " +
+-                           Version.project_version + "   1989-2004 jGuru.com");
+-        version = Version.project_version;
+-
+-        try {
+-            if (args.length == 0) {
+-                help();
+-                System.exit(1);
+-            }
+-            for (int i = 0; i < args.length; ++i) {
+-                if (args[i].equals("-h")
+-                    || args[i].equals("-help")
+-                    || args[i].equals("--help")
+-                ) {
+-                    help();
+-                    System.exit(1);
+-                }
+-            }
+-
+-            Tool theTool = new Tool();
+-            theTool.doEverything(args);
+-            theTool = null;
+-        }
+-        catch (Exception e) {
+-            System.err.println(System.getProperty("line.separator") +
+-                               System.getProperty("line.separator"));
+-            System.err.println("#$%%*&@# internal error: " + e.toString());
+-            System.err.println("[complain to nearest government official");
+-            System.err.println(" or send hate-mail to parrt at jguru.com;");
+-            System.err.println(" please send stack trace with report.]" +
+-                               System.getProperty("line.separator"));
+-            e.printStackTrace();
+-        }
+-System.exit(0);
+-    }
+-
+-	/** This method is used by all code generators to create new output
+-	 * files. If the outputDir set by -o is not present it will be created here.
+-	 */
+-	public PrintWriter openOutputFile(String f) throws IOException {
+-		if( outputDir != "." ) {
+-			File out_dir = new File(outputDir);
+-			if( ! out_dir.exists() )
+-				out_dir.mkdirs();
+-		}
+-		return new PrintWriter(new PreservingFileWriter(outputDir + System.getProperty("file.separator") + f));
+-	}
+-
+-    public Reader getGrammarReader() {
+-        Reader f = null;
+-        try {
+-            if (grammarFile != null) {
+-                f = new BufferedReader(new FileReader(grammarFile));
+-            }
+-        }
+-        catch (IOException e) {
+-            fatalError("cannot open grammar file " + grammarFile);
+-        }
+-        return f;
+-    }
+-
+-    /** @since 2.7.2
+-     */
+-    public void reportException(Exception e, String message) {
+-        System.err.println(message == null ? e.getMessage()
+-                                           : message + ": " + e.getMessage());
+-    }
+-
+-    /** @since 2.7.2
+-     */
+-    public void reportProgress(String message) {
+-        System.out.println(message);
+-    }
+-
+-    /** An error occured that should stop the Tool from doing any work.
+-     *  The default implementation currently exits (via
+-     *  {@link java.lang.System.exit(int)} after printing an error message to
+-     *  <var>stderr</var>. However, the tools should expect that a subclass
+-     *  will override this to throw an unchecked exception such as
+-     *  {@link java.lang.IllegalStateException} or another subclass of
+-     *  {@link java.lang.RuntimeException}. <em>If this method is overriden,
+-     *  <strong>it must never return normally</strong>; i.e. it must always
+-     *  throw an exception or call System.exit</em>.
+-     *  @since 2.7.2
+-     *  @param s The message
+-     */
+-    public void fatalError(String message) {
+-        System.err.println(message);
+-        System.exit(1);
+-    }
+-
+-    /** Issue an unknown fatal error. <em>If this method is overriden,
+-     *  <strong>it must never return normally</strong>; i.e. it must always
+-     *  throw an exception or call System.exit</em>.
+-     *  @deprecated as of 2.7.2 use {@link #fatalError(String)}. By default
+-     *              this method executes <code>fatalError("panic");</code>.
+-     */
+-    public void panic() {
+-        fatalError("panic");
+-    }
+-
+-    /** Issue a fatal error message. <em>If this method is overriden,
+-     *  <strong>it must never return normally</strong>; i.e. it must always
+-     *  throw an exception or call System.exit</em>.
+-     *  @deprecated as of 2.7.2 use {@link #fatalError(String)}. By defaykt
+-     *              this method executes <code>fatalError("panic: " + s);</code>.
+-     * @param s The message
+-     */
+-    public void panic(String s) {
+-        fatalError("panic: " + s);
+-    }
+-
+-    // File.getParent() can return null when the file is specified without
+-    // a directory or is in the root directory.
+-    // This method handles those cases.
+-    public File parent(File f) {
+-        String dirname = f.getParent();
+-        if (dirname == null) {
+-            if (f.isAbsolute())
+-                return new File(File.separator);
+-            else
+-                return new File(System.getProperty("user.dir"));
+-        }
+-        return new File(dirname);
+-    }
+-
+-    /** Parse a list such as "f1.g;f2.g;..." and return a Vector
+-     *  of the elements.
+-     */
+-    public static Vector parseSeparatedList(String list, char separator) {
+-        java.util.StringTokenizer st =
+-		new java.util.StringTokenizer(list, String.valueOf(separator));
+-        Vector v = new Vector(10);
+-        while ( st.hasMoreTokens() ) {
+-             v.appendElement(st.nextToken());
+-        }
+-        if (v.size() == 0) return null;
+-        return v;
+-    }
+-
+-    /** given a filename, strip off the directory prefix (if any)
+-     *  and return it.  Return "./" if f has no dir prefix.
+-     */
+-    public String pathToFile(String f) {
+-        String separator = System.getProperty("file.separator");
+-        int endOfPath = f.lastIndexOf(separator);
+-        if (endOfPath == -1) {
+-            // no path, use current directory
+-            return "." + System.getProperty("file.separator");
+-        }
+-        return f.substring(0, endOfPath + 1);
+-    }
+-
+-    /** <p>Process the command-line arguments.  Can only be called by Tool.
+-     * A bitset is collected of all correct arguments via setArgOk.</p>
+-     * @param args The command-line arguments passed to main()
+-     *
+-     */
+-    protected void processArguments(String[] args) {
+-        for (int i = 0; i < args.length; i++) {
+-            if (args[i].equals("-diagnostic")) {
+-                genDiagnostics = true;
+-                genHTML = false;
+-                setArgOK(i);
+-            }
+-            else if (args[i].equals("-o")) {
+-                setArgOK(i);
+-                if (i + 1 >= args.length) {
+-                    error("missing output directory with -o option; ignoring");
+-                }
+-                else {
+-                    i++;
+-                    setOutputDirectory(args[i]);
+-                    setArgOK(i);
+-                }
+-            }
+-            else if (args[i].equals("-html")) {
+-                genHTML = true;
+-                genDiagnostics = false;
+-                setArgOK(i);
+-            }
+-            else if (args[i].equals("-docbook")) {
+-                genDocBook = true;
+-                genDiagnostics = false;
+-                setArgOK(i);
+-            }
+-            else {
+-                if (args[i].charAt(0) != '-') {
+-                    // Must be the grammar file
+-                    grammarFile = args[i];
+-                    setArgOK(i);
+-                }
+-            }
+-        }
+-    }
+-
+-    public void setArgOK(int i) {
+-        cmdLineArgValid.add(i);
+-    }
+-
+-    public void setOutputDirectory(String o) {
+-        outputDir = o;
+-    }
+-
+-    /** Issue an error; used for general tool errors not for grammar stuff
+-     * @param s The message
+-     */
+-    public void toolError(String s) {
+-        System.err.println("error: " + s);
+-    }
+-
+-    /** Issue a warning
+-     * @param s the message
+-     */
+-    public void warning(String s) {
+-        System.err.println("warning: " + s);
+-    }
+-
+-    /** Issue a warning with line number information
+-     * @param s The message
+-     * @param file The file that has the warning (or null)
+-     * @param line The grammar file line number on which the warning occured (or -1)
+-     * @param column The grammar file line number on which the warning occured (or -1)
+-     */
+-    public void warning(String s, String file, int line, int column) {
+-        System.err.println(FileLineFormatter.getFormatter().
+-                           getFormatString(file, line, column) + "warning:" + s);
+-    }
+-
+-    /** Issue a warning with line number information
+-     * @param s The lines of the message
+-     * @param file The file that has the warning
+-     * @param line The grammar file line number on which the warning occured
+-     */
+-    public void warning(String[] s, String file, int line, int column) {
+-        if (s == null || s.length == 0) {
+-            panic("bad multi-line message to Tool.warning");
+-        }
+-        System.err.println(FileLineFormatter.getFormatter().
+-                           getFormatString(file, line, column) + "warning:" + s[0]);
+-        for (int i = 1; i < s.length; i++) {
+-            System.err.println(FileLineFormatter.getFormatter().
+-                               getFormatString(file, line, column) + "    " + s[i]);
+-        }
+-    }
+-
+-    /**
+-     * Support C++ & C# namespaces (for now).
+-     * C++: Add a nested namespace name to the current namespace.
+-     * C# : Specify an enclosing namespace for the generated code.
+-     * DAW: David Wagner -- C# support by kunle odutola
+-     */
+-    public void setNameSpace(String name) {
+-        if (null == nameSpace)
+-            nameSpace = new NameSpace(StringUtils.stripFrontBack(name, "\"", "\""));
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TreeBlockContext.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeBlockContext.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TreeBlockContext.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeBlockContext.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,34 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/**The context needed to add root,child elements to a Tree.  There
+- * is only one alternative (i.e., a list of children).  We subclass to
+- * specialize. MakeGrammar.addElementToCurrentAlt will work correctly
+- * now for either a block of alts or a Tree child list.
+- *
+- * The first time addAlternativeElement is called, it sets the root element
+- * rather than adding it to one of the alternative lists.  Rather than have
+- * the grammar duplicate the rules for grammar atoms etc... we use the same
+- * grammar and same refToken behavior etc...  We have to special case somewhere
+- * and here is where we do it.
+- */
+-class TreeBlockContext extends BlockContext {
+-    protected boolean nextElementIsRoot = true;
+-
+-
+-    public void addAlternativeElement(AlternativeElement e) {
+-        TreeElement tree = (TreeElement)block;
+-        if (nextElementIsRoot) {
+-            tree.root = (GrammarAtom)e;
+-            nextElementIsRoot = false;
+-        }
+-        else {
+-            super.addAlternativeElement(e);
+-        }
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TreeElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TreeElement.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,35 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** A TreeElement is a block with one alternative and a root node */
+-class TreeElement extends AlternativeBlock {
+-    GrammarAtom root;
+-
+-    public TreeElement(Grammar g, Token start) {
+-        super(g, start, false);
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        String s = " #(" + root;
+-        Alternative a = (Alternative)alternatives.elementAt(0);
+-        AlternativeElement p = a.head;
+-        while (p != null) {
+-            s += p;
+-            p = p.next;
+-        }
+-        return s + " )";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TreeParser.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeParser.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TreeParser.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeParser.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,156 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.NoSuchElementException;
+-
+-import persistence.antlr.collections.AST;
+-import persistence.antlr.collections.impl.BitSet;
+-
+-public class TreeParser {
+-    /** The AST Null object; the parsing cursor is set to this when
+-     *  it is found to be null.  This way, we can test the
+-     *  token type of a node without having to have tests for null
+-     *  everywhere.
+-     */
+-    public static ASTNULLType ASTNULL = new ASTNULLType();
+-
+-    /** Where did this rule leave off parsing; avoids a return parameter */
+-    protected AST _retTree;
+-
+-    /** guessing nesting level; guessing==0 implies not guessing */
+-    // protected int guessing = 0;
+-
+-    /** Nesting level of registered handlers */
+-    // protected int exceptionLevel = 0;
+-
+-    protected TreeParserSharedInputState inputState;
+-
+-    /** Table of token type to token names */
+-    protected String[] tokenNames;
+-
+-    /** AST return value for a rule is squirreled away here */
+-    protected AST returnAST;
+-
+-    /** AST support code; parser and treeparser delegate to this object */
+-    protected ASTFactory astFactory = new ASTFactory();
+-
+-    /** Used to keep track of indentdepth for traceIn/Out */
+-    protected int traceDepth = 0;
+-
+-    public TreeParser() {
+-        inputState = new TreeParserSharedInputState();
+-    }
+-
+-    /** Get the AST return value squirreled away in the parser */
+-    public AST getAST() {
+-        return returnAST;
+-    }
+-
+-    public ASTFactory getASTFactory() {
+-        return astFactory;
+-    }
+-
+-    public String getTokenName(int num) {
+-        return tokenNames[num];
+-    }
+-
+-    public String[] getTokenNames() {
+-        return tokenNames;
+-    }
+-
+-    protected void match(AST t, int ttype) throws MismatchedTokenException {
+-        //System.out.println("match("+ttype+"); cursor is "+t);
+-        if (t == null || t == ASTNULL || t.getType() != ttype) {
+-            throw new MismatchedTokenException(getTokenNames(), t, ttype, false);
+-        }
+-    }
+-
+-    /**Make sure current lookahead symbol matches the given set
+-     * Throw an exception upon mismatch, which is catch by either the
+-     * error handler or by the syntactic predicate.
+-     */
+-    public void match(AST t, BitSet b) throws MismatchedTokenException {
+-        if (t == null || t == ASTNULL || !b.member(t.getType())) {
+-            throw new MismatchedTokenException(getTokenNames(), t, b, false);
+-        }
+-    }
+-
+-    protected void matchNot(AST t, int ttype) throws MismatchedTokenException {
+-        //System.out.println("match("+ttype+"); cursor is "+t);
+-        if (t == null || t == ASTNULL || t.getType() == ttype) {
+-            throw new MismatchedTokenException(getTokenNames(), t, ttype, true);
+-        }
+-    }
+-
+-    /** @deprecated as of 2.7.2. This method calls System.exit() and writes
+-     *  directly to stderr, which is usually not appropriate when
+-     *  a parser is embedded into a larger application. Since the method is 
+-     *  <code>static</code>, it cannot be overridden to avoid these problems.
+-     *  ANTLR no longer uses this method internally or in generated code.
+-     */
+-    public static void panic() {
+-        System.err.println("TreeWalker: panic");
+-        System.exit(1);
+-    }
+-
+-    /** Parser error-reporting function can be overridden in subclass */
+-    public void reportError(RecognitionException ex) {
+-        System.err.println(ex.toString());
+-    }
+-
+-    /** Parser error-reporting function can be overridden in subclass */
+-    public void reportError(String s) {
+-        System.err.println("error: " + s);
+-    }
+-
+-    /** Parser warning-reporting function can be overridden in subclass */
+-    public void reportWarning(String s) {
+-        System.err.println("warning: " + s);
+-    }
+-
+-    /** Specify an object with support code (shared by
+-     *  Parser and TreeParser.  Normally, the programmer
+-     *  does not play with this, using setASTNodeType instead.
+-     */
+-    public void setASTFactory(ASTFactory f) {
+-        astFactory = f;
+-    }
+-
+-    /** Specify the type of node to create during tree building.
+-     * 	@deprecated since 2.7.2
+-     */
+-    public void setASTNodeType(String nodeType) {
+-        setASTNodeClass(nodeType);
+-    }
+-
+-    /** Specify the type of node to create during tree building */
+-    public void setASTNodeClass(String nodeType) {
+-        astFactory.setASTNodeType(nodeType);
+-    }
+-
+-    public void traceIndent() {
+-        for (int i = 0; i < traceDepth; i++)
+-            System.out.print(" ");
+-    }
+-
+-    public void traceIn(String rname, AST t) {
+-        traceDepth += 1;
+-        traceIndent();
+-        System.out.println("> " + rname +
+-                           "(" + (t != null?t.toString():"null") + ")" +
+-                           ((inputState.guessing > 0)?" [guessing]":""));
+-    }
+-
+-    public void traceOut(String rname, AST t) {
+-        traceIndent();
+-        System.out.println("< " + rname +
+-                           "(" + (t != null?t.toString():"null") + ")" +
+-                           ((inputState.guessing > 0)?" [guessing]":""));
+-        traceDepth--;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TreeParserSharedInputState.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeParserSharedInputState.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TreeParserSharedInputState.java	2006-08-31 00:34:11.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeParserSharedInputState.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,18 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-/** This object contains the data associated with an
+- *  input AST.  Multiple parsers
+- *  share a single TreeParserSharedInputState to parse
+- *  the same tree or to have the parser walk multiple
+- *  trees.
+- */
+-public class TreeParserSharedInputState {
+-    /** Are we guessing (guessing>0)? */
+-    public int guessing = 0;
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TreeSpecifierNode.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeSpecifierNode.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TreeSpecifierNode.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeSpecifierNode.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,47 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class TreeSpecifierNode {
+-    private TreeSpecifierNode parent = null;
+-    private TreeSpecifierNode firstChild = null;
+-    private TreeSpecifierNode nextSibling = null;
+-    private Token tok;
+-
+-
+-    TreeSpecifierNode(Token tok_) {
+-        tok = tok_;
+-    }
+-
+-    public TreeSpecifierNode getFirstChild() {
+-        return firstChild;
+-    }
+-
+-    public TreeSpecifierNode getNextSibling() {
+-        return nextSibling;
+-    }
+-
+-    // Accessors
+-    public TreeSpecifierNode getParent() {
+-        return parent;
+-    }
+-
+-    public Token getToken() {
+-        return tok;
+-    }
+-
+-    public void setFirstChild(TreeSpecifierNode child) {
+-        firstChild = child;
+-        child.parent = this;
+-    }
+-
+-    // Structure-building
+-    public void setNextSibling(TreeSpecifierNode sibling) {
+-        nextSibling = sibling;
+-        sibling.parent = parent;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/TreeWalkerGrammar.java glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeWalkerGrammar.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/TreeWalkerGrammar.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/TreeWalkerGrammar.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,84 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-import java.util.Hashtable;
+-import java.util.Enumeration;
+-import java.io.IOException;
+-
+-import persistence.antlr.collections.impl.BitSet;
+-import persistence.antlr.collections.impl.Vector;
+-
+-
+-/** Parser-specific grammar subclass */
+-class TreeWalkerGrammar extends Grammar {
+-    // true for transform mode
+-    protected boolean transform = false;
+-
+-
+-    TreeWalkerGrammar(String className_, Tool tool_, String superClass) {
+-        super(className_, tool_, superClass);
+-    }
+-
+-    /** Top-level call to generate the code for this grammar */
+-    public void generate() throws IOException {
+-        generator.gen(this);
+-    }
+-
+-    // Get name of class from which generated parser/lexer inherits
+-    protected String getSuperClass() {
+-        return "TreeParser";
+-    }
+-
+-    /**Process command line arguments.
+-     * -trace			have all rules call traceIn/traceOut
+-     * -traceParser		have parser rules call traceIn/traceOut
+-     * -debug			generate debugging output for parser debugger
+-     */
+-    public void processArguments(String[] args) {
+-        for (int i = 0; i < args.length; i++) {
+-            if (args[i].equals("-trace")) {
+-                traceRules = true;
+-                antlrTool.setArgOK(i);
+-            }
+-            else if (args[i].equals("-traceTreeParser")) {
+-                traceRules = true;
+-                antlrTool.setArgOK(i);
+-            }
+-//			else if ( args[i].equals("-debug") ) {
+-//				debuggingOutput = true;
+-//				superClass = "parseview.DebuggingTreeWalker";
+-//				Tool.setArgOK(i);
+-//			}
+-        }
+-    }
+-
+-    /** Set tree parser options */
+-    public boolean setOption(String key, Token value) {
+-        if (key.equals("buildAST")) {
+-            if (value.getText().equals("true")) {
+-                buildAST = true;
+-            }
+-            else if (value.getText().equals("false")) {
+-                buildAST = false;
+-            }
+-            else {
+-                antlrTool.error("buildAST option must be true or false", getFilename(), value.getLine(), value.getColumn());
+-            }
+-            return true;
+-        }
+-        if (key.equals("ASTLabelType")) {
+-            super.setOption(key, value);
+-            return true;
+-        }
+-        if (super.setOption(key, value)) {
+-            return true;
+-        }
+-        antlrTool.error("Invalid option: " + key, getFilename(), value.getLine(), value.getColumn());
+-        return false;
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/Version.java glassfish-gil/entity-persistence/src/java/persistence/antlr/Version.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/Version.java	2006-02-08 22:31:07.000000000 +0100
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/Version.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,9 +0,0 @@
+-package persistence.antlr;
+-
+-public class Version {
+-    public static final String version = "2";
+-    public static final String subversion = "7";
+-    public static final String patchlevel = "3";
+-    public static final String datestamp = "20040322";
+-    public static final String project_version = "2.7.3";
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/WildcardElement.java glassfish-gil/entity-persistence/src/java/persistence/antlr/WildcardElement.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/WildcardElement.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/WildcardElement.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,38 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class WildcardElement extends GrammarAtom {
+-    protected String label;
+-
+-    public WildcardElement(Grammar g, Token t, int autoGenType) {
+-        super(g, t, autoGenType);
+-        line = t.getLine();
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public String getLabel() {
+-        return label;
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public void setLabel(String label_) {
+-        label = label_;
+-    }
+-
+-    public String toString() {
+-        String s = " ";
+-        if (label != null) s += label + ":";
+-        return s + ".";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/src/java/persistence/antlr/ZeroOrMoreBlock.java glassfish-gil/entity-persistence/src/java/persistence/antlr/ZeroOrMoreBlock.java
+--- glassfish/entity-persistence/src/java/persistence/antlr/ZeroOrMoreBlock.java	2006-08-31 00:34:12.000000000 +0200
++++ glassfish-gil/entity-persistence/src/java/persistence/antlr/ZeroOrMoreBlock.java	1970-01-01 01:00:00.000000000 +0100
+@@ -1,30 +0,0 @@
+-package persistence.antlr;
+-
+-/* ANTLR Translator Generator
+- * Project led by Terence Parr at http://www.jGuru.com
+- * Software rights: http://www.antlr.org/license.html
+- *
+- */
+-
+-class ZeroOrMoreBlock extends BlockWithImpliedExitPath {
+-
+-    public ZeroOrMoreBlock(Grammar g) {
+-        super(g);
+-    }
+-
+-    public ZeroOrMoreBlock(Grammar g, Token start) {
+-        super(g, start);
+-    }
+-
+-    public void generate() {
+-        grammar.generator.gen(this);
+-    }
+-
+-    public Lookahead look(int k) {
+-        return grammar.theLLkAnalyzer.look(k, this);
+-    }
+-
+-    public String toString() {
+-        return super.toString() + "*";
+-    }
+-}
+diff -Nru glassfish/entity-persistence/toplink-essentials-agent.pom glassfish-gil/entity-persistence/toplink-essentials-agent.pom
+--- glassfish/entity-persistence/toplink-essentials-agent.pom	2007-02-16 21:41:00.000000000 +0100
++++ glassfish-gil/entity-persistence/toplink-essentials-agent.pom	2012-05-24 17:50:02.000000000 +0200
+@@ -3,4 +3,11 @@
+   <groupId>toplink.essentials</groupId>
+   <artifactId>toplink-essentials-agent</artifactId>
+   <version>@VERSION@</version>
++  <dependencies>
++    <dependency>
++      <groupId>toplink.essentials</groupId>
++      <artifactId>toplink-essentials</artifactId>
++      <version>@VERSION@</version>
++    </dependency>
++  </dependencies>
+ </project>
+diff -Nru glassfish/entity-persistence/toplink-essentials.pom glassfish-gil/entity-persistence/toplink-essentials.pom
+--- glassfish/entity-persistence/toplink-essentials.pom	2007-02-16 21:41:00.000000000 +0100
++++ glassfish-gil/entity-persistence/toplink-essentials.pom	2012-05-24 17:49:57.000000000 +0200
+@@ -9,5 +9,10 @@
+       <artifactId>persistence-api</artifactId>
+       <version>1.0b</version>
+     </dependency>
++    <dependency>
++      <groupId>antlr</groupId>
++      <artifactId>antlr</artifactId>
++      <version>2.7.6</version>
++    </dependency>
+   </dependencies>
+ </project>
diff --git a/glassfish-toplink-essentials.spec b/glassfish-toplink-essentials.spec
new file mode 100644
index 0000000..ee4a572
--- /dev/null
+++ b/glassfish-toplink-essentials.spec
@@ -0,0 +1,122 @@
+Name:          glassfish-toplink-essentials
+Version:       2.0.46
+Release:       3%{?dist}
+Summary:       Glassfish JPA Toplink Essentials
+Group:         Development/Libraries
+License:       CDDL or GPLv2 with exceptions
+URL:           http://glassfish.java.net/javaee5/persistence/
+Source0:       http://dlc.sun.com.edgesuite.net/javaee5/promoted/source/glassfish-persistence-v2-b46-src.zip
+# wget http://download.java.net/javaee5/v2.1.1_branch/promoted/source/glassfish-v2.1.1-b31g-src.zip
+# unzip glassfish-v2.1.1-b31g-src.zip
+# mkdir -p glassfish-bootstrap
+# mv glassfish/bootstrap/* glassfish-bootstrap
+# tar czf glassfish-bootstrap.tar.gz glassfish-bootstrap
+Source1:       glassfish-bootstrap.tar.gz
+# fix javadoc build
+Patch0:        glassfish-entity-persistence-build.patch
+
+Patch1:        glassfish-persistence-2.0.41-jdk7.patch
+Patch2:        glassfish-persistence-2.0.41-agent-remove-manifest-classpath.patch
+Patch3:        glassfish-persistence-2.0.41-use_system_antlr.patch
+
+BuildRequires: java-devel
+BuildRequires: jpackage-utils
+
+BuildRequires: ant
+BuildRequires: antlr-tool
+BuildRequires: geronimo-jta
+BuildRequires: geronimo-jpa
+
+Requires:      antlr-tool
+Requires:      geronimo-jpa
+Requires:      geronimo-jta
+
+Requires:      java
+Requires:      jpackage-utils
+BuildArch:     noarch
+
+%description
+Glassfish Persistence Implementation.
+
+%package javadoc
+Group:         Documentation
+Summary:       Javadoc for %{name} Implementation
+Requires:      jpackage-utils
+
+%description javadoc
+This package contains javadoc for %{name}.
+
+%prep
+%setup -q -c
+
+tar xzf %{SOURCE1}
+mv glassfish-bootstrap glassfish/bootstrap
+find . -name "*.class" -delete
+find . -name "*.jar" -delete
+
+%patch0 -b .sav0
+%patch1 -b .sav1
+%patch2 -b .sav2
+%patch3 -b .sav3
+
+sed -i -e 's/@VERSION@/%{version}/' glassfish/entity-persistence/toplink-essentials.pom
+sed -i -e 's/@VERSION@/%{version}/' glassfish/entity-persistence/toplink-essentials-agent.pom
+
+cd glassfish/bootstrap/legal
+for d in CDDLv1.0.txt LICENSE.txt COPYRIGHT 3RD-PARTY-LICENSE.txt ; do
+  iconv -f iso8859-1 -t utf-8 $d > $d.conv && mv -f $d.conv $d
+  sed -i 's/\r//' $d
+done
+
+%build
+
+pushd glassfish/entity-persistence
+  export CLASSPATH=$(build-classpath geronimo-jpa)
+  %ant -Djavaee.jar=$(build-classpath geronimo-jta) -Dglassfish.schemas.home=$PWD/../persistence-api/schemas all docs
+popd
+
+%install
+
+mkdir -p %{buildroot}%{_javadir}
+install -m 644 publish/glassfish/lib/toplink-essentials.jar %{buildroot}%{_javadir}/%{name}.jar
+install -m 644 publish/glassfish/lib/toplink-essentials-agent.jar %{buildroot}%{_javadir}/%{name}-agent.jar
+
+mkdir -p %{buildroot}%{_mavenpomdir}
+install -pm 644 glassfish/entity-persistence/toplink-essentials.pom \
+    %{buildroot}%{_mavenpomdir}/JPP-%{name}.pom
+%add_maven_depmap JPP-%{name}.pom %{name}.jar
+install -pm 644 glassfish/entity-persistence/toplink-essentials-agent.pom \
+    %{buildroot}%{_mavenpomdir}/JPP-%{name}-agent.pom
+%add_maven_depmap JPP-%{name}-agent.pom %{name}-agent.jar
+
+mkdir -p %{buildroot}%{_javadocdir}/%{name}
+cp -pr glassfish/entity-persistence/build/javadoc/* %{buildroot}%{_javadocdir}/%{name}
+
+%files
+%{_javadir}/%{name}.jar
+%{_javadir}/%{name}-agent.jar
+%{_mavenpomdir}/JPP-%{name}.pom
+%{_mavenpomdir}/JPP-%{name}-agent.pom
+%{_mavendepmapfragdir}/%{name}
+%doc glassfish/bootstrap/legal/*
+
+%files javadoc
+%{_javadocdir}/%{name}
+%doc glassfish/bootstrap/legal/3RD-PARTY-LICENSE*.txt
+%doc glassfish/bootstrap/legal/CDDL*.txt
+%doc glassfish/bootstrap/legal/COPYRIGHT
+%doc glassfish/bootstrap/legal/LICENSE.txt
+
+%changelog
+* Thu Jun 28 2012 gil cattaneo <puntogil at libero.it> 2.0.46-3
+- moved in files in %%{_javadir}
+- fixed Url and source0 url
+
+* Thu Jun 28 2012 gil cattaneo <puntogil at libero.it> 2.0.46-2
+- change license tag
+
+* Thu May 24 2012 gil cattaneo <puntogil at libero.it> 2.0.46-1
+- update to 2.0.46
+
+* Fri Apr 06 2012 gil cattaneo <puntogil at libero.it> 2.0.41-1
+- initial rpm
\ No newline at end of file
diff --git a/sources b/sources
index e69de29..1584798 100644
--- a/sources
+++ b/sources
@@ -0,0 +1,2 @@
+ed49d8aa5c539ac0da699b4ca39f74c8  glassfish-bootstrap.tar.gz
+efd7acb74e5b6417d29801ad70e6c883  glassfish-persistence-v2-b46-src.zip


More information about the scm-commits mailing list