public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] gentoo commit in src/patchsets/seamonkey/2.1: 1000-config_rules_install_dist_files.patch 1001-fix_jemalloc_vs_aslr.patch 1003-update_ply_2.5_to_3.3.patch 2000-seamonkey_gentoo_install_dirs.patch 2001-gentoo_no_app_updates.patch 2020-gentoo-prefix-support.patch 5000_gconf-config-update.patch 5000_use-chrome-manifest.patch 5001_fix-prefs-directory.patch 5001_support-gcc46.patch 5002_independant-omnijar.patch 5002_libpng-1.5-support.patch 5003_fix-resources-path.patch 5004_enable-omnijar-in-xulrunner.patch
@ 2011-06-23 11:41 Lars Wendler (polynomial-c)
From: Lars Wendler (polynomial-c) @ 2011-06-23 11:41 UTC (permalink / raw)
  To: gentoo-commits

polynomial-c    11/06/23 11:41:40

  Added:                1000-config_rules_install_dist_files.patch
                        1001-fix_jemalloc_vs_aslr.patch
                        1003-update_ply_2.5_to_3.3.patch
                        2000-seamonkey_gentoo_install_dirs.patch
                        2001-gentoo_no_app_updates.patch
                        2020-gentoo-prefix-support.patch
                        5000_gconf-config-update.patch
                        5000_use-chrome-manifest.patch
                        5001_fix-prefs-directory.patch
                        5001_support-gcc46.patch
                        5002_independant-omnijar.patch
                        5002_libpng-1.5-support.patch
                        5003_fix-resources-path.patch
                        5004_enable-omnijar-in-xulrunner.patch
  Log:
  New patchset for seamonkey-2.1

Revision  Changes    Path
1.1                  src/patchsets/seamonkey/2.1/1000-config_rules_install_dist_files.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/1000-config_rules_install_dist_files.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/1000-config_rules_install_dist_files.patch?rev=1.1&content-type=text/plain

Index: 1000-config_rules_install_dist_files.patch
===================================================================
This is from: 
    https://bugzilla.mozilla.org/show_bug.cgi?id=386904
    https://bugzilla.mozilla.org/attachment.cgi?id=270967


 -- Summary:

This patch implements the DIST_FILES install:: target as well as
the DIST_CHROME_FILES install:: target.

 -- Evaluation:

DIST_FILES is now used in browser/app/Makefile.in to install
application.ini; however, DIST_FILES is only implemented for
libs::, not for install::.

As a consequence, installs of firefox performed via make install
now break with a missing application.ini file.

While writing this fix, it became obvious that the DIST_CHROME_FILES
install:: target is missing as well, though that causes no
problems because it is not actually used.
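
For illustration, here is a minimal sketch of the consumer side, i.e.
the kind of Makefile.in fragment that relies on these targets. This is
an abridged, hypothetical skeleton, not copied from the tree;
browser/app/Makefile.in is the real user mentioned above:

    # Hypothetical, abridged Makefile.in skeleton (illustrative only).
    # Files listed in DIST_FILES are run through config/Preprocessor.py
    # and copied by the libs:: target at build time; this patch makes
    # install:: do the same, so make install also ships them.

    DEPTH     = ../..
    topsrcdir = @top_srcdir@
    srcdir    = @srcdir@

    include $(DEPTH)/config/autoconf.mk

    # application.ini is the file whose absence broke installed trees.
    DIST_FILES = application.ini

    include $(topsrcdir)/config/rules.mk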

---
 config/rules.mk |   28 ++++++++++++++++++++++++++++
 1 file changed, 28 insertions(+)

Index: mozilla/config/rules.mk
===================================================================
--- mozilla.orig/config/rules.mk	2007-08-18 15:15:17.000000000 +0000
+++ mozilla/config/rules.mk	2007-08-18 15:15:36.000000000 +0000
@@ -1699,28 +1699,56 @@
 	@$(EXIT_ON_ERROR) \
 	for f in $(DIST_FILES); do \
 	  dest=$(FINAL_TARGET)/`basename $$f`; \
 	  $(RM) -f $$dest; \
 	  $(PYTHON) $(MOZILLA_DIR)/config/Preprocessor.py \
 	    $(XULAPP_DEFINES) $(DEFINES) $(ACDEFINES) $(XULPPFLAGS) \
 	    $(srcdir)/$$f > $$dest; \
 	done
+
+install:: $(DIST_FILES)
+ifndef XPI_NAME
+ifndef NO_INSTALL
+	@$(EXIT_ON_ERROR) \
+	for f in $(DIST_FILES); do \
+	  dest=$(DESTDIR)$(mozappdir)/`basename $$f`; \
+	  $(RM) -f $$dest; \
+	  $(PYTHON) $(MOZILLA_DIR)/config/Preprocessor.py \
+	    $(XULAPP_DEFINES) $(DEFINES) $(ACDEFINES) $(XULPPFLAGS) \
+	    $(srcdir)/$$f > $$dest; \
+	done
+endif
+endif
 endif
 
 ifneq ($(DIST_CHROME_FILES),)
 libs:: $(DIST_CHROME_FILES)
 	@$(EXIT_ON_ERROR) \
 	for f in $(DIST_CHROME_FILES); do \
 	  dest=$(FINAL_TARGET)/chrome/`basename $$f`; \
 	  $(RM) -f $$dest; \
 	  $(PYTHON) $(MOZILLA_DIR)/config/Preprocessor.py \
 	    $(XULAPP_DEFINES) $(DEFINES) $(ACDEFINES) $(XULPPFLAGS) \
 	    $(srcdir)/$$f > $$dest; \
 	done
+
+install:: $(DIST_CHROME_FILES)
+ifndef XPI_NAME
+ifndef NO_INSTALL
+	@$(EXIT_ON_ERROR) \
+	for f in $(DIST_CHROME_FILES); do \
+	  dest=$(DESTDIR)$(mozappdir)/chrome/`basename $$f`; \
+	  $(RM) -f $$dest; \
+	  $(PYTHON) $(MOZILLA_DIR)/config/Preprocessor.py \
+	    $(XULAPP_DEFINES) $(DEFINES) $(ACDEFINES) $(XULPPFLAGS) \
+	    $(srcdir)/$$f > $$dest; \
+	done
+endif
+endif
 endif
 
 ifneq ($(XPI_PKGNAME),)
 libs realchrome::
 ifdef STRIP_XPI
 ifndef MOZ_DEBUG
 	@echo "Stripping $(XPI_PKGNAME) package directory..."
 	@echo $(FINAL_TARGET)



1.1                  src/patchsets/seamonkey/2.1/1001-fix_jemalloc_vs_aslr.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/1001-fix_jemalloc_vs_aslr.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/1001-fix_jemalloc_vs_aslr.patch?rev=1.1&content-type=text/plain

Index: 1001-fix_jemalloc_vs_aslr.patch
===================================================================
--- mozilla/memory/jemalloc/jemalloc.c
+++ mozilla/memory/jemalloc/jemalloc.c
@@ -392,7 +392,7 @@ __FBSDID("$FreeBSD: head/lib/libc/stdlib
 static const bool __isthreaded = true;
 #endif
 
-#if defined(MOZ_MEMORY_SOLARIS) && defined(MAP_ALIGN) && !defined(JEMALLOC_NEVER_USES_MAP_ALIGN)
+#if defined(MOZ_MEMORY_SOLARIS) || defined(MOZ_MEMORY_LINUX) || defined(MOZ_MEMORY_BSD)
 #define JEMALLOC_USES_MAP_ALIGN	 /* Required on Solaris 10. Might improve performance elsewhere. */
 #endif
 
@@ -2305,20 +2305,31 @@ pages_map_align(size_t size, int pfd, si
 	 * We don't use MAP_FIXED here, because it can cause the *replacement*
 	 * of existing mappings, and we only want to create new mappings.
 	 */
-#ifdef MALLOC_PAGEFILE
-	if (pfd != -1) {
-		ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
-		    MAP_NOSYNC | MAP_ALIGN, pfd, 0);
-	} else
-#endif
-	       {
-		ret = mmap((void *)alignment, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
-		    MAP_NOSYNC | MAP_ALIGN | MAP_ANON, -1, 0);
-	}
+	ret = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+		    MAP_NOSYNC| MAP_ANON, -1, 0);
 	assert(ret != NULL);
 
 	if (ret == MAP_FAILED)
 		ret = NULL;
+	else {
+		uintptr_t aligned_ret;
+		size_t extra_size;
+
+		aligned_ret = (uintptr_t)ret + alignment - 1;
+		aligned_ret &= ~(alignment - 1);
+		extra_size = aligned_ret - (uintptr_t)ret;
+		munmap(ret, extra_size);
+		munmap(ret + extra_size + size, alignment - extra_size);
+		ret = (void *)aligned_ret;
+#ifdef MALLOC_PAGEFILE
+		if (pfd != -1) {
+			ret = mmap(ret, size, PROT_READ | PROT_WRITE, MAP_PRIVATE |
+			    MAP_NOSYNC | MAP_FIXED, pfd, 0);
+		}
+		if (ret == MAP_FAILED)
+			ret = NULL;
+#endif
+	}
 	return (ret);
 }
 #endif
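
For reference, the replacement logic in the hunk above is the standard
over-allocate-and-trim alignment trick: map more than needed at an
arbitrary address, then unmap the slack on both sides. A self-contained
sketch of that technique follows (illustrative C, not the patch itself;
unlike the hunk above, it skips the zero-length leading munmap):

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/*
 * Sketch of the over-allocate-and-trim trick: map size + alignment
 * bytes anywhere, then unmap the slack on both sides so that exactly
 * `size` aligned bytes remain.  `alignment` must be a power of two
 * and a multiple of the page size.
 */
static void *
map_aligned(size_t size, size_t alignment)
{
	char *raw, *aligned;
	size_t lead;

	raw = mmap(NULL, size + alignment, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (raw == MAP_FAILED)
		return NULL;

	/* Round the start of the mapping up to the next aligned address. */
	aligned = (char *)(((uintptr_t)raw + alignment - 1) &
	    ~(uintptr_t)(alignment - 1));
	lead = (size_t)(aligned - raw);

	if (lead != 0)
		munmap(raw, lead);                        /* leading slack */
	munmap(aligned + size, alignment - lead);         /* trailing slack */

	return aligned;
}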




1.1                  src/patchsets/seamonkey/2.1/1003-update_ply_2.5_to_3.3.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/1003-update_ply_2.5_to_3.3.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/1003-update_ply_2.5_to_3.3.patch?rev=1.1&content-type=text/plain

Index: 1003-update_ply_2.5_to_3.3.patch
===================================================================
# HG changeset patch
# Parent 414ff9016349c51367bc5f9c1a815ac14177109b

--- mozilla/other-licenses/ply/COPYING
+++ mozilla/other-licenses/ply/COPYING
@@ -1,504 +1,28 @@
-		  GNU LESSER GENERAL PUBLIC LICENSE
-		       Version 2.1, February 1999
+Copyright (C) 2001-2009,
+David M. Beazley (Dabeaz LLC)
+All rights reserved.
 
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
-     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
 
-[This is the first released version of the Lesser GPL.  It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
+* Redistributions of source code must retain the above copyright notice,
+  this list of conditions and the following disclaimer.  
+* Redistributions in binary form must reproduce the above copyright notice, 
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.  
+* Neither the name of the David Beazley or Dabeaz LLC may be used to
+  endorse or promote products derived from this software without
+  specific prior written permission. 
 
-			    Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
-  This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it.  You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
-
-  When we speak of free software, we are referring to freedom of use,
-not price.  Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
-  To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights.  These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
-
-  For example, if you distribute copies of the library, whether gratis
-or for a fee, you must give the recipients all the rights that we gave
-you.  You must make sure that they, too, receive or can get the source
-code.  If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
-it.  And you must show them these terms so they know their rights.
-
-  We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
-permission to copy, distribute and/or modify the library.
-
-  To protect each distributor, we want to make it very clear that
-there is no warranty for the free library.  Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
-\f
-  Finally, software patents pose a constant threat to the existence of
-any free program.  We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder.  Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
-
-  Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License.  This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License.  We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
-
-  When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library.  The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom.  The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
-
-  We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License.  It also provides other free software developers Less
-of an advantage over competing non-free programs.  These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries.  However, the Lesser license provides advantages in certain
-special circumstances.
-
-  For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard.  To achieve this, non-free programs must be
-allowed to use the library.  A more frequent case is that a free
-library does the same job as widely used non-free libraries.  In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
-  In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software.  For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
-  Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.  Pay close attention to the difference between a
-"work based on the library" and a "work that uses the library".  The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
-\f
-		  GNU LESSER GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License Agreement applies to any software library or other
-program which contains a notice placed by the copyright holder or
-other authorized party saying it may be distributed under the terms of
-this Lesser General Public License (also called "this License").
-Each licensee is addressed as "you".
-
-  A "library" means a collection of software functions and/or data
-prepared so as to be conveniently linked with application programs
-(which use some of those functions and data) to form executables.
-
-  The "Library", below, refers to any such software library or work
-which has been distributed under these terms.  A "work based on the
-Library" means either the Library or any derivative work under
-copyright law: that is to say, a work containing the Library or a
-portion of it, either verbatim or with modifications and/or translated
-straightforwardly into another language.  (Hereinafter, translation is
-included without limitation in the term "modification".)
-
-  "Source code" for a work means the preferred form of the work for
-making modifications to it.  For a library, complete source code means
-all the source code for all modules it contains, plus any associated
-interface definition files, plus the scripts used to control compilation
-and installation of the library.
-
-  Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running a program using the Library is not restricted, and output from
-such a program is covered only if its contents constitute a work based
-on the Library (independent of the use of the Library in a tool for
-writing it).  Whether that is true depends on what the Library does
-and what the program that uses the Library does.
-  
-  1. You may copy and distribute verbatim copies of the Library's
-complete source code as you receive it, in any medium, provided that
-you conspicuously and appropriately publish on each copy an
-appropriate copyright notice and disclaimer of warranty; keep intact
-all the notices that refer to this License and to the absence of any
-warranty; and distribute a copy of this License along with the
-Library.
-
-  You may charge a fee for the physical act of transferring a copy,
-and you may at your option offer warranty protection in exchange for a
-fee.
-\f
-  2. You may modify your copy or copies of the Library or any portion
-of it, thus forming a work based on the Library, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) The modified work must itself be a software library.
-
-    b) You must cause the files modified to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    c) You must cause the whole of the work to be licensed at no
-    charge to all third parties under the terms of this License.
-
-    d) If a facility in the modified Library refers to a function or a
-    table of data to be supplied by an application program that uses
-    the facility, other than as an argument passed when the facility
-    is invoked, then you must make a good faith effort to ensure that,
-    in the event an application does not supply such function or
-    table, the facility still operates, and performs whatever part of
-    its purpose remains meaningful.
-
-    (For example, a function in a library to compute square roots has
-    a purpose that is entirely well-defined independent of the
-    application.  Therefore, Subsection 2d requires that any
-    application-supplied function or table used by this function must
-    be optional: if the application does not supply it, the square
-    root function must still compute square roots.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Library,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Library, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote
-it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Library.
-
-In addition, mere aggregation of another work not based on the Library
-with the Library (or with a work based on the Library) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may opt to apply the terms of the ordinary GNU General Public
-License instead of this License to a given copy of the Library.  To do
-this, you must alter all the notices that refer to this License, so
-that they refer to the ordinary GNU General Public License, version 2,
-instead of to this License.  (If a newer version than version 2 of the
-ordinary GNU General Public License has appeared, then you can specify
-that version instead if you wish.)  Do not make any other change in
-these notices.
-\f
-  Once this change is made in a given copy, it is irreversible for
-that copy, so the ordinary GNU General Public License applies to all
-subsequent copies and derivative works made from that copy.
-
-  This option is useful when you wish to copy part of the code of
-the Library into a program that is not a library.
-
-  4. You may copy and distribute the Library (or a portion or
-derivative of it, under Section 2) in object code or executable form
-under the terms of Sections 1 and 2 above provided that you accompany
-it with the complete corresponding machine-readable source code, which
-must be distributed under the terms of Sections 1 and 2 above on a
-medium customarily used for software interchange.
-
-  If distribution of object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the
-source code from the same place satisfies the requirement to
-distribute the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  5. A program that contains no derivative of any portion of the
-Library, but is designed to work with the Library by being compiled or
-linked with it, is called a "work that uses the Library".  Such a
-work, in isolation, is not a derivative work of the Library, and
-therefore falls outside the scope of this License.
-
-  However, linking a "work that uses the Library" with the Library
-creates an executable that is a derivative of the Library (because it
-contains portions of the Library), rather than a "work that uses the
-library".  The executable is therefore covered by this License.
-Section 6 states terms for distribution of such executables.
-
-  When a "work that uses the Library" uses material from a header file
-that is part of the Library, the object code for the work may be a
-derivative work of the Library even though the source code is not.
-Whether this is true is especially significant if the work can be
-linked without the Library, or if the work is itself a library.  The
-threshold for this to be true is not precisely defined by law.
-
-  If such an object file uses only numerical parameters, data
-structure layouts and accessors, and small macros and small inline
-functions (ten lines or less in length), then the use of the object
-file is unrestricted, regardless of whether it is legally a derivative
-work.  (Executables containing this object code plus portions of the
-Library will still fall under Section 6.)
-
-  Otherwise, if the work is a derivative of the Library, you may
-distribute the object code for the work under the terms of Section 6.
-Any executables containing that work also fall under Section 6,
-whether or not they are linked directly with the Library itself.
-\f
-  6. As an exception to the Sections above, you may also combine or
-link a "work that uses the Library" with the Library to produce a
-work containing portions of the Library, and distribute that work
-under terms of your choice, provided that the terms permit
-modification of the work for the customer's own use and reverse
-engineering for debugging such modifications.
-
-  You must give prominent notice with each copy of the work that the
-Library is used in it and that the Library and its use are covered by
-this License.  You must supply a copy of this License.  If the work
-during execution displays copyright notices, you must include the
-copyright notice for the Library among them, as well as a reference
-directing the user to the copy of this License.  Also, you must do one
-of these things:
-
-    a) Accompany the work with the complete corresponding
-    machine-readable source code for the Library including whatever
-    changes were used in the work (which must be distributed under
-    Sections 1 and 2 above); and, if the work is an executable linked
-    with the Library, with the complete machine-readable "work that
-    uses the Library", as object code and/or source code, so that the
-    user can modify the Library and then relink to produce a modified
-    executable containing the modified Library.  (It is understood
-    that the user who changes the contents of definitions files in the
-    Library will not necessarily be able to recompile the application
-    to use the modified definitions.)
-
-    b) Use a suitable shared library mechanism for linking with the
-    Library.  A suitable mechanism is one that (1) uses at run time a
-    copy of the library already present on the user's computer system,
-    rather than copying library functions into the executable, and (2)
-    will operate properly with a modified version of the library, if
-    the user installs one, as long as the modified version is
-    interface-compatible with the version that the work was made with.
-
-    c) Accompany the work with a written offer, valid for at
-    least three years, to give the same user the materials
-    specified in Subsection 6a, above, for a charge no more
-    than the cost of performing this distribution.
-
-    d) If distribution of the work is made by offering access to copy
-    from a designated place, offer equivalent access to copy the above
-    specified materials from the same place.
-
-    e) Verify that the user has already received a copy of these
-    materials or that you have already sent this user a copy.
-
-  For an executable, the required form of the "work that uses the
-Library" must include any data and utility programs needed for
-reproducing the executable from it.  However, as a special exception,
-the materials to be distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies
-the executable.
-
-  It may happen that this requirement contradicts the license
-restrictions of other proprietary libraries that do not normally
-accompany the operating system.  Such a contradiction means you cannot
-use both them and the Library together in an executable that you
-distribute.
-\f
-  7. You may place library facilities that are a work based on the
-Library side-by-side in a single library together with other library
-facilities not covered by this License, and distribute such a combined
-library, provided that the separate distribution of the work based on
-the Library and of the other library facilities is otherwise
-permitted, and provided that you do these two things:
-
-    a) Accompany the combined library with a copy of the same work
-    based on the Library, uncombined with any other library
-    facilities.  This must be distributed under the terms of the
-    Sections above.
-
-    b) Give prominent notice with the combined library of the fact
-    that part of it is a work based on the Library, and explaining
-    where to find the accompanying uncombined form of the same work.
-
-  8. You may not copy, modify, sublicense, link with, or distribute
-the Library except as expressly provided under this License.  Any
-attempt otherwise to copy, modify, sublicense, link with, or
-distribute the Library is void, and will automatically terminate your
-rights under this License.  However, parties who have received copies,
-or rights, from you under this License will not have their licenses
-terminated so long as such parties remain in full compliance.
-
-  9. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Library or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Library (or any work based on the
-Library), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Library or works based on it.
-
-  10. Each time you redistribute the Library (or any work based on the
-Library), the recipient automatically receives a license from the
-original licensor to copy, distribute, link with or modify the Library
-subject to these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties with
-this License.
-\f
-  11. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Library at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Library by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Library.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply,
-and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  12. If the distribution and/or use of the Library is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Library under this License may add
-an explicit geographical distribution limitation excluding those countries,
-so that distribution is permitted only in or among countries not thus
-excluded.  In such case, this License incorporates the limitation as if
-written in the body of this License.
-
-  13. The Free Software Foundation may publish revised and/or new
-versions of the Lesser General Public License from time to time.
-Such new versions will be similar in spirit to the present version,
-but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Library
-specifies a version number of this License which applies to it and
-"any later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation.  If the Library does not specify a
-license version number, you may choose any version ever published by
-the Free Software Foundation.
-\f
-  14. If you wish to incorporate parts of the Library into other free
-programs whose distribution conditions are incompatible with these,
-write to the author to ask for permission.  For software which is
-copyrighted by the Free Software Foundation, write to the Free
-Software Foundation; we sometimes make exceptions for this.  Our
-decision will be guided by the two goals of preserving the free status
-of all derivatives of our free software and of promoting the sharing
-and reuse of software generally.
-
-			    NO WARRANTY
-
-  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
-KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
-LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
-		     END OF TERMS AND CONDITIONS
-\f
-           How to Apply These Terms to Your New Libraries
-
-  If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change.  You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms of the
-ordinary General Public License).
-
-  To apply these terms, attach the following notices to the library.  It is
-safest to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the library's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Lesser General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public
-    License along with this library; if not, write to the Free Software
-    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the library, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the
-  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
-
-  <signature of Ty Coon>, 1 April 1990
-  Ty Coon, President of Vice
-
-That's all there is to it!
-
-
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
--- mozilla/other-licenses/ply/README
+++ mozilla/other-licenses/ply/README
@@ -1,9 +1,9 @@
 David Beazley's PLY (Python Lex-Yacc)
 http://www.dabeaz.com/ply/
 
-Licensed under the GPL (v2.1 or later).
+Licensed under BSD.
 
-This directory contains just the code and license from PLY version 2.5;
+This directory contains just the code and license from PLY version 3.3;
 the full distribution (see the URL) also contains examples, tests,
 documentation, and a longer README.
 
--- mozilla/other-licenses/ply/ply/lex.py
+++ mozilla/other-licenses/ply/ply/lex.py
@@ -1,88 +1,119 @@
 # -----------------------------------------------------------------------------
 # ply: lex.py
 #
-# Author: David M. Beazley (dave@dabeaz.com)
+# Copyright (C) 2001-2009,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
 #
-# Copyright (C) 2001-2008, David M. Beazley
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.  
+# * Redistributions in binary form must reproduce the above copyright notice, 
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.  
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+#   endorse or promote products derived from this software without
+#  specific prior written permission. 
 #
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-#
-# See the file COPYING for a complete copy of the LGPL.
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 # -----------------------------------------------------------------------------
 
-__version__    = "2.5"
-__tabversion__ = "2.4"       # Version of table file used
+__version__    = "3.3"
+__tabversion__ = "3.2"       # Version of table file used
 
 import re, sys, types, copy, os
 
+# This tuple contains known string types
+try:
+    # Python 2.6
+    StringTypes = (types.StringType, types.UnicodeType)
+except AttributeError:
+    # Python 3.0
+    StringTypes = (str, bytes)
+
+# Extract the code attribute of a function. Different implementations
+# are for Python 2/3 compatibility.
+
+if sys.version_info[0] < 3:
+    def func_code(f):
+        return f.func_code
+else:
+    def func_code(f):
+        return f.__code__
+
 # This regular expression is used to match valid token names
 _is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
 
-# _INSTANCETYPE sets the valid set of instance types recognized
-# by PLY when lexers are defined by a class. In order to maintain
-# backwards compatibility with Python-2.0, we have to check for
-# the existence of ObjectType.
-
-try:
-    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
-except AttributeError:
-    _INSTANCETYPE = types.InstanceType
-    class object: pass       # Note: needed if no new-style classes present
-
 # Exception thrown when invalid token encountered and no default error
 # handler is defined.
 
 class LexError(Exception):
     def __init__(self,message,s):
          self.args = (message,)
          self.text = s
 
-# An object used to issue one-time warning messages for various features
-
-class LexWarning(object):
-   def __init__(self):
-      self.warned = 0
-   def __call__(self,msg):
-      if not self.warned:
-         sys.stderr.write("ply.lex: Warning: " + msg+"\n")
-         self.warned = 1
-
-_SkipWarning = LexWarning()         # Warning for use of t.skip() on tokens
-
 # Token class.  This class is used to represent the tokens produced.
 class LexToken(object):
     def __str__(self):
         return "LexToken(%s,%r,%d,%d)" % (self.type,self.value,self.lineno,self.lexpos)
     def __repr__(self):
         return str(self)
-    def skip(self,n):
-        self.lexer.skip(n)
-        _SkipWarning("Calling t.skip() on a token is deprecated.  Please use t.lexer.skip()")
+
+# This object is a stand-in for a logging object created by the 
+# logging module.  
+
+class PlyLogger(object):
+    def __init__(self,f):
+        self.f = f
+    def critical(self,msg,*args,**kwargs):
+        self.f.write((msg % args) + "\n")
+
+    def warning(self,msg,*args,**kwargs):
+        self.f.write("WARNING: "+ (msg % args) + "\n")
+
+    def error(self,msg,*args,**kwargs):
+        self.f.write("ERROR: " + (msg % args) + "\n")
+
+    info = critical
+    debug = critical
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+    def __getattribute__(self,name):
+        return self
+    def __call__(self,*args,**kwargs):
+        return self
 
 # -----------------------------------------------------------------------------
-# Lexer class
+#                        === Lexing Engine ===
 #
-# This class encapsulates all of the methods and data associated with a lexer.
+# The following Lexer class implements the lexer runtime.   There are only
+# a few public methods and attributes:
 #
 #    input()          -  Store a new string in the lexer
 #    token()          -  Get the next token
+#    clone()          -  Clone the lexer
+#
+#    lineno           -  Current line number
+#    lexpos           -  Current position in the input string
 # -----------------------------------------------------------------------------
 
 class Lexer:
     def __init__(self):
         self.lexre = None             # Master regular expression. This is a list of
                                       # tuples (re,findex) where re is a compiled
                                       # regular expression and findex is a list
                                       # mapping regex group numbers to rules
@@ -100,17 +131,16 @@ class Lexer:
         self.lexpos = 0               # Current position in input text
         self.lexlen = 0               # Length of the input text
         self.lexerrorf = None         # Error rule (if any)
         self.lextokens = None         # List of valid tokens
         self.lexignore = ""           # Ignored characters
         self.lexliterals = ""         # Literal characters that can be passed through
         self.lexmodule = None         # Module
         self.lineno = 1               # Current line number
-        self.lexdebug = 0             # Debugging mode
         self.lexoptimize = 0          # Optimized mode
 
     def clone(self,object=None):
         c = copy.copy(self)
 
         # If the object parameter has been supplied, it means we are attaching the
         # lexer to a new object.  In this case, we have to rebind all methods in
         # the lexstatere and lexstateerrorf tables.
@@ -140,16 +170,17 @@ class Lexer:
     # ------------------------------------------------------------
     def writetab(self,tabfile,outputdir=""):
         if isinstance(tabfile,types.ModuleType):
             return
         basetabfilename = tabfile.split(".")[-1]
         filename = os.path.join(outputdir,basetabfilename)+".py"
         tf = open(filename,"w")
         tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
+        tf.write("_tabversion   = %s\n" % repr(__version__))
         tf.write("_lextokens    = %s\n" % repr(self.lextokens))
         tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
         tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
         tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
 
         tabre = { }
         # Collect all functions in the initial state
         initial = self.lexstatere["INITIAL"]
@@ -179,55 +210,64 @@ class Lexer:
 
     # ------------------------------------------------------------
     # readtab() - Read lexer information from a tab file
     # ------------------------------------------------------------
     def readtab(self,tabfile,fdict):
         if isinstance(tabfile,types.ModuleType):
             lextab = tabfile
         else:
-            exec "import %s as lextab" % tabfile
+            if sys.version_info[0] < 3:
+                exec("import %s as lextab" % tabfile)
+            else:
+                env = { }
+                exec("import %s as lextab" % tabfile, env,env)
+                lextab = env['lextab']
+
+        if getattr(lextab,"_tabversion","0.0") != __version__:
+            raise ImportError("Inconsistent PLY version")
+
         self.lextokens      = lextab._lextokens
         self.lexreflags     = lextab._lexreflags
         self.lexliterals    = lextab._lexliterals
         self.lexstateinfo   = lextab._lexstateinfo
         self.lexstateignore = lextab._lexstateignore
         self.lexstatere     = { }
         self.lexstateretext = { }
         for key,lre in lextab._lexstatere.items():
              titem = []
              txtitem = []
              for i in range(len(lre)):
-                  titem.append((re.compile(lre[i][0],lextab._lexreflags),_names_to_funcs(lre[i][1],fdict)))
+                  titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
                   txtitem.append(lre[i][0])
              self.lexstatere[key] = titem
              self.lexstateretext[key] = txtitem
         self.lexstateerrorf = { }
         for key,ef in lextab._lexstateerrorf.items():
              self.lexstateerrorf[key] = fdict[ef]
         self.begin('INITIAL')
 
     # ------------------------------------------------------------
     # input() - Push a new string into the lexer
     # ------------------------------------------------------------
     def input(self,s):
         # Pull off the first character to see if s looks like a string
         c = s[:1]
-        if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)):
-            raise ValueError, "Expected a string"
+        if not isinstance(c,StringTypes):
+            raise ValueError("Expected a string")
         self.lexdata = s
         self.lexpos = 0
         self.lexlen = len(s)
 
     # ------------------------------------------------------------
     # begin() - Changes the lexing state
     # ------------------------------------------------------------
     def begin(self,state):
-        if not self.lexstatere.has_key(state):
-            raise ValueError, "Undefined state"
+        if not state in self.lexstatere:
+            raise ValueError("Undefined state")
         self.lexre = self.lexstatere[state]
         self.lexretext = self.lexstateretext[state]
         self.lexignore = self.lexstateignore.get(state,"")
         self.lexerrorf = self.lexstateerrorf.get(state,None)
         self.lexstate = state
 
     # ------------------------------------------------------------
     # push_state() - Changes the lexing state and saves old on stack
@@ -250,17 +290,17 @@ class Lexer:
 
     # ------------------------------------------------------------
     # skip() - Skip ahead n characters
     # ------------------------------------------------------------
     def skip(self,n):
         self.lexpos += n
 
     # ------------------------------------------------------------
-    # token() - Return the next token from the Lexer
+    # opttoken() - Return the next token from the Lexer
     #
     # Note: This function has been carefully implemented to be as fast
     # as possible.  Don't make changes unless you really know what
     # you are doing
     # ------------------------------------------------------------
     def token(self):
         # Make local copies of frequently referenced attributes
         lexpos    = self.lexpos
@@ -294,39 +334,35 @@ class Lexer:
                       self.lexpos = m.end()
                       return tok
                    else:
                       lexpos = m.end()
                       break
 
                 lexpos = m.end()
 
-                # if func not callable, it means it's an ignored token
-                if not callable(func):
-                   break
-
                 # If token is processed by a function, call it
 
                 tok.lexer = self      # Set additional attributes useful in token rules
                 self.lexmatch = m
                 self.lexpos = lexpos
 
                 newtok = func(tok)
 
                 # Every function must return a token, if nothing, we just move to next token
                 if not newtok:
                     lexpos    = self.lexpos         # This is here in case user has updated lexpos.
                     lexignore = self.lexignore      # This is here in case there was a state change
                     break
 
                 # Verify type of the token.  If not in the token map, raise an error
                 if not self.lexoptimize:
-                    if not self.lextokens.has_key(newtok.type):
-                        raise LexError, ("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
-                            func.func_code.co_filename, func.func_code.co_firstlineno,
+                    if not newtok.type in self.lextokens:
+                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+                            func_code(func).co_filename, func_code(func).co_firstlineno,
                             func.__name__, newtok.type),lexdata[lexpos:])
 
                 return newtok
             else:
                 # No match, see if in literals
                 if lexdata[lexpos] in self.lexliterals:
                     tok = LexToken()
                     tok.value = lexdata[lexpos]
@@ -343,70 +379,70 @@ class Lexer:
                     tok.lineno = self.lineno
                     tok.type = "error"
                     tok.lexer = self
                     tok.lexpos = lexpos
                     self.lexpos = lexpos
                     newtok = self.lexerrorf(tok)
                     if lexpos == self.lexpos:
                         # Error method didn't change text position at all. This is an error.
-                        raise LexError, ("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                     lexpos = self.lexpos
                     if not newtok: continue
                     return newtok
 
                 self.lexpos = lexpos
-                raise LexError, ("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
+                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
 
         self.lexpos = lexpos + 1
         if self.lexdata is None:
-             raise RuntimeError, "No input string given with input()"
+             raise RuntimeError("No input string given with input()")
         return None
 
+    # Iterator interface
+    def __iter__(self):
+        return self
+
+    def next(self):
+        t = self.token()
+        if t is None:
+            raise StopIteration
+        return t
+
+    __next__ = next
+
 # -----------------------------------------------------------------------------
-# _validate_file()
+#                           ==== Lex Builder ===
 #
-# This checks to see if there are duplicated t_rulename() functions or strings
-# in the parser input file.  This is done using a simple regular expression
-# match on each line in the given file.  If the file can't be located or opened,
-# a true result is returned by default.
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
 # -----------------------------------------------------------------------------
 
-def _validate_file(filename):
-    import os.path
-    base,ext = os.path.splitext(filename)
-    if ext != '.py': return 1        # No idea what the file is. Return OK
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack.  This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
 
+def get_caller_module_dict(levels):
     try:
-        f = open(filename)
-        lines = f.readlines()
-        f.close()
-    except IOError:
-        return 1                     # Couldn't find the file.  Don't worry about it
+        raise RuntimeError
+    except RuntimeError:
+        e,b,t = sys.exc_info()
+        f = t.tb_frame
+        while levels > 0:
+            f = f.f_back                   
+            levels -= 1
+        ldict = f.f_globals.copy()
+        if f.f_globals != f.f_locals:
+            ldict.update(f.f_locals)
 
-    fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
-    sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
-
-    counthash = { }
-    linen = 1
-    noerror = 1
-    for l in lines:
-        m = fre.match(l)
-        if not m:
-            m = sre.match(l)
-        if m:
-            name = m.group(1)
-            prev = counthash.get(name)
-            if not prev:
-                counthash[name] = linen
-            else:
-                print >>sys.stderr, "%s:%d: Rule %s redefined. Previously defined on line %d" % (filename,linen,name,prev)
-                noerror = 0
-        linen += 1
-    return noerror
+        return ldict
 
 # -----------------------------------------------------------------------------
 # _funcs_to_names()
 #
 # Given a list of regular expression functions, this converts it to a list
 # suitable for output to a table file
 # -----------------------------------------------------------------------------
 
@@ -461,17 +497,17 @@ def _form_master_re(relist,reflags,ldict
             elif handle is not None:
                 lexindexnames[i] = f
                 if f.find("ignore_") > 0:
                     lexindexfunc[i] = (None,None)
                 else:
                     lexindexfunc[i] = (None, toknames[f])
         
         return [(lexre,lexindexfunc)],[regex],[lexindexnames]
-    except Exception,e:
+    except Exception:
         m = int(len(relist)/2)
         if m == 0: m = 1
         llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
         rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
         return llist+rlist, lre+rre, lnames+rnames
 
 # -----------------------------------------------------------------------------
 # def _statetoken(s,names)
@@ -481,360 +517,487 @@ def _form_master_re(relist,reflags,ldict
 # is a tuple of state names and tokenname is the name of the token.  For example,
 # calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
 # -----------------------------------------------------------------------------
 
 def _statetoken(s,names):
     nonstate = 1
     parts = s.split("_")
     for i in range(1,len(parts)):
-         if not names.has_key(parts[i]) and parts[i] != 'ANY': break
+         if not parts[i] in names and parts[i] != 'ANY': break
     if i > 1:
        states = tuple(parts[1:i])
     else:
        states = ('INITIAL',)
 
     if 'ANY' in states:
-       states = tuple(names.keys())
+       states = tuple(names)
 
     tokenname = "_".join(parts[i:])
     return (states,tokenname)
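
Concretely, with the state map below, the prefix splitting behaves as follows
(a hypothetical session):

    names = {'INITIAL': 'inclusive', 'foo': 'exclusive', 'bar': 'exclusive'}

    _statetoken("t_NUMBER", names)        # (('INITIAL',), 'NUMBER')
    _statetoken("t_foo_bar_SPAM", names)  # (('foo', 'bar'), 'SPAM')
    _statetoken("t_ANY_error", names)     # (all states in names, 'error')
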
 
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
+class LexerReflect(object):
+    def __init__(self,ldict,log=None,reflags=0):
+        self.ldict      = ldict
+        self.error_func = None
+        self.tokens     = []
+        self.reflags    = reflags
+        self.stateinfo  = { 'INITIAL' : 'inclusive'}
+        self.files      = {}
+        self.error      = 0
+
+        if log is None:
+            self.log = PlyLogger(sys.stderr)
+        else:
+            self.log = log
+
+    # Get all of the basic information
+    def get_all(self):
+        self.get_tokens()
+        self.get_literals()
+        self.get_states()
+        self.get_rules()
+        
+    # Validate all of the information
+    def validate_all(self):
+        self.validate_tokens()
+        self.validate_literals()
+        self.validate_rules()
+        return self.error
+
+    # Get the tokens map
+    def get_tokens(self):
+        tokens = self.ldict.get("tokens",None)
+        if not tokens:
+            self.log.error("No token list is defined")
+            self.error = 1
+            return
+
+        if not isinstance(tokens,(list, tuple)):
+            self.log.error("tokens must be a list or tuple")
+            self.error = 1
+            return
+        
+        if not tokens:
+            self.log.error("tokens is empty")
+            self.error = 1
+            return
+
+        self.tokens = tokens
+
+    # Validate the tokens
+    def validate_tokens(self):
+        terminals = {}
+        for n in self.tokens:
+            if not _is_identifier.match(n):
+                self.log.error("Bad token name '%s'",n)
+                self.error = 1
+            if n in terminals:
+                self.log.warning("Token '%s' multiply defined", n)
+            terminals[n] = 1
+
+    # Get the literals specifier
+    def get_literals(self):
+        self.literals = self.ldict.get("literals","")
+
+    # Validate literals
+    def validate_literals(self):
+        try:
+            for c in self.literals:
+                if not isinstance(c,StringTypes) or len(c) > 1:
+                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
+                    self.error = 1
+                    continue
+
+        except TypeError:
+            self.log.error("Invalid literals specification. literals must be a sequence of characters")
+            self.error = 1
+
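
Both of the usual literals forms pass this check; for example (illustrative
values):

    literals = "+-*/"                # a string of single characters
    # or equivalently a sequence of one-character strings:
    literals = ['+', '-', '*', '/']
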
+    def get_states(self):
+        self.states = self.ldict.get("states",None)
+        # Build statemap
+        if self.states:
+             if not isinstance(self.states,(tuple,list)):
+                  self.log.error("states must be defined as a tuple or list")
+                  self.error = 1
+             else:
+                  for s in self.states:
+                        if not isinstance(s,tuple) or len(s) != 2:
+                               self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
+                               self.error = 1
+                               continue
+                        name, statetype = s
+                        if not isinstance(name,StringTypes):
+                               self.log.error("State name %s must be a string", repr(name))
+                               self.error = 1
+                               continue
+                        if not (statetype == 'inclusive' or statetype == 'exclusive'):
+                               self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
+                               self.error = 1
+                               continue
+                        if name in self.stateinfo:
+                               self.log.error("State '%s' already defined",name)
+                               self.error = 1
+                               continue
+                        self.stateinfo[name] = statetype
+
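
A states declaration that satisfies these checks looks like the following
sketch (state names chosen for illustration):

    states = (
        ('comment', 'exclusive'),   # rules here replace the INITIAL rules
        ('strict',  'inclusive'),   # rules here extend the INITIAL rules
    )
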
+    # Get all of the symbols with a t_ prefix and sort them into various
+    # categories (functions, strings, error functions, and ignore characters)
+
+    def get_rules(self):
+        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
+
+        # Now build up a list of functions and a list of strings
+
+        self.toknames = { }        # Mapping of symbols to token names
+        self.funcsym =  { }        # Symbols defined as functions
+        self.strsym =   { }        # Symbols defined as strings
+        self.ignore   = { }        # Ignore strings by state
+        self.errorf   = { }        # Error functions by state
+
+        for s in self.stateinfo:
+             self.funcsym[s] = []
+             self.strsym[s] = []
+
+        if len(tsymbols) == 0:
+            self.log.error("No rules of the form t_rulename are defined")
+            self.error = 1
+            return
+
+        for f in tsymbols:
+            t = self.ldict[f]
+            states, tokname = _statetoken(f,self.stateinfo)
+            self.toknames[f] = tokname
+
+            if hasattr(t,"__call__"):
+                if tokname == 'error':
+                    for s in states:
+                        self.errorf[s] = t
+                elif tokname == 'ignore':
+                    line = func_code(t).co_firstlineno
+                    file = func_code(t).co_filename
+                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
+                    self.error = 1
+                else:
+                    for s in states: 
+                        self.funcsym[s].append((f,t))
+            elif isinstance(t, StringTypes):
+                if tokname == 'ignore':
+                    for s in states:
+                        self.ignore[s] = t
+                    if "\\" in t:
+                        self.log.warning("%s contains a literal backslash '\\'",f)
+
+                elif tokname == 'error':
+                    self.log.error("Rule '%s' must be defined as a function", f)
+                    self.error = 1
+                else:
+                    for s in states: 
+                        self.strsym[s].append((f,t))
+            else:
+                self.log.error("%s not defined as a function or string", f)
+                self.error = 1
+
+        # Sort the functions by line number
+        for f in self.funcsym.values():
+            if sys.version_info[0] < 3:
+                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
+            else:
+                # Python 3.0
+                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
+
+        # Sort the strings by regular expression length
+        for s in self.strsym.values():
+            if sys.version_info[0] < 3:
+                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+            else:
+                # Python 3.0
+                s.sort(key=lambda x: len(x[1]),reverse=True)
+
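
For reference, the categories collected above correspond to the usual PLY rule
forms; a minimal module fragment (token names are illustrative):

    tokens = ('NUMBER', 'PLUS')

    t_PLUS = r'\+'                # string rule: sorted by decreasing regex length

    def t_NUMBER(t):              # function rule: kept in definition order
        r'\d+'
        t.value = int(t.value)
        return t

    t_ignore = ' \t'              # must be a string, per the checks in this class

    def t_error(t):               # must be a function, per the checks in this class
        t.lexer.skip(1)
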
+    # Validate all of the t_rules collected 
+    def validate_rules(self):
+        for state in self.stateinfo:
+            # Validate all rules defined by functions
+
+            for fname, f in self.funcsym[state]:
+                line = func_code(f).co_firstlineno
+                file = func_code(f).co_filename
+                self.files[file] = 1
+
+                tokname = self.toknames[fname]
+                if isinstance(f, types.MethodType):
+                    reqargs = 2
+                else:
+                    reqargs = 1
+                nargs = func_code(f).co_argcount
+                if nargs > reqargs:
+                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+                    self.error = 1
+                    continue
+
+                if nargs < reqargs:
+                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+                    self.error = 1
+                    continue
+
+                if not f.__doc__:
+                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
+                    self.error = 1
+                    continue
+
+                try:
+                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
+                    if c.match(""):
+                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
+                        self.error = 1
+                except re.error:
+                    _etype, e, _etrace = sys.exc_info()
+                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
+                    if '#' in f.__doc__:
+                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
+                    self.error = 1
+
+            # Validate all rules defined by strings
+            for name,r in self.strsym[state]:
+                tokname = self.toknames[name]
+                if tokname == 'error':
+                    self.log.error("Rule '%s' must be defined as a function", name)
+                    self.error = 1
+                    continue
+
+                if not tokname in self.tokens and tokname.find("ignore_") < 0:
+                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
+                    self.error = 1
+                    continue
+
+                try:
+                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
+                    if (c.match("")):
+                         self.log.error("Regular expression for rule '%s' matches empty string",name)
+                         self.error = 1
+                except re.error:
+                    _etype, e, _etrace = sys.exc_info()
+                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
+                    if '#' in r:
+                         self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
+                    self.error = 1
+
+            if not self.funcsym[state] and not self.strsym[state]:
+                self.log.error("No rules defined for state '%s'",state)
+                self.error = 1
+
+            # Validate the error function
+            efunc = self.errorf.get(state,None)
+            if efunc:
+                f = efunc
+                line = func_code(f).co_firstlineno
+                file = func_code(f).co_filename
+                self.files[file] = 1
+
+                if isinstance(f, types.MethodType):
+                    reqargs = 2
+                else:
+                    reqargs = 1
+                nargs = func_code(f).co_argcount
+                if nargs > reqargs:
+                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
+                    self.error = 1
+
+                if nargs < reqargs:
+                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
+                    self.error = 1
+
+        for f in self.files:
+            self.validate_file(f)
+
+
+    # -----------------------------------------------------------------------------
+    # validate_file()
+    #
+    # This checks to see if there are duplicated t_rulename() functions or strings
+    # in the parser input file.  This is done using a simple regular expression
+    # match on each line in the given file.  
+    # -----------------------------------------------------------------------------
+
+    def validate_file(self,filename):
+        import os.path
+        base,ext = os.path.splitext(filename)
+        if ext != '.py': return        # No idea what the file is; assume it's fine
+
+        try:
+            f = open(filename)
+            lines = f.readlines()
+            f.close()
+        except IOError:
+            return                      # Couldn't find the file.  Don't worry about it
+
+        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+        counthash = { }
+        linen = 1
+        for l in lines:
+            m = fre.match(l)
+            if not m:
+                m = sre.match(l)
+            if m:
+                name = m.group(1)
+                prev = counthash.get(name)
+                if not prev:
+                    counthash[name] = linen
+                else:
+                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
+                    self.error = 1
+            linen += 1
+            
 # -----------------------------------------------------------------------------
 # lex(module)
 #
 # Build all of the regular expression rules from definitions in the supplied module
 # -----------------------------------------------------------------------------
-def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir=""):
+def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
     global lexer
     ldict = None
     stateinfo  = { 'INITIAL' : 'inclusive'}
-    error = 0
-    files = { }
     lexobj = Lexer()
-    lexobj.lexdebug = debug
     lexobj.lexoptimize = optimize
     global token,input
 
-    if nowarn: warn = 0
-    else: warn = 1
+    if errorlog is None:
+        errorlog = PlyLogger(sys.stderr)
 
+    if debug:
+        if debuglog is None:
+            debuglog = PlyLogger(sys.stderr)
+
+    # Get the module dictionary used for the lexer
     if object: module = object
 
     if module:
-        # User supplied a module object.
-        if isinstance(module, types.ModuleType):
-            ldict = module.__dict__
-        elif isinstance(module, _INSTANCETYPE):
-            _items = [(k,getattr(module,k)) for k in dir(module)]
-            ldict = { }
-            for (i,v) in _items:
-                ldict[i] = v
-        else:
-            raise ValueError,"Expected a module or instance"
-        lexobj.lexmodule = module
+        _items = [(k,getattr(module,k)) for k in dir(module)]
+        ldict = dict(_items)
+    else:
+        ldict = get_caller_module_dict(2)
 
-    else:
-        # No module given.  We might be able to get information from the caller.
-        try:
-            raise RuntimeError
-        except RuntimeError:
-            e,b,t = sys.exc_info()
-            f = t.tb_frame
-            f = f.f_back                    # Walk out to our calling function
-            if f.f_globals is f.f_locals:   # Collect global and local variations from caller
-               ldict = f.f_globals
-            else:
-               ldict = f.f_globals.copy()
-               ldict.update(f.f_locals)
+    # Collect parser information from the dictionary
+    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
+    linfo.get_all()
+    if not optimize:
+        if linfo.validate_all():
+            raise SyntaxError("Can't build lexer")
 
     if optimize and lextab:
         try:
             lexobj.readtab(lextab,ldict)
             token = lexobj.token
             input = lexobj.input
             lexer = lexobj
             return lexobj
 
         except ImportError:
             pass
 
-    # Get the tokens, states, and literals variables (if any)
-
-    tokens = ldict.get("tokens",None)
-    states = ldict.get("states",None)
-    literals = ldict.get("literals","")
-
-    if not tokens:
-        raise SyntaxError,"lex: module does not define 'tokens'"
-
-    if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
-        raise SyntaxError,"lex: tokens must be a list or tuple."
+    # Dump some basic debugging information
+    if debug:
+        debuglog.info("lex: tokens   = %r", linfo.tokens)
+        debuglog.info("lex: literals = %r", linfo.literals)
+        debuglog.info("lex: states   = %r", linfo.stateinfo)
 
     # Build a dictionary of valid token names
     lexobj.lextokens = { }
-    if not optimize:
-        for n in tokens:
-            if not _is_identifier.match(n):
-                print >>sys.stderr, "lex: Bad token name '%s'" % n
-                error = 1
-            if warn and lexobj.lextokens.has_key(n):
-                print >>sys.stderr, "lex: Warning. Token '%s' multiply defined." % n
-            lexobj.lextokens[n] = None
+    for n in linfo.tokens:
+        lexobj.lextokens[n] = 1
+
+    # Get literals specification
+    if isinstance(linfo.literals,(list,tuple)):
+        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
     else:
-        for n in tokens: lexobj.lextokens[n] = None
+        lexobj.lexliterals = linfo.literals
 
-    if debug:
-        print "lex: tokens = '%s'" % lexobj.lextokens.keys()
-
-    try:
-         for c in literals:
-               if not (isinstance(c,types.StringType) or isinstance(c,types.UnicodeType)) or len(c) > 1:
-                    print >>sys.stderr, "lex: Invalid literal %s. Must be a single character" % repr(c)
-                    error = 1
-                    continue
-
-    except TypeError:
-         print >>sys.stderr, "lex: Invalid literals specification. literals must be a sequence of characters."
-         error = 1
-
-    lexobj.lexliterals = literals
-
-    # Build statemap
-    if states:
-         if not (isinstance(states,types.TupleType) or isinstance(states,types.ListType)):
-              print >>sys.stderr, "lex: states must be defined as a tuple or list."
-              error = 1
-         else:
-              for s in states:
-                    if not isinstance(s,types.TupleType) or len(s) != 2:
-                           print >>sys.stderr, "lex: invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')" % repr(s)
-                           error = 1
-                           continue
-                    name, statetype = s
-                    if not isinstance(name,types.StringType):
-                           print >>sys.stderr, "lex: state name %s must be a string" % repr(name)
-                           error = 1
-                           continue
-                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
-                           print >>sys.stderr, "lex: state type for state %s must be 'inclusive' or 'exclusive'" % name
-                           error = 1
-                           continue
-                    if stateinfo.has_key(name):
-                           print >>sys.stderr, "lex: state '%s' already defined." % name
-                           error = 1
-                           continue
-                    stateinfo[name] = statetype
-
-    # Get a list of symbols with the t_ or s_ prefix
-    tsymbols = [f for f in ldict.keys() if f[:2] == 't_' ]
-
-    # Now build up a list of functions and a list of strings
-
-    funcsym =  { }        # Symbols defined as functions
-    strsym =   { }        # Symbols defined as strings
-    toknames = { }        # Mapping of symbols to token names
-
-    for s in stateinfo.keys():
-         funcsym[s] = []
-         strsym[s] = []
-
-    ignore   = { }        # Ignore strings by state
-    errorf   = { }        # Error functions by state
-
-    if len(tsymbols) == 0:
-        raise SyntaxError,"lex: no rules of the form t_rulename are defined."
-
-    for f in tsymbols:
-        t = ldict[f]
-        states, tokname = _statetoken(f,stateinfo)
-        toknames[f] = tokname
-
-        if callable(t):
-            for s in states: funcsym[s].append((f,t))
-        elif (isinstance(t, types.StringType) or isinstance(t,types.UnicodeType)):
-            for s in states: strsym[s].append((f,t))
-        else:
-            print >>sys.stderr, "lex: %s not defined as a function or string" % f
-            error = 1
-
-    # Sort the functions by line number
-    for f in funcsym.values():
-        f.sort(lambda x,y: cmp(x[1].func_code.co_firstlineno,y[1].func_code.co_firstlineno))
-
-    # Sort the strings by regular expression length
-    for s in strsym.values():
-        s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
+    # Get the stateinfo dictionary
+    stateinfo = linfo.stateinfo
 
     regexs = { }
-
     # Build the master regular expressions
-    for state in stateinfo.keys():
+    for state in stateinfo:
         regex_list = []
 
         # Add rules defined by functions first
-        for fname, f in funcsym[state]:
-            line = f.func_code.co_firstlineno
-            file = f.func_code.co_filename
-            files[file] = None
-            tokname = toknames[fname]
-
-            ismethod = isinstance(f, types.MethodType)
-
-            if not optimize:
-                nargs = f.func_code.co_argcount
-                if ismethod:
-                    reqargs = 2
-                else:
-                    reqargs = 1
-                if nargs > reqargs:
-                    print >>sys.stderr, "%s:%d: Rule '%s' has too many arguments." % (file,line,f.__name__)
-                    error = 1
-                    continue
-
-                if nargs < reqargs:
-                    print >>sys.stderr, "%s:%d: Rule '%s' requires an argument." % (file,line,f.__name__)
-                    error = 1
-                    continue
-
-                if tokname == 'ignore':
-                    print >>sys.stderr, "%s:%d: Rule '%s' must be defined as a string." % (file,line,f.__name__)
-                    error = 1
-                    continue
-
-            if tokname == 'error':
-                errorf[state] = f
-                continue
-
-            if f.__doc__:
-                if not optimize:
-                    try:
-                        c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | reflags)
-                        if c.match(""):
-                             print >>sys.stderr, "%s:%d: Regular expression for rule '%s' matches empty string." % (file,line,f.__name__)
-                             error = 1
-                             continue
-                    except re.error,e:
-                        print >>sys.stderr, "%s:%d: Invalid regular expression for rule '%s'. %s" % (file,line,f.__name__,e)
-                        if '#' in f.__doc__:
-                             print >>sys.stderr, "%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'." % (file,line, f.__name__)
-                        error = 1
-                        continue
-
-                    if debug:
-                        print "lex: Adding rule %s -> '%s' (state '%s')" % (f.__name__,f.__doc__, state)
-
-                # Okay. The regular expression seemed okay.  Let's append it to the master regular
-                # expression we're building
-
-                regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
-            else:
-                print >>sys.stderr, "%s:%d: No regular expression defined for rule '%s'" % (file,line,f.__name__)
+        for fname, f in linfo.funcsym[state]:
+            line = func_code(f).co_firstlineno
+            file = func_code(f).co_filename
+            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
+            if debug:
+                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
 
         # Now add all of the simple rules
-        for name,r in strsym[state]:
-            tokname = toknames[name]
-
-            if tokname == 'ignore':
-                 if "\\" in r:
-                      print >>sys.stderr, "lex: Warning. %s contains a literal backslash '\\'" % name
-                 ignore[state] = r
-                 continue
-
-            if not optimize:
-                if tokname == 'error':
-                    raise SyntaxError,"lex: Rule '%s' must be defined as a function" % name
-                    error = 1
-                    continue
-
-                if not lexobj.lextokens.has_key(tokname) and tokname.find("ignore_") < 0:
-                    print >>sys.stderr, "lex: Rule '%s' defined for an unspecified token %s." % (name,tokname)
-                    error = 1
-                    continue
-                try:
-                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | reflags)
-                    if (c.match("")):
-                         print >>sys.stderr, "lex: Regular expression for rule '%s' matches empty string." % name
-                         error = 1
-                         continue
-                except re.error,e:
-                    print >>sys.stderr, "lex: Invalid regular expression for rule '%s'. %s" % (name,e)
-                    if '#' in r:
-                         print >>sys.stderr, "lex: Make sure '#' in rule '%s' is escaped with '\\#'." % name
-
-                    error = 1
-                    continue
-                if debug:
-                    print "lex: Adding rule %s -> '%s' (state '%s')" % (name,r,state)
-
+        for name,r in linfo.strsym[state]:
             regex_list.append("(?P<%s>%s)" % (name,r))
-
-        if not regex_list:
-             print >>sys.stderr, "lex: No rules defined for state '%s'" % state
-             error = 1
+            if debug:
+                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
 
         regexs[state] = regex_list
 
-
-    if not optimize:
-        for f in files.keys():
-           if not _validate_file(f):
-                error = 1
-
-    if error:
-        raise SyntaxError,"lex: Unable to build lexer."
-
-    # From this point forward, we're reasonably confident that we can build the lexer.
-    # No more errors will be generated, but there might be some warning messages.
-
     # Build the master regular expressions
 
-    for state in regexs.keys():
-        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,toknames)
+    if debug:
+        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
+
+    for state in regexs:
+        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
         lexobj.lexstatere[state] = lexre
         lexobj.lexstateretext[state] = re_text
         lexobj.lexstaterenames[state] = re_names
         if debug:
             for i in range(len(re_text)):
-                 print "lex: state '%s'. regex[%d] = '%s'" % (state, i, re_text[i])
+                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
 
-    # For inclusive states, we need to add the INITIAL state
-    for state,type in stateinfo.items():
-        if state != "INITIAL" and type == 'inclusive':
+    # For inclusive states, we need to add the regular expressions from the INITIAL state
+    for state,stype in stateinfo.items():
+        if state != "INITIAL" and stype == 'inclusive':
              lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
              lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
              lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
 
     lexobj.lexstateinfo = stateinfo
     lexobj.lexre = lexobj.lexstatere["INITIAL"]
     lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
+    lexobj.lexreflags = reflags
 
     # Set up ignore variables
-    lexobj.lexstateignore = ignore
+    lexobj.lexstateignore = linfo.ignore
     lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
 
     # Set up error functions
-    lexobj.lexstateerrorf = errorf
-    lexobj.lexerrorf = errorf.get("INITIAL",None)
-    if warn and not lexobj.lexerrorf:
-        print >>sys.stderr, "lex: Warning. no t_error rule is defined."
+    lexobj.lexstateerrorf = linfo.errorf
+    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
+    if not lexobj.lexerrorf:
+        errorlog.warning("No t_error rule is defined")
 
     # Check state information for ignore and error rules
     for s,stype in stateinfo.items():
         if stype == 'exclusive':
-              if warn and not errorf.has_key(s):
-                   print >>sys.stderr, "lex: Warning. no error rule is defined for exclusive state '%s'" % s
-              if warn and not ignore.has_key(s) and lexobj.lexignore:
-                   print >>sys.stderr, "lex: Warning. no ignore rule is defined for exclusive state '%s'" % s
+              if not s in linfo.errorf:
+                   errorlog.warning("No error rule is defined for exclusive state '%s'", s)
+              if not s in linfo.ignore and lexobj.lexignore:
+                   errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
         elif stype == 'inclusive':
-              if not errorf.has_key(s):
-                   errorf[s] = errorf.get("INITIAL",None)
-              if not ignore.has_key(s):
-                   ignore[s] = ignore.get("INITIAL","")
-
+              if not s in linfo.errorf:
+                   linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
+              if not s in linfo.ignore:
+                   linfo.ignore[s] = linfo.ignore.get("INITIAL","")
 
     # Create global versions of the token() and input() functions
     token = lexobj.token
     input = lexobj.input
     lexer = lexobj
 
     # If in optimize mode, we write the lextab
     if lextab and optimize:
@@ -851,45 +1014,44 @@ def lex(module=None,object=None,debug=0,
 def runmain(lexer=None,data=None):
     if not data:
         try:
             filename = sys.argv[1]
             f = open(filename)
             data = f.read()
             f.close()
         except IndexError:
-            print "Reading from standard input (type EOF to end):"
+            sys.stdout.write("Reading from standard input (type EOF to end):\n")
             data = sys.stdin.read()
 
     if lexer:
         _input = lexer.input
     else:
         _input = input
     _input(data)
     if lexer:
         _token = lexer.token
     else:
         _token = token
 
     while 1:
         tok = _token()
         if not tok: break
-        print "(%s,%r,%d,%d)" % (tok.type, tok.value, tok.lineno,tok.lexpos)
-
+        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno,tok.lexpos))
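
Putting the new lex() signature together with runmain(), a caller might drive
the updated module like this (a sketch; errorlog/debuglog accept any object
with logging-style debug/info/warning/error methods, so a standard
logging.Logger also works):

    import logging
    import lex                    # in-tree import; upstream PLY 3.x uses ply.lex

    tokens = ('WORD',)
    t_WORD   = r'\w+'
    t_ignore = ' \t\n'

    def t_error(t):
        t.lexer.skip(1)

    log = logging.getLogger('lex')
    lexer = lex.lex(debug=1, debuglog=log, errorlog=log)
    lex.runmain(lexer, "hello world")
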
 
 # -----------------------------------------------------------------------------
 # @TOKEN(regex)
 #
 # This decorator function can be used to set the regex expression on a function
 # when its docstring might need to be set in an alternative way
 # -----------------------------------------------------------------------------
 
 def TOKEN(r):
     def set_doc(f):
-        if callable(r):
+        if hasattr(r,"__call__"):
             f.__doc__ = r.__doc__
         else:
             f.__doc__ = r
         return f
     return set_doc
 
 # Alternative spelling of the TOKEN decorator
 Token = TOKEN
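
A typical use of the decorator builds the docstring regex from named parts;
for example (identifier grammar chosen purely for illustration):

    digit      = r'([0-9])'
    nondigit   = r'([_A-Za-z])'
    identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'

    @TOKEN(identifier)
    def t_ID(t):
        # t_ID.__doc__ is now the identifier pattern above
        return t
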
--- mozilla/other-licenses/ply/ply/yacc.py
+++ mozilla/other-licenses/ply/ply/yacc.py
@@ -1,31 +1,40 @@
-#-----------------------------------------------------------------------------
+# -----------------------------------------------------------------------------
 # ply: yacc.py
 #
-# Author(s): David M. Beazley (dave@dabeaz.com)
+# Copyright (C) 2001-2009,
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
 #
-# Copyright (C) 2001-2008, David M. Beazley
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+# 
+# * Redistributions of source code must retain the above copyright notice,
+#   this list of conditions and the following disclaimer.  
+# * Redistributions in binary form must reproduce the above copyright notice, 
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.  
+# * Neither the name of David Beazley or Dabeaz LLC may be used to
+#   endorse or promote products derived from this software without
+#   specific prior written permission.
 #
-# This library is free software; you can redistribute it and/or
-# modify it under the terms of the GNU Lesser General Public
-# License as published by the Free Software Foundation; either
-# version 2.1 of the License, or (at your option) any later version.
-#
-# This library is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-# Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this library; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-#
-# See the file COPYING for a complete copy of the LGPL.
-#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
 #
 # This implements an LR parser that is constructed from grammar rules defined
 # as Python functions. The grammar is specified by supplying the BNF inside
 # Python documentation strings.  The inspiration for this technique was borrowed
 # from John Aycock's Spark parsing system.  PLY might be viewed as a cross between
 # Spark and the GNU bison utility.
 #
 # The current implementation is only somewhat object-oriented. The
@@ -45,18 +54,18 @@
 #
 # Construction of LR parsing tables is fairly complicated and expensive.
 # To make this module run fast, a *LOT* of work has been put into
 # optimization---often at the expense of readability and what one might
 # consider to be good Python "coding style."   Modify the code at your
 # own risk!
 # ----------------------------------------------------------------------------
 
-__version__    = "2.5"
-__tabversion__ = "2.4"       # Table version
+__version__    = "3.3"
+__tabversion__ = "3.2"       # Table version
 
 #-----------------------------------------------------------------------------
 #                     === User configurable parameters ===
 #
 # Change these to modify the default behavior of yacc (if you wish)
 #-----------------------------------------------------------------------------
 
 yaccdebug   = 1                # Debugging mode.  If set, yacc generates a
@@ -66,34 +75,93 @@ debug_file  = 'parser.out'     # Default
 tab_module  = 'parsetab'       # Default name of the table module
 default_lr  = 'LALR'           # Default LR table generation method
 
 error_count = 3                # Number of symbols that must be shifted to leave recovery mode
 
 yaccdevel   = 0                # Set to True if developing yacc.  This turns off optimized
                                # implementations of certain functions.
 
-import re, types, sys, cStringIO, md5, os.path
-
+resultlimit = 40               # Size limit of results when running in debug mode.
+
+pickle_protocol = 0            # Protocol to use when writing pickle files
+
+import re, types, sys, os.path
+
+# Compatibility function for python 2.6/3.0
+if sys.version_info[0] < 3:
+    def func_code(f):
+        return f.func_code
+else:
+    def func_code(f):
+        return f.__code__
+
+# Compatibility
+try:
+    MAXINT = sys.maxint
+except AttributeError:
+    MAXINT = sys.maxsize
+
+# Python 2.x/3.0 compatibility.
+def load_ply_lex():
+    if sys.version_info[0] < 3:
+        import lex
+    else:
+        import ply.lex as lex
+    return lex
+
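
These shims let the rest of the module stay version-agnostic; for instance
(a hypothetical rule function):

    def t_NUMBER(t):
        r'\d+'
        return t

    # Works on both 2.x (f.func_code) and 3.x (f.__code__):
    line = func_code(t_NUMBER).co_firstlineno
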
+# This object is a stand-in for a logging object created by the 
+# logging module.   PLY will use this by default to create things
+# such as the parser.out file.  If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+    def __init__(self,f):
+        self.f = f
+    def debug(self,msg,*args,**kwargs):
+        self.f.write((msg % args) + "\n")
+    info     = debug
+
+    def warning(self,msg,*args,**kwargs):
+        self.f.write("WARNING: "+ (msg % args) + "\n")
+
+    def error(self,msg,*args,**kwargs):
+        self.f.write("ERROR: " + (msg % args) + "\n")
+
+    critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+    def __getattribute__(self,name):
+        return self
+    def __call__(self,*args,**kwargs):
+        return self
+        
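
Both helper classes follow the logging-module calling convention
(printf-style format arguments), so the following behave as sketched:

    plog = PlyLogger(sys.stderr)
    plog.warning("no %s rule is defined", "t_error")
    # -> writes "WARNING: no t_error rule is defined"

    nlog = NullLogger()
    nlog.error("ignored %s", "entirely")   # silently swallowed
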
 # Exception raised for yacc-related errors
 class YaccError(Exception):   pass
 
-# Exception raised for errors raised in production rules
-class SyntaxError(Exception): pass
-
-
-# Available instance types.  This is used when parsers are defined by a class.
-# it's a little funky because I want to preserve backwards compatibility
-# with Python 2.0 where types.ObjectType is undefined.
-
-try:
-    _INSTANCETYPE = (types.InstanceType, types.ObjectType)
-except AttributeError:
-    _INSTANCETYPE = types.InstanceType
-    class object: pass     # Note: needed if no new-style classes present
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+    repr_str = repr(r)
+    if '\n' in repr_str: repr_str = repr(repr_str)
+    if len(repr_str) > resultlimit:
+        repr_str = repr_str[:resultlimit]+" ..."
+    result = "<%s @ 0x%x> (%s)" % (type(r).__name__,id(r),repr_str)
+    return result
+
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+    repr_str = repr(r)
+    if '\n' in repr_str: repr_str = repr(repr_str)
+    if len(repr_str) < 16:
+        return repr_str
+    else:
+        return "<%s @ 0x%x>" % (type(r).__name__,id(r))
 
 #-----------------------------------------------------------------------------
 #                        ===  LR Parsing Engine ===
 #
 # The following classes are used for the LR parser itself.  These are not
 # used during table construction and are independent of the actual LR
 # table generation algorithm
 #-----------------------------------------------------------------------------
@@ -137,16 +205,19 @@ class YaccProduction:
         return [s.value for s in self.slice[i:j]]
 
     def __len__(self):
         return len(self.slice)
 
     def lineno(self,n):
         return getattr(self.slice[n],"lineno",0)
 
+    def set_lineno(self,n,lineno):
+        self.slice[n].lineno = lineno
+
     def linespan(self,n):
         startline = getattr(self.slice[n],"lineno",0)
         endline = getattr(self.slice[n],"endlineno",startline)
         return startline,endline
 
     def lexpos(self,n):
         return getattr(self.slice[n],"lexpos",0)
 
@@ -154,51 +225,44 @@ class YaccProduction:
         startpos = getattr(self.slice[n],"lexpos",0)
         endpos = getattr(self.slice[n],"endlexpos",startpos)
         return startpos,endpos
 
     def error(self):
        raise SyntaxError
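
Inside a grammar rule, these accessors are used on the slice object passed to
the rule; a hypothetical production, for illustration only:

    def p_expression_plus(p):
        'expression : expression PLUS term'
        p[0] = p[1] + p[3]
        p.set_lineno(0, p.lineno(1))   # propagate position info to the result
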
 
 
-# The LR Parsing engine.   This is defined as a class so that multiple parsers
-# can exist in the same process.  A user never instantiates this directly.
-# Instead, the global yacc() function should be used to create a suitable Parser
-# object.
-
-class Parser:
-    def __init__(self,magic=None):
-
-        # This is a hack to keep users from trying to instantiate a Parser
-        # object directly.
-
-        if magic != "xyzzy":
-            raise YaccError, "Can't directly instantiate Parser. Use yacc() instead."
-
-        # Reset internal state
-        self.productions = None          # List of productions
-        self.errorfunc   = None          # Error handling function
-        self.action      = { }           # LR Action table
-        self.goto        = { }           # LR goto table
-        self.require     = { }           # Attribute require table
-        self.method      = "Unknown LR"  # Table construction method used
+# -----------------------------------------------------------------------------
+#                               == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
+
+class LRParser:
+    def __init__(self,lrtab,errorf):
+        self.productions = lrtab.lr_productions
+        self.action      = lrtab.lr_action
+        self.goto        = lrtab.lr_goto
+        self.errorfunc   = errorf
 
     def errok(self):
         self.errorok     = 1
 
     def restart(self):
         del self.statestack[:]
         del self.symstack[:]
         sym = YaccSymbol()
         sym.type = '$end'
         self.symstack.append(sym)
         self.statestack.append(0)
 
     def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
         if debug or yaccdevel:
+            if isinstance(debug,int):
+                debug = PlyLogger(sys.stderr)
             return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
         elif tracking:
             return self.parseopt(input,lexer,debug,tracking,tokenfunc)
         else:
             return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
         
 
     # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -210,30 +274,34 @@ class Parser:
     # enclosed in:
     #
     #      #--! DEBUG
     #      statements
     #      #--! DEBUG
     #
     # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
-    def parsedebug(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
+    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
         lookahead = None                 # Current lookahead symbol
         lookaheadstack = [ ]             # Stack of lookahead symbols
         actions = self.action            # Local reference to action table (to avoid lookup on self.)
         goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
         prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
         pslice  = YaccProduction(None)   # Production object passed to grammar rules
         errorcount = 0                   # Used during error recovery 
-        endsym  = "$end"                 # End symbol
+
+        # --! DEBUG
+        debug.info("PLY: PARSE DEBUG START")
+        # --! DEBUG
+
         # If no lexer was given, we will try to use the lex module
         if not lexer:
-            import lex
+            lex = load_ply_lex()
             lexer = lex.lexer
-        
+
         # Set up the lexer and parser objects on pslice
         pslice.lexer = lexer
         pslice.parser = self
 
         # If input was supplied, pass to lexer
         if input is not None:
             lexer.input(input)
 
@@ -252,65 +320,55 @@ class Parser:
 
         pslice.stack = symstack         # Put in the production
         errtoken   = None               # Err token
 
         # The start state is assumed to be (0,$end)
 
         statestack.append(0)
         sym = YaccSymbol()
-        sym.type = endsym
+        sym.type = "$end"
         symstack.append(sym)
         state = 0
         while 1:
             # Get the next symbol on the input.  If a lookahead symbol
             # is already set, we just use that. Otherwise, we'll pull
             # the next token off of the lookaheadstack or from the lexer
 
             # --! DEBUG
-            if debug > 1:
-                print 'state', state
+            debug.debug('')
+            debug.debug('State  : %s', state)
             # --! DEBUG
 
             if not lookahead:
                 if not lookaheadstack:
                     lookahead = get_token()     # Get the next token
                 else:
                     lookahead = lookaheadstack.pop()
                 if not lookahead:
                     lookahead = YaccSymbol()
-                    lookahead.type = endsym
+                    lookahead.type = "$end"
 
             # --! DEBUG
-            if debug:
-                errorlead = ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()
+            debug.debug('Stack  : %s',
+                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
             # --! DEBUG
 
             # Check the action table
             ltype = lookahead.type
             t = actions[state].get(ltype)
 
-            # --! DEBUG
-            if debug > 1:
-                print 'action', t
-            # --! DEBUG
-
             if t is not None:
                 if t > 0:
                     # shift a symbol on the stack
-                    if ltype is endsym:
-                        # Error, end of input
-                        sys.stderr.write("yacc: Parse error. EOF\n")
-                        return
                     statestack.append(t)
                     state = t
                     
                     # --! DEBUG
-                    if debug > 1:
-                        sys.stderr.write("%-60s shift state %s\n" % (errorlead, t))
+                    debug.debug("Action : Shift and goto state %s", t)
                     # --! DEBUG
 
                     symstack.append(lookahead)
                     lookahead = None
 
                     # Decrease error count on successful shift
                     if errorcount: errorcount -=1
                     continue
@@ -322,18 +380,21 @@ class Parser:
                     plen  = p.len
 
                     # Get production function
                     sym = YaccSymbol()
                     sym.type = pname       # Production name
                     sym.value = None
 
                     # --! DEBUG
-                    if debug > 1:
-                        sys.stderr.write("%-60s reduce %d\n" % (errorlead, -t))
+                    if plen:
+                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
+                    else:
+                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
+                        
                     # --! DEBUG
 
                     if plen:
                         targ = symstack[-plen-1:]
                         targ[0] = sym
 
                         # --! TRACKING
                         if tracking:
@@ -350,19 +411,22 @@ class Parser:
                         # The code enclosed in this section is duplicated 
                         # below as a performance optimization.  Make sure
                         # changes get made in both locations.
 
                         pslice.slice = targ
                         
                         try:
                             # Call the grammar rule with our special slice object
-                            p.func(pslice)
                             del symstack[-plen:]
                             del statestack[-plen:]
+                            p.callable(pslice)
+                            # --! DEBUG
+                            debug.info("Result : %s", format_result(pslice[0]))
+                            # --! DEBUG
                             symstack.append(sym)
                             state = goto[statestack[-1]][pname]
                             statestack.append(state)
                         except SyntaxError:
                             # If an error was set. Enter error recovery state
                             lookaheadstack.append(lookahead)
                             symstack.pop()
                             statestack.pop()
@@ -388,17 +452,20 @@ class Parser:
                         # The code enclosed in this section is duplicated 
                         # above as a performance optimization.  Make sure
                         # changes get made in both locations.
 
                         pslice.slice = targ
 
                         try:
                             # Call the grammar rule with our special slice object
-                            p.func(pslice)
+                            p.callable(pslice)
+                            # --! DEBUG
+                            debug.info("Result : %s", format_result(pslice[0]))
+                            # --! DEBUG
                             symstack.append(sym)
                             state = goto[statestack[-1]][pname]
                             statestack.append(state)
                         except SyntaxError:
                             # If an error was set. Enter error recovery state
                             lookaheadstack.append(lookahead)
                             symstack.pop()
                             statestack.pop()
@@ -407,46 +474,53 @@ class Parser:
                             lookahead = sym
                             errorcount = error_count
                             self.errorok = 0
                         continue
                         # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 
                 if t == 0:
                     n = symstack[-1]
-                    return getattr(n,"value",None)
+                    result = getattr(n,"value",None)
+                    # --! DEBUG
+                    debug.info("Done   : Returning %s", format_result(result))
+                    debug.info("PLY: PARSE DEBUG END")
+                    # --! DEBUG
+                    return result
 
             if t == None:
 
                 # --! DEBUG
-                if debug:
-                    sys.stderr.write(errorlead + "\n")
+                debug.error('Error  : %s',
+                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                 # --! DEBUG
 
                 # We have some kind of parsing error here.  To handle
                 # this, we are going to push the current token onto
                 # the tokenstack and replace it with an 'error' token.
                 # If there are any synchronization rules, they may
                 # catch it.
                 #
                # In addition to pushing the error token, we call
                # the user-defined p_error() function if this is the
                 # first syntax error.  This function is only called if
                 # errorcount == 0.
                 if errorcount == 0 or self.errorok:
                     errorcount = error_count
                     self.errorok = 0
                     errtoken = lookahead
-                    if errtoken.type is endsym:
+                    if errtoken.type == "$end":
                         errtoken = None               # End of file!
                     if self.errorfunc:
                         global errok,token,restart
                         errok = self.errok        # Set some special functions available in error recovery
                         token = get_token
                         restart = self.restart
+                        if errtoken and not hasattr(errtoken,'lexer'):
+                            errtoken.lexer = lexer
                         tok = self.errorfunc(errtoken)
                         del errok, token, restart   # Delete special functions
 
                         if self.errorok:
                             # User must have done some kind of panic
                             # mode recovery on their own.  The
                             # returned token is the next lookahead
                             lookahead = tok
@@ -466,29 +540,29 @@ class Parser:
 
                 else:
                     errorcount = error_count
 
                 # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                 # entire parse has been rolled back and we're completely hosed.   The token is
                 # discarded and we just keep going.
 
-                if len(statestack) <= 1 and lookahead.type is not endsym:
+                if len(statestack) <= 1 and lookahead.type != "$end":
                     lookahead = None
                     errtoken = None
                     state = 0
                     # Nuke the pushback stack
                     del lookaheadstack[:]
                     continue
 
                 # case 2: the statestack has a couple of entries on it, but we're
                 # at the end of the file. nuke the top entry and generate an error token
 
                 # Start nuking entries on the stack
-                if lookahead.type is endsym:
+                if lookahead.type == "$end":
                     # Whoa. We're really hosed here. Bail out
                     return
 
                 if lookahead.type != 'error':
                     sym = symstack[-1]
                     if sym.type == 'error':
                         # Hmmm. Error is on top of stack, we'll just nuke input
                         # symbol and continue
@@ -504,17 +578,17 @@ class Parser:
                 else:
                     symstack.pop()
                     statestack.pop()
                     state = statestack[-1]       # Potential bug fix
 
                 continue
 
             # Call an error function here
-            raise RuntimeError, "yacc: internal parser error!!!\n"
+            raise RuntimeError("yacc: internal parser error!!!\n")
 
     # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     # parseopt().
     #
     # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY.
     # Edit the debug version above, then copy any modifications to the method
     # below while removing #--! DEBUG sections.
     # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -526,17 +600,17 @@ class Parser:
         actions = self.action            # Local reference to action table (to avoid lookup on self.)
         goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
         prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
         pslice  = YaccProduction(None)   # Production object passed to grammar rules
         errorcount = 0                   # Used during error recovery 
 
         # If no lexer was given, we will try to use the lex module
         if not lexer:
-            import lex
+            lex = load_ply_lex()
             lexer = lex.lexer
         
         # Set up the lexer and parser objects on pslice
         pslice.lexer = lexer
         pslice.parser = self
 
         # If input was supplied, pass to lexer
         if input is not None:
@@ -581,20 +655,16 @@ class Parser:
 
             # Check the action table
             ltype = lookahead.type
             t = actions[state].get(ltype)
 
             if t is not None:
                 if t > 0:
                     # shift a symbol on the stack
-                    if ltype == '$end':
-                        # Error, end of input
-                        sys.stderr.write("yacc: Parse error. EOF\n")
-                        return
                     statestack.append(t)
                     state = t
 
                     symstack.append(lookahead)
                     lookahead = None
 
                     # Decrease error count on successful shift
                     if errorcount: errorcount -=1
@@ -630,19 +700,19 @@ class Parser:
                         # The code enclosed in this section is duplicated 
                         # below as a performance optimization.  Make sure
                         # changes get made in both locations.
 
                         pslice.slice = targ
                         
                         try:
                             # Call the grammar rule with our special slice object
-                            p.func(pslice)
                             del symstack[-plen:]
                             del statestack[-plen:]
+                            p.callable(pslice)
                             symstack.append(sym)
                             state = goto[statestack[-1]][pname]
                             statestack.append(state)
                         except SyntaxError:
                             # If an error was set. Enter error recovery state
                             lookaheadstack.append(lookahead)
                             symstack.pop()
                             statestack.pop()
@@ -668,17 +738,17 @@ class Parser:
                         # The code enclosed in this section is duplicated 
                         # above as a performance optimization.  Make sure
                         # changes get made in both locations.
 
                         pslice.slice = targ
 
                         try:
                             # Call the grammar rule with our special slice object
-                            p.func(pslice)
+                            p.callable(pslice)
                             symstack.append(sym)
                             state = goto[statestack[-1]][pname]
                             statestack.append(state)
                         except SyntaxError:
                             # If an error was set. Enter error recovery state
                             lookaheadstack.append(lookahead)
                             symstack.pop()
                             statestack.pop()
@@ -712,16 +782,18 @@ class Parser:
                     errtoken = lookahead
                     if errtoken.type == '$end':
                         errtoken = None               # End of file!
                     if self.errorfunc:
                         global errok,token,restart
                         errok = self.errok        # Set some special functions available in error recovery
                         token = get_token
                         restart = self.restart
+                        if errtoken and not hasattr(errtoken,'lexer'):
+                            errtoken.lexer = lexer
                         tok = self.errorfunc(errtoken)
                         del errok, token, restart   # Delete special functions
 
                         if self.errorok:
                             # User must have done some kind of panic
                             # mode recovery on their own.  The
                             # returned token is the next lookahead
                             lookahead = tok
@@ -779,17 +851,17 @@ class Parser:
                 else:
                     symstack.pop()
                     statestack.pop()
                     state = statestack[-1]       # Potential bug fix
 
                 continue
 
             # Call an error function here
-            raise RuntimeError, "yacc: internal parser error!!!\n"
+            raise RuntimeError("yacc: internal parser error!!!\n")
 
     # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
     # parseopt_notrack().
     #
     # Optimized version of parseopt() with line number tracking removed. 
     # DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
     # code in the #--! TRACKING sections
     # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@@ -800,17 +872,17 @@ class Parser:
         actions = self.action            # Local reference to action table (to avoid lookup on self.)
         goto    = self.goto              # Local reference to goto table (to avoid lookup on self.)
         prod    = self.productions       # Local reference to production list (to avoid lookup on self.)
         pslice  = YaccProduction(None)   # Production object passed to grammar rules
         errorcount = 0                   # Used during error recovery 
 
         # If no lexer was given, we will try to use the lex module
         if not lexer:
-            import lex
+            lex = load_ply_lex()
             lexer = lex.lexer
         
         # Set up the lexer and parser objects on pslice
         pslice.lexer = lexer
         pslice.parser = self
 
         # If input was supplied, pass to lexer
         if input is not None:
@@ -855,20 +927,16 @@ class Parser:
 
             # Check the action table
             ltype = lookahead.type
             t = actions[state].get(ltype)
 
             if t is not None:
                 if t > 0:
                     # shift a symbol on the stack
-                    if ltype == '$end':
-                        # Error, end of input
-                        sys.stderr.write("yacc: Parse error. EOF\n")
-                        return
                     statestack.append(t)
                     state = t
 
                     symstack.append(lookahead)
                     lookahead = None
 
                     # Decrease error count on successful shift
                     if errorcount: errorcount -=1
@@ -893,19 +961,19 @@ class Parser:
                         # The code enclosed in this section is duplicated 
                         # below as a performance optimization.  Make sure
                         # changes get made in both locations.
 
                         pslice.slice = targ
                         
                         try:
                             # Call the grammar rule with our special slice object
-                            p.func(pslice)
                             del symstack[-plen:]
                             del statestack[-plen:]
+                            p.callable(pslice)
                             symstack.append(sym)
                             state = goto[statestack[-1]][pname]
                             statestack.append(state)
                         except SyntaxError:
                             # If an error was set. Enter error recovery state
                             lookaheadstack.append(lookahead)
                             symstack.pop()
                             statestack.pop()
@@ -925,17 +993,17 @@ class Parser:
                         # The code enclosed in this section is duplicated 
                         # above as a performance optimization.  Make sure
                         # changes get made in both locations.
 
                         pslice.slice = targ
 
                         try:
                             # Call the grammar rule with our special slice object
-                            p.func(pslice)
+                            p.callable(pslice)
                             symstack.append(sym)
                             state = goto[statestack[-1]][pname]
                             statestack.append(state)
                         except SyntaxError:
                             # If an error was set. Enter error recovery state
                             lookaheadstack.append(lookahead)
                             symstack.pop()
                             statestack.pop()
@@ -969,16 +1037,18 @@ class Parser:
                     errtoken = lookahead
                     if errtoken.type == '$end':
                         errtoken = None               # End of file!
                     if self.errorfunc:
                         global errok,token,restart
                         errok = self.errok        # Set some special functions available in error recovery
                         token = get_token
                         restart = self.restart
+                        if errtoken and not hasattr(errtoken,'lexer'):
+                            errtoken.lexer = lexer
                         tok = self.errorfunc(errtoken)
                         del errok, token, restart   # Delete special functions
 
                         if self.errorok:
                             # User must have done some kind of panic
                             # mode recovery on their own.  The
                             # returned token is the next lookahead
                             lookahead = tok
@@ -1036,1115 +1106,783 @@ class Parser:
                 else:
                     symstack.pop()
                     statestack.pop()
                     state = statestack[-1]       # Potential bug fix
 
                 continue
 
             # Call an error function here
-            raise RuntimeError, "yacc: internal parser error!!!\n"
-
+            raise RuntimeError("yacc: internal parser error!!!\n")
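
The errtoken.lexer attachment added above means a user-supplied error
callback can now reach the lexer that produced the offending token.  A
minimal sketch of such a callback, assuming a PLY lex-built lexer (whose
skip() method advances the input); the handler name and messages are
illustrative, not part of this patch:

    import sys

    def p_error(tok):
        if tok is None:
            sys.stderr.write("yacc: unexpected end of input\n")
            return
        sys.stderr.write("yacc: syntax error at %r\n" % (tok.value,))
        # tok.lexer was attached by the engine before errorfunc was called
        tok.lexer.skip(1)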
 
 # -----------------------------------------------------------------------------
-#                          === Parser Construction ===
+#                          === Grammar Representation ===
 #
-# The following functions and variables are used to implement the yacc() function
-# itself.   This is pretty hairy stuff involving lots of error checking,
-# construction of LR items, kernels, and so forth.   Although a lot of
-# this work is done using global variables, the resulting Parser object
-# is completely self contained--meaning that it is safe to repeatedly
-# call yacc() with different grammars in the same application.
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar. 
 # -----------------------------------------------------------------------------
 
-# -----------------------------------------------------------------------------
-# validate_file()
-#
-# This function checks to see if there are duplicated p_rulename() functions
-# in the parser module file.  Without this function, it is really easy for
-# users to make mistakes by cutting and pasting code fragments (and it's a real
-# bugger to try and figure out why the resulting parser doesn't work).  Therefore,
-# we just do a little regular expression pattern matching of def statements
-# to try and detect duplicates.
-# -----------------------------------------------------------------------------
-
-def validate_file(filename):
-    base,ext = os.path.splitext(filename)
-    if ext != '.py': return 1          # No idea. Assume it's okay.
-
-    try:
-        f = open(filename)
-        lines = f.readlines()
-        f.close()
-    except IOError:
-        return 1                       # Oh well
-
-    # Match def p_funcname(
-    fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
-    counthash = { }
-    linen = 1
-    noerror = 1
-    for l in lines:
-        m = fre.match(l)
-        if m:
-            name = m.group(1)
-            prev = counthash.get(name)
-            if not prev:
-                counthash[name] = linen
-            else:
-                sys.stderr.write("%s:%d: Function %s redefined. Previously defined on line %d\n" % (filename,linen,name,prev))
-                noerror = 0
-        linen += 1
-    return noerror
-
-# This function looks for functions that might be grammar rules, but which don't have the proper p_suffix.
-def validate_dict(d):
-    for n,v in d.items():
-        if n[0:2] == 'p_' and type(v) in (types.FunctionType, types.MethodType): continue
-        if n[0:2] == 't_': continue
-
-        if n[0:2] == 'p_':
-            sys.stderr.write("yacc: Warning. '%s' not defined as a function\n" % n)
-        if 1 and isinstance(v,types.FunctionType) and v.func_code.co_argcount == 1:
-            try:
-                doc = v.__doc__.split(" ")
-                if doc[1] == ':':
-                    sys.stderr.write("%s:%d: Warning. Possible grammar rule '%s' defined without p_ prefix.\n" % (v.func_code.co_filename, v.func_code.co_firstlineno,n))
-            except StandardError:
-                pass
-
-# -----------------------------------------------------------------------------
-#                           === GRAMMAR FUNCTIONS ===
-#
-# The following global variables and functions are used to store, manipulate,
-# and verify the grammar rules specified by the user.
-# -----------------------------------------------------------------------------
-
-# Initialize all of the global variables used during grammar construction
-def initialize_vars():
-    global Productions, Prodnames, Prodmap, Terminals
-    global Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
-    global Errorfunc, Signature, Requires
-
-    Productions  = [None]  # A list of all of the productions.  The first
-                           # entry is always reserved for the purpose of
-                           # building an augmented grammar
-
-    Prodnames    = { }     # A dictionary mapping the names of nonterminals to a list of all
-                           # productions of that nonterminal.
-
-    Prodmap      = { }     # A dictionary that is only used to detect duplicate
-                           # productions.
-
-    Terminals    = { }     # A dictionary mapping the names of terminal symbols to a
-                           # list of the rules where they are used.
-
-    Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
-                           # of rule numbers where they are used.
-
-    First        = { }     # A dictionary of precomputed FIRST(x) symbols
-
-    Follow       = { }     # A dictionary of precomputed FOLLOW(x) symbols
-
-    Precedence   = { }     # Precedence rules for each terminal. Contains tuples of the
-                           # form ('right',level) or ('nonassoc', level) or ('left',level)
-
-    UsedPrecedence = { }   # Precedence rules that were actually used by the grammer.
-                           # This is only used to provide error checking and to generate
-                           # a warning about unused precedence rules.
-
-    LRitems      = [ ]     # A list of all LR items for the grammar.  These are the
-                           # productions with the "dot" like E -> E . PLUS E
-
-    Errorfunc    = None    # User defined error handler
-
-    Signature    = md5.new()   # Digital signature of the grammar rules, precedence
-                               # and other information.  Used to determined when a
-                               # parsing table needs to be regenerated.
-    
-    Signature.update(__tabversion__)
-
-    Requires     = { }     # Requires list
-
-    # File objects used when creating the parser.out debugging file
-    global _vf, _vfc
-    _vf           = cStringIO.StringIO()
-    _vfc          = cStringIO.StringIO()
+import re
+
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
 
 # -----------------------------------------------------------------------------
 # class Production:
 #
 # This class stores the raw information about a single production or grammar rule.
-# It has a few required attributes:
+# A grammar rule refers to a specification such as this:
 #
-#       name     - Name of the production (nonterminal)
-#       prod     - A list of symbols making up its production
+#       expr : expr PLUS term 
+#
+# Here are the basic attributes defined on all productions
+#
+#       name     - Name of the production.  For example 'expr'
+#       prod     - A list of symbols on the right side ['expr','PLUS','term']
+#       prec     - Production precedence level
 #       number   - Production number.
+#       func     - Function that executes on reduce
+#       file     - File where production function is defined
+#       lineno   - Line number where production function is defined
 #
-# In addition, a few additional attributes are used to help with debugging or
-# optimization of table generation.
+# The following additional attributes are also defined:
 #
-#       file     - File where production action is defined.
-#       lineno   - Line number where action is defined
-#       func     - Action function
-#       prec     - Precedence level
-#       lr_next  - Next LR item. Example, if we are ' E -> E . PLUS E'
-#                  then lr_next refers to 'E -> E PLUS . E'
-#       lr_index - LR item index (location of the ".") in the prod list.
+#       len       - Length of the production (number of symbols on right hand side)
+#       usyms     - Set of unique symbols found in the production
+# -----------------------------------------------------------------------------
+
+class Production(object):
+    reduced = 0
+    def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
+        self.name     = name
+        self.prod     = tuple(prod)
+        self.number   = number
+        self.func     = func
+        self.callable = None
+        self.file     = file
+        self.line     = line
+        self.prec     = precedence
+
+        # Internal settings used during table construction
+        
+        self.len  = len(self.prod)   # Length of the production
+
+        # Create a list of unique production symbols used in the production
+        self.usyms = [ ]             
+        for s in self.prod:
+            if s not in self.usyms:
+                self.usyms.append(s)
+
+        # List of all LR items for the production
+        self.lr_items = []
+        self.lr_next = None
+
+        # Create a string representation
+        if self.prod:
+            self.str = "%s -> %s" % (self.name," ".join(self.prod))
+        else:
+            self.str = "%s -> <empty>" % self.name
+
+    def __str__(self):
+        return self.str
+
+    def __repr__(self):
+        return "Production("+str(self)+")"
+
+    def __len__(self):
+        return len(self.prod)
+
+    def __nonzero__(self):
+        return 1
+
+    def __getitem__(self,index):
+        return self.prod[index]
+            
+    # Return the nth lr_item from the production (or None if at the end)
+    def lr_item(self,n):
+        if n > len(self.prod): return None
+        p = LRItem(self,n)
+
+        # Precompute the list of productions immediately following.  Hack. Remove later
+        try:
+            p.lr_after = Prodnames[p.prod[n+1]]
+        except (IndexError,KeyError):
+            p.lr_after = []
+        try:
+            p.lr_before = p.prod[n-1]
+        except IndexError:
+            p.lr_before = None
+
+        return p
+    
+    # Bind the production function name to a callable
+    def bind(self,pdict):
+        if self.func:
+            self.callable = pdict[self.func]
+
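
A quick sketch of the new Production interface defined in this hunk (the
grammar fragment and function name are made up for illustration):

    p = Production(1, 'expr', ['expr', 'PLUS', 'term'],
                   func='p_expr_plus', file='calc.py', line=10)
    print(str(p))      # expr -> expr PLUS term
    print(len(p))      # 3
    print(p.usyms)     # ['expr', 'PLUS', 'term']
    p.bind({'p_expr_plus': lambda t: None})   # resolves p.callable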
+# This class serves as a minimal stand-in for Production objects when
+# reading table data from files.  It only contains information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
+class MiniProduction(object):
+    def __init__(self,str,name,len,func,file,line):
+        self.name     = name
+        self.len      = len
+        self.func     = func
+        self.callable = None
+        self.file     = file
+        self.line     = line
+        self.str      = str
+    def __str__(self):
+        return self.str
+    def __repr__(self):
+        return "MiniProduction(%s)" % self.str
+
+    # Bind the production function name to a callable
+    def bind(self,pdict):
+        if self.func:
+            self.callable = pdict[self.func]
+
+
+# -----------------------------------------------------------------------------
+# class LRItem
+#
+# This class represents a specific stage of parsing a production rule.  For
+# example: 
+#
+#       expr : expr . PLUS term 
+#
+# In the above, the "." represents the current location of the parse.  Here
+# are the basic attributes:
+#
+#       name       - Name of the production.  For example 'expr'
+#       prod       - A list of symbols on the right side ['expr','.', 'PLUS','term']
+#       number     - Production number.
+#
+#       lr_next    - Next LR item.  For example, if we are at 'expr -> expr . PLUS term'
+#                    then lr_next refers to 'expr -> expr PLUS . term'
+#       lr_index   - LR item index (location of the ".") in the prod list.
 #       lookaheads - LALR lookahead symbols for this item
-#       len      - Length of the production (number of symbols on right hand side)
+#       len        - Length of the production (number of symbols on right hand side)
+#       lr_after    - List of all productions that immediately follow
+#       lr_before   - Grammar symbol immediately before
 # -----------------------------------------------------------------------------
 
-class Production:
-    def __init__(self,**kw):
-        for k,v in kw.items():
-            setattr(self,k,v)
-        self.lr_index = -1
-        self.lr0_added = 0    # Flag indicating whether or not added to LR0 closure
-        self.lr1_added = 0    # Flag indicating whether or not added to LR1
-        self.usyms = [ ]
+class LRItem(object):
+    def __init__(self,p,n):
+        self.name       = p.name
+        self.prod       = list(p.prod)
+        self.number     = p.number
+        self.lr_index   = n
         self.lookaheads = { }
-        self.lk_added = { }
-        self.setnumbers = [ ]
+        self.prod.insert(n,".")
+        self.prod       = tuple(self.prod)
+        self.len        = len(self.prod)
+        self.usyms      = p.usyms
 
     def __str__(self):
         if self.prod:
             s = "%s -> %s" % (self.name," ".join(self.prod))
         else:
             s = "%s -> <empty>" % self.name
         return s
 
     def __repr__(self):
-        return str(self)
-
-    # Compute lr_items from the production
-    def lr_item(self,n):
-        if n > len(self.prod): return None
-        p = Production()
-        p.name = self.name
-        p.prod = list(self.prod)
-        p.number = self.number
-        p.lr_index = n
-        p.lookaheads = { }
-        p.setnumbers = self.setnumbers
-        p.prod.insert(n,".")
-        p.prod = tuple(p.prod)
-        p.len = len(p.prod)
-        p.usyms = self.usyms
-
-        # Precompute list of productions immediately following
+        return "LRItem("+str(self)+")"
+
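
Constructing an LRItem directly shows the dotted-rule representation
(Production.lr_item() does the same but also consults the Prodnames
table); a sketch using the hypothetical rule from above:

    p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
    item = LRItem(p, 1)
    print(str(item))       # expr -> expr . PLUS term
    print(item.lr_index)   # 1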
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols.  Used in add_production()
+# -----------------------------------------------------------------------------
+def rightmost_terminal(symbols, terminals):
+    i = len(symbols) - 1
+    while i >= 0:
+        if symbols[i] in terminals:
+            return symbols[i]
+        i -= 1
+    return None
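
For example, given a terminal table of the shape used throughout this
module (a sketch):

    terminals = {'PLUS': [], 'NUMBER': []}
    rightmost_terminal(['expr', 'PLUS', 'term'], terminals)   # -> 'PLUS'
    rightmost_terminal(['expr', 'term'], terminals)           # -> None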
+
+# -----------------------------------------------------------------------------
+#                           === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
+
+class GrammarError(YaccError): pass
+
+class Grammar(object):
+    def __init__(self,terminals):
+        self.Productions  = [None]  # A list of all of the productions.  The first
+                                    # entry is always reserved for the purpose of
+                                    # building an augmented grammar
+
+        self.Prodnames    = { }     # A dictionary mapping the names of nonterminals to a list of all
+                                    # productions of that nonterminal.
+
+        self.Prodmap      = { }     # A dictionary that is only used to detect duplicate
+                                    # productions.
+
+        self.Terminals    = { }     # A dictionary mapping the names of terminal symbols to a
+                                    # list of the rules where they are used.
+
+        for term in terminals:
+            self.Terminals[term] = []
+
+        self.Terminals['error'] = []
+
+        self.Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
+                                    # of rule numbers where they are used.
+
+        self.First        = { }     # A dictionary of precomputed FIRST(x) symbols
+
+        self.Follow       = { }     # A dictionary of precomputed FOLLOW(x) symbols
+
+        self.Precedence   = { }     # Precedence rules for each terminal. Contains tuples of the
+                                    # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+        self.UsedPrecedence = { }   # Precedence rules that were actually used by the grammar.
+                                    # This is only used to provide error checking and to generate
+                                    # a warning about unused precedence rules.
+
+        self.Start = None           # Starting symbol for the grammar
+
+
+    def __len__(self):
+        return len(self.Productions)
+
+    def __getitem__(self,index):
+        return self.Productions[index]
+
+    # -----------------------------------------------------------------------------
+    # set_precedence()
+    #
+    # Sets the precedence for a given terminal. assoc is the associativity such as
+    # 'left','right', or 'nonassoc'.  level is a numeric level.
+    #
+    # -----------------------------------------------------------------------------
+
+    def set_precedence(self,term,assoc,level):
+        assert self.Productions == [None],"Must call set_precedence() before add_production()"
+        if term in self.Precedence:
+            raise GrammarError("Precedence already specified for terminal '%s'" % term)
+        if assoc not in ['left','right','nonassoc']:
+            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+        self.Precedence[term] = (assoc,level)
+ 
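
A hedged sketch of the intended call pattern, mirroring a classic yacc
precedence table (the terminal names are illustrative):

    g = Grammar(['PLUS', 'MINUS', 'TIMES', 'NUMBER'])
    g.set_precedence('PLUS',  'left', 1)
    g.set_precedence('MINUS', 'left', 1)
    g.set_precedence('TIMES', 'left', 2)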
+    # -----------------------------------------------------------------------------
+    # add_production()
+    #
+    # Given an action function, this function assembles a production rule and
+    # computes its precedence level.
+    #
+    # The production rule is supplied as a list of symbols.   For example,
+    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+    # symbols ['expr','PLUS','term'].
+    #
+    # Precedence is determined by the precedence of the rightmost terminal symbol
+    # or the precedence of a terminal specified by %prec.
+    #
+    # A variety of error checks are performed to make sure production symbols
+    # are valid and that %prec is used correctly.
+    # -----------------------------------------------------------------------------
+
+    def add_production(self,prodname,syms,func=None,file='',line=0):
+
+        if prodname in self.Terminals:
+            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
+        if prodname == 'error':
+            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
+        if not _is_identifier.match(prodname):
+            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
+
+        # Look for literal tokens 
+        for n,s in enumerate(syms):
+            if s[0] in "'\"":
+                 try:
+                     c = eval(s)
+                     if (len(c) > 1):
+                          raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
+                     if not c in self.Terminals:
+                          self.Terminals[c] = []
+                     syms[n] = c
+                     continue
+                 except SyntaxError:
+                     pass
+            if not _is_identifier.match(s) and s != '%prec':
+                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
+        
+        # Determine the precedence level
+        if '%prec' in syms:
+            if syms[-1] == '%prec':
+                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
+            if syms[-2] != '%prec':
+                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
+            precname = syms[-1]
+            prodprec = self.Precedence.get(precname,None)
+            if not prodprec:
+                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
+            else:
+                self.UsedPrecedence[precname] = 1
+            del syms[-2:]     # Drop %prec from the rule
+        else:
+            # If no %prec, precedence is determined by the rightmost terminal symbol
+            precname = rightmost_terminal(syms,self.Terminals)
+            prodprec = self.Precedence.get(precname,('right',0)) 
+            
+        # See if the rule is already in the rulemap
+        map = "%s -> %s" % (prodname,syms)
+        if map in self.Prodmap:
+            m = self.Prodmap[map]
+            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
+                               "Previous definition at %s:%d" % (m.file, m.line))
+
+        # From this point on, everything is valid.  Create a new Production instance
+        pnumber  = len(self.Productions)
+        if not prodname in self.Nonterminals:
+            self.Nonterminals[prodname] = [ ]
+
+        # Add the production number to Terminals and Nonterminals
+        for t in syms:
+            if t in self.Terminals:
+                self.Terminals[t].append(pnumber)
+            else:
+                if not t in self.Nonterminals:
+                    self.Nonterminals[t] = [ ]
+                self.Nonterminals[t].append(pnumber)
+
+        # Create a production and add it to the list of productions
+        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
+        self.Productions.append(p)
+        self.Prodmap[map] = p
+
+        # Add to the global productions list
         try:
-            p.lrafter = Prodnames[p.prod[n+1]]
-        except (IndexError,KeyError),e:
-            p.lrafter = []
-        try:
-            p.lrbefore = p.prod[n-1]
-        except IndexError:
-            p.lrbefore = None
-
-        return p
-
-class MiniProduction:
-    pass
-
-# regex matching identifiers
-_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
-
-# -----------------------------------------------------------------------------
-# add_production()
-#
-# Given an action function, this function assembles a production rule.
-# The production rule is assumed to be found in the function's docstring.
-# This rule has the general syntax:
-#
-#              name1 ::= production1
-#                     |  production2
-#                     |  production3
-#                    ...
-#                     |  productionn
-#              name2 ::= production1
-#                     |  production2
-#                    ...
-# -----------------------------------------------------------------------------
-
-def add_production(f,file,line,prodname,syms):
-
-    if Terminals.has_key(prodname):
-        sys.stderr.write("%s:%d: Illegal rule name '%s'. Already defined as a token.\n" % (file,line,prodname))
-        return -1
-    if prodname == 'error':
-        sys.stderr.write("%s:%d: Illegal rule name '%s'. error is a reserved word.\n" % (file,line,prodname))
-        return -1
-
-    if not _is_identifier.match(prodname):
-        sys.stderr.write("%s:%d: Illegal rule name '%s'\n" % (file,line,prodname))
-        return -1
-
-    for x in range(len(syms)):
-        s = syms[x]
-        if s[0] in "'\"":
-             try:
-                 c = eval(s)
-                 if (len(c) > 1):
-                      sys.stderr.write("%s:%d: Literal token %s in rule '%s' may only be a single character\n" % (file,line,s, prodname))
-                      return -1
-                 if not Terminals.has_key(c):
-                      Terminals[c] = []
-                 syms[x] = c
-                 continue
-             except SyntaxError:
-                 pass
-        if not _is_identifier.match(s) and s != '%prec':
-            sys.stderr.write("%s:%d: Illegal name '%s' in rule '%s'\n" % (file,line,s, prodname))
-            return -1
-
-    # See if the rule is already in the rulemap
-    map = "%s -> %s" % (prodname,syms)
-    if Prodmap.has_key(map):
-        m = Prodmap[map]
-        sys.stderr.write("%s:%d: Duplicate rule %s.\n" % (file,line, m))
-        sys.stderr.write("%s:%d: Previous definition at %s:%d\n" % (file,line, m.file, m.line))
-        return -1
-
-    p = Production()
-    p.name = prodname
-    p.prod = syms
-    p.file = file
-    p.line = line
-    p.func = f
-    p.number = len(Productions)
-
-
-    Productions.append(p)
-    Prodmap[map] = p
-    if not Nonterminals.has_key(prodname):
-        Nonterminals[prodname] = [ ]
-
-    # Add all terminals to Terminals
-    i = 0
-    while i < len(p.prod):
-        t = p.prod[i]
-        if t == '%prec':
-            try:
-                precname = p.prod[i+1]
-            except IndexError:
-                sys.stderr.write("%s:%d: Syntax error. Nothing follows %%prec.\n" % (p.file,p.line))
-                return -1
-
-            prec = Precedence.get(precname,None)
-            if not prec:
-                sys.stderr.write("%s:%d: Nothing known about the precedence of '%s'\n" % (p.file,p.line,precname))
-                return -1
-            else:
-                p.prec = prec
-                UsedPrecedence[precname] = 1
-            del p.prod[i]
-            del p.prod[i]
-            continue
-
-        if Terminals.has_key(t):
-            Terminals[t].append(p.number)
-            # Is a terminal.  We'll assign a precedence to p based on this
-            if not hasattr(p,"prec"):
-                p.prec = Precedence.get(t,('right',0))
-        else:
-            if not Nonterminals.has_key(t):
-                Nonterminals[t] = [ ]
-            Nonterminals[t].append(p.number)
-        i += 1
-
-    if not hasattr(p,"prec"):
-        p.prec = ('right',0)
-
-    # Set final length of productions
-    p.len  = len(p.prod)
-    p.prod = tuple(p.prod)
-
-    # Calculate unique syms in the production
-    p.usyms = [ ]
-    for s in p.prod:
-        if s not in p.usyms:
-            p.usyms.append(s)
-
-    # Add to the global productions list
-    try:
-        Prodnames[p.name].append(p)
-    except KeyError:
-        Prodnames[p.name] = [ p ]
-    return 0
-
-# Given a raw rule function, this function rips out its doc string
-# and adds rules to the grammar
-
-def add_function(f):
-    line = f.func_code.co_firstlineno
-    file = f.func_code.co_filename
-    error = 0
-
-    if isinstance(f,types.MethodType):
-        reqdargs = 2
-    else:
-        reqdargs = 1
-
-    if f.func_code.co_argcount > reqdargs:
-        sys.stderr.write("%s:%d: Rule '%s' has too many arguments.\n" % (file,line,f.__name__))
-        return -1
-
-    if f.func_code.co_argcount < reqdargs:
-        sys.stderr.write("%s:%d: Rule '%s' requires an argument.\n" % (file,line,f.__name__))
-        return -1
-
-    if f.__doc__:
-        # Split the doc string into lines
-        pstrings = f.__doc__.splitlines()
-        lastp = None
-        dline = line
-        for ps in pstrings:
-            dline += 1
-            p = ps.split()
+            self.Prodnames[prodname].append(p)
+        except KeyError:
+            self.Prodnames[prodname] = [ p ]
+        return 0
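
Continuing the sketch above: rules are added as (name, symbol-list)
pairs, and func is the name of the reduction function that bind() will
resolve later (the p_* names here are hypothetical):

    g.add_production('expr', ['expr', 'PLUS', 'term'], func='p_expr_plus')
    g.add_production('expr', ['term'], func='p_expr_term')
    g.add_production('term', ['NUMBER'], func='p_term_number')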
+
+    # -----------------------------------------------------------------------------
+    # set_start()
+    #
+    # Sets the starting symbol and creates the augmented grammar.  Production 
+    # rule 0 is S' -> start where start is the start symbol.
+    # -----------------------------------------------------------------------------
+
+    def set_start(self,start=None):
+        if not start:
+            start = self.Productions[1].name
+        if start not in self.Nonterminals:
+            raise GrammarError("start symbol %s undefined" % start)
+        self.Productions[0] = Production(0,"S'",[start])
+        self.Nonterminals[start].append(0)
+        self.Start = start
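
With the rules above in place, creating the augmented grammar is a
one-liner (sketch):

    g.set_start('expr')
    print(str(g.Productions[0]))   # S' -> expr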
+
+    # -----------------------------------------------------------------------------
+    # find_unreachable()
+    #
+    # Find all of the nonterminal symbols that can't be reached from the starting
+    # symbol.  Returns a list of nonterminals that can't be reached.
+    # -----------------------------------------------------------------------------
+
+    def find_unreachable(self):
+        
+        # Mark all symbols that are reachable from a symbol s
+        def mark_reachable_from(s):
+            if reachable[s]:
+                # We've already reached symbol s.
+                return
+            reachable[s] = 1
+            for p in self.Prodnames.get(s,[]):
+                for r in p.prod:
+                    mark_reachable_from(r)
+
+        reachable   = { }
+        for s in list(self.Terminals) + list(self.Nonterminals):
+            reachable[s] = 0
+
+        mark_reachable_from( self.Productions[0].prod[0] )
+
+        return [s for s in list(self.Nonterminals)
+                        if not reachable[s]]
+    
+    # -----------------------------------------------------------------------------
+    # infinite_cycles()
+    #
+    # This function looks at the various parsing rules and tries to detect
+    # infinite recursion cycles (grammar rules where there is no possible way
+    # to derive a string of only terminals).
+    # -----------------------------------------------------------------------------
+
+    def infinite_cycles(self):
+        terminates = {}
+
+        # Terminals:
+        for t in self.Terminals:
+            terminates[t] = 1
+
+        terminates['$end'] = 1
+
+        # Nonterminals:
+
+        # Initialize to false:
+        for n in self.Nonterminals:
+            terminates[n] = 0
+
+        # Then propagate termination until no change:
+        while 1:
+            some_change = 0
+            for (n,pl) in self.Prodnames.items():
+                # Nonterminal n terminates iff any of its productions terminates.
+                for p in pl:
+                    # Production p terminates iff all of its rhs symbols terminate.
+                    for s in p.prod:
+                        if not terminates[s]:
+                            # The symbol s does not terminate,
+                            # so production p does not terminate.
+                            p_terminates = 0
+                            break
+                    else:
+                        # didn't break from the loop,
+                        # so every symbol s terminates
+                        # so production p terminates.
+                        p_terminates = 1
+
+                    if p_terminates:
+                        # symbol n terminates!
+                        if not terminates[n]:
+                            terminates[n] = 1
+                            some_change = 1
+                        # Don't need to consider any more productions for this n.
+                        break
+
+            if not some_change:
+                break
+
+        infinite = []
+        for (s,term) in terminates.items():
+            if not term:
+                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+                    # s is used-but-not-defined, and we've already warned of that,
+                    # so it would be overkill to say that it's also non-terminating.
+                    pass
+                else:
+                    infinite.append(s)
+
+        return infinite
+
+
+    # -----------------------------------------------------------------------------
+    # undefined_symbols()
+    #
+    # Find all symbols that were used in the grammar, but not defined as tokens or
+    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
+    # and prod is the production where the symbol was used.
+    # -----------------------------------------------------------------------------
+    def undefined_symbols(self):
+        result = []
+        for p in self.Productions:
             if not p: continue
-            try:
-                if p[0] == '|':
-                    # This is a continuation of a previous rule
-                    if not lastp:
-                        sys.stderr.write("%s:%d: Misplaced '|'.\n" % (file,dline))
-                        return -1
-                    prodname = lastp
-                    if len(p) > 1:
-                        syms = p[1:]
-                    else:
-                        syms = [ ]
+
+            for s in p.prod:
+                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
+                    result.append((s,p))
+        return result
+
+    # -----------------------------------------------------------------------------
+    # unused_terminals()
+    #
+    # Find all terminals that were defined, but not used by the grammar.  Returns
+    # a list of the unused terminal names.
+    # -----------------------------------------------------------------------------
+    def unused_terminals(self):
+        unused_tok = []
+        for s,v in self.Terminals.items():
+            if s != 'error' and not v:
+                unused_tok.append(s)
+
+        return unused_tok
+
+    # ------------------------------------------------------------------------------
+    # unused_rules()
+    #
+    # Find all grammar rules that were defined, but not used (and possibly unreachable).
+    # Returns a list of productions.
+    # ------------------------------------------------------------------------------
+
+    def unused_rules(self):
+        unused_prod = []
+        for s,v in self.Nonterminals.items():
+            if not v:
+                p = self.Prodnames[s][0]
+                unused_prod.append(p)
+        return unused_prod
+
+    # -----------------------------------------------------------------------------
+    # unused_precedence()
+    #
+    # Returns a list of tuples (term,precedence) corresponding to precedence
+    # rules that were never used by the grammar.  term is the name of the terminal
+    # on which precedence was applied and precedence is a string such as 'left' or
+    # 'right' corresponding to the type of precedence. 
+    # -----------------------------------------------------------------------------
+
+    def unused_precedence(self):
+        unused = []
+        for termname in self.Precedence:
+            if not (termname in self.Terminals or termname in self.UsedPrecedence):
+                unused.append((termname,self.Precedence[termname][0]))
+                
+        return unused
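
Taken together these checks replace the old free-standing
verify_productions()/compute_reachable()/compute_terminates() pass.  A
sketch of how a caller might run them on the grammar built above
(assumes the usual 'import sys'):

    for sym, prod in g.undefined_symbols():
        sys.stderr.write("%s:%d: %r used, but not defined\n"
                         % (prod.file, prod.line, sym))
    for term in g.unused_terminals():
        sys.stderr.write("warning: token %r defined, but not used\n" % term)
    for prod in g.unused_rules():
        sys.stderr.write("warning: rule %r defined, but not used\n" % prod.name)
    for sym in g.find_unreachable():
        sys.stderr.write("warning: symbol %r is unreachable\n" % sym)
    for sym in g.infinite_cycles():
        sys.stderr.write("error: infinite recursion detected for %r\n" % sym)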
+
+    # -------------------------------------------------------------------------
+    # _first()
+    #
+    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+    #
+    # During execution of compute_first(), the result may be incomplete.
+    # Afterward (e.g., when called from compute_follow()), it will be complete.
+    # -------------------------------------------------------------------------
+    def _first(self,beta):
+
+        # We are computing First(x1,x2,x3,...,xn)
+        result = [ ]
+        for x in beta:
+            x_produces_empty = 0
+
+            # Add all the non-<empty> symbols of First[x] to the result.
+            for f in self.First[x]:
+                if f == '<empty>':
+                    x_produces_empty = 1
                 else:
-                    prodname = p[0]
-                    lastp = prodname
-                    assign = p[1]
-                    if len(p) > 2:
-                        syms = p[2:]
-                    else:
-                        syms = [ ]
-                    if assign != ':' and assign != '::=':
-                        sys.stderr.write("%s:%d: Syntax error. Expected ':'\n" % (file,dline))
-                        return -1
-
-
-                e = add_production(f,file,dline,prodname,syms)
-                error += e
-
-
-            except StandardError:
-                sys.stderr.write("%s:%d: Syntax error in rule '%s'\n" % (file,dline,ps))
-                error -= 1
-    else:
-        sys.stderr.write("%s:%d: No documentation string specified in function '%s'\n" % (file,line,f.__name__))
-    return error
-
-
-# Cycle checking code (Michael Dyck)
-
-def compute_reachable():
-    '''
-    Find each symbol that can be reached from the start symbol.
-    Print a warning for any nonterminals that can't be reached.
-    (Unused terminals have already had their warning.)
-    '''
-    Reachable = { }
-    for s in Terminals.keys() + Nonterminals.keys():
-        Reachable[s] = 0
-
-    mark_reachable_from( Productions[0].prod[0], Reachable )
-
-    for s in Nonterminals.keys():
-        if not Reachable[s]:
-            sys.stderr.write("yacc: Symbol '%s' is unreachable.\n" % s)
-
-def mark_reachable_from(s, Reachable):
-    '''
-    Mark all symbols that are reachable from symbol s.
-    '''
-    if Reachable[s]:
-        # We've already reached symbol s.
-        return
-    Reachable[s] = 1
-    for p in Prodnames.get(s,[]):
-        for r in p.prod:
-            mark_reachable_from(r, Reachable)
-
-# -----------------------------------------------------------------------------
-# compute_terminates()
-#
-# This function looks at the various parsing rules and tries to detect
-# infinite recursion cycles (grammar rules where there is no possible way
-# to derive a string of only terminals).
-# -----------------------------------------------------------------------------
-def compute_terminates():
-    '''
-    Raise an error for any symbols that don't terminate.
-    '''
-    Terminates = {}
-
-    # Terminals:
-    for t in Terminals.keys():
-        Terminates[t] = 1
-
-    Terminates['$end'] = 1
-
-    # Nonterminals:
-
-    # Initialize to false:
-    for n in Nonterminals.keys():
-        Terminates[n] = 0
-
-    # Then propagate termination until no change:
-    while 1:
-        some_change = 0
-        for (n,pl) in Prodnames.items():
-            # Nonterminal n terminates iff any of its productions terminates.
-            for p in pl:
-                # Production p terminates iff all of its rhs symbols terminate.
-                for s in p.prod:
-                    if not Terminates[s]:
-                        # The symbol s does not terminate,
-                        # so production p does not terminate.
-                        p_terminates = 0
-                        break
-                else:
-                    # didn't break from the loop,
-                    # so every symbol s terminates
-                    # so production p terminates.
-                    p_terminates = 1
-
-                if p_terminates:
-                    # symbol n terminates!
-                    if not Terminates[n]:
-                        Terminates[n] = 1
-                        some_change = 1
-                    # Don't need to consider any more productions for this n.
-                    break
-
-        if not some_change:
-            break
-
-    some_error = 0
-    for (s,terminates) in Terminates.items():
-        if not terminates:
-            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
-                # s is used-but-not-defined, and we've already warned of that,
-                # so it would be overkill to say that it's also non-terminating.
+                    if f not in result: result.append(f)
+
+            if x_produces_empty:
+                # We have to consider the next x in beta,
+                # i.e. stay in the loop.
                 pass
             else:
-                sys.stderr.write("yacc: Infinite recursion detected for symbol '%s'.\n" % s)
-                some_error = 1
-
-    return some_error
+                # We don't have to consider any further symbols in beta.
+                break
+        else:
+            # There was no 'break' from the loop,
+            # so x_produces_empty was true for all x in beta,
+            # so beta produces empty as well.
+            result.append('<empty>')
+
+        return result
+
+    # -------------------------------------------------------------------------
+    # compute_first()
+    #
+    # Compute the value of FIRST1(X) for all symbols
+    # -------------------------------------------------------------------------
+    def compute_first(self):
+        if self.First:
+            return self.First
+
+        # Terminals:
+        for t in self.Terminals:
+            self.First[t] = [t]
+
+        self.First['$end'] = ['$end']
+
+        # Nonterminals:
+
+        # Initialize to the empty set:
+        for n in self.Nonterminals:
+            self.First[n] = []
+
+        # Then propagate symbols until no change:
+        while 1:
+            some_change = 0
+            for n in self.Nonterminals:
+                for p in self.Prodnames[n]:
+                    for f in self._first(p.prod):
+                        if f not in self.First[n]:
+                            self.First[n].append( f )
+                            some_change = 1
+            if not some_change:
+                break
+        
+        return self.First
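
For the small expression grammar sketched above, FIRST converges after a
couple of passes:

    first = g.compute_first()
    # first['term'] == ['NUMBER']
    # first['expr'] == ['NUMBER']  -- every expr ultimately starts with NUMBER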
+
+    # ---------------------------------------------------------------------
+    # compute_follow()
+    #
+    # Computes all of the follow sets for every non-terminal symbol.  The
+    # follow set is the set of all symbols that might follow a given
+    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
+    # ---------------------------------------------------------------------
+    def compute_follow(self,start=None):
+        # If already computed, return the result
+        if self.Follow:
+            return self.Follow
+
+        # If first sets not computed yet, do that first.
+        if not self.First:
+            self.compute_first()
+
+        # Add '$end' to the follow list of the start symbol
+        for k in self.Nonterminals:
+            self.Follow[k] = [ ]
+
+        if not start:
+            start = self.Productions[1].name
+
+        self.Follow[start] = [ '$end' ]
+
+        while 1:
+            didadd = 0
+            for p in self.Productions[1:]:
+                # Here is the production set
+                for i in range(len(p.prod)):
+                    B = p.prod[i]
+                    if B in self.Nonterminals:
+                        # Okay. We got a non-terminal in a production
+                        fst = self._first(p.prod[i+1:])
+                        hasempty = 0
+                        for f in fst:
+                            if f != '<empty>' and f not in self.Follow[B]:
+                                self.Follow[B].append(f)
+                                didadd = 1
+                            if f == '<empty>':
+                                hasempty = 1
+                        if hasempty or i == (len(p.prod)-1):
+                            # Add elements of follow(a) to follow(b)
+                            for f in self.Follow[p.name]:
+                                if f not in self.Follow[B]:
+                                    self.Follow[B].append(f)
+                                    didadd = 1
+            if not didadd: break
+        return self.Follow
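
And the corresponding FOLLOW sets for the same sketch:

    follow = g.compute_follow()
    # follow['expr'] == ['$end', 'PLUS']  -- PLUS or end of input may follow
    # follow['term'] == ['$end', 'PLUS']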
+
+
+    # -----------------------------------------------------------------------------
+    # build_lritems()
+    #
+    # This function walks the list of productions and builds a complete set of the
+    # LR items.  The LR items are stored in two ways:  First, each production
+    # keeps a list of its own items in p.lr_items.  Second, the items are chained
+    # into a linked list through their lr_next attributes.  For example:
+    #
+    #   E -> E PLUS E
+    #
+    # Creates the list
+    #
+    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+    # -----------------------------------------------------------------------------
+
+    def build_lritems(self):
+        for p in self.Productions:
+            lastlri = p
+            i = 0
+            lr_items = []
+            while 1:
+                if i > len(p):
+                    lri = None
+                else:
+                    lri = LRItem(p,i)
+                    # Precompute the list of productions immediately following
+                    try:
+                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
+                    except (IndexError,KeyError):
+                        lri.lr_after = []
+                    try:
+                        lri.lr_before = lri.prod[i-1]
+                    except IndexError:
+                        lri.lr_before = None
+
+                lastlri.lr_next = lri
+                if not lri: break
+                lr_items.append(lri)
+                lastlri = lri
+                i += 1
+            p.lr_items = lr_items
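
A sketch of the per-production item chain this produces for the grammar
built above:

    g.build_lritems()
    p = g.Productions[1]                 # expr -> expr PLUS term
    print([str(i) for i in p.lr_items])
    # ['expr -> . expr PLUS term', 'expr -> expr . PLUS term',
    #  'expr -> expr PLUS . term', 'expr -> expr PLUS term .']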
 
 # -----------------------------------------------------------------------------
-# verify_productions()
+#                            == Class LRTable ==
 #
-# This function examines all of the supplied rules to see if they seem valid.
+# This class represents a basic table of LR parsing information.
+# Methods for generating the tables are not defined here.  They are defined
+# in the derived class LRGeneratedTable.
 # -----------------------------------------------------------------------------
-def verify_productions(cycle_check=1):
-    error = 0
-    for p in Productions:
-        if not p: continue
-
-        for s in p.prod:
-            if not Prodnames.has_key(s) and not Terminals.has_key(s) and s != 'error':
-                sys.stderr.write("%s:%d: Symbol '%s' used, but not defined as a token or a rule.\n" % (p.file,p.line,s))
-                error = 1
-                continue
-
-    unused_tok = 0
-    # Now verify all of the tokens
-    if yaccdebug:
-        _vf.write("Unused terminals:\n\n")
-    for s,v in Terminals.items():
-        if s != 'error' and not v:
-            sys.stderr.write("yacc: Warning. Token '%s' defined, but not used.\n" % s)
-            if yaccdebug: _vf.write("   %s\n"% s)
-            unused_tok += 1
-
-    # Print out all of the productions
-    if yaccdebug:
-        _vf.write("\nGrammar\n\n")
-        for i in range(1,len(Productions)):
-            _vf.write("Rule %-5d %s\n" % (i, Productions[i]))
-
-    unused_prod = 0
-    # Verify the use of all productions
-    for s,v in Nonterminals.items():
-        if not v:
-            p = Prodnames[s][0]
-            sys.stderr.write("%s:%d: Warning. Rule '%s' defined, but not used.\n" % (p.file,p.line, s))
-            unused_prod += 1
-
-
-    if unused_tok == 1:
-        sys.stderr.write("yacc: Warning. There is 1 unused token.\n")
-    if unused_tok > 1:
-        sys.stderr.write("yacc: Warning. There are %d unused tokens.\n" % unused_tok)
-
-    if unused_prod == 1:
-        sys.stderr.write("yacc: Warning. There is 1 unused rule.\n")
-    if unused_prod > 1:
-        sys.stderr.write("yacc: Warning. There are %d unused rules.\n" % unused_prod)
-
-    if yaccdebug:
-        _vf.write("\nTerminals, with rules where they appear\n\n")
-        ks = Terminals.keys()
-        ks.sort()
-        for k in ks:
-            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Terminals[k]])))
-        _vf.write("\nNonterminals, with rules where they appear\n\n")
-        ks = Nonterminals.keys()
-        ks.sort()
-        for k in ks:
-            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Nonterminals[k]])))
-
-    if (cycle_check):
-        compute_reachable()
-        error += compute_terminates()
-#        error += check_cycles()
-    return error
-
+
+class VersionError(YaccError): pass
+
+class LRTable(object):
+    def __init__(self):
+        self.lr_action = None
+        self.lr_goto = None
+        self.lr_productions = None
+        self.lr_method = None
+
+    def read_table(self,module):
+        if isinstance(module,types.ModuleType):
+            parsetab = module
+        else:
+            if sys.version_info[0] < 3:
+                exec("import %s as parsetab" % module)
+            else:
+                env = { }
+                exec("import %s as parsetab" % module, env, env)
+                parsetab = env['parsetab']
+
+        if parsetab._tabversion != __tabversion__:
+            raise VersionError("yacc table file version is out of date")
+
+        self.lr_action = parsetab._lr_action
+        self.lr_goto = parsetab._lr_goto
+
+        self.lr_productions = []
+        for p in parsetab._lr_productions:
+            self.lr_productions.append(MiniProduction(*p))
+
+        self.lr_method = parsetab._lr_method
+        return parsetab._lr_signature
+
+    def read_pickle(self,filename):
+        try:
+            import cPickle as pickle
+        except ImportError:
+            import pickle
+
+        in_f = open(filename,"rb")
+
+        tabversion = pickle.load(in_f)
+        if tabversion != __tabversion__:
+            raise VersionError("yacc table file version is out of date")
+        self.lr_method = pickle.load(in_f)
+        signature      = pickle.load(in_f)
+        self.lr_action = pickle.load(in_f)
+        self.lr_goto   = pickle.load(in_f)
+        productions    = pickle.load(in_f)
+
+        self.lr_productions = []
+        for p in productions:
+            self.lr_productions.append(MiniProduction(*p))
+
+        in_f.close()
+        return signature
+
+    # Bind all production function names to callable objects in pdict
+    def bind_callables(self,pdict):
+        for p in self.lr_productions:
+            p.bind(pdict)
+    
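
For reference, read_pickle() above expects its objects in a fixed order. A minimal standalone sketch of the matching writer/reader pair (hypothetical helper names, plain pickle, Python 3; not part of the patch):

    import pickle

    TABVERSION = "3.2"   # hypothetical stand-in for yacc.__tabversion__

    def write_table_sketch(filename, method, signature, action, goto, productions):
        # Dump in the exact order read_pickle() loads:
        # tabversion, lr_method, signature, lr_action, lr_goto, productions
        with open(filename, "wb") as out_f:
            for obj in (TABVERSION, method, signature, action, goto, productions):
                pickle.dump(obj, out_f, pickle.HIGHEST_PROTOCOL)

    def read_table_sketch(filename):
        with open(filename, "rb") as in_f:
            if pickle.load(in_f) != TABVERSION:
                raise RuntimeError("yacc table file version is out of date")
            return [pickle.load(in_f) for _ in range(5)]
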
 # -----------------------------------------------------------------------------
-# build_lritems()
+#                           === LR Generator ===
 #
-# This function walks the list of productions and builds a complete set of the
-# LR items.  The LR items are stored in two ways:  First, they are uniquely
-# numbered and placed in the list _lritems.  Second, a linked list of LR items
-# is built for each production.  For example:
-#
-#   E -> E PLUS E
-#
-# Creates the list
-#
-#  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+# The following classes and functions are used to generate LR parsing tables on 
+# a grammar.
 # -----------------------------------------------------------------------------
 
-def build_lritems():
-    for p in Productions:
-        lastlri = p
-        lri = p.lr_item(0)
-        i = 0
-        while 1:
-            lri = p.lr_item(i)
-            lastlri.lr_next = lri
-            if not lri: break
-            lri.lr_num = len(LRitems)
-            LRitems.append(lri)
-            lastlri = lri
-            i += 1
-
-    # In order for the rest of the parser generator to work, we need to
-    # guarantee that no more lritems are generated.  Therefore, we nuke
-    # the p.lr_item method.  (Only used in debugging)
-    # Production.lr_item = None
-
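
The comment above gives the item chain for E -> E PLUS E. A tiny self-contained sketch (illustrative only) that enumerates the same dotted items:

    def dotted_items(lhs, rhs):
        # Every dot position for one production:
        # E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E .
        for dot in range(len(rhs) + 1):
            yield "%s -> %s" % (lhs, " ".join(rhs[:dot] + ["."] + rhs[dot:]))

    for item in dotted_items("E", ["E", "PLUS", "E"]):
        print(item)
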
-# -----------------------------------------------------------------------------
-# add_precedence()
-#
-# Given a list of precedence rules, add to the precedence table.
-# -----------------------------------------------------------------------------
-
-def add_precedence(plist):
-    plevel = 0
-    error = 0
-    for p in plist:
-        plevel += 1
-        try:
-            prec = p[0]
-            terms = p[1:]
-            if prec != 'left' and prec != 'right' and prec != 'nonassoc':
-                sys.stderr.write("yacc: Invalid precedence '%s'\n" % prec)
-                return -1
-            for t in terms:
-                if Precedence.has_key(t):
-                    sys.stderr.write("yacc: Precedence already specified for terminal '%s'\n" % t)
-                    error += 1
-                    continue
-                Precedence[t] = (prec,plevel)
-        except:
-            sys.stderr.write("yacc: Invalid precedence table.\n")
-            error += 1
-
-    return error
-
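
add_precedence() consumes tuples of the form (assoc, term, term, ...), one tuple per precedence level. A standalone illustration of the table it builds (not part of the patch):

    plist = [('left',  'PLUS',  'MINUS'),
             ('left',  'TIMES', 'DIVIDE'),
             ('right', 'UMINUS')]
    Precedence = {}
    for plevel, p in enumerate(plist, 1):
        assoc, terms = p[0], p[1:]
        for t in terms:
            Precedence[t] = (assoc, plevel)
    # Precedence['TIMES'] == ('left', 2): later entries bind tighter
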
-# -----------------------------------------------------------------------------
-# check_precedence()
-#
-# Checks the use of the Precedence tables.  This makes sure all of the symbols
-# are terminals or were used with %prec
-# -----------------------------------------------------------------------------
-
-def check_precedence():
-    error = 0
-    for precname in Precedence.keys():
-        if not (Terminals.has_key(precname) or UsedPrecedence.has_key(precname)):
-            sys.stderr.write("yacc: Precedence rule '%s' defined for unknown symbol '%s'\n" % (Precedence[precname][0],precname))
-            error += 1
-    return error
-
-# -----------------------------------------------------------------------------
-# augment_grammar()
-#
-# Compute the augmented grammar.  This is just a rule S' -> start where start
-# is the starting symbol.
-# -----------------------------------------------------------------------------
-
-def augment_grammar(start=None):
-    if not start:
-        start = Productions[1].name
-    Productions[0] = Production(name="S'",prod=[start],number=0,len=1,prec=('right',0),func=None)
-    Productions[0].usyms = [ start ]
-    Nonterminals[start].append(0)
-
-
-# -------------------------------------------------------------------------
-# first()
-#
-# Compute the value of FIRST1(beta) where beta is a tuple of symbols.
-#
-# During execution of compute_first1, the result may be incomplete.
-# Afterward (e.g., when called from compute_follow()), it will be complete.
-# -------------------------------------------------------------------------
-def first(beta):
-
-    # We are computing First(x1,x2,x3,...,xn)
-    result = [ ]
-    for x in beta:
-        x_produces_empty = 0
-
-        # Add all the non-<empty> symbols of First[x] to the result.
-        for f in First[x]:
-            if f == '<empty>':
-                x_produces_empty = 1
-            else:
-                if f not in result: result.append(f)
-
-        if x_produces_empty:
-            # We have to consider the next x in beta,
-            # i.e. stay in the loop.
-            pass
-        else:
-            # We don't have to consider any further symbols in beta.
-            break
-    else:
-        # There was no 'break' from the loop,
-        # so x_produces_empty was true for all x in beta,
-        # so beta produces empty as well.
-        result.append('<empty>')
-
-    return result
-
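
A worked illustration of first(beta), assuming the First table has already reached its fixed point (standalone sketch, not part of the patch):

    First = {
        'a': ['a'], 'b': ['b'],
        'A': ['a', '<empty>'],   # A -> a | <empty>
        'B': ['b'],              # B -> b
    }

    def first(beta):
        result = []
        for x in beta:
            produces_empty = False
            for f in First[x]:
                if f == '<empty>':
                    produces_empty = True
                elif f not in result:
                    result.append(f)
            if not produces_empty:
                break
        else:
            result.append('<empty>')
        return result

    print(first(['A', 'B']))   # ['a', 'b']: A can vanish, exposing B
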
-
-# FOLLOW(x)
-# Given a non-terminal, this function computes the set of all symbols
-# that might follow it.  Dragon book, p. 189.
-
-def compute_follow(start=None):
-    # Add '$end' to the follow list of the start symbol
-    for k in Nonterminals.keys():
-        Follow[k] = [ ]
-
-    if not start:
-        start = Productions[1].name
-
-    Follow[start] = [ '$end' ]
-
-    while 1:
-        didadd = 0
-        for p in Productions[1:]:
-            # Here is the production set
-            for i in range(len(p.prod)):
-                B = p.prod[i]
-                if Nonterminals.has_key(B):
-                    # Okay. We got a non-terminal in a production
-                    fst = first(p.prod[i+1:])
-                    hasempty = 0
-                    for f in fst:
-                        if f != '<empty>' and f not in Follow[B]:
-                            Follow[B].append(f)
-                            didadd = 1
-                        if f == '<empty>':
-                            hasempty = 1
-                    if hasempty or i == (len(p.prod)-1):
-                        # Add elements of follow(a) to follow(b)
-                        for f in Follow[p.name]:
-                            if f not in Follow[B]:
-                                Follow[B].append(f)
-                                didadd = 1
-        if not didadd: break
-
-    if 0 and yaccdebug:
-        _vf.write('\nFollow:\n')
-        for k in Nonterminals.keys():
-            _vf.write("%-20s : %s\n" % (k, " ".join([str(s) for s in Follow[k]])))
-
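
A short worked example of the computation above (illustrative only):

    # Grammar:  S -> A 'b'     A -> 'a' | <empty>
    # Start: Follow(S) = ['$end'].
    # In S -> A b, first(['b']) = ['b'], so 'b' enters Follow(A).
    # A is not last and 'b' is not nullable, so Follow(S) is not folded
    # into Follow(A); the loop stops once nothing new is added.
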
-# -------------------------------------------------------------------------
-# compute_first1()
-#
-# Compute the value of FIRST1(X) for all symbols
-# -------------------------------------------------------------------------
-def compute_first1():
-
-    # Terminals:
-    for t in Terminals.keys():
-        First[t] = [t]
-
-    First['$end'] = ['$end']
-    First['#'] = ['#'] # what's this for?
-
-    # Nonterminals:
-
-    # Initialize to the empty set:
-    for n in Nonterminals.keys():
-        First[n] = []
-
-    # Then propagate symbols until no change:
-    while 1:
-        some_change = 0
-        for n in Nonterminals.keys():
-            for p in Prodnames[n]:
-                for f in first(p.prod):
-                    if f not in First[n]:
-                        First[n].append( f )
-                        some_change = 1
-        if not some_change:
-            break
-
-    if 0 and yaccdebug:
-        _vf.write('\nFirst:\n')
-        for k in Nonterminals.keys():
-            _vf.write("%-20s : %s\n" %
-                (k, " ".join([str(s) for s in First[k]])))
-
-# -----------------------------------------------------------------------------
-#                           === SLR Generation ===
-#
-# The following functions are used to construct SLR (Simple LR) parsing tables
-# as described on p.221-229 of the dragon book.
-# -----------------------------------------------------------------------------
-
-# Global variables for the LR parsing engine
-def lr_init_vars():
-    global _lr_action, _lr_goto, _lr_method
-    global _lr_goto_cache, _lr0_cidhash
-
-    _lr_action       = { }        # Action table
-    _lr_goto         = { }        # Goto table
-    _lr_method       = "Unknown"  # LR method used
-    _lr_goto_cache   = { }
-    _lr0_cidhash     = { }
-
-
-# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
-# prodlist is a list of productions.
-
-_add_count = 0       # Counter used to detect cycles
-
-def lr0_closure(I):
-    global _add_count
-
-    _add_count += 1
-    prodlist = Productions
-
-    # Add everything in I to J
-    J = I[:]
-    didadd = 1
-    while didadd:
-        didadd = 0
-        for j in J:
-            for x in j.lrafter:
-                if x.lr0_added == _add_count: continue
-                # Add B --> .G to J
-                J.append(x.lr_next)
-                x.lr0_added = _add_count
-                didadd = 1
-
-    return J
-
-# Compute the LR(0) goto function goto(I,X) where I is a set
-# of LR(0) items and X is a grammar symbol.   This function is written
-# in a way that guarantees uniqueness of the generated goto sets
-# (i.e. the same goto set will never be returned as two different Python
-# objects).  With uniqueness, we can later do fast set comparisons using
-# id(obj) instead of element-wise comparison.
-
-def lr0_goto(I,x):
-    # First we look for a previously cached entry
-    g = _lr_goto_cache.get((id(I),x),None)
-    if g: return g
-
-    # Now we generate the goto set in a way that guarantees uniqueness
-    # of the result
-
-    s = _lr_goto_cache.get(x,None)
-    if not s:
-        s = { }
-        _lr_goto_cache[x] = s
-
-    gs = [ ]
-    for p in I:
-        n = p.lr_next
-        if n and n.lrbefore == x:
-            s1 = s.get(id(n),None)
-            if not s1:
-                s1 = { }
-                s[id(n)] = s1
-            gs.append(n)
-            s = s1
-    g = s.get('$end',None)
-    if not g:
-        if gs:
-            g = lr0_closure(gs)
-            s['$end'] = g
-        else:
-            s['$end'] = gs
-    _lr_goto_cache[(id(I),x)] = g
-    return g
-
-_lr0_cidhash = { }
-
-# Compute the LR(0) sets of item function
-def lr0_items():
-
-    C = [ lr0_closure([Productions[0].lr_next]) ]
-    i = 0
-    for I in C:
-        _lr0_cidhash[id(I)] = i
-        i += 1
-
-    # Loop over the items in C and each grammar symbols
-    i = 0
-    while i < len(C):
-        I = C[i]
-        i += 1
-
-        # Collect all of the symbols that could possibly be in the goto(I,X) sets
-        asyms = { }
-        for ii in I:
-            for s in ii.usyms:
-                asyms[s] = None
-
-        for x in asyms.keys():
-            g = lr0_goto(I,x)
-            if not g:  continue
-            if _lr0_cidhash.has_key(id(g)): continue
-            _lr0_cidhash[id(g)] = len(C)
-            C.append(g)
-
-    return C
-
-# -----------------------------------------------------------------------------
-#                       ==== LALR(1) Parsing ====
-#
-# LALR(1) parsing is almost exactly the same as SLR except that instead of
-# relying upon Follow() sets when performing reductions, a more selective
-# lookahead set that incorporates the state of the LR(0) machine is utilized.
-# Thus, we mainly just have to focus on calculating the lookahead sets.
-#
-# The method used here is due to DeRemer and Pennello (1982).
-#
-# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
-#     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
-#     Vol. 4, No. 4, Oct. 1982, pp. 615-649
-#
-# Further details can also be found in:
-#
-#  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
-#      McGraw-Hill Book Company, (1985).
-#
-# Note:  This implementation is a complete replacement of the LALR(1)
-#        implementation in PLY-1.x releases.   That version was based on
-#        a less efficient algorithm and it had bugs in its implementation.
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# compute_nullable_nonterminals()
-#
-# Creates a dictionary containing all of the non-terminals that might produce
-# an empty production.
-# -----------------------------------------------------------------------------
-
-def compute_nullable_nonterminals():
-    nullable = {}
-    num_nullable = 0
-    while 1:
-       for p in Productions[1:]:
-           if p.len == 0:
-                nullable[p.name] = 1
-                continue
-           for t in p.prod:
-                if not nullable.has_key(t): break
-           else:
-                nullable[p.name] = 1
-       if len(nullable) == num_nullable: break
-       num_nullable = len(nullable)
-    return nullable
-
-# -----------------------------------------------------------------------------
-# find_nonterminal_trans(C)
-#
-# Given a set of LR(0) items, this function finds all of the non-terminal
-# transitions.  These are transitions in which a dot appears immediately before
-# a non-terminal.   Returns a list of tuples of the form (state,N) where state
-# is the state number and N is the nonterminal symbol.
-#
-# The input C is the set of LR(0) items.
-# -----------------------------------------------------------------------------
-
-def find_nonterminal_transitions(C):
-     trans = []
-     for state in range(len(C)):
-         for p in C[state]:
-             if p.lr_index < p.len - 1:
-                  t = (state,p.prod[p.lr_index+1])
-                  if Nonterminals.has_key(t[1]):
-                        if t not in trans: trans.append(t)
-         state = state + 1
-     return trans
-
-# -----------------------------------------------------------------------------
-# dr_relation()
-#
-# Computes the DR(p,A) relationships for non-terminal transitions.  The input
-# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
-#
-# Returns a list of terminals.
-# -----------------------------------------------------------------------------
-
-def dr_relation(C,trans,nullable):
-    dr_set = { }
-    state,N = trans
-    terms = []
-
-    g = lr0_goto(C[state],N)
-    for p in g:
-       if p.lr_index < p.len - 1:
-           a = p.prod[p.lr_index+1]
-           if Terminals.has_key(a):
-               if a not in terms: terms.append(a)
-
-    # This extra bit is to handle the start state
-    if state == 0 and N == Productions[0].prod[0]:
-       terms.append('$end')
-
-    return terms
-
-# -----------------------------------------------------------------------------
-# reads_relation()
-#
-# Computes the READS() relation (p,A) READS (t,C).
-# -----------------------------------------------------------------------------
-
-def reads_relation(C, trans, empty):
-    # Look for empty transitions
-    rel = []
-    state, N = trans
-
-    g = lr0_goto(C[state],N)
-    j = _lr0_cidhash.get(id(g),-1)
-    for p in g:
-        if p.lr_index < p.len - 1:
-             a = p.prod[p.lr_index + 1]
-             if empty.has_key(a):
-                  rel.append((j,a))
-
-    return rel
-
-# -----------------------------------------------------------------------------
-# compute_lookback_includes()
-#
-# Determines the lookback and includes relations
-#
-# LOOKBACK:
-#
-# This relation is determined by running the LR(0) state machine forward.
-# For example, starting with a production "N : . A B C", we run it forward
-# to obtain "N : A B C ."   We then build a relationship between this final
-# state and the starting state.   These relationships are stored in a dictionary
-# lookdict.
-#
-# INCLUDES:
-#
-# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
-#
-# This relation is used to determine non-terminal transitions that occur
-# inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
-# if the following holds:
-#
-#       B -> LAT, where T -> epsilon and p' -L-> p
-#
-# L is essentially a prefix (which may be empty), T is a suffix that must be
-# able to derive an empty string.  State p' must lead to state p with the string L.
-#
-# -----------------------------------------------------------------------------
-
-def compute_lookback_includes(C,trans,nullable):
-
-    lookdict = {}          # Dictionary of lookback relations
-    includedict = {}       # Dictionary of include relations
-
-    # Make a dictionary of non-terminal transitions
-    dtrans = {}
-    for t in trans:
-        dtrans[t] = 1
-
-    # Loop over all transitions and compute lookbacks and includes
-    for state,N in trans:
-        lookb = []
-        includes = []
-        for p in C[state]:
-            if p.name != N: continue
-
-            # Okay, we have a name match.  We now follow the production all the way
-            # through the state machine until we get the . on the right hand side
-
-            lr_index = p.lr_index
-            j = state
-            while lr_index < p.len - 1:
-                 lr_index = lr_index + 1
-                 t = p.prod[lr_index]
-
-                 # Check to see if this symbol and state are a non-terminal transition
-                 if dtrans.has_key((j,t)):
-                       # Yes.  Okay, there is some chance that this is an includes relation
-                       # the only way to know for certain is whether the rest of the
-                       # production derives empty
-
-                       li = lr_index + 1
-                       while li < p.len:
-                            if Terminals.has_key(p.prod[li]): break      # No, forget it
-                            if not nullable.has_key(p.prod[li]): break
-                            li = li + 1
-                       else:
-                            # Appears to be a relation between (j,t) and (state,N)
-                            includes.append((j,t))
-
-                 g = lr0_goto(C[j],t)               # Go to next set
-                 j = _lr0_cidhash.get(id(g),-1)     # Go to next state
-
-            # When we get here, j is the final state, now we have to locate the production
-            for r in C[j]:
-                 if r.name != p.name: continue
-                 if r.len != p.len:   continue
-                 i = 0
-                 # This loop is comparing a production ". A B C" with "A B C ."
-                 while i < r.lr_index:
-                      if r.prod[i] != p.prod[i+1]: break
-                      i = i + 1
-                 else:
-                      lookb.append((j,r))
-        for i in includes:
-             if not includedict.has_key(i): includedict[i] = []
-             includedict[i].append((state,N))
-        lookdict[(state,N)] = lookb
-
-    return lookdict,includedict
-
 # -----------------------------------------------------------------------------
 # digraph()
 # traverse()
 #
 # The following two functions are used to compute set valued functions
 # of the form:
 #
 #     F(x) = F'(x) U U{F(y) | x R y}
@@ -2176,720 +1914,1363 @@ def traverse(x,N,stack,F,X,R,FP):
     rel = R(x)               # Get y's related to x
     for y in rel:
         if N[y] == 0:
              traverse(y,N,stack,F,X,R,FP)
         N[x] = min(N[x],N[y])
         for a in F.get(y,[]):
             if a not in F[x]: F[x].append(a)
     if N[x] == d:
-       N[stack[-1]] = sys.maxint
+       N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
-           N[stack[-1]] = sys.maxint
+           N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
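
A sketch of driving the digraph() helper above on a toy relation (illustrative only). With FP(x) = {x} it computes reachability sets, i.e. F(x) = {x} unioned with F(y) for every edge x R y:

    edges = {1: [2], 2: [3], 3: []}     # x R y  iff  y in edges[x]
    R  = lambda x: edges[x]
    FP = lambda x: [x]                  # F'(x) = {x}
    F = digraph(list(edges), R, FP)
    # F == {1: [1, 2, 3], 2: [2, 3], 3: [3]}
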
 
+class LALRError(YaccError): pass
+
 # -----------------------------------------------------------------------------
-# compute_read_sets()
+#                             == LRGeneratedTable ==
 #
-# Given a set of LR(0) items, this function computes the read sets.
-#
-# Inputs:  C        =  Set of LR(0) items
-#          ntrans   = Set of nonterminal transitions
-#          nullable = Set of empty transitions
-#
-# Returns a set containing the read sets
+# This class implements the LR table generation algorithm.  There are no
+# public methods except for write()
 # -----------------------------------------------------------------------------
 
-def compute_read_sets(C, ntrans, nullable):
-    FP = lambda x: dr_relation(C,x,nullable)
-    R =  lambda x: reads_relation(C,x,nullable)
-    F = digraph(ntrans,R,FP)
-    return F
-
-# -----------------------------------------------------------------------------
-# compute_follow_sets()
-#
-# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
-# and an include set, this function computes the follow sets
-#
-# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
-#
-# Inputs:
-#            ntrans     = Set of nonterminal transitions
-#            readsets   = Readset (previously computed)
-#            inclsets   = Include sets (previously computed)
-#
-# Returns a set containing the follow sets
-# -----------------------------------------------------------------------------
-
-def compute_follow_sets(ntrans,readsets,inclsets):
-     FP = lambda x: readsets[x]
-     R  = lambda x: inclsets.get(x,[])
-     F = digraph(ntrans,R,FP)
-     return F
-
-# -----------------------------------------------------------------------------
-# add_lookaheads()
-#
-# Attaches the lookahead symbols to grammar rules.
-#
-# Inputs:    lookbacks         -  Set of lookback relations
-#            followset         -  Computed follow set
-#
-# This function directly attaches the lookaheads to productions contained
-# in the lookbacks set
-# -----------------------------------------------------------------------------
-
-def add_lookaheads(lookbacks,followset):
-    for trans,lb in lookbacks.items():
-        # Loop over productions in lookback
-        for state,p in lb:
-             if not p.lookaheads.has_key(state):
-                  p.lookaheads[state] = []
-             f = followset.get(trans,[])
-             for a in f:
-                  if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
-
-# -----------------------------------------------------------------------------
-# add_lalr_lookaheads()
-#
-# This function does all of the work of adding lookahead information for use
-# with LALR parsing
-# -----------------------------------------------------------------------------
-
-def add_lalr_lookaheads(C):
-    # Determine all of the nullable nonterminals
-    nullable = compute_nullable_nonterminals()
-
-    # Find all non-terminal transitions
-    trans = find_nonterminal_transitions(C)
-
-    # Compute read sets
-    readsets = compute_read_sets(C,trans,nullable)
-
-    # Compute lookback/includes relations
-    lookd, included = compute_lookback_includes(C,trans,nullable)
-
-    # Compute LALR FOLLOW sets
-    followsets = compute_follow_sets(trans,readsets,included)
-
-    # Add all of the lookaheads
-    add_lookaheads(lookd,followsets)
-
-# -----------------------------------------------------------------------------
-# lr_parse_table()
-#
-# This function constructs the parse tables for SLR or LALR
-# -----------------------------------------------------------------------------
-def lr_parse_table(method):
-    global _lr_method
-    goto = _lr_goto           # Goto array
-    action = _lr_action       # Action array
-    actionp = { }             # Action production array (temporary)
-
-    _lr_method = method
-
-    n_srconflict = 0
-    n_rrconflict = 0
-
-    if yaccdebug:
-        sys.stderr.write("yacc: Generating %s parsing table...\n" % method)
-        _vf.write("\n\nParsing method: %s\n\n" % method)
-
-    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
-    # This determines the number of states
-
-    C = lr0_items()
-
-    if method == 'LALR':
-        add_lalr_lookaheads(C)
-
-
-    # Build the parser table, state by state
-    st = 0
-    for I in C:
-        # Loop over each production in I
-        actlist = [ ]              # List of actions
-        st_action  = { }
-        st_actionp = { }
-        st_goto    = { }
-        if yaccdebug:
-            _vf.write("\nstate %d\n\n" % st)
+class LRGeneratedTable(LRTable):
+    def __init__(self,grammar,method='LALR',log=None):
+        if method not in ['SLR','LALR']:
+            raise LALRError("Unsupported method %s" % method)
+
+        self.grammar = grammar
+        self.lr_method = method
+
+        # Set up the logger
+        if not log:
+            log = NullLogger()
+        self.log = log
+
+        # Internal attributes
+        self.lr_action     = {}        # Action table
+        self.lr_goto       = {}        # Goto table
+        self.lr_productions  = grammar.Productions    # Copy of grammar Production array
+        self.lr_goto_cache = {}        # Cache of computed gotos
+        self.lr0_cidhash   = {}        # Cache of closures
+
+        self._add_count    = 0         # Internal counter used to detect cycles
+
+        # Diagnostic information filled in by the table generator
+        self.sr_conflict   = 0
+        self.rr_conflict   = 0
+        self.conflicts     = []        # List of conflicts
+
+        self.sr_conflicts  = []
+        self.rr_conflicts  = []
+
+        # Build the tables
+        self.grammar.build_lritems()
+        self.grammar.compute_first()
+        self.grammar.compute_follow()
+        self.lr_parse_table()
+
+    # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+
+    def lr0_closure(self,I):
+        self._add_count += 1
+
+        # Add everything in I to J
+        J = I[:]
+        didadd = 1
+        while didadd:
+            didadd = 0
+            for j in J:
+                for x in j.lr_after:
+                    if getattr(x,"lr0_added",0) == self._add_count: continue
+                    # Add B --> .G to J
+                    J.append(x.lr_next)
+                    x.lr0_added = self._add_count
+                    didadd = 1
+
+        return J
+
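
The closure computed above, in miniature on a toy grammar (standalone sketch with hypothetical names; the real method walks precomputed lr_next/lr_after links instead):

    prods = {'S': [['E']], 'E': [['E', '+', 'T'], ['T']], 'T': [['id']]}

    def closure(items):
        # items are (lhs, rhs, dot) triples; add (N, alt, 0) whenever the
        # dot sits in front of a nonterminal N.
        J = list(items)
        for lhs, rhs, dot in J:
            if dot < len(rhs) and rhs[dot] in prods:
                for alt in prods[rhs[dot]]:
                    item = (rhs[dot], tuple(alt), 0)
                    if item not in J:
                        J.append(item)
        return J

    print(closure([('S', ('E',), 0)]))
    # adds E -> . E + T, E -> . T and T -> . id
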
+    # Compute the LR(0) goto function goto(I,X) where I is a set
+    # of LR(0) items and X is a grammar symbol.   This function is written
+    # in a way that guarantees uniqueness of the generated goto sets
+    # (i.e. the same goto set will never be returned as two different Python
+    # objects).  With uniqueness, we can later do fast set comparisons using
+    # id(obj) instead of element-wise comparison.
+
+    def lr0_goto(self,I,x):
+        # First we look for a previously cached entry
+        g = self.lr_goto_cache.get((id(I),x),None)
+        if g: return g
+
+        # Now we generate the goto set in a way that guarantees uniqueness
+        # of the result
+
+        s = self.lr_goto_cache.get(x,None)
+        if not s:
+            s = { }
+            self.lr_goto_cache[x] = s
+
+        gs = [ ]
+        for p in I:
+            n = p.lr_next
+            if n and n.lr_before == x:
+                s1 = s.get(id(n),None)
+                if not s1:
+                    s1 = { }
+                    s[id(n)] = s1
+                gs.append(n)
+                s = s1
+        g = s.get('$end',None)
+        if not g:
+            if gs:
+                g = self.lr0_closure(gs)
+                s['$end'] = g
+            else:
+                s['$end'] = gs
+        self.lr_goto_cache[(id(I),x)] = g
+        return g
+
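
The goto-cache trie above exists so that equal goto sets end up as the same Python object, letting later code compare states with id() instead of element-wise. The idea in miniature (illustrative only):

    cache = {}
    def canon(key, make):
        # Return one canonical object per key, creating it on first use.
        obj = cache.get(key)
        if obj is None:
            obj = cache[key] = make()
        return obj

    a = canon(('I0', 'PLUS'), list)
    b = canon(('I0', 'PLUS'), list)
    assert a is b      # same object, so id(a) == id(b)
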
+    # Compute the LR(0) sets of item function
+    def lr0_items(self):
+
+        C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
+        i = 0
+        for I in C:
+            self.lr0_cidhash[id(I)] = i
+            i += 1
+
+        # Loop over the items in C and each grammar symbols
+        i = 0
+        while i < len(C):
+            I = C[i]
+            i += 1
+
+            # Collect all of the symbols that could possibly be in the goto(I,X) sets
+            asyms = { }
+            for ii in I:
+                for s in ii.usyms:
+                    asyms[s] = None
+
+            for x in asyms:
+                g = self.lr0_goto(I,x)
+                if not g:  continue
+                if id(g) in self.lr0_cidhash: continue
+                self.lr0_cidhash[id(g)] = len(C)
+                C.append(g)
+
+        return C
+
+    # -----------------------------------------------------------------------------
+    #                       ==== LALR(1) Parsing ====
+    #
+    # LALR(1) parsing is almost exactly the same as SLR except that instead of
+    # relying upon Follow() sets when performing reductions, a more selective
+    # lookahead set that incorporates the state of the LR(0) machine is utilized.
+    # Thus, we mainly just have to focus on calculating the lookahead sets.
+    #
+    # The method used here is due to DeRemer and Pennello (1982).
+    #
+    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
+    #
+    # Further details can also be found in:
+    #
+    #  J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+    #      McGraw-Hill Book Company, (1985).
+    #
+    # -----------------------------------------------------------------------------
+
+    # -----------------------------------------------------------------------------
+    # compute_nullable_nonterminals()
+    #
+    # Creates a dictionary containing all of the non-terminals that might produce
+    # an empty production.
+    # -----------------------------------------------------------------------------
+
+    def compute_nullable_nonterminals(self):
+        nullable = {}
+        num_nullable = 0
+        while 1:
+           for p in self.grammar.Productions[1:]:
+               if p.len == 0:
+                    nullable[p.name] = 1
+                    continue
+               for t in p.prod:
+                    if not t in nullable: break
+               else:
+                    nullable[p.name] = 1
+           if len(nullable) == num_nullable: break
+           num_nullable = len(nullable)
+        return nullable
+
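
The same fixed-point, standalone, on a toy grammar (illustrative only):

    prods = [('A', []), ('B', ['A', 'A']), ('C', ['B', 'x'])]
    nullable = set()
    changed = True
    while changed:
        changed = False
        for name, rhs in prods:
            if name not in nullable and all(s in nullable for s in rhs):
                nullable.add(name)
                changed = True
    print(nullable)   # {'A', 'B'} -- C needs the terminal 'x'
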
+    # -----------------------------------------------------------------------------
+    # find_nonterminal_trans(C)
+    #
+    # Given a set of LR(0) items, this function finds all of the non-terminal
+    # transitions.  These are transitions in which a dot appears immediately before
+    # a non-terminal.   Returns a list of tuples of the form (state,N) where state
+    # is the state number and N is the nonterminal symbol.
+    #
+    # The input C is the set of LR(0) items.
+    # -----------------------------------------------------------------------------
+
+    def find_nonterminal_transitions(self,C):
+         trans = []
+         for state in range(len(C)):
+             for p in C[state]:
+                 if p.lr_index < p.len - 1:
+                      t = (state,p.prod[p.lr_index+1])
+                      if t[1] in self.grammar.Nonterminals:
+                            if t not in trans: trans.append(t)
+             state = state + 1
+         return trans
+
+    # -----------------------------------------------------------------------------
+    # dr_relation()
+    #
+    # Computes the DR(p,A) relationships for non-terminal transitions.  The input
+    # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+    #
+    # Returns a list of terminals.
+    # -----------------------------------------------------------------------------
+
+    def dr_relation(self,C,trans,nullable):
+        dr_set = { }
+        state,N = trans
+        terms = []
+
+        g = self.lr0_goto(C[state],N)
+        for p in g:
+           if p.lr_index < p.len - 1:
+               a = p.prod[p.lr_index+1]
+               if a in self.grammar.Terminals:
+                   if a not in terms: terms.append(a)
+
+        # This extra bit is to handle the start state
+        if state == 0 and N == self.grammar.Productions[0].prod[0]:
+           terms.append('$end')
+
+        return terms
+
+    # -----------------------------------------------------------------------------
+    # reads_relation()
+    #
+    # Computes the READS() relation (p,A) READS (t,C).
+    # -----------------------------------------------------------------------------
+
+    def reads_relation(self,C, trans, empty):
+        # Look for empty transitions
+        rel = []
+        state, N = trans
+
+        g = self.lr0_goto(C[state],N)
+        j = self.lr0_cidhash.get(id(g),-1)
+        for p in g:
+            if p.lr_index < p.len - 1:
+                 a = p.prod[p.lr_index + 1]
+                 if a in empty:
+                      rel.append((j,a))
+
+        return rel
+
+    # -----------------------------------------------------------------------------
+    # compute_lookback_includes()
+    #
+    # Determines the lookback and includes relations
+    #
+    # LOOKBACK:
+    #
+    # This relation is determined by running the LR(0) state machine forward.
+    # For example, starting with a production "N : . A B C", we run it forward
+    # to obtain "N : A B C ."   We then build a relationship between this final
+    # state and the starting state.   These relationships are stored in a dictionary
+    # lookdict.
+    #
+    # INCLUDES:
+    #
+    # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+    #
+    # This relation is used to determine non-terminal transitions that occur
+    # inside of other non-terminal transition states.   (p,A) INCLUDES (p', B)
+    # if the following holds:
+    #
+    #       B -> LAT, where T -> epsilon and p' -L-> p
+    #
+    # L is essentially a prefix (which may be empty), T is a suffix that must be
+    # able to derive an empty string.  State p' must lead to state p with the string L.
+    #
+    # -----------------------------------------------------------------------------
+
+    def compute_lookback_includes(self,C,trans,nullable):
+
+        lookdict = {}          # Dictionary of lookback relations
+        includedict = {}       # Dictionary of include relations
+
+        # Make a dictionary of non-terminal transitions
+        dtrans = {}
+        for t in trans:
+            dtrans[t] = 1
+
+        # Loop over all transitions and compute lookbacks and includes
+        for state,N in trans:
+            lookb = []
+            includes = []
+            for p in C[state]:
+                if p.name != N: continue
+
+                # Okay, we have a name match.  We now follow the production all the way
+                # through the state machine until we get the . on the right hand side
+
+                lr_index = p.lr_index
+                j = state
+                while lr_index < p.len - 1:
+                     lr_index = lr_index + 1
+                     t = p.prod[lr_index]
+
+                     # Check to see if this symbol and state are a non-terminal transition
+                     if (j,t) in dtrans:
+                           # Yes.  Okay, there is some chance that this is an includes relation
+                           # the only way to know for certain is whether the rest of the
+                           # production derives empty
+
+                           li = lr_index + 1
+                           while li < p.len:
+                                if p.prod[li] in self.grammar.Terminals: break      # No, forget it
+                                if not p.prod[li] in nullable: break
+                                li = li + 1
+                           else:
+                                # Appears to be a relation between (j,t) and (state,N)
+                                includes.append((j,t))
+
+                     g = self.lr0_goto(C[j],t)               # Go to next set
+                     j = self.lr0_cidhash.get(id(g),-1)     # Go to next state
+
+                # When we get here, j is the final state, now we have to locate the production
+                for r in C[j]:
+                     if r.name != p.name: continue
+                     if r.len != p.len:   continue
+                     i = 0
+                     # This loop is comparing a production ". A B C" with "A B C ."
+                     while i < r.lr_index:
+                          if r.prod[i] != p.prod[i+1]: break
+                          i = i + 1
+                     else:
+                          lookb.append((j,r))
+            for i in includes:
+                 if not i in includedict: includedict[i] = []
+                 includedict[i].append((state,N))
+            lookdict[(state,N)] = lookb
+
+        return lookdict,includedict
+
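
A concrete instance of the INCLUDES definition above (illustrative only):

    # Grammar fragment:  B -> 'a' A C      C -> <empty>
    # Here L = 'a' and T = C (nullable). If state p' shifts 'a' and
    # reaches state p, then (p, A) INCLUDES (p', B): any lookahead that
    # can follow B can also follow A, because C can derive nothing.
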
+    # -----------------------------------------------------------------------------
+    # compute_read_sets()
+    #
+    # Given a set of LR(0) items, this function computes the read sets.
+    #
+    # Inputs:  C        =  Set of LR(0) items
+    #          ntrans   = Set of nonterminal transitions
+    #          nullable = Set of empty transitions
+    #
+    # Returns a set containing the read sets
+    # -----------------------------------------------------------------------------
+
+    def compute_read_sets(self,C, ntrans, nullable):
+        FP = lambda x: self.dr_relation(C,x,nullable)
+        R =  lambda x: self.reads_relation(C,x,nullable)
+        F = digraph(ntrans,R,FP)
+        return F
+
+    # -----------------------------------------------------------------------------
+    # compute_follow_sets()
+    #
+    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+    # and an include set, this function computes the follow sets
+    #
+    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+    #
+    # Inputs:
+    #            ntrans     = Set of nonterminal transitions
+    #            readsets   = Readset (previously computed)
+    #            inclsets   = Include sets (previously computed)
+    #
+    # Returns a set containing the follow sets
+    # -----------------------------------------------------------------------------
+
+    def compute_follow_sets(self,ntrans,readsets,inclsets):
+         FP = lambda x: readsets[x]
+         R  = lambda x: inclsets.get(x,[])
+         F = digraph(ntrans,R,FP)
+         return F
+
+    # -----------------------------------------------------------------------------
+    # add_lookaheads()
+    #
+    # Attaches the lookahead symbols to grammar rules.
+    #
+    # Inputs:    lookbacks         -  Set of lookback relations
+    #            followset         -  Computed follow set
+    #
+    # This function directly attaches the lookaheads to productions contained
+    # in the lookbacks set
+    # -----------------------------------------------------------------------------
+
+    def add_lookaheads(self,lookbacks,followset):
+        for trans,lb in lookbacks.items():
+            # Loop over productions in lookback
+            for state,p in lb:
+                 if not state in p.lookaheads:
+                      p.lookaheads[state] = []
+                 f = followset.get(trans,[])
+                 for a in f:
+                      if a not in p.lookaheads[state]: p.lookaheads[state].append(a)
+
+    # -----------------------------------------------------------------------------
+    # add_lalr_lookaheads()
+    #
+    # This function does all of the work of adding lookahead information for use
+    # with LALR parsing
+    # -----------------------------------------------------------------------------
+
+    def add_lalr_lookaheads(self,C):
+        # Determine all of the nullable nonterminals
+        nullable = self.compute_nullable_nonterminals()
+
+        # Find all non-terminal transitions
+        trans = self.find_nonterminal_transitions(C)
+
+        # Compute read sets
+        readsets = self.compute_read_sets(C,trans,nullable)
+
+        # Compute lookback/includes relations
+        lookd, included = self.compute_lookback_includes(C,trans,nullable)
+
+        # Compute LALR FOLLOW sets
+        followsets = self.compute_follow_sets(trans,readsets,included)
+
+        # Add all of the lookaheads
+        self.add_lookaheads(lookd,followsets)
+
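
lr_parse_table() below resolves shift/reduce conflicts by comparing precedence levels; the shift-side rule in miniature (hypothetical resolve() helper, not part of the patch):

    Precedence = {'PLUS': ('left', 1), 'TIMES': ('left', 2)}

    def resolve(rule_prec, lookahead):
        # rule_prec is the reduce rule's (assoc, level); the lookahead
        # token supplies the shift side, defaulting to ('right', 0).
        rprec, rlevel = rule_prec
        sprec, slevel = Precedence.get(lookahead, ('right', 0))
        if (slevel > rlevel) or (slevel == rlevel and rprec == 'right'):
            return 'shift'
        if slevel == rlevel and rprec == 'nonassoc':
            return 'error'
        return 'reduce'

    print(resolve(Precedence['PLUS'], 'TIMES'))   # shift:  TIMES binds tighter
    print(resolve(Precedence['TIMES'], 'PLUS'))   # reduce: TIMES already bound
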
+    # -----------------------------------------------------------------------------
+    # lr_parse_table()
+    #
+    # This function constructs the parse tables for SLR or LALR
+    # -----------------------------------------------------------------------------
+    def lr_parse_table(self):
+        Productions = self.grammar.Productions
+        Precedence  = self.grammar.Precedence
+        goto   = self.lr_goto         # Goto array
+        action = self.lr_action       # Action array
+        log    = self.log             # Logger for output
+
+        actionp = { }                 # Action production array (temporary)
+        
+        log.info("Parsing method: %s", self.lr_method)
+
+        # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+        # This determines the number of states
+
+        C = self.lr0_items()
+
+        if self.lr_method == 'LALR':
+            self.add_lalr_lookaheads(C)
+
+        # Build the parser table, state by state
+        st = 0
+        for I in C:
+            # Loop over each production in I
+            actlist = [ ]              # List of actions
+            st_action  = { }
+            st_actionp = { }
+            st_goto    = { }
+            log.info("")
+            log.info("state %d", st)
+            log.info("")
             for p in I:
-                _vf.write("    (%d) %s\n" % (p.number, str(p)))
-            _vf.write("\n")
-
-        for p in I:
-            try:
-                if p.len == p.lr_index + 1:
-                    if p.name == "S'":
-                        # Start symbol. Accept!
-                        st_action["$end"] = 0
-                        st_actionp["$end"] = p
+                log.info("    (%d) %s", p.number, str(p))
+            log.info("")
+
+            for p in I:
+                    if p.len == p.lr_index + 1:
+                        if p.name == "S'":
+                            # Start symbol. Accept!
+                            st_action["$end"] = 0
+                            st_actionp["$end"] = p
+                        else:
+                            # We are at the end of a production.  Reduce!
+                            if self.lr_method == 'LALR':
+                                laheads = p.lookaheads[st]
+                            else:
+                                laheads = self.grammar.Follow[p.name]
+                            for a in laheads:
+                                actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
+                                r = st_action.get(a,None)
+                                if r is not None:
+                                    # Whoa. Have a shift/reduce or reduce/reduce conflict
+                                    if r > 0:
+                                        # Need to decide on shift or reduce here
+                                        # By default we favor shifting. Need to add
+                                        # some precedence rules here.
+                                        sprec,slevel = Productions[st_actionp[a].number].prec
+                                        rprec,rlevel = Precedence.get(a,('right',0))
+                                        if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+                                            # We really need to reduce here.
+                                            st_action[a] = -p.number
+                                            st_actionp[a] = p
+                                            if not slevel and not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
+                                                self.sr_conflicts.append((st,a,'reduce'))
+                                            Productions[p.number].reduced += 1
+                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
+                                            st_action[a] = None
+                                        else:
+                                            # Hmmm. Guess we'll keep the shift
+                                            if not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
+                                                self.sr_conflicts.append((st,a,'shift'))
+                                    elif r < 0:
+                                        # Reduce/reduce conflict.   In this case, we favor the rule
+                                        # that was defined first in the grammar file
+                                        oldp = Productions[-r]
+                                        pp = Productions[p.number]
+                                        if oldp.line > pp.line:
+                                            st_action[a] = -p.number
+                                            st_actionp[a] = p
+                                            chosenp,rejectp = pp,oldp
+                                            Productions[p.number].reduced += 1
+                                            Productions[oldp.number].reduced -= 1
+                                        else:
+                                            chosenp,rejectp = oldp,pp
+                                        self.rr_conflicts.append((st,chosenp,rejectp))
+                                        log.info("  ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
+                                    else:
+                                        raise LALRError("Unknown conflict in state %d" % st)
+                                else:
+                                    st_action[a] = -p.number
+                                    st_actionp[a] = p
+                                    Productions[p.number].reduced += 1
                     else:
-                        # We are at the end of a production.  Reduce!
-                        if method == 'LALR':
-                            laheads = p.lookaheads[st]
-                        else:
-                            laheads = Follow[p.name]
-                        for a in laheads:
-                            actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
-                            r = st_action.get(a,None)
-                            if r is not None:
-                                # Whoa. Have a shift/reduce or reduce/reduce conflict
-                                if r > 0:
-                                    # Need to decide on shift or reduce here
-                                    # By default we favor shifting. Need to add
-                                    # some precedence rules here.
-                                    sprec,slevel = Productions[st_actionp[a].number].prec
-                                    rprec,rlevel = Precedence.get(a,('right',0))
-                                    if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
-                                        # We really need to reduce here.
-                                        st_action[a] = -p.number
-                                        st_actionp[a] = p
-                                        if not slevel and not rlevel:
-                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
-                                            _vf.write("  ! shift/reduce conflict for %s resolved as reduce.\n" % a)
-                                            n_srconflict += 1
-                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
-                                        st_action[a] = None
+                        i = p.lr_index
+                        a = p.prod[i+1]       # Get symbol right after the "."
+                        if a in self.grammar.Terminals:
+                            g = self.lr0_goto(I,a)
+                            j = self.lr0_cidhash.get(id(g),-1)
+                            if j >= 0:
+                                # We are in a shift state
+                                actlist.append((a,p,"shift and go to state %d" % j))
+                                r = st_action.get(a,None)
+                                if r is not None:
+                                    # Whoa, we have a shift/reduce or shift/shift conflict
+                                    if r > 0:
+                                        if r != j:
+                                            raise LALRError("Shift/shift conflict in state %d" % st)
+                                    elif r < 0:
+                                        # Do a precedence check.
+                                        #   -  if precedence of reduce rule is higher, we reduce.
+                                        #   -  if precedence of reduce is same and left assoc, we reduce.
+                                        #   -  otherwise we shift
+                                        rprec,rlevel = Productions[st_actionp[a].number].prec
+                                        sprec,slevel = Precedence.get(a,('right',0))
+                                        if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+                                            # We decide to shift here... highest precedence to shift
+                                            Productions[st_actionp[a].number].reduced -= 1
+                                            st_action[a] = j
+                                            st_actionp[a] = p
+                                            if not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as shift",a)
+                                                self.sr_conflicts.append((st,a,'shift'))
+                                        elif (slevel == rlevel) and (rprec == 'nonassoc'):
+                                            st_action[a] = None
+                                        else:
+                                            # Hmmm. Guess we'll keep the reduce
+                                            if not slevel and not rlevel:
+                                                log.info("  ! shift/reduce conflict for %s resolved as reduce",a)
+                                                self.sr_conflicts.append((st,a,'reduce'))
+
                                     else:
-                                        # Hmmm. Guess we'll keep the shift
-                                        if not rlevel:
-                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
-                                            _vf.write("  ! shift/reduce conflict for %s resolved as shift.\n" % a)
-                                            n_srconflict +=1
-                                elif r < 0:
-                                    # Reduce/reduce conflict.   In this case, we favor the rule
-                                    # that was defined first in the grammar file
-                                    oldp = Productions[-r]
-                                    pp = Productions[p.number]
-                                    if oldp.line > pp.line:
-                                        st_action[a] = -p.number
-                                        st_actionp[a] = p
-                                    # sys.stderr.write("Reduce/reduce conflict in state %d\n" % st)
-                                    n_rrconflict += 1
-                                    _vfc.write("reduce/reduce conflict in state %d resolved using rule %d (%s).\n" % (st, st_actionp[a].number, st_actionp[a]))
-                                    _vf.write("  ! reduce/reduce conflict for %s resolved using rule %d (%s).\n" % (a,st_actionp[a].number, st_actionp[a]))
+                                        raise LALRError("Unknown conflict in state %d" % st)
                                 else:
-                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
-                            else:
-                                st_action[a] = -p.number
-                                st_actionp[a] = p
-                else:
-                    i = p.lr_index
-                    a = p.prod[i+1]       # Get symbol right after the "."
-                    if Terminals.has_key(a):
-                        g = lr0_goto(I,a)
-                        j = _lr0_cidhash.get(id(g),-1)
-                        if j >= 0:
-                            # We are in a shift state
-                            actlist.append((a,p,"shift and go to state %d" % j))
-                            r = st_action.get(a,None)
-                            if r is not None:
-                                # Whoa, we have a shift/reduce or shift/shift conflict
-                                if r > 0:
-                                    if r != j:
-                                        sys.stderr.write("Shift/shift conflict in state %d\n" % st)
-                                elif r < 0:
-                                    # Do a precedence check.
-                                    #   -  if precedence of reduce rule is higher, we reduce.
-                                    #   -  if precedence of reduce is same and left assoc, we reduce.
-                                    #   -  otherwise we shift
-                                    rprec,rlevel = Productions[st_actionp[a].number].prec
-                                    sprec,slevel = Precedence.get(a,('right',0))
-                                    if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
-                                        # We decide to shift here... highest precedence to shift
-                                        st_action[a] = j
-                                        st_actionp[a] = p
-                                        if not rlevel:
-                                            n_srconflict += 1
-                                            _vfc.write("shift/reduce conflict in state %d resolved as shift.\n" % st)
-                                            _vf.write("  ! shift/reduce conflict for %s resolved as shift.\n" % a)
-                                    elif (slevel == rlevel) and (rprec == 'nonassoc'):
-                                        st_action[a] = None
-                                    else:
-                                        # Hmmm. Guess we'll keep the reduce
-                                        if not slevel and not rlevel:
-                                            n_srconflict +=1
-                                            _vfc.write("shift/reduce conflict in state %d resolved as reduce.\n" % st)
-                                            _vf.write("  ! shift/reduce conflict for %s resolved as reduce.\n" % a)
-
-                                else:
-                                    sys.stderr.write("Unknown conflict in state %d\n" % st)
-                            else:
-                                st_action[a] = j
-                                st_actionp[a] = p
-
-            except StandardError,e:
-               print sys.exc_info()
-               raise YaccError, "Hosed in lr_parse_table"
-
-        # Print the actions associated with each terminal
-        if yaccdebug:
-          _actprint = { }
-          for a,p,m in actlist:
-            if st_action.has_key(a):
-                if p is st_actionp[a]:
-                    _vf.write("    %-15s %s\n" % (a,m))
-                    _actprint[(a,m)] = 1
-          _vf.write("\n")
-          for a,p,m in actlist:
-            if st_action.has_key(a):
-                if p is not st_actionp[a]:
-                    if not _actprint.has_key((a,m)):
-                        _vf.write("  ! %-15s [ %s ]\n" % (a,m))
+                                    st_action[a] = j
+                                    st_actionp[a] = p
+
+            # Print the actions associated with each terminal
+            _actprint = { }
+            for a,p,m in actlist:
+                if a in st_action:
+                    if p is st_actionp[a]:
+                        log.info("    %-15s %s",a,m)
                         _actprint[(a,m)] = 1
-
-        # Construct the goto table for this state
-        if yaccdebug:
-            _vf.write("\n")
-        nkeys = { }
-        for ii in I:
-            for s in ii.usyms:
-                if Nonterminals.has_key(s):
-                    nkeys[s] = None
-        for n in nkeys.keys():
-            g = lr0_goto(I,n)
-            j = _lr0_cidhash.get(id(g),-1)
-            if j >= 0:
-                st_goto[n] = j
-                if yaccdebug:
-                    _vf.write("    %-30s shift and go to state %d\n" % (n,j))
-
-        action[st] = st_action
-        actionp[st] = st_actionp
-        goto[st] = st_goto
-
-        st += 1
-
-    if yaccdebug:
-        if n_srconflict == 1:
-            sys.stderr.write("yacc: %d shift/reduce conflict\n" % n_srconflict)
-        if n_srconflict > 1:
-            sys.stderr.write("yacc: %d shift/reduce conflicts\n" % n_srconflict)
-        if n_rrconflict == 1:
-            sys.stderr.write("yacc: %d reduce/reduce conflict\n" % n_rrconflict)
-        if n_rrconflict > 1:
-            sys.stderr.write("yacc: %d reduce/reduce conflicts\n" % n_rrconflict)
-
-# -----------------------------------------------------------------------------
-#                          ==== LR Utility functions ====
-# -----------------------------------------------------------------------------
-
-# -----------------------------------------------------------------------------
-# _lr_write_tables()
-#
-# This function writes the LR parsing tables to a file
-# -----------------------------------------------------------------------------
-
-def lr_write_tables(modulename=tab_module,outputdir=''):
-    if isinstance(modulename, types.ModuleType):
-        print >>sys.stderr, "Warning module %s is inconsistent with the grammar (ignored)" % modulename
-        return
-
-    basemodulename = modulename.split(".")[-1]
-    filename = os.path.join(outputdir,basemodulename) + ".py"
-    try:
-        f = open(filename,"w")
-
-        f.write("""
+            log.info("")
+            # Print the actions that were not used. (debugging)
+            not_used = 0
+            for a,p,m in actlist:
+                if a in st_action:
+                    if p is not st_actionp[a]:
+                        if not (a,m) in _actprint:
+                            log.debug("  ! %-15s [ %s ]",a,m)
+                            not_used = 1
+                            _actprint[(a,m)] = 1
+            if not_used:
+                log.debug("")
+
+            # Construct the goto table for this state
+
+            nkeys = { }
+            for ii in I:
+                for s in ii.usyms:
+                    if s in self.grammar.Nonterminals:
+                        nkeys[s] = None
+            for n in nkeys:
+                g = self.lr0_goto(I,n)
+                j = self.lr0_cidhash.get(id(g),-1)
+                if j >= 0:
+                    st_goto[n] = j
+                    log.info("    %-30s shift and go to state %d",n,j)
+
+            action[st] = st_action
+            actionp[st] = st_actionp
+            goto[st] = st_goto
+            st += 1
+
+
+    # -----------------------------------------------------------------------------
+    # write()
+    #
+    # This function writes the LR parsing tables to a file
+    # -----------------------------------------------------------------------------
+
+    def write_table(self,modulename,outputdir='',signature=""):
+        basemodulename = modulename.split(".")[-1]
+        filename = os.path.join(outputdir,basemodulename) + ".py"
+        try:
+            f = open(filename,"w")
+
+            f.write("""
 # %s
 # This file is automatically generated. Do not edit.
-
-_lr_method = %s
-
-_lr_signature = %s
-""" % (filename, repr(_lr_method), repr(Signature.digest())))
-
-        # Change smaller to 0 to go back to original tables
-        smaller = 1
-
-        # Factor out names to try and make smaller
-        if smaller:
-            items = { }
-
-            for s,nd in _lr_action.items():
-               for name,v in nd.items():
-                  i = items.get(name)
-                  if not i:
-                     i = ([],[])
-                     items[name] = i
-                  i[0].append(s)
-                  i[1].append(v)
-
-            f.write("\n_lr_action_items = {")
-            for k,v in items.items():
-                f.write("%r:([" % k)
-                for i in v[0]:
-                    f.write("%r," % i)
-                f.write("],[")
-                for i in v[1]:
-                    f.write("%r," % i)
-
-                f.write("]),")
-            f.write("}\n")
-
-            f.write("""
+_tabversion = %r
+
+_lr_method = %r
+
+_lr_signature = %r
+    """ % (filename, __tabversion__, self.lr_method, signature))
+
+            # Change smaller to 0 to go back to original tables
+            smaller = 1
+
+            # Factor out names to try and make smaller
+            if smaller:
+                items = { }
+
+                for s,nd in self.lr_action.items():
+                   for name,v in nd.items():
+                      i = items.get(name)
+                      if not i:
+                         i = ([],[])
+                         items[name] = i
+                      i[0].append(s)
+                      i[1].append(v)
+
+                f.write("\n_lr_action_items = {")
+                for k,v in items.items():
+                    f.write("%r:([" % k)
+                    for i in v[0]:
+                        f.write("%r," % i)
+                    f.write("],[")
+                    for i in v[1]:
+                        f.write("%r," % i)
+
+                    f.write("]),")
+                f.write("}\n")
+
+                f.write("""
 _lr_action = { }
 for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
-      if not _lr_action.has_key(_x):  _lr_action[_x] = { }
+      if not _x in _lr_action:  _lr_action[_x] = { }
       _lr_action[_x][_k] = _y
 del _lr_action_items
 """)
 
-        else:
-            f.write("\n_lr_action = { ");
-            for k,v in _lr_action.items():
-                f.write("(%r,%r):%r," % (k[0],k[1],v))
-            f.write("}\n");
-
-        if smaller:
-            # Factor out names to try and make smaller
-            items = { }
-
-            for s,nd in _lr_goto.items():
-               for name,v in nd.items():
-                  i = items.get(name)
-                  if not i:
-                     i = ([],[])
-                     items[name] = i
-                  i[0].append(s)
-                  i[1].append(v)
-
-            f.write("\n_lr_goto_items = {")
-            for k,v in items.items():
-                f.write("%r:([" % k)
-                for i in v[0]:
-                    f.write("%r," % i)
-                f.write("],[")
-                for i in v[1]:
-                    f.write("%r," % i)
-
-                f.write("]),")
-            f.write("}\n")
-
-            f.write("""
+            else:
+                f.write("\n_lr_action = { ");
+                for k,v in self.lr_action.items():
+                    f.write("(%r,%r):%r," % (k[0],k[1],v))
+                f.write("}\n");
+
+            if smaller:
+                # Factor out names to try and make smaller
+                items = { }
+
+                for s,nd in self.lr_goto.items():
+                   for name,v in nd.items():
+                      i = items.get(name)
+                      if not i:
+                         i = ([],[])
+                         items[name] = i
+                      i[0].append(s)
+                      i[1].append(v)
+
+                f.write("\n_lr_goto_items = {")
+                for k,v in items.items():
+                    f.write("%r:([" % k)
+                    for i in v[0]:
+                        f.write("%r," % i)
+                    f.write("],[")
+                    for i in v[1]:
+                        f.write("%r," % i)
+
+                    f.write("]),")
+                f.write("}\n")
+
+                f.write("""
 _lr_goto = { }
 for _k, _v in _lr_goto_items.items():
    for _x,_y in zip(_v[0],_v[1]):
-       if not _lr_goto.has_key(_x): _lr_goto[_x] = { }
+       if not _x in _lr_goto: _lr_goto[_x] = { }
        _lr_goto[_x][_k] = _y
 del _lr_goto_items
 """)
+            else:
+                f.write("\n_lr_goto = { ");
+                for k,v in self.lr_goto.items():
+                    f.write("(%r,%r):%r," % (k[0],k[1],v))
+                f.write("}\n");
+
+            # Write production table
+            f.write("_lr_productions = [\n")
+            for p in self.lr_productions:
+                if p.func:
+                    f.write("  (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
+                else:
+                    f.write("  (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
+            f.write("]\n")
+            f.close()
+
+        except IOError:
+            e = sys.exc_info()[1]
+            sys.stderr.write("Unable to create '%s'\n" % filename)
+            sys.stderr.write(str(e)+"\n")
+            return
+
+
+    # -----------------------------------------------------------------------------
+    # pickle_table()
+    #
+    # This function pickles the LR parsing tables to a supplied file object
+    # -----------------------------------------------------------------------------
+
+    def pickle_table(self,filename,signature=""):
+        try:
+            import cPickle as pickle
+        except ImportError:
+            import pickle
+        outf = open(filename,"wb")
+        pickle.dump(__tabversion__,outf,pickle_protocol)
+        pickle.dump(self.lr_method,outf,pickle_protocol)
+        pickle.dump(signature,outf,pickle_protocol)
+        pickle.dump(self.lr_action,outf,pickle_protocol)
+        pickle.dump(self.lr_goto,outf,pickle_protocol)
+
+        outp = []
+        for p in self.lr_productions:
+            if p.func:
+                outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
+            else:
+                outp.append((str(p),p.name,p.len,None,None,None))
+        pickle.dump(outp,outf,pickle_protocol)
+        outf.close()
+
+# -----------------------------------------------------------------------------
+#                            === INTROSPECTION ===
+#
+# The following functions and classes are used to implement the PLY
+# introspection features followed by the yacc() function itself.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack.  This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
+def get_caller_module_dict(levels):
+    try:
+        raise RuntimeError
+    except RuntimeError:
+        e,b,t = sys.exc_info()
+        f = t.tb_frame
+        while levels > 0:
+            f = f.f_back                   
+            levels -= 1
+        ldict = f.f_globals.copy()
+        if f.f_globals != f.f_locals:
+            ldict.update(f.f_locals)
+
+        return ldict
+
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
+# -----------------------------------------------------------------------------
+def parse_grammar(doc,file,line):
+    grammar = []
+    # Split the doc string into lines
+    pstrings = doc.splitlines()
+    lastp = None
+    dline = line
+    for ps in pstrings:
+        dline += 1
+        p = ps.split()
+        if not p: continue
+        try:
+            if p[0] == '|':
+                # This is a continuation of a previous rule
+                if not lastp:
+                    raise SyntaxError("%s:%d: Misplaced '|'" % (file,dline))
+                prodname = lastp
+                syms = p[1:]
+            else:
+                prodname = p[0]
+                lastp = prodname
+                syms   = p[2:]
+                assign = p[1]
+                if assign != ':' and assign != '::=':
+                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file,dline))
+
+            grammar.append((file,dline,prodname,syms))
+        except SyntaxError:
+            raise
+        except Exception:
+            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file,dline,ps.strip()))
+
+    return grammar
+
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
+class ParserReflect(object):
+    def __init__(self,pdict,log=None):
+        self.pdict      = pdict
+        self.start      = None
+        self.error_func = None
+        self.tokens     = None
+        self.files      = {}
+        self.grammar    = []
+        self.error      = 0
+
+        if log is None:
+            self.log = PlyLogger(sys.stderr)
         else:
-            f.write("\n_lr_goto = { ");
-            for k,v in _lr_goto.items():
-                f.write("(%r,%r):%r," % (k[0],k[1],v))
-            f.write("}\n");
-
-        # Write production table
-        f.write("_lr_productions = [\n")
-        for p in Productions:
-            if p:
-                if (p.func):
-                    f.write("  (%r,%d,%r,%r,%d),\n" % (p.name, p.len, p.func.__name__,p.file,p.line))
-                else:
-                    f.write("  (%r,%d,None,None,None),\n" % (p.name, p.len))
+            self.log = log
+
+    # Get all of the basic information
+    def get_all(self):
+        self.get_start()
+        self.get_error_func()
+        self.get_tokens()
+        self.get_precedence()
+        self.get_pfunctions()
+        
+    # Validate all of the information
+    def validate_all(self):
+        self.validate_start()
+        self.validate_error_func()
+        self.validate_tokens()
+        self.validate_precedence()
+        self.validate_pfunctions()
+        self.validate_files()
+        return self.error
+
+    # Compute a signature over the grammar
+    def signature(self):
+        try:
+            from hashlib import md5
+        except ImportError:
+            from md5 import md5
+        try:
+            sig = md5()
+            if self.start:
+                sig.update(self.start.encode('latin-1'))
+            if self.prec:
+                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
+            if self.tokens:
+                sig.update(" ".join(self.tokens).encode('latin-1'))
+            for f in self.pfuncs:
+                if f[3]:
+                    sig.update(f[3].encode('latin-1'))
+        except (TypeError,ValueError):
+            pass
+        return sig.digest()
+
+    # -----------------------------------------------------------------------------
+    # validate_file()
+    #
+    # This method checks to see if there are duplicated p_rulename() functions
+    # in the parser module file.  Without this function, it is really easy for
+    # users to make mistakes by cutting and pasting code fragments (and it's a real
+    # bugger to try and figure out why the resulting parser doesn't work).  Therefore,
+    # we just do a little regular expression pattern matching of def statements
+    # to try and detect duplicates.
+    # -----------------------------------------------------------------------------
+
+    def validate_files(self):
+        # Match def p_funcname(
+        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+
+        for filename in self.files.keys():
+            base,ext = os.path.splitext(filename)
+            if ext != '.py': return 1          # No idea. Assume it's okay.
+
+            try:
+                f = open(filename)
+                lines = f.readlines()
+                f.close()
+            except IOError:
+                continue
+
+            counthash = { }
+            for linen,l in enumerate(lines):
+                linen += 1
+                m = fre.match(l)
+                if m:
+                    name = m.group(1)
+                    prev = counthash.get(name)
+                    if not prev:
+                        counthash[name] = linen
+                    else:
+                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
+
+    # Get the start symbol
+    def get_start(self):
+        self.start = self.pdict.get('start')
+
+    # Validate the start symbol
+    def validate_start(self):
+        if self.start is not None:
+            if not isinstance(self.start,str):
+                self.log.error("'start' must be a string")
+
+    # Look for error handler
+    def get_error_func(self):
+        self.error_func = self.pdict.get('p_error')
+
+    # Validate the error function
+    def validate_error_func(self):
+        if self.error_func:
+            if isinstance(self.error_func,types.FunctionType):
+                ismethod = 0
+            elif isinstance(self.error_func, types.MethodType):
+                ismethod = 1
             else:
-                f.write("  None,\n")
-        f.write("]\n")
-
-        f.close()
-
-    except IOError,e:
-        print >>sys.stderr, "Unable to create '%s'" % filename
-        print >>sys.stderr, e
-        return
-
-def lr_read_tables(module=tab_module,optimize=0):
-    global _lr_action, _lr_goto, _lr_productions, _lr_method
-    try:
-        if isinstance(module,types.ModuleType):
-            parsetab = module
-        else:
-            exec "import %s as parsetab" % module
-
-        if (optimize) or (Signature.digest() == parsetab._lr_signature):
-            _lr_action = parsetab._lr_action
-            _lr_goto   = parsetab._lr_goto
-            _lr_productions = parsetab._lr_productions
-            _lr_method = parsetab._lr_method
-            return 1
-        else:
-            return 0
-
-    except (ImportError,AttributeError):
-        return 0
-
+                self.log.error("'p_error' defined, but is not a function or method")
+                self.error = 1
+                return
+
+            eline = func_code(self.error_func).co_firstlineno
+            efile = func_code(self.error_func).co_filename
+            self.files[efile] = 1
+
+            if (func_code(self.error_func).co_argcount != 1+ismethod):
+                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
+                self.error = 1
+
+    # Get the tokens map
+    def get_tokens(self):
+        tokens = self.pdict.get("tokens",None)
+        if not tokens:
+            self.log.error("No token list is defined")
+            self.error = 1
+            return
+
+        if not isinstance(tokens,(list, tuple)):
+            self.log.error("tokens must be a list or tuple")
+            self.error = 1
+            return
+        
+        if not tokens:
+            self.log.error("tokens is empty")
+            self.error = 1
+            return
+
+        self.tokens = tokens
+
+    # Validate the tokens
+    def validate_tokens(self):
+        # Validate the tokens.
+        if 'error' in self.tokens:
+            self.log.error("Illegal token name 'error'. Is a reserved word")
+            self.error = 1
+            return
+
+        terminals = {}
+        for n in self.tokens:
+            if n in terminals:
+                self.log.warning("Token '%s' multiply defined", n)
+            terminals[n] = 1
+
+    # Get the precedence map (if any)
+    def get_precedence(self):
+        self.prec = self.pdict.get("precedence",None)
+
+    # Validate and parse the precedence map
+    def validate_precedence(self):
+        preclist = []
+        if self.prec:
+            if not isinstance(self.prec,(list,tuple)):
+                self.log.error("precedence must be a list or tuple")
+                self.error = 1
+                return
+            for level,p in enumerate(self.prec):
+                if not isinstance(p,(list,tuple)):
+                    self.log.error("Bad precedence table")
+                    self.error = 1
+                    return
+
+                if len(p) < 2:
+                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
+                    self.error = 1
+                    return
+                assoc = p[0]
+                if not isinstance(assoc,str):
+                    self.log.error("precedence associativity must be a string")
+                    self.error = 1
+                    return
+                for term in p[1:]:
+                    if not isinstance(term,str):
+                        self.log.error("precedence items must be strings")
+                        self.error = 1
+                        return
+                    preclist.append((term,assoc,level+1))
+        self.preclist = preclist
+
+    # Get all p_functions from the grammar
+    def get_pfunctions(self):
+        p_functions = []
+        for name, item in self.pdict.items():
+            if name[:2] != 'p_': continue
+            if name == 'p_error': continue
+            if isinstance(item,(types.FunctionType,types.MethodType)):
+                line = func_code(item).co_firstlineno
+                file = func_code(item).co_filename
+                p_functions.append((line,file,name,item.__doc__))
+
+        # Sort all of the actions by line number
+        p_functions.sort()
+        self.pfuncs = p_functions
+
+
+    # Validate all of the p_functions
+    def validate_pfunctions(self):
+        grammar = []
+        # Check for non-empty symbols
+        if len(self.pfuncs) == 0:
+            self.log.error("no rules of the form p_rulename are defined")
+            self.error = 1
+            return 
+        
+        for line, file, name, doc in self.pfuncs:
+            func = self.pdict[name]
+            if isinstance(func, types.MethodType):
+                reqargs = 2
+            else:
+                reqargs = 1
+            if func_code(func).co_argcount > reqargs:
+                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
+                self.error = 1
+            elif func_code(func).co_argcount < reqargs:
+                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
+                self.error = 1
+            elif not func.__doc__:
+                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
+            else:
+                try:
+                    parsed_g = parse_grammar(doc,file,line)
+                    for g in parsed_g:
+                        grammar.append((name, g))
+                except SyntaxError:
+                    e = sys.exc_info()[1]
+                    self.log.error(str(e))
+                    self.error = 1
+
+                # Looks like a valid grammar rule
+                # Mark the file in which defined.
+                self.files[file] = 1
+
+        # Secondary validation step that looks for p_ definitions that are not functions
+        # or functions that look like they might be grammar rules.
+
+        for n,v in self.pdict.items():
+            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
+            if n[0:2] == 't_': continue
+            if n[0:2] == 'p_' and n != 'p_error':
+                self.log.warning("'%s' not defined as a function", n)
+            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
+                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
+                try:
+                    doc = v.__doc__.split(" ")
+                    if doc[1] == ':':
+                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
+                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
+                except Exception:
+                    pass
+
+        self.grammar = grammar
 
 # -----------------------------------------------------------------------------
 # yacc(module)
 #
-# Build the parser module
+# Build a parser
 # -----------------------------------------------------------------------------
 
-def yacc(method=default_lr, debug=yaccdebug, module=None, tabmodule=tab_module, start=None, check_recursion=1, optimize=0,write_tables=1,debugfile=debug_file,outputdir=''):
-    global yaccdebug
-    yaccdebug = debug
-
-    initialize_vars()
-    files = { }
-    error = 0
-
-
-    # Add parsing method to signature
-    Signature.update(method)
-
-    # If a "module" parameter was supplied, extract its dictionary.
-    # Note: a module may in fact be an instance as well.
-
+def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None, 
+         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
+         debuglog=None, errorlog = None, picklefile=None):
+
+    global parse                 # Reference to the parsing method of the last built parser
+
+    # If pickling is enabled, table files are not created
+
+    if picklefile:
+        write_tables = 0
+
+    if errorlog is None:
+        errorlog = PlyLogger(sys.stderr)
+
+    # Get the module dictionary used for the parser
     if module:
-        # User supplied a module object.
-        if isinstance(module, types.ModuleType):
-            ldict = module.__dict__
-        elif isinstance(module, _INSTANCETYPE):
-            _items = [(k,getattr(module,k)) for k in dir(module)]
-            ldict = { }
-            for i in _items:
-                ldict[i[0]] = i[1]
+        _items = [(k,getattr(module,k)) for k in dir(module)]
+        pdict = dict(_items)
+    else:
+        pdict = get_caller_module_dict(2)
+
+    # Collect parser information from the dictionary
+    pinfo = ParserReflect(pdict,log=errorlog)
+    pinfo.get_all()
+
+    if pinfo.error:
+        raise YaccError("Unable to build parser")
+
+    # Check signature against table files (if any)
+    signature = pinfo.signature()
+
+    # Read the tables
+    try:
+        lr = LRTable()
+        if picklefile:
+            read_signature = lr.read_pickle(picklefile)
         else:
-            raise ValueError,"Expected a module"
-
-    else:
-        # No module given.  We might be able to get information from the caller.
-        # Throw an exception and unwind the traceback to get the globals
-
+            read_signature = lr.read_table(tabmodule)
+        if optimize or (read_signature == signature):
+            try:
+                lr.bind_callables(pinfo.pdict)
+                parser = LRParser(lr,pinfo.error_func)
+                parse = parser.parse
+                return parser
+            except Exception:
+                e = sys.exc_info()[1]
+                errorlog.warning("There was a problem loading the table file: %s", repr(e))
+    except VersionError:
+        e = sys.exc_info()
+        errorlog.warning(str(e))
+    except Exception:
+        pass
+
+    if debuglog is None:
+        if debug:
+            debuglog = PlyLogger(open(debugfile,"w"))
+        else:
+            debuglog = NullLogger()
+
+    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
+
+
+    errors = 0
+
+    # Validate the parser information
+    if pinfo.validate_all():
+        raise YaccError("Unable to build parser")
+    
+    if not pinfo.error_func:
+        errorlog.warning("no p_error() function is defined")
+
+    # Create a grammar object
+    grammar = Grammar(pinfo.tokens)
+
+    # Set precedence level for terminals
+    for term, assoc, level in pinfo.preclist:
         try:
-            raise RuntimeError
-        except RuntimeError:
-            e,b,t = sys.exc_info()
-            f = t.tb_frame
-            f = f.f_back           # Walk out to our calling function
-            if f.f_globals is f.f_locals:   # Collect global and local variations from caller
-               ldict = f.f_globals
-            else:
-               ldict = f.f_globals.copy()
-               ldict.update(f.f_locals)
-
-    # Add starting symbol to signature
-    if not start:
-        start = ldict.get("start",None)
-    if start:
-        Signature.update(start)
-
-    # Look for error handler
-    ef = ldict.get('p_error',None)
-    if ef:
-        if isinstance(ef,types.FunctionType):
-            ismethod = 0
-        elif isinstance(ef, types.MethodType):
-            ismethod = 1
+            grammar.set_precedence(term,assoc,level)
+        except GrammarError:
+            e = sys.exc_info()[1]
+            errorlog.warning("%s",str(e))
+
+    # Add productions to the grammar
+    for funcname, gram in pinfo.grammar:
+        file, line, prodname, syms = gram
+        try:
+            grammar.add_production(prodname,syms,funcname,file,line)
+        except GrammarError:
+            e = sys.exc_info()[1]
+            errorlog.error("%s",str(e))
+            errors = 1
+
+    # Set the grammar start symbols
+    try:
+        if start is None:
+            grammar.set_start(pinfo.start)
         else:
-            raise YaccError,"'p_error' defined, but is not a function or method."
-        eline = ef.func_code.co_firstlineno
-        efile = ef.func_code.co_filename
-        files[efile] = None
-
-        if (ef.func_code.co_argcount != 1+ismethod):
-            raise YaccError,"%s:%d: p_error() requires 1 argument." % (efile,eline)
-        global Errorfunc
-        Errorfunc = ef
-    else:
-        print >>sys.stderr, "yacc: Warning. no p_error() function is defined."
-
-    # If running in optimized mode.  We're going to read tables instead
-
-    if (optimize and lr_read_tables(tabmodule,1)):
-        # Read parse table
-        del Productions[:]
-        for p in _lr_productions:
-            if not p:
-                Productions.append(None)
-            else:
-                m = MiniProduction()
-                m.name = p[0]
-                m.len  = p[1]
-                m.file = p[3]
-                m.line = p[4]
-                if p[2]:
-                    m.func = ldict[p[2]]
-                Productions.append(m)
-
-    else:
-        # Get the tokens map
-        if (module and isinstance(module,_INSTANCETYPE)):
-            tokens = getattr(module,"tokens",None)
-        else:
-            tokens = ldict.get("tokens",None)
-
-        if not tokens:
-            raise YaccError,"module does not define a list 'tokens'"
-        if not (isinstance(tokens,types.ListType) or isinstance(tokens,types.TupleType)):
-            raise YaccError,"tokens must be a list or tuple."
-
-        # Check to see if a requires dictionary is defined.
-        requires = ldict.get("require",None)
-        if requires:
-            if not (isinstance(requires,types.DictType)):
-                raise YaccError,"require must be a dictionary."
-
-            for r,v in requires.items():
-                try:
-                    if not (isinstance(v,types.ListType)):
-                        raise TypeError
-                    v1 = [x.split(".") for x in v]
-                    Requires[r] = v1
-                except StandardError:
-                    print >>sys.stderr, "Invalid specification for rule '%s' in require. Expected a list of strings" % r
-
-
-        # Build the dictionary of terminals.  We a record a 0 in the
-        # dictionary to track whether or not a terminal is actually
-        # used in the grammar
-
-        if 'error' in tokens:
-            print >>sys.stderr, "yacc: Illegal token 'error'.  Is a reserved word."
-            raise YaccError,"Illegal token name"
-
-        for n in tokens:
-            if Terminals.has_key(n):
-                print >>sys.stderr, "yacc: Warning. Token '%s' multiply defined." % n
-            Terminals[n] = [ ]
-
-        Terminals['error'] = [ ]
-
-        # Get the precedence map (if any)
-        prec = ldict.get("precedence",None)
-        if prec:
-            if not (isinstance(prec,types.ListType) or isinstance(prec,types.TupleType)):
-                raise YaccError,"precedence must be a list or tuple."
-            add_precedence(prec)
-            Signature.update(repr(prec))
-
-        for n in tokens:
-            if not Precedence.has_key(n):
-                Precedence[n] = ('right',0)         # Default, right associative, 0 precedence
-
-        # Get the list of built-in functions with p_ prefix
-        symbols = [ldict[f] for f in ldict.keys()
-               if (type(ldict[f]) in (types.FunctionType, types.MethodType) and ldict[f].__name__[:2] == 'p_'
-                   and ldict[f].__name__ != 'p_error')]
-
-        # Check for non-empty symbols
-        if len(symbols) == 0:
-            raise YaccError,"no rules of the form p_rulename are defined."
-
-        # Sort the symbols by line number
-        symbols.sort(lambda x,y: cmp(x.func_code.co_firstlineno,y.func_code.co_firstlineno))
-
-        # Add all of the symbols to the grammar
-        for f in symbols:
-            if (add_function(f)) < 0:
-                error += 1
-            else:
-                files[f.func_code.co_filename] = None
-
-        # Make a signature of the docstrings
-        for f in symbols:
-            if f.__doc__:
-                Signature.update(f.__doc__)
-
-        lr_init_vars()
-
-        if error:
-            raise YaccError,"Unable to construct parser."
-
-        if not lr_read_tables(tabmodule):
-
-            # Validate files
-            for filename in files.keys():
-                if not validate_file(filename):
-                    error = 1
-
-            # Validate dictionary
-            validate_dict(ldict)
-
-            if start and not Prodnames.has_key(start):
-                raise YaccError,"Bad starting symbol '%s'" % start
-
-            augment_grammar(start)
-            error = verify_productions(cycle_check=check_recursion)
-            otherfunc = [ldict[f] for f in ldict.keys()
-               if (type(f) in (types.FunctionType,types.MethodType) and ldict[f].__name__[:2] != 'p_')]
-
-            # Check precedence rules
-            if check_precedence():
-                error = 1
-
-            if error:
-                raise YaccError,"Unable to construct parser."
-
-            build_lritems()
-            compute_first1()
-            compute_follow(start)
-
-            if method in ['SLR','LALR']:
-                lr_parse_table(method)
-            else:
-                raise YaccError, "Unknown parsing method '%s'" % method
-
-            if write_tables:
-                lr_write_tables(tabmodule,outputdir)
-
-            if yaccdebug:
-                try:
-                    f = open(os.path.join(outputdir,debugfile),"w")
-                    f.write(_vfc.getvalue())
-                    f.write("\n\n")
-                    f.write(_vf.getvalue())
-                    f.close()
-                except IOError,e:
-                    print >>sys.stderr, "yacc: can't create '%s'" % debugfile,e
-
-    # Made it here.   Create a parser object and set up its internal state.
-    # Set global parse() method to bound method of parser object.
-
-    p = Parser("xyzzy")
-    p.productions = Productions
-    p.errorfunc = Errorfunc
-    p.action = _lr_action
-    p.goto   = _lr_goto
-    p.method = _lr_method
-    p.require = Requires
-
-    global parse
-    parse = p.parse
-
-    global parser
-    parser = p
-
-    # Clean up all of the globals we created
-    if (not optimize):
-        yacc_cleanup()
-    return p
-
-# yacc_cleanup function.  Delete all of the global variables
-# used during table construction
-
-def yacc_cleanup():
-    global _lr_action, _lr_goto, _lr_method, _lr_goto_cache
-    del _lr_action, _lr_goto, _lr_method, _lr_goto_cache
-
-    global Productions, Prodnames, Prodmap, Terminals
-    global Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
-    global Errorfunc, Signature, Requires
-
-    del Productions, Prodnames, Prodmap, Terminals
-    del Nonterminals, First, Follow, Precedence, UsedPrecedence, LRitems
-    del Errorfunc, Signature, Requires
-
-    global _vf, _vfc
-    del _vf, _vfc
-
-
-# Stub that raises an error if parsing is attempted without first calling yacc()
-def parse(*args,**kwargs):
-    raise YaccError, "yacc: No parser built with yacc()"
+            grammar.set_start(start)
+    except GrammarError:
+        e = sys.exc_info()[1]
+        errorlog.error(str(e))
+        errors = 1
+
+    if errors:
+        raise YaccError("Unable to build parser")
+
+    # Verify the grammar structure
+    undefined_symbols = grammar.undefined_symbols()
+    for sym, prod in undefined_symbols:
+        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
+        errors = 1
+
+    unused_terminals = grammar.unused_terminals()
+    if unused_terminals:
+        debuglog.info("")
+        debuglog.info("Unused terminals:")
+        debuglog.info("")
+        for term in unused_terminals:
+            errorlog.warning("Token '%s' defined, but not used", term)
+            debuglog.info("    %s", term)
+
+    # Print out all productions to the debug log
+    if debug:
+        debuglog.info("")
+        debuglog.info("Grammar")
+        debuglog.info("")
+        for n,p in enumerate(grammar.Productions):
+            debuglog.info("Rule %-5d %s", n, p)
+
+    # Find unused non-terminals
+    unused_rules = grammar.unused_rules()
+    for prod in unused_rules:
+        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
+
+    if len(unused_terminals) == 1:
+        errorlog.warning("There is 1 unused token")
+    if len(unused_terminals) > 1:
+        errorlog.warning("There are %d unused tokens", len(unused_terminals))
+
+    if len(unused_rules) == 1:
+        errorlog.warning("There is 1 unused rule")
+    if len(unused_rules) > 1:
+        errorlog.warning("There are %d unused rules", len(unused_rules))
+
+    if debug:
+        debuglog.info("")
+        debuglog.info("Terminals, with rules where they appear")
+        debuglog.info("")
+        terms = list(grammar.Terminals)
+        terms.sort()
+        for term in terms:
+            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
+        
+        debuglog.info("")
+        debuglog.info("Nonterminals, with rules where they appear")
+        debuglog.info("")
+        nonterms = list(grammar.Nonterminals)
+        nonterms.sort()
+        for nonterm in nonterms:
+            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
+        debuglog.info("")
+
+    if check_recursion:
+        unreachable = grammar.find_unreachable()
+        for u in unreachable:
+            errorlog.warning("Symbol '%s' is unreachable",u)
+
+        infinite = grammar.infinite_cycles()
+        for inf in infinite:
+            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
+            errors = 1
+        
+    unused_prec = grammar.unused_precedence()
+    for term, assoc in unused_prec:
+        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
+        errors = 1
+
+    if errors:
+        raise YaccError("Unable to build parser")
+    
+    # Run the LRGeneratedTable on the grammar
+    if debug:
+        errorlog.debug("Generating %s tables", method)
+            
+    lr = LRGeneratedTable(grammar,method,debuglog)
+
+    if debug:
+        num_sr = len(lr.sr_conflicts)
+
+        # Report shift/reduce and reduce/reduce conflicts
+        if num_sr == 1:
+            errorlog.warning("1 shift/reduce conflict")
+        elif num_sr > 1:
+            errorlog.warning("%d shift/reduce conflicts", num_sr)
+
+        num_rr = len(lr.rr_conflicts)
+        if num_rr == 1:
+            errorlog.warning("1 reduce/reduce conflict")
+        elif num_rr > 1:
+            errorlog.warning("%d reduce/reduce conflicts", num_rr)
+
+    # Write out conflicts to the output file
+    if debug and (lr.sr_conflicts or lr.rr_conflicts):
+        debuglog.warning("")
+        debuglog.warning("Conflicts:")
+        debuglog.warning("")
+
+        for state, tok, resolution in lr.sr_conflicts:
+            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s",  tok, state, resolution)
+        
+        already_reported = {}
+        for state, rule, rejected in lr.rr_conflicts:
+            if (state,id(rule),id(rejected)) in already_reported:
+                continue
+            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
+            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
+            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
+            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
+            already_reported[state,id(rule),id(rejected)] = 1
+        
+        warned_never = []
+        for state, rule, rejected in lr.rr_conflicts:
+            if not rejected.reduced and (rejected not in warned_never):
+                debuglog.warning("Rule (%s) is never reduced", rejected)
+                errorlog.warning("Rule (%s) is never reduced", rejected)
+                warned_never.append(rejected)
+
+    # Write the table file if requested
+    if write_tables:
+        lr.write_table(tabmodule,outputdir,signature)
+
+    # Write a pickled version of the tables
+    if picklefile:
+        lr.pickle_table(picklefile,signature)
+
+    # Build the parser
+    lr.bind_callables(pinfo.pdict)
+    parser = LRParser(lr,pinfo.error_func)
+
+    parse = parser.parse
+    return parser
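
For context, the rebuilt front end above (parse_grammar(), ParserReflect, and the
yacc() entry point with its new errorlog/debuglog/picklefile arguments) consumes
ordinary PLY grammar modules. The following is a minimal sketch of such a module,
for orientation only; it is not part of this patchset, and the token and rule
names are invented:

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS', 'TIMES')

    t_PLUS   = r'\+'
    t_TIMES  = r'\*'
    t_ignore = ' \t'

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    # Later tuples bind tighter; validate_precedence() above flattens this
    # into (term, assoc, level) triples.
    precedence = (
        ('left', 'PLUS'),
        ('left', 'TIMES'),
    )

    # parse_grammar() above splits these docstrings on ':' and '|'.
    def p_expr_binop(p):
        '''expr : expr PLUS expr
                | expr TIMES expr'''
        p[0] = p[1] + p[3] if p[2] == '+' else p[1] * p[3]

    def p_expr_number(p):
        'expr : NUMBER'
        p[0] = p[1]

    def p_error(p):
        print("Syntax error at %r" % (p,))

    lexer = lex.lex()

    # debug=True routes the grammar report through a PlyLogger (parser.out
    # by default); tabmodule names the generated table module written by
    # write_table().  Passing picklefile=... instead pickles the tables via
    # pickle_table() and suppresses the table module, as the new yacc()
    # above shows.
    parser = yacc.yacc(debug=True, tabmodule='parser_tab')
    print(parser.parse("2+3*4"))    # -> 14

On later runs, yacc() compares ParserReflect.signature() against the
_lr_signature stored in the table file and reuses the cached tables when they
match instead of regenerating them.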



1.1                  src/patchsets/seamonkey/2.1/2000-seamonkey_gentoo_install_dirs.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/2000-seamonkey_gentoo_install_dirs.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/2000-seamonkey_gentoo_install_dirs.patch?rev=1.1&content-type=text/plain

Index: 2000-seamonkey_gentoo_install_dirs.patch
===================================================================
--- mozilla-1.9.1.orig/config/autoconf.mk.in	2009-03-06 03:01:44.000000000 +0530
+++ mozilla-1.9.1/config/autoconf.mk.in	2009-04-26 18:59:19.992997721 +0530
@@ -57,14 +57,14 @@
 prefix		= @prefix@
 exec_prefix	= @exec_prefix@
 bindir		= @bindir@
-includedir	= @includedir@/$(MOZ_APP_NAME)-$(MOZ_APP_VERSION)
+includedir	= @includedir@/$(MOZ_APP_NAME)
 libdir		= @libdir@
 datadir		= @datadir@
 mandir		= @mandir@
-idldir		= $(datadir)/idl/$(MOZ_APP_NAME)-$(MOZ_APP_VERSION)
+idldir		= $(datadir)/idl/$(MOZ_APP_NAME)
 
-installdir	= $(libdir)/$(MOZ_APP_NAME)-$(MOZ_APP_VERSION)
-sdkdir		= $(libdir)/$(MOZ_APP_NAME)-devel-$(MOZ_APP_VERSION)
+installdir	= $(libdir)/$(MOZ_APP_NAME)
+sdkdir		= $(libdir)/$(MOZ_APP_NAME)-devel
 
 DIST		= $(DEPTH)/dist
 LIBXUL_SDK      = @LIBXUL_SDK@
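
The effect of this hunk is to drop the $(MOZ_APP_VERSION) suffix from the
include, IDL, install and SDK directories, so the application installs into
version-independent paths (e.g. $(libdir)/seamonkey rather than
$(libdir)/seamonkey-2.1); presumably this spares reverse dependencies a path
change on every version bump.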



1.1                  src/patchsets/seamonkey/2.1/2001-gentoo_no_app_updates.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/2001-gentoo_no_app_updates.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/2001-gentoo_no_app_updates.patch?rev=1.1&content-type=text/plain

Index: 2001-gentoo_no_app_updates.patch
===================================================================
--- mozilla/browser/components/preferences/advanced.js
+++ mozilla/browser/components/preferences/advanced.js
@@ -408,7 +408,7 @@
     var enabledPref = document.getElementById("app.update.enabled");
     var enableAppUpdate = document.getElementById("enableAppUpdate");
 
-    enableAppUpdate.disabled = !aus.canCheckForUpdates || enabledPref.locked;
+    enableAppUpdate.disabled = true;
   },
 
   /**
@@ -423,8 +423,7 @@
     var updateModeLabel = document.getElementById("updateModeLabel");
     var updateMode = document.getElementById("updateMode");
     
-    var disable = enabledPref.locked || !enabledPref.value ||
-                  autoPref.locked;
+    var disable = true;
     updateModeLabel.disabled = updateMode.disabled = disable;
   },
 
@@ -440,8 +439,7 @@
     
     var warnIncompatible = document.getElementById("warnIncompatible");
     
-    var disable = enabledPref.locked || !enabledPref.value || autoPref.locked ||
-                  !autoPref.value || modePref.locked;
+    var disable = true;
     warnIncompatible.disabled = disable;
   },
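
All three hunks pin the corresponding update-preference controls to disabled
regardless of the app.update.* pref state; on Gentoo, application updates are
presumably expected to arrive through the package manager, so the built-in
updater UI is switched off wholesale.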



1.1                  src/patchsets/seamonkey/2.1/2020-gentoo-prefix-support.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/2020-gentoo-prefix-support.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/2020-gentoo-prefix-support.patch?rev=1.1&content-type=text/plain

Index: 2020-gentoo-prefix-support.patch
===================================================================
--- mozilla/extensions/java/xpcom/interfaces/org/mozilla/xpcom/Mozilla.java
+++ mozilla/extensions/java/xpcom/interfaces/org/mozilla/xpcom/Mozilla.java
@@ -465,18 +465,18 @@ IXPCOMError {
       grePath = getPathFromConfigFile(env, aVersions, aProperties);
       if (grePath != null) {
         return grePath;
       }
     }
 
     final String greUserConfFile = ".gre.config";
     final String greUserConfDir = ".gre.d";
-    final String greConfPath = "/etc/gre.conf";
-    final String greConfDir = "/etc/gre.d";
+    final String greConfPath = "@GENTOO_PORTAGE_EPREFIX@/etc/gre.conf";
+    final String greConfDir = "@GENTOO_PORTAGE_EPREFIX@/etc/gre.d";
 
     env = System.getProperty("user.home");
     if (env != null) {
       // Look in ~/.gre.config
       grePath = getPathFromConfigFile(env + File.separator + greUserConfFile,
                                       aVersions, aProperties);
       if (grePath != null) {
         return grePath;
--- mozilla/xpcom/build/nsXPCOMPrivate.h
+++ mozilla/xpcom/build/nsXPCOMPrivate.h
@@ -274,18 +274,18 @@ void LogTerm();
 #define GRE_FRAMEWORK_NAME "XUL.framework"
 #define XUL_DLL            "XUL"
 #else
 #define XPCOM_SEARCH_KEY  "LD_LIBRARY_PATH"
 #define XUL_DLL   "libxul"MOZ_DLL_SUFFIX
 #endif
 
 #define GRE_CONF_NAME ".gre.config"
-#define GRE_CONF_PATH "/etc/gre.conf"
-#define GRE_CONF_DIR  "/etc/gre.d"
+#define GRE_CONF_PATH "@GENTOO_PORTAGE_EPREFIX@/etc/gre.conf"
+#define GRE_CONF_DIR  "@GENTOO_PORTAGE_EPREFIX@/etc/gre.d"
 #define GRE_USER_CONF_DIR ".gre.d"
 #endif
 
 #if defined(XP_WIN) || defined(XP_OS2)
   #define XPCOM_FILE_PATH_SEPARATOR       "\\"
   #define XPCOM_ENV_PATH_SEPARATOR        ";"
 #elif defined(XP_UNIX) || defined(XP_BEOS)
   #define XPCOM_FILE_PATH_SEPARATOR       "/"
--- mozilla/xulrunner/app/nsRegisterGREUnix.cpp
+++ mozilla/xulrunner/app/nsRegisterGREUnix.cpp
@@ -107,17 +107,17 @@ RegisterXULRunner(PRBool aRegisterGlobal
                   const GREProperty *aProperties, PRUint32 aPropertiesLen,
                   const char *aGREMilestone)
 {
   // Register ourself in /etc/gre.d or ~/.gre.d/ and record what key we created
   // for future unregistration.
 
   nsresult rv;
 
-  char root[MAXPATHLEN] = "/etc/gre.d";
+  char root[MAXPATHLEN] = "@GENTOO_PORTAGE_EPREFIX@/etc/gre.d";
 
   if (!aRegisterGlobally) {
     char *home = PR_GetEnv("HOME");
     if (!home || !*home)
       return PR_FALSE;
 
     PR_snprintf(root, MAXPATHLEN, "%s/.gre.d", home);
   }
@@ -199,17 +199,17 @@ RegisterXULRunner(PRBool aRegisterGlobal
 }
 
 void
 UnregisterXULRunner(PRBool aRegisterGlobally, nsIFile* aLocation,
                     const char *aGREMilestone)
 {
   nsresult rv;
 
-  char root[MAXPATHLEN] = "/etc/gre.d";
+  char root[MAXPATHLEN] = "@GENTOO_PORTAGE_EPREFIX@/etc/gre.d";
 
   if (!aRegisterGlobally) {
     char *home = PR_GetEnv("HOME");
     if (!home || !*home)
       return;
 
     PR_snprintf(root, MAXPATHLEN, "%s/.gre.d", home);
   }
--- mozilla/xulrunner/installer/Makefile.in
+++ mozilla/xulrunner/installer/Makefile.in
@@ -68,17 +68,17 @@ INSTALL_SDK = 1
 include $(topsrcdir)/toolkit/mozapps/installer/packager.mk
 
 $(MOZILLA_VERSION).system.conf: $(topsrcdir)/config/milestone.txt Makefile
 	printf "[%s]\nGRE_PATH=%s\nxulrunner=true\nabi=%s" \
 	  $(MOZILLA_VERSION) $(installdir) $(TARGET_XPCOM_ABI)> $@
 
 ifndef SKIP_GRE_REGISTRATION
 # to register xulrunner per-user, override this with $HOME/.gre.d
-regdir = /etc/gre.d
+regdir = @sysconfdir@/gre.d
 
 install:: $(MOZILLA_VERSION).system.conf
 	$(NSINSTALL) -D $(DESTDIR)$(regdir)
 	$(SYSINSTALL) $(IFLAGS1) $^ $(DESTDIR)$(regdir)
 endif
 
 # Add pkg-config files to the install:: target
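
A note on the @GENTOO_PORTAGE_EPREFIX@ tokens above: they are placeholders that
the ebuild is expected to expand to the actual Gentoo prefix (empty on
ordinary, non-prefix installs) before the build. The exact mechanism lives in
the ebuild rather than in this patchset; purely as an illustration of the
intended expansion, in Python (file list and the EPREFIX variable are
assumptions):

    import os
    import sys
    import fileinput

    # Hypothetical substitution pass over the patched sources; EPREFIX is
    # empty on non-prefix systems, so the paths reduce to /etc/gre.conf etc.
    EPREFIX = os.environ.get("EPREFIX", "")
    sources = [
        "mozilla/xpcom/build/nsXPCOMPrivate.h",
        "mozilla/xulrunner/app/nsRegisterGREUnix.cpp",
    ]
    for line in fileinput.input(sources, inplace=True):
        sys.stdout.write(line.replace("@GENTOO_PORTAGE_EPREFIX@", EPREFIX))

The xulrunner/installer/Makefile.in hunk takes a different route and uses the
configure-provided @sysconfdir@ directly.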
 



1.1                  src/patchsets/seamonkey/2.1/5000_gconf-config-update.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5000_gconf-config-update.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5000_gconf-config-update.patch?rev=1.1&content-type=text/plain

Index: 5000_gconf-config-update.patch
===================================================================
--- configure.in
+++ configure.in
@@ -5031,16 +5031,21 @@ then
             fi
             MOZ_ENABLE_GIO=
         ])
     fi
 
     dnl ========================================================
     dnl = GConf support module
     dnl ========================================================
+    MOZ_ARG_DISABLE_BOOL(gconf,
+    [  --disable-gconf      Disable Gconf support ],
+        MOZ_ENABLE_GCONF=,
+        MOZ_ENABLE_GCONF=force)
+
     if test "$MOZ_ENABLE_GCONF"
     then
         PKG_CHECK_MODULES(MOZ_GCONF, gconf-2.0 >= $GCONF_VERSION gobject-2.0 ,[
             MOZ_ENABLE_GCONF=1
         ],[
             MOZ_ENABLE_GCONF=
         ])
      fi
--- mozilla/configure.in
+++ mozilla/configure.in
@@ -5701,16 +5701,21 @@ then
 
     AC_SUBST(MOZ_ENABLE_GIO)
     AC_SUBST(MOZ_GIO_CFLAGS)
     AC_SUBST(MOZ_GIO_LIBS)
    
     dnl ========================================================
     dnl = GConf support module
     dnl ========================================================
+    MOZ_ARG_DISABLE_BOOL(gconf,
+    [  --disable-gconf      Disable Gconf support ],
+        MOZ_ENABLE_GCONF=,
+        MOZ_ENABLE_GCONF=force)
+
     if test "$MOZ_ENABLE_GCONF"
     then
         PKG_CHECK_MODULES(MOZ_GCONF, gconf-2.0 >= $GCONF_VERSION gobject-2.0 ,[
             MOZ_GCONF_LIBS=`echo $MOZ_GCONF_LIBS | sed 's/-llinc\>//'`
             MOZ_ENABLE_GCONF=1
         ],[
             if test "$MOZ_ENABLE_GCONF" = "force"
             then
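
With both hunks applied, GConf support becomes an explicit configure switch
instead of an automatic probe: --disable-gconf empties MOZ_ENABLE_GCONF and
skips the module entirely, while the default value of "force" feeds the
subsequent PKG_CHECK_MODULES test for gconf-2.0 and gobject-2.0. This lets an
ebuild map a USE flag onto a predictable build outcome.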



1.1                  src/patchsets/seamonkey/2.1/5000_use-chrome-manifest.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5000_use-chrome-manifest.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5000_use-chrome-manifest.patch?rev=1.1&content-type=text/plain

Index: 5000_use-chrome-manifest.patch
===================================================================
# HG changeset patch
# Parent a7dea879b4b445a23186f438900562155bb39e99
Bug 620931 part 1 - Use chrome manifest to register resource://gre-resources/

--- mozilla/layout/style/jar.mn
+++ mozilla/layout/style/jar.mn
@@ -1,8 +1,10 @@
 toolkit.jar:
    res/ua.css    (ua.css)
    res/html.css    (html.css)
    res/quirk.css    (quirk.css)
    res/viewsource.css    (viewsource.css)
 *  res/forms.css    (forms.css)
    res/arrow.gif    (arrow.gif)
    res/arrowd.gif   (arrowd.gif)
+
+% resource gre-resources %res/
--- mozilla/netwerk/protocol/res/nsResProtocolHandler.cpp
+++ mozilla/netwerk/protocol/res/nsResProtocolHandler.cpp
@@ -75,17 +75,16 @@ static nsResProtocolHandler *gResHandler
 //
 // this enables PR_LOG_ALWAYS level information and places all output in
 // the file log.txt
 //
 static PRLogModuleInfo *gResLog;
 #endif
 
 #define kGRE           NS_LITERAL_CSTRING("gre")
-#define kGRE_RESOURCES NS_LITERAL_CSTRING("gre-resources")
 
 //----------------------------------------------------------------------------
 // nsResURL : overrides nsStandardURL::GetFile to provide nsIFile resolution
 //----------------------------------------------------------------------------
 
 nsresult
 nsResURL::EnsureFile()
 {
@@ -197,28 +196,16 @@ nsResProtocolHandler::Init()
     NS_ENSURE_SUCCESS(rv, rv);
 
     //
     // make resource://gre/ point to the GRE directory
     //
     rv = AddSpecialDir(NS_GRE_DIR, kGRE);
     NS_ENSURE_SUCCESS(rv, rv);
 
-    // make resource://gre-resources/ point to gre toolkit[.jar]/res
-    nsCOMPtr<nsIURI> greURI;
-    nsCOMPtr<nsIURI> greResURI;
-    GetSubstitution(kGRE, getter_AddRefs(greURI));
-#ifdef MOZ_CHROME_FILE_FORMAT_JAR
-    NS_NAMED_LITERAL_CSTRING(strGRE_RES_URL, "jar:chrome/toolkit.jar!/res/");
-#else
-    NS_NAMED_LITERAL_CSTRING(strGRE_RES_URL, "chrome/toolkit/res/");
-#endif
-    rv = mIOService->NewURI(strGRE_RES_URL, nsnull, greURI,
-                            getter_AddRefs(greResURI));
-    SetSubstitution(kGRE_RESOURCES, greResURI);
     //XXXbsmedberg Neil wants a resource://pchrome/ for the profile chrome dir...
     // but once I finish multiple chrome registration I'm not sure that it is needed
 
     // XXX dveditz: resource://pchrome/ defeats profile directory salting
     // if web content can load it. Tread carefully.
 
     return rv;
 }
@@ -242,22 +229,16 @@ nsResProtocolHandler::Init(nsIFile *aOmn
     // these entries should be kept in sync with the normal Init function
 
     // resource:/// points to jar:omni.jar!/
     SetSubstitution(EmptyCString(), uri);
 
     // resource://gre/ points to jar:omni.jar!/
     SetSubstitution(kGRE, uri);
 
-    urlStr += "chrome/toolkit/res/";
-    rv = mIOService->NewURI(urlStr, nsnull, nsnull, getter_AddRefs(uri));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    // resource://gre-resources/ points to jar:omni.jar!/chrome/toolkit/res/
-    SetSubstitution(kGRE_RESOURCES, uri);
     return NS_OK;
 }
 #endif
 
 #ifdef MOZ_IPC
 static PLDHashOperator
 EnumerateSubstitution(const nsACString& aKey,
                       nsIURI* aURI,
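
Taken together, the two halves of this patch move the registration of
resource://gre-resources/ out of compiled code and into the chrome registry:
the hard-coded mappings in nsResProtocolHandler::Init() (both the plain and the
omnijar variant) are deleted, and the '% resource gre-resources %res/' line
added to layout/style/jar.mn registers the same substitution via the chrome
manifest instead.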




1.1                  src/patchsets/seamonkey/2.1/5001_fix-prefs-directory.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5001_fix-prefs-directory.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5001_fix-prefs-directory.patch?rev=1.1&content-type=text/plain

Index: 5001_fix-prefs-directory.patch
===================================================================
# HG changeset patch
# Parent 3038cccba1a071d6b418e15442d0f2d9f3dcb11d
Bug 620931 part 2 - When building --with-libxul-sdk, use the right preferences directory

--- mozilla/browser/locales/Makefile.in
+++ mozilla/browser/locales/Makefile.in
@@ -183,17 +183,17 @@ install:: $(addsuffix .xml,$(SEARCH_PLUG
 	$(SYSINSTALL) $(IFLAGS1) $^ $(DESTDIR)$(mozappdir)/searchplugins
 
 
 libs-%:
 	$(NSINSTALL) -D $(DIST)/install
 	@$(MAKE) -C ../../toolkit/locales libs-$* BOTH_MANIFESTS=1
 	@$(MAKE) -C ../../services/sync/locales AB_CD=$* XPI_NAME=locale-$* BOTH_MANIFESTS=1
 	@$(MAKE) -C ../../extensions/spellcheck/locales AB_CD=$* XPI_NAME=locale-$* BOTH_MANIFESTS=1
-	@$(MAKE) libs AB_CD=$* XPI_NAME=locale-$* PREF_DIR=defaults/pref BOTH_MANIFESTS=1
+	@$(MAKE) libs AB_CD=$* XPI_NAME=locale-$* PREF_DIR=$(PREF_DIR) BOTH_MANIFESTS=1
 	@$(MAKE) -C $(DEPTH)/$(MOZ_BRANDING_DIRECTORY)/locales AB_CD=$* XPI_NAME=locale-$* BOTH_MANIFESTS=1
 
 
 repackage-win32-installer: WIN32_INSTALLER_OUT="$(_ABS_DIST)/$(PKG_INST_PATH)$(PKG_INST_BASENAME).exe"
 repackage-win32-installer: $(WIN32_INSTALLER_IN) $(SUBMAKEFILES)
 	@echo "Repackaging $(WIN32_INSTALLER_IN) into $(WIN32_INSTALLER_OUT)."
 	$(MAKE) -C $(DEPTH)/$(MOZ_BRANDING_DIRECTORY) export
 	$(MAKE) -C ../installer/windows CONFIG_DIR=l10ngen l10ngen/setup.exe l10ngen/7zSD.sfx
--- mozilla/toolkit/mozapps/installer/packager.mk
+++ mozilla/toolkit/mozapps/installer/packager.mk
@@ -307,17 +307,17 @@ OMNIJAR_FILES	= \
   res \
   defaults \
   greprefs.js \
   jsloader \
   $(NULL)
 
 NON_OMNIJAR_FILES += \
   chrome/icons/\* \
-  defaults/pref/channel-prefs.js \
+  $(PREF_DIR)/channel-prefs.js \
   res/cursors/\* \
   res/MainMenu.nib/\* \
   $(NULL)
 
 PACK_OMNIJAR	= \
   rm -f omni.jar components/binary.manifest && \
   grep -h '^binary-component' components/*.manifest > binary.manifest ; \
   sed -e 's/^binary-component/\#binary-component/' components/components.manifest > components.manifest && \




1.1                  src/patchsets/seamonkey/2.1/5001_support-gcc46.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5001_support-gcc46.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5001_support-gcc46.patch?rev=1.1&content-type=text/plain

Index: 5001_support-gcc46.patch
===================================================================
# HG changeset patch
# User Rafael Ávila de Espíndola <respindola@mozilla.com>
# Date 1301009215 14400
# Node ID ca41c566399956643d79ac88bfac1d63ea3b2bcc
# Parent  92b43aa07b7d8ca9833c98ddb3d4da30d0693ed8
Bug 623126 - Add constructor for nsDebugImpl, nsTraceRefcntImpl, EmptyEnumeratorImpl, and nsSimpleUnicharStreamFactory to placate Clang; r=bsmedberg

--- mozilla/xpcom/base/nsDebugImpl.h
+++ mozilla/xpcom/base/nsDebugImpl.h
@@ -35,16 +35,17 @@
  * ***** END LICENSE BLOCK ***** */
 
 #include "nsIDebug.h"
 #include "nsIDebug2.h"
 
 class nsDebugImpl : public nsIDebug2
 {
 public:
+    nsDebugImpl() {}
     NS_DECL_ISUPPORTS
     NS_DECL_NSIDEBUG
     NS_DECL_NSIDEBUG2
     
     static nsresult Create(nsISupports* outer, const nsIID& aIID, void* *aInstancePtr);
 };
 
 
--- mozilla/xpcom/base/nsTraceRefcntImpl.h
+++ mozilla/xpcom/base/nsTraceRefcntImpl.h
@@ -39,16 +39,17 @@
 #define nsTraceRefcntImpl_h___
 
 #include <stdio.h> // for FILE
 #include "nsITraceRefcnt.h"
 
 class nsTraceRefcntImpl : public nsITraceRefcnt
 {
 public:
+  nsTraceRefcntImpl() {}
   NS_DECL_ISUPPORTS
   NS_DECL_NSITRACEREFCNT
 
   static NS_COM void Startup();
   static NS_COM void Shutdown();
 
   enum StatisticsType {
     ALL_STATS,
--- mozilla/xpcom/glue/nsEnumeratorUtils.cpp
+++ mozilla/xpcom/glue/nsEnumeratorUtils.cpp
@@ -47,16 +47,17 @@
 
 #include "nsCOMPtr.h"
 
 class EmptyEnumeratorImpl : public nsISimpleEnumerator,
                             public nsIUTF8StringEnumerator,
                             public nsIStringEnumerator
 {
 public:
+    EmptyEnumeratorImpl() {}
     // nsISupports interface
     NS_DECL_ISUPPORTS_INHERITED  // not really inherited, but no mRefCnt
 
     // nsISimpleEnumerator
     NS_DECL_NSISIMPLEENUMERATOR
     NS_DECL_NSIUTF8STRINGENUMERATOR
     // can't use NS_DECL_NSISTRINGENUMERATOR because they share the
     // HasMore() signature
--- mozilla/xpcom/io/nsUnicharInputStream.h
+++ mozilla/xpcom/io/nsUnicharInputStream.h
@@ -46,16 +46,17 @@
 #define NS_SIMPLE_UNICHAR_STREAM_FACTORY_CID \
 { 0x428dca6f, 0x1a0f, 0x4cda, { 0xb5, 0x16, 0xd, 0x52, 0x44, 0x74, 0x5a, 0x6a } }
 
 class nsSimpleUnicharStreamFactory :
   public nsIFactory, 
   private nsISimpleUnicharStreamFactory
 {
 public:
+  nsSimpleUnicharStreamFactory() {}
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_NSIFACTORY
   NS_DECL_NSISIMPLEUNICHARSTREAMFACTORY
 
   static NS_COM nsSimpleUnicharStreamFactory* GetInstance();
 
 private:
   static const nsSimpleUnicharStreamFactory kInstance;


--- mozilla/embedding/browser/gtk/src/EmbedPrivate.cpp
+++ mozilla/embedding/browser/gtk/src/EmbedPrivate.cpp
@@ -99,6 +99,7 @@ nsIDirectoryServiceProvider *EmbedPrivate::sAppFileLocProvider = nsnull;
 class GTKEmbedDirectoryProvider : public nsIDirectoryServiceProvider2
 {
 public:
+  GTKEmbedDirectoryProvider() {}
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_NSIDIRECTORYSERVICEPROVIDER
   NS_DECL_NSIDIRECTORYSERVICEPROVIDER2

# HG changeset patch
# User Rafael Ávila de Espíndola <respindola@mozilla.com>
# Date 1301009369 14400
# Node ID 570781a55419e42e0ddd43d5d9da070dd022772d
# Parent  b26e0f9e43e8f44b6663fdb29e9ee8a14c955e22
Bug 623123 - Add constructor for nsXULAppInfo (which inherits from nsIXULAppInfo) to placate Clang; r=bsmedberg

--- mozilla/toolkit/xre/nsAppRunner.cpp
+++ mozilla/toolkit/xre/nsAppRunner.cpp
@@ -608,16 +608,17 @@ class nsXULAppInfo : public nsIXULAppInf
 #endif
 #ifdef MOZ_CRASHREPORTER
                      public nsICrashReporter,
 #endif
                      public nsIXULRuntime
 
 {
 public:
+  nsXULAppInfo() {}
   NS_DECL_ISUPPORTS_INHERITED
   NS_DECL_NSIXULAPPINFO
   NS_DECL_NSIXULRUNTIME
 #ifdef MOZ_CRASHREPORTER
   NS_DECL_NSICRASHREPORTER
 #endif
 #ifdef XP_WIN
   NS_DECL_NSIWINAPPHELPER

# HG changeset patch
# User Rafael Ávila de Espíndola <respindola@mozilla.com>
# Date 1301011307 14400
# Node ID 067fdfe97f0d9b0400e665d38578c0b3a0f670db
# Parent  17015e66511231d618d2915aecfc146c18a873a5
Bug 623116 - Add constructor for nsASCIICaseInsensitiveStringComparator (which inherits from nsStringComparator) to placate Clang; r=smontagu

--- mozilla/intl/unicharutil/util/nsUnicharUtils.h
+++ mozilla/intl/unicharutil/util/nsUnicharUtils.h
@@ -98,16 +98,17 @@ public:
   PRBool Equals(const A& a, const B& b) const {
     return a.Equals(b, nsCaseInsensitiveStringComparator());
   }
 };
 
 class nsASCIICaseInsensitiveStringComparator : public nsStringComparator
 {
 public:
+  nsASCIICaseInsensitiveStringComparator() {}
   virtual int operator() (const PRUnichar*,
                           const PRUnichar*,
                           PRUint32,
                           PRUint32) const;
 };
 
 inline PRBool
 CaseInsensitiveFindInReadable(const nsAString& aPattern,





1.1                  src/patchsets/seamonkey/2.1/5002_independant-omnijar.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5002_independant-omnijar.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5002_independant-omnijar.patch?rev=1.1&content-type=text/plain

Index: 5002_independant-omnijar.patch
===================================================================
From: Mike Hommey <mh@glandium.org>
Date: Fri, 25 Feb 2011 12:53:36 +0100
Subject: Bug 620931 part 3 - Allow GRE and XUL application to use omni.jar independently

We now store two independent locations for an omni.jar, allowing the GRE/XRE
and the XUL application to each have their own omni.jar. And since xulrunner
setups are largely independent from the XUL applications they host, we support
both the omni.jar and non-omni.jar cases in the same runtime, with the side
effect of allowing a manual switch from one to the other without rebuilding
the binaries.

We let the mozilla::Omnijar API handle both cases, so that callers need
little extra work to support them.

We also make the preferences service load the same set of preferences in all
the various cases (unified vs. separate, omni.jar vs. no omni.jar).

The child process launcher for IPC is modified to pass the base directories
needed for the mozilla::Omnijar API initialization in the child process.

Finally, the startupcache file name canonicalization is modified to separate
APP and GRE resources.
---
 ipc/glue/GeckoChildProcessHost.cpp               |   34 +++--
 js/src/xpconnect/loader/mozJSComponentLoader.cpp |  161 ++++++++++++++------
 modules/libjar/nsJAR.cpp                         |   19 +--
 modules/libpref/src/nsPrefService.cpp            |  160 +++++++++++---------
 netwerk/protocol/res/nsResProtocolHandler.cpp    |   69 ++------
 startupcache/StartupCache.cpp                    |   17 ++-
 toolkit/xre/nsAppRunner.cpp                      |   34 +++--
 toolkit/xre/nsEmbedFunctions.cpp                 |    4 +-
 xpcom/build/Makefile.in                          |    5 +-
 xpcom/build/Omnijar.cpp                          |  181 +++++++++++++++++-----
 xpcom/build/Omnijar.h                            |  131 ++++++++++++++--
 xpcom/build/nsXPComInit.cpp                      |   26 ++--
 xpcom/components/nsComponentManager.cpp          |   31 ++--
 13 files changed, 574 insertions(+), 298 deletions(-)
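
For orientation before the diffs, this is roughly how in-tree code
consumes the reworked two-slot API (a sketch only; the names are taken
from the Omnijar.h hunk below, and XPCOM startup, which calls
Omnijar::SetBase(), is assumed to have already run):

#include "mozilla/Omnijar.h"

static void SketchOmnijarUse()
{
    using mozilla::Omnijar;

    // Each slot (GRE, APP) is independently either an omni.jar or a
    // plain directory; GetURIString() hides the difference behind a
    // URI string guaranteed to end with a slash.
    nsCAutoString greURI, appURI;
    Omnijar::GetURIString(Omnijar::GRE, greURI);
    Omnijar::GetURIString(Omnijar::APP, appURI); // empty in the unified case

    // A cached zip reader exists only when the slot really is an
    // omni.jar (and only in MOZ_ENABLE_LIBXUL builds).
    if (Omnijar::HasOmnijar(Omnijar::GRE)) {
        nsZipArchive *reader = Omnijar::GetReader(Omnijar::GRE);
        (void) reader;
    }
}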

diff --git a/ipc/glue/GeckoChildProcessHost.cpp b/ipc/glue/GeckoChildProcessHost.cpp
index 67a9651..6c8fa41 100644
--- mozilla/ipc/glue/GeckoChildProcessHost.cpp
+++ mozilla/ipc/glue/GeckoChildProcessHost.cpp
@@ -445,16 +445,19 @@ GeckoChildProcessHost::PerformAsyncLaunch(std::vector<std::string> aExtraOpts, b
 
   childArgv.insert(childArgv.end(), aExtraOpts.begin(), aExtraOpts.end());
 
-#ifdef MOZ_OMNIJAR
   // Make sure the child process can find the omnijar
   // See XRE_InitCommandLine in nsAppRunner.cpp
-  nsCAutoString omnijarPath;
-  if (mozilla::OmnijarPath()) {
-    mozilla::OmnijarPath()->GetNativePath(omnijarPath);
-    childArgv.push_back("-omnijar");
-    childArgv.push_back(omnijarPath.get());
+  nsCAutoString path;
+  nsCOMPtr<nsIFile> file = mozilla::Omnijar::GetBase(mozilla::Omnijar::GRE);
+  if (file && NS_SUCCEEDED(file->GetNativePath(path))) {
+    childArgv.push_back("-grebase");
+    childArgv.push_back(path.get());
+  }
+  file = mozilla::Omnijar::GetBase(mozilla::Omnijar::APP);
+  if (file && NS_SUCCEEDED(file->GetNativePath(path))) {
+    childArgv.push_back("-appbase");
+    childArgv.push_back(path.get());
   }
-#endif
 
   childArgv.push_back(pidstring);
 
@@ -557,16 +560,19 @@ GeckoChildProcessHost::PerformAsyncLaunch(std::vector<std::string> aExtraOpts, b
 
   cmdLine.AppendLooseValue(std::wstring(mGroupId.get()));
 
-#ifdef MOZ_OMNIJAR
   // Make sure the child process can find the omnijar
   // See XRE_InitCommandLine in nsAppRunner.cpp
-  nsAutoString omnijarPath;
-  if (mozilla::OmnijarPath()) {
-    mozilla::OmnijarPath()->GetPath(omnijarPath);
-    cmdLine.AppendLooseValue(UTF8ToWide("-omnijar"));
-    cmdLine.AppendLooseValue(omnijarPath.get());
+  nsAutoString path;
+  nsCOMPtr<nsIFile> file = mozilla::Omnijar::GetBase(mozilla::Omnijar::GRE);
+  if (file && NS_SUCCEEDED(file->GetPath(path))) {
+    cmdLine.AppendLooseValue(UTF8ToWide("-grebase"));
+    cmdLine.AppendLooseValue(path.get());
+  }
+  file = mozilla::Omnijar::GetBase(mozilla::Omnijar::APP);
+  if (file && NS_SUCCEEDED(file->GetPath(path))) {
+    cmdLine.AppendLooseValue(UTF8ToWide("-appbase"));
+    cmdLine.AppendLooseValue(path.get());
   }
-#endif
 
   cmdLine.AppendLooseValue(UTF8ToWide(pidstring));
 
diff --git a/js/src/xpconnect/loader/mozJSComponentLoader.cpp b/js/src/xpconnect/loader/mozJSComponentLoader.cpp
index 7296fc1..4b9bcf7 100644
--- mozilla/js/src/xpconnect/loader/mozJSComponentLoader.cpp
+++ mozilla/js/src/xpconnect/loader/mozJSComponentLoader.cpp
@@ -86,6 +86,7 @@
 #include "nsILocalFileWin.h"
 #endif
 #include "xpcprivate.h"
+#include "nsIResProtocolHandler.h"
 
 #ifdef MOZ_ENABLE_LIBXUL
 #include "mozilla/scache/StartupCache.h"
@@ -626,24 +627,11 @@ mozJSComponentLoader::LoadModuleFromJAR(nsILocalFile *aJarFile,
 #if !defined(XPCONNECT_STANDALONE)
     nsresult rv;
 
-    nsCAutoString fullSpec;
-
-#ifdef MOZ_OMNIJAR
-    PRBool equal;
-    rv = aJarFile->Equals(mozilla::OmnijarPath(), &equal);
-    if (NS_SUCCEEDED(rv) && equal) {
-        fullSpec = "resource://gre/";
-    } else {
-#endif
-        nsCAutoString fileSpec;
-        NS_GetURLSpecFromActualFile(aJarFile, fileSpec);
-        fullSpec = "jar:";
-        fullSpec += fileSpec;
-        fullSpec += "!/";
-#ifdef MOZ_OMNIJAR
-    }
-#endif
-
+    nsCAutoString fullSpec, fileSpec;
+    NS_GetURLSpecFromActualFile(aJarFile, fileSpec);
+    fullSpec = "jar:";
+    fullSpec += fileSpec;
+    fullSpec += "!/";
     fullSpec += aComponentPath;
 
     nsCOMPtr<nsIURI> uri;
@@ -838,47 +826,128 @@ class JSScriptHolder
     JSScript *mScript;
 };
 
+static const char baseName[2][5] = { "gre/", "app/" };
+
+static inline PRBool
+canonicalizeBase(nsCAutoString &spec, nsACString &out, mozilla::Omnijar::Type aType)
+{
+    nsCAutoString base;
+    nsresult rv = mozilla::Omnijar::GetURIString(aType, base);
+
+    if (NS_FAILED(rv) || !base.Length())
+        return PR_FALSE;
+
+    if (base.Compare(spec.get(), PR_FALSE, base.Length()))
+        return PR_FALSE;
+
+    out.Append("/resource/");
+    out.Append(baseName[aType]);
+    out.Append(Substring(spec, base.Length()));
+    return PR_TRUE;
+}
 /**
  * PathifyURI transforms mozilla .js uris into useful zip paths
  * to make it easier to manipulate startup cache entries
  * using standard zip tools.
  * Transformations applied:
- *  * jsloader/<scheme> prefix is used to group mozJSComponentLoader cache entries in
+ *  * jsloader/ prefix is used to group mozJSComponentLoader cache entries in
  *    a top-level zip directory.
- *  * In MOZ_OMNIJAR case resource:/// and resource://gre/ URIs refer to the same path
- *    so treat both of them as resource://gre/
+ *  * resource:// URIs are resolved to their corresponding file/jar URI to
+ *    canonicalize resources URIs other than gre and app.
+ *  * Paths under GRE or APP directory have their base path replaced with
+ *    resource/gre or resource/app to avoid depending on install location.
+ *  * jar:file:///path/to/file.jar!/sub/path urls are replaced with
+ *    /path/to/file.jar/sub/path
  *  * .bin suffix is added to the end of the path to indicate that jsloader/ entries
  *     are binary representations of JS source.
  * For example:
- *  resource://gre/modules/XPCOMUtils.jsm becomes
- *  jsloader/resource/gre/modules/XPCOMUtils.jsm.bin
+ *  resource://gre/modules/XPCOMUtils.jsm or
+ *  file://$GRE_DIR/modules/XPCOMUtils.jsm or
+ *  jar:file://$GRE_DIR/omni.jar!/modules/XPCOMUtils.jsm become
+ *     jsloader/resource/gre/modules/XPCOMUtils.jsm.bin
+ *  file://$PROFILE_DIR/extensions/{uuid}/components/component.js becomes
+ *     jsloader/$PROFILE_DIR/extensions/%7Buuid%7D/components/component.js.bin
+ *  jar:file://$PROFILE_DIR/extensions/some.xpi!/components/component.js becomes
+ *     jsloader/$PROFILE_DIR/extensions/some.xpi/components/component.js.bin
  */
 static nsresult
 PathifyURI(nsIURI *in, nsACString &out)
 { 
-   out = "jsloader/";
-   nsCAutoString scheme;
-   nsresult rv = in->GetScheme(scheme);
-   NS_ENSURE_SUCCESS(rv, rv);
-   out.Append(scheme);
-   nsCAutoString host;
-   // OK for GetHost to fail since it's not implemented sometimes
-   in->GetHost(host);
-#ifdef MOZ_OMNIJAR
-   if (scheme.Equals("resource") && host.Length() == 0){
-       host = "gre";
-   }
-#endif
-   if (host.Length()) {
-       out.Append("/");
-       out.Append(host);
-   }
-   nsCAutoString path;
-   rv = in->GetPath(path);
-   NS_ENSURE_SUCCESS(rv, rv);
-   out.Append(path);
-   out.Append(".bin");
-   return NS_OK;
+    PRBool equals;
+    nsresult rv;
+    nsCOMPtr<nsIURI> uri = in;
+    nsCAutoString spec;
+
+    out = "jsloader";
+
+    // Resolve resource:// URIs. At the end of this if/else block, we
+    // have both spec and uri variables identifying the same URI.
+    if (NS_SUCCEEDED(in->SchemeIs("resource", &equals)) && equals) {
+        nsCOMPtr<nsIIOService> ioService = do_GetIOService(&rv);
+        NS_ENSURE_SUCCESS(rv, rv);
+
+        nsCOMPtr<nsIProtocolHandler> ph;
+        rv = ioService->GetProtocolHandler("resource", getter_AddRefs(ph));
+        NS_ENSURE_SUCCESS(rv, rv);
+
+        nsCOMPtr<nsIResProtocolHandler> irph(do_QueryInterface(ph, &rv));
+        NS_ENSURE_SUCCESS(rv, rv);
+
+        rv = irph->ResolveURI(in, spec);
+        NS_ENSURE_SUCCESS(rv, rv);
+
+        rv = ioService->NewURI(spec, nsnull, nsnull, getter_AddRefs(uri));
+        NS_ENSURE_SUCCESS(rv, rv);
+    } else {
+        rv = in->GetSpec(spec);
+        NS_ENSURE_SUCCESS(rv, rv);
+    }
+
+    if (!canonicalizeBase(spec, out, mozilla::Omnijar::GRE) &&
+        !canonicalizeBase(spec, out, mozilla::Omnijar::APP)) {
+        if (NS_SUCCEEDED(uri->SchemeIs("file", &equals)) && equals) {
+            nsCOMPtr<nsIFileURL> baseFileURL;
+            baseFileURL = do_QueryInterface(uri, &rv);
+            NS_ENSURE_SUCCESS(rv, rv);
+
+            nsCAutoString path;
+            rv = baseFileURL->GetPath(path);
+            NS_ENSURE_SUCCESS(rv, rv);
+
+            out.Append(path);
+        } else if (NS_SUCCEEDED(uri->SchemeIs("jar", &equals)) && equals) {
+            nsCOMPtr<nsIJARURI> jarURI = do_QueryInterface(uri, &rv);
+            NS_ENSURE_SUCCESS(rv, rv);
+
+            nsCOMPtr<nsIURI> jarFileURI;
+            rv = jarURI->GetJARFile(getter_AddRefs(jarFileURI));
+            NS_ENSURE_SUCCESS(rv, rv);
+
+            nsCOMPtr<nsIFileURL> jarFileURL;
+            jarFileURL = do_QueryInterface(jarFileURI, &rv);
+            NS_ENSURE_SUCCESS(rv, rv);
+
+            nsCAutoString path;
+            rv = jarFileURL->GetPath(path);
+            NS_ENSURE_SUCCESS(rv, rv);
+            out.Append(path);
+
+            rv = jarURI->GetJAREntry(path);
+            NS_ENSURE_SUCCESS(rv, rv);
+            out.Append("/");
+            out.Append(path);
+        } else { // Very unlikely
+            nsCAutoString spec;
+            rv = uri->GetSpec(spec);
+            NS_ENSURE_SUCCESS(rv, rv);
+
+            out.Append("/");
+            out.Append(spec);
+        }
+    }
+
+    out.Append(".bin");
+    return NS_OK;
 }
 
 /* static */
diff --git a/modules/libjar/nsJAR.cpp b/modules/libjar/nsJAR.cpp
index 0738948..336511e 100644
--- mozilla/modules/libjar/nsJAR.cpp
+++ mozilla/modules/libjar/nsJAR.cpp
@@ -176,16 +176,13 @@ nsJAR::Open(nsIFile* zipFile)
   mLock = PR_NewLock();
   NS_ENSURE_TRUE(mLock, NS_ERROR_OUT_OF_MEMORY);
   
-#ifdef MOZ_OMNIJAR
   // The omnijar is special, it is opened early on and closed late
   // this avoids reopening it
-  PRBool equals;
-  nsresult rv = zipFile->Equals(mozilla::OmnijarPath(), &equals);
-  if (NS_SUCCEEDED(rv) && equals) {
-    mZip = mozilla::OmnijarReader();
+  nsZipArchive *zip = mozilla::Omnijar::GetReader(zipFile);
+  if (zip) {
+    mZip = zip;
     return NS_OK;
   }
-#endif
   return mZip->OpenArchive(zipFile);
 }
 
@@ -239,13 +236,12 @@ nsJAR::Close()
   mGlobalStatus = JAR_MANIFEST_NOT_PARSED;
   mTotalItemsInManifest = 0;
 
-#ifdef MOZ_OMNIJAR
-  if (mZip == mozilla::OmnijarReader()) {
+  if ((mZip == mozilla::Omnijar::GetReader(mozilla::Omnijar::GRE)) ||
+      (mZip == mozilla::Omnijar::GetReader(mozilla::Omnijar::APP))) {
     mZip.forget();
     mZip = new nsZipArchive();
     return NS_OK;
   }
-#endif
   return mZip->CloseArchive();
 }
 
@@ -396,12 +392,11 @@ nsJAR::GetCertificatePrincipal(const char* aFilename, nsIPrincipal** aPrincipal)
     return NS_ERROR_NULL_POINTER;
   *aPrincipal = nsnull;
 
-#ifdef MOZ_OMNIJAR
   // Don't check signatures in the omnijar - this is only
   // interesting for extensions/XPIs.
-  if (mZip == mozilla::OmnijarReader())
+  if ((mZip == mozilla::Omnijar::GetReader(mozilla::Omnijar::GRE)) ||
+      (mZip == mozilla::Omnijar::GetReader(mozilla::Omnijar::APP)))
     return NS_OK;
-#endif
 
   //-- Parse the manifest
   nsresult rv = ParseManifest();
diff --git a/modules/libpref/src/nsPrefService.cpp b/modules/libpref/src/nsPrefService.cpp
index 55eeb51..83b0609 100644
--- mozilla/modules/libpref/src/nsPrefService.cpp
+++ mozilla/modules/libpref/src/nsPrefService.cpp
@@ -72,10 +72,8 @@
 
 #include "nsITimelineService.h"
 
-#ifdef MOZ_OMNIJAR
 #include "mozilla/Omnijar.h"
 #include "nsZipArchive.h"
-#endif
 
 // Definitions
 #define INITIAL_PREF_FILES 10
@@ -798,30 +796,6 @@ static nsresult pref_LoadPrefsInDirList(const char *listId)
   return NS_OK;
 }
 
-//----------------------------------------------------------------------------------------
-// Initialize default preference JavaScript buffers from
-// appropriate TEXT resources
-//----------------------------------------------------------------------------------------
-static nsresult pref_InitDefaults()
-{
-  nsCOMPtr<nsIFile> greprefsFile;
-  nsresult          rv;
-
-  rv = NS_GetSpecialDirectory(NS_GRE_DIR, getter_AddRefs(greprefsFile));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = greprefsFile->AppendNative(NS_LITERAL_CSTRING("greprefs.js"));
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  rv = openPrefFile(greprefsFile);
-  if (NS_FAILED(rv)) {
-    NS_WARNING("Error parsing GRE default preferences. Is this an old-style embedding app?");
-  }
-
-  return NS_OK;
-}
-
-#ifdef MOZ_OMNIJAR
 static nsresult pref_ReadPrefFromJar(nsZipArchive* jarReader, const char *name)
 {
   nsZipItemPtr<char> manifest(jarReader, name, true);
@@ -835,77 +809,121 @@ static nsresult pref_ReadPrefFromJar(nsZipArchive* jarReader, const char *name)
   return rv;
 }
 
-static nsresult pref_InitAppDefaultsFromOmnijar()
+//----------------------------------------------------------------------------------------
+// Initialize default preference JavaScript buffers from
+// appropriate TEXT resources
+//----------------------------------------------------------------------------------------
+static nsresult pref_InitInitialObjects()
 {
   nsresult rv;
 
-  nsZipArchive* jarReader = mozilla::OmnijarReader();
-  if (!jarReader)
-    return pref_InitDefaults();
-
-  rv = pref_ReadPrefFromJar(jarReader, "greprefs.js");
-  NS_ENSURE_SUCCESS(rv, rv);
+  // In omni.jar case, we load the following prefs:
+  // - jar:$gre/omni.jar!/greprefs.js
+  // - jar:$gre/omni.jar!/defaults/pref/*.js
+  // In non omni.jar case, we load:
+  // - $gre/greprefs.js
+  //
+  // When $app == $gre, we additionally load, in all cases:
+  // - $gre/defaults/pref/*.js
+  // This is kept for bug 591866 (channel-prefs.js should not be in omni.jar).
+  // We load all files instead of channel-prefs.js only to have the same
+  // behaviour as $app != $gre.
+  //
+  // When $app != $gre, we additionally load, in omni.jar case:
+  // - jar:$app/omni.jar!/defaults/preferences/*.js
+  // - $app/defaults/preferences/*.js
+  // and in non omni.jar case:
+  // - $app/defaults/preferences/*.js
 
   nsZipFind *findPtr;
-  rv = jarReader->FindInit("defaults/pref/*.js$", &findPtr);
-  NS_ENSURE_SUCCESS(rv, rv);
-
-  nsAutoPtr<nsZipFind> find(findPtr);
-
+  nsAutoPtr<nsZipFind> find;
   nsTArray<nsCString> prefEntries;
   const char *entryName;
   PRUint16 entryNameLen;
-  while (NS_SUCCEEDED(find->FindNext(&entryName, &entryNameLen))) {
-    prefEntries.AppendElement(Substring(entryName, entryName + entryNameLen));
-  }
 
-  prefEntries.Sort();
-  for (PRUint32 i = prefEntries.Length(); i--; ) {
-    rv = pref_ReadPrefFromJar(jarReader, prefEntries[i].get());
-    if (NS_FAILED(rv))
-      NS_WARNING("Error parsing preferences.");
-  }
+  nsZipArchive* jarReader = mozilla::Omnijar::GetReader(mozilla::Omnijar::GRE);
+  if (jarReader) {
+    // Load jar:$gre/omni.jar!/greprefs.js
+    rv = pref_ReadPrefFromJar(jarReader, "greprefs.js");
+    NS_ENSURE_SUCCESS(rv, rv);
 
-  return NS_OK;
-}
-#endif
+    // Load jar:$gre/omni.jar!/defaults/pref/*.js
+    rv = jarReader->FindInit("defaults/pref/*.js$", &findPtr);
+    NS_ENSURE_SUCCESS(rv, rv);
 
-static nsresult pref_InitInitialObjects()
-{
-  nsresult rv;
+    find = findPtr;
+    while (NS_SUCCEEDED(find->FindNext(&entryName, &entryNameLen))) {
+      prefEntries.AppendElement(Substring(entryName, entryName + entryNameLen));
+    }
 
-  // first we parse the GRE default prefs. This also works if we're not using a GRE, 
-#ifdef MOZ_OMNIJAR
-  rv = pref_InitAppDefaultsFromOmnijar();
-#else
-  rv = pref_InitDefaults();
-#endif
-  NS_ENSURE_SUCCESS(rv, rv);
+    prefEntries.Sort();
+    for (PRUint32 i = prefEntries.Length(); i--; ) {
+      rv = pref_ReadPrefFromJar(jarReader, prefEntries[i].get());
+      if (NS_FAILED(rv))
+        NS_WARNING("Error parsing preferences.");
+    }
+  } else {
+    // Load $gre/greprefs.js
+    nsCOMPtr<nsIFile> greprefsFile;
+    rv = NS_GetSpecialDirectory(NS_GRE_DIR, getter_AddRefs(greprefsFile));
+    NS_ENSURE_SUCCESS(rv, rv);
 
-  nsCOMPtr<nsIFile> defaultPrefDir;
-  // now parse the "application" default preferences
-  rv = NS_GetSpecialDirectory(NS_APP_PREF_DEFAULTS_50_DIR, getter_AddRefs(defaultPrefDir));
-  NS_ENSURE_SUCCESS(rv, rv);
+    rv = greprefsFile->AppendNative(NS_LITERAL_CSTRING("greprefs.js"));
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    rv = openPrefFile(greprefsFile);
+    if (NS_FAILED(rv))
+      NS_WARNING("Error parsing GRE default preferences. Is this an old-style embedding app?");
+  }
+
+  if (!mozilla::Omnijar::HasOmnijar(mozilla::Omnijar::APP)) {
+    // Load $gre/defaults/pref/*.js
+    nsCOMPtr<nsIFile> defaultPrefDir;
+
+    rv = NS_GetSpecialDirectory(NS_APP_PREF_DEFAULTS_50_DIR, getter_AddRefs(defaultPrefDir));
+    NS_ENSURE_SUCCESS(rv, rv);
 
-  /* these pref file names should not be used: we process them after all other application pref files for backwards compatibility */
-  static const char* specialFiles[] = {
+    /* these pref file names should not be used: we process them after all other application pref files for backwards compatibility */
+    static const char* specialFiles[] = {
 #if defined(XP_MAC) || defined(XP_MACOSX)
       "macprefs.js"
 #elif defined(XP_WIN)
       "winpref.js"
 #elif defined(XP_UNIX)
       "unix.js"
-#if defined(_AIX)
+#if defined(VMS)
+      , "openvms.js"
+#elif defined(_AIX)
       , "aix.js"
 #endif
 #elif defined(XP_OS2)
       "os2pref.js"
+#elif defined(XP_BEOS)
+      "beos.js"
 #endif
-  };
+    };
 
-  rv = pref_LoadPrefsInDir(defaultPrefDir, specialFiles, NS_ARRAY_LENGTH(specialFiles));
-  if (NS_FAILED(rv)) {
-    NS_WARNING("Error parsing application default preferences.");
+    rv = pref_LoadPrefsInDir(defaultPrefDir, specialFiles, NS_ARRAY_LENGTH(specialFiles));
+    if (NS_FAILED(rv))
+      NS_WARNING("Error parsing application default preferences.");
+  }
+
+  // Load jar:$app/omni.jar!/defaults/preferences/*.js
+  nsZipArchive *appJarReader = mozilla::Omnijar::GetReader(mozilla::Omnijar::APP);
+  if (appJarReader) {
+    rv = appJarReader->FindInit("defaults/preferences/*.js$", &findPtr);
+    NS_ENSURE_SUCCESS(rv, rv);
+    find = findPtr;
+    prefEntries.Clear();
+    while (NS_SUCCEEDED(find->FindNext(&entryName, &entryNameLen))) {
+      prefEntries.AppendElement(Substring(entryName, entryName + entryNameLen));
+    }
+    prefEntries.Sort();
+    for (PRUint32 i = prefEntries.Length(); i--; ) {
+      rv = pref_ReadPrefFromJar(appJarReader, prefEntries[i].get());
+      if (NS_FAILED(rv))
+        NS_WARNING("Error parsing preferences.");
+    }
   }
 
   rv = pref_LoadPrefsInDirList(NS_APP_PREFS_DEFAULTS_DIR_LIST);
diff --git a/netwerk/protocol/res/nsResProtocolHandler.cpp b/netwerk/protocol/res/nsResProtocolHandler.cpp
index 0431b43..cae9ee7 100644
--- mozilla/netwerk/protocol/res/nsResProtocolHandler.cpp
+++ mozilla/netwerk/protocol/res/nsResProtocolHandler.cpp
@@ -157,20 +157,6 @@ nsResProtocolHandler::~nsResProtocolHandler()
 }
 
 nsresult
-nsResProtocolHandler::AddSpecialDir(const char* aSpecialDir, const nsACString& aSubstitution)
-{
-    nsCOMPtr<nsIFile> file;
-    nsresult rv = NS_GetSpecialDirectory(aSpecialDir, getter_AddRefs(file));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    nsCOMPtr<nsIURI> uri;
-    rv = mIOService->NewFileURI(file, getter_AddRefs(uri));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    return SetSubstitution(aSubstitution, uri);
-}
-
-nsresult
 nsResProtocolHandler::Init()
 {
     if (!mSubstitutions.Init(32))
@@ -181,24 +167,31 @@ nsResProtocolHandler::Init()
     mIOService = do_GetIOService(&rv);
     NS_ENSURE_SUCCESS(rv, rv);
 
-#ifdef MOZ_OMNIJAR
-    nsCOMPtr<nsIFile> omniJar(mozilla::OmnijarPath());
-    if (omniJar)
-        return Init(omniJar);
-#endif
-
-    // these entries should be kept in sync with the omnijar Init function
+    nsCAutoString appURI, greURI;
+    rv = mozilla::Omnijar::GetURIString(mozilla::Omnijar::APP, appURI);
+    NS_ENSURE_SUCCESS(rv, rv);
+    rv = mozilla::Omnijar::GetURIString(mozilla::Omnijar::GRE, greURI);
+    NS_ENSURE_SUCCESS(rv, rv);
 
     //
-    // make resource:/// point to the application directory
+    // make resource:/// point to the application directory or omnijar
     //
-    rv = AddSpecialDir(NS_OS_CURRENT_PROCESS_DIR, EmptyCString());
+    nsCOMPtr<nsIURI> uri;
+    rv = NS_NewURI(getter_AddRefs(uri), appURI.Length() ? appURI : greURI);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    rv = SetSubstitution(EmptyCString(), uri);
     NS_ENSURE_SUCCESS(rv, rv);
 
     //
     // make resource://gre/ point to the GRE directory
     //
-    rv = AddSpecialDir(NS_GRE_DIR, kGRE);
+    if (appURI.Length()) { // We already have greURI in uri if appURI.Length() is 0.
+        rv = NS_NewURI(getter_AddRefs(uri), greURI);
+        NS_ENSURE_SUCCESS(rv, rv);
+    }
+
+    rv = SetSubstitution(kGRE, uri);
     NS_ENSURE_SUCCESS(rv, rv);
 
     //XXXbsmedberg Neil wants a resource://pchrome/ for the profile chrome dir...
@@ -210,34 +203,6 @@ nsResProtocolHandler::Init()
     return rv;
 }
 
-#ifdef MOZ_OMNIJAR
-nsresult
-nsResProtocolHandler::Init(nsIFile *aOmniJar)
-{
-    nsresult rv;
-    nsCOMPtr<nsIURI> uri;
-    nsCAutoString omniJarSpec;
-    NS_GetURLSpecFromActualFile(aOmniJar, omniJarSpec, mIOService);
-
-    nsCAutoString urlStr("jar:");
-    urlStr += omniJarSpec;
-    urlStr += "!/";
-
-    rv = mIOService->NewURI(urlStr, nsnull, nsnull, getter_AddRefs(uri));
-    NS_ENSURE_SUCCESS(rv, rv);
-
-    // these entries should be kept in sync with the normal Init function
-
-    // resource:/// points to jar:omni.jar!/
-    SetSubstitution(EmptyCString(), uri);
-
-    // resource://gre/ points to jar:omni.jar!/
-    SetSubstitution(kGRE, uri);
-
-    return NS_OK;
-}
-#endif
-
 #ifdef MOZ_IPC
 static PLDHashOperator
 EnumerateSubstitution(const nsACString& aKey,
diff --git a/startupcache/StartupCache.cpp b/startupcache/StartupCache.cpp
index daca67c..62d252b 100644
--- mozilla/startupcache/StartupCache.cpp
+++ mozilla/startupcache/StartupCache.cpp
@@ -242,17 +242,26 @@ StartupCache::GetBuffer(const char* id, char** outbuf, PRUint32* length)
     } 
   }
 
-#ifdef MOZ_OMNIJAR
-  if (mozilla::OmnijarReader()) {
+  if (mozilla::Omnijar::GetReader(mozilla::Omnijar::APP)) {
     // no need to checksum omnijarred entries
-    nsZipItemPtr<char> zipItem(mozilla::OmnijarReader(), id);
+    nsZipItemPtr<char> zipItem(mozilla::Omnijar::GetReader(mozilla::Omnijar::APP), id);
     if (zipItem) {
       *outbuf = zipItem.Forget();
       *length = zipItem.Length();
       return NS_OK;
     } 
   }
-#endif
+
+  if (mozilla::Omnijar::GetReader(mozilla::Omnijar::GRE)) {
+    // no need to checksum omnijarred entries
+    nsZipItemPtr<char> zipItem(mozilla::Omnijar::GetReader(mozilla::Omnijar::GRE), id);
+    if (zipItem) {
+      *outbuf = zipItem.Forget();
+      *length = zipItem.Length();
+      return NS_OK;
+    } 
+  }
+
   return NS_ERROR_NOT_AVAILABLE;
 }
 
diff --git a/toolkit/xre/nsAppRunner.cpp b/toolkit/xre/nsAppRunner.cpp
index 1f1ddc8..30a5c7e 100644
--- mozilla/toolkit/xre/nsAppRunner.cpp
+++ mozilla/toolkit/xre/nsAppRunner.cpp
@@ -3912,25 +3912,35 @@ XRE_InitCommandLine(int aArgc, char* aArgv[])
 #endif
 #endif
 
-#ifdef MOZ_OMNIJAR
-  const char *omnijarPath = nsnull;
-  ArgResult ar = CheckArg("omnijar", PR_FALSE, &omnijarPath);
+  const char *path = nsnull;
+  ArgResult ar = CheckArg("grebase", PR_FALSE, &path);
   if (ar == ARG_BAD) {
-    PR_fprintf(PR_STDERR, "Error: argument -omnijar requires an omnijar path\n");
+    PR_fprintf(PR_STDERR, "Error: argument -grebase requires a path argument\n");
     return NS_ERROR_FAILURE;
   }
 
-  if (!omnijarPath)
+  if (!path)
     return rv;
 
-  nsCOMPtr<nsILocalFile> omnijar;
-  rv = NS_NewNativeLocalFile(nsDependentCString(omnijarPath), PR_TRUE,
-                             getter_AddRefs(omnijar));
-  if (NS_SUCCEEDED(rv))
-    mozilla::SetOmnijar(omnijar);
-#endif
+  nsCOMPtr<nsILocalFile> greBase;
+  rv = XRE_GetFileFromPath(path, getter_AddRefs(greBase));
+  if (NS_FAILED(rv))
+    return rv;
 
-  return rv;
+  ar = CheckArg("appbase", PR_FALSE, &path);
+  if (ar == ARG_BAD) {
+    PR_fprintf(PR_STDERR, "Error: argument -appbase requires a path argument\n");
+    return NS_ERROR_FAILURE;
+  }
+
+  nsCOMPtr<nsILocalFile> appBase;
+  if (path) {
+      rv = XRE_GetFileFromPath(path, getter_AddRefs(appBase));
+      if (NS_FAILED(rv))
+        return rv;
+  }
+
+  return mozilla::Omnijar::SetBase(greBase, appBase);
 }
 
 nsresult
diff --git a/toolkit/xre/nsEmbedFunctions.cpp b/toolkit/xre/nsEmbedFunctions.cpp
index 53ec37f..b777049 100644
--- mozilla/toolkit/xre/nsEmbedFunctions.cpp
+++ mozilla/toolkit/xre/nsEmbedFunctions.cpp
@@ -517,9 +517,7 @@ XRE_InitChildProcess(int aArgc,
       // Allow ProcessChild to clean up after itself before going out of
       // scope and being deleted
       process->CleanUp();
-#ifdef MOZ_OMNIJAR
-      mozilla::SetOmnijar(nsnull);
-#endif
+      mozilla::Omnijar::SetBase(nsnull, nsnull);
     }
   }
 
diff --git a/xpcom/build/Makefile.in b/xpcom/build/Makefile.in
index 9421f4c..b08ff56 100644
--- mozilla/xpcom/build/Makefile.in
+++ mozilla/xpcom/build/Makefile.in
@@ -69,6 +69,7 @@ CPPSRCS		= \
 		nsXPComInit.cpp \
 		nsXPCOMStrings.cpp \
 		Services.cpp \
+		Omnijar.cpp \
 		$(NULL)
 
 ifndef MOZ_ENABLE_LIBXUL
@@ -77,10 +78,6 @@ CPPSRCS += dlldeps.cpp
 endif
 endif
 
-ifdef MOZ_OMNIJAR
-CPPSRCS += Omnijar.cpp
-endif
-
 SHARED_LIBRARY_LIBS = \
 		$(DEPTH)/chrome/src/$(LIB_PREFIX)chrome_s.$(LIB_SUFFIX) \
 		../ds/$(LIB_PREFIX)xpcomds_s.$(LIB_SUFFIX) \
diff --git a/xpcom/build/Omnijar.cpp b/xpcom/build/Omnijar.cpp
index 14a0dcf..f5d9c31 100644
--- mozilla/xpcom/build/Omnijar.cpp
+++ mozilla/xpcom/build/Omnijar.cpp
@@ -21,6 +21,7 @@
  *
  * Contributor(s):
  *   Michael Wu <mwu@mozilla.com>
+ *   Mike Hommey <mh@glandium.org>
  *
  * Alternatively, the contents of this file may be used under the terms of
  * either the GNU General Public License Version 2 or later (the "GPL"), or
@@ -38,64 +39,170 @@
 
 #include "Omnijar.h"
 
-#include "nsILocalFile.h"
-#include "nsXULAppAPI.h"
+#include "nsIFile.h"
 #include "nsZipArchive.h"
+#include "nsNetUtil.h"
 
-static nsILocalFile* sOmnijarPath = nsnull;
-static nsZipArchive* sOmnijarReader = nsnull;
+namespace mozilla {
 
-static void
-SetupReader()
+nsIFile *Omnijar::sPath[2] = { nsnull, nsnull };
+PRBool Omnijar::sIsOmnijar[2] = { PR_FALSE, PR_FALSE };
+
+#ifdef MOZ_ENABLE_LIBXUL
+nsZipArchive *Omnijar::sReader[2] = { nsnull, nsnull };
+#endif
+
+static already_AddRefed<nsIFile>
+ComputePath(nsIFile *aPath, PRBool &aIsOmnijar)
 {
-    if (!sOmnijarPath) {
-        return;
-    }
+    PRBool isDir;
+    aIsOmnijar = PR_FALSE;
+    if (!aPath || NS_FAILED(aPath->IsDirectory(&isDir)) || !isDir)
+        return nsnull;
 
-    nsZipArchive* zipReader = new nsZipArchive();
-    if (!zipReader) {
-        NS_IF_RELEASE(sOmnijarPath);
-        return;
+    nsCOMPtr<nsIFile> path;
+#ifdef MOZ_ENABLE_LIBXUL
+    // Search for omni.jar in the given directory
+    if (!isDir || NS_FAILED(aPath->Clone(getter_AddRefs(path))))
+        return nsnull;
+
+    if (NS_FAILED(path->AppendNative(NS_LITERAL_CSTRING("omni.jar"))))
+        return nsnull;
+
+    if (NS_FAILED(path->Exists(&aIsOmnijar)))
+        return nsnull;
+#endif
+
+    if (!aIsOmnijar && NS_FAILED(aPath->Clone(getter_AddRefs(path))))
+        return nsnull;
+
+    return path.forget();
+}
+
+nsresult
+Omnijar::SetBase(nsIFile *aGrePath, nsIFile *aAppPath)
+{
+    NS_ABORT_IF_FALSE(aGrePath || !aAppPath, "Omnijar::SetBase(NULL, something) call forbidden");
+
+#ifdef MOZ_ENABLE_LIBXUL
+    if (sReader[GRE]) {
+        sReader[GRE]->CloseArchive();
+        delete sReader[GRE];
     }
+    if (sReader[APP]) {
+        sReader[APP]->CloseArchive();
+        delete sReader[APP];
+    }
+    sReader[APP] = sReader[GRE] = nsnull;
+#endif
 
-    if (NS_FAILED(zipReader->OpenArchive(sOmnijarPath))) {
-        delete zipReader;
-        NS_IF_RELEASE(sOmnijarPath);
-        return;
+    nsresult rv;
+    PRBool equals;
+    if (aAppPath) {
+        rv = aAppPath->Equals(aGrePath, &equals);
+        NS_ENSURE_SUCCESS(rv, rv);
+    } else {
+        equals = PR_TRUE;
     }
 
-    sOmnijarReader = zipReader;
+    nsCOMPtr<nsIFile> grePath = ComputePath(aGrePath, sIsOmnijar[GRE]);
+    nsCOMPtr<nsIFile> appPath = ComputePath(equals ? nsnull : aAppPath, sIsOmnijar[APP]);
+
+    NS_IF_RELEASE(sPath[GRE]);
+    sPath[GRE] = grePath;
+    NS_IF_ADDREF(sPath[GRE]);
+
+    NS_IF_RELEASE(sPath[APP]);
+    sPath[APP] = appPath;
+    NS_IF_ADDREF(sPath[APP]);
+
+    return NS_OK;
 }
 
-nsILocalFile*
-mozilla::OmnijarPath()
+already_AddRefed<nsIFile>
+Omnijar::GetBase(Type aType)
 {
-    if (!sOmnijarReader)
-        SetupReader();
+    NS_ABORT_IF_FALSE(sPath[0], "Omnijar not initialized");
 
-    return sOmnijarPath;
+    if (!sIsOmnijar[aType]) {
+        NS_IF_ADDREF(sPath[aType]);
+        return sPath[aType];
+    }
+
+    nsCOMPtr<nsIFile> file, path;
+    if (NS_FAILED(sPath[aType]->Clone(getter_AddRefs(file))))
+        return nsnull;
+
+    if (NS_FAILED(file->GetParent(getter_AddRefs(path))))
+        return nsnull;
+    return path.forget();
 }
 
-nsZipArchive*
-mozilla::OmnijarReader()
+#ifdef MOZ_ENABLE_LIBXUL
+nsZipArchive *
+Omnijar::GetReader(Type aType)
 {
-    if (!sOmnijarReader)
-        SetupReader();
+    if (!sIsOmnijar[aType])
+        return nsnull;
+
+    if (sReader[aType])
+        return sReader[aType];
+
+    nsZipArchive* zipReader = new nsZipArchive();
+    if (!zipReader)
+        return nsnull;
 
-    return sOmnijarReader;
+    if (NS_FAILED(zipReader->OpenArchive(sPath[aType]))) {
+        delete zipReader;
+        return nsnull;
+    }
+
+    return (sReader[aType] = zipReader);
 }
 
-void
-mozilla::SetOmnijar(nsILocalFile* aPath)
+nsZipArchive *
+Omnijar::GetReader(nsIFile *aPath)
 {
-    NS_IF_RELEASE(sOmnijarPath);
-    if (sOmnijarReader) {
-        sOmnijarReader->CloseArchive();
-        delete sOmnijarReader;
-        sOmnijarReader = nsnull;
+    PRBool equals;
+    nsresult rv;
+
+    if (sIsOmnijar[GRE]) {
+        rv = sPath[GRE]->Equals(aPath, &equals);
+        if (NS_SUCCEEDED(rv) && equals)
+            return GetReader(GRE);
     }
+    if (sIsOmnijar[APP]) {
+        rv = sPath[APP]->Equals(aPath, &equals);
+        if (NS_SUCCEEDED(rv) && equals)
+            return GetReader(APP);
+    }
+    return nsnull;
+}
+#endif
+
+nsresult
+Omnijar::GetURIString(Type aType, nsCString &result)
+{
+    NS_ABORT_IF_FALSE(sPath[0], "Omnijar not initialized");
 
-    sOmnijarPath = aPath;
-    NS_IF_ADDREF(sOmnijarPath);
+    result = "";
+
+    if ((aType == APP) && (!sPath[APP]))
+        return NS_OK;
+
+    nsCAutoString omniJarSpec;
+    nsresult rv = NS_GetURLSpecFromActualFile(sPath[aType], omniJarSpec);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    if (sIsOmnijar[aType]) {
+        result = "jar:";
+        result += omniJarSpec;
+        result += "!";
+    } else {
+        result = omniJarSpec;
+    }
+    result += "/";
+    return NS_OK;
 }
 
+} /* namespace mozilla */
diff --git a/xpcom/build/Omnijar.h b/xpcom/build/Omnijar.h
index 33a2b4a..a36678a 100644
--- mozilla/xpcom/build/Omnijar.h
+++ mozilla/xpcom/build/Omnijar.h
@@ -21,6 +21,7 @@
  *
  * Contributor(s):
  *   Michael Wu <mwu@mozilla.com>
+ *   Mike Hommey <mh@glandium.org>
  *
  * Alternatively, the contents of this file may be used under the terms of
  * either the GNU General Public License Version 2 or later (the "GPL"), or
@@ -39,24 +40,132 @@
 #ifndef mozilla_Omnijar_h
 #define mozilla_Omnijar_h
 
-class nsILocalFile;
-class nsZipArchive;
+#include "nscore.h"
+#include "nsTArray.h"
+#include "nsCOMPtr.h"
+#include "nsString.h"
 
-#ifdef MOZ_OMNIJAR
+class nsIFile;
+class nsZipArchive;
+class nsIURI;
 
 namespace mozilla {
 
+#ifdef MOZ_ENABLE_LIBXUL
+#define OMNIJAR_EXPORT
+#else
+#define OMNIJAR_EXPORT NS_EXPORT
+#endif
+
+class OMNIJAR_EXPORT Omnijar {
+private:
+/**
+ * Store an nsIFile for either a base directory when there is no omni.jar,
+ * or omni.jar itself. We can store two paths here, one for GRE
+ * (corresponding to resource://gre/) and one for APP
+ * (corresponding to resource:/// and resource://app/), but only
+ * store one when both point to the same location (unified).
+ */
+static nsIFile *sPath[2];
 /**
- * This returns the path to the omnijar.
- * If the omnijar isn't available, this function will return null.
- * Callers should fallback to flat packaging if null.
+ * Store whether the corresponding sPath is an omni.jar or a directory
  */
-nsILocalFile *OmnijarPath();
-nsZipArchive *OmnijarReader();
-void SetOmnijar(nsILocalFile* aPath);
+static PRBool sIsOmnijar[2];
 
-} /* namespace mozilla */
+#ifdef MOZ_ENABLE_LIBXUL
+/**
+ * Cached nsZipArchives for the corresponding sPath
+ */
+static nsZipArchive *sReader[2];
+#endif
+
+public:
+enum Type {
+    GRE = 0,
+    APP = 1
+};
+
+/**
+ * Returns whether SetBase has been called at least once with
+ * a valid nsIFile
+ */
+static PRBool
+IsInitialized()
+{
+    // GRE path is always set after initialization.
+    return sPath[0] != nsnull;
+}
+
+/**
+ * Sets the base directories for GRE and APP. APP base directory
+ * may be nsnull, in case the APP and GRE directories are the same.
+ */
+static nsresult SetBase(nsIFile *aGrePath, nsIFile *aAppPath);
+
+/**
+ * Returns an nsIFile pointing to the omni.jar file for GRE or APP.
+ * Returns nsnull when there is no corresponding omni.jar.
+ * Also returns nsnull for APP in the unified case.
+ */
+static already_AddRefed<nsIFile>
+GetPath(Type aType)
+{
+    NS_ABORT_IF_FALSE(sPath[0], "Omnijar not initialized");
 
-#endif /* MOZ_OMNIJAR */
+    if (sIsOmnijar[aType]) {
+        NS_IF_ADDREF(sPath[aType]);
+        return sPath[aType];
+    }
+    return nsnull;
+}
+
+/**
+ * Returns whether GRE or APP use an omni.jar. Returns PR_FALSE when
+ * using an omni.jar in the unified case.
+ */
+static PRBool
+HasOmnijar(Type aType)
+{
+    return sIsOmnijar[aType];
+}
+
+/**
+ * Returns the base directory for GRE or APP. In the unified case,
+ * returns nsnull for APP.
+ */
+static already_AddRefed<nsIFile> GetBase(Type aType);
+
+/**
+ * Returns a nsZipArchive pointer for the omni.jar file for GRE or
+ * APP. Returns nsnull in the same cases GetPath() would.
+ */
+#ifdef MOZ_ENABLE_LIBXUL
+static nsZipArchive *GetReader(Type aType);
+#else
+static nsZipArchive *GetReader(Type aType) { return nsnull; }
+#endif
+
+/**
+ * Returns a nsZipArchive pointer for the given path if and only if the given
+ * path is the omni.jar for either GRE or APP.
+ */
+#ifdef MOZ_ENABLE_LIBXUL
+static nsZipArchive *GetReader(nsIFile *aPath);
+#else
+static nsZipArchive *GetReader(nsIFile *aPath) { return nsnull; }
+#endif
+
+/**
+ * Returns the URI string corresponding to the omni.jar or directory
+ * for GRE or APP. i.e. jar:/path/to/omni.jar!/ for omni.jar and
+ * /path/to/base/dir/ otherwise. Returns an empty string for APP in
+ * the unified case.
+ * The returned URI is guaranteed to end with a slash.
+ */
+static nsresult GetURIString(Type aType, nsCString &result);
+
+}; /* class Omnijar */
+
+} /* namespace mozilla */
 
 #endif /* mozilla_Omnijar_h */
diff --git a/xpcom/build/nsXPComInit.cpp b/xpcom/build/nsXPComInit.cpp
index 722bbc5..043a2a2 100644
--- mozilla/xpcom/build/nsXPComInit.cpp
+++ mozilla/xpcom/build/nsXPComInit.cpp
@@ -467,25 +467,25 @@ NS_InitXPCOM2(nsIServiceManager* *result,
         if (NS_FAILED(rv)) return rv;
     }
 
-#ifdef MOZ_OMNIJAR
     NS_TIME_FUNCTION_MARK("Next: Omnijar init");
 
-    if (!mozilla::OmnijarPath()) {
-        nsCOMPtr<nsILocalFile> omnijar;
+    if (!mozilla::Omnijar::IsInitialized()) {
+        nsCOMPtr<nsILocalFile> greDir, appDir;
         nsCOMPtr<nsIFile> file;
 
-        rv = NS_ERROR_FAILURE;
         nsDirectoryService::gService->Get(NS_GRE_DIR,
                                           NS_GET_IID(nsIFile),
                                           getter_AddRefs(file));
-        if (file)
-            rv = file->Append(NS_LITERAL_STRING("omni.jar"));
-        if (NS_SUCCEEDED(rv))
-            omnijar = do_QueryInterface(file);
-        if (NS_SUCCEEDED(rv))
-            mozilla::SetOmnijar(omnijar);
+        greDir = do_QueryInterface(file);
+
+        nsDirectoryService::gService->Get(NS_XPCOM_CURRENT_PROCESS_DIR,
+                                          NS_GET_IID(nsIFile),
+                                          getter_AddRefs(file));
+        appDir = do_QueryInterface(file);
+
+        rv = mozilla::Omnijar::SetBase(greDir, appDir);
+        NS_ENSURE_SUCCESS(rv, rv);
     }
-#endif
 
 #ifdef MOZ_IPC
     if ((sCommandLineWasInitialized = !CommandLine::IsInitialized())) {
@@ -774,9 +774,7 @@ ShutdownXPCOM(nsIServiceManager* servMgr)
     }
 #endif
 
-#ifdef MOZ_OMNIJAR
-    mozilla::SetOmnijar(nsnull);
-#endif
+    mozilla::Omnijar::SetBase(nsnull, nsnull);
 
     NS_LogTerm();
 
diff --git a/xpcom/components/nsComponentManager.cpp b/xpcom/components/nsComponentManager.cpp
index bdf6645..2a0be15 100644
--- mozilla/xpcom/components/nsComponentManager.cpp
+++ mozilla/xpcom/components/nsComponentManager.cpp
@@ -180,8 +180,6 @@ NS_DEFINE_CID(kCategoryManagerCID, NS_CATEGORYMANAGER_CID);
 #define COMPMGR_TIME_FUNCTION_CONTRACTID(cid) do {} while (0)
 #endif
 
-#define kOMNIJAR_PREFIX  NS_LITERAL_CSTRING("resource:///")
-
 nsresult
 nsGetServiceFromCategory::operator()(const nsIID& aIID, void** aInstancePtr) const
 {
@@ -395,14 +393,20 @@ nsresult nsComponentManagerImpl::Init()
     for (PRUint32 i = 0; i < sStaticModules->Length(); ++i)
         RegisterModule((*sStaticModules)[i], NULL);
 
-#ifdef MOZ_OMNIJAR
-    if (mozilla::OmnijarPath()) {
-        nsCOMPtr<nsIZipReader> omnijarReader = new nsJAR();
-        rv = omnijarReader->Open(mozilla::OmnijarPath());
-        if (NS_SUCCEEDED(rv))
-            RegisterJarManifest(omnijarReader, "chrome.manifest", false);
+    nsCOMPtr<nsIFile> appOmnijar = mozilla::Omnijar::GetPath(mozilla::Omnijar::APP);
+    if (appOmnijar) {
+        cl = sModuleLocations->InsertElementAt(1); // Insert after greDir
+        cl->type = NS_COMPONENT_LOCATION;
+        cl->location = do_QueryInterface(appOmnijar);
+        cl->jar = true;
+    }
+    nsCOMPtr<nsIFile> greOmnijar = mozilla::Omnijar::GetPath(mozilla::Omnijar::GRE);
+    if (greOmnijar) {
+        cl = sModuleLocations->InsertElementAt(0);
+        cl->type = NS_COMPONENT_LOCATION;
+        cl->location = do_QueryInterface(greOmnijar);
+        cl->jar = true;
     }
-#endif
 
     for (PRUint32 i = 0; i < sModuleLocations->Length(); ++i) {
         ComponentLocation& l = sModuleLocations->ElementAt(i);
@@ -417,15 +421,6 @@ nsresult nsComponentManagerImpl::Init()
             RegisterJarManifest(reader, "chrome.manifest", false);
     }
 
-#ifdef MOZ_OMNIJAR
-    if (mozilla::OmnijarPath()) {
-        cl = sModuleLocations->InsertElementAt(0);
-        cl->type = NS_COMPONENT_LOCATION;
-        cl->location = mozilla::OmnijarPath();
-        cl->jar = true;
-    }
-#endif
-
     nsCategoryManager::GetSingleton()->SuppressNotifications(false);
 
     mStatus = NORMAL;
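
To make GetURIString() concrete, these are the shapes it produces per
the Omnijar.cpp hunk above (the xulrunner install path is a made-up
example):

nsCAutoString uri;
mozilla::Omnijar::GetURIString(mozilla::Omnijar::GRE, uri);
// GRE slot backed by an omni.jar:
//   uri == "jar:file:///usr/lib/xulrunner-2.0/omni.jar!/"
// GRE slot backed by a plain directory:
//   uri == "file:///usr/lib/xulrunner-2.0/"
// APP slot in the unified case: uri == "" (with NS_OK returned).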



1.1                  src/patchsets/seamonkey/2.1/5002_libpng-1.5-support.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5002_libpng-1.5-support.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5002_libpng-1.5-support.patch?rev=1.1&content-type=text/plain

Index: 5002_libpng-1.5-support.patch
===================================================================
# HG changeset patch
# User Glenn Randers-Pehrson <glennrp+bmo@gmail.com>
# Date 1301357640 14400
# Node ID a60e842a1561ee51bf6eb3f27dc71ea3f840fa03
# Parent  c88a58ccb0af893431816e55a1bb0b00012db197
Bug 645519 - Firefox-4.0 compile fails if "--with-system-png" is ON; r=joe

--- mozilla/modules/libpr0n/decoders/nsPNGDecoder.cpp
+++ mozilla/modules/libpr0n/decoders/nsPNGDecoder.cpp
@@ -381,23 +381,34 @@ PNGGetColorProfile(png_structp png_ptr, 
                    int color_type, qcms_data_type *inType, PRUint32 *intent)
 {
   qcms_profile *profile = nsnull;
   *intent = QCMS_INTENT_PERCEPTUAL; // Our default
 
   // First try to see if iCCP chunk is present
   if (png_get_valid(png_ptr, info_ptr, PNG_INFO_iCCP)) {
     png_uint_32 profileLen;
+#if (PNG_LIBPNG_VER < 10500)
     char *profileData, *profileName;
+#else
+    png_bytep profileData;
+    png_charp profileName;
+#endif
     int compression;
 
     png_get_iCCP(png_ptr, info_ptr, &profileName, &compression,
                  &profileData, &profileLen);
 
-    profile = qcms_profile_from_memory(profileData, profileLen);
+    profile = qcms_profile_from_memory(
+#if (PNG_LIBPNG_VER < 10500)
+                                       profileData,
+#else
+                                       (char *)profileData,
+#endif
+                                       profileLen);
     if (profile) {
       PRUint32 profileSpace = qcms_profile_get_color_space(profile);
 
       PRBool mismatch = PR_FALSE;
       if (color_type & PNG_COLOR_MASK_COLOR) {
         if (profileSpace != icSigRgbData)
           mismatch = PR_TRUE;
       } else {
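
The root cause, for context: libpng 1.5.0 changed png_get_iCCP() to
take the profile data as png_bytepp rather than png_charpp, so a single
declaration can no longer compile against every system libpng. A
standalone sketch of the version-guarded pattern the hunk uses
(creation of png_ptr/info_ptr and the png_get_valid() check are
assumed, as in the decoder above):

#include <png.h>

static void ReadICCProfile(png_structp png_ptr, png_infop info_ptr)
{
    png_charp profileName;
    png_uint_32 profileLen;
    int compression;
#if PNG_LIBPNG_VER < 10500
    char *profileData;     /* prototype through libpng 1.4 */
#else
    png_bytep profileData; /* libpng 1.5 switched to png_bytep */
#endif
    png_get_iCCP(png_ptr, info_ptr, &profileName, &compression,
                 &profileData, &profileLen);
    /* Consumers that need a char buffer, e.g. qcms_profile_from_memory(),
       must cast profileData to (char *) on libpng >= 1.5. */
}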





1.1                  src/patchsets/seamonkey/2.1/5003_fix-resources-path.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5003_fix-resources-path.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5003_fix-resources-path.patch?rev=1.1&content-type=text/plain

Index: 5003_fix-resources-path.patch
===================================================================
# HG changeset patch
# Parent ff1b810f78226d7f4010909d3cde05a57fdcf20c
Bug 620931 part 4 - Fix resource://app/ to always point to the same as resource:///

--- mozilla/netwerk/protocol/res/nsResProtocolHandler.cpp
+++ mozilla/netwerk/protocol/res/nsResProtocolHandler.cpp
@@ -74,16 +74,17 @@ static nsResProtocolHandler *gResHandler
 //    set NSPR_LOG_FILE=log.txt
 //
 // this enables PR_LOG_ALWAYS level information and places all output in
 // the file log.txt
 //
 static PRLogModuleInfo *gResLog;
 #endif
 
+#define kAPP           NS_LITERAL_CSTRING("app")
 #define kGRE           NS_LITERAL_CSTRING("gre")
 
 //----------------------------------------------------------------------------
 // nsResURL : overrides nsStandardURL::GetFile to provide nsIFile resolution
 //----------------------------------------------------------------------------
 
 nsresult
 nsResURL::EnsureFile()
@@ -179,16 +180,22 @@ nsResProtocolHandler::Init()
     nsCOMPtr<nsIURI> uri;
     rv = NS_NewURI(getter_AddRefs(uri), appURI.Length() ? appURI : greURI);
     NS_ENSURE_SUCCESS(rv, rv);
 
     rv = SetSubstitution(EmptyCString(), uri);
     NS_ENSURE_SUCCESS(rv, rv);
 
     //
+    // make resource://app/ point to the application directory or omnijar
+    //
+    rv = SetSubstitution(kAPP, uri);
+    NS_ENSURE_SUCCESS(rv, rv);
+
+    //
     // make resource://gre/ point to the GRE directory
     //
     if (appURI.Length()) { // We already have greURI in uri if appURI.Length() is 0.
         rv = NS_NewURI(getter_AddRefs(uri), greURI);
         NS_ENSURE_SUCCESS(rv, rv);
     }
 
     rv = SetSubstitution(kGRE, uri);
--- mozilla/toolkit/xre/nsXREDirProvider.cpp
+++ mozilla/toolkit/xre/nsXREDirProvider.cpp
@@ -300,19 +300,16 @@ nsXREDirProvider::GetFile(const char* aP
     }
   }
   else if (!strcmp(aProperty, XRE_EXECUTABLE_FILE) && gArgv[0]) {
     nsCOMPtr<nsILocalFile> lf;
     rv = XRE_GetBinaryPath(gArgv[0], getter_AddRefs(lf));
     if (NS_SUCCEEDED(rv))
       file = lf;
   }
-  else if (!strcmp(aProperty, "resource:app")) {
-    rv = GetAppDir()->Clone(getter_AddRefs(file));
-  }
 
   else if (!strcmp(aProperty, NS_APP_PROFILE_DIR_STARTUP) && mProfileDir) {
     return mProfileDir->Clone(aFile);
   }
   else if (!strcmp(aProperty, NS_APP_PROFILE_LOCAL_DIR_STARTUP)) {
     if (mProfileLocalDir)
       return mProfileLocalDir->Clone(aFile);
 




1.1                  src/patchsets/seamonkey/2.1/5004_enable-omnijar-in-xulrunner.patch

file : http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5004_enable-omnijar-in-xulrunner.patch?rev=1.1&view=markup
plain: http://sources.gentoo.org/viewvc.cgi/gentoo/src/patchsets/seamonkey/2.1/5004_enable-omnijar-in-xulrunner.patch?rev=1.1&content-type=text/plain

Index: 5004_enable-omnijar-in-xulrunner.patch
===================================================================
# HG changeset patch
# Parent 7d2228db71a299afca60babff632a967d2d6c456
Bug 620931 part 5 - Enable omni.jar by default on xulrunner

--- mozilla/xulrunner/confvars.sh
+++ mozilla/xulrunner/confvars.sh
@@ -36,14 +36,15 @@
 #
 # ***** END LICENSE BLOCK *****
 
 MOZ_APP_NAME=xulrunner
 MOZ_APP_DISPLAYNAME=XULRunner
 MOZ_UPDATER=1
 MOZ_XULRUNNER=1
 MOZ_ENABLE_LIBXUL=1
+MOZ_CHROME_FILE_FORMAT=omni
 MOZ_STATIC_BUILD_UNSUPPORTED=1
 MOZ_APP_VERSION=$MOZILLA_VERSION
 if test "$MOZ_STORAGE"; then
   MOZ_PLACES=1
 fi
 MOZ_EXTENSIONS_DEFAULT=" gnomevfs"






