libtango (0.99.9.dfsg-1) debian-dir only changes

Summary

 debian/changelog                |   34 
 debian/compat                   |    1 
 debian/control                  |   30 
 debian/copyright                |  314 +
 debian/libtango-headers.install |    2 
 debian/libtango-ldc-dev.install |    2 
 debian/patches/01_versions.diff |   15 
 debian/patches/02_compress.diff | 6727 ++++++++++++++++++++++++++++++++++++++++
 debian/patches/03_ldc.diff      |   53 
 debian/patches/04_device.diff   |   11 
 debian/patches/series           |    4 
 debian/rules                    |   56 
 12 files changed, 7249 insertions(+)

    
download this patch

Patch contents

--- libtango-0.99.9.dfsg.orig/debian/copyright
+++ libtango-0.99.9.dfsg/debian/copyright
@@ -0,0 +1,314 @@
+This work was packaged for Debian by:
+
+    Arthur Loiret <aloiret@debian.org> on Sat, 07 Nov 2009 08:47:34 +0000
+
+It was downloaded from the SVN repository.
+
+Upstream Authors:
+
+    Kris Bell, Lars Ivar Igesund, Sean Kelly, Michael Parker,
+    And many others.
+
+Copyright:
+
+    Copyright (c) 2005-2006 Lars Ivar Igesund
+    Copyright (c) 2004-2009 Kris Bell
+    Copyright (C) 2005-2006 Sean Kelly
+    Copyright (c) 2007 Daniel Keep
+    Copyright (C) 2008-2009 Fawzi Mohamed
+    Copyright (c) 2006 James Pelcis
+    Copyright (c) 2008 Darryl Bleau
+    Copyright (C) 2000-2008 by Digital Mars
+    Copyright (C) 2004 David Friedman
+    Copyright (C) 2004 Christopher E. Miller
+    Copyright (C) 2008 Chris Wright
+    Copyright (c) 2006 Juan Jose Comellas
+    Copyright (c) 2008 Jeff Davey
+    Copyright (C) 2008 Don Clugston
+    Copyright (c) 2005 John Chapman
+    Copyright 1984, 1987, 1989, 1995, 2000 by Stephen L. Moshier
+    Copyright (c) 2008 Steven Schveighoffer
+    Copyright (C) 2007-2008 Anders Halager
+    Copyright (c) 2006 UWB. All rights reserved
+    Copyright (c) Lester L. Martin II
+    Copyright (C) 1996-2006 Julian Seward <jseward@bzip.org>
+    Copyright (c) 2008 Robin Kreis
+    Copyright (C) 1994 X Consortium
+    Copyright (c) 2007-2008 Matti Niemenmaa.
+    Copyright (c) 2007 Peter Triller.
+    Copyright (c) 2007-2008 Jascha Wetzel.
+    Copyright (c) 2007 Peter Triller.
+    Copyright (c) 2006 Keinfarbton.
+    Copyright: Copyright (C) 2007 Tomasz Stachowiak
+    Copyright (c) 2004-2008 Tango group
+    Copyright (C) 2005-2009 The Tango Team
+
+Licenses:
+
+The following files are licensed under the GPL version 3:
+
+    doc/dil/data/lang_de.d
+    doc/dil/data/lang_en.d
+    doc/dil/data/lang_fi.d
+    doc/dil/data/lang_tr.d
+
+On Debian GNU systems, the complete text of the GPL version 3
+can be found in `/usr/share/common-licenses/GPL-3'.
+
+For other files:
+
+    Tango is Open Source software, distributed by a group of developers which
+    has been set up for the purpose of providing a vendor-neutral owner of
+    Tango intellectual property. The goals of all Tango licensing decisions
+    are to:
+
+      * Encourage adoption
+      * Discourage political contention
+      * Encourage collaboration and integration with other projects
+      * Be transparent
+
+    Tango is dual-licensed:
+      * Academic Free License v3.0
+        http://www.dsource.org/projects/tango/wiki/AcademicFreeLicense30) [2]
+      * BSD License
+        (http://www.dsource.org/projects/tango/wiki/BSDLicense) [1][3]
+
+    The preferred license is the Academic Free License v3.0. All Tango
+    projects release their code under the terms of this license. Both licenses:
+
+      * Allow commercial use without encumbrance
+      * Provide broad rights to make new products and derivative works
+      * Place no requirement on users to contribute back (although we
+        appreciate it if you do)
+
+    Users who wish to include Tango with software licensed under the (L)GPL
+    will want to use Tango under the terms of the BSD License. [1] Tango
+    projects may request a variance from the developers to release their
+    projects under additional licenses in conjunction with the AFL.
+
+    If you have further questions regarding Tango licensing, please do not
+    hesitate to contact us (http://dsource.org/projects/tango/wiki/Contact).
+
+
+    [1] The advertising clause has not been a part of the BSD License since
+    July 22, 1999.
+    (ftp://ftp.cs.berkeley.edu/pub/4bsd/README.Impt.License.Change)
+
+    [2] Academic Free License v3.0
+    Original Work: Tango
+    Licensor: Tango contributors
+
+    This Academic Free License (the "License") applies to any original work
+    of authorship (the "Original Work") whose owner (the "Licensor") has
+    placed the following licensing notice adjacent to the copyright notice
+    for the Original Work:
+
+    Licensed under the Academic Free License version 3.0
+    1. Grant of Copyright License. Licensor grants You a worldwide,
+    royalty-free, non-exclusive, sublicensable license, for the duration
+    of the copyright, to do the following:
+
+        a) to reproduce the Original Work in copies, either alone or as part
+           of a collective work;
+
+        b) to translate, adapt, alter, transform, modify, or arrange the
+           Original Work, thereby creating derivative works
+           ("Derivative Works") based upon the Original Work;
+
+        c) to distribute or communicate copies of the Original Work and
+           Derivative Works to the public, under any license of your choice
+           that does not contradict the terms and conditions, including
+           Licensor’s reserved rights and remedies, in this Academic Free
+           License;
+
+        d) to perform the Original Work publicly; and
+
+        e) to display the Original Work publicly.
+
+    2. Grant of Patent License. Licensor grants You a worldwide, royalty-free,
+    non-exclusive, sublicensable license, under patent claims owned or
+    controlled by the Licensor that are embodied in the Original Work as
+    furnished by the Licensor, for the duration of the patents, to make, use,
+    sell, offer for sale, have made, and import the Original Work and
+    Derivative Works.
+
+    3. Grant of Source Code License. The term "Source Code" means the
+    preferred form of the Original Work for making modifications to it and
+    all available documentation describing how to modify the Original Work.
+    Licensor agrees to provide a machine-readable copy of the Source Code
+    of the Original Work along with each copy of the Original Work that
+    Licensor distributes. Licensor reserves the right to satisfy this
+    obligation by placing a machine-readable copy of the Source Code in an
+    information repository reasonably calculated to permit inexpensive and
+    convenient access by You for as long as Licensor continues to distribute
+    the Original Work.
+
+    4. Exclusions From License Grant. Neither the names of Licensor, nor the
+    names of any contributors to the Original Work, nor any of their
+    trademarks or service marks, may be used to endorse or promote products
+    derived from this Original Work without express prior permission of the
+    Licensor. Except as expressly stated herein, nothing in this License
+    grants any license to Licensor’s trademarks, copyrights, patents, trade
+    secrets or any other intellectual property. No patent license is granted
+    to make, use, sell, offer for sale, have made, or import embodiments of
+    any patent claims other than the licensed claims defined in Section 2.
+    No license is granted to the trademarks of Licensor even if such marks
+    are included in the Original Work. Nothing in this License shall be
+    interpreted to prohibit Licensor from licensing under terms different
+    from this License any Original Work that Licensor otherwise would have
+    a right to license.
+
+    5. External Deployment. The term "External Deployment" means the use,
+    distribution, or communication of the Original Work or Derivative Works
+    in any way such that the Original Work or Derivative Works may be used
+    by anyone other than You, whether those works are distributed or
+    communicated to those persons or made available as an application
+    intended for use over a network. As an express condition for the grants
+    of license hereunder, You must treat any External Deployment by You of
+    the Original Work or a Derivative Work as a distribution under section
+    1(c).
+
+    6. Attribution Rights. You must retain, in the Source Code of any
+    Derivative Works that You create, all copyright, patent, or trademark
+    notices from the Source Code of the Original Work, as well as any
+    notices of licensing and any descriptive text identified therein as an
+    "Attribution Notice." You must cause the Source Code for any Derivative
+    Works that You create to carry a prominent Attribution Notice reasonably
+    calculated to inform recipients that You have modified the Original Work.
+
+    7. Warranty of Provenance and Disclaimer of Warranty. Licensor warrants
+    that the copyright in and to the Original Work and the patent rights
+    granted herein by Licensor are owned by the Licensor or are sublicensed
+    to You under the terms of this License with the permission of the
+    contributor(s) of those copyrights and patent rights. Except as expressly
+    stated in the immediately preceding sentence, the Original Work is
+    provided under this License on an "AS IS" BASIS and WITHOUT WARRANTY,
+    either express or implied, including, without limitation, the warranties
+    of non-infringement, merchantability or fitness for a particular purpose.
+    THE ENTIRE RISK AS TO THE QUALITY OF THE ORIGINAL WORK IS WITH YOU. This
+    DISCLAIMER OF WARRANTY constitutes an essential part of this License. No
+    license to the Original Work is granted by this License except under this
+    disclaimer.
+
+    8. Limitation of Liability. Under no circumstances and under no legal
+    theory, whether in tort (including negligence), contract, or otherwise,
+    shall the Licensor be liable to anyone for any indirect, special,
+    incidental, or consequential damages of any character arising as a result
+    of this License or the use of the Original Work including, without
+    limitation, damages for loss of goodwill, work stoppage, computer failure
+    or malfunction, or any and all other commercial damages or losses. This
+    limitation of liability shall not apply to the extent applicable law
+    prohibits such limitation.
+
+    9. Acceptance and Termination. If, at any time, You expressly assented to
+    this License, that assent indicates your clear and irrevocable acceptance
+    of this License and all of its terms and conditions. If You distribute or
+    communicate copies of the Original Work or a Derivative Work, You must
+    make a reasonable effort under the circumstances to obtain the express
+    assent of recipients to the terms of this License. This License
+    conditions your rights to undertake the activities listed in Section 1,
+    including your right to create Derivative Works based upon the Original
+    Work, and doing so without honoring these terms and conditions is
+    prohibited by copyright law and international treaty. Nothing in this
+    License is intended to affect copyright exceptions and limitations
+    (including “fair use” or “fair dealing”). This License shall terminate
+    immediately and You may no longer exercise any of the rights granted to
+    You by this License upon your failure to honor the conditions in Section
+    1(c).
+
+    10. Termination for Patent Action. This License shall terminate
+    automatically and You may no longer exercise any of the rights granted to
+    You by this License as of the date You commence an action, including a
+    cross-claim or counterclaim, against Licensor or any licensee alleging
+    that the Original Work infringes a patent. This termination provision
+    shall not apply for an action alleging patent infringement by
+    combinations of the Original Work with other software or hardware.
+
+    11. Jurisdiction, Venue and Governing Law. Any action or suit relating to
+    this License may be brought only in the courts of a jurisdiction wherein
+    the Licensor resides or in which Licensor conducts its primary business,
+    and under the laws of that jurisdiction excluding its conflict-of-law
+    provisions. The application of the United Nations Convention on Contracts
+    for the International Sale of Goods is expressly excluded. Any use of the
+    Original Work outside the scope of this License or after its termination
+    shall be subject to the requirements and penalties of copyright or patent
+    law in the appropriate jurisdiction. This section shall survive the
+    termination of this License.
+
+    12. Attorneys’ Fees. In any action to enforce the terms of this License
+    or seeking damages relating thereto, the prevailing party shall be
+    entitled to recover its costs and expenses, including, without limitation,
+    reasonable attorneys' fees and costs incurred in connection with such
+    action, including any appeal of such action. This section shall survive
+    the termination of this License.
+
+    13. Miscellaneous. If any provision of this License is held to be
+    unenforceable, such provision shall be reformed only to the extent
+    necessary to make it enforceable.
+
+    14. Definition of "You" in This License. "You" throughout this License,
+    whether in upper or lower case, means an individual or a legal entity
+    exercising rights under, and complying with all of the terms of, this
+    License. For legal entities, "You" includes any entity that controls, is
+    controlled by, or is under common control with you. For purposes of this
+    definition, "control" means (i) the power, direct or indirect, to cause
+    the direction or management of such entity, whether by contract or
+    otherwise, or (ii) ownership of fifty percent (50%) or more of the
+    outstanding shares, or (iii) beneficial ownership of such entity.
+
+    15. Right to Use. You may use the Original Work in all ways not otherwise
+    restricted or conditioned by this License or by law, and Licensor
+    promises not to interfere with or be responsible for such uses by You.
+
+    16. Modification of This License. This License is Copyright © 2005
+    Lawrence Rosen. Permission is granted to copy, distribute, or communicate
+    this License without modification. Nothing in this License permits You to
+    modify this License as applied to the Original Work or to Derivative
+    Works. However, You may modify the text of this License and copy,
+    distribute or communicate your modified version (the "Modified License")
+    and apply it to other original works of authorship subject to the
+    following conditions: (i) You may not indicate in any way that your
+    Modified License is the "Academic Free License" or "AFL" and you may not
+    use those names in the name of your Modified License; (ii) You must
+    replace the notice specified in the first paragraph above with the notice
+    "Licensed under <insert your license name here>" or with a notice of your
+    own that is not confusingly similar to the notice in this License; and
+    (iii) You may not claim that your original works are open source software
+    unless your Modified License has been approved by Open Source Initiative
+    (OSI) and You comply with its license review and certification process.
+
+    [3]. BSD license
+    Copyright (c) 2004-2008, Tango contributors
+    All rights reserved.
+
+     * Redistribution and use in source and binary forms, with or without
+       modification, are permitted provided that the following conditions
+       are met:
+     * Redistributions of source code must retain the above copyright notice,
+       this list of conditions and the following disclaimer.
+     * Redistributions in binary form must reproduce the above copyright
+       notice, this list of conditions and the following disclaimer in the
+       documentation and/or other materials provided with the distribution.
+     * Neither the name of the <ORGANIZATION> nor the names of its
+       contributors may be used to endorse or promote products derived from
+       this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+    TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+    PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+    LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+    NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+    SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+The Debian packaging is:
+
+    Copyright (C) 2009 Arthur Loiret <aloiret@debian.org>
+
+and is licensed under the GPL version 3,
+see `/usr/share/common-licenses/GPL-3'.
--- libtango-0.99.9.dfsg.orig/debian/libtango-ldc-dev.install
+++ libtango-0.99.9.dfsg/debian/libtango-ldc-dev.install
@@ -0,0 +1,2 @@
+objs/base/libtango-base-ldc.a			/usr/lib/d/
+objs/user/libtango-user-ldc.a			/usr/lib/d/
--- libtango-0.99.9.dfsg.orig/debian/rules
+++ libtango-0.99.9.dfsg/debian/rules
@@ -0,0 +1,56 @@
+#!/usr/bin/make -f
+
+include /usr/share/quilt/quilt.make
+
+versions := ldc
+
+ldc_compiler := /usr/bin/ldc
+
+build: build-stamp
+build-stamp: $(foreach version,$(versions),build-stamp-$(version))
+	touch $@
+build-stamp-%: patch
+	dh_testdir
+	rm -rf stamps objs
+	mkdir stamps objs
+	$(MAKE) compiler=$*
+	touch $@
+
+clean: unpatch
+	dh_testdir
+	dh_testroot
+	rm -rf stamps objs
+	find build -type d -name "objs-*" | xargs rm -rf
+	rm -f build-stamp-*
+	dh_clean
+
+install: build
+	dh_testdir
+	dh_prep
+	dh_install
+	for v in $(versions) ; do \
+		mkdir -p debian/libtango-$${v}-dev/usr/include/d ; \
+		mv debian/libtango-headers/usr/include/d/tango/core/rt/compiler/$${v} \
+			debian/libtango-$${v}-dev/usr/include/d/$${v} ; \
+	done
+	rm -rf debian/libtango-headers/usr/include/d/tango/core/rt/compiler
+
+binary-indep: build install
+
+binary-arch: build install
+	dh_testdir
+	dh_testroot
+	dh_installdocs
+	dh_installchangelogs
+	dh_link
+	dh_strip
+	dh_compress
+	dh_fixperms
+	dh_installdeb
+	dh_shlibdeps
+	dh_gencontrol
+	dh_md5sums
+	dh_builddeb
+
+binary: binary-indep binary-arch
+.PHONY: build clean binary-indep binary-arch binary
--- libtango-0.99.9.dfsg.orig/debian/compat
+++ libtango-0.99.9.dfsg/debian/compat
@@ -0,0 +1 @@
+7
--- libtango-0.99.9.dfsg.orig/debian/libtango-headers.install
+++ libtango-0.99.9.dfsg/debian/libtango-headers.install
@@ -0,0 +1,2 @@
+object.di							/usr/include/d/
+tango								/usr/include/d/
--- libtango-0.99.9.dfsg.orig/debian/control
+++ libtango-0.99.9.dfsg/debian/control
@@ -0,0 +1,30 @@
+Source: libtango
+Section: libdevel
+Priority: extra
+Maintainer: Arthur Loiret <aloiret@debian.org>
+Build-Depends: debhelper (>= 7), quilt (>= 0.40), ldc (>= 0.9.1+hg1634)
+Standards-Version: 3.8.4
+Homepage: http://dsource.org/projects/tango
+
+Package: libtango-headers
+Architecture: all
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: The Developer's Library for D -- Headers
+ Tango is a cross-platform open-source software library, written in the
+ D programming language for D programmers. It is structured as a cohesive
+ and comprehensive library for general purpose usage, and is supported by a growing number of recognized D enthusiasts.
+ .
+ This package provides the library headers.
+
+Package: libtango-ldc-dev
+Architecture: any
+Depends: ${shlibs:Depends}, ${misc:Depends}, libtango-headers (= ${source:Version})
+Recommends: ldc (>= 0.9.1)
+Description: The Developer's Library for D -- LDC version
+ Tango is a cross-platform open-source software library, written in the
+ D programming language for D programmers. It is structured as a cohesive
+ and comprehensive library for general purpose usage, and is supported by
+ a growing number of recognized D enthusiasts.
+ .
+ This package contains the ldc version of the library.
+
--- libtango-0.99.9.dfsg.orig/debian/changelog
+++ libtango-0.99.9.dfsg/debian/changelog
@@ -0,0 +1,34 @@
+libtango (0.99.9.dfsg-1) unstable; urgency=low
+
+  * New upstream release.
+    - debian/patches/02_compress.diff: Refresh.
+    - debian/patches/03_ldc.diff: Add.
+  * debian/control:
+    - Build-Depends on ldc (>= 0.9.1+hg1634).
+    - Bump Standards-Version to 3.8.4.
+
+ -- Arthur Loiret <aloiret@debian.org>  Sat, 27 Feb 2010 11:00:12 +0000
+
+libtango (0.99.8+svn5259.dfsg-2) unstable; urgency=low
+
+  * Build-Depends on ldc (>= 0.9.1+hg1596-3).
+  * Move compiler-specific headers from libtango-headers to
+    libtango-$(compiler)-dev.
+  * Make the Makefile less verbose.
+
+ -- Arthur Loiret <aloiret@debian.org>  Mon, 07 Dec 2009 18:18:43 +0000
+
+libtango (0.99.8+svn5259.dfsg-1) unstable; urgency=low
+
+  * Upload to unstable.  Closes: #508072.
+  * New upstream snapshot.
+  * debian/patches/01_versions.diff: Fix build.
+  * debian/patches/02_compress.diff: Temporary disable compression modules.
+
+ -- Arthur Loiret <aloiret@debian.org>  Sat, 05 Dec 2009 17:52:30 +0000
+
+libtango (0.99.8+svn5027.dfsg-1~exp1) experimental; urgency=low
+
+  * Initial release.
+
+ -- Arthur Loiret <aloiret@debian.org>  Sat, 07 Nov 2009 13:07:31 +0000
--- libtango-0.99.9.dfsg.orig/debian/patches/03_ldc.diff
+++ libtango-0.99.9.dfsg/debian/patches/03_ldc.diff
@@ -0,0 +1,53 @@
+--- a/tango/core/rt/compiler/ldc/rt/lifetime.d
++++ b/tango/core/rt/compiler/ldc/rt/lifetime.d
+@@ -786,6 +786,7 @@
+     return *cast(long*)px;
+ }
+ 
+++/
+ 
+ /**
+  *
+@@ -849,10 +850,11 @@
+ 
+ 
+ /**
+- *
++ * Appends a single element to an array.
+  */
+-extern (C) byte[] _d_arrayappendcT(TypeInfo ti, ref byte[] x, ...)
++extern (C) byte[] _d_arrayappendcT(TypeInfo ti, void* array, void* element)
+ {
++    auto x = cast(byte[]*)array;
+     auto sizeelem = ti.next.tsize();            // array element size
+     auto info = gc_query(x.ptr);
+     auto length = x.length;
+@@ -879,16 +881,16 @@
+         assert(newcap >= newlength * sizeelem);
+         newdata = cast(byte *)gc_malloc(newcap + 1, info.attr);
+         memcpy(newdata, x.ptr, length * sizeelem);
+-        (cast(void**)(&x))[1] = newdata;
++        (cast(void**)x)[1] = newdata;
+     }
+   L1:
+-    byte *argp = cast(byte *)(&ti + 2);
++    byte *argp = cast(byte *)element;
+ 
+-    *cast(size_t *)&x = newlength;
++    *cast(size_t *)x = newlength;
+     x.ptr[length * sizeelem .. newsize] = argp[0 .. sizeelem];
+     assert((cast(size_t)x.ptr & 15) == 0);
+     assert(gc_sizeOf(x.ptr) > x.length * sizeelem);
+-    return x;
++    return *x;
+ }
+ 
+ 
+@@ -1128,6 +1130,7 @@
+     return result;
+ }
+ 
++/+
+ 
+ /**
+  *
--- libtango-0.99.9.dfsg.orig/debian/patches/04_device.diff
+++ libtango-0.99.9.dfsg/debian/patches/04_device.diff
@@ -0,0 +1,11 @@
+--- a/tango/io/device/Device.d
++++ b/tango/io/device/Device.d
+@@ -274,7 +274,7 @@
+                 {
+                         if (handle >= 0)
+                            {
+-                           if (scheduler)
++                           //if (scheduler)
+                                // TODO Not supported on Posix
+                                // scheduler.close (handle, toString);
+                            posix.close (handle);
--- libtango-0.99.9.dfsg.orig/debian/patches/02_compress.diff
+++ libtango-0.99.9.dfsg/debian/patches/02_compress.diff
@@ -0,0 +1,6727 @@
+--- a/tango/io/compress/BzipStream.d
++++ /dev/null
+@@ -1,616 +0,0 @@
+-/*******************************************************************************
+-
+-    copyright:  Copyright (C) 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Initial release: July 2007
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-module tango.io.compress.BzipStream;
+-
+-private import tango.io.compress.c.bzlib;
+-
+-private import tango.core.Exception : IOException;
+-
+-private import tango.io.device.Conduit : InputFilter, OutputFilter;
+-
+-private import tango.io.model.IConduit : InputStream, OutputStream, IConduit;
+-
+-private
+-{
+-    /* This constant controls the size of the input/output buffers we use
+-     * internally.  There's no particular reason to pick this size.  It might
+-     * be an idea to run some benchmarks to work out what a good number is.
+-     */
+-    private enum { BUFFER_SIZE = 4*1024 };
+-
+-    private enum { DEFAULT_BLOCKSIZE = 9 };
+-    private enum { DEFAULT_WORKFACTOR = 0 };
+-}
+-
+-/*******************************************************************************
+-  
+-    This output filter can be used to perform compression of data into a bzip2
+-    stream.
+-
+-*******************************************************************************/
+-
+-class BzipOutput : OutputFilter
+-{
+-    /***************************************************************************
+-
+-        This enumeration represents several pre-defined compression block
+-        sizes, measured in hundreds of kilobytes.  See the documentation for
+-        the BzipOutput class' constructor for more details.
+-
+-    ***************************************************************************/
+-
+-    enum BlockSize : int
+-    {
+-        Normal = 9,
+-        Fast = 1,
+-        Best = 9,
+-    }
+-
+-    private
+-    {
+-        bool bzs_valid = false;
+-        bz_stream bzs;
+-        ubyte[] out_chunk;
+-        size_t _written = 0;
+-    }
+-
+-    /***************************************************************************
+-
+-        Constructs a new bzip2 compression filter.  You need to pass in the
+-        stream that the compression filter will write to.  If you are using
+-        this filter with a conduit, the idiom to use is:
+-
+-        ---
+-        auto output = new BzipOutput(myConduit.output);
+-        output.write(myContent);
+-        ---
+-
+-        blockSize relates to the size of the window bzip2 uses when
+-        compressing data and determines how much memory is required to
+-        decompress a stream.  It is measured in hundreds of kilobytes.
+-        
+-        ccording to the bzip2 documentation, there is no dramatic difference
+-        between the various block sizes, so the default should suffice in most
+-        cases.
+-
+-        BlockSize.Normal (the default) is the same as BlockSize.Best
+-        (or 9).  The blockSize may be any integer between 1 and 9 inclusive.
+-
+-    ***************************************************************************/
+-
+-    this(OutputStream stream, int blockSize = BlockSize.Normal)
+-    {
+-        init(stream, blockSize);
+-        scope(failure) kill_bzs();
+-
+-        super(stream);
+-        out_chunk = new ubyte[BUFFER_SIZE];
+-    }
+-
+-    /*
+-     * This method performs initialisation for the stream.  Note that this may
+-     * be called more than once for an instance, provided the instance is
+-     * either new or as part of a call to reset.
+-     */
+-    private void init(OutputStream stream, int blockSize)
+-    {
+-        if( blockSize < 1 || blockSize > 9 )
+-            throw new BzipException("bzip2 block size must be between"
+-                    " 1 and 9");
+-
+-        auto ret = BZ2_bzCompressInit(&bzs, blockSize, 0, DEFAULT_WORKFACTOR);
+-        if( ret != BZ_OK )
+-            throw new BzipException(ret);
+-
+-        bzs_valid = true;
+-    }
+-
+-    ~this()
+-    {
+-        if( bzs_valid )
+-            kill_bzs();
+-    }
+-
+-    /***************************************************************************
+-        
+-        Resets and re-initialises this instance.
+-
+-        If you are creating compression streams inside a loop, you may wish to
+-        use this method to re-use a single instance.  This prevents the
+-        potentially costly re-allocation of internal buffers.
+-
+-        The stream must have already been closed before calling reset.
+-
+-    ***************************************************************************/ 
+-
+-    void reset(OutputStream stream, int blockSize = BlockSize.Normal)
+-    {
+-        // If the stream is still valid, bail.
+-        if( bzs_valid )
+-            throw new BzipStillOpenException;
+-
+-        init(stream, blockSize);
+-    }
+-
+-    /***************************************************************************
+-
+-        Compresses the given data to the underlying conduit.
+-
+-        Returns the number of bytes from src that were compressed, which may
+-        be less than given.
+-
+-    ***************************************************************************/
+-
+-    size_t write(void[] src)
+-    {
+-        check_valid();
+-        scope(failure) kill_bzs();
+-
+-        bzs.avail_in = src.length;
+-        bzs.next_in = cast(ubyte*)src.ptr;
+-
+-        do
+-        {
+-            bzs.avail_out = out_chunk.length;
+-            bzs.next_out = out_chunk.ptr;
+-
+-            auto ret = BZ2_bzCompress(&bzs, BZ_RUN);
+-            if( ret != BZ_RUN_OK )
+-                throw new BzipException(ret);
+-
+-            // Push the compressed bytes out to the stream, until it's either
+-            // written them all, or choked.
+-            auto have = out_chunk.length-bzs.avail_out;
+-            auto out_buffer = out_chunk[0..have];
+-            do
+-            {
+-                auto w = sink.write(out_buffer);
+-                if( w == IConduit.Eof )
+-                    return w;
+-
+-                out_buffer = out_buffer[w..$];
+-                _written += w;
+-            }
+-            while( out_buffer.length > 0 );
+-        }
+-        // Loop while we are still using up the whole output buffer
+-        while( bzs.avail_out == 0 );
+-
+-        assert( bzs.avail_in == 0, "failed to compress all provided data" );
+-
+-        return src.length;
+-    }
+-
+-    /***************************************************************************
+-
+-        This read-only property returns the number of compressed bytes that
+-        have been written to the underlying stream.  Following a call to
+-        either close or commit, this will contain the total compressed size of
+-        the input data stream.
+-
+-    ***************************************************************************/
+-
+-    size_t written()
+-    {
+-        return _written;
+-    }
+-
+-    /***************************************************************************
+-
+-        Close the compression stream.  This will cause any buffered content to
+-        be committed to the underlying stream.
+-
+-    ***************************************************************************/
+-
+-    void close()
+-    {
+-        if( bzs_valid ) commit;
+-        super.close;
+-    }
+-
+-    /***************************************************************************
+-
+-        Purge any buffered content.  Calling this will implicitly end the
+-        bzip2 stream, so it should not be called until you are finished
+-        compressing data.  Any calls to either write or commit after a
+-        compression filter has been committed will throw an exception.
+-
+-        The only difference between calling this method and calling close is
+-        that the underlying stream will not be closed.
+-
+-    ***************************************************************************/
+-
+-    void commit()
+-    {
+-        check_valid();
+-        scope(failure) kill_bzs();
+-
+-        bzs.avail_in = 0;
+-        bzs.next_in = null;
+-
+-        bool finished = false;
+-
+-        do
+-        {
+-            bzs.avail_out = out_chunk.length;
+-            bzs.next_out = out_chunk.ptr;
+-
+-            auto ret = BZ2_bzCompress(&bzs, BZ_FINISH);
+-            switch( ret )
+-            {
+-                case BZ_FINISH_OK:
+-                    break;
+-
+-                case BZ_STREAM_END:
+-                    finished = true;
+-                    break;
+-
+-                default:
+-                    throw new BzipException(ret);
+-            }
+-
+-            auto have = out_chunk.length - bzs.avail_out;
+-            auto out_buffer = out_chunk[0..have];
+-            if( have > 0 )
+-            {
+-                do
+-                {
+-                    auto w = sink.write(out_buffer);
+-                    if( w == IConduit.Eof )
+-                        return;
+-
+-                    out_buffer = out_buffer[w..$];
+-                    _written += w;
+-                }
+-                while( out_buffer.length > 0 );
+-            }
+-        }
+-        while( !finished );
+-
+-        kill_bzs();
+-    }
+-
+-    // Disable seeking
+-    override long seek(long offset, Anchor anchor = Anchor.Begin)
+-    {
+-        throw new IOException("BzipOutput does not support seek requests");
+-    }
+-
+-    // This function kills the stream: it deallocates the internal state, and
+-    // unsets the bzs_valid flag.
+-    private void kill_bzs()
+-    {
+-        check_valid();
+-
+-        BZ2_bzCompressEnd(&bzs);
+-        bzs_valid = false;
+-    }
+-
+-    // Asserts that the stream is still valid and usable (except that this
+-    // check doesn't get elided with -release).
+-    private void check_valid()
+-    {
+-        if( !bzs_valid )
+-            throw new BzipClosedException;
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This input filter can be used to perform decompression of bzip2 streams.
+-
+-*******************************************************************************/
+-
+-class BzipInput : InputFilter
+-{
+-    private
+-    {
+-        bool bzs_valid = false;
+-        bz_stream bzs;
+-        ubyte[] in_chunk;
+-    }
+-
+-    /***************************************************************************
+-
+-        Constructs a new bzip2 decompression filter.  You need to pass in the
+-        stream that the decompression filter will read from.  If you are using
+-        this filter with a conduit, the idiom to use is:
+-
+-        ---
+-        auto input = new BzipInput(myConduit.input);
+-        input.read(myContent);
+-        ---
+-
+-        The small argument, if set to true, instructs bzip2 to perform
+-        decompression using half the regular amount of memory, at the cost of
+-        running at half speed.
+-
+-    ***************************************************************************/
+-
+-    this(InputStream stream, bool small=false)
+-    {
+-        init(stream, small);
+-        scope(failure) kill_bzs();
+-
+-        super(stream);
+-        in_chunk = new ubyte[BUFFER_SIZE];
+-    }
+-
+-    /*
+-     * This method performs initialisation for the stream.  Note that this may
+-     * be called more than once for an instance, provided the instance is
+-     * either new or as part of a call to reset.
+-     */
+-    private void init(InputStream stream, bool small)
+-    {
+-        auto ret = BZ2_bzDecompressInit(&bzs, 0, small?1:0);
+-        if( ret != BZ_OK )
+-            throw new BzipException(ret);
+-
+-        bzs_valid = true;
+-    }
+-
+-    ~this()
+-    {
+-        if( bzs_valid )
+-            kill_bzs();
+-    }
+-
+-    /***************************************************************************
+-        
+-        Resets and re-initialises this instance.
+-
+-        If you are creating compression streams inside a loop, you may wish to
+-        use this method to re-use a single instance.  This prevents the
+-        potentially costly re-allocation of internal buffers.
+-
+-        The stream must have already been closed before calling reset.
+-
+-    ***************************************************************************/ 
+-
+-    void reset(InputStream stream, bool small=false)
+-    {
+-        // If the stream is still valid, bail.
+-        if( bzs_valid )
+-            throw new BzipStillOpenException;
+-
+-        init(stream, small);
+-    }
+-
+-    /***************************************************************************
+-
+-        Decompresses data from the underlying conduit into a target array.
+-
+-        Returns the number of bytes stored into dst, which may be less than
+-        requested.
+-
+-    ***************************************************************************/ 
+-
+-    size_t read(void[] dst)
+-    {
+-        if( !bzs_valid )
+-            return IConduit.Eof;
+-
+-        scope(failure) kill_bzs();
+-
+-        bool finished = false;
+-
+-        bzs.avail_out = dst.length;
+-        bzs.next_out = cast(ubyte*)dst.ptr;
+-
+-        do
+-        {
+-            if( bzs.avail_in == 0 )
+-            {
+-                auto len = source.read(in_chunk);
+-                if( len == IConduit.Eof )
+-                    return IConduit.Eof;
+-
+-                bzs.avail_in = len;
+-                bzs.next_in = in_chunk.ptr;
+-            }
+-
+-            auto ret = BZ2_bzDecompress(&bzs);
+-            if( ret == BZ_STREAM_END )
+-            {
+-                kill_bzs();
+-                finished = true;
+-            }
+-            else if( ret != BZ_OK )
+-                throw new BzipException(ret);
+-        }
+-        while( !finished && bzs.avail_out > 0 );
+-
+-        return dst.length - bzs.avail_out;
+-    }
+-
+-    /***************************************************************************
+-
+-        Closes the compression stream.
+-
+-    ***************************************************************************/ 
+-
+-    override void close()
+-    {
+-        check_valid();
+-
+-        // Kill the stream.  Don't deallocate the buffer since the user may
+-        // yet reset the stream.
+-        kill_bzs();
+-        super.close();
+-    }
+-
+-    // Disable seeking
+-    override long seek(long offset, Anchor anchor = Anchor.Begin)
+-    {
+-        throw new IOException("BzipOutput does not support seek requests");
+-    }
+-
+-    // This function kills the stream: it deallocates the internal state, and
+-    // unsets the bzs_valid flag.
+-    private void kill_bzs()
+-    {
+-        check_valid();
+-
+-        BZ2_bzDecompressEnd(&bzs);
+-        bzs_valid = false;
+-    }
+-
+-    // Asserts that the stream is still valid and usable (except that this
+-    // check doesn't get elided with -release).
+-    private void check_valid()
+-    {
+-        if( !bzs_valid )
+-            throw new BzipClosedException;
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This exception is thrown when an error occurs in the underlying bzip2
+-    library.
+-
+-*******************************************************************************/
+-
+-class BzipException : IOException
+-{
+-    this(in int code)
+-    {
+-        super(codeName(code));
+-    }
+-
+-    this(char[] msg)
+-    {
+-        super(msg);
+-    }
+-
+-    private char[] codeName(in int code)
+-    {
+-        char[] name;
+-
+-        switch( code )
+-        {
+-            case BZ_OK:                 name = "BZ_OK";                 break;
+-            case BZ_RUN_OK:             name = "BZ_RUN_OK";             break;
+-            case BZ_FLUSH_OK:           name = "BZ_FLUSH_OK";           break;
+-            case BZ_STREAM_END:         name = "BZ_STREAM_END";         break;
+-            case BZ_SEQUENCE_ERROR:     name = "BZ_SEQUENCE_ERROR";     break;
+-            case BZ_PARAM_ERROR:        name = "BZ_PARAM_ERROR";        break;
+-            case BZ_MEM_ERROR:          name = "BZ_MEM_ERROR";          break;
+-            case BZ_DATA_ERROR:         name = "BZ_DATA_ERROR";         break;
+-            case BZ_DATA_ERROR_MAGIC:   name = "BZ_DATA_ERROR_MAGIC";   break;
+-            case BZ_IO_ERROR:           name = "BZ_IO_ERROR";           break;
+-            case BZ_UNEXPECTED_EOF:     name = "BZ_UNEXPECTED_EOF";     break;
+-            case BZ_OUTBUFF_FULL:       name = "BZ_OUTBUFF_FULL";       break;
+-            case BZ_CONFIG_ERROR:       name = "BZ_CONFIG_ERROR";       break;
+-            default:                    name = "BZ_UNKNOWN";
+-        }
+-
+-        return name;
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This exception is thrown if you attempt to perform a read, write or flush
+-    operation on a closed bzip2 filter stream.  This can occur if the input
+-    stream has finished, or an output stream was flushed.
+-
+-*******************************************************************************/
+-
+-class BzipClosedException : IOException
+-{
+-    this()
+-    {
+-        super("cannot operate on closed bzip2 stream");
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This exception is thrown if you attempt to reset a compression stream that
+-    is still open.  You must either close or commit a stream before it can be
+-    reset.
+-
+-*******************************************************************************/
+-
+-class BzipStillOpenException : IOException
+-{
+-    this()
+-    {
+-        super("cannot reset an open bzip2 stream");
+-    }
+-}
+-
+-/* *****************************************************************************
+-
+-    This section contains a simple unit test for this module.  It is hidden
+-    behind a version statement because it introduces additional dependencies.
+-
+-***************************************************************************** */
+-
+-debug(UnitTest):
+-
+-import tango.io.device.Array : Array;
+-
+-unittest
+-{
+-    const char[] message =
+-        "All dwarfs are by nature dutiful, serious, literate, obedient "
+-        "and thoughtful people whose only minor failing is a tendency, "
+-        "after one drink, to rush at enemies screaming \"Arrrrrrgh!\" and "
+-        "axing their legs off at the knee.";
+-
+-    const ubyte[] message_z = [
+-        0x42, 0x5a, 0x68, 0x39, 0x31, 0x41, 0x59, 0x26,
+-        0x53, 0x59, 0x40, 0x98, 0xbe, 0xaa, 0x00, 0x00,
+-        0x16, 0xd5, 0x80, 0x10, 0x00, 0x70, 0x05, 0x20,
+-        0x00, 0x3f, 0xef, 0xde, 0xe0, 0x30, 0x00, 0xac,
+-        0xd8, 0x8a, 0x3d, 0x34, 0x6a, 0x6d, 0x4c, 0x4f,
+-        0x24, 0x31, 0x0d, 0x08, 0x98, 0x9b, 0x48, 0x9a,
+-        0x7a, 0x80, 0x00, 0x06, 0xa6, 0xd2, 0xa7, 0xe9,
+-        0xaa, 0x37, 0xa8, 0xd4, 0xf5, 0x3f, 0x54, 0x63,
+-        0x51, 0xe9, 0x2d, 0x4b, 0x99, 0xe1, 0xcc, 0xca,
+-        0xda, 0x75, 0x04, 0x42, 0x14, 0xc8, 0x6a, 0x8e,
+-        0x23, 0xc1, 0x3e, 0xb1, 0x8a, 0x16, 0xd2, 0x55,
+-        0x9a, 0x3e, 0x56, 0x1a, 0xb1, 0x83, 0x11, 0xa6,
+-        0x50, 0x4f, 0xd3, 0xed, 0x21, 0x40, 0xaa, 0xd1,
+-        0x95, 0x2c, 0xda, 0xcb, 0xb7, 0x0e, 0xce, 0x65,
+-        0xfc, 0x63, 0xf2, 0x88, 0x5b, 0x36, 0xda, 0xf0,
+-        0xf5, 0xd2, 0x9c, 0xe6, 0xf1, 0x87, 0x12, 0x87,
+-        0xce, 0x56, 0x0c, 0xf5, 0x65, 0x4d, 0x2e, 0xd6,
+-        0x27, 0x61, 0x2b, 0x74, 0xcd, 0x5e, 0x3b, 0x02,
+-        0x42, 0x4e, 0x0b, 0x80, 0xa8, 0x70, 0x04, 0x48,
+-        0xfb, 0x93, 0x4c, 0x41, 0xa8, 0x2a, 0xdf, 0xf2,
+-        0x67, 0x37, 0x28, 0xad, 0x38, 0xd4, 0x5c, 0xd6,
+-        0x34, 0x8b, 0x49, 0x5e, 0x90, 0xb2, 0x06, 0xce,
+-        0x0a, 0x83, 0x29, 0x84, 0x20, 0xd7, 0x5f, 0xc5,
+-        0xdc, 0x91, 0x4e, 0x14, 0x24, 0x10, 0x26, 0x2f,
+-        0xaa, 0x80];
+-
+-    scope cond = new Array(1024, 1024);
+-    scope comp = new BzipOutput(cond);
+-    comp.write(message);
+-    comp.close;
+-
+-    assert( comp.written == message_z.length );
+-
+-    assert( message_z == cast(ubyte[])(cond.slice) );
+-
+-    scope decomp = new BzipInput(cond);
+-    auto buffer = new ubyte[256];
+-    buffer = buffer[0 .. decomp.read(buffer)];
+-
+-    assert( cast(ubyte[])message == buffer );
+-}
+-
+--- a/tango/io/compress/ZlibStream.d
++++ /dev/null
+@@ -1,971 +0,0 @@
+-/*******************************************************************************
+-
+-    copyright:  Copyright (C) 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    author:     Daniel Keep
+-
+-    version:    Feb 08: Added support for different stream encodings, removed
+-                        old "window bits" ctors.
+-                        
+-                Dec 07: Added support for "window bits", needed for Zip support.
+-                
+-                Jul 07: Initial release.
+-
+-*******************************************************************************/
+-
+-module tango.io.compress.ZlibStream;
+-
+-private import tango.io.compress.c.zlib;
+-
+-private import tango.stdc.stringz : fromStringz;
+-
+-private import tango.core.Exception : IOException;
+-
+-private import tango.io.device.Conduit : InputFilter, OutputFilter;
+-
+-private import tango.io.model.IConduit : InputStream, OutputStream, IConduit;
+-
+-private import tango.text.convert.Integer : toString;
+-
+-
+-/* This constant controls the size of the input/output buffers we use
+- * internally.  This should be a fairly sane value (it's suggested by the zlib
+- * documentation), that should only need changing for memory-constrained
+- * platforms/use cases.
+- *
+- * An alternative would be to make the chunk size a template parameter to the
+- * filters themselves, but Tango already has more than enough template
+- * parameters getting in the way :)
+- */
+-
+-private enum { CHUNKSIZE = 256 * 1024 };
+-
+-/* This constant specifies the default windowBits value.  This is taken from
+- * documentation in zlib.h.  It shouldn't break anything if zlib changes to
+- * a different default.
+- */
+-
+-private enum { WINDOWBITS_DEFAULT = 15 };
+-
+-/*******************************************************************************
+-  
+-    This input filter can be used to perform decompression of zlib streams.
+-
+-*******************************************************************************/
+-
+-class ZlibInput : InputFilter
+-{
+-    /***************************************************************************
+-    
+-        This enumeration allows you to specify the encoding of the compressed
+-        stream.
+-    
+-    ***************************************************************************/
+-
+-    enum Encoding : int
+-    {
+-        /**
+-         *  The code should attempt to automatically determine what the encoding
+-         *  of the stream should be.  Note that this cannot detect the case
+-         *  where the stream was compressed with no encoding.
+-         */
+-        Guess,
+-        /**
+-         *  Stream has zlib encoding.
+-         */
+-        Zlib,
+-        /**
+-         *  Stream has gzip encoding.
+-         */
+-        Gzip,
+-        /**
+-         *  Stream has no encoding.
+-         */
+-        None
+-    }
+-
+-    private
+-    {
+-        /* Used to make sure we don't try to perform operations on a dead
+-         * stream. */
+-        bool zs_valid = false;
+-
+-        z_stream zs;
+-        ubyte[] in_chunk;
+-    }
+-    
+-    /***************************************************************************
+-
+-        Constructs a new zlib decompression filter.  You need to pass in the
+-        stream that the decompression filter will read from.  If you are using
+-        this filter with a conduit, the idiom to use is:
+-
+-        ---
+-        auto input = new ZlibInput(myConduit.input));
+-        input.read(myContent);
+-        ---
+-
+-        The optional windowBits parameter is the base two logarithm of the
+-        window size, and should be in the range 8-15, defaulting to 15 if not
+-        specified.  Additionally, the windowBits parameter may be negative to
+-        indicate that zlib should omit the standard zlib header and trailer,
+-        with the window size being -windowBits.
+-        
+-      Params:
+-        stream = compressed input stream.
+-        
+-        encoding =
+-            stream encoding.  Defaults to Encoding.Guess, which
+-            should be sufficient unless the stream was compressed with
+-            no encoding; in this case, you must manually specify
+-            Encoding.None.
+-            
+-        windowBits =
+-            the base two logarithm of the window size, and should be in the
+-            range 8-15, defaulting to 15 if not specified.
+-
+-    ***************************************************************************/
+-
+-    this(InputStream stream, Encoding encoding,
+-            int windowBits = WINDOWBITS_DEFAULT)
+-    {
+-        init(stream, encoding, windowBits);
+-        scope(failure) kill_zs();
+-
+-        super(stream);
+-        in_chunk = new ubyte[CHUNKSIZE];
+-    }
+-    
+-    /// ditto
+-    this(InputStream stream)
+-    {
+-        // DRK 2009-02-26
+-        // Removed unique implementation in favour of passing on to another
+-        // constructor.  The specific implementation was because the default
+-        // value of windowBits is documented in zlib.h, but not actually
+-        // exposed.  Using inflateInit over inflateInit2 ensured we would
+-        // never get it wrong.  That said, the default value of 15 is REALLY
+-        // unlikely to change: values below that aren't terribly useful, and
+-        // values higher than 15 are already used for other purposes.
+-        // Also, this leads to less code which is always good.  :D
+-        this(stream, Encoding.init);
+-    }
+-
+-    /*
+-     * This method performs initialisation for the stream.  Note that this may
+-     * be called more than once for an instance, provided the instance is
+-     * either new or as part of a call to reset.
+-     */
+-    private void init(InputStream stream, Encoding encoding, int windowBits)
+-    {
+-        /*
+-         * Here's how windowBits works, according to zlib.h:
+-         * 
+-         * 8 .. 15
+-         *      zlib encoding.
+-         *      
+-         * (8 .. 15) + 16
+-         *      gzip encoding.
+-         *      
+-         * (8 .. 15) + 32
+-         *      auto-detect encoding.
+-         *      
+-         * (8 .. 15) * -1
+-         *      raw/no encoding.
+-         *      
+-         * Since we're going to be playing with the value, we DO care whether
+-         * windowBits is in the expected range, so we'll check it.
+-         */
+-        if( !( 8 <= windowBits && windowBits <= 15 ) )
+-        {
+-            // No compression for you!
+-            throw new ZlibException("invalid windowBits argument"
+-                ~ .toString(windowBits));
+-        }
+-        
+-        switch( encoding )
+-        {
+-        case Encoding.Zlib:
+-            // no-op
+-            break;
+-            
+-        case Encoding.Gzip:
+-            windowBits += 16;
+-            break;
+-
+-        case Encoding.Guess:
+-            windowBits += 32;
+-            break;
+-            
+-        case Encoding.None:
+-            windowBits *= -1;
+-            break;
+-
+-        default:
+-            assert (false);
+-        }
+-        
+-        // Allocate inflate state
+-        with( zs )
+-        {
+-            zalloc = null;
+-            zfree = null;
+-            opaque = null;
+-            avail_in = 0;
+-            next_in = null;
+-        }
+-
+-        auto ret = inflateInit2(&zs, windowBits);
+-        if( ret != Z_OK )
+-            throw new ZlibException(ret);
+-
+-        zs_valid = true;
+-
+-        // Note that this is redundant when init is called from the ctor, but
+-        // it is NOT REDUNDANT when called from reset.  source is declared in
+-        // InputFilter.
+-        //
+-        // This code is a wee bit brittle, since if the ctor of InputFilter
+-        // changes, this code might break in subtle, hard to find ways.
+-        //
+-        // See ticket #1837
+-        this.source = stream;
+-    }
+-    
+-    ~this()
+-    {
+-        if( zs_valid )
+-            kill_zs();
+-    }
+-
+-    /***************************************************************************
+-        
+-        Resets and re-initialises this instance.
+-
+-        If you are creating compression streams inside a loop, you may wish to
+-        use this method to re-use a single instance.  This prevents the
+-        potentially costly re-allocation of internal buffers.
+-
+-        The stream must have already been closed before calling reset.
+-
+-    ***************************************************************************/ 
+-
+-    void reset(InputStream stream, Encoding encoding,
+-            int windowBits = WINDOWBITS_DEFAULT)
+-    {
+-        // If the stream is still valid, bail.
+-        if( zs_valid )
+-            throw new ZlibStillOpenException;
+-        
+-        init(stream, encoding, windowBits);
+-    }
+-
+-    /// ditto
+-
+-    void reset(InputStream stream)
+-    {
+-        reset(stream, Encoding.init);
+-    }
+-
+-    /***************************************************************************
+-
+-        Decompresses data from the underlying conduit into a target array.
+-
+-        Returns the number of bytes stored into dst, which may be less than
+-        requested.
+-
+-    ***************************************************************************/ 
+-
+-    override size_t read(void[] dst)
+-    {
+-        if( !zs_valid )
+-            return IConduit.Eof;
+-
+-        // Check to see if we've run out of input data.  If we have, get some
+-        // more.
+-        if( zs.avail_in == 0 )
+-        {
+-            auto len = source.read(in_chunk);
+-            if( len == IConduit.Eof )
+-                return IConduit.Eof;
+-
+-            zs.avail_in = len;
+-            zs.next_in = in_chunk.ptr;
+-        }
+-
+-        // We'll tell zlib to inflate straight into the target array.
+-        zs.avail_out = dst.length;
+-        zs.next_out = cast(ubyte*)dst.ptr;
+-        auto ret = inflate(&zs, Z_NO_FLUSH);
+-
+-        switch( ret )
+-        {
+-            case Z_NEED_DICT:
+-                // Whilst not technically an error, this should never happen
+-                // for general-use code, so treat it as an error.
+-            case Z_DATA_ERROR:
+-            case Z_MEM_ERROR:
+-                kill_zs();
+-                throw new ZlibException(ret);
+-
+-            case Z_STREAM_END:
+-                // zlib stream is finished; kill the stream so we don't try to
+-                // read from it again.
+-                kill_zs();
+-                break;
+-
+-            default:
+-        }
+-
+-        return dst.length - zs.avail_out;
+-    }
+-
+-    /***************************************************************************
+-
+-        Closes the compression stream.
+-
+-    ***************************************************************************/ 
+-
+-    override void close()
+-    {
+-        // Kill the stream.  Don't deallocate the buffer since the user may
+-        // yet reset the stream.
+-        if( zs_valid )
+-            kill_zs();
+-
+-        super.close();
+-    }
+-
+-    // Disable seeking
+-    override long seek(long offset, Anchor anchor = Anchor.Begin)
+-    {
+-        throw new IOException("ZlibInput does not support seek requests");
+-    }
+-
+-    // This function kills the stream: it deallocates the internal state, and
+-    // unsets the zs_valid flag.
+-    private void kill_zs()
+-    {
+-        check_valid();
+-
+-        inflateEnd(&zs);
+-        zs_valid = false;
+-    }
+-
+-    // Asserts that the stream is still valid and usable (except that this
+-    // check doesn't get elided with -release).
+-    private void check_valid()
+-    {
+-        if( !zs_valid )
+-            throw new ZlibClosedException;
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This output filter can be used to perform compression of data into a zlib
+-    stream.
+-
+-*******************************************************************************/
+-
+-class ZlibOutput : OutputFilter
+-{
+-    /***************************************************************************
+-
+-        This enumeration represents several pre-defined compression levels.
+-
+-        Any integer between -1 and 9 inclusive may be used as a level,
+-        although the symbols in this enumeration should suffice for most
+-        use-cases.
+-
+-    ***************************************************************************/
+-
+-    enum Level : int
+-    {
+-        /**
+-         * Default compression level.  This is selected for a good compromise
+-         * between speed and compression, and the exact compression level is
+-         * determined by the underlying zlib library.  Should be roughly
+-         * equivalent to compression level 6.
+-         */
+-        Normal = -1,
+-        /**
+-         * Do not perform compression.  This will cause the stream to expand
+-         * slightly to accommodate stream metadata.
+-         */
+-        None = 0,
+-        /**
+-         * Minimal compression; the fastest level which performs at least
+-         * some compression.
+-         */
+-        Fast = 1,
+-        /**
+-         * Maximal compression.
+-         */
+-        Best = 9
+-    }
+-
+-    /***************************************************************************
+-    
+-        This enumeration allows you to specify what the encoding of the
+-        compressed stream should be.
+-    
+-    ***************************************************************************/
+-
+-    enum Encoding : int
+-    {
+-        /**
+-         *  Stream should use zlib encoding.
+-         */
+-        Zlib,
+-        /**
+-         *  Stream should use gzip encoding.
+-         */
+-        Gzip,
+-        /**
+-         *  Stream should use no encoding.
+-         */
+-        None
+-    }
+-
+-    private
+-    {
+-        bool zs_valid = false;
+-        z_stream zs;
+-        ubyte[] out_chunk;
+-        size_t _written = 0;
+-    }
+-
+-    /***************************************************************************
+-
+-        Constructs a new zlib compression filter.  You need to pass in the
+-        stream that the compression filter will write to.  If you are using
+-        this filter with a conduit, the idiom to use is:
+-
+-        ---
+-        auto output = new ZlibOutput(myConduit.output);
+-        output.write(myContent);
+-        ---
+-
+-        The optional windowBits parameter is the base two logarithm of the
+-        window size, and should be in the range 8-15, defaulting to 15 if not
+-        specified.  Additionally, the windowBits parameter may be negative to
+-        indicate that zlib should omit the standard zlib header and trailer,
+-        with the window size being -windowBits.
+-
+-    ***************************************************************************/
+-
+-    this(OutputStream stream, Level level, Encoding encoding,
+-            int windowBits = WINDOWBITS_DEFAULT)
+-    {
+-        init(stream, level, encoding, windowBits);
+-        scope(failure) kill_zs();
+-
+-        super(stream);
+-        out_chunk = new ubyte[CHUNKSIZE];
+-    }
+-    
+-    /// ditto
+-    this(OutputStream stream, Level level = Level.Normal)
+-    {
+-        // DRK 2009-02-26
+-        // Removed unique implementation in favour of passing on to another
+-        // constructor.  See ZlibInput.this(InputStream).
+-        this(stream, level, Encoding.init);
+-    }
+-
+-    /*
+-     * This method performs initialisation for the stream.  Note that this may
+-     * be called more than once for an instance, provided the instance is
+-     * either new or as part of a call to reset.
+-     */
+-    private void init(OutputStream stream, Level level, Encoding encoding,
+-            int windowBits)
+-    {
+-        /*
+-         * Here's how windowBits works, according to zlib.h:
+-         * 
+-         * 8 .. 15
+-         *      zlib encoding.
+-         *      
+-         * (8 .. 15) + 16
+-         *      gzip encoding.
+-         *      
+-         * (8 .. 15) + 32
+-         *      auto-detect encoding.
+-         *      
+-         * (8 .. 15) * -1
+-         *      raw/no encoding.
+-         *      
+-         * Since we're going to be playing with the value, we DO care whether
+-         * windowBits is in the expected range, so we'll check it.
+-         * 
+-         * Also, note that OUR Encoding enum doesn't contain the 'Guess'
+-         * member.  I'm still waiting on tango.io.psychic...
+-         */
+-        if( !( 8 <= windowBits && windowBits <= 15 ) )
+-        {
+-            // No compression for you!
+-            throw new ZlibException("invalid windowBits argument"
+-                ~ .toString(windowBits));
+-        }
+-        
+-        switch( encoding )
+-        {
+-        case Encoding.Zlib:
+-            // no-op
+-            break;
+-            
+-        case Encoding.Gzip:
+-            windowBits += 16;
+-            break;
+-            
+-        case Encoding.None:
+-            windowBits *= -1;
+-            break;
+-
+-        default:
+-            assert (false);
+-        }
+-        
+-        // Allocate deflate state
+-        with( zs )
+-        {
+-            zalloc = null;
+-            zfree = null;
+-            opaque = null;
+-        }
+-
+-        auto ret = deflateInit2(&zs, level, Z_DEFLATED, windowBits, 8,
+-                Z_DEFAULT_STRATEGY);
+-        if( ret != Z_OK )
+-            throw new ZlibException(ret);
+-
+-        zs_valid = true;
+-
+-        // This is NOT REDUNDANT.  See ZlibInput.init.
+-        this.sink = stream;
+-    }
+-
+-    ~this()
+-    {
+-        if( zs_valid )
+-            kill_zs();
+-    }
+-
+-    /***************************************************************************
+-        
+-        Resets and re-initialises this instance.
+-
+-        If you are creating compression streams inside a loop, you may wish to
+-        use this method to re-use a single instance.  This prevents the
+-        potentially costly re-allocation of internal buffers.
+-
+-        The stream must have already been closed or committed before calling
+-        reset.
+-
+-    ***************************************************************************/ 
+-
+-    void reset(OutputStream stream, Level level, Encoding encoding,
+-            int windowBits = WINDOWBITS_DEFAULT)
+-    {
+-        // If the stream is still valid, bail.
+-        if( zs_valid )
+-            throw new ZlibStillOpenException;
+-
+-        init(stream, level, encoding, windowBits);
+-    }
+-
+-    /// ditto
+-    void reset(OutputStream stream, Level level = Level.Normal)
+-    {
+-        reset(stream, level, Encoding.init);
+-    }
+-
+-    /***************************************************************************
+-
+-        Compresses the given data to the underlying conduit.
+-
+-        Returns the number of bytes from src that were compressed; write
+-        should always consume all data provided to it, although it may not be
+-        immediately written to the underlying output stream.
+-
+-    ***************************************************************************/
+-
+-    override size_t write(void[] src)
+-    {
+-        check_valid();
+-        scope(failure) kill_zs();
+-
+-        zs.avail_in = src.length;
+-        zs.next_in = cast(ubyte*)src.ptr;
+-
+-        do
+-        {
+-            zs.avail_out = out_chunk.length;
+-            zs.next_out = out_chunk.ptr;
+-
+-            auto ret = deflate(&zs, Z_NO_FLUSH);
+-            if( ret == Z_STREAM_ERROR )
+-                throw new ZlibException(ret);
+-
+-            // Push the compressed bytes out to the stream, until it's either
+-            // written them all, or choked.
+-            auto have = out_chunk.length-zs.avail_out;
+-            auto out_buffer = out_chunk[0..have];
+-            do
+-            {
+-                auto w = sink.write(out_buffer);
+-                if( w == IConduit.Eof )
+-                    return w;
+-
+-                out_buffer = out_buffer[w..$];
+-                _written += w;
+-            }
+-            while( out_buffer.length > 0 );
+-        }
+-        // Loop while we are still using up the whole output buffer
+-        while( zs.avail_out == 0 );
+-
+-        assert( zs.avail_in == 0, "failed to compress all provided data" );
+-
+-        return src.length;
+-    }
+-
+-    /***************************************************************************
+-
+-        This read-only property returns the number of compressed bytes that
+-        have been written to the underlying stream.  Following a call to
+-        either close or commit, this will contain the total compressed size of
+-        the input data stream.
+-
+-    ***************************************************************************/
+-
+-    size_t written()
+-    {
+-        return _written;
+-    }
+-
+-    /***************************************************************************
+-
+-        Close the compression stream.  This will cause any buffered content to
+-        be committed to the underlying stream.
+-
+-    ***************************************************************************/
+-
+-    override void close()
+-    {
+-        // Only commit if the stream is still open.
+-        if( zs_valid ) commit;
+-
+-        super.close;
+-    }
+-
+-    /***************************************************************************
+-
+-        Purge any buffered content.  Calling this will implicitly end the zlib
+-        stream, so it should not be called until you are finished compressing
+-        data.  Any calls to either write or commit after a compression filter
+-        has been committed will throw an exception.
+-
+-        The only difference between calling this method and calling close is
+-        that the underlying stream will not be closed.
+-
+-    ***************************************************************************/
+-
+-    void commit()
+-    {
+-        check_valid();
+-        scope(failure) kill_zs();
+-
+-        zs.avail_in = 0;
+-        zs.next_in = null;
+-
+-        bool finished = false;
+-
+-        do
+-        {
+-            zs.avail_out = out_chunk.length;
+-            zs.next_out = out_chunk.ptr;
+-
+-            auto ret = deflate(&zs, Z_FINISH);
+-            switch( ret )
+-            {
+-                case Z_OK:
+-                    // Keep going
+-                    break;
+-
+-                case Z_STREAM_END:
+-                    // We're done!
+-                    finished = true;
+-                    break;
+-
+-                default:
+-                    throw new ZlibException(ret);
+-            }
+-
+-            auto have = out_chunk.length - zs.avail_out;
+-            auto out_buffer = out_chunk[0..have];
+-            if( have > 0 )
+-            {
+-                do
+-                {
+-                    auto w = sink.write(out_buffer);
+-                    if( w == IConduit.Eof )
+-                        return;
+-
+-                    out_buffer = out_buffer[w..$];
+-                    _written += w;
+-                }
+-                while( out_buffer.length > 0 );
+-            }
+-        }
+-        while( !finished );
+-
+-        kill_zs();
+-    }
+-
+-    // Disable seeking
+-    override long seek(long offset, Anchor anchor = Anchor.Begin)
+-    {
+-        throw new IOException("ZlibOutput does not support seek requests");
+-    }
+-
+-    // This function kills the stream: it deallocates the internal state, and
+-    // unsets the zs_valid flag.
+-    private void kill_zs()
+-    {
+-        check_valid();
+-
+-        deflateEnd(&zs);
+-        zs_valid = false;
+-    }
+-
+-    // Asserts that the stream is still valid and usable (except that this
+-    // check doesn't get elided with -release).
+-    private void check_valid()
+-    {
+-        if( !zs_valid )
+-            throw new ZlibClosedException;
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This exception is thrown if you attempt to perform a read, write or flush
+-    operation on a closed zlib filter stream.  This can occur if the input
+-    stream has finished, or an output stream was flushed.
+-
+-*******************************************************************************/
+-
+-class ZlibClosedException : IOException
+-{
+-    this()
+-    {
+-        super("cannot operate on closed zlib stream");
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This exception is thrown if you attempt to reset a compression stream that
+-    is still open.  You must either close or commit a stream before it can be
+-    reset.
+-
+-*******************************************************************************/
+-
+-class ZlibStillOpenException : IOException
+-{
+-    this()
+-    {
+-        super("cannot reset an open zlib stream");
+-    }
+-}
+-
+-/*******************************************************************************
+-  
+-    This exception is thrown when an error occurs in the underlying zlib
+-    library.  Where possible, it will indicate both the name of the error, and
+-    any textural message zlib has provided.
+-
+-*******************************************************************************/
+-
+-class ZlibException : IOException
+-{
+-    /*
+-     * Use this if you want to throw an exception that isn't actually
+-     * generated by zlib.
+-     */
+-    this(char[] msg)
+-    {
+-        super(msg);
+-    }
+-    
+-    /*
+-     * code is the error code returned by zlib.  The exception message will
+-     * be the name of the error code.
+-     */
+-    this(int code)
+-    {
+-        super(codeName(code));
+-    }
+-
+-    /*
+-     * As above, except that it appends msg as well.
+-     */
+-    this(int code, char* msg)
+-    {
+-        super(codeName(code)~": "~fromStringz(msg));
+-    }
+-
+-    protected char[] codeName(int code)
+-    {
+-        char[] name;
+-
+-        switch( code )
+-        {
+-            case Z_OK:              name = "Z_OK";              break;
+-            case Z_STREAM_END:      name = "Z_STREAM_END";      break;
+-            case Z_NEED_DICT:       name = "Z_NEED_DICT";       break;
+-            case Z_ERRNO:           name = "Z_ERRNO";           break;
+-            case Z_STREAM_ERROR:    name = "Z_STREAM_ERROR";    break;
+-            case Z_DATA_ERROR:      name = "Z_DATA_ERROR";      break;
+-            case Z_MEM_ERROR:       name = "Z_MEM_ERROR";       break;
+-            case Z_BUF_ERROR:       name = "Z_BUF_ERROR";       break;
+-            case Z_VERSION_ERROR:   name = "Z_VERSION_ERROR";   break;
+-            default:                name = "Z_UNKNOWN";
+-        }
+-
+-        return name;
+-    }
+-}
+-
+-/* *****************************************************************************
+-
+-    This section contains a simple unit test for this module.  It is hidden
+-    behind a version statement because it introduces additional dependencies.
+-
+-***************************************************************************** */
+-
+-debug(UnitTest) {
+-
+-import tango.io.device.Array : Array;
+-
+-void check_array(char[] FILE=__FILE__, int LINE=__LINE__)(
+-        ubyte[] as, ubyte[] bs, lazy char[] msg)
+-{
+-    assert( as.length == bs.length,
+-        FILE ~":"~ toString(LINE) ~ ": " ~ msg()
+-        ~ "array lengths differ (" ~ toString(as.length)
+-        ~ " vs " ~ toString(bs.length) ~ ")" );
+-    
+-    foreach( i, a ; as )
+-    {
+-        auto b = bs[i];
+-        
+-        assert( a == b,
+-            FILE ~":"~ toString(LINE) ~ ": " ~ msg()
+-            ~ "arrays differ at " ~ toString(i)
+-            ~ " (" ~ toString(cast(int) a)
+-            ~ " vs " ~ toString(cast(int) b) ~ ")" );
+-    }
+-}
+-
+-unittest
+-{
+-    // One ring to rule them all, one ring to find them,
+-    // One ring to bring them all and in the darkness bind them.
+-    const char[] message = 
+-        "Ash nazg durbatulûk, ash nazg gimbatul, "
+-        "ash nazg thrakatulûk, agh burzum-ishi krimpatul.";
+-    
+-    static assert( message.length == 90 );
+-
+-    // This compressed data was created using Python 2.5's built in zlib
+-    // module, with the default compression level.
+-    {
+-        const ubyte[] message_z = [
+-            0x78,0x9c,0x73,0x2c,0xce,0x50,0xc8,0x4b,
+-            0xac,0x4a,0x57,0x48,0x29,0x2d,0x4a,0x4a,
+-            0x2c,0x29,0xcd,0x39,0xbc,0x3b,0x5b,0x47,
+-            0x21,0x11,0x26,0x9a,0x9e,0x99,0x0b,0x16,
+-            0x45,0x12,0x2a,0xc9,0x28,0x4a,0xcc,0x46,
+-            0xa8,0x4c,0xcf,0x50,0x48,0x2a,0x2d,0xaa,
+-            0x2a,0xcd,0xd5,0xcd,0x2c,0xce,0xc8,0x54,
+-            0xc8,0x2e,0xca,0xcc,0x2d,0x00,0xc9,0xea,
+-            0x01,0x00,0x1f,0xe3,0x22,0x99];
+-    
+-        scope cond_z = new Array(2048);
+-        scope comp = new ZlibOutput(cond_z);
+-        comp.write (message);
+-        comp.close;
+-    
+-        assert( comp.written == message_z.length );
+-        
+-        /+
+-        Stdout("message_z:").newline;
+-        foreach( b ; cast(ubyte[]) cond_z.slice )
+-            Stdout.format("0x{0:x2},", b);
+-        Stdout.newline.newline;
+-        +/
+-    
+-        //assert( message_z == cast(ubyte[])(cond_z.slice) );
+-        check_array!(__FILE__,__LINE__)
+-            ( message_z, cast(ubyte[]) cond_z.slice, "message_z " );
+-    
+-        scope decomp = new ZlibInput(cond_z);
+-        auto buffer = new ubyte[256];
+-        buffer = buffer[0 .. decomp.read(buffer)];
+-    
+-        //assert( cast(ubyte[])message == buffer );
+-        check_array!(__FILE__,__LINE__)
+-            ( cast(ubyte[]) message, buffer, "message (zlib) " );
+-    }
+-    
+-    // This compressed data was created using the Cygwin gzip program
+-    // with default options.  The original file was called "testdata.txt".
+-    {
+-        const ubyte[] message_gz = [
+-            0x1f,0x8b,0x08,0x00,0x80,0x70,0x6f,0x45,
+-            0x00,0x03,0x73,0x2c,0xce,0x50,0xc8,0x4b,
+-            0xac,0x4a,0x57,0x48,0x29,0x2d,0x4a,0x4a,
+-            0x2c,0x29,0xcd,0x39,0xbc,0x3b,0x5b,0x47,
+-            0x21,0x11,0x26,0x9a,0x9e,0x99,0x0b,0x16,
+-            0x45,0x12,0x2a,0xc9,0x28,0x4a,0xcc,0x46,
+-            0xa8,0x4c,0xcf,0x50,0x48,0x2a,0x2d,0xaa,
+-            0x2a,0xcd,0xd5,0xcd,0x2c,0xce,0xc8,0x54,
+-            0xc8,0x2e,0xca,0xcc,0x2d,0x00,0xc9,0xea,
+-            0x01,0x00,0x45,0x38,0xbc,0x58,0x5a,0x00,
+-            0x00,0x00];
+-        
+-        // Compresses the original message, and outputs the bytes.  You can use
+-        // this to test the output of ZlibOutput with gzip.  If you use this,
+-        // don't forget to import Stdout somewhere.
+-        /+
+-        scope comp_gz = new Array(2048);
+-        scope comp = new ZlibOutput(comp_gz, ZlibOutput.Level.Normal, ZlibOutput.Encoding.Gzip, WINDOWBITS_DEFAULT);
+-        comp.write(message);
+-        comp.close;
+-        
+-        Stdout.format("message_gz ({0} bytes):", comp_gz.slice.length).newline;
+-        foreach( b ; cast(ubyte[]) comp_gz.slice )
+-            Stdout.format("0x{0:x2},", b);
+-        Stdout.newline;
+-        +/
+-        
+-        // We aren't going to test that we can compress to a gzip stream
+-        // since gzip itself always adds stuff like the filename, timestamps,
+-        // etc.  We'll just make sure we can DECOMPRESS gzip streams.
+-        scope decomp_gz = new Array(message_gz.dup);
+-        scope decomp = new ZlibInput(decomp_gz);
+-        auto buffer = new ubyte[256];
+-        buffer = buffer[0 .. decomp.read(buffer)];
+-        
+-        //assert( cast(ubyte[]) message == buffer );
+-        check_array!(__FILE__,__LINE__)
+-            ( cast(ubyte[]) message, buffer, "message (gzip) ");
+-    }
+-}
+-}
+--- a/tango/io/compress/Zip.d
++++ /dev/null
+@@ -1,2976 +0,0 @@
+-/*******************************************************************************
+- *
+- * copyright:   Copyright (c) 2007 Daniel Keep.  All rights reserved.
+- *
+- * license:     BSD style: $(LICENSE)
+- *
+- * version:     Initial release: December 2007
+- *
+- * author:      Daniel Keep
+- *
+- ******************************************************************************/
+-
+-module tango.io.compress.Zip;
+-
+-/*
+-
+-TODO
+-====
+-
+-* Disable UTF encoding until I've worked out what version of Zip that's
+-  related to... (actually; it's entirely possible that's it's merely a
+-  *proposal* at the moment.) (*Done*)
+-
+-* Make ZipEntry safe: make them aware that their creating reader has been
+-  destroyed.
+-
+-*/
+-
+-import tango.core.ByteSwap : ByteSwap;
+-import tango.io.device.Array : Array;
+-import tango.io.device.File : File;
+-import tango.io.FilePath : FilePath, PathView;
+-import tango.io.device.FileMap : FileMap;
+-import tango.io.compress.ZlibStream : ZlibInput, ZlibOutput;
+-import tango.util.digest.Crc32 : Crc32;
+-import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-import tango.io.stream.Digester : DigestInput;
+-import tango.time.Time : Time, TimeSpan;
+-import tango.time.WallClock : WallClock;
+-import tango.time.chrono.Gregorian : Gregorian;
+-
+-import Path = tango.io.Path;
+-import Integer = tango.text.convert.Integer;
+-
+-debug(Zip) import tango.io.Stdout : Stderr;
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Implementation crap
+-//
+-// Why is this here, you ask?  Because of bloody DMD forward reference bugs.
+-// For pete's sake, Walter, FIX THEM, please!
+-//
+-// To skip to the actual user-visible stuff, search for "Shared stuff".
+-
+-private
+-{
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// LocalFileHeader
+-//
+-
+-    align(1)
+-    struct LocalFileHeaderData
+-    {
+-        ushort      extract_version = ushort.max;
+-        ushort      general_flags = 0;
+-        ushort      compression_method = 0;
+-        ushort      modification_file_time = 0;
+-        ushort      modification_file_date = 0;
+-        uint        crc_32 = 0; // offsetof = 10
+-        uint        compressed_size = 0;
+-        uint        uncompressed_size = 0;
+-        ushort      file_name_length = 0;
+-        ushort      extra_field_length = 0;
+-
+-        debug(Zip) void dump()
+-        {
+-            Stderr
+-            ("LocalFileHeader.Data {")("\n")
+-            ("  extract_version = ")(extract_version)("\n")
+-            ("  general_flags = ")(general_flags)("\n")
+-            ("  compression_method = ")(compression_method)("\n")
+-            ("  modification_file_time = ")(modification_file_time)("\n")
+-            ("  modification_file_date = ")(modification_file_date)("\n")
+-            ("  crc_32 = ")(crc_32)("\n")
+-            ("  compressed_size = ")(compressed_size)("\n")
+-            ("  uncompressed_size = ")(uncompressed_size)("\n")
+-            ("  file_name_length = ")(file_name_length)("\n")
+-            ("  extra_field_length = ")(extra_field_length)("\n")
+-            ("}").newline;
+-        }
+-    }
+-
+-struct LocalFileHeader
+-{
+-    const uint signature = 0x04034b50;
+-
+-    alias LocalFileHeaderData Data;
+-    Data data;
+-    static assert( Data.sizeof == 26 );
+-
+-    char[] file_name;
+-    ubyte[] extra_field;
+-
+-    void[] data_arr()
+-    {
+-        return (&data)[0..1];
+-    }
+-
+-    void put(OutputStream output)
+-    {
+-        // Make sure var-length fields will fit.
+-        if( file_name.length > ushort.max )
+-            ZipException.fntoolong;
+-
+-        if( extra_field.length > ushort.max )
+-            ZipException.eftoolong;
+-
+-        // Encode filename
+-        auto file_name = utf8_to_cp437(this.file_name);
+-        scope(exit) if( file_name !is cast(ubyte[])this.file_name )
+-            delete file_name;
+-
+-        if( file_name is null )
+-            ZipException.fnencode;
+-
+-        // Update lengths in data
+-        Data data = this.data;
+-        data.file_name_length = cast(ushort) file_name.length;
+-        data.extra_field_length = cast(ushort) extra_field.length;
+-
+-        // Do it
+-        version( BigEndian ) swapAll(data);
+-        writeExact(output, (&data)[0..1]);
+-        writeExact(output, file_name);
+-        writeExact(output, extra_field);
+-    }
+-
+-    void fill(InputStream src)
+-    {
+-        readExact(src, data_arr);
+-        version( BigEndian ) swapAll(data);
+-
+-        //debug(Zip) data.dump;
+-
+-        auto tmp = new ubyte[data.file_name_length];
+-        readExact(src, tmp);
+-        file_name = cp437_to_utf8(tmp);
+-        if( cast(char*) tmp.ptr !is file_name.ptr ) delete tmp;
+-
+-        extra_field = new ubyte[data.extra_field_length];
+-        readExact(src, extra_field);
+-    }
+-
+-    /*
+-     * This method will check to make sure that the local and central headers
+-     * are the same; if they're not, then that indicates that the archive is
+-     * corrupt.
+-     */
+-    bool agrees_with(FileHeader h)
+-    {
+-        // NOTE: extra_field used to be compared with h.extra_field, but this caused
+-        // an assertion in certain archives. I found a mention of these fields being
+-        // allowed to be different, so I think it in general is wrong to include in
+-        // this sanity check. larsivi 20081111
+-        if( data.extract_version != h.data.extract_version
+-                || data.general_flags != h.data.general_flags
+-                || data.compression_method != h.data.compression_method
+-                || data.modification_file_time != h.data.modification_file_time
+-                || data.modification_file_date != h.data.modification_file_date
+-                || file_name != h.file_name )
+-            return false;
+-        
+-        // We need a separate check for the sizes and crc32, since these will
+-        // be zero if a trailing descriptor was used.
+-        if( !h.usingDataDescriptor && (
+-                   data.crc_32 != h.data.crc_32
+-                || data.compressed_size != h.data.compressed_size
+-                || data.uncompressed_size != h.data.uncompressed_size ) )
+-            return false;
+-
+-        return true;
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// FileHeader
+-//
+-
+-    align(1)
+-    struct FileHeaderData
+-    {
+-        ubyte       zip_version;
+-        ubyte       file_attribute_type;
+-        ushort      extract_version;
+-        ushort      general_flags;
+-        ushort      compression_method;
+-        ushort      modification_file_time;
+-        ushort      modification_file_date;
+-        uint        crc_32;
+-        uint        compressed_size;
+-        uint        uncompressed_size;
+-        ushort      file_name_length;
+-        ushort      extra_field_length;
+-        ushort      file_comment_length;
+-        ushort      disk_number_start;
+-        ushort      internal_file_attributes = 0;
+-        uint        external_file_attributes = 0;
+-        int         relative_offset_of_local_header;
+-
+-        debug(Zip) void dump()
+-        {
+-            Stderr
+-            ("FileHeader.Data {\n")
+-            ("  zip_version = ")(zip_version)("\n")
+-            ("  file_attribute_type = ")(file_attribute_type)("\n")
+-            ("  extract_version = ")(extract_version)("\n")
+-            ("  general_flags = ")(general_flags)("\n")
+-            ("  compression_method = ")(compression_method)("\n")
+-            ("  modification_file_time = ")(modification_file_time)("\n")
+-            ("  modification_file_date = ")(modification_file_date)("\n")
+-            ("  crc_32 = ")(crc_32)("\n")
+-            ("  compressed_size = ")(compressed_size)("\n")
+-            ("  uncompressed_size = ")(uncompressed_size)("\n")
+-            ("  file_name_length = ")(file_name_length)("\n")
+-            ("  extra_field_length = ")(extra_field_length)("\n")
+-            ("  file_comment_length = ")(file_comment_length)("\n")
+-            ("  disk_number_start = ")(disk_number_start)("\n")
+-            ("  internal_file_attributes = ")(internal_file_attributes)("\n")
+-            ("  external_file_attributes = ")(external_file_attributes)("\n")
+-            ("  relative_offset_of_local_header = ")(relative_offset_of_local_header)
+-                ("\n")
+-            ("}").newline;
+-        }
+-
+-        void fromLocal(LocalFileHeader.Data data)
+-        {
+-            extract_version = data.extract_version;
+-            general_flags = data.general_flags;
+-            compression_method = data.compression_method;
+-            modification_file_time = data.modification_file_time;
+-            modification_file_date = data.modification_file_date;
+-            crc_32 = data.crc_32;
+-            compressed_size = data.compressed_size;
+-            uncompressed_size = data.uncompressed_size;
+-            file_name_length = data.file_name_length;
+-            extra_field_length = data.extra_field_length;
+-        }
+-    }
+-
+-struct FileHeader
+-{
+-    const uint signature = 0x02014b50;
+-
+-    alias FileHeaderData Data;
+-    Data* data;
+-    static assert( Data.sizeof == 42 );
+-
+-    char[] file_name;
+-    ubyte[] extra_field;
+-    char[] file_comment;
+-
+-    bool usingDataDescriptor()
+-    {
+-        return !!(data.general_flags & 1<<3);
+-    }
+-
+-    uint compressionOptions()
+-    {
+-        return (data.general_flags >> 1) & 0b11;
+-    }
+-
+-    bool usingUtf8()
+-    {
+-        //return !!(data.general_flags & 1<<11);
+-        return false;
+-    }
+-
+-    void[] data_arr()
+-    {
+-        return (cast(void*)data)[0 .. Data.sizeof];
+-    }
+-
+-    void put(OutputStream output)
+-    {
+-        // Make sure the var-length fields will fit.
+-        if( file_name.length > ushort.max )
+-            ZipException.fntoolong;
+-
+-        if( extra_field.length > ushort.max )
+-            ZipException.eftoolong;
+-
+-        if( file_comment.length > ushort.max )
+-            ZipException.cotoolong;
+-
+-        // encode the filename and comment
+-        auto file_name = utf8_to_cp437(this.file_name);
+-        scope(exit) if( file_name !is cast(ubyte[])this.file_name )
+-            delete file_name;
+-        auto file_comment = utf8_to_cp437(this.file_comment);
+-        scope(exit) if( file_comment !is cast(ubyte[])this.file_comment )
+-            delete file_comment;
+-
+-        if( file_name is null )
+-            ZipException.fnencode;
+-
+-        if( file_comment is null && this.file_comment !is null )
+-            ZipException.coencode;
+-
+-        // Update the lengths
+-        Data data = *(this.data);
+-        data.file_name_length = cast(ushort) file_name.length;
+-        data.extra_field_length = cast(ushort) extra_field.length;
+-        data.file_comment_length = cast(ushort) file_comment.length;
+-
+-        // Ok; let's do this!
+-        version( BigEndian ) swapAll(data);
+-        writeExact(output, (&data)[0..1]);
+-        writeExact(output, file_name);
+-        writeExact(output, extra_field);
+-        writeExact(output, file_comment);
+-    }
+-
+-    long map(void[] src)
+-    {
+-        //debug(Zip) Stderr.formatln("FileHeader.map([0..{}])",src.length);
+-
+-        auto old_ptr = src.ptr;
+-
+-        data = cast(Data*) src.ptr;
+-        src = src[Data.sizeof..$];
+-        version( BigEndian ) swapAll(*data);
+-
+-        //debug(Zip) data.dump;
+-
+-        char[] function(ubyte[]) conv_fn;
+-        if( usingUtf8 )
+-            conv_fn = &cp437_to_utf8;
+-        else
+-            conv_fn = &utf8_to_utf8;
+-
+-        file_name = conv_fn(
+-                cast(ubyte[]) src[0..data.file_name_length]);
+-        src = src[data.file_name_length..$];
+-
+-        extra_field = cast(ubyte[]) src[0..data.extra_field_length];
+-        src = src[data.extra_field_length..$];
+-
+-        file_comment = conv_fn(
+-                cast(ubyte[]) src[0..data.file_comment_length]);
+-        src = src[data.file_comment_length..$];
+-
+-        // Return how many bytes we've eaten
+-        //debug(Zip) Stderr.formatln(" . used {} bytes", cast(long)(src.ptr - old_ptr));
+-        return cast(long)(src.ptr - old_ptr);
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// EndOfCDRecord
+-//
+-
+-    align(1)
+-    struct EndOfCDRecordData
+-    {
+-        ushort      disk_number = 0;
+-        ushort      disk_with_start_of_central_directory = 0;
+-        ushort      central_directory_entries_on_this_disk;
+-        ushort      central_directory_entries_total;
+-        uint        size_of_central_directory;
+-        uint        offset_of_start_of_cd_from_starting_disk;
+-        ushort      file_comment_length;
+-
+-        debug(Zip) void dump()
+-        {
+-            Stderr
+-                .formatln("EndOfCDRecord.Data {}","{")
+-                .formatln("  disk_number = {}", disk_number)
+-                .formatln("  disk_with_start_of_central_directory = {}",
+-                        disk_with_start_of_central_directory)
+-                .formatln("  central_directory_entries_on_this_disk = {}",
+-                        central_directory_entries_on_this_disk)
+-                .formatln("  central_directory_entries_total = {}",
+-                        central_directory_entries_total)
+-                .formatln("  size_of_central_directory = {}",
+-                        size_of_central_directory)
+-                .formatln("  offset_of_start_of_cd_from_starting_disk = {}",
+-                        offset_of_start_of_cd_from_starting_disk)
+-                .formatln("  file_comment_length = {}", file_comment_length)
+-                .formatln("}");
+-        }
+-    }
+-
+-struct EndOfCDRecord
+-{
+-    const uint  signature = 0x06054b50;
+-
+-    alias EndOfCDRecordData Data;
+-    Data data;
+-    static assert( data.sizeof == 18 );
+-
+-    char[] file_comment;
+-
+-    void[] data_arr()
+-    {
+-        return (cast(void*)&data)[0 .. data.sizeof];
+-    }
+-
+-    void put(OutputStream output)
+-    {
+-        // Set up the comment; check length, encode
+-        if( file_comment.length > ushort.max )
+-            ZipException.cotoolong;
+-
+-        auto file_comment = utf8_to_cp437(this.file_comment);
+-        scope(exit) if( file_comment !is cast(ubyte[])this.file_comment )
+-                delete file_comment;
+-
+-        // Set up data block
+-        Data data = this.data;
+-        data.file_comment_length = cast(ushort) file_comment.length;
+-
+-        version( BigEndian ) swapAll(data);
+-        writeExact(output, (&data)[0..1]);
+-    }
+-
+-    void fill(void[] src)
+-    {
+-        //Stderr.formatln("EndOfCDRecord.fill([0..{}])",src.length);
+-
+-        auto _data = data_arr;
+-        _data[] = src[0.._data.length];
+-        src = src[_data.length..$];
+-        version( BigEndian ) swapAll(data);
+-
+-        //data.dump;
+-
+-        file_comment = cast(char[]) src[0..data.file_comment_length].dup;
+-    }
+-}
+-
+-// End of implementation crap
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Shared stuff
+-
+-public
+-{
+-    /**
+-     * This enumeration denotes the kind of compression used on a file.
+-     */
+-    enum Method
+-    {
+-        /// No compression should be used.
+-        Store,
+-        /// Deflate compression.
+-        Deflate,
+-        /**
+-         * This is a special value used for unsupported or unrecognised
+-         * compression methods.  This value is only used internally.
+-         */
+-        Unsupported
+-    }
+-}
+-
+-private
+-{
+-    const ushort ZIP_VERSION = 20;
+-    const ushort MAX_EXTRACT_VERSION = 20;
+-
+-    /*                                     compression flags
+-                                  uses trailing descriptor |
+-                               utf-8 encoding            | |
+-                                            ^            ^ /\               */
+-    const ushort SUPPORTED_FLAGS = 0b00_0_0_0_0000_0_0_0_1_11_0;
+-    const ushort UNSUPPORTED_FLAGS = ~SUPPORTED_FLAGS;
+-
+-    Method toMethod(ushort method)
+-    {
+-        switch( method )
+-        {
+-            case 0:     return Method.Store;
+-            case 8:     return Method.Deflate;
+-            default:    return Method.Unsupported;
+-        }
+-    }
+-
+-    ushort fromMethod(Method method)
+-    {
+-        switch( method )
+-        {
+-            case Method.Store:      return 0;
+-            case Method.Deflate:    return 8;
+-            default:
+-                assert(false, "unsupported compression method");
+-        }
+-    }
+-
+-    /* NOTE: This doesn't actually appear to work.  Using the default magic
+-     * number with Tango's Crc32 digest works, however.
+-     */
+-    //const CRC_MAGIC = 0xdebb20e3u;
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// ZipReader
+-
+-interface ZipReader
+-{
+-    bool streamed();
+-    void close();
+-    bool more();
+-    ZipEntry get();
+-    ZipEntry get(ZipEntry);
+-    int opApply(int delegate(ref ZipEntry));
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// ZipWriter
+-
+-interface ZipWriter
+-{
+-    void finish();
+-    void putFile(ZipEntryInfo info, char[] path);
+-    void putFile(ZipEntryInfo info, char[] path);
+-    void putStream(ZipEntryInfo info, InputStream source);
+-    void putEntry(ZipEntryInfo info, ZipEntry entry);
+-    void putData(ZipEntryInfo info, void[] data);
+-    Method method();
+-    Method method(Method);
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// ZipBlockReader
+-
+-/**
+- * The ZipBlockReader class is used to parse a Zip archive.  It exposes the
+- * contents of the archive via an iteration interface.  For instance, to loop
+- * over all files in an archive, one can use either
+- *
+- * -----
+- *  foreach( entry ; reader )
+- *      ...
+- * -----
+- *
+- * Or
+- *
+- * -----
+- *  while( reader.more )
+- *  {
+- *      auto entry = reader.get;
+- *      ...
+- *  }
+- * -----
+- *
+- * See the ZipEntry class for more information on the contents of entries.
+- *
+- * Note that this class can only be used with input sources which can be
+- * freely seeked.  Also note that you may open a ZipEntry instance produced by
+- * this reader at any time until the ZipReader that created it is closed.
+- */
+-class ZipBlockReader : ZipReader
+-{
+-    /**
+-     * Creates a ZipBlockReader using the specified file on the local
+-     * filesystem.
+-     */
+-    this(char[] path)
+-    {
+-        file_source = new File(path);
+-        this(file_source);
+-    }
+-
+-    /**
+-     * Creates a ZipBlockReader using the provided InputStream.  Please note
+-     * that this InputStream must be attached to a conduit implementing the 
+-     * IConduit.Seek interface.
+-     */
+-    this(InputStream source)
+-    in
+-    {
+-        assert( cast(IConduit.Seek) source.conduit, "source stream must be seekable" );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-    }
+-
+-    bool streamed() { return false; }
+-
+-    /**
+-     * Closes the reader, and releases all resources.  After this operation,
+-     * all ZipEntry instances created by this ZipReader are invalid and should
+-     * not be used.
+-     */
+-    void close()
+-    {
+-        // NOTE: Originally more of the GC allocated data in this class were
+-        // explicitly deleted here, such as cd_data - this caused segfaults
+-        // and have been removed as they were not necessary from correctness
+-        // point of view, and the memory usage win is questionable.
+-        state = State.Done;
+-        source = null;
+-        seeker = null;
+-        delete headers;
+-
+-        if( file_source !is null )  
+-          {
+-          file_source.close;
+-          delete file_source;
+-          }
+-    }
+-
+-    /**
+-     * Returns true if and only if there are additional files in the archive
+-     * which have not been read via the get method.  This returns true before
+-     * the first call to get (assuming the opened archive is non-empty), and
+-     * false after the last file has been accessed.
+-     */
+-    bool more()
+-    {
+-        switch( state )
+-        {
+-            case State.Init:
+-                read_cd;
+-                assert( state == State.Open );
+-                return more;
+-
+-            case State.Open:
+-                return (current_index < headers.length);
+-
+-            case State.Done:
+-                return false;
+-
+-            default:
+-                assert(false);
+-        }
+-    }
+-
+-    /**
+-     * Retrieves the next file from the archive.  Note that although this does
+-     * perform IO operations, it will not read the contents of the file.
+-     *
+-     * The optional reuse argument can be used to instruct the reader to reuse
+-     * an existing ZipEntry instance.  If passed a null reference, it will
+-     * create a new ZipEntry instance.
+-     */
+-    ZipEntry get()
+-    {
+-        if( !more )
+-            ZipExhaustedException();
+-
+-        return new ZipEntry(headers[current_index++], &open_file);
+-    }
+-
+-    /// ditto
+-    ZipEntry get(ZipEntry reuse)
+-    {
+-        if( !more )
+-            ZipExhaustedException();
+-
+-        if( reuse is null )
+-            return new ZipEntry(headers[current_index++], &open_file);
+-        else
+-            return reuse.reset(headers[current_index++], &open_file);
+-    }
+-
+-    /**
+-     * This is used to iterate over the contents of an archive using a foreach
+-     * loop.  Please note that the iteration will reuse the ZipEntry instance
+-     * passed to your loop.  If you wish to keep the instance and re-use it
+-     * later, you $(B must) use the dup member to create a copy.
+-     */
+-    int opApply(int delegate(ref ZipEntry) dg)
+-    {
+-        int result = 0;
+-        ZipEntry entry;
+-
+-        while( more )
+-        {
+-            entry = get(entry);
+-
+-            result = dg(entry);
+-            if( result )
+-                break;
+-        }
+-
+-        if( entry !is null )
+-            delete entry;
+-
+-        return result;
+-    }
+-
+-private:
+-    InputStream source;
+-    InputStream seeker; //IConduit.Seek seeker;
+-
+-    enum State { Init, Open, Done }
+-    State state;
+-    size_t current_index = 0;
+-    FileHeader[] headers;
+-
+-    // These should be killed when the reader is closed.
+-    ubyte[] cd_data;
+-    File file_source = null;
+-
+-    /*
+-     * This function will read the contents of the central directory.  Split
+-     * or spanned archives aren't supported.
+-     */
+-    void read_cd()
+-    in
+-    {
+-        assert( state == State.Init );
+-        assert( headers is null );
+-        assert( cd_data is null );
+-    }
+-    out
+-    {
+-        assert( state == State.Open );
+-        assert( headers !is null );
+-        assert( cd_data !is null );
+-        assert( current_index == 0 );
+-    }
+-    body
+-    {
+-        //Stderr.formatln("ZipReader.read_cd()");
+-
+-        // First, we need to locate the end of cd record, so that we know
+-        // where the cd itself is, and how big it is.
+-        auto eocdr = read_eocd_record;
+-
+-        // Now, make sure the archive is all in one file.
+-        if( eocdr.data.disk_number !=
+-                    eocdr.data.disk_with_start_of_central_directory
+-                || eocdr.data.central_directory_entries_on_this_disk !=
+-                    eocdr.data.central_directory_entries_total )
+-            ZipNotSupportedException.spanned;
+-
+-        // Ok, read the whole damn thing in one go.
+-        cd_data = new ubyte[eocdr.data.size_of_central_directory];
+-        long cd_offset = eocdr.data.offset_of_start_of_cd_from_starting_disk;
+-        seeker.seek(cd_offset, seeker.Anchor.Begin);
+-        readExact(source, cd_data);
+-
+-        // Cake.  Now, we need to break it up into records.
+-        headers = new FileHeader[
+-            eocdr.data.central_directory_entries_total];
+-
+-        long cdr_offset = cd_offset;
+-
+-        // Ok, map the CD data into file headers.
+-        foreach( i,ref header ; headers )
+-        {
+-            //Stderr.formatln(" . reading header {}...", i);
+-
+-            // Check signature
+-            {
+-                uint sig = (cast(uint[])(cd_data[0..4]))[0];
+-                version( BigEndian ) swap(sig);
+-                if( sig != FileHeader.signature )
+-                    ZipException.badsig("file header");
+-            }
+-
+-            auto used = header.map(cd_data[4..$]);
+-            assert( used <= (size_t.max-4) );
+-            cd_data = cd_data[4+cast(size_t)used..$];
+-
+-            // Update offset for next record
+-            cdr_offset += 4 /* for sig. */ + used;
+-        }
+-
+-        // Done!
+-        state = State.Open;
+-    }
+-
+-    /*
+-     * This will locate the end of CD record in the open stream.
+-     *
+-     * This code sucks, but that's because Zip sucks.
+-     *
+-     * Basically, the EOCD record is stuffed somewhere at the end of the file.
+-     * In a brilliant move, the record is *variably sized*, which means we
+-     * have to do a linear backwards search to find it.
+-     *
+-     * The header itself (including the signature) is at minimum 22 bytes
+-     * long, plus anywhere between 0 and 2^16-1 bytes of comment.  That means
+-     * we need to read the last 2^16-1 + 22 bytes from the file, and look for
+-     * the signature [0x50,0x4b,0x05,0x06] in [0 .. $-18].
+-     *
+-     * If we find the EOCD record, we'll return its contents.  If we couldn't
+-     * find it, we'll throw an exception.
+-     */
+-    EndOfCDRecord read_eocd_record()
+-    in
+-    {
+-        assert( state == State.Init );
+-    }
+-    body
+-    {
+-        //Stderr.formatln("read_eocd_record()");
+-
+-        // Signature + record + max. comment length
+-        const max_chunk_len = 4 + EndOfCDRecord.Data.sizeof + ushort.max;
+-
+-        auto file_len = seeker.seek(0, seeker.Anchor.End);
+-        assert( file_len <= size_t.max );
+-
+-        // We're going to need min(max_chunk_len, file_len) bytes.
+-        size_t chunk_len = max_chunk_len;
+-        if( file_len < max_chunk_len )
+-            chunk_len = cast(size_t) file_len;
+-        //Stderr.formatln(" . chunk_len = {}", chunk_len);
+-
+-        // Seek back and read in the chunk.  Don't forget to clean up after
+-        // ourselves.
+-        seeker.seek(-cast(long)chunk_len, seeker.Anchor.End);
+-        auto chunk_offset = seeker.seek(0, seeker.Anchor.Current);
+-        //Stderr.formatln(" . chunk_offset = {}", chunk_offset);
+-        auto chunk = new ubyte[chunk_len];
+-        scope(exit) delete chunk;
+-        readExact(source, chunk);
+-
+-        // Now look for our magic number.  Don't forget that on big-endian
+-        // machines, we need to byteswap the value we're looking for.
+-        uint eocd_magic = EndOfCDRecord.signature;
+-        version( BigEndian )
+-            swap(eocd_magic);
+-
+-        size_t eocd_loc = -1;
+-
+-        if( chunk_len >= 18 )
+-            for( size_t i=chunk_len-18; i>=0; --i )
+-            {
+-                if( *(cast(uint*)(chunk.ptr+i)) == eocd_magic )
+-                {
+-                    // Found the bugger!  Make sure we skip the signature (forgot
+-                    // to do that originally; talk about weird errors :P)
+-                    eocd_loc = i+4;
+-                    break;
+-                }
+-            }
+-
+-        // If we didn't find it, then we'll assume that this is not a valid
+-        // archive.
+-        if( eocd_loc == -1 )
+-            ZipException.missingdir;
+-
+-        // Ok, so we found it; now what?  Now we need to read the record
+-        // itself in.  eocd_loc is the offset within the chunk where the eocd
+-        // record was found, so slice it out.
+-        EndOfCDRecord eocdr;
+-        eocdr.fill(chunk[eocd_loc..$]);
+-
+-        // Excellent.  We're done here.
+-        return eocdr;
+-    }
+-
+-    /*
+-     * Opens the specified file for reading.  If the raw argument passed is
+-     * true, then the file is *not* decompressed.
+-     */
+-    InputStream open_file(FileHeader header, bool raw)
+-    {
+-        // Check to make sure that we actually *can* open this file.
+-        if( header.data.extract_version > MAX_EXTRACT_VERSION )
+-            ZipNotSupportedException.zipver(header.data.extract_version);
+-
+-        if( header.data.general_flags & UNSUPPORTED_FLAGS )
+-            ZipNotSupportedException.flags;
+-
+-        if( toMethod(header.data.compression_method) == Method.Unsupported )
+-            ZipNotSupportedException.method(header.data.compression_method);
+-
+-        // Open a raw stream
+-        InputStream stream = open_file_raw(header);
+-
+-        // If that's all they wanted, pass it back.
+-        if( raw )
+-            return stream;
+-
+-        // Next up, wrap in an appropriate decompression stream
+-        switch( toMethod(header.data.compression_method) )
+-        {
+-            case Method.Store:
+-                // Do nothing: \o/
+-                break;
+-
+-            case Method.Deflate:
+-                // Wrap in a zlib stream.  We want a raw deflate stream,
+-                // so force no encoding.
+-                stream = new ZlibInput(stream, ZlibInput.Encoding.None);
+-                break;
+-
+-            default:
+-                assert(false);
+-        }
+-
+-        // We done, yo!
+-        return stream;
+-    }
+-
+-    /*
+-     * Opens a file's raw input stream.  Basically, this returns a slice of
+-     * the archive's input stream.
+-     */
+-    InputStream open_file_raw(FileHeader header)
+-    {
+-        // Seek to and parse the local file header
+-        seeker.seek(header.data.relative_offset_of_local_header,
+-                seeker.Anchor.Begin);
+-
+-        {
+-            uint sig;
+-            readExact(source, (&sig)[0..1]);
+-            version( BigEndian ) swap(sig);
+-            if( sig != LocalFileHeader.signature )
+-                ZipException.badsig("local file header");
+-        }
+-
+-        LocalFileHeader lheader; lheader.fill(source);
+-
+-        if( !lheader.agrees_with(header) )
+-            ZipException.incons(header.file_name);
+-
+-        // Ok; get a slice stream for the file
+-        return new SliceSeekInputStream(
+-             source, seeker.seek(0, seeker.Anchor.Current),
+-             header.data.compressed_size);
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// ZipBlockWriter
+-
+-/**
+- * The ZipBlockWriter class is used to create a Zip archive.  It uses a
+- * writing iterator interface.
+- *
+- * Note that this class can only be used with output streams which can be
+- * freely seeked.
+- */
+-
+-class ZipBlockWriter : ZipWriter
+-{
+-    /**
+-     * Creates a ZipBlockWriter using the specified file on the local
+-     * filesystem.
+-     */
+-    this(char[] path)
+-    {
+-        file_output = new File(path, File.WriteCreate);
+-        this(file_output);
+-    }
+-
+-    /**
+-     * Creates a ZipBlockWriter using the provided OutputStream.  Please note
+-     * that this OutputStream must be attached to a conduit implementing the 
+-     * IConduit.Seek interface.
+-     */
+-    this(OutputStream output)
+-    in
+-    {
+-        assert( output !is null );
+-        assert( (cast(IConduit.Seek) output.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.output = output;
+-        this.seeker = output; // cast(IConduit.Seek) output;
+-
+-        // Default to Deflate compression
+-        method = Method.Deflate;
+-    }
+-
+-    /**
+-     * Finalises the archive, writes out the central directory, and closes the
+-     * output stream.
+-     */
+-    void finish()
+-    {
+-        put_cd;
+-        output.close();
+-        output = null;
+-        seeker = null;
+-
+-        if( file_output !is null ) delete file_output;
+-    }
+-
+-    /**
+-     * Adds a file from the local filesystem to the archive.
+-     */
+-    void putFile(ZipEntryInfo info, char[] path)
+-    {
+-        scope file = new File(path);
+-        scope(exit) file.close();
+-        putStream(info, file);
+-    }
+-
+-    /**
+-     * Adds a file using the contents of the given InputStream to the archive.
+-     */
+-    void putStream(ZipEntryInfo info, InputStream source)
+-    {
+-        put_compressed(info, source);
+-    }
+-
+-    /**
+-     * Transfers a file from another archive into this archive.  Note that
+-     * this method will not perform any compression: whatever compression was
+-     * applied to the file originally will be preserved.
+-     */
+-    void putEntry(ZipEntryInfo info, ZipEntry entry)
+-    {
+-        put_raw(info, entry);
+-    }
+-
+-    /**
+-     * Adds a file using the contents of the given array to the archive.
+-     */
+-    void putData(ZipEntryInfo info, void[] data)
+-    {
+-        //scope mc = new MemoryConduit(data);
+-        scope mc = new Array(data);
+-        scope(exit) mc.close;
+-        put_compressed(info, mc);
+-    }
+-
+-    /**
+-     * This property allows you to control what compression method should be
+-     * used for files being added to the archive.
+-     */
+-    Method method() { return _method; }
+-    Method method(Method v) { return _method = v; } /// ditto
+-
+-private:
+-    OutputStream output;
+-    OutputStream seeker;
+-    File file_output;
+-
+-    Method _method;
+-
+-    struct Entry
+-    {
+-        FileHeaderData data;
+-        long header_position;
+-        char[] filename;
+-        char[] comment;
+-        ubyte[] extra;
+-    }
+-    Entry[] entries;
+-
+-    void put_cd()
+-    {
+-        // check that there aren't too many CD entries
+-        if( entries.length > ushort.max )
+-            ZipException.toomanyentries;
+-
+-        auto cd_pos = seeker.seek(0, seeker.Anchor.Current);
+-        if( cd_pos > uint.max )
+-            ZipException.toolong;
+-
+-        foreach( entry ; entries )
+-        {
+-            FileHeader header;
+-            header.data = &entry.data;
+-            header.file_name = entry.filename;
+-            header.extra_field = entry.extra;
+-            header.file_comment = entry.comment;
+-
+-            write(output, FileHeader.signature);
+-            header.put(output);
+-        }
+-
+-        auto cd_len = seeker.seek(0, seeker.Anchor.Current) - cd_pos;
+-
+-        if( cd_len > uint.max )
+-            ZipException.cdtoolong;
+-
+-        {
+-            assert( entries.length < ushort.max );
+-            assert( cd_len < uint.max );
+-            assert( cd_pos < uint.max );
+-
+-            EndOfCDRecord eocdr;
+-            eocdr.data.central_directory_entries_on_this_disk =
+-                cast(ushort) entries.length;
+-            eocdr.data.central_directory_entries_total =
+-                cast(ushort) entries.length;
+-            eocdr.data.size_of_central_directory =
+-                cast(uint) cd_len;
+-            eocdr.data.offset_of_start_of_cd_from_starting_disk =
+-                cast(uint) cd_pos;
+-
+-            write(output, EndOfCDRecord.signature);
+-            eocdr.put(output);
+-        }
+-    }
+-
+-    void put_raw(ZipEntryInfo info, ZipEntry entry)
+-    {
+-        // Write out local file header
+-        LocalFileHeader.Data lhdata;
+-        auto chdata = entry.header.data;
+-        lhdata.extract_version = chdata.extract_version;
+-
+-        // Note: we need to mask off the data descriptor bit because we aren't
+-        // going to write one.
+-        lhdata.general_flags = chdata.general_flags & ~(1<<3);
+-        lhdata.compression_method = chdata.compression_method;
+-        lhdata.crc_32 = chdata.crc_32;
+-        lhdata.compressed_size = chdata.compressed_size;
+-        lhdata.uncompressed_size = chdata.uncompressed_size;
+-
+-        timeToDos(info.modified, lhdata.modification_file_time,
+-                                 lhdata.modification_file_date);
+-
+-        put_local_header(lhdata, info.name);
+-
+-        // Store comment
+-        entries[$-1].comment = info.comment;
+-
+-        // Output file contents
+-        {
+-            auto input = entry.open_raw;
+-            scope(exit) input.close;
+-            output.copy(input).flush();
+-        }
+-    }
+-
+-    void put_compressed(ZipEntryInfo info, InputStream source)
+-    {
+-        debug(Zip) Stderr.formatln("ZipBlockWriter.put_compressed()");
+-
+-        // Write out partial local file header
+-        auto header_pos = seeker.seek(0, seeker.Anchor.Current);
+-        debug(Zip) Stderr.formatln(" . header for {} at {}", info.name, header_pos);
+-        put_local_header(info, _method);
+-
+-        // Store comment
+-        entries[$-1].comment = info.comment;
+-
+-        uint crc;
+-        uint compressed_size;
+-        uint uncompressed_size;
+-
+-        // Output file contents
+-        {
+-            // Input/output chains
+-            InputStream in_chain = source;
+-            OutputStream out_chain = new WrapSeekOutputStream(output);
+-
+-            // Count number of bytes coming in from the source file
+-            scope in_counter = new CounterInput(in_chain);
+-            in_chain = in_counter;
+-            assert( in_counter.count <= typeof(uncompressed_size).max );
+-            scope(success) uncompressed_size = cast(uint) in_counter.count;
+-
+-            // Count the number of bytes going out to the archive
+-            scope out_counter = new CounterOutput(out_chain);
+-            out_chain = out_counter;
+-            assert( out_counter.count <= typeof(compressed_size).max );
+-            scope(success) compressed_size = cast(uint) out_counter.count;
+-
+-            // Add crc
+-            scope crc_d = new Crc32(/*CRC_MAGIC*/);
+-            scope crc_s = new DigestInput(in_chain, crc_d);
+-            in_chain = crc_s;
+-            scope(success)
+-            {
+-                debug(Zip) Stderr.formatln(" . Success: storing CRC.");
+-                crc = crc_d.crc32Digest;
+-            }
+-
+-            // Add compression
+-            ZlibOutput compress;
+-            scope(exit) if( compress !is null ) delete compress;
+-
+-            switch( _method )
+-            {
+-                case Method.Store:
+-                    break;
+-
+-                case Method.Deflate:
+-                    compress = new ZlibOutput(out_chain,
+-                            ZlibOutput.Level.init, ZlibOutput.Encoding.None);
+-                    out_chain = compress;
+-                    break;
+-
+-                default:
+-                    assert(false);
+-            }
+-
+-            // All done.
+-            scope(exit) in_chain.close();
+-            scope(success) in_chain.flush();
+-            scope(exit) out_chain.close();
+-
+-            out_chain.copy(in_chain).flush;
+-
+-            debug(Zip) if( compress !is null )
+-            {
+-                Stderr.formatln(" . compressed to {} bytes", compress.written);
+-            }
+-
+-            debug(Zip) Stderr.formatln(" . wrote {} bytes", out_counter.count);
+-            debug(Zip) Stderr.formatln(" . contents written");
+-        }
+-
+-        debug(Zip) Stderr.formatln(" . CRC for \"{}\": 0x{:x8}", info.name, crc);
+-
+-        // Rewind, and patch the header
+-        auto final_pos = seeker.seek(0, seeker.Anchor.Current);
+-        seeker.seek(header_pos);
+-        patch_local_header(crc, compressed_size, uncompressed_size);
+-
+-        // Seek back to the end of the file, and we're done!
+-        seeker.seek(final_pos);
+-    }
+-
+-    /*
+-     * Patches the local file header starting at the current output location
+-     * with updated crc and size information.  Also updates the current last
+-     * Entry.
+-     */
+-    void patch_local_header(uint crc_32, uint compressed_size,
+-            uint uncompressed_size)
+-    {
+-        /* BUG: For some reason, this code won't compile.  No idea why... if
+-         * you instantiate LFHD, it says that there is no "offsetof" property.
+-         */
+-        /+
+-        alias LocalFileHeaderData LFHD;
+-        static assert( LFHD.compressed_size.offsetof
+-                == LFHD.crc_32.offsetof + 4 );
+-        static assert( LFHD.uncompressed_size.offsetof
+-                == LFHD.compressed_size.offsetof + 4 );
+-        +/
+-
+-        // Don't forget we have to seek past the signature, too
+-        // BUG: .offsetof is broken here
+-        /+seeker.seek(LFHD.crc_32.offsetof+4, seeker.Anchor.Current);+/
+-        seeker.seek(10+4, seeker.Anchor.Current);
+-        write(output, crc_32);
+-        write(output, compressed_size);
+-        write(output, uncompressed_size);
+-
+-        with( entries[$-1] )
+-        {
+-            data.crc_32 = crc_32;
+-            data.compressed_size = compressed_size;
+-            data.uncompressed_size = uncompressed_size;
+-        }
+-    }
+-
+-    /*
+-     * Generates and outputs a local file header from the given info block and
+-     * compression method.  Note that the crc_32, compressed_size and
+-     * uncompressed_size header fields will be set to zero, and must be
+-     * patched.
+-     */
+-    void put_local_header(ZipEntryInfo info, Method method)
+-    {
+-        LocalFileHeader.Data data;
+-
+-        data.compression_method = fromMethod(method);
+-        timeToDos(info.modified, data.modification_file_time,
+-                                 data.modification_file_date);
+-
+-        put_local_header(data, info.name);
+-    }
+-
+-    /*
+-     * Writes the given local file header data and filename out to the output
+-     * stream.  It also appends a new Entry with the data and filename.
+-     */
+-    void put_local_header(LocalFileHeaderData data,
+-            char[] file_name)
+-    {
+-        auto f_name = Path.normalize(file_name);
+-        auto p = Path.parse(f_name);
+-
+-        // Compute Zip version
+-        if( data.extract_version == data.extract_version.max )
+-        {
+-
+-            ushort zipver = 10;
+-            void minver(ushort v) { zipver = v>zipver ? v : zipver; }
+-
+-            {
+-                // Compression method
+-                switch( data.compression_method )
+-                {
+-                    case 0: minver(10); break;
+-                    case 8: minver(20); break;
+-                    default:
+-                        assert(false);
+-                }
+-
+-                // File is a folder
+-                if( f_name.length > 0 && f_name[$-1] == '/' )
+-                    // Is a directory, not a real file
+-                    minver(20);
+-            }
+-            data.extract_version = zipver;
+-        }
+-
+-        /+// Encode filename
+-        auto file_name_437 = utf8_to_cp437(file_name);
+-        if( file_name_437 is null )
+-            ZipException.fnencode;+/
+-
+-        /+// Set up file name length
+-        if( file_name_437.length > ushort.max )
+-            ZipException.fntoolong;
+-
+-        data.file_name_length = file_name_437.length;+/
+-
+-        LocalFileHeader header;
+-        header.data = data;
+-        if (p.isAbsolute)
+-            f_name = f_name[p.root.length+1..$]; 
+-        header.file_name = f_name;
+-
+-        // Write out the header and the filename
+-        auto header_pos = seeker.seek(0, seeker.Anchor.Current);
+-
+-        write(output, LocalFileHeader.signature);
+-        header.put(output);
+-
+-        // Save the header
+-        assert( header_pos <= int.max );
+-        Entry entry;
+-        entry.data.fromLocal(header.data);
+-        entry.filename = header.file_name;
+-        entry.header_position = header_pos;
+-        entry.data.relative_offset_of_local_header = cast(int) header_pos;
+-        entries ~= entry;
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// ZipEntry
+-
+-/**
+- * This class is used to represent a single entry in an archive.
+- * Specifically, it combines meta-data about the file (see the info field)
+- * along with the two basic operations on an entry: open and verify.
+- */
+-class ZipEntry
+-{
+-    /**
+-     * Header information on the file.  See the ZipEntryInfo structure for
+-     * more information.
+-     */
+-    ZipEntryInfo info;
+-
+-    /**
+-     * Size (in bytes) of the file's uncompressed contents.
+-     */
+-    uint size()
+-    {
+-        return header.data.uncompressed_size;
+-    }
+-
+-    /**
+-     * Opens a stream for reading from the file.  The contents of this stream
+-     * represent the decompressed contents of the file stored in the archive.
+-     *
+-     * You should not assume that the returned stream is seekable.
+-     *
+-     * Note that the returned stream may be safely closed without affecting
+-     * the underlying archive stream.
+-     *
+-     * If the file has not yet been verified, then the stream will be checked
+-     * as you read from it.  When the stream is either exhausted or closed,
+-     * then the integrity of the file's data will be checked.  This means that
+-     * if the file is corrupt, an exception will be thrown only after you have
+-     * finished reading from the stream.  If you wish to make sure the data is
+-     * valid before you read from the file, call the verify method.
+-     */
+-    InputStream open()
+-    {
+-        // If we haven't verified yet, wrap the stream in the appropriate
+-        // decorators.
+-        if( !verified )
+-            return new ZipEntryVerifier(this, open_dg(header, false));
+-
+-        else
+-            return open_dg(header, false);
+-    }
+-
+-    /**
+-     * Verifies the contents of this file by computing the CRC32 checksum,
+-     * and comparing it against the stored one.  Throws an exception if the
+-     * checksums do not match.
+-     *
+-     * Not valid on streamed Zip archives.
+-     */
+-    void verify()
+-    {
+-        // If we haven't verified the contents yet, just read everything in
+-        // to trigger it.
+-        auto s = open;
+-        auto buffer = new ubyte[s.conduit.bufferSize];
+-        while( s.read(buffer) != s.Eof )
+-            {/*Do nothing*/}
+-        s.close;
+-    }
+-
+-    /**
+-     * Creates a new, independent copy of this instance.
+-     */
+-    ZipEntry dup()
+-    {
+-        return new ZipEntry(header, open_dg);
+-    }
+-
+-private:
+-    /*
+-     * Callback used to open the file.
+-     */
+-    alias InputStream delegate(FileHeader, bool raw) open_dg_t;
+-    open_dg_t open_dg;
+-
+-    /*
+-     * Raw ZIP header.
+-     */
+-    FileHeader header;
+-
+-    /*
+-     * The flag used to keep track of whether the file's contents have been
+-     * verified.
+-     */
+-    bool verified = false;
+-
+-    /*
+-     * Opens a stream that does not perform any decompression or
+-     * transformation of the file contents.  This is used internally by
+-     * ZipWriter to perform fast zip to zip transfers without having to
+-     * decompress and then recompress the contents.
+-     *
+-     * Note that because zip stores CRCs for the *uncompressed* data, this
+-     * method currently does not do any verification.
+-     */
+-    InputStream open_raw()
+-    {
+-        return open_dg(header, true);
+-    }
+-
+-    /*
+-     * Creates a new ZipEntry from the FileHeader.
+-     */
+-    this(FileHeader header, open_dg_t open_dg)
+-    {
+-        this.reset(header, open_dg);
+-    }
+-
+-    /*
+-     * Resets the current instance with new values.
+-     */
+-    ZipEntry reset(FileHeader header, open_dg_t open_dg)
+-    {
+-        this.header = header;
+-        this.open_dg = open_dg;
+-        with( info )
+-        {
+-            name = Path.standard(header.file_name.dup);
+-            dosToTime(header.data.modification_file_time,
+-                      header.data.modification_file_date,
+-                      modified);
+-            comment = header.file_comment.dup;
+-        }
+-
+-        this.verified = false;
+-
+-        return this;
+-    }
+-}
+-
+-/**
+- * This structure contains various pieces of meta-data on a file.  The
+- * contents of this structure may be safely mutated.
+- *
+- * This structure is also used to specify meta-data about a file when adding
+- * it to an archive.
+- */
+-struct ZipEntryInfo
+-{
+-    /// Full path and file name of this file.
+-    char[] name;
+-    /// Modification timestamp.  If this is left uninitialised when passed to
+-    /// a ZipWriter, it will be reset to the current system time.
+-    Time modified = Time.min;
+-    /// Comment on the file.
+-    char[] comment;
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Exceptions
+-//
+-
+-/**
+- * This is the base class from which all exceptions generated by this module
+- * derive from.
+- */
+-class ZipException : Exception
+-{
+-    this(char[] msg) { super(msg); }
+-
+-private:
+-    alias typeof(this) thisT;
+-    static void opCall(char[] msg) { throw new ZipException(msg); }
+-
+-    static void badsig()
+-    {
+-        thisT("corrupt signature or unexpected section found");
+-    }
+-
+-    static void badsig(char[] type)
+-    {
+-        thisT("corrupt "~type~" signature or unexpected section found");
+-    }
+-
+-    static void incons(char[] name)
+-    {
+-        thisT("inconsistent headers for file \""~name~"\"; "
+-                "archive is likely corrupted");
+-    }
+-
+-    static void missingdir()
+-    {
+-        thisT("could not locate central archive directory; "
+-                "file is corrupt or possibly not a Zip archive");
+-    }
+-
+-    static void toomanyentries()
+-    {
+-        thisT("too many archive entries");
+-    }
+-
+-    static void toolong()
+-    {
+-        thisT("archive is too long; limited to 4GB total");
+-    }
+-
+-    static void cdtoolong()
+-    {
+-        thisT("central directory is too long; limited to 4GB total");
+-    }
+-
+-    static void fntoolong()
+-    {
+-        thisT("file name too long; limited to 65,535 characters");
+-    }
+-
+-    static void eftoolong()
+-    {
+-        thisT("extra field too long; limited to 65,535 characters");
+-    }
+-
+-    static void cotoolong()
+-    {
+-        thisT("extra field too long; limited to 65,535 characters");
+-    }
+-
+-    static void fnencode()
+-    {
+-        thisT("could not encode filename into codepage 437");
+-    }
+-
+-    static void coencode()
+-    {
+-        thisT("could not encode comment into codepage 437");
+-    }
+-
+-    static void tooold()
+-    {
+-        thisT("cannot represent dates before January 1, 1980");
+-    }
+-}
+-
+-/**
+- * This exception is thrown if a ZipReader detects that a file's contents do
+- * not match the stored checksum.
+- */
+-class ZipChecksumException : ZipException
+-{
+-    this(char[] name)
+-    {
+-        super("checksum failed on zip entry \""~name~"\"");
+-    }
+-
+-private:
+-    static void opCall(char[] name) { throw new ZipChecksumException(name); }
+-}
+-
+-/**
+- * This exception is thrown if you call get reader method when there are no
+- * more files in the archive.
+- */
+-class ZipExhaustedException : ZipException
+-{
+-    this() { super("no more entries in archive"); }
+-
+-private:
+-    static void opCall() { throw new ZipExhaustedException; }
+-}
+-
+-/**
+- * This exception is thrown if you attempt to read an archive that uses
+- * features not supported by the reader.
+- */
+-class ZipNotSupportedException : ZipException
+-{
+-    this(char[] msg) { super(msg); }
+-
+-private:
+-    alias ZipNotSupportedException thisT;
+-
+-    static void opCall(char[] msg)
+-    {
+-        throw new thisT(msg ~ " not supported");
+-    }
+-
+-    static void spanned()
+-    {
+-        thisT("split and multi-disk archives");
+-    }
+-
+-    static void zipver(ushort ver)
+-    {
+-        throw new thisT("zip format version "
+-                ~Integer.toString(ver / 10)
+-                ~"."
+-                ~Integer.toString(ver % 10)
+-                ~" not supported; maximum of version "
+-                ~Integer.toString(MAX_EXTRACT_VERSION / 10)
+-                ~"."
+-                ~Integer.toString(MAX_EXTRACT_VERSION % 10)
+-                ~" supported.");
+-    }
+-
+-    static void flags()
+-    {
+-        throw new thisT("unknown or unsupported file flags enabled");
+-    }
+-
+-    static void method(ushort m)
+-    {
+-        // Cheat here and work out what the method *actually* is
+-        char[] ms;
+-        switch( m )
+-        {
+-            case 0:
+-            case 8:     assert(false); // supported
+-
+-            case 1:     ms = "Shrink"; break;
+-            case 2:     ms = "Reduce (factor 1)"; break;
+-            case 3:     ms = "Reduce (factor 2)"; break;
+-            case 4:     ms = "Reduce (factor 3)"; break;
+-            case 5:     ms = "Reduce (factor 4)"; break;
+-            case 6:     ms = "Implode"; break;
+-
+-            case 9:     ms = "Deflate64"; break;
+-            case 10:    ms = "TERSE (old)"; break;
+-
+-            case 12:    ms = "Bzip2"; break;
+-            case 14:    ms = "LZMA"; break;
+-
+-            case 18:    ms = "TERSE (new)"; break;
+-            case 19:    ms = "LZ77"; break;
+-
+-            case 97:    ms = "WavPack"; break;
+-            case 98:    ms = "PPMd"; break;
+-
+-            default:    ms = "unknown";
+-        }
+-
+-        thisT(ms ~ " compression method");
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Convenience methods
+-
+-void createArchive(char[] archive, Method method, char[][] files...)
+-{
+-    scope zw = new ZipBlockWriter(archive);
+-    zw.method = method;
+-
+-    foreach( file ; files )
+-    {
+-        scope fp = new FilePath(file);
+-        
+-        ZipEntryInfo zi;
+-        zi.name = file;
+-        zi.modified = fp.modified;
+-
+-        zw.putFile(zi, file);
+-    }
+-
+-    zw.finish;
+-}
+-
+-void extractArchive(char[] archive, char[] dest)
+-{
+-    scope zr = new ZipBlockReader(archive);
+-
+-    foreach( entry ; zr )
+-    {
+-        // Skip directories
+-        if( entry.info.name[$-1] == '/' ||
+-            entry.info.name[$-1] == '\\') continue;
+-
+-        auto path = Path.join(dest, entry.info.name);
+-        path = Path.normalize(path);
+-
+-        // Create the parent directory if necessary.
+-        auto parent = Path.parse(path).parent;
+-        if( !Path.exists(parent) )
+-        {
+-            Path.createPath(parent);
+-        }
+-
+-        path = Path.native(path);
+-
+-        // Write out the file
+-        scope fout = new File(path, File.WriteCreate);
+-        fout.copy(entry.open);
+-        fout.close;
+-
+-        // Update timestamps
+-        auto oldTS = Path.timeStamps(path);
+-        Path.timeStamps(path, oldTS.accessed, entry.info.modified);
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Private implementation stuff
+-//
+-
+-private:
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Verification stuff
+-
+-/*
+- * This class wraps an input stream, and computes the CRC as it passes
+- * through.  On the event of either a close or EOF, it checks the CRC against
+- * the one in the provided ZipEntry.  If they don't match, it throws an
+- * exception.
+- */
+-
+-class ZipEntryVerifier : InputStream
+-{
+-    this(ZipEntry entry, InputStream source)
+-    in
+-    {
+-        assert( entry !is null );
+-        assert( source !is null );
+-    }
+-    body
+-    {
+-        this.entry = entry;
+-        this.digest = new Crc32;
+-        this.source = new DigestInput(source, digest);
+-    }
+-
+-    IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    long seek (long ofs, Anchor anchor = Anchor.Begin) 
+-    {
+-        return source.seek (ofs, anchor);
+-    }
+-
+-    void close()
+-    {
+-        check;
+-
+-        this.source.close;
+-        this.entry = null;
+-        this.digest = null;
+-        this.source = null;
+-    }
+-
+-    size_t read(void[] dst)
+-    {
+-        auto bytes = source.read(dst);
+-        if( bytes == IConduit.Eof )
+-            check;
+-        return bytes;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-    
+-    override InputStream flush()
+-    {
+-        this.source.flush;
+-        return this;
+-    }
+-
+-private:
+-    Crc32 digest;
+-    InputStream source;
+-    ZipEntry entry;
+-
+-    void check()
+-    {
+-        if( digest is null ) return;
+-
+-        auto crc = digest.crc32Digest;
+-        delete digest;
+-
+-        if( crc != entry.header.data.crc_32 )
+-            ZipChecksumException(entry.info.name);
+-
+-        else
+-            entry.verified = true;
+-    }
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// IO functions
+-
+-/*
+- * Really, seriously, read some bytes without having to go through a sodding
+- * buffer.
+- */
+-void readExact(InputStream s, void[] dst)
+-{
+-    //Stderr.formatln("readExact(s, [0..{}])", dst.length);
+-    while( dst.length > 0 )
+-    {
+-        auto octets = s.read(dst);
+-        //Stderr.formatln(" . octets = {}", octets);
+-        if( octets == -1 ) // Beware the dangers of MAGICAL THINKING
+-            throw new Exception("unexpected end of stream");
+-        dst = dst[octets..$];
+-    }
+-}
+-
+-/*
+- * Really, seriously, write some bytes.
+- */
+-void writeExact(OutputStream s, void[] src)
+-{
+-    while( src.length > 0 )
+-    {
+-        auto octets = s.write(src);
+-        if( octets == -1 )
+-            throw new Exception("unexpected end of stream");
+-        src = src[octets..$];
+-    }
+-}
+-
+-void write(T)(OutputStream s, T value)
+-{
+-    version( BigEndian ) swap(value);
+-    writeExact(s, (&value)[0..1]);
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Endian garbage
+-
+-void swapAll(T)(ref T data)
+-{
+-    static if( is(typeof(T.record_fields)) )
+-        const fields = T.record_fields;
+-    else
+-        const fields = data.tupleof.length;
+-
+-    foreach( i,_ ; data.tupleof )
+-    {
+-        if( i == fields ) break;
+-        swap(data.tupleof[i]);
+-    }
+-}
+-
+-void swap(T)(ref T data)
+-{
+-    static if( T.sizeof == 1 )
+-        {}
+-    else static if( T.sizeof == 2 )
+-        ByteSwap.swap16(&data, 2);
+-    else static if( T.sizeof == 4 )
+-        ByteSwap.swap32(&data, 4);
+-    else static if( T.sizeof == 8 )
+-        ByteSwap.swap64(&data, 8);
+-    else static if( T.sizeof == 10 )
+-        ByteSwap.swap80(&data, 10);
+-    else
+-        static assert(false, "Can't swap "~T.stringof~"s.");
+-}
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// IBM Code Page 437 stuff
+-//
+-
+-const char[][] cp437_to_utf8_map_low = [
+-    "\u0000"[], "\u263a",   "\u263b",   "\u2665",
+-    "\u2666",   "\u2663",   "\u2660",   "\u2022",
+-    "\u25d8",   "\u25cb",   "\u25d9",   "\u2642",
+-    "\u2640",   "\u266a",   "\u266b",   "\u263c",
+-
+-    "\u25b6",   "\u25c0",   "\u2195",   "\u203c",
+-    "\u00b6",   "\u00a7",   "\u25ac",   "\u21a8",
+-    "\u2191",   "\u2193",   "\u2192",   "\u2190",
+-    "\u221f",   "\u2194",   "\u25b2",   "\u25bc"
+-];
+-
+-const char[][] cp437_to_utf8_map_high = [
+-    "\u00c7"[], "\u00fc",   "\u00e9",   "\u00e2",
+-    "\u00e4",   "\u00e0",   "\u00e5",   "\u00e7",
+-    "\u00ea",   "\u00eb",   "\u00e8",   "\u00ef",
+-    "\u00ee",   "\u00ec",   "\u00c4",   "\u00c5",
+-
+-    "\u00c9",   "\u00e6",   "\u00c6",   "\u00f4",
+-    "\u00f6",   "\u00f2",   "\u00fb",   "\u00f9",
+-    "\u00ff",   "\u00d6",   "\u00dc",   "\u00f8",
+-    "\u00a3",   "\u00a5",   "\u20a7",   "\u0192",
+-
+-    "\u00e1",   "\u00ed",   "\u00f3",   "\u00fa",
+-    "\u00f1",   "\u00d1",   "\u00aa",   "\u00ba",
+-    "\u00bf",   "\u2310",   "\u00ac",   "\u00bd",
+-    "\u00bc",   "\u00a1",   "\u00ab",   "\u00bb",
+-
+-    "\u2591",   "\u2592",   "\u2593",   "\u2502",
+-    "\u2524",   "\u2561",   "\u2562",   "\u2556",
+-    "\u2555",   "\u2563",   "\u2551",   "\u2557",
+-    "\u255d",   "\u255c",   "\u255b",   "\u2510",
+-
+-    "\u2514",   "\u2534",   "\u252c",   "\u251c",
+-    "\u2500",   "\u253c",   "\u255e",   "\u255f",
+-    "\u255a",   "\u2554",   "\u2569",   "\u2566",
+-    "\u2560",   "\u2550",   "\u256c",   "\u2567",
+-
+-    "\u2568",   "\u2564",   "\u2565",   "\u2559",
+-    "\u2558",   "\u2552",   "\u2553",   "\u256b",
+-    "\u256a",   "\u2518",   "\u250c",   "\u2588",
+-    "\u2584",   "\u258c",   "\u2590",   "\u2580",
+-    "\u03b1",   "\u00df",   "\u0393",   "\u03c0",
+-    "\u03a3",   "\u03c3",   "\u00b5",   "\u03c4",
+-    "\u03a6",   "\u0398",   "\u03a9",   "\u03b4",
+-    "\u221e",   "\u03c6",   "\u03b5",   "\u2229",
+-
+-    "\u2261",   "\u00b1",   "\u2265",   "\u2264",
+-    "\u2320",   "\u2321",   "\u00f7",   "\u2248",
+-    "\u00b0",   "\u2219",   "\u00b7",   "\u221a",
+-    "\u207f",   "\u00b2",   "\u25a0",   "\u00a0"
+-];
+-
+-char[] cp437_to_utf8(ubyte[] s)
+-{
+-    foreach( i,c ; s )
+-    {
+-        if( (1 <= c && c <= 31) || c >= 127 )
+-        {
+-            /* Damn; we got a character not in ASCII.  Since this is the first
+-             * non-ASCII character we found, copy everything up to this point
+-             * into the output verbatim.  We'll allocate twice as much space
+-             * as there are remaining characters to ensure we don't need to do
+-             * any further allocations.
+-             */
+-            auto r = new char[i+2*(s.length-i)];
+-            r[0..i] = cast(char[]) s[0..i];
+-            size_t k=i; // current length
+-
+-            // We insert new characters at r[i+j+k]
+-
+-            foreach( d ; s[i..$] )
+-            {
+-                if( 32 <= d && d <= 126 || d == 0 )
+-                {
+-                    r[k++] = d;
+-                }
+-                else if( 1 <= d && d <= 31 )
+-                {
+-                    char[] repl = cp437_to_utf8_map_low[d];
+-                    r[k..k+repl.length] = repl[];
+-                    k += repl.length;
+-                }
+-                else if( d == 127 )
+-                {
+-                    char[] repl = "\u2302";
+-                    r[k..k+repl.length] = repl[];
+-                    k += repl.length;
+-                }
+-                else if( d > 127 )
+-                {
+-                    char[] repl = cp437_to_utf8_map_high[d-128];
+-                    r[k..k+repl.length] = repl[];
+-                    k += repl.length;
+-                }
+-                else
+-                    assert(false);
+-            }
+-
+-            return r[0..k];
+-        }
+-    }
+-
+-    /* If we got here, then all the characters in s are also in ASCII, which
+-     * means it's also valid UTF-8; return the string unmodified.
+-     */
+-    return cast(char[]) s;
+-}
+-
+-debug( UnitTest )
+-{
+-    unittest
+-    {
+-        char[] c(char[] s) { return cp437_to_utf8(cast(ubyte[]) s); }
+-
+-        auto s = c("Hi there \x01 old \x0c!");
+-        assert( s == "Hi there \u263a old \u2640!", "\""~s~"\"" );
+-        s = c("Marker \x7f and divide \xf6.");
+-        assert( s == "Marker \u2302 and divide \u00f7.", "\""~s~"\"" );
+-    }
+-}
+-
+-const char[dchar] utf8_to_cp437_map;
+-
+-static this()
+-{
+-    utf8_to_cp437_map = [
+-        '\u0000': '\x00', '\u263a': '\x01', '\u263b': '\x02', '\u2665': '\x03',
+-        '\u2666': '\x04', '\u2663': '\x05', '\u2660': '\x06', '\u2022': '\x07',
+-        '\u25d8': '\x08', '\u25cb': '\x09', '\u25d9': '\x0a', '\u2642': '\x0b',
+-        '\u2640': '\x0c', '\u266a': '\x0d', '\u266b': '\x0e', '\u263c': '\x0f',
+-
+-        '\u25b6': '\x10', '\u25c0': '\x11', '\u2195': '\x12', '\u203c': '\x13',
+-        '\u00b6': '\x14', '\u00a7': '\x15', '\u25ac': '\x16', '\u21a8': '\x17',
+-        '\u2191': '\x18', '\u2193': '\x19', '\u2192': '\x1a', '\u2190': '\x1b',
+-        '\u221f': '\x1c', '\u2194': '\x1d', '\u25b2': '\x1e', '\u25bc': '\x1f',
+-
+-        /*
+-         * Printable ASCII range (well, most of it) is handled specially.
+-         */
+-
+-        '\u00c7': '\x80', '\u00fc': '\x81', '\u00e9': '\x82', '\u00e2': '\x83',
+-        '\u00e4': '\x84', '\u00e0': '\x85', '\u00e5': '\x86', '\u00e7': '\x87',
+-        '\u00ea': '\x88', '\u00eb': '\x89', '\u00e8': '\x8a', '\u00ef': '\x8b',
+-        '\u00ee': '\x8c', '\u00ec': '\x8d', '\u00c4': '\x8e', '\u00c5': '\x8f',
+-
+-        '\u00c9': '\x90', '\u00e6': '\x91', '\u00c6': '\x92', '\u00f4': '\x93',
+-        '\u00f6': '\x94', '\u00f2': '\x95', '\u00fb': '\x96', '\u00f9': '\x97',
+-        '\u00ff': '\x98', '\u00d6': '\x99', '\u00dc': '\x9a', '\u00f8': '\x9b',
+-        '\u00a3': '\x9c', '\u00a5': '\x9d', '\u20a7': '\x9e', '\u0192': '\x9f',
+-
+-        '\u00e1': '\xa0', '\u00ed': '\xa1', '\u00f3': '\xa2', '\u00fa': '\xa3',
+-        '\u00f1': '\xa4', '\u00d1': '\xa5', '\u00aa': '\xa6', '\u00ba': '\xa7',
+-        '\u00bf': '\xa8', '\u2310': '\xa9', '\u00ac': '\xaa', '\u00bd': '\xab',
+-        '\u00bc': '\xac', '\u00a1': '\xad', '\u00ab': '\xae', '\u00bb': '\xaf',
+-
+-        '\u2591': '\xb0', '\u2592': '\xb1', '\u2593': '\xb2', '\u2502': '\xb3',
+-        '\u2524': '\xb4', '\u2561': '\xb5', '\u2562': '\xb6', '\u2556': '\xb7',
+-        '\u2555': '\xb8', '\u2563': '\xb9', '\u2551': '\xba', '\u2557': '\xbb',
+-        '\u255d': '\xbc', '\u255c': '\xbd', '\u255b': '\xbe', '\u2510': '\xbf',
+-
+-        '\u2514': '\xc0', '\u2534': '\xc1', '\u252c': '\xc2', '\u251c': '\xc3',
+-        '\u2500': '\xc4', '\u253c': '\xc5', '\u255e': '\xc6', '\u255f': '\xc7',
+-        '\u255a': '\xc8', '\u2554': '\xc9', '\u2569': '\xca', '\u2566': '\xcb',
+-        '\u2560': '\xcc', '\u2550': '\xcd', '\u256c': '\xce', '\u2567': '\xcf',
+-
+-        '\u2568': '\xd0', '\u2564': '\xd1', '\u2565': '\xd2', '\u2559': '\xd3',
+-        '\u2558': '\xd4', '\u2552': '\xd5', '\u2553': '\xd6', '\u256b': '\xd7',
+-        '\u256a': '\xd8', '\u2518': '\xd9', '\u250c': '\xda', '\u2588': '\xdb',
+-        '\u2584': '\xdc', '\u258c': '\xdd', '\u2590': '\xde', '\u2580': '\xdf',
+-
+-        '\u03b1': '\xe0', '\u00df': '\xe1', '\u0393': '\xe2', '\u03c0': '\xe3',
+-        '\u03a3': '\xe4', '\u03c3': '\xe5', '\u00b5': '\xe6', '\u03c4': '\xe7',
+-        '\u03a6': '\xe8', '\u0398': '\xe9', '\u03a9': '\xea', '\u03b4': '\xeb',
+-        '\u221e': '\xec', '\u03c6': '\xed', '\u03b5': '\xee', '\u2229': '\xef',
+-
+-        '\u2261': '\xf0', '\u00b1': '\xf1', '\u2265': '\xf2', '\u2264': '\xf3',
+-        '\u2320': '\xf4', '\u2321': '\xf5', '\u00f7': '\xf6', '\u2248': '\xf7',
+-        '\u00b0': '\xf8', '\u2219': '\xf9', '\u00b7': '\xfa', '\u221a': '\xfb',
+-        '\u207f': '\xfc', '\u00b2': '\xfd', '\u25a0': '\xfe', '\u00a0': '\xff'
+-    ];
+-}
+-
+-ubyte[] utf8_to_cp437(char[] s)
+-{
+-    foreach( i,dchar c ; s )
+-    {
+-        if( !((32 <= c && c <= 126) || c == 0) )
+-        {
+-            /* We got a character not in CP 437: we need to create a buffer to
+-             * hold the new string.  Since UTF-8 is *always* larger than CP
+-             * 437, we need, at most, an array of the same number of elements.
+-             */
+-            auto r = new ubyte[s.length];
+-            r[0..i] = cast(ubyte[]) s[0..i];
+-            size_t k=i;
+-
+-            foreach( dchar d ; s[i..$] )
+-            {
+-                if( 32 <= d && d <= 126 || d == 0 )
+-                    r[k++] = d;
+-
+-                else if( d == '\u2302' )
+-                    r[k++] = '\x7f';
+-
+-                else if( auto e_ptr = d in utf8_to_cp437_map )
+-                    r[k++] = *e_ptr;
+-
+-                else
+-                {
+-                    throw new Exception("cannot encode character \""
+-                            ~ Integer.toString(cast(uint)d)
+-                            ~ "\" in codepage 437.");
+-                }
+-            }
+-
+-            return r[0..k];
+-        }
+-    }
+-
+-    // If we got here, then the entire string is printable ASCII, which just
+-    // happens to *also* be valid CP 437!  Huzzah!
+-    return cast(ubyte[]) s;
+-}
+-
+-debug( UnitTest )
+-{
+-    unittest
+-    {
+-        alias cp437_to_utf8 x;
+-        alias utf8_to_cp437 y;
+-
+-        ubyte[256] s;
+-        foreach( i,ref c ; s )
+-            c = i;
+-
+-        auto a = x(s);
+-        auto b = y(a);
+-        if(!( b == s ))
+-        {
+-            // Display list of characters that failed to convert as expected,
+-            // and what value we got.
+-            auto hex = "0123456789abcdef";
+-            auto msg = "".dup;
+-            foreach( i,ch ; b )
+-            {
+-                if( ch != i )
+-                {
+-                    msg ~= hex[i>>4];
+-                    msg ~= hex[i&15];
+-                    msg ~= " (";
+-                    msg ~= hex[ch>>4];
+-                    msg ~= hex[ch&15];
+-                    msg ~= "), ";
+-                }
+-            }
+-            msg ~= "failed.";
+-
+-            assert( false, msg );
+-        }
+-    }
+-}
+-
+-/*
+- * This is here to simplify the code elsewhere.
+- */
+-char[] utf8_to_utf8(ubyte[] s) { return cast(char[]) s; }
+-ubyte[] utf8_to_utf8(char[] s) { return cast(ubyte[]) s; }
+-
+-//////////////////////////////////////////////////////////////////////////////
+-//////////////////////////////////////////////////////////////////////////////
+-//
+-// Date/time stuff
+-
+-void dosToTime(ushort dostime, ushort dosdate, out Time time)
+-{
+-    uint sec, min, hour, day, mon, year;
+-    sec = (dostime & 0b00000_000000_11111) * 2;
+-    min = (dostime & 0b00000_111111_00000) >> 5;
+-    hour= (dostime & 0b11111_000000_00000) >> 11;
+-    day = (dosdate & 0b0000000_0000_11111);
+-    mon = (dosdate & 0b0000000_1111_00000) >> 5;
+-    year=((dosdate & 0b1111111_0000_00000) >> 9) + 1980;
+-
+-    // This code rules!
+-    time = Gregorian.generic.toTime(year, mon, day, hour, min, sec);
+-}
+-
+-void timeToDos(Time time, out ushort dostime, out ushort dosdate)
+-{
+-    // Treat Time.min specially
+-    if( time == Time.min )
+-        time = WallClock.now;
+-
+-    // *muttering happily*
+-    auto date = Gregorian.generic.toDate(time);
+-    if( date.year < 1980 )
+-        ZipException.tooold;
+-
+-    auto tod = time.time();
+-    dostime = cast(ushort) (
+-        (tod.seconds / 2)
+-      | (tod.minutes << 5)
+-      | (tod.hours   << 11));
+-
+-    dosdate = cast(ushort) (
+-        (date.day)
+-      | (date.month << 5)
+-      | ((date.year - 1980) << 9));
+-}
+-
+-// ************************************************************************** //
+-// ************************************************************************** //
+-// ************************************************************************** //
+-
+-// Dependencies
+-private:
+-
+-import tango.io.device.Conduit : Conduit;
+-
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Prerelease
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-//module tangox.io.stream.CounterStream;
+-
+-//import tango.io.device.Conduit : Conduit;
+-//import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-
+-/**
+- * The counter stream classes are used to keep track of how many bytes flow
+- * through a stream.
+- *
+- * To use them, simply wrap it around an existing stream.  The number of bytes
+- * that have flowed through the wrapped stream may be accessed using the
+- * count member.
+- */
+-class CounterInput : InputStream
+-{
+-    ///
+-    this(InputStream input)
+-    in
+-    {
+-        assert( input !is null );
+-    }
+-    body
+-    {
+-        this.source = input;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    long seek (long ofs, Anchor anchor = Anchor.Begin) 
+-    {
+-        return source.seek (ofs, anchor);
+-    }
+-
+-    override void close()
+-    {
+-        source.close();
+-        source = null;
+-    }
+-
+-    override size_t read(void[] dst)
+-    {
+-        auto read = source.read(dst);
+-        if( read != IConduit.Eof )
+-            _count += read;
+-        return read;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-
+-    override InputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    ///
+-    long count() { return _count; }
+-
+-private:
+-    InputStream source;
+-    long _count;
+-}
+-
+-/// ditto
+-class CounterOutput : OutputStream
+-{
+-    ///
+-    this(OutputStream output)
+-    in
+-    {
+-        assert( output !is null );
+-    }
+-    body
+-    {
+-        this.sink = output;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return sink.conduit;
+-    }
+-
+-    OutputStream output()
+-    {
+-        return sink;
+-    }
+-
+-    long seek (long ofs, Anchor anchor = Anchor.Begin) 
+-    {
+-        return sink.seek (ofs, anchor);
+-    }
+-
+-    override void close()
+-    {
+-        sink.close();
+-        sink = null;
+-    }
+-
+-    override size_t write(void[] dst)
+-    {
+-        auto wrote = sink.write(dst);
+-        if( wrote != IConduit.Eof )
+-            _count += wrote;
+-        return wrote;
+-    }
+-
+-    override OutputStream copy(InputStream src, size_t max=-1)
+-    {
+-        Conduit.transfer(src, this, max);
+-        return this;
+-    }
+-
+-    override OutputStream flush()
+-    {
+-        sink.flush();
+-        return this;
+-    }
+-
+-  ///
+-    long count() { return _count; }
+-
+-private:
+-    OutputStream sink;
+-    long _count;
+-}
+-
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Prerelease
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-//module tangox.io.stream.SliceStream;
+-
+-//import tango.io.device.Conduit : Conduit;
+-//import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-
+-/**
+- * This stream can be used to provide stream-based access to a subset of
+- * another stream.  It is akin to slicing an array.
+- *
+- * This stream fully supports seeking, and as such requires that the
+- * underlying stream also support seeking.
+- */
+-class SliceSeekInputStream : InputStream
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    /**
+-     * Create a new slice stream from the given source, covering the content
+-     * starting at position begin, for length bytes.
+-     */
+-    this(InputStream source, long begin, long length)
+-    in
+-    {
+-        assert( source !is null );
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-        assert( begin >= 0 );
+-        assert( length >= 0 );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this.begin = begin;
+-        this.length = length;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-        seeker = null;
+-    }
+-
+-    override size_t read(void[] dst)
+-    {
+-        // If we're at the end of the slice, return eof
+-        if( _position >= length )
+-            return IConduit.Eof;
+-
+-        // Otherwise, make sure we don't try to read past the end of the slice
+-        if( _position+dst.length > length )
+-            dst.length = cast(size_t) (length-_position);
+-
+-        // Seek source stream to the appropriate location.
+-        if( seeker.seek(0, Anchor.Current) != begin+_position )
+-            seeker.seek(begin+_position, Anchor.Begin);
+-
+-        // Do the read
+-        auto read = source.read(dst);
+-        if( read == IConduit.Eof )
+-            // If we got an Eof, we'll consider that a bug for the moment.
+-            // TODO: proper exception
+-            throw new Exception("unexpected end-of-stream");
+-
+-        _position += read;
+-        return read;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-
+-    override InputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        switch( anchor )
+-        {
+-            case Anchor.Begin:
+-                _position = offset;
+-                break;
+-
+-            case Anchor.Current:
+-                _position += offset;
+-                if( _position < 0 ) _position = 0;
+-                break;
+-
+-            case Anchor.End:
+-                _position = length+offset;
+-                if( _position < 0 ) _position = 0;
+-                break;
+-
+-            default:
+-                assert(false);
+-        }
+-
+-        return _position;
+-    }
+-
+-private:
+-    InputStream source;
+-    InputStream seeker;
+-
+-    long _position, begin, length;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-        assert( begin >= 0 );
+-        assert( length >= 0 );
+-        assert( _position >= 0 );
+-    }
+-}
+-
+-/**
+- * This stream can be used to provide stream-based access to a subset of
+- * another stream.  It is akin to slicing an array.
+- */
+-class SliceInputStream : InputStream
+-{
+-    /**
+-     * Create a new slice stream from the given source, covering the content
+-     * starting at the current seek position for length bytes.
+-     */
+-    this(InputStream source, long length)
+-    in
+-    {
+-        assert( source !is null );
+-        assert( length >= 0 );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this._length = length;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    long seek (long ofs, Anchor anchor = Anchor.Begin) 
+-    {
+-        return source.seek (ofs, anchor);
+-    }
+-
+-    override size_t read(void[] dst)
+-    {
+-        // If we're at the end of the slice, return eof
+-        if( _length <= 0 )
+-            return IConduit.Eof;
+-
+-        // Otherwise, make sure we don't try to read past the end of the slice
+-        if( dst.length > _length )
+-            dst.length = cast(size_t) _length;
+-
+-        // Do the read
+-        auto read = source.read(dst);
+-        if( read == IConduit.Eof )
+-            // If we got an Eof, we'll consider that a bug for the moment.
+-            // TODO: proper exception
+-            throw new Exception("unexpected end-of-stream");
+-
+-        _length -= read;
+-        return read;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-
+-    override InputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-private:
+-    InputStream source;
+-    long _length;
+-
+-    invariant
+-    {
+-        if( _length > 0 ) assert( source !is null );
+-    }
+-}
+-
+-/**
+- * This stream can be used to provide stream-based access to a subset of
+- * another stream.  It is akin to slicing an array.
+- *
+- * This stream fully supports seeking, and as such requires that the
+- * underlying stream also support seeking.
+- */
+-class SliceSeekOutputStream : OutputStream
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    /**
+-     * Create a new slice stream from the given source, covering the content
+-     * starting at position begin, for length bytes.
+-     */
+-    this(OutputStream source, long begin, long length)
+-    in
+-    {
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-        assert( begin >= 0 );
+-        assert( length >= 0 );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this.begin = begin;
+-        this.length = length;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-        seeker = null;
+-    }
+-
+-    size_t write(void[] src)
+-    {
+-        // If we're at the end of the slice, return eof
+-        if( _position >= length )
+-            return IConduit.Eof;
+-
+-        // Otherwise, make sure we don't try to write past the end of the
+-        // slice
+-        if( _position+src.length > length )
+-            src.length = cast(size_t) (length-_position);
+-
+-        // Seek source stream to the appropriate location.
+-        if( seeker.seek(0, Anchor.Current) != begin+_position )
+-            seeker.seek(begin+_position, Anchor.Begin);
+-
+-        // Do the write
+-        auto wrote = source.write(src);
+-        if( wrote == IConduit.Eof )
+-            // If we got an Eof, we'll consider that a bug for the moment.
+-            // TODO: proper exception
+-            throw new Exception("unexpected end-of-stream");
+-
+-        _position += wrote;
+-        return wrote;
+-    }
+-
+-    override OutputStream copy(InputStream src, size_t max=-1)
+-    {
+-        Conduit.transfer(src, this, max);
+-        return this;
+-    }
+-
+-    override OutputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    override OutputStream output()
+-    {
+-        return source;
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        switch( anchor )
+-        {
+-            case Anchor.Begin:
+-                _position = offset;
+-                break;
+-
+-            case Anchor.Current:
+-                _position += offset;
+-                if( _position < 0 ) _position = 0;
+-                break;
+-
+-            case Anchor.End:
+-                _position = length+offset;
+-                if( _position < 0 ) _position = 0;
+-                break;
+-
+-            default:
+-                assert(false);
+-        }
+-
+-        return _position;
+-    }
+-
+-private:
+-    OutputStream source;
+-    OutputStream seeker;
+-
+-    long _position, begin, length;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-        assert( begin >= 0 );
+-        assert( length >= 0 );
+-        assert( _position >= 0 );
+-    }
+-}
+-
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Prerelease
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-//module tangox.io.stream.WrapStream;
+-
+-//import tango.io.device.Conduit : Conduit;
+-//import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-
+-/**
+- * This stream can be used to provide access to another stream.
+- * Its distinguishing feature is that users cannot close the underlying
+- * stream.
+- *
+- * This stream fully supports seeking, and as such requires that the
+- * underlying stream also support seeking.
+- */
+-class WrapSeekInputStream : InputStream
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    /**
+-     * Create a new wrap stream from the given source.
+-     */
+-    this(InputStream source)
+-    in
+-    {
+-        assert( source !is null );
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this._position = seeker.seek(0, Anchor.Current);
+-    }
+-
+-    /// ditto
+-    this(InputStream source, long position)
+-    in
+-    {
+-        assert( position >= 0 );
+-    }
+-    body
+-    {
+-        this(source);
+-        this._position = position;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-        seeker = null;
+-    }
+-
+-    override size_t read(void[] dst)
+-    {
+-        if( seeker.seek(0, Anchor.Current) != _position )
+-            seeker.seek(_position, Anchor.Begin);
+-
+-        auto read = source.read(dst);
+-        if( read != IConduit.Eof )
+-            _position += read;
+-
+-        return read;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-
+-    override InputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        seeker.seek(_position, Anchor.Begin);
+-        return (_position = seeker.seek(offset, anchor));
+-    }
+-
+-private:
+-    InputStream source;
+-    InputStream seeker;
+-    long _position;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-        assert( _position >= 0 );
+-    }
+-}
+-
+-/**
+- * This stream can be used to provide access to another stream.
+- * Its distinguishing feature is that the users cannot close the underlying
+- * stream.
+- *
+- * This stream fully supports seeking, and as such requires that the
+- * underlying stream also support seeking.
+- */
+-class WrapSeekOutputStream : OutputStream
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    /**
+-     * Create a new wrap stream from the given source.
+-     */
+-    this(OutputStream source)
+-    in
+-    {
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this._position = seeker.seek(0, Anchor.Current);
+-    }
+-
+-    /// ditto
+-    this(OutputStream source, long position)
+-    in
+-    {
+-        assert( position >= 0 );
+-    }
+-    body
+-    {
+-        this(source);
+-        this._position = position;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-        seeker = null;
+-    }
+-
+-    size_t write(void[] src)
+-    {
+-        if( seeker.seek(0, Anchor.Current) != _position )
+-            seeker.seek(_position, Anchor.Begin);
+-
+-        auto wrote = source.write(src);
+-        if( wrote != IConduit.Eof )
+-            _position += wrote;
+-        return wrote;
+-    }
+-
+-    override OutputStream copy(InputStream src, size_t max=-1)
+-    {
+-        Conduit.transfer(src, this, max);
+-        return this;
+-    }
+-
+-    override OutputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    override OutputStream output()
+-    {
+-        return source;
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        seeker.seek(_position, Anchor.Begin);
+-        return (_position = seeker.seek(offset, anchor));
+-    }
+-
+-private:
+-    OutputStream source;
+-    OutputStream seeker;
+-    long _position;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-        assert( _position >= 0 );
+-    }
+-}
+-
+-
+--- a/tango/io/vfs/ZipFolder.d
++++ /dev/null
+@@ -1,2152 +0,0 @@
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    The Great Namechange: February 2008
+-
+-                Initial release: December 2007
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-module tango.io.vfs.ZipFolder;
+-
+-import Path = tango.io.Path;
+-import tango.io.device.File : File;
+-import tango.io.FilePath : FilePath;
+-import tango.io.device.TempFile : TempFile;
+-import tango.io.compress.Zip : ZipReader, ZipBlockReader,
+-       ZipWriter, ZipBlockWriter, ZipEntry, ZipEntryInfo, Method;
+-import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-import tango.io.vfs.model.Vfs : VfsFolder, VfsFolderEntry, VfsFile,
+-       VfsFolders, VfsFiles, VfsFilter, VfsStats, VfsFilterInfo,
+-       VfsInfo, VfsSync;
+-import tango.time.Time : Time;
+-
+-debug( ZipFolder )
+-{
+-    import tango.io.Stdout : Stderr;
+-}
+-
+-// This disables code that is causing heap corruption in Tango 0.99.3
+-version = Bug_HeapCorruption;
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-private
+-{
+-    enum EntryType { Dir, File }
+-   
+-    /*
+-     * Entries are what make up the internal tree that describes the
+-     * filesystem of the archive.  Each Entry is either a directory or a file.
+-     */
+-    struct Entry
+-    {
+-        EntryType type;
+-
+-        union
+-        {
+-            DirEntry dir;
+-            FileEntry file;
+-        }
+-
+-        char[] fullname;
+-        char[] name;
+-
+-        /+
+-        invariant
+-        {
+-            assert( (type == EntryType.Dir)
+-                 || (type == EntryType.File) );
+-
+-            assert( fullname.nz() );
+-            assert( name.nz() );
+-        }
+-        +/
+-
+-        VfsFilterInfo vfsFilterInfo;
+-
+-        VfsInfo vfsInfo()
+-        {
+-            return &vfsFilterInfo;
+-        }
+-
+-        /*
+-         * Updates the VfsInfo structure for this entry.
+-         */
+-        void makeVfsInfo()
+-        {
+-            with( vfsFilterInfo )
+-            {
+-                // Cheat horribly here
+-                name = this.name;
+-                path = this.fullname[0..($-name.length+"/".length)];
+-
+-                folder = isDir;
+-                bytes = folder ? 0 : fileSize;
+-            }
+-        }
+-
+-        bool isDir()
+-        {
+-            return (type == EntryType.Dir);
+-        }
+-
+-        bool isFile()
+-        {
+-            return (type == EntryType.File);
+-        }
+-
+-        ulong fileSize()
+-        in
+-        {
+-            assert( type == EntryType.File );
+-        }
+-        body
+-        {
+-            if( file.zipEntry !is null )
+-                return file.zipEntry.size;
+-
+-            else if( file.tempFile !is null )
+-            {
+-                assert( file.tempFile.length >= 0 );
+-                return cast(ulong) file.tempFile.length;
+-            }
+-            else
+-                return 0;
+-        }
+-
+-        /*
+-         * Opens a File Entry for reading.
+-         *
+-         * BUG: Currently, if a user opens a new or unmodified file for input,
+-         * and then opens it for output, the two streams will be working with
+-         * different underlying conduits.  This means that the result of
+-         * openInput should probably be wrapped in some kind of switching
+-         * stream that can update when the backing store for the file changes.
+-         */
+-        InputStream openInput()
+-        in
+-        {
+-            assert( type == EntryType.File );
+-        }
+-        body
+-        {
+-            if( file.zipEntry !is null )
+-            {
+-                file.zipEntry.verify;
+-                return file.zipEntry.open;
+-            }
+-            else if( file.tempFile !is null )
+-                return new WrapSeekInputStream(file.tempFile, 0);
+-
+-            else
+-               {
+-               throw new Exception ("cannot open input stream for '"~fullname~"'");
+-               //return new DummyInputStream;
+-               }
+-        }
+-
+-        /*
+-         * Opens a file entry for output.
+-         */
+-        OutputStream openOutput()
+-        in
+-        {
+-            assert( type == EntryType.File );
+-        }
+-        body
+-        {
+-            if( file.tempFile !is null )
+-                return new WrapSeekOutputStream(file.tempFile);
+-
+-            else
+-            {
+-                // Ok; we need to make a temporary file to store output in.
+-                // If we already have a zip entry, we need to dump that into
+-                // the temp. file and remove the zipEntry.
+-                if( file.zipEntry !is null )
+-                {
+-                    {
+-                        auto zi = file.zipEntry.open;
+-                        scope(exit) zi.close;
+-    
+-                        file.tempFile = new TempFile;
+-                        file.tempFile.copy(zi).close;
+-
+-                        debug( ZipFolder )
+-                            Stderr.formatln("Entry.openOutput: duplicated"
+-                                    " temp file {} for {}",
+-                                    file.tempFile, this.fullname);
+-                    }
+-
+-                    // TODO: Copy file info if available
+-
+-                    file.zipEntry = null;
+-                }
+-                else
+-                {
+-                    // Otherwise, just make a new, blank temp file
+-                    file.tempFile = new TempFile;
+-
+-                    debug( ZipFolder )
+-                        Stderr.formatln("Entry.openOutput: created"
+-                                " temp file {} for {}",
+-                                file.tempFile, this.fullname);
+-                }
+-
+-                assert( file.tempFile !is null );
+-                return openOutput;
+-            }
+-        }
+-
+-        void dispose()
+-        {
+-            fullname = name = null;
+-            
+-            with( vfsFilterInfo )
+-            {
+-                name = path = null;
+-            }
+-
+-            dispose_children;
+-        }
+-
+-        void dispose_children()
+-        {
+-            switch( type )
+-            {
+-                case EntryType.Dir:
+-                    auto keys = dir.children.keys;
+-                    scope(exit) delete keys;
+-                    foreach( k ; keys )
+-                    {
+-                        auto child = dir.children[k];
+-                        child.dispose();
+-                        dir.children.remove(k);
+-                        delete child;
+-                    }
+-                    dir.children = dir.children.init;
+-                    break;
+-
+-                case EntryType.File:
+-                    if( file.zipEntry !is null )
+-                    {
+-                        // Don't really need to do anything here
+-                        file.zipEntry = null;
+-                    }
+-                    else if( file.tempFile !is null )
+-                    {
+-                        // Detatch to destroy the physical file itself
+-                        file.tempFile.detach();
+-                        file.tempFile = null;
+-                    }
+-                    break;
+-
+-                default:
+-                    debug( ZipFolder ) Stderr.formatln(
+-                            "Entry.dispose_children: unknown type {}",
+-                            type);
+-                    assert(false);
+-            }
+-        }
+-    }
+-
+-    struct DirEntry
+-    {
+-        Entry*[char[]] children;
+-    }
+-
+-    struct FileEntry
+-    {
+-        ZipEntry zipEntry;
+-        TempFile tempFile;
+-
+-        invariant
+-        {
+-            auto zn = zipEntry is null;
+-            auto tn = tempFile is null;
+-            assert( (zn && tn)
+-          /* zn xor tn */ || (!(zn&&tn)&&(zn||tn)) );
+-        }
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-/**
+- * ZipFolder serves as the root object for all Zip archives in the VFS.
+- * Presently, it can only open archives on the local filesystem.
+- */
+-class ZipFolder : ZipSubFolder
+-{
+-    /**
+-     * Opens an archive from the local filesystem.  If the readonly argument
+-     * is specified as true, then modification of the archive will be
+-     * explicitly disallowed.
+-     */
+-    this(char[] path, bool readonly=false)
+-    out { assert( valid ); }
+-    body
+-    {
+-        debug( ZipFolder )
+-            Stderr.formatln(`ZipFolder("{}", {})`, path, readonly);
+-        this.resetArchive(path, readonly);
+-        super(this, root);
+-    }
+-
+-    /**
+-     * Closes the archive, and releases all internal resources.  If the commit
+-     * argument is true (the default), then changes to the archive will be
+-     * flushed out to disk.  If false, changes will simply be discarded.
+-     */
+-    final override VfsFolder close(bool commit = true)
+-    in { assert( valid ); }
+-    body
+-    {
+-        debug( ZipFolder )
+-            Stderr.formatln("ZipFolder.close({})",commit);
+-
+-        // MUTATE
+-        if( commit ) sync;
+-
+-        // Close ZipReader
+-        if( zr !is null )
+-        {
+-            zr.close();
+-            delete zr;
+-        }
+-
+-        // Destroy entries
+-        root.dispose();
+-        version( Bug_HeapCorruption )
+-            root = null;
+-        else
+-            delete root;
+-
+-        return this;
+-    }
+-
+-    /**
+-     * Flushes all changes to the archive out to disk.
+-     */
+-    final override VfsFolder sync()
+-    in { assert( valid ); }
+-    out
+-    {
+-        assert( valid );
+-        assert( !modified );
+-    }
+-    body
+-    {
+-        debug( ZipFolder )
+-            Stderr("ZipFolder.sync()").newline;
+-
+-        if( !modified )
+-            return this;
+-
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("ZipFolder.sync");
+-        assert(false);
+-}
+-else
+-{
+-        enforce_mutable;
+-        
+-        // First, we need to determine if we have any zip entries.  If we
+-        // don't, then we can write directly to the path.  If there *are*
+-        // zip entries, then we'll need to write to a temporary path instead.
+-        OutputStream os;
+-        TempFile tempFile;
+-        scope(exit) if( tempFile !is null ) delete tempFile;
+-
+-        auto p = Path.parse (path);
+-        foreach( file ; this.tree.catalog )
+-        {
+-            if( auto zf = cast(ZipFile) file )
+-                if( zf.entry.file.zipEntry !is null )
+-                {
+-                    tempFile = new TempFile(p.path, TempFile.Permanent);
+-                    os = tempFile;
+-                    debug( ZipFolder )
+-                        Stderr.formatln(" sync: created temp file {}",
+-                                tempFile.path);
+-                    break;
+-                }
+-        }
+-
+-        if( tempFile is null )
+-        {
+-            // Kill the current zip reader so we can re-open the file it's
+-            // using.
+-            if( zr !is null )
+-            {
+-                zr.close;
+-                delete zr;
+-            }
+-
+-            os = new File(path, File.WriteCreate);
+-        }
+-
+-        // Now, we can create the archive.
+-        {
+-            scope zw = new ZipBlockWriter(os);
+-            foreach( file ; this.tree.catalog )
+-            {
+-                auto zei = ZipEntryInfo(file.toString[1..$]);
+-                // BUG: Passthru doesn't maintain compression for some
+-                // reason...
+-                if( auto zf = cast(ZipFile) file )
+-                {
+-                    if( zf.entry.file.zipEntry !is null )
+-                        zw.putEntry(zei, zf.entry.file.zipEntry);
+-                    else
+-                        zw.putStream(zei, file.input);
+-                }
+-                else
+-                    zw.putStream(zei, file.input);
+-            }
+-            zw.finish;
+-        }
+-
+-        // With that done, we can free all our handles, etc.
+-        debug( ZipFolder )
+-            Stderr(" sync: close").newline;
+-        this.close(/*commit*/ false);
+-        os.close;
+-
+-        // If we wrote the archive into a temporary file, move that over the
+-        // top of the old archive.
+-        if( tempFile !is null )
+-        {
+-            debug( ZipFolder )
+-                Stderr(" sync: destroying temp file").newline;
+-
+-            debug( ZipFolder )
+-                Stderr.formatln(" sync: renaming {} to {}",
+-                        tempFile, path);
+-
+-            Path.rename (tempFile.toString, path);
+-        }
+-
+-        // Finally, re-open the archive so that we have all the nicely
+-        // compressed files.
+-        debug( ZipFolder )
+-            Stderr(" sync: reset archive").newline;
+-        this.resetArchive(path, readonly);
+-        
+-        debug( ZipFolder )
+-            Stderr(" sync: reset folder").newline;
+-        this.reset(this, root);
+-
+-        debug( ZipFolder )
+-            Stderr(" sync: done").newline;
+-
+-        return this;
+-}
+-    }
+-
+-    /**
+-     * Indicates whether the archive was opened for read-only access.  Note
+-     * that in addition to the readonly constructor flag, this is also
+-     * influenced by whether the file itself is read-only or not.
+-     */
+-    final bool readonly() { return _readonly; }
+-
+-    /**
+-     * Allows you to read and specify the path to the archive.  The effect of
+-     * setting this is to change where the archive will be written to when
+-     * flushed to disk.
+-     */
+-    final char[] path() { return _path; }
+-    final char[] path(char[] v) { return _path = v; } /// ditto
+-
+-private:
+-    ZipReader zr;
+-    Entry* root;
+-    char[] _path;
+-    bool _readonly;
+-    bool modified = false;
+-
+-    final bool readonly(bool v) { return _readonly = v; }
+-
+-    final bool closed()
+-    {
+-        debug( ZipFolder )
+-            Stderr("ZipFolder.closed()").newline;
+-        return (root is null);
+-    }
+-
+-    final bool valid()
+-    {
+-        debug( ZipFolder )
+-            Stderr("ZipFolder.valid()").newline;
+-        return !closed;
+-    }
+-
+-    final OutputStream mutateStream(OutputStream source)
+-    {
+-        return new EventSeekOutputStream(source,
+-                EventSeekOutputStream.Callbacks(
+-                    null,
+-                    null,
+-                    &mutate_write,
+-                    null));
+-    }
+-
+-    void mutate_write(uint bytes, void[] src)
+-    {
+-        if( !(bytes == 0 || bytes == IConduit.Eof) )
+-            this.modified = true;
+-    }
+-
+-    void resetArchive(char[] path, bool readonly=false)
+-    out { assert( valid ); }
+-    body
+-    {
+-        debug( ZipFolder )
+-            Stderr.formatln(`ZipFolder.resetArchive("{}", {})`, path, readonly);
+-
+-        debug( ZipFolder )
+-            Stderr.formatln(" .. size of Entry: {0}, {0:x} bytes", Entry.sizeof);
+-
+-        this.path = path;
+-        this.readonly = readonly;
+-
+-        // Make sure the modified flag is set appropriately
+-        scope(exit) modified = false;
+-
+-        // First, create a root entry
+-        root = new Entry;
+-        root.type = EntryType.Dir;
+-        root.fullname = root.name = "/";
+-
+-        // If the user allowed writing, also allow creating a new archive.
+-        // Note that we MUST drop out here if the archive DOES NOT exist,
+-        // since Path.isWriteable will throw an exception if called on a
+-        // non-existent path.
+-        if( !this.readonly && !Path.exists(path) )
+-            return;
+-
+-        // Update readonly to reflect the write-protected status of the
+-        // archive.
+-        this.readonly = this.readonly || !Path.isWritable(path);
+-
+-        // Parse the contents of the archive
+-        foreach( zipEntry ; zr )
+-        {
+-            // Normalise name
+-            auto name = FilePath(zipEntry.info.name).standard.toString;
+-
+-            // If the last character is '/', treat as a directory and skip
+-            // TODO: is there a better way of detecting this?
+-            if( name[$-1] == '/' )
+-                continue;
+-
+-            // Now, we need to locate the right spot to insert this entry.
+-            {
+-                // That's CURrent ENTity, not current OR currant...
+-                Entry* curent = root;
+-                char[] h,t;
+-                headTail(name,h,t);
+-                while( t.nz() )
+-                {
+-                    assert( curent.isDir );
+-                    if( auto nextent = (h in curent.dir.children) )
+-                        curent = *nextent;
+-                    
+-                    else
+-                    {
+-                        // Create new directory entry
+-                        Entry* dirent = new Entry;
+-                        dirent.type = EntryType.Dir;
+-                        if( curent.fullname != "/" )
+-                            dirent.fullname = curent.fullname ~ "/" ~ h;
+-                        else
+-                            dirent.fullname = "/" ~ h;
+-                        dirent.name = dirent.fullname[$-h.length..$];
+-
+-                        // Insert into current entry
+-                        curent.dir.children[dirent.name] = dirent;
+-
+-                        // Make it the new current entry
+-                        curent = dirent;
+-                    }
+-
+-                    headTail(t,h,t);
+-                }
+-
+-                // Getting here means that t is empty, which means the final
+-                // component of the path--the file name--is in h.  The entry
+-                // of the containing directory is in curent.
+-
+-                // Make sure the file isn't already there (you never know!)
+-                assert( !(h in curent.dir.children) );
+-
+-                // Create a new file entry for it.
+-                {
+-                    // BUG: Bug_HeapCorruption
+-                    // with ZipTest, on the resetArchive operation, on
+-                    // the second time through this next line, it erroneously
+-                    // allocates filent 16 bytes lower than curent.  Entry
+-                    // is *way* larger than 16 bytes, and this causes it to
+-                    // zero-out the existing root element, which leads to
+-                    // segfaults later on at line +12:
+-                    //
+-                    //      // Insert
+-                    //      curent.dir.children[filent.name] = filent;
+-
+-                    Entry* filent = new Entry;
+-                    filent.type = EntryType.File;
+-                    if( curent.fullname != "/" )
+-                        filent.fullname = curent.fullname ~ "/" ~ h;
+-                    else
+-                        filent.fullname = "/" ~ h;
+-                    filent.name = filent.fullname[$-h.length..$];
+-                    filent.file.zipEntry = zipEntry.dup;
+-
+-                    filent.makeVfsInfo;
+-
+-                    // Insert
+-                    curent.dir.children[filent.name] = filent;
+-                }
+-            }
+-        }
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-/**
+- * This class represents a folder in an archive.  In addition to supporting
+- * the sync operation, you can also use the archive member to get a reference
+- * to the underlying ZipFolder instance.
+- */
+-class ZipSubFolder : VfsFolder, VfsSync
+-{
+-    ///
+-    final char[] name()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return entry.name;
+-    }
+-
+-    ///
+-    final override char[] toString()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return entry.fullname;
+-    }
+-
+-    ///
+-    final VfsFile file(char[] path)
+-    in
+-    {
+-        assert( valid );
+-        assert( !Path.parse(path).isAbsolute );
+-    }
+-    body
+-    {
+-        auto fp = Path.parse(path);
+-        auto dir = fp.path;
+-        auto name = fp.file;
+-
+-        if (dir.length > 0 && '/' == dir[$-1]) {
+-            dir = dir[0..$-1];
+-        }
+-		
+-        // If the file is in another directory, then we need to look up that
+-        // up first.
+-        if( dir.nz() )
+-        {
+-            auto dir_ent = this.folder(dir);
+-            auto dir_obj = dir_ent.open;
+-            return dir_obj.file(name);
+-        }
+-        else
+-        {
+-            // Otherwise, we need to check and see whether the file is in our
+-            // entry list.
+-            if( auto file_entry = (name in this.entry.dir.children) )
+-            {
+-                // It is; create a new object for it.
+-                return new ZipFile(archive, this.entry, *file_entry);
+-            }
+-            else
+-            {
+-                // Oh dear... return a holding object.
+-                return new ZipFile(archive, this.entry, name);
+-            }
+-        }
+-    }
+-
+-    ///
+-    final VfsFolderEntry folder(char[] path)
+-    in
+-    {
+-        assert( valid );
+-        assert( !Path.parse(path).isAbsolute );
+-    }
+-    body
+-    {
+-        // Locate the folder in question.  We do this by "walking" the
+-        // path components.  If we find a component that doesn't exist,
+-        // then we create a ZipSubFolderEntry for the remainder.
+-        Entry* curent = this.entry;
+-
+-        // h is the "head" of the path, t is the remainder.  ht is both
+-        // joined together.
+-        char[] h,t,ht;
+-        ht = path;
+-
+-        do
+-        {
+-            // Split ht at the first path separator.
+-            assert( ht.nz() );
+-            headTail(ht,h,t);
+-
+-            // Look for a pre-existing subentry
+-            auto subent = (h in curent.dir.children);
+-            if( t.nz() && !!subent )
+-            {
+-                // Move to the subentry, and split the tail on the next
+-                // iteration.
+-                curent = *subent;
+-                ht = t;
+-            }
+-            else
+-                // If the next component doesn't exist, return a folder entry.
+-                // If the tail is empty, return a folder entry as well (let
+-                // the ZipSubFolderEntry do the last lookup.)
+-                return new ZipSubFolderEntry(archive, curent, ht);
+-        }
+-        while( true )
+-        //assert(false);
+-    }
+-
+-    ///
+-    final VfsFolders self()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return new ZipSubFolderGroup(archive, this, false);
+-    }
+-
+-    ///
+-    final VfsFolders tree()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return new ZipSubFolderGroup(archive, this, true);
+-    }
+-
+-    ///
+-    final int opApply(int delegate(ref VfsFolder) dg)
+-    in { assert( valid ); }
+-    body
+-    {
+-        int result = 0;
+-
+-        foreach( _,childEntry ; this.entry.dir.children )
+-        {
+-            if( childEntry.isDir )
+-            {
+-                VfsFolder childFolder = new ZipSubFolder(archive, childEntry);
+-                if( (result = dg(childFolder)) != 0 )
+-                    break;
+-            }
+-        }
+-
+-        return result;
+-    }
+-
+-    ///
+-    final VfsFolder clear()
+-    in { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("VfsFolder.clear");
+-        assert(false);
+-}
+-else
+-{
+-        // MUTATE
+-        enforce_mutable;
+-
+-        // Disposing of the underlying entry subtree should do our job for us.
+-        entry.dispose_children;
+-        mutate;
+-        return this;
+-}
+-    }
+-
+-    ///
+-    final bool writable()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return !archive.readonly;
+-    }
+-
+-    /**
+-     * Closes this folder object.  If commit is true, then the folder is
+-     * sync'ed before being closed.
+-     */
+-    override VfsFolder close(bool commit = true)
+-    in { assert( valid ); }
+-    body
+-    {
+-        // MUTATE
+-        if( commit ) sync;
+-
+-        // Just clean up our pointers
+-        archive = null;
+-        entry = null;
+-        return this;
+-    }
+-
+-    /**
+-     * This will flush any changes to the archive to disk.  Note that this
+-     * applies to the entire archive, not just this folder and its contents.
+-     */
+-    override VfsFolder sync()
+-    in { assert( valid ); }
+-    body
+-    {
+-        // MUTATE
+-        archive.sync;
+-        return this;
+-    }
+-
+-    ///
+-    final void verify(VfsFolder folder, bool mounting)
+-    in { assert( valid ); }
+-    body
+-    {
+-        auto zipfolder = cast(ZipSubFolder) folder;
+-
+-        if( mounting
+-                && zipfolder !is null
+-                && zipfolder.archive is archive )
+-        {
+-            auto src = this.toString;
+-            auto dst = zipfolder.toString;
+-
+-            auto len = src.length > dst.length ? dst.length : src.length;
+-
+-            if( src[0..len] == dst[0..len] )
+-                error(`folders "`~dst~`" and "`~src~`" in archive "`
+-                        ~archive.path~`" overlap`);
+-        }
+-    }
+-
+-    /**
+-     * Returns a reference to the underlying ZipFolder instance.
+-     */
+-    final ZipFolder archive() { return _archive; }
+-
+-private:
+-    ZipFolder _archive;
+-    Entry* entry;
+-    VfsStats stats;
+-
+-    final ZipFolder archive(ZipFolder v) { return _archive = v; }
+-
+-    this(ZipFolder archive, Entry* entry)
+-    {
+-        this.reset(archive, entry);
+-    }
+-
+-    final void reset(ZipFolder archive, Entry* entry)
+-    in
+-    {
+-        assert( archive !is null );
+-        assert( entry.isDir );
+-    }
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.archive = archive;
+-        this.entry = entry;
+-    }
+-
+-    final bool valid()
+-    {
+-        return( (archive !is null) && !archive.closed );
+-    }
+-
+-    final void enforce_mutable()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( archive.readonly )
+-            // TODO: exception
+-            throw new Exception("cannot mutate a read-only Zip archive");
+-    }
+-
+-    final void mutate()
+-    in { assert( valid ); }
+-    body
+-    {
+-        enforce_mutable;
+-        archive.modified = true;
+-    }
+-
+-    final ZipSubFolder[] folders(bool collect)
+-    in { assert( valid ); }
+-    body
+-    {
+-        ZipSubFolder[] folders;
+-        stats = stats.init;
+-
+-        foreach( _,childEntry ; entry.dir.children )
+-        {
+-            if( childEntry.isDir )
+-            {
+-                if( collect ) folders ~= new ZipSubFolder(archive, childEntry);
+-                ++ stats.folders;
+-            }
+-            else
+-            {
+-                assert( childEntry.isFile );
+-                stats.bytes += childEntry.fileSize;
+-                ++ stats.files;
+-            }
+-        }
+-
+-        return folders;
+-    }
+-
+-    final Entry*[] files(ref VfsStats stats, VfsFilter filter = null)
+-    in { assert( valid ); }
+-    body
+-    {
+-        Entry*[] files;
+-
+-        foreach( _,childEntry ; entry.dir.children )
+-        {
+-            if( childEntry.isFile )
+-                if( filter is null || filter(childEntry.vfsInfo) )
+-                {
+-                    files ~= childEntry;
+-                    stats.bytes += childEntry.fileSize;
+-                    ++stats.files;
+-                }
+-        }
+-
+-        return files;
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-/**
+- * This class represents a file within an archive.
+- */
+-class ZipFile : VfsFile
+-{
+-    ///
+-    final char[] name()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( entry ) return entry.name;
+-        else        return name_;
+-    }
+-
+-    ///
+-    final override char[] toString()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( entry ) return entry.fullname;
+-        else        return parent.fullname ~ "/" ~ name_;
+-    }
+-
+-    ///
+-    final bool exists()
+-    in { assert( valid ); }
+-    body
+-    {
+-        // If we've only got a parent and a name, this means we don't actually
+-        // exist; EXISTENTIAL CRISIS TEIM!!!
+-        return !!entry;
+-    }
+-
+-    ///
+-    final ulong size()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( exists )
+-            return entry.fileSize;
+-        else
+-            error("ZipFile.size: cannot reliably determine size of a "
+-                    "non-existent file");
+-
+-        assert(false);
+-    }
+-
+-    ///
+-    final VfsFile copy(VfsFile source)
+-    in { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("ZipFile.copy");
+-        assert(false);
+-}
+-else
+-{
+-        // MUTATE
+-        enforce_mutable;
+-
+-        if( !exists ) this.create;
+-        this.output.copy(source.input);
+-
+-        return this;
+-}
+-    }
+-
+-    ///
+-    final VfsFile move(VfsFile source)
+-    in { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("ZipFile.move");
+-        assert(false);
+-}
+-else
+-{
+-        // MUTATE
+-        enforce_mutable;
+-
+-        this.copy(source);
+-        source.remove;
+-
+-        return this;
+-}
+-    }
+-
+-    ///
+-    final VfsFile create()
+-    in { assert( valid ); }
+-    out { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("ZipFile.create");
+-        assert(false);
+-}
+-else
+-{
+-        if( exists )
+-            error("ZipFile.create: cannot create already existing file: "
+-                    "this folder ain't big enough for the both of 'em");
+-
+-        // MUTATE
+-        enforce_mutable;
+-
+-        auto entry = new Entry;
+-        entry.type = EntryType.File;
+-        entry.fullname = parent.fullname.dir_app(name);
+-        entry.name = entry.fullname[$-name.length..$];
+-        entry.makeVfsInfo;
+-
+-        assert( !(entry.name in parent.dir.children) );
+-        parent.dir.children[entry.name] = entry;
+-        this.reset(archive, parent, entry);
+-        mutate;
+-
+-        // Done
+-        return this;
+-}
+-    }
+-
+-    ///
+-    final VfsFile create(InputStream stream)
+-    in { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("ZipFile.create");
+-        assert(false);
+-}
+-else
+-{
+-        create;
+-        output.copy(stream).close;
+-        return this;
+-}
+-    }
+-
+-    ///
+-    final VfsFile remove()
+-    in{ assert( valid ); }
+-    out { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        mutate_error("ZipFile.remove");
+-        assert(false);
+-}
+-else
+-{
+-        if( !exists )
+-            error("ZipFile.remove: cannot remove non-existent file; "
+-                    "rather redundant, really");
+-
+-        // MUTATE
+-        enforce_mutable;
+-
+-        // Save the old name
+-        auto old_name = name;
+-
+-        // Do the removal
+-        assert( !!(name in parent.dir.children) );
+-        parent.dir.children.remove(name);
+-        entry.dispose;
+-        entry = null;
+-        mutate;
+-
+-        // Swap out our now empty entry for the name, so the file can be
+-        // directly recreated.
+-        this.reset(archive, parent, old_name);
+-
+-        return this;
+-}
+-    }
+-
+-    ///
+-    final InputStream input()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( exists )
+-            return entry.openInput;
+-
+-        else
+-            error("ZipFile.input: cannot open non-existent file for input; "
+-                    "results would not be very useful");
+-
+-        assert(false);
+-    }
+-
+-    ///
+-    final OutputStream output()
+-    in { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutable )
+-{
+-        mutate_error("ZipFile.output");
+-        assert(false);
+-}
+-else
+-{
+-        // MUTATE
+-        enforce_mutable;
+-        
+-        // Don't call mutate; defer that until the user actually writes to or
+-        // modifies the underlying stream.
+-        return archive.mutateStream(entry.openOutput);
+-}
+-    }
+-
+-    ///
+-    final VfsFile dup()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( entry )
+-            return new ZipFile(archive, parent, entry);
+-        else
+-            return new ZipFile(archive, parent, name);
+-    }
+-
+-    ///
+-    final Time modified()
+-    {
+-        return entry.file.zipEntry.info.modified;
+-    }
+-    
+-    private:
+-    ZipFolder archive;
+-    Entry* entry;
+-
+-    Entry* parent;
+-    char[] name_;
+-
+-    this()
+-    out { assert( !valid ); }
+-    body
+-    {
+-    }
+-
+-    this(ZipFolder archive, Entry* parent, Entry* entry)
+-    in
+-    {
+-        assert( archive !is null );
+-        assert( parent );
+-        assert( parent.isDir );
+-        assert( entry );
+-        assert( entry.isFile );
+-        assert( parent.dir.children[entry.name] is entry );
+-    }
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.reset(archive, parent, entry);
+-    }
+-
+-    this(ZipFolder archive, Entry* parent, char[] name)
+-    in
+-    {
+-        assert( archive !is null );
+-        assert( parent );
+-        assert( parent.isDir );
+-        assert( name.nz() );
+-        assert( !(name in parent.dir.children) );
+-    }
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.reset(archive, parent, name);
+-    }
+-
+-    final bool valid()
+-    {
+-        return( (archive !is null) && !archive.closed );
+-    }
+-
+-    final void enforce_mutable()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( archive.readonly )
+-            // TODO: exception
+-            throw new Exception("cannot mutate a read-only Zip archive");
+-    }
+-
+-    final void mutate()
+-    in { assert( valid ); }
+-    body
+-    {
+-        enforce_mutable;
+-        archive.modified = true;
+-    }
+-
+-    final void reset(ZipFolder archive, Entry* parent, Entry* entry)
+-    in
+-    {
+-        assert( archive !is null );
+-        assert( parent );
+-        assert( parent.isDir );
+-        assert( entry );
+-        assert( entry.isFile );
+-        assert( parent.dir.children[entry.name] is entry );
+-    }
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.parent = parent;
+-        this.archive = archive;
+-        this.entry = entry;
+-        this.name_ = null;
+-    }
+-
+-    final void reset(ZipFolder archive, Entry* parent, char[] name)
+-    in
+-    {
+-        assert( archive !is null );
+-        assert( parent );
+-        assert( parent.isDir );
+-        assert( name.nz() );
+-        assert( !(name in parent.dir.children) );
+-    }
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.archive = archive;
+-        this.parent = parent;
+-        this.entry = null;
+-        this.name_ = name;
+-    }
+-
+-    final void close()
+-    in { assert( valid ); }
+-    out { assert( !valid ); }
+-    body
+-    {
+-        archive = null;
+-        parent = null;
+-        entry = null;
+-        name_ = null;
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-class ZipSubFolderEntry : VfsFolderEntry
+-{
+-    final VfsFolder open()
+-    in { assert( valid ); }
+-    body
+-    {
+-        auto entry = (name in parent.dir.children);
+-        if( entry )
+-            return new ZipSubFolder(archive, *entry);
+-
+-        else
+-        {
+-            // NOTE: this can be called with a multi-part path.
+-            error("ZipSubFolderEntry.open: \""
+-                    ~ parent.fullname ~ "/" ~ name
+-                    ~ "\" does not exist");
+-
+-            assert(false);
+-        }
+-    }
+-
+-    final VfsFolder create()
+-    in { assert( valid ); }
+-    body
+-    {
+-version( ZipFolder_NonMutating )
+-{
+-        // TODO: different exception if folder exists (this operation is
+-        // currently invalid either way...)
+-        mutate_error("ZipSubFolderEntry.create");
+-        assert(false);
+-}
+-else
+-{
+-        // MUTATE
+-        enforce_mutable;
+-
+-        // If the folder exists, we can't really create it, now can we?
+-        if( this.exists )
+-            error("ZipSubFolderEntry.create: cannot create folder that already "
+-                    "exists, and believe me, I *tried*");
+-        
+-        // Ok, I suppose I can do this for ya...
+-        auto entry = new Entry;
+-        entry.type = EntryType.Dir;
+-        entry.fullname = parent.fullname.dir_app(name);
+-        entry.name = entry.fullname[$-name.length..$];
+-        entry.makeVfsInfo;
+-
+-        assert( !(entry.name in parent.dir.children) );
+-        parent.dir.children[entry.name] = entry;
+-        mutate;
+-
+-        // Done
+-        return new ZipSubFolder(archive, entry);
+-}
+-    }
+-
+-    final bool exists()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return !!(name in parent.dir.children);
+-    }
+-
+-private:
+-    ZipFolder archive;
+-    Entry* parent;
+-    char[] name;
+-
+-    this(ZipFolder archive, Entry* parent, char[] name)
+-    in
+-    {
+-        assert( archive !is null );
+-        assert( parent.isDir );
+-        assert( name.nz() );
+-        assert( name.single_path_part() );
+-    }
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.archive = archive;
+-        this.parent = parent;
+-        this.name = name;
+-    }
+-
+-    final bool valid()
+-    {
+-        return (archive !is null) && !archive.closed;
+-    }
+-    
+-    final void enforce_mutable()
+-    in { assert( valid ); }
+-    body
+-    {
+-        if( archive.readonly )
+-            // TODO: exception
+-            throw new Exception("cannot mutate a read-only Zip archive");
+-    }
+-
+-    final void mutate()
+-    in { assert( valid ); }
+-    body
+-    {
+-        enforce_mutable;
+-        archive.modified = true;
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-class ZipSubFolderGroup : VfsFolders
+-{
+-    final int opApply(int delegate(ref VfsFolder) dg)
+-    in { assert( valid ); }
+-    body
+-    {
+-        int result = 0;
+-
+-        foreach( folder ; members )
+-        {
+-            VfsFolder x = folder;
+-            if( (result = dg(x)) != 0 )
+-                break;
+-        }
+-
+-        return result;
+-    }
+-
+-    final uint files()
+-    in { assert( valid ); }
+-    body
+-    {
+-        uint files = 0;
+-
+-        foreach( folder ; members )
+-            files += folder.stats.files;
+-
+-        return files;
+-    }
+-
+-    final uint folders()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return members.length;
+-    }
+-
+-    final uint entries()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return files + folders;
+-    }
+-
+-    final ulong bytes()
+-    in { assert( valid ); }
+-    body
+-    {
+-        ulong bytes = 0;
+-
+-        foreach( folder ; members )
+-            bytes += folder.stats.bytes;
+-
+-        return bytes;
+-    }
+-
+-    final VfsFolders subset(char[] pattern)
+-    in { assert( valid ); }
+-    body
+-    {
+-        ZipSubFolder[] set;
+-
+-        foreach( folder ; members )
+-            if( Path.patternMatch(folder.name, pattern) )
+-                set ~= folder;
+-
+-        return new ZipSubFolderGroup(archive, set);
+-    }
+-
+-    final VfsFiles catalog(char[] pattern)
+-    in { assert( valid ); }
+-    body
+-    {
+-        bool filter (VfsInfo info)
+-        {
+-                return Path.patternMatch(info.name, pattern);
+-        }
+-
+-        return catalog (&filter);
+-    }
+-
+-    final VfsFiles catalog(VfsFilter filter = null)
+-    in { assert( valid ); }
+-    body
+-    {
+-        return new ZipFileGroup(archive, this, filter);
+-    }
+-
+-private:
+-    ZipFolder archive;
+-    ZipSubFolder[] members;
+-
+-    this(ZipFolder archive, ZipSubFolder root, bool recurse)
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.archive = archive;
+-        members = root ~ scan(root, recurse);
+-    }
+-
+-    this(ZipFolder archive, ZipSubFolder[] members)
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.archive = archive;
+-        this.members = members;
+-    }
+-
+-    final bool valid()
+-    {
+-        return (archive !is null) && !archive.closed;
+-    }
+-
+-    final ZipSubFolder[] scan(ZipSubFolder root, bool recurse)
+-    in { assert( valid ); }
+-    body
+-    {
+-        auto folders = root.folders(recurse);
+-
+-        if( recurse )
+-            foreach( child ; folders )
+-                folders ~= scan(child, recurse);
+-
+-        return folders;
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-class ZipFileGroup : VfsFiles
+-{
+-    final int opApply(int delegate(ref VfsFile) dg)
+-    in { assert( valid ); }
+-    body
+-    {
+-        int result = 0;
+-        auto file = new ZipFile;
+-
+-        foreach( entry ; group )
+-        {
+-            file.reset(archive,entry.parent,entry.entry);
+-            VfsFile x = file;
+-            if( (result = dg(x)) != 0 )
+-                break;
+-        }
+-
+-        return result;
+-    }
+-
+-    final uint files()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return group.length;
+-    }
+-
+-    final ulong bytes()
+-    in { assert( valid ); }
+-    body
+-    {
+-        return stats.bytes;
+-    }
+-
+-private:
+-    ZipFolder archive;
+-    FileEntry[] group;
+-    VfsStats stats;
+-
+-    struct FileEntry
+-    {
+-        Entry* parent;
+-        Entry* entry;
+-    }
+-
+-    this(ZipFolder archive, ZipSubFolderGroup host, VfsFilter filter)
+-    out { assert( valid ); }
+-    body
+-    {
+-        this.archive = archive;
+-        foreach( folder ; host.members )
+-            foreach( file ; folder.files(stats, filter) )
+-                group ~= FileEntry(folder.entry, file);
+-    }
+-
+-    final bool valid()
+-    {
+-        return (archive !is null) && !archive.closed;
+-    }
+-}
+-
+-// ************************************************************************ //
+-// ************************************************************************ //
+-
+-private:
+-
+-void error(char[] msg)
+-{
+-    throw new Exception(msg);
+-}
+-
+-void mutate_error(char[] method)
+-{
+-    error(method ~ ": mutating the contents of a ZipFolder "
+-            "is not supported yet; terribly sorry");
+-}
+-
+-bool nz(char[] s)
+-{
+-    return s.length > 0;
+-}
+-
+-bool zero(char[] s)
+-{
+-    return s.length == 0;
+-}
+-
+-bool single_path_part(char[] s)
+-{
+-    foreach( c ; s )
+-        if( c == '/' ) return false;
+-    return true;
+-}
+-
+-char[] dir_app(char[] dir, char[] name)
+-{
+-    return dir ~ (dir[$-1]!='/' ? "/" : "") ~ name;
+-}
+-
+-void headTail(char[] path, out char[] head, out char[] tail)
+-{
+-    foreach( i,dchar c ; path[1..$] )
+-        if( c == '/' )
+-        {
+-            head = path[0..i+1];
+-            tail = path[i+2..$];
+-            return;
+-        }
+-
+-    head = path;
+-    tail = null;
+-}
+-
+-unittest
+-{
+-    char[] h,t;
+-
+-    headTail("/a/b/c", h, t);
+-    assert( h == "/a" );
+-    assert( t == "b/c" );
+-
+-    headTail("a/b/c", h, t);
+-    assert( h == "a" );
+-    assert( t == "b/c" );
+-
+-    headTail("a/", h, t);
+-    assert( h == "a" );
+-    assert( t == "" );
+-
+-    headTail("a", h, t);
+-    assert( h == "a" );
+-    assert( t == "" );
+-}
+-
+-// ************************************************************************** //
+-// ************************************************************************** //
+-// ************************************************************************** //
+-
+-// Dependencies
+-private:
+-import tango.io.device.Conduit : Conduit;
+-
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Prerelease
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-//module tangox.io.stream.DummyStream;
+-
+-//import tango.io.device.Conduit : Conduit;
+-//import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-
+-/**
+- * The dummy stream classes are used to provide simple, empty stream objects
+- * where one is required, but none is available.
+- *
+- * Note that, currently, these classes return 'null' for the underlying
+- * conduit, which will likely break code which expects streams to have an
+- * underlying conduit.
+- */
+-private deprecated class DummyInputStream : InputStream // IConduit.Seek
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    override InputStream input() {return null;}
+-    override IConduit conduit() { return null; }
+-    override void close() {}
+-    override size_t read(void[] dst) { return IConduit.Eof; }
+-    override InputStream flush() { return this; }
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0) { return 0; }
+-}
+-
+-/// ditto
+-private deprecated class DummyOutputStream : OutputStream //, IConduit.Seek
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    override OutputStream output() {return null;}
+-    override IConduit conduit() { return null; }
+-    override void close() {}
+-    override size_t write(void[] src) { return IConduit.Eof; }
+-    override OutputStream copy(InputStream src, size_t max=-1)
+-    {
+-        Conduit.transfer(src, this, max);
+-        return this;
+-    }
+-    override OutputStream flush() { return this; }
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0) { return 0; }
+-}
+-
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Prerelease
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-//module tangox.io.stream.EventStream;
+-
+-//import tango.io.device.Conduit : Conduit;
+-//import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-
+-/**
+- * The event stream classes are designed to allow you to receive feedback on
+- * how a stream chain is being used.  This is done through the use of
+- * delegate callbacks which are invoked just before the associated method is
+- * complete.
+- */
+-class EventSeekInputStream : InputStream //, IConduit.Seek
+-{
+-    ///
+-    struct Callbacks
+-    {
+-        void delegate()                     close; ///
+-        void delegate()                     clear; ///
+-        void delegate(uint, void[])         read; ///
+-        void delegate(long, long, Anchor)   seek; ///
+-    }
+-
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    ///
+-    this(InputStream source, Callbacks callbacks)
+-    in
+-    {
+-        assert( source !is null );
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this.callbacks = callbacks;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    override void close()
+-    {
+-        source.close;
+-        source = null;
+-        seeker = null;
+-        if( callbacks.close ) callbacks.close();
+-    }
+-
+-    override size_t read(void[] dst)
+-    {
+-        auto result = source.read(dst);
+-        if( callbacks.read ) callbacks.read(result, dst);
+-        return result;
+-    }
+-
+-    override InputStream flush()
+-    {
+-        source.flush();
+-        if( callbacks.clear ) callbacks.clear();
+-        return this;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        auto result = seeker.seek(offset, anchor);
+-        if( callbacks.seek ) callbacks.seek(result, offset, anchor);
+-        return result;
+-    }
+-
+-private:
+-    InputStream source;
+-    InputStream seeker; //IConduit.Seek seeker;
+-    Callbacks callbacks;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-    }
+-}
+-
+-/// ditto
+-class EventSeekOutputStream : OutputStream //, IConduit.Seek
+-{
+-    ///
+-    struct Callbacks
+-    {
+-        void delegate()                     close; ///
+-        void delegate()                     flush; ///
+-        void delegate(uint, void[])         write; ///
+-        void delegate(long, long, Anchor)   seek; ///
+-    }
+-
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    ///
+-    this(OutputStream source, Callbacks callbacks)
+-    in
+-    {
+-        assert( source !is null );
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this.callbacks = callbacks;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override OutputStream output()
+-    {
+-        return source;
+-    }
+-
+-    override void close()
+-    {
+-        source.close;
+-        source = null;
+-        seeker = null;
+-        if( callbacks.close ) callbacks.close();
+-    }
+-
+-    override size_t write(void[] dst)
+-    {
+-        auto result = source.write(dst);
+-        if( callbacks.write ) callbacks.write(result, dst);
+-        return result;
+-    }
+-
+-    override OutputStream flush()
+-    {
+-        source.flush();
+-        if( callbacks.flush ) callbacks.flush();
+-        return this;
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        auto result = seeker.seek(offset, anchor);
+-        if( callbacks.seek ) callbacks.seek(result, offset, anchor);
+-        return result;
+-    }
+-
+-    override OutputStream copy(InputStream src, size_t max=-1)
+-    {
+-        Conduit.transfer(src, this, max);
+-        return this;
+-    }
+-
+-private:
+-    OutputStream source;
+-    OutputStream seeker; //IConduit.Seek seeker;
+-    Callbacks callbacks;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-    }
+-}
+-
+-/*******************************************************************************
+-
+-    copyright:  Copyright © 2007 Daniel Keep.  All rights reserved.
+-
+-    license:    BSD style: $(LICENSE)
+-
+-    version:    Prerelease
+-
+-    author:     Daniel Keep
+-
+-*******************************************************************************/
+-
+-//module tangox.io.stream.WrapStream;
+-
+-//import tango.io.device.Conduit : Conduit;
+-//import tango.io.model.IConduit : IConduit, InputStream, OutputStream;
+-
+-/**
+- * This stream can be used to provide access to another stream.
+- * Its distinguishing feature is that users cannot close the underlying
+- * stream.
+- *
+- * This stream fully supports seeking, and as such requires that the
+- * underlying stream also support seeking.
+- */
+-class WrapSeekInputStream : InputStream //, IConduit.Seek
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    /**
+-     * Create a new wrap stream from the given source.
+-     */
+-    this(InputStream source)
+-    in
+-    {
+-        assert( source !is null );
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this._position = seeker.seek(0, Anchor.Current);
+-    }
+-
+-    /// ditto
+-    this(InputStream source, long position)
+-    in
+-    {
+-        assert( position >= 0 );
+-    }
+-    body
+-    {
+-        this(source);
+-        this._position = position;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    InputStream input()
+-    {
+-        return source;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-        seeker = null;
+-    }
+-
+-    override size_t read(void[] dst)
+-    {
+-        if( seeker.seek(0, Anchor.Current) != _position )
+-            seeker.seek(_position, Anchor.Begin);
+-
+-        auto read = source.read(dst);
+-        if( read != IConduit.Eof )
+-            _position += read;
+-
+-        return read;
+-    }
+-
+-    override InputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    override void[] load(size_t max=-1)
+-    {
+-        return Conduit.load(this, max);
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        seeker.seek(_position, Anchor.Begin);
+-        return (_position = seeker.seek(offset, anchor));
+-    }
+-
+-private:
+-    InputStream source;
+-    InputStream seeker; //IConduit.Seek seeker;
+-    long _position;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-        assert( _position >= 0 );
+-    }
+-}
+-
+-/**
+- * This stream can be used to provide access to another stream.
+- * Its distinguishing feature is that the users cannot close the underlying
+- * stream.
+- *
+- * This stream fully supports seeking, and as such requires that the
+- * underlying stream also support seeking.
+- */
+-class WrapSeekOutputStream : OutputStream//, IConduit.Seek
+-{
+-    //alias IConduit.Seek.Anchor Anchor;
+-
+-    /**
+-     * Create a new wrap stream from the given source.
+-     */
+-    this(OutputStream source)
+-    in
+-    {
+-        assert( (cast(IConduit.Seek) source.conduit) !is null );
+-    }
+-    body
+-    {
+-        this.source = source;
+-        this.seeker = source; //cast(IConduit.Seek) source;
+-        this._position = seeker.seek(0, Anchor.Current);
+-    }
+-
+-    /// ditto
+-    this(OutputStream source, long position)
+-    in
+-    {
+-        assert( position >= 0 );
+-    }
+-    body
+-    {
+-        this(source);
+-        this._position = position;
+-    }
+-
+-    override IConduit conduit()
+-    {
+-        return source.conduit;
+-    }
+-
+-    override OutputStream output()
+-    {
+-        return source;
+-    }
+-
+-    override void close()
+-    {
+-        source = null;
+-        seeker = null;
+-    }
+-
+-    size_t write(void[] src)
+-    {
+-        if( seeker.seek(0, Anchor.Current) != _position )
+-            seeker.seek(_position, Anchor.Begin);
+-
+-        auto wrote = source.write(src);
+-        if( wrote != IConduit.Eof )
+-            _position += wrote;
+-        return wrote;
+-    }
+-
+-    override OutputStream copy(InputStream src, size_t max=-1)
+-    {
+-        Conduit.transfer(src, this, max);
+-        return this;
+-    }
+-
+-    override OutputStream flush()
+-    {
+-        source.flush();
+-        return this;
+-    }
+-
+-    override long seek(long offset, Anchor anchor = cast(Anchor)0)
+-    {
+-        seeker.seek(_position, Anchor.Begin);
+-        return (_position = seeker.seek(offset, anchor));
+-    }
+-
+-private:
+-    OutputStream source;
+-    OutputStream seeker; //IConduit.Seek seeker;
+-    long _position;
+-
+-    invariant
+-    {
+-        assert( cast(Object) source is cast(Object) seeker );
+-        assert( _position >= 0 );
+-    }
+-}
+-
+-
--- libtango-0.99.9.dfsg.orig/debian/patches/series
+++ libtango-0.99.9.dfsg/debian/patches/series
@@ -0,0 +1,4 @@
+01_versions.diff -p1
+02_compress.diff -p1
+03_ldc.diff -p1
+04_device.diff -p1
--- libtango-0.99.9.dfsg.orig/debian/patches/01_versions.diff
+++ libtango-0.99.9.dfsg/debian/patches/01_versions.diff
@@ -0,0 +1,15 @@
+--- a/tango/text/locale/Win32.d
++++ b/tango/text/locale/Win32.d
+@@ -12,6 +12,7 @@
+ 
+ module tango.text.locale.Win32;
+ 
++version (Win32) {
+ alias tango.text.locale.Win32 nativeMethods;
+ 
+ extern (Windows)
+@@ -51,3 +52,4 @@
+ 
+   return CompareStringW(sortId, ignoreCase ? 0x1 : 0x0, string1.ptr, len1, string2.ptr, len2) - 2;
+ }
++} // Win32